1.5.1 // KeyHandler Swiftification.

Merge pull request !22 from upd/1.5.1
This commit is contained in:
ShikiSuen 2022-04-18 11:05:38 +00:00 committed by Gitee
commit a201ee4d14
609 changed files with 2774 additions and 162094 deletions


@ -1,171 +1 @@
---
Language: Cpp
# BasedOnStyle: Microsoft
AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveMacros: false
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: true
AllowAllConstructorInitializersOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowShortBlocksOnASingleLine: Never
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: All
AllowShortLambdasOnASingleLine: All
AllowShortIfStatementsOnASingleLine: WithoutElse
AllowShortLoopsOnASingleLine: true
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: true
AlwaysBreakTemplateDeclarations: Yes
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterCaseLabel: false
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
AfterExternBlock: false
BeforeCatch: false
BeforeElse: false
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Attach
BreakBeforeInheritanceComma: false
BreakInheritanceList: BeforeColon
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeColon
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: true
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DeriveLineEnding: true
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IncludeBlocks: Regroup
IncludeCategories:
- Regex: '^<ext/.*\.h>'
Priority: 2
SortPriority: 0
- Regex: '^<.*\.h>'
Priority: 1
SortPriority: 0
- Regex: '^<.*'
Priority: 2
SortPriority: 0
- Regex: '.*'
Priority: 3
SortPriority: 0
IncludeIsMainRegex: '([-_](test|unittest))?$'
IncludeIsMainSourceRegex: ''
IndentCaseLabels: true
IndentGotoLabels: true
IndentPPDirectives: None
IndentWidth: 4
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Never
ObjCBlockIndentWidth: 4
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 1
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyReturnTypeOnItsOwnLine: 200
PointerAlignment: Left
RawStringFormats:
- Language: Cpp
Delimiters:
- h
- m
- hh
- mm
- cc
- CC
- cpp
- Cpp
- CPP
- 'c++'
- 'C++'
CanonicalDelimiter: ''
BasedOnStyle: Microsoft
- Language: TextProto
Delimiters:
- pb
- PB
- proto
- PROTO
EnclosingFunctions:
- EqualsProto
- EquivToProto
- PARSE_PARTIAL_TEXT_PROTO
- PARSE_TEST_PROTO
- PARSE_TEXT_PROTO
- ParseTextOrDie
- ParseTextProtoOrDie
CanonicalDelimiter: ''
BasedOnStyle: Microsoft
ReflowComments: true
SortIncludes: true
SortUsingDeclarations: true
SpaceAfterCStyleCast: false
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyBlock: false
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 2
SpacesInAngles: false
SpacesInConditionalStatement: false
SpacesInContainerLiterals: true
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
SpaceBeforeSquareBrackets: false
Standard: Auto
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
TabWidth: 4
UseCRLF: false
UseTab: Always
...
BasedOnStyle: Microsoft


@ -31,10 +31,12 @@ extension String {
// Ref: https://stackoverflow.com/a/40993403/4162914 && https://stackoverflow.com/a/71291137/4162914
do {
let regex = try NSRegularExpression(
pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines])
let range = NSRange(self.startIndex..., in: self)
pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines]
)
let range = NSRange(startIndex..., in: self)
self = regex.stringByReplacingMatches(
in: self, options: [], range: range, withTemplate: replaceWith)
in: self, options: [], range: range, withTemplate: replaceWith
)
} catch { return }
}
}
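For reference, the post-change helper assembles into this self-contained sketch (only the `import` line is added here; everything else reads exactly as the new lines above):

```swift
import Foundation

extension String {
  /// Regex-based in-place replacement, as it reads after this change.
  /// Ref: https://stackoverflow.com/a/40993403/4162914 && https://stackoverflow.com/a/71291137/4162914
  fileprivate mutating func regReplace(pattern: String, replaceWith: String = "") {
    do {
      let regex = try NSRegularExpression(
        pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines]
      )
      let range = NSRange(startIndex..., in: self)
      self = regex.stringByReplacingMatches(
        in: self, options: [], range: range, withTemplate: replaceWith
      )
    } catch { return }  // silently bail out on an invalid pattern
  }
}
```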
@ -59,9 +61,11 @@ if CommandLine.arguments.count == 3 {
}
strXcodeProjContent.regReplace(
pattern: #"CURRENT_PROJECT_VERSION = .*$"#, replaceWith: "CURRENT_PROJECT_VERSION = " + verBuild + ";")
pattern: #"CURRENT_PROJECT_VERSION = .*$"#, replaceWith: "CURRENT_PROJECT_VERSION = " + verBuild + ";"
)
strXcodeProjContent.regReplace(
pattern: #"MARKETING_VERSION = .*$"#, replaceWith: "MARKETING_VERSION = " + verMarket + ";")
pattern: #"MARKETING_VERSION = .*$"#, replaceWith: "MARKETING_VERSION = " + verMarket + ";"
)
do {
try strXcodeProjContent.write(to: URL(fileURLWithPath: dirXcodeProjectFile), atomically: false, encoding: .utf8)
} catch {
@ -81,5 +85,4 @@ if CommandLine.arguments.count == 3 {
theDictionary?.setValue(verMarket, forKeyPath: "CFBundleShortVersionString")
theDictionary?.write(toFile: dirUpdateInfoPlist, atomically: true)
NSLog(" - 更新用通知 plist 版本資訊更新完成:\(verMarket) \(verBuild)")
}
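A quick usage sketch of the two replacements above; the project content and version values here are hypothetical stand-ins, not the repo's real data:

```swift
// Hypothetical pbxproj excerpt and version strings, for illustration only.
var strXcodeProjContent = """
  CURRENT_PROJECT_VERSION = 1;
  MARKETING_VERSION = 1.5.0;
  """
let verBuild = "1851"  // hypothetical build number
let verMarket = "1.5.1"

strXcodeProjContent.regReplace(
  pattern: #"CURRENT_PROJECT_VERSION = .*$"#,
  replaceWith: "CURRENT_PROJECT_VERSION = " + verBuild + ";"
)
strXcodeProjContent.regReplace(
  pattern: #"MARKETING_VERSION = .*$"#,
  replaceWith: "MARKETING_VERSION = " + verMarket + ";"
)
// Thanks to .anchorsMatchLines, $ anchors at each line end, so both
// fields are rewritten in place within the multi-line string.
```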

CHANGELOG (new file, 5 lines)

@ -0,0 +1,5 @@
https://gitee.com/vchewing/vChewing-macOS/wikis/sort_id=5401886
Please read the CHANGELOG through the wiki link above, since it would be a disaster to manage it with Git alongside the other files in this repo.
For ease of content management, please use the URL above to view the release notes of all previous versions.

CONTRIBUTING.md (new file, 42 lines)

@ -0,0 +1,42 @@
# Notes on Contributing to vChewing Input Method Development
The vChewing Input Method welcomes contributors. To keep participants from wasting their enthusiasm, this document explains where the project needs help the most right now.
1. Rewriting, in Swift, the two component packages in this project that originate from LibFormosa:
- The Mandarin component, which parses Mandarin phonological data and creates and drives the Syllable Composer phonetic-parsing component.
- The Gramambular suite, which covers all remaining (Obj)C(++) files under the Source folder (except LMConsolidator).
- LMConsolidator already has a Swift version, used in the vChewing phrase editor; the main program keeps the C++ version only for easier interop with Gramambular.
- This also covers every Language-Model-related implementation, since each one derives from a language-module protocol inside Gramambular.
- LMInstantiator, the component that instantiates language modules, does not originally belong to Gramambular but is tightly coupled with the language modules derived from it.
- KeyValueBlobReader does not belong to Gramambular either, yet it is tightly coupled with those derived language modules and with KeyHandler.
2. Making the Alt+Tilde menu work properly in apps such as MS Word and in terminals (e.g. letting the arrow keys move the highlighted candidate).
- In principle this probably requires tricking the client app into believing a composition buffer is currently active. That is only a guess.
3. An SQLite implementation.
Contributions outside the items above may be ignored or declined unless specially invited or sufficiently compelling (for example, grammar fixes or better refactoring approaches). Please do not waste your own time, energy, and goodwill: always say hello before you start working.
If you have other expectations for vChewing's feature direction, vChewing will not host that debate, but you may create your own fork, as long as you follow the MIT-NTL license and do not reuse the vChewing brand name.
## Formatting Rules
This project enforces source-code formatting, with different rules for Swift than for the (Obj)C(++) family:
- Swift: [Apple's official swift-format](https://github.com/apple/swift-format), with the following overrides (a sample config assembling them is sketched after this file):
  - Indentation uses `"indentation" : { "tabs" : 1 },` only; no indenting with spaces.
  - `"indentSwitchCaseLabels" : true,`
  - `"lineLength" : 120,`
  - `"NoBlockComments" : false,`
  - `"tabWidth" : 4,`
  - `"OnlyOneTrailingClosureArgument" : false,` // for SwiftUI compatibility
  - `"UseTripleSlashForDocumentationComments" : false,`
  - `"DontRepeatTypeInStaticProperties" : false,`
- (Obj)C(++) languages: the clang-format command with the Microsoft style.
  - That style indents with four half-width spaces per level.
  - Since no new content will be written for this repo in those languages, these rules stay as they are.
As for switching Swift files to one-tab indentation: it respects every user's preference while saving as much file size as possible. Users may adjust Xcode's default tab width themselves.
$ EOF.
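Assembled from the overrides listed above, the `.clang-format-swift.json` passed to swift-format plausibly looks like the sketch below. The split between top-level settings and the `rules` dictionary follows swift-format's configuration schema; treat this as an illustration, not the repo's verbatim file:

```json
{
  "indentation" : { "tabs" : 1 },
  "indentSwitchCaseLabels" : true,
  "lineLength" : 120,
  "tabWidth" : 4,
  "rules" : {
    "NoBlockComments" : false,
    "OnlyOneTrailingClosureArgument" : false,
    "UseTripleSlashForDocumentationComments" : false,
    "DontRepeatTypeInStaticProperties" : false
  }
}
```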


@ -27,20 +27,24 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import Foundation
// MARK: -
extension String {
fileprivate mutating func regReplace(pattern: String, replaceWith: String = "") {
// Ref: https://stackoverflow.com/a/40993403/4162914 && https://stackoverflow.com/a/71291137/4162914
do {
let regex = try NSRegularExpression(
pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines])
let range = NSRange(self.startIndex..., in: self)
pattern: pattern, options: [.caseInsensitive, .anchorsMatchLines]
)
let range = NSRange(startIndex..., in: self)
self = regex.stringByReplacingMatches(
in: self, options: [], range: range, withTemplate: replaceWith)
in: self, options: [], range: range, withTemplate: replaceWith
)
} catch { return }
}
}
// MARK: -
// Ref: https://stackoverflow.com/a/32581409/4162914
extension Float {
fileprivate func rounded(toPlaces places: Int) -> Float {
@ -50,6 +54,7 @@ extension Float {
}
// MARK: -
// Ref: https://stackoverflow.com/a/41581695/4162914
precedencegroup ExponentiationPrecedence {
associativity: right
@ -59,11 +64,11 @@ precedencegroup ExponentiationPrecedence {
infix operator **: ExponentiationPrecedence
func ** (_ base: Double, _ exp: Double) -> Double {
return pow(base, exp)
pow(base, exp)
}
func ** (_ base: Float, _ exp: Float) -> Float {
return pow(base, exp)
pow(base, exp)
}
// MARK: -
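The hunk above drops the explicit `return` from the operator bodies; the `rounded(toPlaces:)` body itself is elided by the diff. Per the reference the file cites (https://stackoverflow.com/a/32581409), a plausible implementation and a quick usage check look like this; the `higherThan: MultiplicationPrecedence` clause is an assumption, since the diff cuts off the precedence group:

```swift
import Foundation

extension Float {
  /// Likely body per the cited reference; the diff elides the real one.
  fileprivate func rounded(toPlaces places: Int) -> Float {
    let divisor = pow(10.0, Float(places))
    return (self * divisor).rounded() / divisor
  }
}

precedencegroup ExponentiationPrecedence {
  associativity: right
  higherThan: MultiplicationPrecedence  // assumed; cut off in the hunk above
}
infix operator **: ExponentiationPrecedence

func ** (_ base: Float, _ exp: Float) -> Float {
  pow(base, exp)  // implicit return, matching the style change above
}

let weight = (2.0 as Float) ** 10.0  // 1024.0
print(weight.rounded(toPlaces: 3))   // "1024.0"
```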
@ -101,7 +106,7 @@ private let urlOutputCHT: String = "./data-cht.txt"
func rawDictForPhrases(isCHS: Bool) -> [Entry] {
var arrEntryRAW: [Entry] = []
var strRAW: String = ""
var strRAW = ""
let urlCustom: String = isCHS ? urlCHSforCustom : urlCHTforCustom
let urlMCBP: String = isCHS ? urlCHSforMCBP : urlCHTforMCBP
let urlMOE: String = isCHS ? urlCHSforMOE : urlCHTforMOE
@ -136,7 +141,7 @@ func rawDictForPhrases(isCHS: Bool) -> [Entry] {
for lineData in arrData {
//
let arrLineData = lineData.components(separatedBy: " ")
var varLineDataProcessed: String = ""
var varLineDataProcessed = ""
var count = 0
for currentCell in arrLineData {
count += 1
@ -165,9 +170,10 @@ func rawDictForPhrases(isCHS: Bool) -> [Entry] {
}
if phrase != "" { //
arrEntryRAW += [
Entry.init(
Entry(
valPhone: phone, valPhrase: phrase, valWeight: 0.0,
valCount: occurrence)
valCount: occurrence
)
]
}
}
@ -179,7 +185,7 @@ func rawDictForPhrases(isCHS: Bool) -> [Entry] {
func rawDictForKanjis(isCHS: Bool) -> [Entry] {
var arrEntryRAW: [Entry] = []
var strRAW: String = ""
var strRAW = ""
let i18n: String = isCHS ? "簡體中文" : "繁體中文"
//
do {
@ -201,7 +207,7 @@ func rawDictForKanjis(isCHS: Bool) -> [Entry] {
//
let arrData = Array(
NSOrderedSet(array: strRAW.components(separatedBy: "\n")).array as! [String])
var varLineData: String = ""
var varLineData = ""
for lineData in arrData {
// 1,2,4 1,3,4
let varLineDataPre = lineData.components(separatedBy: " ").prefix(isCHS ? 2 : 1)
@ -212,7 +218,7 @@ func rawDictForKanjis(isCHS: Bool) -> [Entry] {
separator: "\t")
varLineData = varLineDataPre + "\t" + varLineDataPost
let arrLineData = varLineData.components(separatedBy: " ")
var varLineDataProcessed: String = ""
var varLineDataProcessed = ""
var count = 0
for currentCell in arrLineData {
count += 1
@ -241,9 +247,10 @@ func rawDictForKanjis(isCHS: Bool) -> [Entry] {
}
if phrase != "" { //
arrEntryRAW += [
Entry.init(
Entry(
valPhone: phone, valPhrase: phrase, valWeight: 0.0,
valCount: occurrence)
valCount: occurrence
)
]
}
}
@ -255,7 +262,7 @@ func rawDictForKanjis(isCHS: Bool) -> [Entry] {
func rawDictForNonKanjis(isCHS: Bool) -> [Entry] {
var arrEntryRAW: [Entry] = []
var strRAW: String = ""
var strRAW = ""
let i18n: String = isCHS ? "簡體中文" : "繁體中文"
//
do {
@ -279,14 +286,14 @@ func rawDictForNonKanjis(isCHS: Bool) -> [Entry] {
//
let arrData = Array(
NSOrderedSet(array: strRAW.components(separatedBy: "\n")).array as! [String])
var varLineData: String = ""
var varLineData = ""
for lineData in arrData {
varLineData = lineData
//
varLineData = varLineData.components(separatedBy: " ").prefix(3).joined(
separator: "\t") //
let arrLineData = varLineData.components(separatedBy: " ")
var varLineDataProcessed: String = ""
var varLineDataProcessed = ""
var count = 0
for currentCell in arrLineData {
count += 1
@ -315,9 +322,10 @@ func rawDictForNonKanjis(isCHS: Bool) -> [Entry] {
}
if phrase != "" { //
arrEntryRAW += [
Entry.init(
Entry(
valPhone: phone, valPhrase: phrase, valWeight: 0.0,
valCount: occurrence)
valCount: occurrence
)
]
}
}
@ -356,16 +364,17 @@ func weightAndSort(_ arrStructUncalculated: [Entry], isCHS: Bool) -> [Entry] {
}
let weightRounded: Float = weight.rounded(toPlaces: 3) //
arrStructCalculated += [
Entry.init(
Entry(
valPhone: entry.valPhone, valPhrase: entry.valPhrase, valWeight: weightRounded,
valCount: entry.valCount)
valCount: entry.valCount
)
]
}
NSLog(" - \(i18n): 成功計算權重。")
// ==========================================
//
let arrStructSorted: [Entry] = arrStructCalculated.sorted(by: { (lhs, rhs) -> Bool in
return (lhs.valPhone, rhs.valCount) < (rhs.valPhone, lhs.valCount)
let arrStructSorted: [Entry] = arrStructCalculated.sorted(by: { lhs, rhs -> Bool in
(lhs.valPhone, rhs.valCount) < (rhs.valPhone, lhs.valCount)
})
NSLog(" - \(i18n): 排序整理完畢,準備編譯要寫入的檔案內容。")
return arrStructSorted
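The `Entry(valPhone:valPhrase:valWeight:valCount:)` initializer and the tuple comparator above imply roughly the shape below; the struct definition and field types are not shown in the diff, so they are assumptions here:

```swift
// Assumed record shape, inferred from the initializer calls in the hunks above.
struct Entry {
  let valPhone: String
  let valPhrase: String
  let valWeight: Float
  let valCount: Int
}

let entries = [
  Entry(valPhone: "ㄅㄚ", valPhrase: "八", valWeight: 0.0, valCount: 3),
  Entry(valPhone: "ㄅㄚ", valPhrase: "巴", valWeight: 0.0, valCount: 9),
]

// Tuples compare lexicographically, so this sorts by phone ascending and,
// within the same phone, by count descending (note the swapped count operands).
let arrStructSorted = entries.sorted { lhs, rhs -> Bool in
  (lhs.valPhone, rhs.valCount) < (rhs.valPhone, lhs.valCount)
}
// arrStructSorted.first?.valPhrase == "巴" — the higher count ranks first.
```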
@ -406,6 +415,7 @@ func fileOutput(isCHS: Bool) {
}
// MARK: -
func main() {
NSLog("// 準備編譯繁體中文核心語料檔案。")
fileOutput(isCHS: false)


@ -31,7 +31,8 @@ private let kTargetType = "app"
private let kTargetBundle = "vChewing.app"
private let urlDestinationPartial = FileManager.default.urls(
for: .inputMethodsDirectory, in: .userDomainMask)[0]
for: .inputMethodsDirectory, in: .userDomainMask
)[0]
private let urlTargetPartial = urlDestinationPartial.appendingPathComponent(kTargetBundle)
private let urlTargetFullBinPartial = urlTargetPartial.appendingPathComponent("Contents/MacOS/")
.appendingPathComponent(kTargetBin)
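For orientation, the destination computed above is the per-user Input Methods folder; a minimal standalone sketch (constant names copied from the hunk):

```swift
import Foundation

// Resolves to ~/Library/Input Methods for the current user.
let urlDestinationPartial = FileManager.default.urls(
  for: .inputMethodsDirectory, in: .userDomainMask
)[0]
let urlTargetPartial = urlDestinationPartial.appendingPathComponent("vChewing.app")
print(urlTargetPartial.path)  // e.g. /Users/<name>/Library/Input Methods/vChewing.app
```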
@ -46,12 +47,12 @@ private let kTranslocationRemovalDeadline: TimeInterval = 60.0
@NSApplicationMain
@objc(AppDelegate)
class AppDelegate: NSWindowController, NSApplicationDelegate {
@IBOutlet weak private var installButton: NSButton!
@IBOutlet weak private var cancelButton: NSButton!
@IBOutlet weak private var progressSheet: NSWindow!
@IBOutlet weak private var progressIndicator: NSProgressIndicator!
@IBOutlet weak private var appVersionLabel: NSTextField!
@IBOutlet weak private var appCopyrightLabel: NSTextField!
@IBOutlet private var installButton: NSButton!
@IBOutlet private var cancelButton: NSButton!
@IBOutlet private var progressSheet: NSWindow!
@IBOutlet private var progressIndicator: NSProgressIndicator!
@IBOutlet private var appVersionLabel: NSTextField!
@IBOutlet private var appCopyrightLabel: NSTextField!
@IBOutlet private var appEULAContent: NSTextView!
private var archiveUtil: ArchiveUtil?
@ -69,7 +70,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
alert.runModal()
}
func applicationDidFinishLaunching(_ notification: Notification) {
func applicationDidFinishLaunching(_: Notification) {
guard
let installingVersion = Bundle.main.infoDictionary?[kCFBundleVersionKey as String]
as? String,
@ -96,11 +97,13 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
appEULAContent.string = eulaContent
}
appVersionLabel.stringValue = String(
format: "%@ Build %@", versionString, installingVersion)
format: "%@ Build %@", versionString, installingVersion
)
window?.title = String(
format: NSLocalizedString("%@ (for version %@, r%@)", comment: ""), window?.title ?? "",
versionString, installingVersion)
versionString, installingVersion
)
window?.standardWindowButton(.closeButton)?.isHidden = true
window?.standardWindowButton(.miniaturizeButton)?.isHidden = true
window?.standardWindowButton(.zoomButton)?.isHidden = true
@ -130,7 +133,7 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
NSApp.activate(ignoringOtherApps: true)
}
@IBAction func agreeAndInstallAction(_ sender: AnyObject) {
@IBAction func agreeAndInstallAction(_: AnyObject) {
cancelButton.isEnabled = false
installButton.isEnabled = false
removeThenInstallInputMethod()
@ -154,7 +157,8 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
== false
{
installInputMethod(
previousExists: false, previousVersionNotFullyDeactivatedWarning: false)
previousExists: false, previousVersionNotFullyDeactivatedWarning: false
)
return
}
@ -194,11 +198,13 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
if returnCode == .continue {
self.installInputMethod(
previousExists: true,
previousVersionNotFullyDeactivatedWarning: false)
previousVersionNotFullyDeactivatedWarning: false
)
} else {
self.installInputMethod(
previousExists: true,
previousVersionNotFullyDeactivatedWarning: true)
previousVersionNotFullyDeactivatedWarning: true
)
}
}
}
@ -206,15 +212,17 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
translocationRemovalStartTime = Date()
Timer.scheduledTimer(
timeInterval: kTranslocationRemovalTickInterval, target: self,
selector: #selector(timerTick(_:)), userInfo: nil, repeats: true)
selector: #selector(timerTick(_:)), userInfo: nil, repeats: true
)
} else {
installInputMethod(
previousExists: false, previousVersionNotFullyDeactivatedWarning: false)
previousExists: false, previousVersionNotFullyDeactivatedWarning: false
)
}
}
func installInputMethod(
previousExists: Bool, previousVersionNotFullyDeactivatedWarning warning: Bool
previousExists _: Bool, previousVersionNotFullyDeactivatedWarning warning: Bool
) {
guard
let targetBundle = archiveUtil?.unzipNotarizedArchive()
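The timer branch in the hunk above polls until app-translocation removal completes or a deadline passes. A hedged standalone sketch of that pattern, using the 60-second `kTranslocationRemovalDeadline` declared earlier in this file and an assumed tick interval (the real `timerTick(_:)` body is not shown in this diff):

```swift
import Foundation

let translocationRemovalStartTime = Date()
let tickInterval: TimeInterval = 0.5  // assumed; kTranslocationRemovalTickInterval's value is not shown
let deadline: TimeInterval = 60.0     // matches kTranslocationRemovalDeadline above

Timer.scheduledTimer(withTimeInterval: tickInterval, repeats: true) { timer in
  if Date().timeIntervalSince(translocationRemovalStartTime) >= deadline {
    timer.invalidate()  // deadline reached; stop waiting and move on
  }
}
RunLoop.main.run()  // keep the run loop alive so the timer can fire (script-style demo)
```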
@ -234,7 +242,8 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
runAlertPanel(
title: NSLocalizedString("Install Failed", comment: ""),
message: NSLocalizedString("Cannot copy the file to the destination.", comment: ""),
buttonTitle: NSLocalizedString("Cancel", comment: ""))
buttonTitle: NSLocalizedString("Cancel", comment: "")
)
endAppWithDelay()
}
@ -254,11 +263,14 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
if !status {
let message = String(
format: NSLocalizedString(
"Cannot find input source %@ after registration.", comment: ""),
imeIdentifier)
"Cannot find input source %@ after registration.", comment: ""
),
imeIdentifier
)
runAlertPanel(
title: NSLocalizedString("Fatal Error", comment: ""), message: message,
buttonTitle: NSLocalizedString("Abort", comment: ""))
buttonTitle: NSLocalizedString("Abort", comment: "")
)
endAppWithDelay()
return
}
@ -267,11 +279,14 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
if inputSource == nil {
let message = String(
format: NSLocalizedString(
"Cannot find input source %@ after registration.", comment: ""),
imeIdentifier)
"Cannot find input source %@ after registration.", comment: ""
),
imeIdentifier
)
runAlertPanel(
title: NSLocalizedString("Fatal Error", comment: ""), message: message,
buttonTitle: NSLocalizedString("Abort", comment: ""))
buttonTitle: NSLocalizedString("Abort", comment: "")
)
}
}
@ -304,20 +319,24 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
ntfPostInstall.messageText = NSLocalizedString("Attention", comment: "")
ntfPostInstall.informativeText = NSLocalizedString(
"vChewing is upgraded, but please log out or reboot for the new version to be fully functional.",
comment: "")
comment: ""
)
ntfPostInstall.addButton(withTitle: NSLocalizedString("OK", comment: ""))
} else {
if !mainInputSourceEnabled && !isMacOS12OrAbove {
if !mainInputSourceEnabled, !isMacOS12OrAbove {
ntfPostInstall.messageText = NSLocalizedString("Warning", comment: "")
ntfPostInstall.informativeText = NSLocalizedString(
"Input method may not be fully enabled. Please enable it through System Preferences > Keyboard > Input Sources.",
comment: "")
comment: ""
)
ntfPostInstall.addButton(withTitle: NSLocalizedString("Continue", comment: ""))
} else {
ntfPostInstall.messageText = NSLocalizedString(
"Installation Successful", comment: "")
"Installation Successful", comment: ""
)
ntfPostInstall.informativeText = NSLocalizedString(
"vChewing is ready to use.", comment: "")
"vChewing is ready to use.", comment: ""
)
ntfPostInstall.addButton(withTitle: NSLocalizedString("OK", comment: ""))
}
}
@ -332,12 +351,11 @@ class AppDelegate: NSWindowController, NSApplicationDelegate {
}
}
@IBAction func cancelAction(_ sender: AnyObject) {
@IBAction func cancelAction(_: AnyObject) {
NSApp.terminate(self)
}
func windowWillClose(_ notification: Notification) {
func windowWillClose(_: Notification) {
NSApp.terminate(self)
}
}


@ -53,7 +53,8 @@ struct ArchiveUtil {
let notarizedArchiveExists = FileManager.default.fileExists(atPath: notarizedArchive)
let devModeAppBundleExists = FileManager.default.fileExists(atPath: devModeAppBundlePath)
if count > 0 {
if !notarizedArchivesContent.isEmpty {
// count > 0 is rewritten as !isEmpty here to satisfy the linter.
if count != 1 || !notarizedArchiveExists || devModeAppBundleExists {
let alert = NSAlert()
alert.alertStyle = .informational
@ -105,7 +106,8 @@ struct ArchiveUtil {
let result = (tempFilePath as NSString).appendingPathComponent(targetAppBundleName)
assert(
FileManager.default.fileExists(atPath: result),
"App bundle must be unzipped at \(result).")
"App bundle must be unzipped at \(result)."
)
return result
}
@ -130,5 +132,4 @@ struct ArchiveUtil {
notarizedArchiveBasename)
return notarizedArchive
}
}


@ -20,13 +20,17 @@ debug:
DSTROOT = /Library/Input Methods
VC_APP_ROOT = $(DSTROOT)/vChewing.app
.PHONY: clang-format lint batchfix format
.PHONY: clang-format lint batchfix format clang-format-swift clang-format-cpp
format: batchfix clang-format lint
clang-format:
clang-format: clang-format-swift clang-format-cpp
clang-format-swift:
@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swift-format format --in-place --configuration ./.clang-format-swift.json --parallel
@git ls-files --exclude-standard | grep -E '\.(cpp|hpp|c|cc|cxx|hxx|ixx|h|m|mm|hh)$$' | xargs clang-format -i -style=Microsoft
clang-format-cpp:
@git ls-files --exclude-standard | grep -E '\.(cpp|hpp|c|cc|cxx|hxx|ixx|h|m|mm|hh)$$' | xargs clang-format -i
lint:
@git ls-files --exclude-standard | grep -E '\.swift$$' | xargs swift-format lint --configuration ./.clang-format-swift.json --parallel


@ -6,14 +6,14 @@ from common import sort_items
if len(sys.argv) < 2:
print("Sort the dictionary")
print(("Usage: ", sys.argv[0], "[input] ([output])"))
print(("Usage: ", sys.argv[0], "[inputVal] ([outputVal])"))
exit(1)
input = sys.argv[1]
inputVal = sys.argv[1]
if len(sys.argv) < 3:
output = input
outputVal = inputVal
else:
output = sys.argv[2]
outputVal = sys.argv[2]
sort_items(input, output)
sort_items(inputVal, outputVal)


@ -1,5 +0,0 @@
---
Language: Cpp
BasedOnStyle: Google
PointerAlignment: Left
...


@ -1,56 +0,0 @@
# This is the official list of benchmark authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
#
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
#
# Please keep the list sorted.
Albert Pretorius <pretoalb@gmail.com>
Alex Steele <steeleal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Carto
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dirac Research
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Gergő Szitár <szitar.gergo@gmail.com>
Google Inc.
International Business Machines Corporation
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
MongoDB Inc.
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Paul Redmond <paul.redmond@gmail.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Roman Lebedev <lebedev.ri@gmail.com>
Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Steinar H. Gunderson <sgunderson@bigfoot.com>
Stripe, Inc.
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>


@ -1,44 +0,0 @@
licenses(["notice"])
config_setting(
name = "windows",
values = {
"cpu": "x64_windows",
},
visibility = [":__subpackages__"],
)
load("@rules_cc//cc:defs.bzl", "cc_library")
cc_library(
name = "benchmark",
srcs = glob(
[
"src/*.cc",
"src/*.h",
],
exclude = ["src/benchmark_main.cc"],
),
hdrs = ["include/benchmark/benchmark.h"],
linkopts = select({
":windows": ["-DEFAULTLIB:shlwapi.lib"],
"//conditions:default": ["-pthread"],
}),
strip_include_prefix = "include",
visibility = ["//visibility:public"],
)
cc_library(
name = "benchmark_main",
srcs = ["src/benchmark_main.cc"],
hdrs = ["include/benchmark/benchmark.h"],
strip_include_prefix = "include",
visibility = ["//visibility:public"],
deps = [":benchmark"],
)
cc_library(
name = "benchmark_internal_headers",
hdrs = glob(["src/*.h"]),
visibility = ["//test:__pkg__"],
)


@ -1,279 +0,0 @@
cmake_minimum_required (VERSION 3.5.1)
foreach(p
CMP0048 # OK to clear PROJECT_VERSION on project()
CMP0054 # CMake 3.1
CMP0056 # export EXE_LINKER_FLAGS to try_run
CMP0057 # Support no if() IN_LIST operator
CMP0063 # Honor visibility properties for all targets
CMP0077 # Allow option() overrides in importing projects
)
if(POLICY ${p})
cmake_policy(SET ${p} NEW)
endif()
endforeach()
project (benchmark CXX)
option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." OFF)
option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
if(NOT MSVC)
option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
else()
set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC" FORCE)
endif()
option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)
# Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
# may require downloading the source code.
option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree building of unmet dependencies" OFF)
# This option can be used to disable building and running unit tests which depend on gtest
# in cases where it is not possible to build or find a valid version of gtest.
option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
function(should_enable_assembly_tests)
if(CMAKE_BUILD_TYPE)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
if (${CMAKE_BUILD_TYPE_LOWER} MATCHES "coverage")
# FIXME: The --coverage flag needs to be removed when building assembly
# tests for this to work.
return()
endif()
endif()
if (MSVC)
return()
elseif(NOT CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
return()
elseif(NOT CMAKE_SIZEOF_VOID_P EQUAL 8)
# FIXME: Make these work on 32 bit builds
return()
elseif(BENCHMARK_BUILD_32_BITS)
# FIXME: Make these work on 32 bit builds
return()
endif()
find_program(LLVM_FILECHECK_EXE FileCheck)
if (LLVM_FILECHECK_EXE)
set(LLVM_FILECHECK_EXE "${LLVM_FILECHECK_EXE}" CACHE PATH "llvm filecheck" FORCE)
message(STATUS "LLVM FileCheck Found: ${LLVM_FILECHECK_EXE}")
else()
message(STATUS "Failed to find LLVM FileCheck")
return()
endif()
set(ENABLE_ASSEMBLY_TESTS_DEFAULT ON PARENT_SCOPE)
endfunction()
should_enable_assembly_tests()
# This option disables the building and running of the assembly verification tests
option(BENCHMARK_ENABLE_ASSEMBLY_TESTS "Enable building and running the assembly tests"
${ENABLE_ASSEMBLY_TESTS_DEFAULT})
# Make sure we can import our CMake functions
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules")
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
# Read the git tags to determine the project version
include(GetGitVersion)
get_git_version(GIT_VERSION)
# Tell the user what versions we are using
string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION})
message(STATUS "Version: ${VERSION}")
# The version of the libraries
set(GENERIC_LIB_VERSION ${VERSION})
string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION)
# Import our CMake modules
include(CheckCXXCompilerFlag)
include(AddCXXCompilerFlag)
include(CXXFeatureCheck)
if (BENCHMARK_BUILD_32_BITS)
add_required_cxx_compiler_flag(-m32)
endif()
if (MSVC)
# Turn compiler warnings up to 11
string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
add_definitions(-D_CRT_SECURE_NO_WARNINGS)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-EHs-)
add_cxx_compiler_flag(-EHa-)
add_definitions(-D_HAS_EXCEPTIONS=0)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_RELEASE "${CMAKE_STATIC_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_RELEASE "${CMAKE_SHARED_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_RELEASE "${CMAKE_EXE_LINKER_FLAGS_RELEASE} /LTCG")
set(CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO} /GL")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_STATIC_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_SHARED_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
string(REGEX REPLACE "[-/]INCREMENTAL" "/INCREMENTAL:NO" CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO}")
set(CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO "${CMAKE_EXE_LINKER_FLAGS_RELWITHDEBINFO} /LTCG")
set(CMAKE_CXX_FLAGS_MINSIZEREL "${CMAKE_CXX_FLAGS_MINSIZEREL} /GL")
set(CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL "${CMAKE_STATIC_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL "${CMAKE_SHARED_LINKER_FLAGS_MINSIZEREL} /LTCG")
set(CMAKE_EXE_LINKER_FLAGS_MINSIZEREL "${CMAKE_EXE_LINKER_FLAGS_MINSIZEREL} /LTCG")
endif()
else()
# Try and enable C++11. Don't use C++14 because it doesn't work in some
# configurations.
add_cxx_compiler_flag(-std=c++11)
if (NOT HAVE_CXX_FLAG_STD_CXX11)
add_cxx_compiler_flag(-std=c++0x)
endif()
# Turn compiler warnings up to 11
add_cxx_compiler_flag(-Wall)
add_cxx_compiler_flag(-Wextra)
add_cxx_compiler_flag(-Wshadow)
add_cxx_compiler_flag(-Werror RELEASE)
add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
add_cxx_compiler_flag(-Werror MINSIZEREL)
# Disabled until googletest (gmock) stops emitting variadic macro warnings
#add_cxx_compiler_flag(-pedantic)
#add_cxx_compiler_flag(-pedantic-errors)
add_cxx_compiler_flag(-Wshorten-64-to-32)
add_cxx_compiler_flag(-fstrict-aliasing)
# Disable warnings regarding deprecated parts of the library while building
# and testing those parts of the library.
add_cxx_compiler_flag(-Wno-deprecated-declarations)
if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
# Intel silently ignores '-Wno-deprecated-declarations',
# warning no. 1786 must be explicitly disabled.
# See #631 for rationale.
add_cxx_compiler_flag(-wd1786)
endif()
# Disable deprecation warnings for release builds (when -Werror is enabled).
add_cxx_compiler_flag(-Wno-deprecated RELEASE)
add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO)
add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL)
if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
add_cxx_compiler_flag(-fno-exceptions)
endif()
if (HAVE_CXX_FLAG_FSTRICT_ALIASING)
if (NOT CMAKE_CXX_COMPILER_ID STREQUAL "Intel") #ICC17u2: Many false positives for Wstrict-aliasing
add_cxx_compiler_flag(-Wstrict-aliasing)
endif()
endif()
# ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
# (because of deprecated overload)
add_cxx_compiler_flag(-wd654)
add_cxx_compiler_flag(-Wthread-safety)
if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
endif()
# On most UNIX like platforms g++ and clang++ define _GNU_SOURCE as a
# predefined macro, which turns on all of the wonderful libc extensions.
# However g++ doesn't do this in Cygwin so we have to define it ourselves
# since we depend on GNU/POSIX/BSD extensions.
if (CYGWIN)
add_definitions(-D_GNU_SOURCE=1)
endif()
if (QNXNTO)
add_definitions(-D_QNX_SOURCE)
endif()
# Link time optimisation
if (BENCHMARK_ENABLE_LTO)
add_cxx_compiler_flag(-flto)
if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
find_program(GCC_AR gcc-ar)
if (GCC_AR)
set(CMAKE_AR ${GCC_AR})
endif()
find_program(GCC_RANLIB gcc-ranlib)
if (GCC_RANLIB)
set(CMAKE_RANLIB ${GCC_RANLIB})
endif()
elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
include(llvm-toolchain)
endif()
endif()
# Coverage build type
set(BENCHMARK_CXX_FLAGS_COVERAGE "${CMAKE_CXX_FLAGS_DEBUG}"
CACHE STRING "Flags used by the C++ compiler during coverage builds."
FORCE)
set(BENCHMARK_EXE_LINKER_FLAGS_COVERAGE "${CMAKE_EXE_LINKER_FLAGS_DEBUG}"
CACHE STRING "Flags used for linking binaries during coverage builds."
FORCE)
set(BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE "${CMAKE_SHARED_LINKER_FLAGS_DEBUG}"
CACHE STRING "Flags used by the shared libraries linker during coverage builds."
FORCE)
mark_as_advanced(
BENCHMARK_CXX_FLAGS_COVERAGE
BENCHMARK_EXE_LINKER_FLAGS_COVERAGE
BENCHMARK_SHARED_LINKER_FLAGS_COVERAGE)
set(CMAKE_BUILD_TYPE "${CMAKE_BUILD_TYPE}" CACHE STRING
"Choose the type of build, options are: None Debug Release RelWithDebInfo MinSizeRel Coverage.")
add_cxx_compiler_flag(--coverage COVERAGE)
endif()
if (BENCHMARK_USE_LIBCXX)
if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
add_cxx_compiler_flag(-stdlib=libc++)
elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR
"${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
add_cxx_compiler_flag(-nostdinc++)
message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
# Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
# configuration checks such as 'find_package(Threads)'
list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
# -lc++ cannot be added directly to CMAKE_<TYPE>_LINKER_FLAGS because
# linker flags appear before all linker inputs and -lc++ must appear after.
list(APPEND BENCHMARK_CXX_LIBRARIES c++)
else()
message(FATAL_ERROR "-DBENCHMARK_USE_LIBCXX:BOOL=ON is not supported for compiler")
endif()
endif(BENCHMARK_USE_LIBCXX)
# C++ feature checks
# Determine the correct regular expression engine to use
cxx_feature_check(STD_REGEX)
cxx_feature_check(GNU_POSIX_REGEX)
cxx_feature_check(POSIX_REGEX)
if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
endif()
if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
message(WARNING "Using std::regex with exceptions disabled is not fully supported")
endif()
cxx_feature_check(STEADY_CLOCK)
# Ensure we have pthreads
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
# Set up directories
include_directories(${PROJECT_SOURCE_DIR}/include)
# Build the targets
add_subdirectory(src)
if (BENCHMARK_ENABLE_TESTING)
enable_testing()
if (BENCHMARK_ENABLE_GTEST_TESTS AND
NOT (TARGET gtest AND TARGET gtest_main AND
TARGET gmock AND TARGET gmock_main))
include(GoogleTest)
endif()
add_subdirectory(test)
endif()


@ -1,58 +0,0 @@
# How to contribute #
We'd love to accept your patches and contributions to this project. There are
just a few small guidelines you need to follow.
## Contributor License Agreement ##
Contributions to any Google project must be accompanied by a Contributor
License Agreement. This is not a copyright **assignment**, it simply gives
Google permission to use and redistribute your contributions as part of the
project.
* If you are an individual writing original source code and you're sure you
own the intellectual property, then you'll need to sign an [individual
CLA][].
* If you work for a company that wants to allow you to contribute your work,
then you'll need to sign a [corporate CLA][].
You generally only need to submit a CLA once, so if you've already submitted
one (even if it was for a different project), you probably don't need to do it
again.
[individual CLA]: https://developers.google.com/open-source/cla/individual
[corporate CLA]: https://developers.google.com/open-source/cla/corporate
Once your CLA is submitted (or if you already submitted one for
another Google project), make a commit adding yourself to the
[AUTHORS][] and [CONTRIBUTORS][] files. This commit can be part
of your first [pull request][].
[AUTHORS]: AUTHORS
[CONTRIBUTORS]: CONTRIBUTORS
## Submitting a patch ##
1. It's generally best to start by opening a new issue describing the bug or
feature you're intending to fix. Even if you think it's relatively minor,
it's helpful to know what people are working on. Mention in the initial
issue that you are planning to work on that bug or feature so that it can
be assigned to you.
1. Follow the normal process of [forking][] the project, and setup a new
branch to work in. It's important that each group of changes be done in
separate branches in order to ensure that a pull request only includes the
commits related to that bug or feature.
1. Do your best to have [well-formed commit messages][] for each change.
This provides consistency throughout the project, and ensures that commit
messages are able to be formatted properly by various git tools.
1. Finally, push the commits to your fork and submit a [pull request][].
[forking]: https://help.github.com/articles/fork-a-repo
[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
[pull request]: https://help.github.com/articles/creating-a-pull-request


@ -1,78 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# Names should be added to this file only after verifying that
# the individual or the individual's organization has agreed to
# the appropriate Contributor License Agreement, found here:
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# The agreement for individuals can be filled out on the web.
#
# When adding J Random Contributor's name to this file,
# either J's name or J's organization's name should be
# added to the AUTHORS file, depending on whether the
# individual or corporate CLA was used.
#
# Names should be added to this file as:
# Name <email address>
#
# Please keep the list sorted.
Albert Pretorius <pretoalb@gmail.com>
Alex Steele <steelal123@gmail.com>
Andriy Berestovskyy <berestovskyy@gmail.com>
Arne Beer <arne@twobeer.de>
Billy Robert O'Neal III <billy.oneal@gmail.com> <bion@microsoft.com>
Chris Kennelly <ckennelly@google.com> <ckennelly@ckennelly.com>
Christopher Seymour <chris.j.seymour@hotmail.com>
Colin Braley <braley.colin@gmail.com>
Cyrille Faucheux <cyrille.faucheux@gmail.com>
Daniel Harvey <danielharvey458@gmail.com>
David Coeurjolly <david.coeurjolly@liris.cnrs.fr>
Deniz Evrenci <denizevrenci@gmail.com>
Dominic Hamon <dma@stripysock.com> <dominic@google.com>
Dominik Czarnota <dominik.b.czarnota@gmail.com>
Eric Backus <eric_backus@alum.mit.edu>
Eric Fiselier <eric@efcs.ca>
Eugene Zhuk <eugene.zhuk@gmail.com>
Evgeny Safronov <division494@gmail.com>
Federico Ficarelli <federico.ficarelli@gmail.com>
Felix Homann <linuxaudio@showlabor.de>
Geoffrey Martin-Noble <gcmn@google.com> <gmngeoffrey@gmail.com>
Gergő Szitár <szitar.gergo@gmail.com>
Hannes Hauswedell <h2@fsfe.org>
Ismael Jimenez Martinez <ismael.jimenez.martinez@gmail.com>
Jern-Kuan Leong <jernkuan@gmail.com>
JianXiong Zhou <zhoujianxiong2@gmail.com>
Joao Paulo Magalhaes <joaoppmagalhaes@gmail.com>
John Millikin <jmillikin@stripe.com>
Jordan Williams <jwillikers@protonmail.com>
Jussi Knuuttila <jussi.knuuttila@gmail.com>
Kai Wolf <kai.wolf@gmail.com>
Kaito Udagawa <umireon@gmail.com>
Kishan Kumar <kumar.kishan@outlook.com>
Lei Xu <eddyxu@gmail.com>
Matt Clarkson <mattyclarkson@gmail.com>
Maxim Vafin <maxvafin@gmail.com>
Nick Hutchinson <nshutchinson@gmail.com>
Oleksandr Sochka <sasha.sochka@gmail.com>
Ori Livneh <ori.livneh@gmail.com>
Pascal Leroy <phl@google.com>
Paul Redmond <paul.redmond@gmail.com>
Pierre Phaneuf <pphaneuf@google.com>
Radoslav Yovchev <radoslav.tm@gmail.com>
Raul Marin <rmrodriguez@cartodb.com>
Ray Glover <ray.glover@uk.ibm.com>
Robert Guo <robert.guo@mongodb.com>
Roman Lebedev <lebedev.ri@gmail.com>
Sayan Bhattacharjee <aero.sayan@gmail.com>
Shuo Chen <chenshuo@chenshuo.com>
Tobias Ulvgård <tobias.ulvgard@dirac.se>
Tom Madams <tom.ej.madams@gmail.com> <tmadams@google.com>
Yixuan Qiu <yixuanq@gmail.com>
Yusuke Suzuki <utatane.tea@gmail.com>
Zbigniew Skowron <zbychs@gmail.com>


@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large.


@ -1,30 +0,0 @@
workspace(name = "com_github_google_benchmark")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
http_archive(
name = "rules_cc",
strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912",
urls = ["https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.zip"],
)
http_archive(
name = "com_google_googletest",
strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
)
http_archive(
name = "pybind11",
build_file = "@//bindings/python:pybind11.BUILD",
sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
strip_prefix = "pybind11-2.4.3",
urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"],
)
new_local_repository(
name = "python_headers",
build_file = "@//bindings/python:python_headers.BUILD",
path = "/usr/include/python3.6", # May be overwritten by setup.py.
)


@ -1 +0,0 @@
theme: jekyll-theme-midnight


@ -1,50 +0,0 @@
version: '{build}'
image: Visual Studio 2017
configuration:
- Debug
- Release
environment:
matrix:
- compiler: msvc-15-seh
generator: "Visual Studio 15 2017"
- compiler: msvc-15-seh
generator: "Visual Studio 15 2017 Win64"
- compiler: msvc-14-seh
generator: "Visual Studio 14 2015"
- compiler: msvc-14-seh
generator: "Visual Studio 14 2015 Win64"
- compiler: gcc-5.3.0-posix
generator: "MinGW Makefiles"
cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2015
matrix:
fast_finish: true
install:
# git bash conflicts with MinGW makefiles
- if "%generator%"=="MinGW Makefiles" (set "PATH=%PATH:C:\Program Files\Git\usr\bin;=%")
- if not "%cxx_path%"=="" (set "PATH=%PATH%;%cxx_path%")
build_script:
- md _build -Force
- cd _build
- echo %configuration%
- cmake -G "%generator%" "-DCMAKE_BUILD_TYPE=%configuration%" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON ..
- cmake --build . --config %configuration%
test_script:
- ctest --build-config %configuration% --timeout 300 --output-on-failure
artifacts:
- path: '_build/CMakeFiles/*.log'
name: logs
- path: '_build/Testing/**/*.xml'
name: test_results


@ -1,62 +0,0 @@
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python benchmarking utilities.
Example usage:
import benchmark
@benchmark.register
def my_benchmark(state):
... # Code executed outside `while` loop is not timed.
while state:
... # Code executed within `while` loop is timed.
if __name__ == '__main__':
benchmark.main()
"""
from absl import app
from benchmark import _benchmark
__all__ = [
"register",
"main",
]
__version__ = "0.1.0"
def register(f=None, *, name=None):
if f is None:
return lambda f: register(f, name=name)
if name is None:
name = f.__name__
_benchmark.RegisterBenchmark(name, f)
return f
def _flags_parser(argv):
argv = _benchmark.Initialize(argv)
return app.parse_flags_with_usage(argv)
def _run_benchmarks(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
return _benchmark.RunSpecifiedBenchmarks()
def main(argv=None):
return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)


@ -1,50 +0,0 @@
// Benchmark for Python.
#include "benchmark/benchmark.h"
#include "pybind11/pybind11.h"
#include "pybind11/stl.h"
namespace
{
namespace py = ::pybind11;
std::vector<std::string> Initialize(const std::vector<std::string> &argv)
{
// The `argv` pointers here become invalid when this function returns, but
// benchmark holds the pointer to `argv[0]`. We create a static copy of it
// so it persists, and replace the pointer below.
static std::string executable_name(argv[0]);
std::vector<char *> ptrs;
ptrs.reserve(argv.size());
for (auto &arg : argv)
{
ptrs.push_back(const_cast<char *>(arg.c_str()));
}
ptrs[0] = const_cast<char *>(executable_name.c_str());
int argc = static_cast<int>(argv.size());
benchmark::Initialize(&argc, ptrs.data());
std::vector<std::string> remaining_argv;
remaining_argv.reserve(argc);
for (int i = 0; i < argc; ++i)
{
remaining_argv.emplace_back(ptrs[i]);
}
return remaining_argv;
}
void RegisterBenchmark(const char *name, py::function f)
{
benchmark::RegisterBenchmark(name, [f](benchmark::State &state) { f(&state); });
}
PYBIND11_MODULE(_benchmark, m)
{
m.def("Initialize", Initialize);
m.def("RegisterBenchmark", RegisterBenchmark);
m.def("RunSpecifiedBenchmarks", []() { benchmark::RunSpecifiedBenchmarks(); });
py::class_<benchmark::State>(m, "State")
.def("__bool__", &benchmark::State::KeepRunning)
.def_property_readonly("keep_running", &benchmark::State::KeepRunning);
};
} // namespace
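The `__bool__` binding above is what makes the Python-side `while state:` idiom work: every truth test on a `State` dispatches to `benchmark::State::KeepRunning()`. A minimal sketch of the two equivalent loop forms (the benchmark bodies are illustrative):
```python
# Both loops call benchmark::State::KeepRunning() through the bindings above.
def loop_via_bool(state):
    while state:                # uses the __bool__ binding
        pass

def loop_via_property(state):
    while state.keep_running:   # uses the keep_running property binding
        pass
```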

View File

@ -1,32 +0,0 @@
# Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Python using C++ benchmark framework."""
import benchmark
@benchmark.register
def empty(state):
while state:
pass
@benchmark.register
def sum_million(state):
while state:
sum(range(1_000_000))
if __name__ == '__main__':
benchmark.main()

View File

@ -1,25 +0,0 @@
_SHARED_LIB_SUFFIX = {
"//conditions:default": ".so",
"//:windows": ".dll",
}
def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []):
for shared_lib_suffix in _SHARED_LIB_SUFFIX.values():
shared_lib_name = name + shared_lib_suffix
native.cc_binary(
name = shared_lib_name,
linkshared = 1,
linkstatic = 1,
srcs = srcs + hdrs,
copts = copts,
features = features,
deps = deps,
)
return native.py_library(
name = name,
data = select({
platform: [name + shared_lib_suffix]
for platform, shared_lib_suffix in _SHARED_LIB_SUFFIX.items()
}),
)
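A hypothetical `BUILD`-file invocation of the `py_extension` macro above (Starlark shares Python syntax; the load path, target name, and deps here are illustrative, not taken from the repository):
```python
load("//bindings/python:build_defs.bzl", "py_extension")

# Builds one cc_binary per platform suffix (.so / .dll) and wraps it
# in a py_library of the same name.
py_extension(
    name = "_benchmark",
    srcs = ["benchmark.cc"],
    deps = [
        "//:benchmark",
        "@pybind11",
        "@python_headers",
    ],
)
```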

View File

@ -1,20 +0,0 @@
cc_library(
name = "pybind11",
hdrs = glob(
include = [
"include/pybind11/*.h",
"include/pybind11/detail/*.h",
],
exclude = [
"include/pybind11/common.h",
"include/pybind11/eigen.h",
],
),
copts = [
"-fexceptions",
"-Wno-undefined-inline",
"-Wno-pragma-once-outside-header",
],
includes = ["include"],
visibility = ["//visibility:public"],
)

View File

@ -1,6 +0,0 @@
cc_library(
name = "python_headers",
hdrs = glob(["**/*.h"]),
includes = ["."],
visibility = ["//visibility:public"],
)

View File

@ -1,74 +0,0 @@
# - Adds a compiler flag if it is supported by the compiler
#
# This function checks that the supplied compiler flag is supported and then
# adds it to the corresponding compiler flags
#
# add_cxx_compiler_flag(<FLAG> [<VARIANT>])
#
# - Example
#
# include(AddCXXCompilerFlag)
# add_cxx_compiler_flag(-Wall)
# add_cxx_compiler_flag(-no-strict-aliasing RELEASE)
# Requires CMake 2.6+
if(__add_cxx_compiler_flag)
return()
endif()
set(__add_cxx_compiler_flag INCLUDED)
include(CheckCXXCompilerFlag)
function(mangle_compiler_flag FLAG OUTPUT)
string(TOUPPER "HAVE_CXX_FLAG_${FLAG}" SANITIZED_FLAG)
string(REPLACE "+" "X" SANITIZED_FLAG ${SANITIZED_FLAG})
string(REGEX REPLACE "[^A-Za-z_0-9]" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
string(REGEX REPLACE "_+" "_" SANITIZED_FLAG ${SANITIZED_FLAG})
set(${OUTPUT} "${SANITIZED_FLAG}" PARENT_SCOPE)
endfunction(mangle_compiler_flag)
function(add_cxx_compiler_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
set(VARIANT ${ARGV1})
if(ARGV1)
string(TOUPPER "_${VARIANT}" VARIANT)
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
endif()
endfunction()
function(add_required_cxx_compiler_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
if(${MANGLED_FLAG})
set(VARIANT ${ARGV1})
if(ARGV1)
string(TOUPPER "_${VARIANT}" VARIANT)
endif()
set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE)
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_MODULE_LINKER_FLAGS "${CMAKE_MODULE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE)
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} ${FLAG}" PARENT_SCOPE)
else()
message(FATAL_ERROR "Required flag '${FLAG}' is not supported by the compiler")
endif()
endfunction()
function(check_cxx_warning_flag FLAG)
mangle_compiler_flag("${FLAG}" MANGLED_FLAG)
set(OLD_CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS}")
# Add -Werror to ensure the compiler generates an error if the warning flag
# doesn't exist.
set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -Werror ${FLAG}")
check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG})
set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}")
endfunction()

View File

@ -1,64 +0,0 @@
# - Compile and run code to check for C++ features
#
# This function compiles a source file under the `cmake` folder
# and adds the corresponding `HAVE_[FILENAME]` flag to the CMake
# environment
#
# cxx_feature_check(<FLAG> [<VARIANT>])
#
# - Example
#
# include(CXXFeatureCheck)
# cxx_feature_check(STD_REGEX)
# Requires CMake 2.8.12+
if(__cxx_feature_check)
return()
endif()
set(__cxx_feature_check INCLUDED)
function(cxx_feature_check FILE)
string(TOLOWER ${FILE} FILE)
string(TOUPPER ${FILE} VAR)
string(TOUPPER "HAVE_${VAR}" FEATURE)
if (DEFINED HAVE_${VAR})
set(HAVE_${VAR} 1 PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
return()
endif()
if (NOT DEFINED COMPILE_${FEATURE})
message(STATUS "Performing Test ${FEATURE}")
if(CMAKE_CROSSCOMPILING)
try_compile(COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
if(COMPILE_${FEATURE})
message(WARNING
"If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0")
set(RUN_${FEATURE} 0 CACHE INTERNAL "")
else()
set(RUN_${FEATURE} 1 CACHE INTERNAL "")
endif()
else()
message(STATUS "Performing Test ${FEATURE}")
try_run(RUN_${FEATURE} COMPILE_${FEATURE}
${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp
CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS}
LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES})
endif()
endif()
if(RUN_${FEATURE} EQUAL 0)
message(STATUS "Performing Test ${FEATURE} -- success")
set(HAVE_${VAR} 1 PARENT_SCOPE)
add_definitions(-DHAVE_${VAR})
else()
if(NOT COMPILE_${FEATURE})
message(STATUS "Performing Test ${FEATURE} -- failed to compile")
else()
message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run")
endif()
endif()
endfunction()

View File

@ -1 +0,0 @@
include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")

View File

@ -1,54 +0,0 @@
# - Returns a version string from Git tags
#
# This function inspects the project's annotated git tags and stores a version
# string in a CMake variable
#
# get_git_version(<var>)
#
# - Example
#
# include(GetGitVersion)
# get_git_version(GIT_VERSION)
#
# Requires CMake 2.8.11+
find_package(Git)
if(__get_git_version)
return()
endif()
set(__get_git_version INCLUDED)
function(get_git_version var)
if(GIT_EXECUTABLE)
execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
RESULT_VARIABLE status
OUTPUT_VARIABLE GIT_VERSION
ERROR_QUIET)
if(${status})
set(GIT_VERSION "v0.0.0")
else()
string(STRIP ${GIT_VERSION} GIT_VERSION)
string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION})
endif()
# Work out if the repository is dirty
execute_process(COMMAND ${GIT_EXECUTABLE} update-index -q --refresh
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_QUIET
ERROR_QUIET)
execute_process(COMMAND ${GIT_EXECUTABLE} diff-index --name-only HEAD --
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}
OUTPUT_VARIABLE GIT_DIFF_INDEX
ERROR_QUIET)
string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY)
if (${GIT_DIRTY})
set(GIT_VERSION "${GIT_VERSION}-dirty")
endif()
else()
set(GIT_VERSION "v0.0.0")
endif()
message(STATUS "git Version: ${GIT_VERSION}")
set(${var} ${GIT_VERSION} PARENT_SCOPE)
endfunction()

View File

@ -1,41 +0,0 @@
# Download and unpack googletest at configure time
set(GOOGLETEST_PREFIX "${benchmark_BINARY_DIR}/third_party/googletest")
configure_file(${benchmark_SOURCE_DIR}/cmake/GoogleTest.cmake.in ${GOOGLETEST_PREFIX}/CMakeLists.txt @ONLY)
set(GOOGLETEST_PATH "${CMAKE_CURRENT_SOURCE_DIR}/googletest" CACHE PATH "") # Mind the quotes
execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}"
-DALLOW_DOWNLOADING_GOOGLETEST=${BENCHMARK_DOWNLOAD_DEPENDENCIES} -DGOOGLETEST_PATH:PATH=${GOOGLETEST_PATH} .
RESULT_VARIABLE result
WORKING_DIRECTORY ${GOOGLETEST_PREFIX}
)
if(result)
message(FATAL_ERROR "CMake step for googletest failed: ${result}")
endif()
execute_process(
COMMAND ${CMAKE_COMMAND} --build .
RESULT_VARIABLE result
WORKING_DIRECTORY ${GOOGLETEST_PREFIX}
)
if(result)
message(FATAL_ERROR "Build step for googletest failed: ${result}")
endif()
# Prevent overriding the parent project's compiler/linker
# settings on Windows
set(gtest_force_shared_crt ON CACHE BOOL "" FORCE)
include(${GOOGLETEST_PREFIX}/googletest-paths.cmake)
# Add googletest directly to our build. This defines
# the gtest and gtest_main targets.
add_subdirectory(${GOOGLETEST_SOURCE_DIR}
${GOOGLETEST_BINARY_DIR}
EXCLUDE_FROM_ALL)
set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest_main,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock,INTERFACE_INCLUDE_DIRECTORIES>)
set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock_main,INTERFACE_INCLUDE_DIRECTORIES>)

View File

@ -1,58 +0,0 @@
cmake_minimum_required(VERSION 2.8.12)
project(googletest-download NONE)
# Enable ExternalProject CMake module
include(ExternalProject)
option(ALLOW_DOWNLOADING_GOOGLETEST "If googletest src tree is not found in location specified by GOOGLETEST_PATH, do fetch the archive from internet" OFF)
set(GOOGLETEST_PATH "/usr/src/googletest" CACHE PATH
"Path to the googletest root tree. Should contain googletest and googlemock subdirs. And CMakeLists.txt in root, and in both of these subdirs")
# Download and install GoogleTest
message(STATUS "Looking for Google Test sources")
message(STATUS "Looking for Google Test sources in ${GOOGLETEST_PATH}")
if(EXISTS "${GOOGLETEST_PATH}" AND IS_DIRECTORY "${GOOGLETEST_PATH}" AND EXISTS "${GOOGLETEST_PATH}/CMakeLists.txt" AND
EXISTS "${GOOGLETEST_PATH}/googletest" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googletest" AND EXISTS "${GOOGLETEST_PATH}/googletest/CMakeLists.txt" AND
EXISTS "${GOOGLETEST_PATH}/googlemock" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googlemock" AND EXISTS "${GOOGLETEST_PATH}/googlemock/CMakeLists.txt")
message(STATUS "Found Google Test in ${GOOGLETEST_PATH}")
ExternalProject_Add(
googletest
PREFIX "${CMAKE_BINARY_DIR}"
DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download"
SOURCE_DIR "${GOOGLETEST_PATH}" # use existing src dir.
BINARY_DIR "${CMAKE_BINARY_DIR}/build"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
else()
if(NOT ALLOW_DOWNLOADING_GOOGLETEST)
message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable ALLOW_DOWNLOADING_GOOGLETEST, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.")
else()
message(WARNING "Did not find Google Test sources! Fetching from web...")
ExternalProject_Add(
googletest
GIT_REPOSITORY https://github.com/google/googletest.git
GIT_TAG master
PREFIX "${CMAKE_BINARY_DIR}"
STAMP_DIR "${CMAKE_BINARY_DIR}/stamp"
DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download"
SOURCE_DIR "${CMAKE_BINARY_DIR}/src"
BINARY_DIR "${CMAKE_BINARY_DIR}/build"
CONFIGURE_COMMAND ""
BUILD_COMMAND ""
INSTALL_COMMAND ""
TEST_COMMAND ""
)
endif()
endif()
ExternalProject_Get_Property(googletest SOURCE_DIR BINARY_DIR)
file(WRITE googletest-paths.cmake
"set(GOOGLETEST_SOURCE_DIR \"${SOURCE_DIR}\")
set(GOOGLETEST_BINARY_DIR \"${BINARY_DIR}\")
")

View File

@ -1,12 +0,0 @@
prefix=@CMAKE_INSTALL_PREFIX@
exec_prefix=${prefix}
libdir=${prefix}/lib
includedir=${prefix}/include
Name: @PROJECT_NAME@
Description: Google microbenchmark framework
Version: @VERSION@
Libs: -L${libdir} -lbenchmark
Libs.private: -lpthread
Cflags: -I${includedir}

View File

@ -1,13 +0,0 @@
#include <gnuregex.h>
#include <string>
int main()
{
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0)
{
return ec;
}
return regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
}

View File

@ -1,8 +0,0 @@
find_package(LLVMAr REQUIRED)
set(CMAKE_AR "${LLVMAR_EXECUTABLE}" CACHE FILEPATH "" FORCE)
find_package(LLVMNm REQUIRED)
set(CMAKE_NM "${LLVMNM_EXECUTABLE}" CACHE FILEPATH "" FORCE)
find_package(LLVMRanLib REQUIRED)
set(CMAKE_RANLIB "${LLVMRANLIB_EXECUTABLE}" CACHE FILEPATH "" FORCE)

View File

@ -1,15 +0,0 @@
#include <regex.h>
#include <string>
int main()
{
std::string str = "test0159";
regex_t re;
int ec = regcomp(&re, "^[a-z]+[0-9]+$", REG_EXTENDED | REG_NOSUB);
if (ec != 0)
{
return ec;
}
int ret = regexec(&re, str.c_str(), 0, nullptr, 0) ? -1 : 0;
regfree(&re);
return ret;
}

View File

@ -1,3 +0,0 @@
macro(split_list listname)
string(REPLACE ";" " " ${listname} "${${listname}}")
endmacro()

View File

@ -1,9 +0,0 @@
#include <regex>
#include <string>
int main()
{
const std::string str = "test0159";
std::regex re;
re = std::regex("^[a-z]+[0-9]+$", std::regex_constants::extended | std::regex_constants::nosubs);
return std::regex_search(str, re) ? 0 : -1;
}

View File

@ -1,8 +0,0 @@
#include <chrono>
int main()
{
typedef std::chrono::steady_clock Clock;
Clock::time_point tp = Clock::now();
((void)tp);
}

View File

@ -1,6 +0,0 @@
#define HAVE_THREAD_SAFETY_ATTRIBUTES
#include "../src/mutex.h"
int main()
{
}

View File

@ -1,7 +0,0 @@
cmake_minimum_required(VERSION 2.8.11)
project(cmake_wrapper)
include(conanbuildinfo.cmake)
conan_basic_setup()
include(${CMAKE_SOURCE_DIR}/CMakeListsOriginal.txt)

View File

@ -1,10 +0,0 @@
cmake_minimum_required(VERSION 2.8.11)
project(test_package)
set(CMAKE_VERBOSE_MAKEFILE TRUE)
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
add_executable(${PROJECT_NAME} test_package.cpp)
target_link_libraries(${PROJECT_NAME} ${CONAN_LIBS})

View File

@ -1,19 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile, CMake
import os
class TestPackageConan(ConanFile):
settings = "os", "compiler", "build_type", "arch"
generators = "cmake"
def build(self):
cmake = CMake(self)
cmake.configure()
cmake.build()
def test(self):
bin_path = os.path.join("bin", "test_package")
self.run(bin_path, run_environment=True)

View File

@ -1,20 +0,0 @@
#include "benchmark/benchmark.h"
void BM_StringCreation(benchmark::State &state)
{
while (state.KeepRunning())
std::string empty_string;
}
BENCHMARK(BM_StringCreation);
void BM_StringCopy(benchmark::State &state)
{
std::string x = "hello";
while (state.KeepRunning())
std::string copy(x);
}
BENCHMARK(BM_StringCopy);
BENCHMARK_MAIN();

View File

@ -1,79 +0,0 @@
from conans import ConanFile, CMake, tools
from conans.errors import ConanInvalidConfiguration
import shutil
import os
class GoogleBenchmarkConan(ConanFile):
name = "benchmark"
description = "A microbenchmark support library."
topics = ("conan", "benchmark", "google", "microbenchmark")
url = "https://github.com/google/benchmark"
homepage = "https://github.com/google/benchmark"
author = "Google Inc."
license = "Apache-2.0"
exports_sources = ["*"]
generators = "cmake"
settings = "arch", "build_type", "compiler", "os"
options = {
"shared": [True, False],
"fPIC": [True, False],
"enable_lto": [True, False],
"enable_exceptions": [True, False]
}
default_options = {"shared": False, "fPIC": True, "enable_lto": False, "enable_exceptions": True}
_build_subfolder = "."
def source(self):
# Wrap the original CMake file to call conan_basic_setup
shutil.move("CMakeLists.txt", "CMakeListsOriginal.txt")
shutil.move(os.path.join("conan", "CMakeLists.txt"), "CMakeLists.txt")
def config_options(self):
if self.settings.os == "Windows":
if self.settings.compiler == "Visual Studio" and float(self.settings.compiler.version.value) <= 12:
raise ConanInvalidConfiguration("{} {} does not support Visual Studio <= 12".format(self.name, self.version))
del self.options.fPIC
def configure(self):
if self.settings.os == "Windows" and self.options.shared:
raise ConanInvalidConfiguration("Windows shared builds are not supported right now, see issue #639")
def _configure_cmake(self):
cmake = CMake(self)
cmake.definitions["BENCHMARK_ENABLE_TESTING"] = "OFF"
cmake.definitions["BENCHMARK_ENABLE_GTEST_TESTS"] = "OFF"
cmake.definitions["BENCHMARK_ENABLE_LTO"] = "ON" if self.options.enable_lto else "OFF"
cmake.definitions["BENCHMARK_ENABLE_EXCEPTIONS"] = "ON" if self.options.enable_exceptions else "OFF"
# See https://github.com/google/benchmark/pull/638 for Windows 32 build explanation
if self.settings.os != "Windows":
cmake.definitions["BENCHMARK_BUILD_32_BITS"] = "ON" if "64" not in str(self.settings.arch) else "OFF"
cmake.definitions["BENCHMARK_USE_LIBCXX"] = "ON" if (str(self.settings.compiler.libcxx) == "libc++") else "OFF"
else:
cmake.definitions["BENCHMARK_USE_LIBCXX"] = "OFF"
cmake.configure(build_folder=self._build_subfolder)
return cmake
def build(self):
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy(pattern="LICENSE", dst="licenses")
def package_info(self):
self.cpp_info.libs = tools.collect_libs(self)
if self.settings.os == "Linux":
self.cpp_info.libs.extend(["pthread", "rt"])
elif self.settings.os == "Windows":
self.cpp_info.libs.append("shlwapi")
elif self.settings.os == "SunOS":
self.cpp_info.libs.append("kstat")

View File

@ -1,18 +0,0 @@
# Build tool dependency policy
To ensure the broadest compatibility when building the benchmark library, but
still allow forward progress, we require any build tooling to be available for:
* Debian stable AND
* The last two Ubuntu LTS releases
Currently, this means using build tool versions that are available for Ubuntu
16.04 (Xenial), Ubuntu 18.04 (Bionic), and Debian stretch.
_Note: [travis](.travis.yml) runs under Ubuntu 14.04 (Trusty) for Linux builds._
## cmake
The currently supported version is CMake 3.5.1 as of 2018-06-06.
_Note, this version is also available for Ubuntu 14.04, the previous Ubuntu LTS
release, as `cmake3`._

View File

@ -1,147 +0,0 @@
# Assembly Tests
The Benchmark library provides a number of functions whose primary
purpose is to affect assembly generation, including `DoNotOptimize`
and `ClobberMemory`. In addition there are other functions,
such as `KeepRunning`, for which generating good assembly is paramount.
For these functions it's important to have tests that verify the
correctness and quality of the implementation. This requires testing
the code generated by the compiler.
This document describes how the Benchmark library tests compiler output,
as well as how to properly write new tests.
## Anatomy of a Test
Writing a test has two steps:
* Write the code you want to generate assembly for.
* Add `// CHECK` lines to match against the generated assembly.
Example:
```c++
// CHECK-LABEL: test_add:
extern "C" int test_add() {
extern int ExternInt;
return ExternInt + 1;
// CHECK: movl ExternInt(%rip), %eax
// CHECK: addl %eax
// CHECK: ret
}
```
#### LLVM Filecheck
[LLVM's Filecheck](https://llvm.org/docs/CommandGuide/FileCheck.html)
is used to test the generated assembly against the `// CHECK` lines
specified in the tests source file. Please see the documentation
linked above for information on how to write `CHECK` directives.
#### Tips and Tricks:
* Tests should match the minimal amount of output required to establish
correctness. `CHECK` directives don't have to match on the exact next line
after the previous match, so tests should omit checks for unimportant
bits of assembly. ([`CHECK-NEXT`](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-next-directive)
can be used to ensure a match occurs exactly after the previous match).
* The tests are compiled with `-O3 -g0`. So we're only testing the
optimized output.
* The assembly output is further cleaned up using `tools/strip_asm.py`.
This removes comments, assembler directives, and unused labels before
the test is run.
* The generated and stripped assembly file for a test is output under
`<build-directory>/test/<test-name>.s`
* Filecheck supports using [`CHECK` prefixes](https://llvm.org/docs/CommandGuide/FileCheck.html#cmdoption-check-prefixes)
to specify lines that should only match in certain situations.
The Benchmark tests use `CHECK-CLANG` and `CHECK-GNU` for lines that
are only expected to match Clang or GCC's output respectively. Normal
`CHECK` lines match against all compilers. (Note: `CHECK-NOT` and
`CHECK-LABEL` are NOT prefixes. They are versions of non-prefixed
`CHECK` lines)
* Use `extern "C"` to disable name mangling for specific functions. This
makes them easier to name in the `CHECK` lines.
## Problems Writing Portable Tests
Writing tests which check the code generated by a compiler is
inherently non-portable. Different compilers and even different compiler
versions may generate entirely different code. The Benchmark tests
must tolerate this.
LLVM Filecheck provides a number of mechanisms to help write
"more portable" tests; including [matching using regular expressions](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-pattern-matching-syntax),
allowing the creation of [named variables](https://llvm.org/docs/CommandGuide/FileCheck.html#filecheck-variables)
for later matching, and [checking non-sequential matches](https://llvm.org/docs/CommandGuide/FileCheck.html#the-check-dag-directive).
#### Capturing Variables
For example, say GCC stores a variable in a register but Clang stores
it in memory. To write a test that tolerates both cases we "capture"
the destination of the store, and then use the captured expression
to write the remainder of the test.
```c++
// CHECK-LABEL: test_div_no_op_into_shr:
extern "C" void test_div_no_op_into_shr(int value) {
int divisor = 2;
benchmark::DoNotOptimize(divisor); // hide the value from the optimizer
return value / divisor;
// CHECK: movl $2, [[DEST:.*]]
// CHECK: idivl [[DEST]]
// CHECK: ret
}
```
#### Using Regular Expressions to Match Differing Output
Often tests require testing assembly lines which may subtly differ
between compilers or compiler versions. A common example of this
is matching stack frame addresses. In this case regular expressions
can be used to match the differing bits of output. For example:
```c++
int ExternInt;
struct Point { int x, y, z; };
// CHECK-LABEL: test_store_point:
extern "C" void test_store_point() {
Point p{ExternInt, ExternInt, ExternInt};
benchmark::DoNotOptimize(p);
// CHECK: movl ExternInt(%rip), %eax
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: movl %eax, -{{[0-9]+}}(%rsp)
// CHECK: ret
}
```
## Current Requirements and Limitations
The tests require Filecheck to be installed on the `PATH` of the
build machine; otherwise the tests are disabled.
Additionally, as mentioned in the previous section, codegen tests are
inherently non-portable. Currently the tests are limited to:
* x86_64 targets.
* Compiled with GCC or Clang
Further work could be done, at least on a limited basis, to extend the
tests to other architectures and compilers (using `CHECK` prefixes).
Furthermore, the tests fail for builds which specify additional flags
that modify code generation, including `--coverage` or `-fsanitize=`.

View File

@ -1 +0,0 @@
theme: jekyll-theme-midnight

View File

@ -1,199 +0,0 @@
# Benchmark Tools
## compare.py
The `compare.py` script can be used to compare the results of benchmarks.
**NOTE**: the utility relies on the scipy package which can be installed using [these instructions](https://www.scipy.org/install.html).
### Displaying aggregates only
The switch `-a` / `--display_aggregates_only` can be used to control the
display of normal iterations vs. aggregates. When passed, it is forwarded
to the benchmark binaries being run, and is also honored by the tool itself;
only the aggregates are displayed, not the normal runs. It only affects the
display; the separate runs are still used to calculate the U test.
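For illustration, a minimal sketch of that split over a benchmark JSON output file, assuming each entry carries a `run_type` field distinguishing aggregates from normal iterations (an illustration only, not part of `compare.py`):
```python
import json

with open("results.json") as f:
    runs = json.load(f)["benchmarks"]

# Aggregates (mean, stddev, ...) are what gets displayed; normal runs are
# kept only as input for the U test.
aggregates = [r for r in runs if r.get("run_type") == "aggregate"]
normal_runs = [r for r in runs if r.get("run_type") != "aggregate"]
```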
### Modes of operation
There are three modes of operation:
1. Just compare two benchmarks
The program is invoked like:
``` bash
$ compare.py benchmarks <benchmark_baseline> <benchmark_contender> [benchmark options]...
```
Where `<benchmark_baseline>` and `<benchmark_contender>` each specify either a benchmark executable or a JSON output file. The type of the input file is detected automatically. If a benchmark executable is specified, the benchmark is run to obtain the results; otherwise the results are simply loaded from the output file.
`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, whether normal `--benchmark_*` parameters or custom parameters your binary takes.
Example output:
```
$ ./compare.py benchmarks ./a.out ./a.out
RUNNING: ./a.out --benchmark_out=/tmp/tmprBT5nW
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:16:44
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 19101577 211.669MB/s
BM_memcpy/64 76 ns 76 ns 9412571 800.199MB/s
BM_memcpy/512 84 ns 84 ns 8249070 5.64771GB/s
BM_memcpy/1024 116 ns 116 ns 6181763 8.19505GB/s
BM_memcpy/8192 643 ns 643 ns 1062855 11.8636GB/s
BM_copy/8 222 ns 222 ns 3137987 34.3772MB/s
BM_copy/64 1608 ns 1608 ns 432758 37.9501MB/s
BM_copy/512 12589 ns 12589 ns 54806 38.7867MB/s
BM_copy/1024 25169 ns 25169 ns 27713 38.8003MB/s
BM_copy/8192 201165 ns 201112 ns 3486 38.8466MB/s
RUNNING: ./a.out --benchmark_out=/tmp/tmpt1wwG_
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:16:53
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 19397903 211.255MB/s
BM_memcpy/64 73 ns 73 ns 9691174 839.635MB/s
BM_memcpy/512 85 ns 85 ns 8312329 5.60101GB/s
BM_memcpy/1024 118 ns 118 ns 6438774 8.11608GB/s
BM_memcpy/8192 656 ns 656 ns 1068644 11.6277GB/s
BM_copy/8 223 ns 223 ns 3146977 34.2338MB/s
BM_copy/64 1611 ns 1611 ns 435340 37.8751MB/s
BM_copy/512 12622 ns 12622 ns 54818 38.6844MB/s
BM_copy/1024 25257 ns 25239 ns 27779 38.6927MB/s
BM_copy/8192 205013 ns 205010 ns 3479 38.108MB/s
Comparing ./a.out to ./a.out
Benchmark Time CPU Time Old Time New CPU Old CPU New
------------------------------------------------------------------------------------------------------
BM_memcpy/8 +0.0020 +0.0020 36 36 36 36
BM_memcpy/64 -0.0468 -0.0470 76 73 76 73
BM_memcpy/512 +0.0081 +0.0083 84 85 84 85
BM_memcpy/1024 +0.0098 +0.0097 116 118 116 118
BM_memcpy/8192 +0.0200 +0.0203 643 656 643 656
BM_copy/8 +0.0046 +0.0042 222 223 222 223
BM_copy/64 +0.0020 +0.0020 1608 1611 1608 1611
BM_copy/512 +0.0027 +0.0026 12589 12622 12589 12622
BM_copy/1024 +0.0035 +0.0028 25169 25257 25169 25239
BM_copy/8192 +0.0191 +0.0194 201165 205013 201112 205010
```
For every benchmark in the first run, the tool looks for a benchmark with exactly the same name in the second run and compares the results; if no match is found, the benchmark is omitted from the diff.
Note that the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
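As a worked example of that formula (a minimal sketch; the tool computes it from unrounded measurements, which is why the table above shows -0.0468 for `BM_memcpy/64`):
```python
def relative_diff(old: float, new: float) -> float:
    """The (new - old) / |old| value shown in the Time and CPU columns."""
    return (new - old) / abs(old)

# Rounded table values for BM_memcpy/64: 76 ns -> 73 ns.
print(relative_diff(76, 73))  # ~ -0.0395, i.e. about 4% faster
```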
2. Compare two different filters of one benchmark
The program is invoked like:
``` bash
$ compare.py filters <benchmark> <filter_baseline> <filter_contender> [benchmark options]...
```
Where `<benchmark>` specifies either a benchmark executable or a JSON output file. The type of the input file is detected automatically. If a benchmark executable is specified, the benchmark is run to obtain the results; otherwise the results are simply loaded from the output file.
Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, whether normal `--benchmark_*` parameters or custom parameters your binary takes.
Example output:
```
$ ./compare.py filters ./a.out BM_memcpy BM_copy
RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmpBWKk0k
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:37:28
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 36 ns 36 ns 17891491 211.215MB/s
BM_memcpy/64 74 ns 74 ns 9400999 825.646MB/s
BM_memcpy/512 87 ns 87 ns 8027453 5.46126GB/s
BM_memcpy/1024 111 ns 111 ns 6116853 8.5648GB/s
BM_memcpy/8192 657 ns 656 ns 1064679 11.6247GB/s
RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpAvWcOM
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:37:33
----------------------------------------------------
Benchmark Time CPU Iterations
----------------------------------------------------
BM_copy/8 227 ns 227 ns 3038700 33.6264MB/s
BM_copy/64 1640 ns 1640 ns 426893 37.2154MB/s
BM_copy/512 12804 ns 12801 ns 55417 38.1444MB/s
BM_copy/1024 25409 ns 25407 ns 27516 38.4365MB/s
BM_copy/8192 202986 ns 202990 ns 3454 38.4871MB/s
Comparing BM_memcpy to BM_copy (from ./a.out)
Benchmark Time CPU Time Old Time New CPU Old CPU New
--------------------------------------------------------------------------------------------------------------------
[BM_memcpy vs. BM_copy]/8 +5.2829 +5.2812 36 227 36 227
[BM_memcpy vs. BM_copy]/64 +21.1719 +21.1856 74 1640 74 1640
[BM_memcpy vs. BM_copy]/512 +145.6487 +145.6097 87 12804 87 12801
[BM_memcpy vs. BM_copy]/1024 +227.1860 +227.1776 111 25409 111 25407
[BM_memcpy vs. BM_copy]/8192 +308.1664 +308.2898 657 202986 656 202990
```
As you can see, the filter is applied both when running the benchmarks and before doing the diff; to make the diff work, the matched names are replaced with a common string. You can therefore compare two different benchmark families within one benchmark binary.
Note that the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
3. Compare filter one from benchmark one to filter two from benchmark two:
The program is invoked like:
``` bash
$ compare.py filters <benchmark_baseline> <filter_baseline> <benchmark_contender> <filter_contender> [benchmark options]...
```
Where `<benchmark_baseline>` and `<benchmark_contender>` each specify either a benchmark executable or a JSON output file. The type of the input file is detected automatically. If a benchmark executable is specified, the benchmark is run to obtain the results; otherwise the results are simply loaded from the output file.
Where `<filter_baseline>` and `<filter_contender>` are the same regex filters that you would pass to the `[--benchmark_filter=<regex>]` parameter of the benchmark binary.
`[benchmark options]` will be passed to the benchmark invocations. They can be anything the binary accepts, whether normal `--benchmark_*` parameters or custom parameters your binary takes.
Example output:
```
$ ./compare.py benchmarksfiltered ./a.out BM_memcpy ./a.out BM_copy
RUNNING: ./a.out --benchmark_filter=BM_memcpy --benchmark_out=/tmp/tmp_FvbYg
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:38:27
------------------------------------------------------
Benchmark Time CPU Iterations
------------------------------------------------------
BM_memcpy/8 37 ns 37 ns 18953482 204.118MB/s
BM_memcpy/64 74 ns 74 ns 9206578 828.245MB/s
BM_memcpy/512 91 ns 91 ns 8086195 5.25476GB/s
BM_memcpy/1024 120 ns 120 ns 5804513 7.95662GB/s
BM_memcpy/8192 664 ns 664 ns 1028363 11.4948GB/s
RUNNING: ./a.out --benchmark_filter=BM_copy --benchmark_out=/tmp/tmpDfL5iE
Run on (8 X 4000 MHz CPU s)
2017-11-07 21:38:32
----------------------------------------------------
Benchmark Time CPU Iterations
----------------------------------------------------
BM_copy/8 230 ns 230 ns 2985909 33.1161MB/s
BM_copy/64 1654 ns 1653 ns 419408 36.9137MB/s
BM_copy/512 13122 ns 13120 ns 53403 37.2156MB/s
BM_copy/1024 26679 ns 26666 ns 26575 36.6218MB/s
BM_copy/8192 215068 ns 215053 ns 3221 36.3283MB/s
Comparing BM_memcpy (from ./a.out) to BM_copy (from ./a.out)
Benchmark Time CPU Time Old Time New CPU Old CPU New
--------------------------------------------------------------------------------------------------------------------
[BM_memcpy vs. BM_copy]/8 +5.1649 +5.1637 37 230 37 230
[BM_memcpy vs. BM_copy]/64 +21.4352 +21.4374 74 1654 74 1653
[BM_memcpy vs. BM_copy]/512 +143.6022 +143.5865 91 13122 91 13120
[BM_memcpy vs. BM_copy]/1024 +221.5903 +221.4790 120 26679 120 26666
[BM_memcpy vs. BM_copy]/8192 +322.9059 +323.0096 664 215068 664 215053
```
This mode mixes the previous two: two (potentially different) benchmark binaries are run, and a different filter is applied to each.
Note that the values in the `Time` and `CPU` columns are calculated as `(new - old) / |old|`.
### U test
If the benchmarks have a sufficient repetition count, the tool can perform
a [U Test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test) of the
null hypothesis that it is equally likely that a randomly selected value from
one sample will be less than or greater than a randomly selected value from a
second sample.
If the calculated p-value is lower than the significance level alpha, the
result is statistically significant and the null hypothesis is rejected; in
other words, the two benchmarks are not identical.
**WARNING**: this requires a **large** number of repetitions (no fewer than 9)
to be meaningful!
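A minimal sketch of the same test using scipy (which the tool depends on, per the note at the top); the per-repetition timings are illustrative:
```python
from scipy.stats import mannwhitneyu

# Nine repetitions of the same benchmark from two runs (illustrative values).
baseline  = [36.1, 36.4, 35.9, 36.2, 36.0, 36.3, 36.1, 36.2, 36.0]
contender = [34.9, 35.2, 35.0, 35.1, 34.8, 35.3, 35.0, 35.1, 34.9]

stat, p_value = mannwhitneyu(baseline, contender, alternative="two-sided")
alpha = 0.05  # significance level
if p_value < alpha:
    print(f"p = {p_value:.4f} < {alpha}: the two benchmarks differ")
else:
    print(f"p = {p_value:.4f}: cannot reject the null hypothesis")
```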

View File

@ -1,320 +0,0 @@
#! /usr/bin/env python
# encoding: utf-8
import argparse
import errno
import logging
import os
import platform
import re
import sys
import subprocess
import tempfile
try:
import winreg
except ImportError:
import _winreg as winreg
try:
import urllib.request as request
except ImportError:
import urllib as request
try:
import urllib.parse as parse
except ImportError:
import urlparse as parse
class EmptyLogger(object):
'''
Provides an implementation that performs no logging
'''
def debug(self, *k, **kw):
pass
def info(self, *k, **kw):
pass
def warn(self, *k, **kw):
pass
def error(self, *k, **kw):
pass
def critical(self, *k, **kw):
pass
def setLevel(self, *k, **kw):
pass
urls = (
'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20'
'targetting%20Win32/Personal%20Builds/mingw-builds/installer/'
'repository.txt',
'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/'
'repository.txt'
)
'''
A list of mingw-build repositories
'''
def repository(urls = urls, log = EmptyLogger()):
'''
Downloads mingw-build repository files and parses them
'''
log.info('getting mingw-builds repository')
versions = {}
re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files')
re_sub = r'http://downloads.sourceforge.net/project/\1'
for url in urls:
log.debug(' - requesting: %s', url)
socket = request.urlopen(url)
repo = socket.read()
if not isinstance(repo, str):
repo = repo.decode()
socket.close()
for entry in repo.split('\n')[:-1]:
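# Each repository entry is pipe-separated:
# version|arch|threading|exceptions|revision|url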
value = entry.split('|')
version = tuple([int(n) for n in value[0].strip().split('.')])
version = versions.setdefault(version, {})
arch = value[1].strip()
if arch == 'x32':
arch = 'i686'
elif arch == 'x64':
arch = 'x86_64'
arch = version.setdefault(arch, {})
threading = arch.setdefault(value[2].strip(), {})
exceptions = threading.setdefault(value[3].strip(), {})
revision = exceptions.setdefault(int(value[4].strip()[3:]),
re_sourceforge.sub(re_sub, value[5].strip()))
return versions
def find_in_path(file, path=None):
'''
Attempts to find an executable in the path
'''
if platform.system() == 'Windows':
file += '.exe'
if path is None:
path = os.environ.get('PATH', '')
if isinstance(path, str):
path = path.split(os.pathsep)
return list(filter(os.path.exists,
map(lambda dir, file=file: os.path.join(dir, file), path)))
def find_7zip(log = EmptyLogger()):
'''
Attempts to find 7zip for unpacking the mingw-build archives
'''
log.info('finding 7zip')
path = find_in_path('7z')
if not path:
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip')
path, _ = winreg.QueryValueEx(key, 'Path')
path = [os.path.join(path, '7z.exe')]
log.debug('found \'%s\'', path[0])
return path[0]
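# Note: invoked at import time, so a missing 7zip fails fast.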
find_7zip()
def unpack(archive, location, log = EmptyLogger()):
'''
Unpacks a mingw-builds archive
'''
sevenzip = find_7zip(log)
log.info('unpacking %s', os.path.basename(archive))
cmd = [sevenzip, 'x', archive, '-o' + location, '-y']
log.debug(' - %r', cmd)
with open(os.devnull, 'w') as devnull:
subprocess.check_call(cmd, stdout = devnull)
def download(url, location, log = EmptyLogger()):
'''
Downloads and unpacks a mingw-builds archive
'''
log.info('downloading MinGW')
log.debug(' - url: %s', url)
log.debug(' - location: %s', location)
re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*')
stream = request.urlopen(url)
try:
content = stream.getheader('Content-Disposition') or ''
except AttributeError:
content = stream.headers.getheader('Content-Disposition') or ''
matches = re_content.match(content)
if matches:
filename = matches.group(2)
else:
parsed = parse.urlparse(stream.geturl())
filename = os.path.basename(parsed.path)
try:
os.makedirs(location)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(location):
pass
else:
raise
archive = os.path.join(location, filename)
with open(archive, 'wb') as out:
while True:
buf = stream.read(1024)
if not buf:
break
out.write(buf)
unpack(archive, location, log = log)
os.remove(archive)
possible = os.path.join(location, 'mingw64')
if not os.path.exists(possible):
possible = os.path.join(location, 'mingw32')
if not os.path.exists(possible):
raise ValueError('Failed to find unpacked MinGW: ' + possible)
return possible
def root(location = None, arch = None, version = None, threading = None,
exceptions = None, revision = None, log = EmptyLogger()):
'''
Returns the root folder of a specific version of the mingw-builds variant
of gcc. Will download the compiler if needed
'''
# Get the repository if we don't have all the information
if not (arch and version and threading and exceptions and revision):
versions = repository(log = log)
# Determine some defaults
version = version or max(versions.keys())
if not arch:
arch = platform.machine().lower()
if arch == 'x86':
arch = 'i686'
elif arch == 'amd64':
arch = 'x86_64'
if not threading:
keys = versions[version][arch].keys()
if 'posix' in keys:
threading = 'posix'
elif 'win32' in keys:
threading = 'win32'
else:
threading = keys[0]
if not exceptions:
keys = versions[version][arch][threading].keys()
if 'seh' in keys:
exceptions = 'seh'
elif 'sjlj' in keys:
exceptions = 'sjlj'
else:
exceptions = keys[0]
if revision is None:
revision = max(versions[version][arch][threading][exceptions].keys())
if not location:
location = os.path.join(tempfile.gettempdir(), 'mingw-builds')
# Get the download url
url = versions[version][arch][threading][exceptions][revision]
# Log the selected configuration
log.info('finding MinGW %s', '.'.join(str(v) for v in version))
log.debug(' - arch: %s', arch)
log.debug(' - threading: %s', threading)
log.debug(' - exceptions: %s', exceptions)
log.debug(' - revision: %s', revision)
log.debug(' - url: %s', url)
# Store each specific revision differently
slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}'
slug = slug.format(
version = '.'.join(str(v) for v in version),
arch = arch,
threading = threading,
exceptions = exceptions,
revision = revision
)
if arch == 'x86_64':
root_dir = os.path.join(location, slug, 'mingw64')
elif arch == 'i686':
root_dir = os.path.join(location, slug, 'mingw32')
else:
raise ValueError('Unknown MinGW arch: ' + arch)
# Download if needed
if not os.path.exists(root_dir):
downloaded = download(url, os.path.join(location, slug), log = log)
if downloaded != root_dir:
raise ValueError('The location of mingw did not match\n%s\n%s'
% (downloaded, root_dir))
return root_dir
def str2ver(string):
'''
Converts a version string into a tuple
'''
try:
version = tuple(int(v) for v in string.split('.'))
if len(version) != 3:
raise ValueError()
except ValueError:
raise argparse.ArgumentTypeError(
'please provide a version string with three components (e.g. 5.3.0)')
return version
def main():
'''
Invoked when the script is run directly by the python interpreter
'''
parser = argparse.ArgumentParser(
description = 'Downloads a specific version of MinGW',
formatter_class = argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--location',
help = 'the location to download the compiler to',
default = os.path.join(tempfile.gettempdir(), 'mingw-builds'))
parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'],
help = 'the target MinGW architecture string')
parser.add_argument('--version', type = str2ver,
help = 'the version of GCC to download')
parser.add_argument('--threading', choices = ['posix', 'win32'],
help = 'the threading type of the compiler')
parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'],
help = 'the method to throw exceptions')
parser.add_argument('--revision', type=int,
help = 'the revision of the MinGW release')
group = parser.add_mutually_exclusive_group()
group.add_argument('-v', '--verbose', action='store_true',
help='increase the script output verbosity')
group.add_argument('-q', '--quiet', action='store_true',
help='only print errors and warning')
args = parser.parse_args()
# Create the logger
logger = logging.getLogger('mingw')
handler = logging.StreamHandler()
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(logging.INFO)
if args.quiet:
logger.setLevel(logging.WARN)
if args.verbose:
logger.setLevel(logging.DEBUG)
# Get MinGW
root_dir = root(location = args.location, arch = args.arch,
version = args.version, threading = args.threading,
exceptions = args.exceptions, revision = args.revision,
log = logger)
sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin'))
if __name__ == '__main__':
try:
main()
except IOError as e:
sys.stderr.write('IO error: %s\n' % e)
sys.exit(1)
except OSError as e:
sys.stderr.write('OS error: %s\n' % e)
sys.exit(1)
except KeyboardInterrupt as e:
sys.stderr.write('Killed\n')
sys.exit(1)

View File

@ -1,16 +0,0 @@
# How to release
* Make sure you're on master and synced to HEAD
* Ensure the project builds and tests run (sanity check only, obviously)
* `parallel -j0 exec ::: test/*_test` can help ensure everything at least
passes
* Prepare release notes
* `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of
commits between the last annotated tag and HEAD
* Pick the most interesting.
* Create a release through github's interface
* Note this will create a lightweight tag.
* Update this to an annotated tag:
* `git pull --tags`
* `git tag -a -f <tag> <tag>`
* `git push --force origin`

View File

@ -1,124 +0,0 @@
import os
import posixpath
import re
import shutil
import sys
from distutils import sysconfig
import setuptools
from setuptools.command import build_ext
here = os.path.dirname(os.path.abspath(__file__))
IS_WINDOWS = sys.platform.startswith('win')
def _get_version():
"""Parse the version string from __init__.py."""
with open(os.path.join(here, 'bindings', 'python', 'benchmark', '__init__.py')) as f:
try:
version_line = next(
line for line in f if line.startswith('__version__'))
except StopIteration:
raise ValueError('__version__ not defined in __init__.py')
else:
ns = {}
exec(version_line, ns) # pylint: disable=exec-used
return ns['__version__']
def _parse_requirements(path):
with open(os.path.join(here, path)) as f:
return [
line.rstrip() for line in f
if not (line.isspace() or line.startswith('#'))
]
class BazelExtension(setuptools.Extension):
"""A C/C++ extension that is defined as a Bazel BUILD target."""
def __init__(self, name, bazel_target):
self.bazel_target = bazel_target
self.relpath, self.target_name = (
posixpath.relpath(bazel_target, '//').split(':'))
setuptools.Extension.__init__(self, name, sources=[])
class BuildBazelExtension(build_ext.build_ext):
"""A command that runs Bazel to build a C/C++ extension."""
def run(self):
for ext in self.extensions:
self.bazel_build(ext)
build_ext.build_ext.run(self)
def bazel_build(self, ext):
with open('WORKSPACE', 'r') as f:
workspace_contents = f.read()
with open('WORKSPACE', 'w') as f:
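# Rewrite the python headers path in WORKSPACE to the active interpreter's
# include directory; the lookbehind/lookahead anchors on the `path = "..."`
# attribute tagged with the sentinel comment.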
f.write(re.sub(
r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)',
sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep),
workspace_contents))
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
bazel_argv = [
'bazel',
'build',
ext.bazel_target,
'--symlink_prefix=' + os.path.join(self.build_temp, 'bazel-'),
'--compilation_mode=' + ('dbg' if self.debug else 'opt'),
]
if IS_WINDOWS:
# Link with python*.lib.
for library_dir in self.library_dirs:
bazel_argv.append('--linkopt=/LIBPATH:' + library_dir)
self.spawn(bazel_argv)
shared_lib_suffix = '.dll' if IS_WINDOWS else '.so'
ext_bazel_bin_path = os.path.join(
self.build_temp, 'bazel-bin',
ext.relpath, ext.target_name + shared_lib_suffix)
ext_dest_path = self.get_ext_fullpath(ext.name)
ext_dest_dir = os.path.dirname(ext_dest_path)
if not os.path.exists(ext_dest_dir):
os.makedirs(ext_dest_dir)
shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
setuptools.setup(
name='google-benchmark',
version=_get_version(),
url='https://github.com/google/benchmark',
description='A library to benchmark code snippets.',
author='Google',
author_email='benchmark-py@google.com',
# Contained modules and scripts.
package_dir={'': 'bindings/python'},
packages=setuptools.find_packages('bindings/python'),
install_requires=_parse_requirements('bindings/python/requirements.txt'),
cmdclass=dict(build_ext=BuildBazelExtension),
ext_modules=[BazelExtension('benchmark._benchmark', '//bindings/python/benchmark:_benchmark')],
zip_safe=False,
# PyPI package information.
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Testing',
'Topic :: System :: Benchmark',
],
license='Apache 2.0',
keywords='benchmark',
)

View File

@ -1,114 +0,0 @@
# Allow the source files to find headers in src/
include(GNUInstallDirs)
include_directories(${PROJECT_SOURCE_DIR}/src)
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
list(APPEND CMAKE_SHARED_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
list(APPEND CMAKE_MODULE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
endif()
file(GLOB
SOURCE_FILES
*.cc
${PROJECT_SOURCE_DIR}/include/benchmark/*.h
${CMAKE_CURRENT_SOURCE_DIR}/*.h)
file(GLOB BENCHMARK_MAIN "benchmark_main.cc")
foreach(item ${BENCHMARK_MAIN})
list(REMOVE_ITEM SOURCE_FILES "${item}")
endforeach()
add_library(benchmark ${SOURCE_FILES})
add_library(benchmark::benchmark ALIAS benchmark)
set_target_properties(benchmark PROPERTIES
OUTPUT_NAME "benchmark"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
# Link threads.
target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
find_library(LIBRT rt)
if(LIBRT)
target_link_libraries(benchmark ${LIBRT})
endif()
if(CMAKE_BUILD_TYPE)
string(TOUPPER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_UPPER)
endif()
if(NOT CMAKE_THREAD_LIBS_INIT AND "${CMAKE_CXX_FLAGS} ${CMAKE_CXX_FLAGS_${CMAKE_BUILD_TYPE_UPPER}}" MATCHES ".*-fsanitize=[^ ]*address.*")
message(WARNING "CMake's FindThreads.cmake did not fail, but CMAKE_THREAD_LIBS_INIT ended up being empty. This was fixed in https://github.com/Kitware/CMake/commit/d53317130e84898c5328c237186dbd995aaf1c12 Let's guess that -pthread is sufficient.")
target_link_libraries(benchmark -pthread)
endif()
# We need extra libraries on Windows
if(${CMAKE_SYSTEM_NAME} MATCHES "Windows")
target_link_libraries(benchmark shlwapi)
endif()
# We need extra libraries on Solaris
if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
target_link_libraries(benchmark kstat)
endif()
# Benchmark main library
add_library(benchmark_main "benchmark_main.cc")
add_library(benchmark::benchmark_main ALIAS benchmark_main)
set_target_properties(benchmark_main PROPERTIES
OUTPUT_NAME "benchmark_main"
VERSION ${GENERIC_LIB_VERSION}
SOVERSION ${GENERIC_LIB_SOVERSION}
)
target_include_directories(benchmark_main PUBLIC
$<BUILD_INTERFACE:${CMAKE_CURRENT_SOURCE_DIR}/../include>
)
target_link_libraries(benchmark_main benchmark::benchmark)
set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc")
set(targets_export_name "${PROJECT_NAME}Targets")
set(namespace "${PROJECT_NAME}::")
include(CMakePackageConfigHelpers)
write_basic_package_version_file(
"${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion
)
configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY)
configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY)
if (BENCHMARK_ENABLE_INSTALL)
# Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable)
install(
TARGETS benchmark benchmark_main
EXPORT ${targets_export_name}
ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR}
LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR}
RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR}
INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR})
install(
DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark"
DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}
FILES_MATCHING PATTERN "*.*h")
install(
FILES "${project_config}" "${version_config}"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
install(
FILES "${pkg_config}"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig")
install(
EXPORT "${targets_export_name}"
NAMESPACE "${namespace}"
DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
endif()

View File

@ -1,33 +0,0 @@
#ifndef BENCHMARK_ARRAYSIZE_H_
#define BENCHMARK_ARRAYSIZE_H_
#include "internal_macros.h"
namespace benchmark
{
namespace internal
{
// The arraysize(arr) macro returns the # of elements in an array arr.
// The expression is a compile-time constant, and therefore can be
// used in defining new arrays, for example. If you use arraysize on
// a pointer by mistake, you will get a compile-time error.
//
// This template function declaration is used in defining arraysize.
// Note that the function doesn't need an implementation, as we only
// use its type.
template <typename T, size_t N> char (&ArraySizeHelper(T (&array)[N]))[N];
// That gcc wants both of these prototypes seems mysterious. VC, for
// its part, can't decide which to use (another mystery). Matching of
// template overloads: the final frontier.
#ifndef COMPILER_MSVC
template <typename T, size_t N> char (&ArraySizeHelper(const T (&array)[N]))[N];
#endif
#define arraysize(array) (sizeof(::benchmark::internal::ArraySizeHelper(array)))
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_ARRAYSIZE_H_

View File

@ -1,538 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "benchmark_runner.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"
// Print a list of benchmarks. This option overrides all other options.
DEFINE_bool(benchmark_list_tests, false);
// A regular expression that specifies the set of benchmarks to execute. If
// this flag is empty, or if this flag is the string "all", all benchmarks
// linked into the binary are run.
DEFINE_string(benchmark_filter, ".");
// Minimum number of seconds we should run benchmark before results are
// considered significant. For cpu-time based tests, this is the lower bound
// on the total cpu time used by all threads that make up the test. For
// real-time based tests, this is the lower bound on the elapsed time of the
// benchmark execution, regardless of number of threads.
DEFINE_double(benchmark_min_time, 0.5);
// The number of runs of each benchmark. If greater than 1, the mean and
// standard deviation of the runs will be reported.
DEFINE_int32(benchmark_repetitions, 1);
// Report the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are reported for
// repeated benchmarks. Affects all reporters.
DEFINE_bool(benchmark_report_aggregates_only, false);
// Display the result of each benchmark repetition. When 'true' is specified,
// only the mean, standard deviation, and other statistics are displayed for
// repeated benchmarks. Unlike benchmark_report_aggregates_only, only affects
// the display reporter, but *NOT* file reporter, which will still contain
// all the output.
DEFINE_bool(benchmark_display_aggregates_only, false);
// The format to use for console output.
// Valid values are 'console', 'json', or 'csv'.
DEFINE_string(benchmark_format, "console");
// The format to use for file output.
// Valid values are 'console', 'json', or 'csv'.
DEFINE_string(benchmark_out_format, "json");
// The file to write additional output to.
DEFINE_string(benchmark_out, "");
// Whether to use colors in the output. Valid values:
// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use colors if
// the output is being sent to a terminal and the TERM environment variable is
// set to a terminal type that supports colors.
DEFINE_string(benchmark_color, "auto");
// Whether to use tabular format when printing user counters to the console.
// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false.
DEFINE_bool(benchmark_counters_tabular, false);
// The level of verbose logging to output
DEFINE_int32(v, 0);
namespace benchmark
{
namespace internal
{
// FIXME: wouldn't LTO mess this up?
void UseCharPointer(char const volatile *)
{
}
} // namespace internal
State::State(IterationCount max_iters, const std::vector<int64_t> &ranges, int thread_i, int n_threads,
internal::ThreadTimer *timer, internal::ThreadManager *manager)
: total_iterations_(0), batch_leftover_(0), max_iterations(max_iters), started_(false), finished_(false),
error_occurred_(false), range_(ranges), complexity_n_(0), counters(), thread_index(thread_i), threads(n_threads),
timer_(timer), manager_(manager)
{
CHECK(max_iterations != 0) << "At least one iteration must be run";
CHECK_LT(thread_index, threads) << "thread_index must be less than threads";
// Note: The use of offsetof below is technically undefined until C++17
// because State is not a standard layout type. However, all compilers
// currently provide well-defined behavior as an extension (which is
// demonstrated since constexpr evaluation must diagnose all undefined
// behavior). GCC and Clang also warn about this use of offsetof, which
// must be suppressed.
#if defined(__INTEL_COMPILER)
#pragma warning push
#pragma warning(disable : 1875)
#elif defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Winvalid-offsetof"
#endif
// Offset tests to ensure commonly accessed data is on the first cache line.
const int cache_line_size = 64;
static_assert(offsetof(State, error_occurred_) <= (cache_line_size - sizeof(error_occurred_)), "");
#if defined(__INTEL_COMPILER)
#pragma warning pop
#elif defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
}
void State::PauseTiming()
{
// Add in time accumulated so far
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StopTimer();
}
void State::ResumeTiming()
{
CHECK(started_ && !finished_ && !error_occurred_);
timer_->StartTimer();
}
void State::SkipWithError(const char *msg)
{
CHECK(msg);
error_occurred_ = true;
{
MutexLock l(manager_->GetBenchmarkMutex());
if (manager_->results.has_error_ == false)
{
manager_->results.error_message_ = msg;
manager_->results.has_error_ = true;
}
}
total_iterations_ = 0;
if (timer_->running())
timer_->StopTimer();
}
void State::SetIterationTime(double seconds)
{
timer_->SetIterationTime(seconds);
}
void State::SetLabel(const char *label)
{
MutexLock l(manager_->GetBenchmarkMutex());
manager_->results.report_label_ = label;
}
void State::StartKeepRunning()
{
CHECK(!started_ && !finished_);
started_ = true;
total_iterations_ = error_occurred_ ? 0 : max_iterations;
manager_->StartStopBarrier();
if (!error_occurred_)
ResumeTiming();
}
void State::FinishKeepRunning()
{
CHECK(started_ && (!finished_ || error_occurred_));
if (!error_occurred_)
{
PauseTiming();
}
// Total iterations has now wrapped around past 0. Fix this.
total_iterations_ = 0;
finished_ = true;
manager_->StartStopBarrier();
}
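// Illustrative sketch (not part of the original file): StartKeepRunning()
// and FinishKeepRunning() bracket the user's measurement loop. A typical
// (hypothetical) benchmark drives them implicitly via the range-for over
// State:
//
//   static void BM_Example(benchmark::State &state) {
//     for (auto _ : state) {   // first iteration calls StartKeepRunning();
//       DoWork();              // loop exit calls FinishKeepRunning()
//     }
//   }
//   BENCHMARK(BM_Example);
//
// DoWork() is a placeholder for the code under measurement.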
namespace internal
{
namespace
{
void RunBenchmarks(const std::vector<BenchmarkInstance> &benchmarks, BenchmarkReporter *display_reporter,
BenchmarkReporter *file_reporter)
{
// Note the file_reporter can be null.
CHECK(display_reporter != nullptr);
// Determine the width of the name field using a minimum width of 10.
bool might_have_aggregates = FLAGS_benchmark_repetitions > 1;
size_t name_field_width = 10;
size_t stat_field_width = 0;
for (const BenchmarkInstance &benchmark : benchmarks)
{
name_field_width = std::max<size_t>(name_field_width, benchmark.name.str().size());
might_have_aggregates |= benchmark.repetitions > 1;
for (const auto &Stat : *benchmark.statistics)
stat_field_width = std::max<size_t>(stat_field_width, Stat.name_.size());
}
if (might_have_aggregates)
name_field_width += 1 + stat_field_width;
// Print header here
BenchmarkReporter::Context context;
context.name_field_width = name_field_width;
// Keep track of running times of all instances of the current benchmark
std::vector<BenchmarkReporter::Run> complexity_reports;
// We flush streams after invoking reporter methods that write to them. This
// ensures users get timely updates even when streams are not line-buffered.
auto flushStreams = [](BenchmarkReporter *reporter) {
if (!reporter)
return;
std::flush(reporter->GetOutputStream());
std::flush(reporter->GetErrorStream());
};
if (display_reporter->ReportContext(context) && (!file_reporter || file_reporter->ReportContext(context)))
{
flushStreams(display_reporter);
flushStreams(file_reporter);
for (const auto &benchmark : benchmarks)
{
RunResults run_results = RunBenchmark(benchmark, &complexity_reports);
auto report = [&run_results](BenchmarkReporter *reporter, bool report_aggregates_only) {
assert(reporter);
// If there are no aggregates, do output non-aggregates.
report_aggregates_only &= !run_results.aggregates_only.empty();
if (!report_aggregates_only)
reporter->ReportRuns(run_results.non_aggregates);
if (!run_results.aggregates_only.empty())
reporter->ReportRuns(run_results.aggregates_only);
};
report(display_reporter, run_results.display_report_aggregates_only);
if (file_reporter)
report(file_reporter, run_results.file_report_aggregates_only);
flushStreams(display_reporter);
flushStreams(file_reporter);
}
}
display_reporter->Finalize();
if (file_reporter)
file_reporter->Finalize();
flushStreams(display_reporter);
flushStreams(file_reporter);
}
// Disable deprecated warnings temporarily because we need to reference
// CSVReporter but don't want to trigger -Werror=deprecated-declarations
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
#endif
std::unique_ptr<BenchmarkReporter> CreateReporter(std::string const &name, ConsoleReporter::OutputOptions output_opts)
{
typedef std::unique_ptr<BenchmarkReporter> PtrType;
if (name == "console")
{
return PtrType(new ConsoleReporter(output_opts));
}
else if (name == "json")
{
return PtrType(new JSONReporter);
}
else if (name == "csv")
{
return PtrType(new CSVReporter);
}
else
{
std::cerr << "Unexpected format: '" << name << "'\n";
std::exit(1);
}
}
#ifdef __GNUC__
#pragma GCC diagnostic pop
#endif
} // end namespace
bool IsZero(double n)
{
return std::abs(n) < std::numeric_limits<double>::epsilon();
}
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color)
{
int output_opts = ConsoleReporter::OO_Defaults;
auto is_benchmark_color = [force_no_color]() -> bool {
if (force_no_color)
{
return false;
}
if (FLAGS_benchmark_color == "auto")
{
return IsColorTerminal();
}
return IsTruthyFlagValue(FLAGS_benchmark_color);
};
if (is_benchmark_color())
{
output_opts |= ConsoleReporter::OO_Color;
}
else
{
output_opts &= ~ConsoleReporter::OO_Color;
}
if (FLAGS_benchmark_counters_tabular)
{
output_opts |= ConsoleReporter::OO_Tabular;
}
else
{
output_opts &= ~ConsoleReporter::OO_Tabular;
}
return static_cast<ConsoleReporter::OutputOptions>(output_opts);
}
} // end namespace internal
size_t RunSpecifiedBenchmarks()
{
return RunSpecifiedBenchmarks(nullptr, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter)
{
return RunSpecifiedBenchmarks(display_reporter, nullptr);
}
size_t RunSpecifiedBenchmarks(BenchmarkReporter *display_reporter, BenchmarkReporter *file_reporter)
{
std::string spec = FLAGS_benchmark_filter;
if (spec.empty() || spec == "all")
spec = "."; // Regexp that matches all benchmarks
// Setup the reporters
std::ofstream output_file;
std::unique_ptr<BenchmarkReporter> default_display_reporter;
std::unique_ptr<BenchmarkReporter> default_file_reporter;
if (!display_reporter)
{
default_display_reporter = internal::CreateReporter(FLAGS_benchmark_format, internal::GetOutputOptions());
display_reporter = default_display_reporter.get();
}
auto &Out = display_reporter->GetOutputStream();
auto &Err = display_reporter->GetErrorStream();
std::string const &fname = FLAGS_benchmark_out;
if (fname.empty() && file_reporter)
{
Err << "A custom file reporter was provided but "
"--benchmark_out=<file> was not specified."
<< std::endl;
std::exit(1);
}
if (!fname.empty())
{
output_file.open(fname);
if (!output_file.is_open())
{
Err << "invalid file name: '" << fname << "'" << std::endl;
std::exit(1);
}
if (!file_reporter)
{
default_file_reporter = internal::CreateReporter(FLAGS_benchmark_out_format, ConsoleReporter::OO_None);
file_reporter = default_file_reporter.get();
}
file_reporter->SetOutputStream(&output_file);
file_reporter->SetErrorStream(&output_file);
}
std::vector<internal::BenchmarkInstance> benchmarks;
if (!FindBenchmarksInternal(spec, &benchmarks, &Err))
return 0;
if (benchmarks.empty())
{
Err << "Failed to match any benchmarks against regex: " << spec << "\n";
return 0;
}
if (FLAGS_benchmark_list_tests)
{
for (auto const &benchmark : benchmarks)
Out << benchmark.name.str() << "\n";
}
else
{
internal::RunBenchmarks(benchmarks, display_reporter, file_reporter);
}
return benchmarks.size();
}
void RegisterMemoryManager(MemoryManager *manager)
{
internal::memory_manager = manager;
}
namespace internal
{
void PrintUsageAndExit()
{
fprintf(stdout, "benchmark"
" [--benchmark_list_tests={true|false}]\n"
" [--benchmark_filter=<regex>]\n"
" [--benchmark_min_time=<min_time>]\n"
" [--benchmark_repetitions=<num_repetitions>]\n"
" [--benchmark_report_aggregates_only={true|false}]\n"
" [--benchmark_display_aggregates_only={true|false}]\n"
" [--benchmark_format=<console|json|csv>]\n"
" [--benchmark_out=<filename>]\n"
" [--benchmark_out_format=<json|console|csv>]\n"
" [--benchmark_color={auto|true|false}]\n"
" [--benchmark_counters_tabular={true|false}]\n"
" [--v=<verbosity>]\n");
exit(0);
}
void ParseCommandLineFlags(int *argc, char **argv)
{
using namespace benchmark;
BenchmarkReporter::Context::executable_name = (argc && *argc > 0) ? argv[0] : "unknown";
for (int i = 1; argc && i < *argc; ++i)
{
if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) ||
ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) ||
ParseDoubleFlag(argv[i], "benchmark_min_time", &FLAGS_benchmark_min_time) ||
ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAGS_benchmark_repetitions) ||
ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", &FLAGS_benchmark_report_aggregates_only) ||
ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", &FLAGS_benchmark_display_aggregates_only) ||
ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) ||
ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) ||
ParseStringFlag(argv[i], "benchmark_out_format", &FLAGS_benchmark_out_format) ||
ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) ||
// "color_print" is the deprecated name for "benchmark_color".
// TODO: Remove this.
ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) ||
ParseBoolFlag(argv[i], "benchmark_counters_tabular", &FLAGS_benchmark_counters_tabular) ||
ParseInt32Flag(argv[i], "v", &FLAGS_v))
{
for (int j = i; j != *argc - 1; ++j)
argv[j] = argv[j + 1];
--(*argc);
--i;
}
else if (IsFlag(argv[i], "help"))
{
PrintUsageAndExit();
}
}
for (auto const *flag : {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format})
if (*flag != "console" && *flag != "json" && *flag != "csv")
{
PrintUsageAndExit();
}
if (FLAGS_benchmark_color.empty())
{
PrintUsageAndExit();
}
}
int InitializeStreams()
{
static std::ios_base::Init init;
return 0;
}
} // end namespace internal
void Initialize(int *argc, char **argv)
{
internal::ParseCommandLineFlags(argc, argv);
internal::LogLevel() = FLAGS_v;
}
bool ReportUnrecognizedArguments(int argc, char **argv)
{
for (int i = 1; i < argc; ++i)
{
fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]);
}
return argc > 1;
}
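// Illustrative only (not part of the original file): the usual call
// sequence, which BENCHMARK_MAIN() expands to a close variant of:
//
//   int main(int argc, char **argv) {
//     benchmark::Initialize(&argc, argv);
//     if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
//     benchmark::RunSpecifiedBenchmarks();
//   }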
} // end namespace benchmark

View File

@ -1,17 +0,0 @@
#include "benchmark_api_internal.h"
namespace benchmark
{
namespace internal
{
State BenchmarkInstance::Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
internal::ThreadManager *manager) const
{
State st(iters, arg, thread_id, threads, timer, manager);
benchmark->Run(st);
return st;
}
} // namespace internal
} // namespace benchmark

View File

@ -1,54 +0,0 @@
#ifndef BENCHMARK_API_INTERNAL_H
#define BENCHMARK_API_INTERNAL_H
#include "benchmark/benchmark.h"
#include "commandlineflags.h"
#include <cmath>
#include <iosfwd>
#include <limits>
#include <memory>
#include <string>
#include <vector>
namespace benchmark
{
namespace internal
{
// Information kept per benchmark we may want to run
struct BenchmarkInstance
{
BenchmarkName name;
Benchmark *benchmark;
AggregationReportMode aggregation_report_mode;
std::vector<int64_t> arg;
TimeUnit time_unit;
int range_multiplier;
bool measure_process_cpu_time;
bool use_real_time;
bool use_manual_time;
BigO complexity;
BigOFunc *complexity_lambda;
UserCounters counters;
const std::vector<Statistics> *statistics;
bool last_benchmark_instance;
int repetitions;
double min_time;
IterationCount iterations;
int threads; // Number of concurrent threads to use
State Run(IterationCount iters, int thread_id, internal::ThreadTimer *timer,
internal::ThreadManager *manager) const;
};
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
bool IsZero(double n);
ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false);
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_API_INTERNAL_H

View File

@ -1,17 +0,0 @@
// Copyright 2018 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
BENCHMARK_MAIN();

View File

@ -1,66 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <benchmark/benchmark.h>
namespace benchmark
{
namespace
{
// Compute the total size of a pack of std::strings
size_t size_impl()
{
return 0;
}
template <typename Head, typename... Tail> size_t size_impl(const Head &head, const Tail &...tail)
{
return head.size() + size_impl(tail...);
}
// Join a pack of std::strings using a delimiter
// TODO: use absl::StrJoin
void join_impl(std::string &, char)
{
}
template <typename Head, typename... Tail>
void join_impl(std::string &s, const char delimiter, const Head &head, const Tail &...tail)
{
if (!s.empty() && !head.empty())
{
s += delimiter;
}
s += head;
join_impl(s, delimiter, tail...);
}
template <typename... Ts> std::string join(char delimiter, const Ts &...ts)
{
std::string s;
s.reserve(sizeof...(Ts) + size_impl(ts...));
join_impl(s, delimiter, ts...);
return s;
}
} // namespace
std::string BenchmarkName::str() const
{
return join('/', function_name, args, min_time, iterations, repetitions, time_type, threads);
}
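// Illustrative example (not part of the original file): join('/') skips
// empty components, so a hypothetical BenchmarkName with function_name
// "BM_memcpy", args "8", threads "threads:2" and everything else empty
// stringifies to "BM_memcpy/8/threads:2".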
} // namespace benchmark

View File

@ -1,564 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark_register.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <sstream>
#include <thread>
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "check.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "timers.h"
namespace benchmark
{
namespace
{
// For non-dense Range, intermediate values are powers of kRangeMultiplier.
static const int kRangeMultiplier = 8;
// The size of a benchmark family is the number of inputs on which the
// benchmark is repeated. If this is "large", warn the user during configuration.
static const size_t kMaxFamilySize = 100;
} // end namespace
namespace internal
{
//=============================================================================//
// BenchmarkFamilies
//=============================================================================//
// Class for managing registered benchmarks. Note that each registered
// benchmark identifies a family of related benchmarks to run.
class BenchmarkFamilies
{
public:
static BenchmarkFamilies *GetInstance();
// Registers a benchmark family and returns the index assigned to it.
size_t AddBenchmark(std::unique_ptr<Benchmark> family);
// Clear all registered benchmark families.
void ClearBenchmarks();
// Extract the list of benchmark instances that match the specified
// regular expression.
bool FindBenchmarks(std::string re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err);
private:
BenchmarkFamilies()
{
}
std::vector<std::unique_ptr<Benchmark>> families_;
Mutex mutex_;
};
BenchmarkFamilies *BenchmarkFamilies::GetInstance()
{
static BenchmarkFamilies instance;
return &instance;
}
size_t BenchmarkFamilies::AddBenchmark(std::unique_ptr<Benchmark> family)
{
MutexLock l(mutex_);
size_t index = families_.size();
families_.push_back(std::move(family));
return index;
}
void BenchmarkFamilies::ClearBenchmarks()
{
MutexLock l(mutex_);
families_.clear();
families_.shrink_to_fit();
}
bool BenchmarkFamilies::FindBenchmarks(std::string spec, std::vector<BenchmarkInstance> *benchmarks,
std::ostream *ErrStream)
{
CHECK(ErrStream);
auto &Err = *ErrStream;
// Make regular expression out of command-line flag
std::string error_msg;
Regex re;
bool isNegativeFilter = false;
if (spec[0] == '-')
{
spec.replace(0, 1, "");
isNegativeFilter = true;
}
if (!re.Init(spec, &error_msg))
{
Err << "Could not compile benchmark re: " << error_msg << std::endl;
return false;
}
// Special list of thread counts to use when none are specified
const std::vector<int> one_thread = {1};
MutexLock l(mutex_);
for (std::unique_ptr<Benchmark> &family : families_)
{
// Family was deleted or benchmark doesn't match
if (!family)
continue;
if (family->ArgsCnt() == -1)
{
family->Args({});
}
const std::vector<int> *thread_counts =
(family->thread_counts_.empty() ? &one_thread
: &static_cast<const std::vector<int> &>(family->thread_counts_));
const size_t family_size = family->args_.size() * thread_counts->size();
// The benchmark will be run with at least 'family_size' different inputs.
// If 'family_size' is very large, warn the user.
if (family_size > kMaxFamilySize)
{
Err << "The number of inputs is very large. " << family->name_ << " will be repeated at least "
<< family_size << " times.\n";
}
// Reserve space in the special case of the regex ".", since we know the
// final family size.
if (spec == ".")
benchmarks->reserve(family_size);
for (auto const &args : family->args_)
{
for (int num_threads : *thread_counts)
{
BenchmarkInstance instance;
instance.name.function_name = family->name_;
instance.benchmark = family.get();
instance.aggregation_report_mode = family->aggregation_report_mode_;
instance.arg = args;
instance.time_unit = family->time_unit_;
instance.range_multiplier = family->range_multiplier_;
instance.min_time = family->min_time_;
instance.iterations = family->iterations_;
instance.repetitions = family->repetitions_;
instance.measure_process_cpu_time = family->measure_process_cpu_time_;
instance.use_real_time = family->use_real_time_;
instance.use_manual_time = family->use_manual_time_;
instance.complexity = family->complexity_;
instance.complexity_lambda = family->complexity_lambda_;
instance.statistics = &family->statistics_;
instance.threads = num_threads;
// Add arguments to instance name
size_t arg_i = 0;
for (auto const &arg : args)
{
if (!instance.name.args.empty())
{
instance.name.args += '/';
}
if (arg_i < family->arg_names_.size())
{
const auto &arg_name = family->arg_names_[arg_i];
if (!arg_name.empty())
{
instance.name.args += StrFormat("%s:", arg_name.c_str());
}
}
instance.name.args += StrFormat("%" PRId64, arg);
++arg_i;
}
if (!IsZero(family->min_time_))
instance.name.min_time = StrFormat("min_time:%0.3f", family->min_time_);
if (family->iterations_ != 0)
{
instance.name.iterations =
StrFormat("iterations:%lu", static_cast<unsigned long>(family->iterations_));
}
if (family->repetitions_ != 0)
instance.name.repetitions = StrFormat("repeats:%d", family->repetitions_);
if (family->measure_process_cpu_time_)
{
instance.name.time_type = "process_time";
}
if (family->use_manual_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "manual_time";
}
else if (family->use_real_time_)
{
if (!instance.name.time_type.empty())
{
instance.name.time_type += '/';
}
instance.name.time_type += "real_time";
}
// Add the number of threads used to the name
if (!family->thread_counts_.empty())
{
instance.name.threads = StrFormat("threads:%d", instance.threads);
}
const auto full_name = instance.name.str();
if ((re.Match(full_name) && !isNegativeFilter) || (!re.Match(full_name) && isNegativeFilter))
{
instance.last_benchmark_instance = (&args == &family->args_.back());
benchmarks->push_back(std::move(instance));
}
}
}
}
return true;
}
Benchmark *RegisterBenchmarkInternal(Benchmark *bench)
{
std::unique_ptr<Benchmark> bench_ptr(bench);
BenchmarkFamilies *families = BenchmarkFamilies::GetInstance();
families->AddBenchmark(std::move(bench_ptr));
return bench;
}
// FIXME: This function is a hack so that benchmark.cc can access
// `BenchmarkFamilies`
bool FindBenchmarksInternal(const std::string &re, std::vector<BenchmarkInstance> *benchmarks, std::ostream *Err)
{
return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
}
//=============================================================================//
// Benchmark
//=============================================================================//
Benchmark::Benchmark(const char *name)
: name_(name), aggregation_report_mode_(ARM_Unspecified), time_unit_(kNanosecond),
range_multiplier_(kRangeMultiplier), min_time_(0), iterations_(0), repetitions_(0),
measure_process_cpu_time_(false), use_real_time_(false), use_manual_time_(false), complexity_(oNone),
complexity_lambda_(nullptr)
{
ComputeStatistics("mean", StatisticsMean);
ComputeStatistics("median", StatisticsMedian);
ComputeStatistics("stddev", StatisticsStdDev);
}
Benchmark::~Benchmark()
{
}
Benchmark *Benchmark::Arg(int64_t x)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
args_.push_back({x});
return this;
}
Benchmark *Benchmark::Unit(TimeUnit unit)
{
time_unit_ = unit;
return this;
}
Benchmark *Benchmark::Range(int64_t start, int64_t limit)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
std::vector<int64_t> arglist;
AddRange(&arglist, start, limit, range_multiplier_);
for (int64_t i : arglist)
{
args_.push_back({i});
}
return this;
}
Benchmark *Benchmark::Ranges(const std::vector<std::pair<int64_t, int64_t>> &ranges)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
std::vector<std::vector<int64_t>> arglists(ranges.size());
std::size_t total = 1;
for (std::size_t i = 0; i < ranges.size(); i++)
{
AddRange(&arglists[i], ranges[i].first, ranges[i].second, range_multiplier_);
total *= arglists[i].size();
}
std::vector<std::size_t> ctr(arglists.size(), 0);
for (std::size_t i = 0; i < total; i++)
{
std::vector<int64_t> tmp;
tmp.reserve(arglists.size());
for (std::size_t j = 0; j < arglists.size(); j++)
{
tmp.push_back(arglists[j].at(ctr[j]));
}
args_.push_back(std::move(tmp));
for (std::size_t j = 0; j < arglists.size(); j++)
{
if (ctr[j] + 1 < arglists[j].size())
{
++ctr[j];
break;
}
ctr[j] = 0;
}
}
return this;
}
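// Worked example (illustrative, not part of the original file): with the
// default multiplier of 8, Ranges({{1, 8}, {32, 64}}) expands each pair via
// AddRange to {1, 8} and {32, 64}, then emits their cartesian product in
// odometer order: {1,32}, {8,32}, {1,64}, {8,64}.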
Benchmark *Benchmark::ArgName(const std::string &name)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
arg_names_ = {name};
return this;
}
Benchmark *Benchmark::ArgNames(const std::vector<std::string> &names)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(names.size()));
arg_names_ = names;
return this;
}
Benchmark *Benchmark::DenseRange(int64_t start, int64_t limit, int step)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
CHECK_LE(start, limit);
for (int64_t arg = start; arg <= limit; arg += step)
{
args_.push_back({arg});
}
return this;
}
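// Worked example (illustrative, not part of the original file):
// DenseRange(0, 10, 3) yields the args 0, 3, 6, 9; note that 'limit' itself
// is only included when it is reachable in steps of 'step'.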
Benchmark *Benchmark::Args(const std::vector<int64_t> &args)
{
CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(args.size()));
args_.push_back(args);
return this;
}
Benchmark *Benchmark::Apply(void (*custom_arguments)(Benchmark *benchmark))
{
custom_arguments(this);
return this;
}
Benchmark *Benchmark::RangeMultiplier(int multiplier)
{
CHECK(multiplier > 1);
range_multiplier_ = multiplier;
return this;
}
Benchmark *Benchmark::MinTime(double t)
{
CHECK(t > 0.0);
CHECK(iterations_ == 0);
min_time_ = t;
return this;
}
Benchmark *Benchmark::Iterations(IterationCount n)
{
CHECK(n > 0);
CHECK(IsZero(min_time_));
iterations_ = n;
return this;
}
Benchmark *Benchmark::Repetitions(int n)
{
CHECK(n > 0);
repetitions_ = n;
return this;
}
Benchmark *Benchmark::ReportAggregatesOnly(bool value)
{
aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default;
return this;
}
Benchmark *Benchmark::DisplayAggregatesOnly(bool value)
{
// If we were called, the report mode is no longer 'unspecified', in any case.
aggregation_report_mode_ = static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_Default);
if (value)
{
aggregation_report_mode_ =
static_cast<AggregationReportMode>(aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly);
}
else
{
aggregation_report_mode_ =
static_cast<AggregationReportMode>(aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly);
}
return this;
}
Benchmark *Benchmark::MeasureProcessCPUTime()
{
// Can be used together with UseRealTime() / UseManualTime().
measure_process_cpu_time_ = true;
return this;
}
Benchmark *Benchmark::UseRealTime()
{
CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_real_time_ = true;
return this;
}
Benchmark *Benchmark::UseManualTime()
{
CHECK(!use_real_time_) << "Cannot set UseRealTime and UseManualTime simultaneously.";
use_manual_time_ = true;
return this;
}
Benchmark *Benchmark::Complexity(BigO complexity)
{
complexity_ = complexity;
return this;
}
Benchmark *Benchmark::Complexity(BigOFunc *complexity)
{
complexity_lambda_ = complexity;
complexity_ = oLambda;
return this;
}
Benchmark *Benchmark::ComputeStatistics(std::string name, StatisticsFunc *statistics)
{
statistics_.emplace_back(name, statistics);
return this;
}
Benchmark *Benchmark::Threads(int t)
{
CHECK_GT(t, 0);
thread_counts_.push_back(t);
return this;
}
Benchmark *Benchmark::ThreadRange(int min_threads, int max_threads)
{
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
AddRange(&thread_counts_, min_threads, max_threads, 2);
return this;
}
Benchmark *Benchmark::DenseThreadRange(int min_threads, int max_threads, int stride)
{
CHECK_GT(min_threads, 0);
CHECK_GE(max_threads, min_threads);
CHECK_GE(stride, 1);
for (auto i = min_threads; i < max_threads; i += stride)
{
thread_counts_.push_back(i);
}
thread_counts_.push_back(max_threads);
return this;
}
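// Worked example (illustrative, not part of the original file):
// DenseThreadRange(1, 8, 3) yields thread counts 1, 4, 7, 8: the stride
// walk stops short of max_threads, which is then appended explicitly.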
Benchmark *Benchmark::ThreadPerCpu()
{
thread_counts_.push_back(CPUInfo::Get().num_cpus);
return this;
}
void Benchmark::SetName(const char *name)
{
name_ = name;
}
int Benchmark::ArgsCnt() const
{
if (args_.empty())
{
if (arg_names_.empty())
return -1;
return static_cast<int>(arg_names_.size());
}
return static_cast<int>(args_.front().size());
}
//=============================================================================//
// FunctionBenchmark
//=============================================================================//
void FunctionBenchmark::Run(State &st)
{
func_(st);
}
} // end namespace internal
void ClearRegisteredBenchmarks()
{
internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks();
}
} // end namespace benchmark

View File

@ -1,116 +0,0 @@
#ifndef BENCHMARK_REGISTER_H
#define BENCHMARK_REGISTER_H
#include <vector>
#include "check.h"
namespace benchmark
{
namespace internal
{
// Append the powers of 'mult' in the closed interval [lo, hi].
// Returns iterator to the start of the inserted range.
template <typename T> typename std::vector<T>::iterator AddPowers(std::vector<T> *dst, T lo, T hi, int mult)
{
CHECK_GE(lo, 0);
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
const size_t start_offset = dst->size();
static const T kmax = std::numeric_limits<T>::max();
// Space out the values in powers of "mult"
for (T i = 1; i <= hi; i *= mult)
{
if (i >= lo)
{
dst->push_back(i);
}
// Break the loop here since multiplying by
// 'mult' would move outside of the range of T
if (i > kmax / mult)
break;
}
return dst->begin() + start_offset;
}
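// Worked example (illustrative, not part of the original header):
// AddPowers(&dst, T{4}, T{100}, 2) appends 4, 8, 16, 32, 64, i.e. the
// powers of 2 that fall inside the closed interval [4, 100].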
template <typename T> void AddNegatedPowers(std::vector<T> *dst, T lo, T hi, int mult)
{
// We negate lo and hi so we require that they cannot be equal to 'min'.
CHECK_GT(lo, std::numeric_limits<T>::min());
CHECK_GT(hi, std::numeric_limits<T>::min());
CHECK_GE(hi, lo);
CHECK_LE(hi, 0);
// Add positive powers, then negate and reverse.
// Casts necessary since small integers get promoted
// to 'int' when negating.
const auto lo_complement = static_cast<T>(-lo);
const auto hi_complement = static_cast<T>(-hi);
const auto it = AddPowers(dst, hi_complement, lo_complement, mult);
std::for_each(it, dst->end(), [](T &t) { t *= -1; });
std::reverse(it, dst->end());
}
template <typename T> void AddRange(std::vector<T> *dst, T lo, T hi, int mult)
{
static_assert(std::is_integral<T>::value && std::is_signed<T>::value, "Args type must be a signed integer");
CHECK_GE(hi, lo);
CHECK_GE(mult, 2);
// Add "lo"
dst->push_back(lo);
// Handle lo == hi as a special case, so we then know
// lo < hi and so it is safe to add 1 to lo and subtract 1
// from hi without falling outside of the range of T.
if (lo == hi)
return;
// Ensure that lo_inner <= hi_inner below.
if (lo + 1 == hi)
{
dst->push_back(hi);
return;
}
// Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive).
const auto lo_inner = static_cast<T>(lo + 1);
const auto hi_inner = static_cast<T>(hi - 1);
// Insert negative values
if (lo_inner < 0)
{
AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult);
}
// Treat 0 as a special case (see discussion on #762).
if (lo <= 0 && hi >= 0)
{
dst->push_back(0);
}
// Insert positive values
if (hi_inner > 0)
{
AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult);
}
// Add "hi" (if different from last value).
if (hi != dst->back())
{
dst->push_back(hi);
}
}
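// Worked examples (illustrative, not part of the original header):
//   AddRange(&dst, int64_t{1}, int64_t{100}, 8)  appends 1, 8, 64, 100
//   AddRange(&dst, int64_t{-4}, int64_t{4}, 2)   appends -4, -2, -1, 0, 1, 2, 4
// i.e. the endpoints, the powers of 'mult' strictly between them (negated
// on the negative side), and 0 when the range crosses it.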
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_REGISTER_H

View File

@ -1,374 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark_runner.h"
#include "benchmark/benchmark.h"
#include "benchmark_api_internal.h"
#include "internal_macros.h"
#ifndef BENCHMARK_OS_WINDOWS
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <unistd.h>
#endif
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <thread>
#include <utility>
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "log.h"
#include "mutex.h"
#include "re.h"
#include "statistics.h"
#include "string_util.h"
#include "thread_manager.h"
#include "thread_timer.h"
namespace benchmark
{
namespace internal
{
MemoryManager *memory_manager = nullptr;
namespace
{
static constexpr IterationCount kMaxIterations = 1000000000;
BenchmarkReporter::Run CreateRunReport(const benchmark::internal::BenchmarkInstance &b,
const internal::ThreadManager::Result &results, IterationCount memory_iterations,
const MemoryManager::Result &memory_result, double seconds,
int64_t repetition_index)
{
// Create report about this benchmark run.
BenchmarkReporter::Run report;
report.run_name = b.name;
report.error_occurred = results.has_error_;
report.error_message = results.error_message_;
report.report_label = results.report_label_;
// This is the total iterations across all threads.
report.iterations = results.iterations;
report.time_unit = b.time_unit;
report.threads = b.threads;
report.repetition_index = repetition_index;
report.repetitions = b.repetitions;
if (!report.error_occurred)
{
if (b.use_manual_time)
{
report.real_accumulated_time = results.manual_time_used;
}
else
{
report.real_accumulated_time = results.real_time_used;
}
report.cpu_accumulated_time = results.cpu_time_used;
report.complexity_n = results.complexity_n;
report.complexity = b.complexity;
report.complexity_lambda = b.complexity_lambda;
report.statistics = b.statistics;
report.counters = results.counters;
if (memory_iterations > 0)
{
report.has_memory_result = true;
report.allocs_per_iter =
memory_iterations ? static_cast<double>(memory_result.num_allocs) / memory_iterations : 0;
report.max_bytes_used = memory_result.max_bytes_used;
}
internal::Finish(&report.counters, results.iterations, seconds, b.threads);
}
return report;
}
// Execute one thread of benchmark b for the specified number of iterations.
// Adds the stats collected for the thread into *total.
void RunInThread(const BenchmarkInstance *b, IterationCount iters, int thread_id, ThreadManager *manager)
{
internal::ThreadTimer timer(b->measure_process_cpu_time ? internal::ThreadTimer::CreateProcessCpuTime()
: internal::ThreadTimer::Create());
State st = b->Run(iters, thread_id, &timer, manager);
CHECK(st.error_occurred() || st.iterations() >= st.max_iterations)
<< "Benchmark returned before State::KeepRunning() returned false!";
{
MutexLock l(manager->GetBenchmarkMutex());
internal::ThreadManager::Result &results = manager->results;
results.iterations += st.iterations();
results.cpu_time_used += timer.cpu_time_used();
results.real_time_used += timer.real_time_used();
results.manual_time_used += timer.manual_time_used();
results.complexity_n += st.complexity_length_n();
internal::Increment(&results.counters, st.counters);
}
manager->NotifyThreadComplete();
}
class BenchmarkRunner
{
public:
BenchmarkRunner(const benchmark::internal::BenchmarkInstance &b_,
std::vector<BenchmarkReporter::Run> *complexity_reports_)
: b(b_), complexity_reports(*complexity_reports_),
min_time(!IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time),
repeats(b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions),
has_explicit_iteration_count(b.iterations != 0), pool(b.threads - 1),
iters(has_explicit_iteration_count ? b.iterations : 1)
{
run_results.display_report_aggregates_only =
(FLAGS_benchmark_report_aggregates_only || FLAGS_benchmark_display_aggregates_only);
run_results.file_report_aggregates_only = FLAGS_benchmark_report_aggregates_only;
if (b.aggregation_report_mode != internal::ARM_Unspecified)
{
run_results.display_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_DisplayReportAggregatesOnly);
run_results.file_report_aggregates_only =
(b.aggregation_report_mode & internal::ARM_FileReportAggregatesOnly);
}
for (int repetition_num = 0; repetition_num < repeats; repetition_num++)
{
DoOneRepetition(repetition_num);
}
// Calculate additional statistics
run_results.aggregates_only = ComputeStats(run_results.non_aggregates);
// Maybe calculate complexity report
if ((b.complexity != oNone) && b.last_benchmark_instance)
{
auto additional_run_stats = ComputeBigO(complexity_reports);
run_results.aggregates_only.insert(run_results.aggregates_only.end(), additional_run_stats.begin(),
additional_run_stats.end());
complexity_reports.clear();
}
}
RunResults &&get_results()
{
return std::move(run_results);
}
private:
RunResults run_results;
const benchmark::internal::BenchmarkInstance &b;
std::vector<BenchmarkReporter::Run> &complexity_reports;
const double min_time;
const int repeats;
const bool has_explicit_iteration_count;
std::vector<std::thread> pool;
IterationCount iters; // preserved between repetitions!
// So only the first repetition has to find/calculate it,
// the other repetitions will just use that precomputed iteration count.
struct IterationResults
{
internal::ThreadManager::Result results;
IterationCount iters;
double seconds;
};
IterationResults DoNIterations()
{
VLOG(2) << "Running " << b.name.str() << " for " << iters << "\n";
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(b.threads));
// Run all but one thread in separate threads
for (std::size_t ti = 0; ti < pool.size(); ++ti)
{
pool[ti] = std::thread(&RunInThread, &b, iters, static_cast<int>(ti + 1), manager.get());
}
// And run one thread here directly.
// (If we were asked to run just one thread, we don't create new threads.)
// Yes, we need to do this here *after* we start the separate threads.
RunInThread(&b, iters, 0, manager.get());
// The main thread has finished. Now let's wait for the other threads.
manager->WaitForAllThreads();
for (std::thread &thread : pool)
thread.join();
IterationResults i;
// Acquire the measurements/counters from the manager, UNDER THE LOCK!
{
MutexLock l(manager->GetBenchmarkMutex());
i.results = manager->results;
}
// And get rid of the manager.
manager.reset();
// Adjust real/manual time stats since they were reported per thread.
i.results.real_time_used /= b.threads;
i.results.manual_time_used /= b.threads;
// If we were measuring whole-process CPU usage, adjust the CPU time too.
if (b.measure_process_cpu_time)
i.results.cpu_time_used /= b.threads;
VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" << i.results.real_time_used << "\n";
// So for how long were we running?
i.iters = iters;
// Base decisions off of real time if requested by this benchmark.
i.seconds = i.results.cpu_time_used;
if (b.use_manual_time)
{
i.seconds = i.results.manual_time_used;
}
else if (b.use_real_time)
{
i.seconds = i.results.real_time_used;
}
return i;
}
IterationCount PredictNumItersNeeded(const IterationResults &i) const
{
// Estimate by how much the iteration count should be increased.
// Note: Avoid division by zero with max(seconds, 1ns).
double multiplier = min_time * 1.4 / std::max(i.seconds, 1e-9);
// If our last run was at least 10% of FLAGS_benchmark_min_time then we
// use the multiplier directly.
// Otherwise we use at most 10 times expansion.
// NOTE: When the last run was at least 10% of the min time the max
// expansion should be 14x.
bool is_significant = (i.seconds / min_time) > 0.1;
multiplier = is_significant ? multiplier : std::min(10.0, multiplier);
if (multiplier <= 1.0)
multiplier = 2.0;
// So what seems to be the sufficiently-large iteration count? Round up.
const IterationCount max_next_iters = static_cast<IterationCount>(
std::lround(std::max(multiplier * static_cast<double>(i.iters), static_cast<double>(i.iters) + 1.0)));
// But we do have *some* sanity limits.
const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
return next_iters; // round up before conversion to integer.
}
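// Worked example (illustrative, not part of the original file): with
// min_time = 0.5s, a run of 1000 iters taking 0.01s gives
// multiplier = 0.5 * 1.4 / 0.01 = 70, but 0.01 / 0.5 = 2% < 10%, so the
// expansion is clamped to 10x and next_iters comes out as 10000.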
bool ShouldReportIterationResults(const IterationResults &i) const
{
// Determine if this run should be reported:
// either it has run for a sufficient amount of time,
// or an error was reported.
return i.results.has_error_ || i.iters >= kMaxIterations || // Too many iterations already.
i.seconds >= min_time || // The elapsed time is large enough.
// CPU time is specified but the elapsed real time greatly exceeds
// the minimum time.
// Note that user-provided timers are exempt from this sanity check.
((i.results.real_time_used >= 5 * min_time) && !b.use_manual_time);
}
void DoOneRepetition(int64_t repetition_index)
{
const bool is_the_first_repetition = repetition_index == 0;
IterationResults i;
// We *may* be gradually increasing the length (iteration count)
// of the benchmark until we decide the results are significant.
// And once we do, we report those last results and exit.
// Please do note that if there are repetitions, the iteration count
// is *only* calculated for the *first* repetition, and the other repetitions
// simply reuse that precomputed iteration count.
for (;;)
{
i = DoNIterations();
// Do we consider the results to be significant?
// If we are doing repetitions, and the first repetition was already done,
// it has calculated the correct iteration time, so we have run that very
// iteration count just now. No need to calculate anything. Just report.
// Else, the normal rules apply.
const bool results_are_significant =
!is_the_first_repetition || has_explicit_iteration_count || ShouldReportIterationResults(i);
if (results_are_significant)
break; // Good, let's report them!
// Nope, bad iteration. Let's re-estimate the hopefully-sufficient
// iteration count, and run the benchmark again...
iters = PredictNumItersNeeded(i);
assert(iters > i.iters && "if we did more iterations than we want to do the next time, "
"then we should have accepted the current iteration run.");
}
// Oh, one last thing, we need to also produce the 'memory measurements'..
MemoryManager::Result memory_result;
IterationCount memory_iterations = 0;
if (memory_manager != nullptr)
{
// Only run a few iterations to reduce the impact of one-time
// allocations in benchmarks that are not properly managed.
memory_iterations = std::min<IterationCount>(16, iters);
memory_manager->Start();
std::unique_ptr<internal::ThreadManager> manager;
manager.reset(new internal::ThreadManager(1));
RunInThread(&b, memory_iterations, 0, manager.get());
manager->WaitForAllThreads();
manager.reset();
memory_manager->Stop(&memory_result);
}
// Ok, now actually report.
BenchmarkReporter::Run report =
CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds, repetition_index);
if (!report.error_occurred && b.complexity != oNone)
complexity_reports.push_back(report);
run_results.non_aggregates.push_back(report);
}
};
} // end namespace
RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
std::vector<BenchmarkReporter::Run> *complexity_reports)
{
internal::BenchmarkRunner r(b, complexity_reports);
return r.get_results();
}
} // end namespace internal
} // end namespace benchmark

View File

@ -1,53 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_RUNNER_H_
#define BENCHMARK_RUNNER_H_
#include "benchmark_api_internal.h"
#include "internal_macros.h"
DECLARE_double(benchmark_min_time);
DECLARE_int32(benchmark_repetitions);
DECLARE_bool(benchmark_report_aggregates_only);
DECLARE_bool(benchmark_display_aggregates_only);
namespace benchmark
{
namespace internal
{
extern MemoryManager *memory_manager;
struct RunResults
{
std::vector<BenchmarkReporter::Run> non_aggregates;
std::vector<BenchmarkReporter::Run> aggregates_only;
bool display_report_aggregates_only = false;
bool file_report_aggregates_only = false;
};
RunResults RunBenchmark(const benchmark::internal::BenchmarkInstance &b,
std::vector<BenchmarkReporter::Run> *complexity_reports);
} // namespace internal
} // end namespace benchmark
#endif // BENCHMARK_RUNNER_H_

View File

@ -1,89 +0,0 @@
#ifndef CHECK_H_
#define CHECK_H_
#include <cmath>
#include <cstdlib>
#include <ostream>
#include "internal_macros.h"
#include "log.h"
namespace benchmark
{
namespace internal
{
typedef void(AbortHandlerT)();
inline AbortHandlerT *&GetAbortHandler()
{
static AbortHandlerT *handler = &std::abort;
return handler;
}
BENCHMARK_NORETURN inline void CallAbortHandler()
{
GetAbortHandler()();
std::abort(); // fallback to enforce noreturn
}
// CheckHandler is the class constructed by failing CHECK macros. CheckHandler
// will log information about the failures and abort when it is destructed.
class CheckHandler
{
public:
CheckHandler(const char *check, const char *file, const char *func, int line) : log_(GetErrorLogInstance())
{
log_ << file << ":" << line << ": " << func << ": Check `" << check << "' failed. ";
}
LogType &GetLog()
{
return log_;
}
BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false)
{
log_ << std::endl;
CallAbortHandler();
}
CheckHandler &operator=(const CheckHandler &) = delete;
CheckHandler(const CheckHandler &) = delete;
CheckHandler() = delete;
private:
LogType &log_;
};
} // end namespace internal
} // end namespace benchmark
// The CHECK macro returns a std::ostream object that can have extra information
// written to it.
#ifndef NDEBUG
#define CHECK(b) \
(b ? ::benchmark::internal::GetNullLogInstance() \
: ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__).GetLog())
#else
#define CHECK(b) ::benchmark::internal::GetNullLogInstance()
#endif
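// Illustrative usage (not part of the original header): the returned stream
// lets callers attach context to a failure, e.g.
//   CHECK(max_iterations != 0) << "At least one iteration must be run";
// (this exact usage appears in benchmark.cc).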
// clang-format off
// preserve whitespacing between operators for alignment
#define CHECK_EQ(a, b) CHECK((a) == (b))
#define CHECK_NE(a, b) CHECK((a) != (b))
#define CHECK_GE(a, b) CHECK((a) >= (b))
#define CHECK_LE(a, b) CHECK((a) <= (b))
#define CHECK_GT(a, b) CHECK((a) > (b))
#define CHECK_LT(a, b) CHECK((a) < (b))
#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps))
#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps))
#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps))
#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps))
#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps))
#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps))
// clang-format on
#endif // CHECK_H_

View File

@ -1,198 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "colorprint.h"
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <memory>
#include <string>
#include "check.h"
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <io.h>
#include <windows.h>
#else
#include <unistd.h>
#endif // BENCHMARK_OS_WINDOWS
namespace benchmark
{
namespace
{
#ifdef BENCHMARK_OS_WINDOWS
typedef WORD PlatformColorCode;
#else
typedef const char *PlatformColorCode;
#endif
PlatformColorCode GetPlatformColorCode(LogColor color)
{
#ifdef BENCHMARK_OS_WINDOWS
switch (color)
{
case COLOR_RED:
return FOREGROUND_RED;
case COLOR_GREEN:
return FOREGROUND_GREEN;
case COLOR_YELLOW:
return FOREGROUND_RED | FOREGROUND_GREEN;
case COLOR_BLUE:
return FOREGROUND_BLUE;
case COLOR_MAGENTA:
return FOREGROUND_BLUE | FOREGROUND_RED;
case COLOR_CYAN:
return FOREGROUND_BLUE | FOREGROUND_GREEN;
case COLOR_WHITE: // fall through to default
default:
return 0;
}
#else
switch (color)
{
case COLOR_RED:
return "1";
case COLOR_GREEN:
return "2";
case COLOR_YELLOW:
return "3";
case COLOR_BLUE:
return "4";
case COLOR_MAGENTA:
return "5";
case COLOR_CYAN:
return "6";
case COLOR_WHITE:
return "7";
default:
return nullptr;
};
#endif
}
} // end namespace
std::string FormatString(const char *msg, va_list args)
{
// We might need a second shot at this, so pre-emptively make a copy.
va_list args_cp;
va_copy(args_cp, args);
std::size_t size = 256;
char local_buff[256];
auto ret = vsnprintf(local_buff, size, msg, args_cp);
va_end(args_cp);
// Currently there is no error handling for failure, so this is a hack.
CHECK(ret >= 0);
if (ret == 0) // handle empty expansion
return {};
else if (static_cast<size_t>(ret) < size)
return local_buff;
else
{
// we did not provide a long enough buffer on our first attempt.
size = (size_t)ret + 1; // + 1 for the null byte
std::unique_ptr<char[]> buff(new char[size]);
ret = vsnprintf(buff.get(), size, msg, args);
CHECK(ret > 0 && ((size_t)ret) < size);
return buff.get();
}
}
std::string FormatString(const char *msg, ...)
{
va_list args;
va_start(args, msg);
auto tmp = FormatString(msg, args);
va_end(args);
return tmp;
}
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
ColorPrintf(out, color, fmt, args);
va_end(args);
}
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args)
{
#ifdef BENCHMARK_OS_WINDOWS
((void)out); // suppress unused warning
const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
// Gets the current text color.
CONSOLE_SCREEN_BUFFER_INFO buffer_info;
GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
const WORD old_color_attrs = buffer_info.wAttributes;
// We need to flush the stream buffers into the console before each
// SetConsoleTextAttribute call lest it affect the text that is already
// printed but has not yet reached the console.
fflush(stdout);
SetConsoleTextAttribute(stdout_handle, GetPlatformColorCode(color) | FOREGROUND_INTENSITY);
vprintf(fmt, args);
fflush(stdout);
// Restores the text color.
SetConsoleTextAttribute(stdout_handle, old_color_attrs);
#else
const char *color_code = GetPlatformColorCode(color);
if (color_code)
out << FormatString("\033[0;3%sm", color_code);
out << FormatString(fmt, args) << "\033[m";
#endif
}
bool IsColorTerminal()
{
#if BENCHMARK_OS_WINDOWS
// On Windows the TERM variable is usually not set, but the
// console there does support colors.
return 0 != _isatty(_fileno(stdout));
#else
// On non-Windows platforms, we rely on the TERM variable. This list of
// supported TERM values is copied from Google Test:
// <https://github.com/google/googletest/blob/master/googletest/src/gtest.cc#L2925>.
const char *const SUPPORTED_TERM_VALUES[] = {
"xterm", "xterm-color", "xterm-256color", "screen", "screen-256color", "tmux",
"tmux-256color", "rxvt-unicode", "rxvt-unicode-256color", "linux", "cygwin",
};
const char *const term = getenv("TERM");
bool term_supports_color = false;
for (const char *candidate : SUPPORTED_TERM_VALUES)
{
if (term && 0 == strcmp(term, candidate))
{
term_supports_color = true;
break;
}
}
return 0 != isatty(fileno(stdout)) && term_supports_color;
#endif // BENCHMARK_OS_WINDOWS
}
} // end namespace benchmark

View File

@ -1,34 +0,0 @@
#ifndef BENCHMARK_COLORPRINT_H_
#define BENCHMARK_COLORPRINT_H_
#include <cstdarg>
#include <iostream>
#include <string>
namespace benchmark
{
enum LogColor
{
COLOR_DEFAULT,
COLOR_RED,
COLOR_GREEN,
COLOR_YELLOW,
COLOR_BLUE,
COLOR_MAGENTA,
COLOR_CYAN,
COLOR_WHITE
};
std::string FormatString(const char *msg, va_list args);
std::string FormatString(const char *msg, ...);
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, va_list args);
void ColorPrintf(std::ostream &out, LogColor color, const char *fmt, ...);
// Returns true if stdout appears to be a terminal that supports colored
// output, false otherwise.
bool IsColorTerminal();
} // end namespace benchmark
#endif // BENCHMARK_COLORPRINT_H_

View File

@ -1,251 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "commandlineflags.h"
#include <algorithm>
#include <cctype>
#include <cstdlib>
#include <cstring>
#include <iostream>
#include <limits>
namespace benchmark
{
namespace
{
// Parses 'str' for a 32-bit signed integer. If successful, writes
// the result to *value and returns true; otherwise leaves *value
// unchanged and returns false.
bool ParseInt32(const std::string &src_text, const char *str, int32_t *value)
{
// Parses the string as a decimal integer.
char *end = nullptr;
const long long_value = strtol(str, &end, 10); // NOLINT
// Has strtol() consumed all characters in the string?
if (*end != '\0')
{
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
// Is the parsed value in the range of an Int32?
const int32_t result = static_cast<int32_t>(long_value);
if (long_value == std::numeric_limits<long>::max() || long_value == std::numeric_limits<long>::min() ||
// The parsed value overflows as a long. (strtol() returns
// LONG_MAX or LONG_MIN when the input overflows.)
result != long_value
// The parsed value overflows as an Int32.
)
{
std::cerr << src_text << " is expected to be a 32-bit integer, "
<< "but actually has value \"" << str << "\", "
<< "which overflows.\n";
return false;
}
*value = result;
return true;
}
// Parses 'str' for a double. If successful, writes the result to *value and
// returns true; otherwise leaves *value unchanged and returns false.
bool ParseDouble(const std::string &src_text, const char *str, double *value)
{
// Parses the string as a double.
char *end = nullptr;
const double double_value = strtod(str, &end); // NOLINT
// Has strtod() consumed all characters in the string?
if (*end != '\0')
{
// No - an invalid character was encountered.
std::cerr << src_text << " is expected to be a double, "
<< "but actually has value \"" << str << "\".\n";
return false;
}
*value = double_value;
return true;
}
// Returns the name of the environment variable corresponding to the
// given flag. For example, FlagToEnvVar("foo") will return
// "BENCHMARK_FOO" in the open-source version.
static std::string FlagToEnvVar(const char *flag)
{
const std::string flag_str(flag);
std::string env_var;
for (size_t i = 0; i != flag_str.length(); ++i)
env_var += static_cast<char>(::toupper(flag_str.c_str()[i]));
return "BENCHMARK_" + env_var;
}
} // namespace
bool BoolFromEnv(const char *flag, bool default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value_str = getenv(env_var.c_str());
return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str);
}
int32_t Int32FromEnv(const char *flag, int32_t default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value_str = getenv(env_var.c_str());
int32_t value = default_val;
if (value_str == nullptr || !ParseInt32(std::string("Environment variable ") + env_var, value_str, &value))
{
return default_val;
}
return value;
}
double DoubleFromEnv(const char *flag, double default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value_str = getenv(env_var.c_str());
double value = default_val;
if (value_str == nullptr || !ParseDouble(std::string("Environment variable ") + env_var, value_str, &value))
{
return default_val;
}
return value;
}
const char *StringFromEnv(const char *flag, const char *default_val)
{
const std::string env_var = FlagToEnvVar(flag);
const char *const value = getenv(env_var.c_str());
return value == nullptr ? default_val : value;
}
// Parses a string as a command line flag. The string should have
// the format "--flag=value". When def_optional is true, the "=value"
// part can be omitted.
//
// Returns the value of the flag, or nullptr if the parsing failed.
const char *ParseFlagValue(const char *str, const char *flag, bool def_optional)
{
// str and flag must not be nullptr.
if (str == nullptr || flag == nullptr)
return nullptr;
// The flag must start with "--".
const std::string flag_str = std::string("--") + std::string(flag);
const size_t flag_len = flag_str.length();
if (strncmp(str, flag_str.c_str(), flag_len) != 0)
return nullptr;
// Skips the flag name.
const char *flag_end = str + flag_len;
// When def_optional is true, it's OK to not have a "=value" part.
if (def_optional && (flag_end[0] == '\0'))
return flag_end;
// If def_optional is true and there are more characters after the
// flag name, or if def_optional is false, there must be a '=' after
// the flag name.
if (flag_end[0] != '=')
return nullptr;
// Returns the string after "=".
return flag_end + 1;
}
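// A minimal behavior sketch for the parser above; the flag name
// "benchmark_filter" is purely illustrative:
//
//   ParseFlagValue("--benchmark_filter=BM_Foo", "benchmark_filter", false)
//       // -> points at "BM_Foo"
//   ParseFlagValue("--benchmark_filter", "benchmark_filter", true)
//       // -> points at "" (def_optional permits omitting "=value")
//   ParseFlagValue("--other=1", "benchmark_filter", false)
//       // -> nullptr (prefix mismatch)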
bool ParseBoolFlag(const char *str, const char *flag, bool *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, true);
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
// Converts the string value to a bool.
*value = IsTruthyFlagValue(value_str);
return true;
}
bool ParseInt32Flag(const char *str, const char *flag, int32_t *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
// Sets *value to the value of the flag.
return ParseInt32(std::string("The value of flag --") + flag, value_str, value);
}
bool ParseDoubleFlag(const char *str, const char *flag, double *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
// Sets *value to the value of the flag.
return ParseDouble(std::string("The value of flag --") + flag, value_str, value);
}
bool ParseStringFlag(const char *str, const char *flag, std::string *value)
{
// Gets the value of the flag as a string.
const char *const value_str = ParseFlagValue(str, flag, false);
// Aborts if the parsing failed.
if (value_str == nullptr)
return false;
*value = value_str;
return true;
}
bool IsFlag(const char *str, const char *flag)
{
return (ParseFlagValue(str, flag, true) != nullptr);
}
bool IsTruthyFlagValue(const std::string &value)
{
if (value.size() == 1)
{
char v = value[0];
return isalnum(v) && !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
}
else if (!value.empty())
{
std::string value_lower(value);
std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
[](char c) { return static_cast<char>(::tolower(c)); });
return !(value_lower == "false" || value_lower == "no" || value_lower == "off");
}
else
return true;
}
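// Illustrative outcomes of the rules above: "1", "t", "Y" -> true;
// "0", "F", "no", "off", "false" -> false; "" -> true (the documented
// special case for the empty string).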
} // end namespace benchmark

View File

@ -1,96 +0,0 @@
#ifndef BENCHMARK_COMMANDLINEFLAGS_H_
#define BENCHMARK_COMMANDLINEFLAGS_H_
#include <cstdint>
#include <string>
// Macro for referencing flags.
#define FLAG(name) FLAGS_##name
// Macros for declaring flags.
#define DECLARE_bool(name) extern bool FLAG(name)
#define DECLARE_int32(name) extern int32_t FLAG(name)
#define DECLARE_double(name) extern double FLAG(name)
#define DECLARE_string(name) extern std::string FLAG(name)
// Macros for defining flags.
#define DEFINE_bool(name, default_val) bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
#define DEFINE_int32(name, default_val) int32_t FLAG(name) = benchmark::Int32FromEnv(#name, default_val)
#define DEFINE_double(name, default_val) double FLAG(name) = benchmark::DoubleFromEnv(#name, default_val)
#define DEFINE_string(name, default_val) std::string FLAG(name) = benchmark::StringFromEnv(#name, default_val)
namespace benchmark
{
// Parses a bool from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns the IsTruthyFlagValue() result; if not,
// returns the given default value.
bool BoolFromEnv(const char *flag, bool default_val);
// Parses an Int32 from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns the ParseInt32() result; if not, returns
// the given default value.
int32_t Int32FromEnv(const char *flag, int32_t default_val);
// Parses a Double from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns the ParseDouble() result; if not, returns
// the given default value.
double DoubleFromEnv(const char *flag, double default_val);
// Parses a string from the environment variable
// corresponding to the given flag.
//
// If the variable exists, returns its value; if not, returns
// the given default value.
const char *StringFromEnv(const char *flag, const char *default_val);
// Parses a string for a bool flag, in the form of either
// "--flag=value" or "--flag".
//
// In the former case, the value is taken as true if it passes IsTruthyFlagValue().
//
// In the latter case, the value is taken as true.
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseBoolFlag(const char *str, const char *flag, bool *value);
// Parses a string for an Int32 flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseInt32Flag(const char *str, const char *flag, int32_t *value);
// Parses a string for a Double flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseDoubleFlag(const char *str, const char *flag, double *value);
// Parses a string for a string flag, in the form of
// "--flag=value".
//
// On success, stores the value of the flag in *value, and returns
// true. On failure, returns false without changing *value.
bool ParseStringFlag(const char *str, const char *flag, std::string *value);
// Returns true if the string matches the flag.
bool IsFlag(const char *str, const char *flag);
// Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
// some non-alphanumeric character. Also returns false if the value matches
// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
// returns true if value is the empty string.
bool IsTruthyFlagValue(const std::string &value);
} // end namespace benchmark
#endif // BENCHMARK_COMMANDLINEFLAGS_H_
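// A hedged usage sketch for the macros and parsers above; the flag name
// "my_iters" and the surrounding main() are illustrative only:
//
//   #include "commandlineflags.h"
//   DEFINE_int32(my_iters, 100); // also honors BENCHMARK_MY_ITERS in the env
//
//   int main(int argc, char **argv)
//   {
//       for (int i = 1; i < argc; ++i)
//           benchmark::ParseInt32Flag(argv[i], "my_iters", &FLAG(my_iters));
//       // FLAG(my_iters) now holds the default, the env override, or the
//       // --my_iters=N command-line value, in increasing precedence.
//   }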

View File

@ -1,251 +0,0 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark
#include "benchmark/benchmark.h"
#include "check.h"
#include "complexity.h"
#include <algorithm>
#include <cmath>
namespace benchmark
{
// Internal function to calculate the different scalability forms
BigOFunc *FittingCurve(BigO complexity)
{
static const double kLog2E = 1.44269504088896340736;
switch (complexity)
{
case oN:
return [](IterationCount n) -> double { return static_cast<double>(n); };
case oNSquared:
return [](IterationCount n) -> double { return std::pow(n, 2); };
case oNCubed:
return [](IterationCount n) -> double { return std::pow(n, 3); };
case oLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
case oNLogN:
/* Note: can't use log2 because Android's GNU STL lacks it */
return [](IterationCount n) { return kLog2E * n * log(static_cast<double>(n)); };
case o1:
default:
return [](IterationCount) { return 1.0; };
}
}
// Returns a string representation of the calculated complexity
std::string GetBigOString(BigO complexity)
{
switch (complexity)
{
case oN:
return "N";
case oNSquared:
return "N^2";
case oNCubed:
return "N^3";
case oLogN:
return "lgN";
case oNLogN:
return "NlgN";
case o1:
return "(1)";
default:
return "f(N)";
}
}
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error, for the fitting curve
// given by the lambda expression.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
// - fitting_curve : lambda expression (e.g. [](int64_t n) { return n; }).
// For a deeper explanation on the algorithm logic, please refer to
// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics
LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, BigOFunc *fitting_curve)
{
double sigma_gn = 0.0;
double sigma_gn_squared = 0.0;
double sigma_time = 0.0;
double sigma_time_gn = 0.0;
// Calculate least square fitting parameter
for (size_t i = 0; i < n.size(); ++i)
{
double gn_i = fitting_curve(n[i]);
sigma_gn += gn_i;
sigma_gn_squared += gn_i * gn_i;
sigma_time += time[i];
sigma_time_gn += time[i] * gn_i;
}
LeastSq result;
result.complexity = oLambda;
// Calculate complexity.
result.coef = sigma_time_gn / sigma_gn_squared;
// Calculate RMS
double rms = 0.0;
for (size_t i = 0; i < n.size(); ++i)
{
double fit = result.coef * fitting_curve(n[i]);
rms += pow((time[i] - fit), 2);
}
    // Normalize the RMS by the mean of the observed values
double mean = sigma_time / n.size();
result.rms = sqrt(rms / n.size()) / mean;
return result;
}
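// The closed form computed by the loops above: minimizing
// \sum_i (t_i - c \, g(n_i))^2 over c gives
//
//   c = \frac{\sum_i t_i \, g(n_i)}{\sum_i g(n_i)^2}
//
// (i.e. sigma_time_gn / sigma_gn_squared), and the reported error is the
// mean-normalized RMS
//
//   rms = \frac{1}{\bar{t}} \sqrt{\frac{1}{N} \sum_i (t_i - c \, g(n_i))^2}.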
// Find the coefficient for the high-order term in the running time, by
// minimizing the sum of squares of relative error.
// - n : Vector containing the size of the benchmark tests.
// - time : Vector containing the times for the benchmark tests.
// - complexity : If different from oAuto, the fitting curve will stick to
//                this one. If it is oAuto, the best-fitting curve will be
//                calculated.
LeastSq MinimalLeastSq(const std::vector<int64_t> &n, const std::vector<double> &time, const BigO complexity)
{
CHECK_EQ(n.size(), time.size());
    CHECK_GE(n.size(), 2); // Do not compute a fitting curve if fewer than two
                           // benchmark runs are given
CHECK_NE(complexity, oNone);
LeastSq best_fit;
if (complexity == oAuto)
{
std::vector<BigO> fit_curves = {oLogN, oN, oNLogN, oNSquared, oNCubed};
// Take o1 as default best fitting curve
best_fit = MinimalLeastSq(n, time, FittingCurve(o1));
best_fit.complexity = o1;
// Compute all possible fitting curves and stick to the best one
for (const auto &fit : fit_curves)
{
LeastSq current_fit = MinimalLeastSq(n, time, FittingCurve(fit));
if (current_fit.rms < best_fit.rms)
{
best_fit = current_fit;
best_fit.complexity = fit;
}
}
}
else
{
best_fit = MinimalLeastSq(n, time, FittingCurve(complexity));
best_fit.complexity = complexity;
}
return best_fit;
}
std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
if (reports.size() < 2)
return results;
// Accumulators.
std::vector<int64_t> n;
std::vector<double> real_time;
std::vector<double> cpu_time;
// Populate the accumulators.
for (const Run &run : reports)
{
CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
n.push_back(run.complexity_n);
real_time.push_back(run.real_accumulated_time / run.iterations);
cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
}
LeastSq result_cpu;
LeastSq result_real;
if (reports[0].complexity == oLambda)
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity_lambda);
result_real = MinimalLeastSq(n, real_time, reports[0].complexity_lambda);
}
else
{
result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
}
// Drop the 'args' when reporting complexity.
auto run_name = reports[0].run_name;
run_name.args.clear();
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run big_o;
big_o.run_name = run_name;
big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
big_o.repetitions = reports[0].repetitions;
big_o.repetition_index = Run::no_repetition_index;
big_o.threads = reports[0].threads;
big_o.aggregate_name = "BigO";
big_o.report_label = reports[0].report_label;
big_o.iterations = 0;
big_o.real_accumulated_time = result_real.coef;
big_o.cpu_accumulated_time = result_cpu.coef;
big_o.report_big_o = true;
big_o.complexity = result_cpu.complexity;
// All the time results are reported after being multiplied by the
// time unit multiplier. But since RMS is a relative quantity it
// should not be multiplied at all. So, here, we _divide_ it by the
// multiplier so that when it is multiplied later the result is the
// correct one.
double multiplier = GetTimeUnitMultiplier(reports[0].time_unit);
// Only add label to mean/stddev if it is same for all runs
Run rms;
rms.run_name = run_name;
rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
rms.aggregate_name = "RMS";
rms.report_label = big_o.report_label;
rms.iterations = 0;
rms.repetition_index = Run::no_repetition_index;
rms.repetitions = reports[0].repetitions;
rms.threads = reports[0].threads;
rms.real_accumulated_time = result_real.rms / multiplier;
rms.cpu_accumulated_time = result_cpu.rms / multiplier;
rms.report_rms = true;
rms.complexity = result_cpu.complexity;
// don't forget to keep the time unit, or we won't be able to
// recover the correct value.
rms.time_unit = reports[0].time_unit;
results.push_back(big_o);
results.push_back(rms);
return results;
}
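// A worked instance of the divide-by-multiplier trick above (numbers are
// illustrative, and assume kMicrosecond's multiplier of 1e6): an RMS of
// 0.05 is stored as 0.05 / 1e6, so when the reporter later scales all
// times by 1e6 the RMS surfaces as 0.05 (i.e. 5%) again.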
} // end namespace benchmark

View File

@ -1,58 +0,0 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Source project : https://github.com/ismaelJimenez/cpp.leastsq
// Adapted to be used with google benchmark
#ifndef COMPLEXITY_H_
#define COMPLEXITY_H_
#include <string>
#include <vector>
#include "benchmark/benchmark.h"
namespace benchmark
{
// Return a vector containing the bigO and RMS information for the specified
// list of reports. If 'reports.size() < 2' an empty vector is returned.
std::vector<BenchmarkReporter::Run> ComputeBigO(const std::vector<BenchmarkReporter::Run> &reports);
// This data structure will contain the result returned by MinimalLeastSq
// - coef       : Estimated coefficient for the high-order term as
//                interpolated from data.
// - rms        : Normalized Root Mean Squared Error.
// - complexity : Scalability form (e.g. oN, oNLogN). If a scalability
//                form was provided to MinimalLeastSq, this holds the same
//                value. If BigO::oAuto was selected, this holds the best
//                fitting curve detected.
struct LeastSq
{
LeastSq() : coef(0.0), rms(0.0), complexity(oNone)
{
}
double coef;
double rms;
BigO complexity;
};
// Returns a string representation of the calculated complexity
std::string GetBigOString(BigO complexity);
} // end namespace benchmark
#endif // COMPLEXITY_H_

View File

@ -1,194 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include "benchmark/benchmark.h"
#include "check.h"
#include "colorprint.h"
#include "commandlineflags.h"
#include "complexity.h"
#include "counter.h"
#include "internal_macros.h"
#include "string_util.h"
#include "timers.h"
namespace benchmark
{
bool ConsoleReporter::ReportContext(const Context &context)
{
name_field_width_ = context.name_field_width;
printed_header_ = false;
prev_counters_.clear();
PrintBasicContext(&GetErrorStream(), context);
#ifdef BENCHMARK_OS_WINDOWS
if ((output_options_ & OO_Color) && &std::cout != &GetOutputStream())
{
GetErrorStream() << "Color printing is only supported for stdout on windows."
" Disabling color printing\n";
output_options_ = static_cast<OutputOptions>(output_options_ & ~OO_Color);
}
#endif
return true;
}
void ConsoleReporter::PrintHeader(const Run &run)
{
std::string str = FormatString("%-*s %13s %15s %12s", static_cast<int>(name_field_width_), "Benchmark", "Time",
"CPU", "Iterations");
if (!run.counters.empty())
{
if (output_options_ & OO_Tabular)
{
for (auto const &c : run.counters)
{
str += FormatString(" %10s", c.first.c_str());
}
}
else
{
str += " UserCounters...";
}
}
std::string line = std::string(str.length(), '-');
GetOutputStream() << line << "\n" << str << "\n" << line << "\n";
}
void ConsoleReporter::ReportRuns(const std::vector<Run> &reports)
{
for (const auto &run : reports)
{
// print the header:
// --- if none was printed yet
bool print_header = !printed_header_;
// --- or if the format is tabular and this run
// has different fields from the prev header
print_header |= (output_options_ & OO_Tabular) && (!internal::SameNames(run.counters, prev_counters_));
if (print_header)
{
printed_header_ = true;
prev_counters_ = run.counters;
PrintHeader(run);
}
// As an alternative to printing the headers like this, we could sort
// the benchmarks by header and then print. But this would require
// waiting for the full results before printing, or printing twice.
PrintRunData(run);
}
}
static void IgnoreColorPrint(std::ostream &out, LogColor, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
out << FormatString(fmt, args);
va_end(args);
}
static std::string FormatTime(double time)
{
// Align decimal places...
if (time < 1.0)
{
return FormatString("%10.3f", time);
}
if (time < 10.0)
{
return FormatString("%10.2f", time);
}
if (time < 100.0)
{
return FormatString("%10.1f", time);
}
return FormatString("%10.0f", time);
}
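// Illustrative inputs/outputs for the alignment rules above (each result
// is exactly 10 characters wide):
//
//   FormatTime(0.123)  -> "     0.123"
//   FormatTime(5.4)    -> "      5.40"
//   FormatTime(54.3)   -> "      54.3"
//   FormatTime(5432.1) -> "      5432"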
void ConsoleReporter::PrintRunData(const Run &result)
{
typedef void(PrinterFn)(std::ostream &, LogColor, const char *, ...);
auto &Out = GetOutputStream();
PrinterFn *printer = (output_options_ & OO_Color) ? (PrinterFn *)ColorPrintf : IgnoreColorPrint;
auto name_color = (result.report_big_o || result.report_rms) ? COLOR_BLUE : COLOR_GREEN;
printer(Out, name_color, "%-*s ", name_field_width_, result.benchmark_name().c_str());
if (result.error_occurred)
{
printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'", result.error_message.c_str());
printer(Out, COLOR_DEFAULT, "\n");
return;
}
const double real_time = result.GetAdjustedRealTime();
const double cpu_time = result.GetAdjustedCPUTime();
const std::string real_time_str = FormatTime(real_time);
const std::string cpu_time_str = FormatTime(cpu_time);
if (result.report_big_o)
{
std::string big_o = GetBigOString(result.complexity);
printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time, big_o.c_str(), cpu_time, big_o.c_str());
}
else if (result.report_rms)
{
printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%", cpu_time * 100, "%");
}
else
{
const char *timeLabel = GetTimeUnitString(result.time_unit);
printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(), timeLabel, cpu_time_str.c_str(),
timeLabel);
}
if (!result.report_big_o && !result.report_rms)
{
printer(Out, COLOR_CYAN, "%10lld", result.iterations);
}
for (auto &c : result.counters)
{
const std::size_t cNameLen = std::max(std::string::size_type(10), c.first.length());
auto const &s = HumanReadableNumber(c.second.value, c.second.oneK);
const char *unit = "";
if (c.second.flags & Counter::kIsRate)
unit = (c.second.flags & Counter::kInvert) ? "s" : "/s";
if (output_options_ & OO_Tabular)
{
printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), unit);
}
else
{
printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit);
}
}
if (!result.report_label.empty())
{
printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str());
}
printer(Out, COLOR_DEFAULT, "\n");
}
} // end namespace benchmark

View File

@ -1,98 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "counter.h"
namespace benchmark
{
namespace internal
{
double Finish(Counter const &c, IterationCount iterations, double cpu_time, double num_threads)
{
double v = c.value;
if (c.flags & Counter::kIsRate)
{
v /= cpu_time;
}
if (c.flags & Counter::kAvgThreads)
{
v /= num_threads;
}
if (c.flags & Counter::kIsIterationInvariant)
{
v *= iterations;
}
if (c.flags & Counter::kAvgIterations)
{
v /= iterations;
}
if (c.flags & Counter::kInvert)
{ // Invert is *always* last.
v = 1.0 / v;
}
return v;
}
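// A worked example of the flag pipeline above (values illustrative): a
// counter with value 64 and flags kIsRate | kAvgThreads, finished with
// cpu_time = 2.0 s and num_threads = 4, yields 64 / 2.0 / 4 = 8.0,
// i.e. a per-second, per-thread rate.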
void Finish(UserCounters *l, IterationCount iterations, double cpu_time, double num_threads)
{
for (auto &c : *l)
{
c.second.value = Finish(c.second, iterations, cpu_time, num_threads);
}
}
void Increment(UserCounters *l, UserCounters const &r)
{
// add counters present in both or just in *l
for (auto &c : *l)
{
auto it = r.find(c.first);
if (it != r.end())
{
c.second.value = c.second + it->second;
}
}
// add counters present in r, but not in *l
for (auto const &tc : r)
{
auto it = l->find(tc.first);
if (it == l->end())
{
(*l)[tc.first] = tc.second;
}
}
}
bool SameNames(UserCounters const &l, UserCounters const &r)
{
if (&l == &r)
return true;
if (l.size() != r.size())
{
return false;
}
for (auto const &c : l)
{
if (r.find(c.first) == r.end())
{
return false;
}
}
return true;
}
} // end namespace internal
} // end namespace benchmark

View File

@ -1,33 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_COUNTER_H_
#define BENCHMARK_COUNTER_H_
#include "benchmark/benchmark.h"
namespace benchmark
{
// these counter-related functions are hidden to reduce API surface.
namespace internal
{
void Finish(UserCounters *l, IterationCount iterations, double time, double num_threads);
void Increment(UserCounters *l, UserCounters const &r);
bool SameNames(UserCounters const &l, UserCounters const &r);
} // end namespace internal
} // end namespace benchmark
#endif // BENCHMARK_COUNTER_H_

View File

@ -1,186 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "complexity.h"
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <vector>
#include "check.h"
#include "string_util.h"
#include "timers.h"
// File format reference: http://edoceo.com/utilitas/csv-file-format.
namespace benchmark
{
namespace
{
std::vector<std::string> elements = {"name", "iterations", "real_time", "cpu_time",
"time_unit", "bytes_per_second", "items_per_second", "label",
"error_occurred", "error_message"};
} // namespace
std::string CsvEscape(const std::string &s)
{
std::string tmp;
tmp.reserve(s.size() + 2);
for (char c : s)
{
switch (c)
{
case '"':
tmp += "\"\"";
break;
default:
tmp += c;
break;
}
}
return '"' + tmp + '"';
}
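// For example (illustrative): CsvEscape("say \"hi\"") produces
//
//   "say ""hi"""
//
// i.e. the whole field is wrapped in quotes and each embedded quote is
// doubled, per the CSV format referenced above.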
bool CSVReporter::ReportContext(const Context &context)
{
PrintBasicContext(&GetErrorStream(), context);
return true;
}
void CSVReporter::ReportRuns(const std::vector<Run> &reports)
{
std::ostream &Out = GetOutputStream();
if (!printed_header_)
{
// save the names of all the user counters
for (const auto &run : reports)
{
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
user_counter_names_.insert(cnt.first);
}
}
// print the header
for (auto B = elements.begin(); B != elements.end();)
{
Out << *B++;
if (B != elements.end())
Out << ",";
}
for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();)
{
Out << ",\"" << *B++ << "\"";
}
Out << "\n";
printed_header_ = true;
}
else
{
// check that all the current counters are saved in the name set
for (const auto &run : reports)
{
for (const auto &cnt : run.counters)
{
if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
continue;
CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
<< "All counters must be present in each run. "
<< "Counter named \"" << cnt.first << "\" was not in a run after being added to the header";
}
}
}
// print results for each run
for (const auto &run : reports)
{
PrintRunData(run);
}
}
void CSVReporter::PrintRunData(const Run &run)
{
std::ostream &Out = GetOutputStream();
Out << CsvEscape(run.benchmark_name()) << ",";
if (run.error_occurred)
{
Out << std::string(elements.size() - 3, ',');
Out << "true,";
Out << CsvEscape(run.error_message) << "\n";
return;
}
    // Do not print iterations for bigO and RMS reports
if (!run.report_big_o && !run.report_rms)
{
Out << run.iterations;
}
Out << ",";
Out << run.GetAdjustedRealTime() << ",";
Out << run.GetAdjustedCPUTime() << ",";
    // Do not print the time label for bigO and RMS reports
if (run.report_big_o)
{
Out << GetBigOString(run.complexity);
}
else if (!run.report_rms)
{
Out << GetTimeUnitString(run.time_unit);
}
Out << ",";
if (run.counters.find("bytes_per_second") != run.counters.end())
{
Out << run.counters.at("bytes_per_second");
}
Out << ",";
if (run.counters.find("items_per_second") != run.counters.end())
{
Out << run.counters.at("items_per_second");
}
Out << ",";
if (!run.report_label.empty())
{
Out << CsvEscape(run.report_label);
}
Out << ",,"; // for error_occurred and error_message
// Print user counters
for (const auto &ucn : user_counter_names_)
{
auto it = run.counters.find(ucn);
if (it == run.counters.end())
{
Out << ",";
}
else
{
Out << "," << it->second;
}
}
Out << '\n';
}
} // end namespace benchmark

View File

@ -1,209 +0,0 @@
// ----------------------------------------------------------------------
// CycleClock
// A CycleClock tells you the current time in Cycles. The "time"
// is actually time since power-on. This is like time() but doesn't
// involve a system call and is much more precise.
//
// NOTE: Not all cpu/platform/kernel combinations guarantee that this
// clock increments at a constant rate or is synchronized across all logical
// cpus in a system.
//
// If you need the above guarantees, please consider using a different
// API. There are efforts to provide an interface with millisecond
// granularity that is implemented as a memory read. A memory read is generally
// cheaper than the CycleClock for many architectures.
//
// Also, in some out of order CPU implementations, the CycleClock is not
// serializing. So if you're trying to count at cycles granularity, your
// data might be inaccurate due to out of order instruction execution.
// ----------------------------------------------------------------------
#ifndef BENCHMARK_CYCLECLOCK_H_
#define BENCHMARK_CYCLECLOCK_H_
#include <cstdint>
#include "benchmark/benchmark.h"
#include "internal_macros.h"
#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_time.h>
#endif
// For MSVC, we want to use '_asm rdtsc' when possible (since it works
// with even ancient MSVC compilers), and when not possible the
// __rdtsc intrinsic, declared in <intrin.h>. Unfortunately, in some
// environments, <windows.h> and <intrin.h> have conflicting
// declarations of some other intrinsics, breaking compilation.
// Therefore, we simply declare __rdtsc ourselves. See also
// http://connect.microsoft.com/VisualStudio/feedback/details/262047
#if defined(COMPILER_MSVC) && !defined(_M_IX86)
extern "C" uint64_t __rdtsc();
#pragma intrinsic(__rdtsc)
#endif
#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW)
#include <sys/time.h>
#include <time.h>
#endif
#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif
namespace benchmark
{
// NOTE: only i386 and x86_64 have been well tested.
// PPC, sparc, alpha, and ia64 are based on
// http://peter.kuscsik.com/wordpress/?p=14
// with modifications by m3b. See also
// https://setisvn.ssl.berkeley.edu/svn/lib/fftw-3.0.1/kernel/cycle.h
namespace cycleclock
{
// This should return the number of cycles since power-on. Thread-safe.
inline BENCHMARK_ALWAYS_INLINE int64_t Now()
{
#if defined(BENCHMARK_OS_MACOSX)
// this goes at the top because we need ALL Macs, regardless of
// architecture, to return the number of "mach time units" that
// have passed since startup. See sysinfo.cc where
// InitializeSystemInfo() sets the supposed cpu clock frequency of
// macs to the number of mach time units per second, not actual
// CPU clock frequency (which can change in the face of CPU
// frequency scaling). Also note that when the Mac sleeps, this
// counter pauses; it does not continue counting, nor does it
// reset to zero.
return mach_absolute_time();
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// this goes above x86-specific code because old versions of Emscripten
// define __x86_64__, although they have nothing to do with it.
return static_cast<int64_t>(emscripten_get_now() * 1e+6);
#elif defined(__i386__)
int64_t ret;
__asm__ volatile("rdtsc" : "=A"(ret));
return ret;
#elif defined(__x86_64__) || defined(__amd64__)
uint64_t low, high;
__asm__ volatile("rdtsc" : "=a"(low), "=d"(high));
return (high << 32) | low;
#elif defined(__powerpc__) || defined(__ppc__)
// This returns a time-base, which is not always precisely a cycle-count.
#if defined(__powerpc64__) || defined(__ppc64__)
int64_t tb;
asm volatile("mfspr %0, 268" : "=r"(tb));
return tb;
#else
uint32_t tbl, tbu0, tbu1;
asm volatile("mftbu %0\n"
"mftbl %1\n"
"mftbu %2"
: "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
tbl &= -static_cast<int32_t>(tbu0 == tbu1);
// high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
return (static_cast<uint64_t>(tbu1) << 32) | tbl;
#endif
#elif defined(__sparc__)
int64_t tick;
asm(".byte 0x83, 0x41, 0x00, 0x00");
asm("mov %%g1, %0" : "=r"(tick));
return tick;
#elif defined(__ia64__)
int64_t itc;
asm("mov %0 = ar.itc" : "=r"(itc));
return itc;
#elif defined(COMPILER_MSVC) && defined(_M_IX86)
// Older MSVC compilers (like 7.x) don't seem to support the
// __rdtsc intrinsic properly, so I prefer to use _asm instead
// when I know it will work. Otherwise, I'll use __rdtsc and hope
// the code is being compiled with a non-ancient compiler.
_asm rdtsc
#elif defined(COMPILER_MSVC)
return __rdtsc();
#elif defined(BENCHMARK_OS_NACL)
// Native Client validator on x86/x86-64 allows RDTSC instructions,
// and this case is handled above. Native Client validator on ARM
// rejects MRC instructions (used in the ARM-specific sequence below),
// so we handle it here. Portable Native Client compiles to
// architecture-agnostic bytecode, which doesn't provide any
// cycle counter access mnemonics.
// Native Client does not provide any API to access cycle counter.
// Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
// because it provides nanosecond resolution (which is noticeable at
// least for PNaCl modules running on x86 Mac & Linux).
// Initialize to always return 0 if clock_gettime fails.
struct timespec ts = {0, 0};
clock_gettime(CLOCK_MONOTONIC, &ts);
return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
#elif defined(__aarch64__)
// System timer of ARMv8 runs at a different frequency than the CPU's.
// The frequency is fixed, typically in the range 1-50MHz. It can be
// read from the CNTFRQ special register. We assume the OS has set up
// the virtual timer properly.
int64_t virtual_timer_value;
asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
return virtual_timer_value;
#elif defined(__ARM_ARCH)
// V6 is the earliest arch that has a standard cyclecount
// Native Client validator doesn't allow MRC instructions.
#if (__ARM_ARCH >= 6)
uint32_t pmccntr;
uint32_t pmuseren;
uint32_t pmcntenset;
// Read the user mode perf monitor counter access permissions.
asm volatile("mrc p15, 0, %0, c9, c14, 0" : "=r"(pmuseren));
if (pmuseren & 1)
{ // Allows reading perfmon counters for user mode code.
asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r"(pmcntenset));
if (pmcntenset & 0x80000000ul)
{ // Is it counting?
asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r"(pmccntr));
// The counter is set up to count every 64th cycle
return static_cast<int64_t>(pmccntr) * 64; // Should optimize to << 6
}
}
#endif
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__mips__)
// mips apparently only allows rdtsc for superusers, so we fall
// back to gettimeofday. It's possible clock_gettime would be better.
struct timeval tv;
gettimeofday(&tv, nullptr);
return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
#elif defined(__s390__) // Covers both s390 and s390x.
// Return the CPU clock.
uint64_t tsc;
asm("stck %0" : "=Q"(tsc) : : "cc");
return tsc;
#elif defined(__riscv) // RISC-V
// Use RDCYCLE (and RDCYCLEH on riscv32)
#if __riscv_xlen == 32
uint32_t cycles_lo, cycles_hi0, cycles_hi1;
// This asm also includes the PowerPC overflow handling strategy, as above.
// Implemented in assembly because Clang insisted on branching.
asm volatile("rdcycleh %0\n"
"rdcycle %1\n"
"rdcycleh %2\n"
"sub %0, %0, %2\n"
"seqz %0, %0\n"
"sub %0, zero, %0\n"
"and %1, %1, %0\n"
: "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
#else
uint64_t cycles;
asm volatile("rdcycle %0" : "=r"(cycles));
return cycles;
#endif
#else
// The soft failover to a generic implementation is automatic only for ARM.
// For other platforms the developer is expected to make an attempt to create
// a fast implementation and fall back to the generic version if nothing better is available.
#error You need to define CycleTimer for your OS and CPU
#endif
}
} // end namespace cycleclock
} // end namespace benchmark
#endif // BENCHMARK_CYCLECLOCK_H_
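// A minimal timing sketch against this internal header (the include path
// is illustrative):
//
//   #include "cycleclock.h"
//   int64_t start = benchmark::cycleclock::Now();
//   // ... code under measurement ...
//   int64_t elapsed = benchmark::cycleclock::Now() - start;
//
// Per the caveats at the top of this file, treat `elapsed` as approximate:
// it is a raw cycle count (or fixed-rate timebase) and is not serializing.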

View File

@ -1,94 +0,0 @@
#ifndef BENCHMARK_INTERNAL_MACROS_H_
#define BENCHMARK_INTERNAL_MACROS_H_
#include "benchmark/benchmark.h"
/* Needed to detect STL */
#include <cstdlib>
// clang-format off
#ifndef __has_feature
#define __has_feature(x) 0
#endif
#if defined(__clang__)
#if !defined(COMPILER_CLANG)
#define COMPILER_CLANG
#endif
#elif defined(_MSC_VER)
#if !defined(COMPILER_MSVC)
#define COMPILER_MSVC
#endif
#elif defined(__GNUC__)
#if !defined(COMPILER_GCC)
#define COMPILER_GCC
#endif
#endif
#if __has_feature(cxx_attributes)
#define BENCHMARK_NORETURN [[noreturn]]
#elif defined(__GNUC__)
#define BENCHMARK_NORETURN __attribute__((noreturn))
#elif defined(COMPILER_MSVC)
#define BENCHMARK_NORETURN __declspec(noreturn)
#else
#define BENCHMARK_NORETURN
#endif
#if defined(__CYGWIN__)
#define BENCHMARK_OS_CYGWIN 1
#elif defined(_WIN32)
#define BENCHMARK_OS_WINDOWS 1
#if defined(__MINGW32__)
#define BENCHMARK_OS_MINGW 1
#endif
#elif defined(__APPLE__)
#define BENCHMARK_OS_APPLE 1
#include "TargetConditionals.h"
#if defined(TARGET_OS_MAC)
#define BENCHMARK_OS_MACOSX 1
#if defined(TARGET_OS_IPHONE)
#define BENCHMARK_OS_IOS 1
#endif
#endif
#elif defined(__FreeBSD__)
#define BENCHMARK_OS_FREEBSD 1
#elif defined(__NetBSD__)
#define BENCHMARK_OS_NETBSD 1
#elif defined(__OpenBSD__)
#define BENCHMARK_OS_OPENBSD 1
#elif defined(__linux__)
#define BENCHMARK_OS_LINUX 1
#elif defined(__native_client__)
#define BENCHMARK_OS_NACL 1
#elif defined(__EMSCRIPTEN__)
#define BENCHMARK_OS_EMSCRIPTEN 1
#elif defined(__rtems__)
#define BENCHMARK_OS_RTEMS 1
#elif defined(__Fuchsia__)
#define BENCHMARK_OS_FUCHSIA 1
#elif defined (__SVR4) && defined (__sun)
#define BENCHMARK_OS_SOLARIS 1
#elif defined(__QNX__)
#define BENCHMARK_OS_QNX 1
#endif
#if defined(__ANDROID__) && defined(__GLIBCXX__)
#define BENCHMARK_STL_ANDROID_GNUSTL 1
#endif
#if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
&& !defined(__EXCEPTIONS)
#define BENCHMARK_HAS_NO_EXCEPTIONS
#endif
#if defined(COMPILER_CLANG) || defined(COMPILER_GCC)
#define BENCHMARK_MAYBE_UNUSED __attribute__((unused))
#else
#define BENCHMARK_MAYBE_UNUSED
#endif
// clang-format on
#endif // BENCHMARK_INTERNAL_MACROS_H_

View File

@ -1,294 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "complexity.h"
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iomanip> // for setprecision
#include <iostream>
#include <limits>
#include <string>
#include <tuple>
#include <vector>
#include "string_util.h"
#include "timers.h"
namespace benchmark
{
namespace
{
std::string StrEscape(const std::string &s)
{
std::string tmp;
tmp.reserve(s.size());
for (char c : s)
{
switch (c)
{
case '\b':
tmp += "\\b";
break;
case '\f':
tmp += "\\f";
break;
case '\n':
tmp += "\\n";
break;
case '\r':
tmp += "\\r";
break;
case '\t':
tmp += "\\t";
break;
case '\\':
tmp += "\\\\";
break;
case '"':
tmp += "\\\"";
break;
default:
tmp += c;
break;
}
}
return tmp;
}
std::string FormatKV(std::string const &key, std::string const &value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const &key, const char *value)
{
return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), StrEscape(value).c_str());
}
std::string FormatKV(std::string const &key, bool value)
{
return StrFormat("\"%s\": %s", StrEscape(key).c_str(), value ? "true" : "false");
}
std::string FormatKV(std::string const &key, int64_t value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const &key, IterationCount value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": " << value;
return ss.str();
}
std::string FormatKV(std::string const &key, double value)
{
std::stringstream ss;
ss << '"' << StrEscape(key) << "\": ";
if (std::isnan(value))
ss << (value < 0 ? "-" : "") << "NaN";
else if (std::isinf(value))
ss << (value < 0 ? "-" : "") << "Infinity";
else
{
const auto max_digits10 = std::numeric_limits<decltype(value)>::max_digits10;
const auto max_fractional_digits10 = max_digits10 - 1;
ss << std::scientific << std::setprecision(max_fractional_digits10) << value;
}
return ss.str();
}
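// E.g. (illustrative) FormatKV("real_time", 2.5) yields
// "\"real_time\": 2.5000000000000000e+00": printing with max_digits10
// (17 for IEEE double, so 16 fractional digits) keeps the value
// round-trippable through the JSON text.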
int64_t RoundDouble(double v)
{
return std::lround(v);
}
} // end namespace
bool JSONReporter::ReportContext(const Context &context)
{
std::ostream &out = GetOutputStream();
out << "{\n";
std::string inner_indent(2, ' ');
// Open context block and print context information.
out << inner_indent << "\"context\": {\n";
std::string indent(4, ' ');
std::string walltime_value = LocalDateTimeString();
out << indent << FormatKV("date", walltime_value) << ",\n";
out << indent << FormatKV("host_name", context.sys_info.name) << ",\n";
if (Context::executable_name)
{
out << indent << FormatKV("executable", Context::executable_name) << ",\n";
}
CPUInfo const &info = context.cpu_info;
out << indent << FormatKV("num_cpus", static_cast<int64_t>(info.num_cpus)) << ",\n";
out << indent << FormatKV("mhz_per_cpu", RoundDouble(info.cycles_per_second / 1000000.0)) << ",\n";
out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) << ",\n";
out << indent << "\"caches\": [\n";
indent = std::string(6, ' ');
std::string cache_indent(8, ' ');
for (size_t i = 0; i < info.caches.size(); ++i)
{
auto &CI = info.caches[i];
out << indent << "{\n";
out << cache_indent << FormatKV("type", CI.type) << ",\n";
out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level)) << ",\n";
out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size)) << ",\n";
out << cache_indent << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing)) << "\n";
out << indent << "}";
if (i != info.caches.size() - 1)
out << ",";
out << "\n";
}
indent = std::string(4, ' ');
out << indent << "],\n";
out << indent << "\"load_avg\": [";
for (auto it = info.load_avg.begin(); it != info.load_avg.end();)
{
out << *it++;
if (it != info.load_avg.end())
out << ",";
}
out << "],\n";
#if defined(NDEBUG)
const char build_type[] = "release";
#else
const char build_type[] = "debug";
#endif
out << indent << FormatKV("library_build_type", build_type) << "\n";
// Close context block and open the list of benchmarks.
out << inner_indent << "},\n";
out << inner_indent << "\"benchmarks\": [\n";
return true;
}
void JSONReporter::ReportRuns(std::vector<Run> const &reports)
{
if (reports.empty())
{
return;
}
std::string indent(4, ' ');
std::ostream &out = GetOutputStream();
if (!first_report_)
{
out << ",\n";
}
first_report_ = false;
for (auto it = reports.begin(); it != reports.end(); ++it)
{
out << indent << "{\n";
PrintRunData(*it);
out << indent << '}';
auto it_cp = it;
if (++it_cp != reports.end())
{
out << ",\n";
}
}
}
void JSONReporter::Finalize()
{
// Close the list of benchmarks and the top level object.
GetOutputStream() << "\n ]\n}\n";
}
void JSONReporter::PrintRunData(Run const &run)
{
std::string indent(6, ' ');
std::ostream &out = GetOutputStream();
out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
out << indent << FormatKV("run_type", [&run]() -> const char * {
switch (run.run_type)
{
case BenchmarkReporter::Run::RT_Iteration:
return "iteration";
case BenchmarkReporter::Run::RT_Aggregate:
return "aggregate";
}
BENCHMARK_UNREACHABLE();
}()) << ",\n";
out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
if (run.run_type != BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("repetition_index", run.repetition_index) << ",\n";
}
out << indent << FormatKV("threads", run.threads) << ",\n";
if (run.run_type == BenchmarkReporter::Run::RT_Aggregate)
{
out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
}
if (run.error_occurred)
{
out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
out << indent << FormatKV("error_message", run.error_message) << ",\n";
}
if (!run.report_big_o && !run.report_rms)
{
out << indent << FormatKV("iterations", run.iterations) << ",\n";
out << indent << FormatKV("real_time", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime());
out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
else if (run.report_big_o)
{
out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n";
out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n";
out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n";
out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit));
}
else if (run.report_rms)
{
out << indent << FormatKV("rms", run.GetAdjustedCPUTime());
}
for (auto &c : run.counters)
{
out << ",\n" << indent << FormatKV(c.first, c.second);
}
if (run.has_memory_result)
{
out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter);
out << ",\n" << indent << FormatKV("max_bytes_used", run.max_bytes_used);
}
if (!run.report_label.empty())
{
out << ",\n" << indent << FormatKV("label", run.report_label);
}
out << '\n';
}
} // end namespace benchmark

View File

@ -1,86 +0,0 @@
#ifndef BENCHMARK_LOG_H_
#define BENCHMARK_LOG_H_
#include <iostream>
#include <ostream>
#include "benchmark/benchmark.h"
namespace benchmark
{
namespace internal
{
typedef std::basic_ostream<char> &(EndLType)(std::basic_ostream<char> &);
class LogType
{
friend LogType &GetNullLogInstance();
friend LogType &GetErrorLogInstance();
// FIXME: Add locking to output.
template <class Tp> friend LogType &operator<<(LogType &, Tp const &);
friend LogType &operator<<(LogType &, EndLType *);
private:
LogType(std::ostream *out) : out_(out)
{
}
std::ostream *out_;
BENCHMARK_DISALLOW_COPY_AND_ASSIGN(LogType);
};
template <class Tp> LogType &operator<<(LogType &log, Tp const &value)
{
if (log.out_)
{
*log.out_ << value;
}
return log;
}
inline LogType &operator<<(LogType &log, EndLType *m)
{
if (log.out_)
{
*log.out_ << m;
}
return log;
}
inline int &LogLevel()
{
static int log_level = 0;
return log_level;
}
inline LogType &GetNullLogInstance()
{
static LogType log(nullptr);
return log;
}
inline LogType &GetErrorLogInstance()
{
static LogType log(&std::clog);
return log;
}
inline LogType &GetLogInstanceForLevel(int level)
{
if (level <= LogLevel())
{
return GetErrorLogInstance();
}
return GetNullLogInstance();
}
} // end namespace internal
} // end namespace benchmark
// clang-format off
#define VLOG(x) \
(::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \
" ")
// clang-format on
#endif
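// A hedged usage sketch: assuming LogLevel() has been raised to 2 at
// startup,
//
//   VLOG(1) << "coarse detail" << std::endl; // printed (1 <= 2)
//   VLOG(3) << "fine detail" << std::endl;   // swallowed by the null log
//
// and each printed line is prefixed by the macro above, e.g. "-- LOG(1): ".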

View File

@ -1,174 +0,0 @@
#ifndef BENCHMARK_MUTEX_H_
#define BENCHMARK_MUTEX_H_
#include <condition_variable>
#include <mutex>
#include "check.h"
// Enable thread safety attributes only with clang.
// The attributes can be safely erased when compiling with other compilers.
#if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
#else
#define THREAD_ANNOTATION_ATTRIBUTE__(x) // no-op
#endif
#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
#define ACQUIRED_BEFORE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
#define ACQUIRED_AFTER(...) THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
#define REQUIRES(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
#define REQUIRES_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
#define ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
#define ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
#define RELEASE(...) THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
#define RELEASE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
#define TRY_ACQUIRE(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
#define TRY_ACQUIRE_SHARED(...) THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
#define ASSERT_SHARED_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
namespace benchmark
{
typedef std::condition_variable Condition;
// NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
// we can annotate them with thread safety attributes and use the
// -Wthread-safety warning with clang. The standard library types cannot be
// used directly because they do not provide the required annotations.
class CAPABILITY("mutex") Mutex
{
public:
Mutex()
{
}
void lock() ACQUIRE()
{
mut_.lock();
}
void unlock() RELEASE()
{
mut_.unlock();
}
std::mutex &native_handle()
{
return mut_;
}
private:
std::mutex mut_;
};
class SCOPED_CAPABILITY MutexLock
{
typedef std::unique_lock<std::mutex> MutexLockImp;
public:
MutexLock(Mutex &m) ACQUIRE(m) : ml_(m.native_handle())
{
}
~MutexLock() RELEASE()
{
}
MutexLockImp &native_handle()
{
return ml_;
}
private:
MutexLockImp ml_;
};
class Barrier
{
public:
Barrier(int num_threads) : running_threads_(num_threads)
{
}
// Called by each thread
bool wait() EXCLUDES(lock_)
{
bool last_thread = false;
{
MutexLock ml(lock_);
last_thread = createBarrier(ml);
}
if (last_thread)
phase_condition_.notify_all();
return last_thread;
}
void removeThread() EXCLUDES(lock_)
{
MutexLock ml(lock_);
--running_threads_;
if (entered_ != 0)
phase_condition_.notify_all();
}
private:
Mutex lock_;
Condition phase_condition_;
int running_threads_;
// State for barrier management
int phase_number_ = 0;
int entered_ = 0; // Number of threads that have entered this barrier
// Enter the barrier and wait until all other threads have also
    // entered the barrier. Returns true iff this is the last thread to
// enter the barrier.
bool createBarrier(MutexLock &ml) REQUIRES(lock_)
{
CHECK_LT(entered_, running_threads_);
entered_++;
if (entered_ < running_threads_)
{
// Wait for all threads to enter
int phase_number_cp = phase_number_;
auto cb = [this, phase_number_cp]() {
return this->phase_number_ > phase_number_cp ||
entered_ == running_threads_; // A thread has aborted in error
};
phase_condition_.wait(ml.native_handle(), cb);
if (phase_number_ > phase_number_cp)
return false;
// else (running_threads_ == entered_) and we are the last thread.
}
// Last thread has reached the barrier
phase_number_++;
entered_ = 0;
return true;
}
};
} // end namespace benchmark
#endif // BENCHMARK_MUTEX_H_
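// A minimal sketch of the Barrier above (the thread count and the
// one-time work are illustrative):
//
//   #include <thread>
//   #include <vector>
//
//   benchmark::Barrier barrier(4);
//   std::vector<std::thread> pool;
//   for (int i = 0; i < 4; ++i)
//       pool.emplace_back([&barrier] {
//           // ... per-thread setup ...
//           if (barrier.wait())
//           {
//               // exactly one thread (the last to arrive) runs this
//           }
//           // all four threads proceed together from here
//       });
//   for (auto &t : pool)
//       t.join();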

View File

@ -1,177 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef BENCHMARK_RE_H_
#define BENCHMARK_RE_H_
#include "internal_macros.h"
// clang-format off
#if !defined(HAVE_STD_REGEX) && \
!defined(HAVE_GNU_POSIX_REGEX) && \
!defined(HAVE_POSIX_REGEX)
// No explicit regex selection; detect based on builtin hints.
#if defined(BENCHMARK_OS_LINUX) || defined(BENCHMARK_OS_APPLE)
#define HAVE_POSIX_REGEX 1
#elif __cplusplus >= 199711L
#define HAVE_STD_REGEX 1
#endif
#endif
// Prefer C regex libraries when compiling w/o exceptions so that we can
// correctly report errors.
#if defined(BENCHMARK_HAS_NO_EXCEPTIONS) && \
    defined(HAVE_STD_REGEX) && \
(defined(HAVE_GNU_POSIX_REGEX) || defined(HAVE_POSIX_REGEX))
#undef HAVE_STD_REGEX
#endif
#if defined(HAVE_STD_REGEX)
#include <regex>
#elif defined(HAVE_GNU_POSIX_REGEX)
#include <gnuregex.h>
#elif defined(HAVE_POSIX_REGEX)
#include <regex.h>
#else
#error No regular expression backend was found!
#endif
// clang-format on
#include <string>
#include "check.h"
namespace benchmark
{
// A wrapper around the POSIX regular expression API that provides automatic
// cleanup
class Regex
{
public:
Regex() : init_(false)
{
}
~Regex();
// Compile a regular expression matcher from spec. Returns true on success.
//
    // On failure (and if error is not nullptr), error is populated with a
    // human-readable error message.
bool Init(const std::string &spec, std::string *error);
// Returns whether str matches the compiled regular expression.
bool Match(const std::string &str);
private:
bool init_;
// Underlying regular expression object
#if defined(HAVE_STD_REGEX)
std::regex re_;
#elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX)
regex_t re_;
#else
#error No regular expression backend implementation available
#endif
};
#if defined(HAVE_STD_REGEX)
inline bool Regex::Init(const std::string &spec, std::string *error)
{
#ifdef BENCHMARK_HAS_NO_EXCEPTIONS
((void)error); // suppress unused warning
#else
try
{
#endif
re_ = std::regex(spec, std::regex_constants::extended);
init_ = true;
#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
}
catch (const std::regex_error &e)
{
if (error)
{
*error = e.what();
}
}
#endif
return init_;
}
inline Regex::~Regex()
{
}
inline bool Regex::Match(const std::string &str)
{
if (!init_)
{
return false;
}
return std::regex_search(str, re_);
}
#else
inline bool Regex::Init(const std::string &spec, std::string *error)
{
int ec = regcomp(&re_, spec.c_str(), REG_EXTENDED | REG_NOSUB);
if (ec != 0)
{
if (error)
{
size_t needed = regerror(ec, &re_, nullptr, 0);
char *errbuf = new char[needed];
regerror(ec, &re_, errbuf, needed);
// regerror returns the number of bytes necessary to null terminate
            // the string, so we drop the trailing null byte when assigning to error.
CHECK_NE(needed, 0);
error->assign(errbuf, needed - 1);
delete[] errbuf;
}
return false;
}
init_ = true;
return true;
}
inline Regex::~Regex()
{
if (init_)
{
regfree(&re_);
}
}
inline bool Regex::Match(const std::string &str)
{
if (!init_)
{
return false;
}
return regexec(&re_, str.c_str(), 0, nullptr, 0) == 0;
}
#endif
} // end namespace benchmark
#endif // BENCHMARK_RE_H_
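// A minimal usage sketch (the pattern is illustrative); note that Init()
// compiles POSIX *extended* syntax on every backend above:
//
//   benchmark::Regex re;
//   std::string err;
//   if (re.Init("^BM_.*", &err))
//   {
//       bool hit = re.Match("BM_StringCopy"); // true
//   }
//   else
//   {
//       // err holds a human-readable message
//   }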

View File

@ -1,120 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "timers.h"
#include <cstdlib>
#include <iostream>
#include <tuple>
#include <vector>
#include "check.h"
#include "string_util.h"
namespace benchmark
{
BenchmarkReporter::BenchmarkReporter() : output_stream_(&std::cout), error_stream_(&std::cerr)
{
}
BenchmarkReporter::~BenchmarkReporter()
{
}
void BenchmarkReporter::PrintBasicContext(std::ostream *out, Context const &context)
{
CHECK(out) << "cannot be null";
auto &Out = *out;
Out << LocalDateTimeString() << "\n";
if (context.executable_name)
Out << "Running " << context.executable_name << "\n";
const CPUInfo &info = context.cpu_info;
Out << "Run on (" << info.num_cpus << " X " << (info.cycles_per_second / 1000000.0) << " MHz CPU "
<< ((info.num_cpus > 1) ? "s" : "") << ")\n";
if (info.caches.size() != 0)
{
Out << "CPU Caches:\n";
for (auto &CInfo : info.caches)
{
Out << " L" << CInfo.level << " " << CInfo.type << " " << (CInfo.size / 1024) << " KiB";
if (CInfo.num_sharing != 0)
Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")";
Out << "\n";
}
}
if (!info.load_avg.empty())
{
Out << "Load Average: ";
for (auto It = info.load_avg.begin(); It != info.load_avg.end();)
{
Out << StrFormat("%.2f", *It++);
if (It != info.load_avg.end())
Out << ", ";
}
Out << "\n";
}
if (info.scaling_enabled)
{
Out << "***WARNING*** CPU scaling is enabled, the benchmark "
"real time measurements may be noisy and will incur extra "
"overhead.\n";
}
#ifndef NDEBUG
Out << "***WARNING*** Library was built as DEBUG. Timings may be "
"affected.\n";
#endif
}
// No initializer because it's already initialized to NULL.
const char *BenchmarkReporter::Context::executable_name;
BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get())
{
}
std::string BenchmarkReporter::Run::benchmark_name() const
{
std::string name = run_name.str();
if (run_type == RT_Aggregate)
{
name += "_" + aggregate_name;
}
return name;
}
double BenchmarkReporter::Run::GetAdjustedRealTime() const
{
double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
double BenchmarkReporter::Run::GetAdjustedCPUTime() const
{
double new_time = cpu_accumulated_time * GetTimeUnitMultiplier(time_unit);
if (iterations != 0)
new_time /= static_cast<double>(iterations);
return new_time;
}
} // end namespace benchmark
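To make GetAdjustedRealTime concrete: a run that accumulated 2.0 s of real time over 1,000,000 iterations, reported in microseconds (time-unit multiplier 1e6), averages out to 2 us per iteration. A worked sketch with hypothetical numbers:
#include <cstdio>
int main()
{
    const double real_accumulated_time = 2.0; // seconds, hypothetical
    const double unit_multiplier = 1e6;       // e.g. the microsecond time unit
    const long iterations = 1000000;
    double adjusted = real_accumulated_time * unit_multiplier;
    if (iterations != 0)
        adjusted /= static_cast<double>(iterations);
    std::printf("%.1f us/iter\n", adjusted); // prints 2.0 us/iter
    return 0;
}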

View File

@ -1,59 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "sleep.h"
#include <cerrno>
#include <cstdlib>
#include <ctime>
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <windows.h>
#endif
namespace benchmark
{
#ifdef BENCHMARK_OS_WINDOWS
// Windows' Sleep takes a milliseconds argument.
void SleepForMilliseconds(int milliseconds)
{
Sleep(milliseconds);
}
void SleepForSeconds(double seconds)
{
SleepForMilliseconds(static_cast<int>(kNumMillisPerSecond * seconds));
}
#else // BENCHMARK_OS_WINDOWS
void SleepForMicroseconds(int microseconds)
{
struct timespec sleep_time;
sleep_time.tv_sec = microseconds / kNumMicrosPerSecond;
sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro;
while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR)
; // Ignore signals and wait for the full interval to elapse.
}
void SleepForMilliseconds(int milliseconds)
{
SleepForMicroseconds(milliseconds * kNumMicrosPerMilli);
}
void SleepForSeconds(double seconds)
{
SleepForMicroseconds(static_cast<int>(seconds * kNumMicrosPerSecond));
}
#endif // BENCHMARK_OS_WINDOWS
} // end namespace benchmark
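The nanosleep loop above works because nanosleep writes the unslept remainder back into its second argument on EINTR, so each retry resumes where the interrupted sleep stopped. A quick way to observe that the full interval elapses (a hypothetical driver, assuming the declarations from sleep.h):
#include <chrono>
#include <cstdio>
#include "sleep.h" // hypothetical include path
int main()
{
    auto t0 = std::chrono::steady_clock::now();
    benchmark::SleepForMilliseconds(250);
    std::chrono::duration<double> dt = std::chrono::steady_clock::now() - t0;
    std::printf("slept %.3f s\n", dt.count()); // >= 0.250 even if signals arrive
    return 0;
}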

View File

@ -1,16 +0,0 @@
#ifndef BENCHMARK_SLEEP_H_
#define BENCHMARK_SLEEP_H_
namespace benchmark
{
const int kNumMillisPerSecond = 1000;
const int kNumMicrosPerMilli = 1000;
const int kNumMicrosPerSecond = kNumMillisPerSecond * 1000;
const int kNumNanosPerMicro = 1000;
const int kNumNanosPerSecond = kNumNanosPerMicro * kNumMicrosPerSecond;
void SleepForMilliseconds(int milliseconds);
void SleepForSeconds(double seconds);
} // end namespace benchmark
#endif // BENCHMARK_SLEEP_H_

View File

@ -1,209 +0,0 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
// Copyright 2017 Roman Lebedev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "benchmark/benchmark.h"
#include "check.h"
#include "statistics.h"
#include <algorithm>
#include <cmath>
#include <numeric>
#include <string>
#include <vector>
namespace benchmark
{
auto StatisticsSum = [](const std::vector<double> &v) { return std::accumulate(v.begin(), v.end(), 0.0); };
double StatisticsMean(const std::vector<double> &v)
{
if (v.empty())
return 0.0;
return StatisticsSum(v) * (1.0 / v.size());
}
double StatisticsMedian(const std::vector<double> &v)
{
if (v.size() < 3)
return StatisticsMean(v);
std::vector<double> copy(v);
auto center = copy.begin() + v.size() / 2;
std::nth_element(copy.begin(), center, copy.end());
// Did we have an odd number of samples? If yes, then center is the median.
// If no, then we are looking for the average of center and the value
// before it.
if (v.size() % 2 == 1)
return *center;
auto center2 = copy.begin() + v.size() / 2 - 1;
std::nth_element(copy.begin(), center2, copy.end());
return (*center + *center2) / 2.0;
}
// Return the sum of the squares of this sample set
auto SumSquares = [](const std::vector<double> &v) { return std::inner_product(v.begin(), v.end(), v.begin(), 0.0); };
auto Sqr = [](const double dat) { return dat * dat; };
auto Sqrt = [](const double dat) {
// Avoid NaN due to imprecision in the calculations
if (dat < 0.0)
return 0.0;
return std::sqrt(dat);
};
double StatisticsStdDev(const std::vector<double> &v)
{
const auto mean = StatisticsMean(v);
if (v.empty())
return mean;
// Sample standard deviation is undefined for n = 1
if (v.size() == 1)
return 0.0;
const double avg_squares = SumSquares(v) * (1.0 / v.size());
return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
}
std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports)
{
typedef BenchmarkReporter::Run Run;
std::vector<Run> results;
auto error_count = std::count_if(reports.begin(), reports.end(), [](Run const &run) { return run.error_occurred; });
if (reports.size() - error_count < 2)
{
// We don't report aggregated data if there are fewer than two non-errored runs.
return results;
}
// Accumulators.
std::vector<double> real_accumulated_time_stat;
std::vector<double> cpu_accumulated_time_stat;
real_accumulated_time_stat.reserve(reports.size());
cpu_accumulated_time_stat.reserve(reports.size());
// All repetitions should be run with the same number of iterations so we
// can take this information from the first benchmark.
const IterationCount run_iterations = reports.front().iterations;
// create stats for user counters
struct CounterStat
{
Counter c;
std::vector<double> s;
};
std::map<std::string, CounterStat> counter_stats;
for (Run const &r : reports)
{
for (auto const &cnt : r.counters)
{
auto it = counter_stats.find(cnt.first);
if (it == counter_stats.end())
{
counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
it = counter_stats.find(cnt.first);
it->second.s.reserve(reports.size());
}
else
{
CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
}
}
}
// Populate the accumulators.
for (Run const &run : reports)
{
CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name());
CHECK_EQ(run_iterations, run.iterations);
if (run.error_occurred)
continue;
real_accumulated_time_stat.emplace_back(run.real_accumulated_time);
cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time);
// user counters
for (auto const &cnt : run.counters)
{
auto it = counter_stats.find(cnt.first);
CHECK_NE(it, counter_stats.end());
it->second.s.emplace_back(cnt.second);
}
}
// Only add the label if it is the same for all runs
std::string report_label = reports[0].report_label;
for (std::size_t i = 1; i < reports.size(); i++)
{
if (reports[i].report_label != report_label)
{
report_label = "";
break;
}
}
const double iteration_rescale_factor = double(reports.size()) / double(run_iterations);
for (const auto &Stat : *reports[0].statistics)
{
// Get the data from the accumulator to BenchmarkReporter::Run's.
Run data;
data.run_name = reports[0].run_name;
data.run_type = BenchmarkReporter::Run::RT_Aggregate;
data.threads = reports[0].threads;
data.repetitions = reports[0].repetitions;
data.repetition_index = Run::no_repetition_index;
data.aggregate_name = Stat.name_;
data.report_label = report_label;
// It is incorrect to say that an aggregate is computed over
// run's iterations, because those iterations already got averaged.
// Similarly, if there are N repetitions with 1 iteration each,
// an aggregate will be computed over N measurements, not 1.
// Thus it is best to simply use the count of separate reports.
data.iterations = reports.size();
data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat);
data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat);
// We will divide these times by data.iterations when reporting, but
// data.iterations is not necessarily the scale of these measurements,
// because in each repetition these timers are summed over all iterations.
// And if we want to say that the stats are over N repetitions and not
// M iterations, we need to multiply these by (N/M).
data.real_accumulated_time *= iteration_rescale_factor;
data.cpu_accumulated_time *= iteration_rescale_factor;
data.time_unit = reports[0].time_unit;
// user counters
for (auto const &kv : counter_stats)
{
// Do *NOT* rescale the custom counters. They are already properly scaled.
const auto uc_stat = Stat.compute_(kv.second.s);
auto c = Counter(uc_stat, counter_stats[kv.first].c.flags, counter_stats[kv.first].c.oneK);
data.counters[kv.first] = c;
}
results.push_back(data);
}
return results;
}
} // end namespace benchmark
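A worked check of the even-sample median branch: for {4, 1, 3, 2} the two nth_element calls select 3 and 2, so the median is their average, 2.5; the sample standard deviation is sqrt(4/3 * (7.5 - 6.25)), about 1.29. A hypothetical driver, assuming statistics.h is available:
#include <cstdio>
#include <vector>
#include "statistics.h" // hypothetical include path
int main()
{
    std::vector<double> v = {4, 1, 3, 2};
    std::printf("mean   %.2f\n", benchmark::StatisticsMean(v));   // 2.50
    std::printf("median %.2f\n", benchmark::StatisticsMedian(v)); // 2.50
    std::printf("stddev %.2f\n", benchmark::StatisticsStdDev(v)); // 1.29
    return 0;
}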

View File

@ -1,37 +0,0 @@
// Copyright 2016 Ismael Jimenez Martinez. All rights reserved.
// Copyright 2017 Roman Lebedev. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef STATISTICS_H_
#define STATISTICS_H_
#include <vector>
#include "benchmark/benchmark.h"
namespace benchmark
{
// Return a vector containing the mean, median and standard deviation
// information (and any user-specified info) for the specified list of
// reports. If 'reports' contains fewer than two non-errored runs, an empty
// vector is returned
std::vector<BenchmarkReporter::Run> ComputeStats(const std::vector<BenchmarkReporter::Run> &reports);
double StatisticsMean(const std::vector<double> &v);
double StatisticsMedian(const std::vector<double> &v);
double StatisticsStdDev(const std::vector<double> &v);
} // end namespace benchmark
#endif // STATISTICS_H_

View File

@ -1,281 +0,0 @@
#include "string_util.h"
#include <array>
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
#include <cerrno>
#endif
#include <cmath>
#include <cstdarg>
#include <cstdio>
#include <memory>
#include <sstream>
#include "arraysize.h"
namespace benchmark
{
namespace
{
// kilo, Mega, Giga, Tera, Peta, Exa, Zetta, Yotta.
const char kBigSIUnits[] = "kMGTPEZY";
// Kibi, Mebi, Gibi, Tebi, Pebi, Exbi, Zebi, Yobi.
const char kBigIECUnits[] = "KMGTPEZY";
// milli, micro, nano, pico, femto, atto, zepto, yocto.
const char kSmallSIUnits[] = "munpfazy";
// We require that all three arrays have the same size.
static_assert(arraysize(kBigSIUnits) == arraysize(kBigIECUnits), "SI and IEC unit arrays must be the same size");
static_assert(arraysize(kSmallSIUnits) == arraysize(kBigSIUnits),
"Small SI and Big SI unit arrays must be the same size");
static const int64_t kUnitsSize = arraysize(kBigSIUnits);
void ToExponentAndMantissa(double val, double thresh, int precision, double one_k, std::string *mantissa,
int64_t *exponent)
{
std::stringstream mantissa_stream;
if (val < 0)
{
mantissa_stream << "-";
val = -val;
}
// Adjust threshold so that it never excludes things which can't be rendered
// in 'precision' digits.
const double adjusted_threshold = std::max(thresh, 1.0 / std::pow(10.0, precision));
const double big_threshold = adjusted_threshold * one_k;
const double small_threshold = adjusted_threshold;
// Values in (simple_threshold, small_threshold) will be printed as-is
const double simple_threshold = 0.01;
if (val > big_threshold)
{
// Positive powers
double scaled = val;
for (size_t i = 0; i < arraysize(kBigSIUnits); ++i)
{
scaled /= one_k;
if (scaled <= big_threshold)
{
mantissa_stream << scaled;
*exponent = i + 1;
*mantissa = mantissa_stream.str();
return;
}
}
mantissa_stream << val;
*exponent = 0;
}
else if (val < small_threshold)
{
// Negative powers
if (val < simple_threshold)
{
double scaled = val;
for (size_t i = 0; i < arraysize(kSmallSIUnits); ++i)
{
scaled *= one_k;
if (scaled >= small_threshold)
{
mantissa_stream << scaled;
*exponent = -static_cast<int64_t>(i + 1);
*mantissa = mantissa_stream.str();
return;
}
}
}
mantissa_stream << val;
*exponent = 0;
}
else
{
mantissa_stream << val;
*exponent = 0;
}
*mantissa = mantissa_stream.str();
}
std::string ExponentToPrefix(int64_t exponent, bool iec)
{
if (exponent == 0)
return "";
const int64_t index = (exponent > 0 ? exponent - 1 : -exponent - 1);
if (index >= kUnitsSize)
return "";
const char *array = (exponent > 0 ? (iec ? kBigIECUnits : kBigSIUnits) : kSmallSIUnits);
if (iec)
return array[index] + std::string("i");
else
return std::string(1, array[index]);
}
std::string ToBinaryStringFullySpecified(double value, double threshold, int precision, double one_k = 1024.0)
{
std::string mantissa;
int64_t exponent;
ToExponentAndMantissa(value, threshold, precision, one_k, &mantissa, &exponent);
return mantissa + ExponentToPrefix(exponent, false);
}
} // end namespace
void AppendHumanReadable(int n, std::string *str)
{
std::stringstream ss;
// Round down to the nearest SI prefix.
ss << ToBinaryStringFullySpecified(n, 1.0, 0);
*str += ss.str();
}
std::string HumanReadableNumber(double n, double one_k)
{
// 1.1 means that figures up to 1.1k should be shown with the next unit down;
// this softens edge effects.
// 1 means that we should show one decimal place of precision.
return ToBinaryStringFullySpecified(n, 1.1, 1, one_k);
}
std::string StrFormatImp(const char *msg, va_list args)
{
// We might need a second shot at this, so pre-emptively make a copy.
va_list args_cp;
va_copy(args_cp, args);
// TODO(ericwf): use std::array for the first attempt to avoid one memory
// allocation; guess what the size might be.
std::array<char, 256> local_buff;
std::size_t size = local_buff.size();
// 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
va_end(args_cp);
// handle empty expansion
if (ret == 0)
return std::string{};
if (static_cast<std::size_t>(ret) < size)
return std::string(local_buff.data());
// We did not provide a long enough buffer on our first attempt; add 1 to
// the required size to account for the null byte.
size = static_cast<std::size_t>(ret) + 1;
auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
// 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
// in the android-ndk
ret = vsnprintf(buff_ptr.get(), size, msg, args);
return std::string(buff_ptr.get());
}
std::string StrFormat(const char *format, ...)
{
va_list args;
va_start(args, format);
std::string tmp = StrFormatImp(format, args);
va_end(args);
return tmp;
}
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
/*
* GNU STL in Android NDK lacks support for some C++11 functions, including
* stoul, stoi, stod. We reimplement them here using C functions strtoul,
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char *strStart = str.c_str();
char *strEnd = const_cast<char *>(strStart);
const unsigned long result = strtoul(strStart, &strEnd, base);
const int strtoulErrno = errno;
/* Restore previous errno */
errno = oldErrno;
/* Check for errors and return */
if (strtoulErrno == ERANGE)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of unsigned long");
}
else if (strEnd == strStart || strtoulErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
int stoi(const std::string &str, size_t *pos, int base)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char *strStart = str.c_str();
char *strEnd = const_cast<char *>(strStart);
const long result = strtol(strStart, &strEnd, base);
const int strtolErrno = errno;
/* Restore previous errno */
errno = oldErrno;
/* Check for errors and return */
if (strtolErrno == ERANGE || long(int(result)) != result)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
}
else if (strEnd == strStart || strtolErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return int(result);
}
double stod(const std::string &str, size_t *pos)
{
/* Record previous errno */
const int oldErrno = errno;
errno = 0;
const char *strStart = str.c_str();
char *strEnd = const_cast<char *>(strStart);
const double result = strtod(strStart, &strEnd);
/* Restore previous errno */
const int strtodErrno = errno;
errno = oldErrno;
/* Check for errors and return */
if (strtodErrno == ERANGE)
{
throw std::out_of_range("stoul failed: " + str + " is outside of range of int");
}
else if (strEnd == strStart || strtodErrno != 0)
{
throw std::invalid_argument("stoul failed: " + str + " is not an integer");
}
if (pos != nullptr)
{
*pos = static_cast<size_t>(strEnd - strStart);
}
return result;
}
#endif
} // end namespace benchmark

View File

@ -1,60 +0,0 @@
#ifndef BENCHMARK_STRING_UTIL_H_
#define BENCHMARK_STRING_UTIL_H_
#include "internal_macros.h"
#include <sstream>
#include <string>
#include <utility>
namespace benchmark
{
void AppendHumanReadable(int n, std::string *str);
std::string HumanReadableNumber(double n, double one_k = 1024.0);
#if defined(__MINGW32__)
__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2)))
#elif defined(__GNUC__)
__attribute__((format(printf, 1, 2)))
#endif
std::string
StrFormat(const char *format, ...);
inline std::ostream &StrCatImp(std::ostream &out) BENCHMARK_NOEXCEPT
{
return out;
}
template <class First, class... Rest> inline std::ostream &StrCatImp(std::ostream &out, First &&f, Rest &&...rest)
{
out << std::forward<First>(f);
return StrCatImp(out, std::forward<Rest>(rest)...);
}
template <class... Args> inline std::string StrCat(Args &&...args)
{
std::ostringstream ss;
StrCatImp(ss, std::forward<Args>(args)...);
return ss.str();
}
#ifdef BENCHMARK_STL_ANDROID_GNUSTL
/*
* GNU STL in Android NDK lacks support for some C++11 functions, including
* stoul, stoi, stod. We reimplement them here using C functions strtoul,
* strtol, strtod. Note that reimplemented functions are in benchmark::
* namespace, not std:: namespace.
*/
unsigned long stoul(const std::string &str, size_t *pos = nullptr, int base = 10);
int stoi(const std::string &str, size_t *pos = nullptr, int base = 10);
double stod(const std::string &str, size_t *pos = nullptr);
#else
using std::stod;
using std::stoi;
using std::stoul;
#endif
} // end namespace benchmark
#endif // BENCHMARK_STRING_UTIL_H_
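StrCat above folds every argument through operator<< on a single ostringstream, which is how the library builds sysfs paths elsewhere in this commit; StrFormat is a printf-style helper. A small sketch with hypothetical values:
#include <cstdio>
#include <string>
#include "string_util.h" // hypothetical include path
int main()
{
    int cpu = 0; // hypothetical CPU index
    std::string p = benchmark::StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
    std::printf("%s\n", p.c_str());
    std::printf("%s\n", benchmark::StrFormat("%.2f", 1.2345).c_str()); // prints 1.23
    return 0;
}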

View File

@ -1,792 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <codecvt>
#include <versionhelpers.h>
#include <windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \
defined BENCHMARK_OS_OPENBSD
#define BENCHMARK_HAS_SYSCTL
#include <sys/sysctl.h>
#endif
#endif
#if defined(BENCHMARK_OS_SOLARIS)
#include <kstat.h>
#endif
#if defined(BENCHMARK_OS_QNX)
#include <sys/syspage.h>
#endif
#include <algorithm>
#include <array>
#include <bitset>
#include <cerrno>
#include <climits>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <iterator>
#include <limits>
#include <locale>
#include <memory>
#include <sstream>
#include "check.h"
#include "cycleclock.h"
#include "internal_macros.h"
#include "log.h"
#include "sleep.h"
#include "string_util.h"
namespace benchmark
{
namespace
{
void PrintImp(std::ostream &out)
{
out << std::endl;
}
template <class First, class... Rest> void PrintImp(std::ostream &out, First &&f, Rest &&...rest)
{
out << std::forward<First>(f);
PrintImp(out, std::forward<Rest>(rest)...);
}
template <class... Args> BENCHMARK_NORETURN void PrintErrorAndDie(Args &&...args)
{
PrintImp(std::cerr, std::forward<Args>(args)...);
std::exit(EXIT_FAILURE);
}
#ifdef BENCHMARK_HAS_SYSCTL
/// ValueUnion - A type used to correctly alias the byte-for-byte output of
/// `sysctl` with the result type it's to be interpreted as.
struct ValueUnion
{
union DataT {
uint32_t uint32_value;
uint64_t uint64_value;
// For correct aliasing of union members from bytes.
char bytes[8];
};
using DataPtr = std::unique_ptr<DataT, decltype(&std::free)>;
// The size of the data union member + its trailing array size.
size_t Size;
DataPtr Buff;
public:
ValueUnion() : Size(0), Buff(nullptr, &std::free)
{
}
explicit ValueUnion(size_t BuffSize)
: Size(sizeof(DataT) + BuffSize), Buff(::new (std::malloc(Size)) DataT(), &std::free)
{
}
ValueUnion(ValueUnion &&other) = default;
explicit operator bool() const
{
return bool(Buff);
}
char *data() const
{
return Buff->bytes;
}
std::string GetAsString() const
{
return std::string(data());
}
int64_t GetAsInteger() const
{
if (Size == sizeof(Buff->uint32_value))
return static_cast<int32_t>(Buff->uint32_value);
else if (Size == sizeof(Buff->uint64_value))
return static_cast<int64_t>(Buff->uint64_value);
BENCHMARK_UNREACHABLE();
}
uint64_t GetAsUnsigned() const
{
if (Size == sizeof(Buff->uint32_value))
return Buff->uint32_value;
else if (Size == sizeof(Buff->uint64_value))
return Buff->uint64_value;
BENCHMARK_UNREACHABLE();
}
template <class T, int N> std::array<T, N> GetAsArray()
{
const int ArrSize = sizeof(T) * N;
CHECK_LE(ArrSize, Size);
std::array<T, N> Arr;
std::memcpy(Arr.data(), data(), ArrSize);
return Arr;
}
};
ValueUnion GetSysctlImp(std::string const &Name)
{
#if defined BENCHMARK_OS_OPENBSD
int mib[2];
mib[0] = CTL_HW;
if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed"))
{
ValueUnion buff(sizeof(int));
if (Name == "hw.ncpu")
{
mib[1] = HW_NCPU;
}
else
{
mib[1] = HW_CPUSPEED;
}
if (sysctl(mib, 2, buff.data(), &buff.Size, nullptr, 0) == -1)
{
return ValueUnion();
}
return buff;
}
return ValueUnion();
#else
size_t CurBuffSize = 0;
if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1)
return ValueUnion();
ValueUnion buff(CurBuffSize);
if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0)
return buff;
return ValueUnion();
#endif
}
BENCHMARK_MAYBE_UNUSED
bool GetSysctl(std::string const &Name, std::string *Out)
{
Out->clear();
auto Buff = GetSysctlImp(Name);
if (!Buff)
return false;
Out->assign(Buff.data());
return true;
}
template <class Tp, class = typename std::enable_if<std::is_integral<Tp>::value>::type>
bool GetSysctl(std::string const &Name, Tp *Out)
{
*Out = 0;
auto Buff = GetSysctlImp(Name);
if (!Buff)
return false;
*Out = static_cast<Tp>(Buff.GetAsUnsigned());
return true;
}
template <class Tp, size_t N> bool GetSysctl(std::string const &Name, std::array<Tp, N> *Out)
{
auto Buff = GetSysctlImp(Name);
if (!Buff)
return false;
*Out = Buff.GetAsArray<Tp, N>();
return true;
}
#endif
template <class ArgT> bool ReadFromFile(std::string const &fname, ArgT *arg)
{
*arg = ArgT();
std::ifstream f(fname.c_str());
if (!f.is_open())
return false;
f >> *arg;
return f.good();
}
bool CpuScalingEnabled(int num_cpus)
{
// We don't have a valid CPU count, so don't even bother.
if (num_cpus <= 0)
return false;
#ifdef BENCHMARK_OS_QNX
return false;
#endif
#ifndef BENCHMARK_OS_WINDOWS
// On Linux, the CPUfreq subsystem exposes CPU information as files on the
// local file system. If reading the exported files fails, then we may not be
// running on Linux, so we silently ignore all the read errors.
std::string res;
for (int cpu = 0; cpu < num_cpus; ++cpu)
{
std::string governor_file = StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor");
if (ReadFromFile(governor_file, &res) && res != "performance")
return true;
}
#endif
return false;
}
int CountSetBitsInCPUMap(std::string Val)
{
auto CountBits = [](std::string Part) {
using CPUMask = std::bitset<sizeof(std::uintptr_t) * CHAR_BIT>;
Part = "0x" + Part;
CPUMask Mask(benchmark::stoul(Part, nullptr, 16));
return static_cast<int>(Mask.count());
};
size_t Pos;
int total = 0;
while ((Pos = Val.find(',')) != std::string::npos)
{
total += CountBits(Val.substr(0, Pos));
Val = Val.substr(Pos + 1);
}
if (!Val.empty())
{
total += CountBits(Val);
}
return total;
}
BENCHMARK_MAYBE_UNUSED
std::vector<CPUInfo::CacheInfo> GetCacheSizesFromKVFS()
{
std::vector<CPUInfo::CacheInfo> res;
std::string dir = "/sys/devices/system/cpu/cpu0/cache/";
int Idx = 0;
while (true)
{
CPUInfo::CacheInfo info;
std::string FPath = StrCat(dir, "index", Idx++, "/");
std::ifstream f(StrCat(FPath, "size").c_str());
if (!f.is_open())
break;
std::string suffix;
f >> info.size;
if (f.fail())
PrintErrorAndDie("Failed while reading file '", FPath, "size'");
if (f.good())
{
f >> suffix;
if (f.bad())
PrintErrorAndDie("Invalid cache size format: failed to read size suffix");
else if (f && suffix != "K")
PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix);
else if (suffix == "K")
info.size *= 1024;
}
if (!ReadFromFile(StrCat(FPath, "type"), &info.type))
PrintErrorAndDie("Failed to read from file ", FPath, "type");
if (!ReadFromFile(StrCat(FPath, "level"), &info.level))
PrintErrorAndDie("Failed to read from file ", FPath, "level");
std::string map_str;
if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str))
PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map");
info.num_sharing = CountSetBitsInCPUMap(map_str);
res.push_back(info);
}
return res;
}
#ifdef BENCHMARK_OS_MACOSX
std::vector<CPUInfo::CacheInfo> GetCacheSizesMacOSX()
{
std::vector<CPUInfo::CacheInfo> res;
std::array<uint64_t, 4> CacheCounts{{0, 0, 0, 0}};
GetSysctl("hw.cacheconfig", &CacheCounts);
struct
{
std::string name;
std::string type;
int level;
uint64_t num_sharing;
} Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]},
{"hw.l1icachesize", "Instruction", 1, CacheCounts[1]},
{"hw.l2cachesize", "Unified", 2, CacheCounts[2]},
{"hw.l3cachesize", "Unified", 3, CacheCounts[3]}};
for (auto &C : Cases)
{
int val;
if (!GetSysctl(C.name, &val))
continue;
CPUInfo::CacheInfo info;
info.type = C.type;
info.level = C.level;
info.size = val;
info.num_sharing = static_cast<int>(C.num_sharing);
res.push_back(std::move(info));
}
return res;
}
#elif defined(BENCHMARK_OS_WINDOWS)
std::vector<CPUInfo::CacheInfo> GetCacheSizesWindows()
{
std::vector<CPUInfo::CacheInfo> res;
DWORD buffer_size = 0;
using PInfo = SYSTEM_LOGICAL_PROCESSOR_INFORMATION;
using CInfo = CACHE_DESCRIPTOR;
using UPtr = std::unique_ptr<PInfo, decltype(&std::free)>;
GetLogicalProcessorInformation(nullptr, &buffer_size);
UPtr buff((PInfo *)malloc(buffer_size), &std::free);
if (!GetLogicalProcessorInformation(buff.get(), &buffer_size))
PrintErrorAndDie("Failed during call to GetLogicalProcessorInformation: ", GetLastError());
PInfo *it = buff.get();
PInfo *end = buff.get() + (buffer_size / sizeof(PInfo));
for (; it != end; ++it)
{
if (it->Relationship != RelationCache)
continue;
using BitSet = std::bitset<sizeof(ULONG_PTR) * CHAR_BIT>;
BitSet B(it->ProcessorMask);
// To prevent duplicates, only consider caches where CPU 0 is specified
if (!B.test(0))
continue;
CInfo *Cache = &it->Cache;
CPUInfo::CacheInfo C;
C.num_sharing = static_cast<int>(B.count());
C.level = Cache->Level;
C.size = Cache->Size;
switch (Cache->Type)
{
case CacheUnified:
C.type = "Unified";
break;
case CacheInstruction:
C.type = "Instruction";
break;
case CacheData:
C.type = "Data";
break;
case CacheTrace:
C.type = "Trace";
break;
default:
C.type = "Unknown";
break;
}
res.push_back(C);
}
return res;
}
#elif BENCHMARK_OS_QNX
std::vector<CPUInfo::CacheInfo> GetCacheSizesQNX()
{
std::vector<CPUInfo::CacheInfo> res;
struct cacheattr_entry *cache = SYSPAGE_ENTRY(cacheattr);
uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr);
int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize;
for (int i = 0; i < num; ++i)
{
CPUInfo::CacheInfo info;
switch (cache->flags)
{
case CACHE_FLAG_INSTR:
info.type = "Instruction";
info.level = 1;
break;
case CACHE_FLAG_DATA:
info.type = "Data";
info.level = 1;
break;
case CACHE_FLAG_UNIFIED:
info.type = "Unified";
info.level = 2;
break;
case CACHE_FLAG_SHARED:
info.type = "Shared";
info.level = 3;
break;
default:
continue;
}
info.size = cache->line_size * cache->num_lines;
info.num_sharing = 0;
res.push_back(std::move(info));
cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize);
}
return res;
}
#endif
std::vector<CPUInfo::CacheInfo> GetCacheSizes()
{
#ifdef BENCHMARK_OS_MACOSX
return GetCacheSizesMacOSX();
#elif defined(BENCHMARK_OS_WINDOWS)
return GetCacheSizesWindows();
#elif defined(BENCHMARK_OS_QNX)
return GetCacheSizesQNX();
#else
return GetCacheSizesFromKVFS();
#endif
}
std::string GetSystemName()
{
#if defined(BENCHMARK_OS_WINDOWS)
std::string str;
const unsigned COUNT = MAX_COMPUTERNAME_LENGTH + 1;
TCHAR hostname[COUNT] = {'\0'};
DWORD DWCOUNT = COUNT;
if (!GetComputerName(hostname, &DWCOUNT))
return std::string("");
#ifndef UNICODE
str = std::string(hostname, DWCOUNT);
#else
// Using wstring_convert, which is deprecated in C++17.
using convert_type = std::codecvt_utf8<wchar_t>;
std::wstring_convert<convert_type, wchar_t> converter;
std::wstring wStr(hostname, DWCOUNT);
str = converter.to_bytes(wStr);
#endif
return str;
#else // defined(BENCHMARK_OS_WINDOWS)
#ifndef HOST_NAME_MAX
#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac doesn't have HOST_NAME_MAX defined
#define HOST_NAME_MAX 64
#elif defined(BENCHMARK_OS_NACL)
#define HOST_NAME_MAX 64
#elif defined(BENCHMARK_OS_QNX)
#define HOST_NAME_MAX 154
#elif defined(BENCHMARK_OS_RTEMS)
#define HOST_NAME_MAX 256
#else
#warning "HOST_NAME_MAX not defined. using 64"
#define HOST_NAME_MAX 64
#endif
#endif // def HOST_NAME_MAX
char hostname[HOST_NAME_MAX];
int retVal = gethostname(hostname, HOST_NAME_MAX);
if (retVal != 0)
return std::string("");
return std::string(hostname);
#endif // Catch-all POSIX block.
}
int GetNumCPUs()
{
#ifdef BENCHMARK_HAS_SYSCTL
int NumCPU = -1;
if (GetSysctl("hw.ncpu", &NumCPU))
return NumCPU;
fprintf(stderr, "Err: %s\n", strerror(errno));
std::exit(EXIT_FAILURE);
#elif defined(BENCHMARK_OS_WINDOWS)
SYSTEM_INFO sysinfo;
// Use memset as opposed to = {} to avoid GCC missing initializer false
// positives.
std::memset(&sysinfo, 0, sizeof(SYSTEM_INFO));
GetSystemInfo(&sysinfo);
return sysinfo.dwNumberOfProcessors; // number of logical
// processors in the current
// group
#elif defined(BENCHMARK_OS_SOLARIS)
// Returns -1 in case of a failure.
int NumCPU = sysconf(_SC_NPROCESSORS_ONLN);
if (NumCPU < 0)
{
fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", strerror(errno));
}
return NumCPU;
#elif defined(BENCHMARK_OS_QNX)
return static_cast<int>(_syspage_ptr->num_cpu);
#else
int NumCPUs = 0;
int MaxID = -1;
std::ifstream f("/proc/cpuinfo");
if (!f.is_open())
{
std::cerr << "failed to open /proc/cpuinfo\n";
return -1;
}
const std::string Key = "processor";
std::string ln;
while (std::getline(f, ln))
{
if (ln.empty())
continue;
size_t SplitIdx = ln.find(':');
std::string value;
#if defined(__s390__)
// s390 has another format in /proc/cpuinfo,
// so it needs to be parsed differently.
if (SplitIdx != std::string::npos)
value = ln.substr(Key.size() + 1, SplitIdx - Key.size() - 1);
#else
if (SplitIdx != std::string::npos)
value = ln.substr(SplitIdx + 1);
#endif
if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0)
{
NumCPUs++;
if (!value.empty())
{
int CurID = benchmark::stoi(value);
MaxID = std::max(CurID, MaxID);
}
}
}
if (f.bad())
{
std::cerr << "Failure reading /proc/cpuinfo\n";
return -1;
}
if (!f.eof())
{
std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return -1;
}
f.close();
if ((MaxID + 1) != NumCPUs)
{
fprintf(stderr, "CPU ID assignments in /proc/cpuinfo seem messed up."
" This is usually caused by a bad BIOS.\n");
}
return NumCPUs;
#endif
BENCHMARK_UNREACHABLE();
}
double GetCPUCyclesPerSecond()
{
#if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN
long freq;
// If the kernel is exporting the tsc frequency use that. There are issues
// where cpuinfo_max_freq cannot be relied on because the BIOS may be
// exporting an invalid p-state (on x86) or p-states may be used to put the
// processor in a new mode (turbo mode). Essentially, those frequencies
// cannot always be relied upon. The same reasons apply to /proc/cpuinfo as
// well.
if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq)
// If CPU scaling is in effect, we want to use the *maximum* frequency,
// not whatever CPU speed some random processor happens to be using now.
|| ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &freq))
{
// The value is in kHz (as the file name suggests). For example, on a
// 2GHz warpstation, the file contains the value "2000000".
return freq * 1000.0;
}
const double error_value = -1;
double bogo_clock = error_value;
std::ifstream f("/proc/cpuinfo");
if (!f.is_open())
{
std::cerr << "failed to open /proc/cpuinfo\n";
return error_value;
}
auto startsWithKey = [](std::string const &Value, std::string const &Key) {
if (Key.size() > Value.size())
return false;
auto Cmp = [&](char X, char Y) { return std::tolower(X) == std::tolower(Y); };
return std::equal(Key.begin(), Key.end(), Value.begin(), Cmp);
};
std::string ln;
while (std::getline(f, ln))
{
if (ln.empty())
continue;
size_t SplitIdx = ln.find(':');
std::string value;
if (SplitIdx != std::string::npos)
value = ln.substr(SplitIdx + 1);
// When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only
// accept positive values. Some environments (virtual machines) report zero,
// which would cause infinite looping in WallTime_Init.
if (startsWithKey(ln, "cpu MHz"))
{
if (!value.empty())
{
double cycles_per_second = benchmark::stod(value) * 1000000.0;
if (cycles_per_second > 0)
return cycles_per_second;
}
}
else if (startsWithKey(ln, "bogomips"))
{
if (!value.empty())
{
bogo_clock = benchmark::stod(value) * 1000000.0;
if (bogo_clock < 0.0)
bogo_clock = error_value;
}
}
}
if (f.bad())
{
std::cerr << "Failure reading /proc/cpuinfo\n";
return error_value;
}
if (!f.eof())
{
std::cerr << "Failed to read to end of /proc/cpuinfo\n";
return error_value;
}
f.close();
// If we found the bogomips clock, but nothing better, we'll use it (but
// we're not happy about it); otherwise, fall back to the rough estimation
// below.
if (bogo_clock >= 0.0)
return bogo_clock;
#elif defined BENCHMARK_HAS_SYSCTL
constexpr auto *FreqStr =
#if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD)
"machdep.tsc_freq";
#elif defined BENCHMARK_OS_OPENBSD
"hw.cpuspeed";
#else
"hw.cpufrequency";
#endif
unsigned long long hz = 0;
#if defined BENCHMARK_OS_OPENBSD
if (GetSysctl(FreqStr, &hz))
return hz * 1000000;
#else
if (GetSysctl(FreqStr, &hz))
return hz;
#endif
fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", FreqStr, strerror(errno));
#elif defined BENCHMARK_OS_WINDOWS
// In NT, read MHz from the registry. If we fail to do so or we're in win9x
// then make a crude estimate.
DWORD data, data_size = sizeof(data);
if (IsWindowsXPOrGreater() &&
SUCCEEDED(SHGetValueA(HKEY_LOCAL_MACHINE, "HARDWARE\\DESCRIPTION\\System\\CentralProcessor\\0", "~MHz", nullptr,
&data, &data_size)))
return static_cast<double>((int64_t)data * (int64_t)(1000 * 1000)); // was mhz
#elif defined(BENCHMARK_OS_SOLARIS)
kstat_ctl_t *kc = kstat_open();
if (!kc)
{
std::cerr << "failed to open /dev/kstat\n";
return -1;
}
kstat_t *ksp = kstat_lookup(kc, (char *)"cpu_info", -1, (char *)"cpu_info0");
if (!ksp)
{
std::cerr << "failed to lookup in /dev/kstat\n";
return -1;
}
if (kstat_read(kc, ksp, NULL) < 0)
{
std::cerr << "failed to read from /dev/kstat\n";
return -1;
}
kstat_named_t *knp = (kstat_named_t *)kstat_data_lookup(ksp, (char *)"current_clock_Hz");
if (!knp)
{
std::cerr << "failed to lookup data in /dev/kstat\n";
return -1;
}
if (knp->data_type != KSTAT_DATA_UINT64)
{
std::cerr << "current_clock_Hz is of unexpected data type: " << knp->data_type << "\n";
return -1;
}
double clock_hz = knp->value.ui64;
kstat_close(kc);
return clock_hz;
#elif defined(BENCHMARK_OS_QNX)
return static_cast<double>((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * (int64_t)(1000 * 1000));
#endif
// If we've fallen through, attempt to roughly estimate the CPU clock rate.
const int estimate_time_ms = 1000;
const auto start_ticks = cycleclock::Now();
SleepForMilliseconds(estimate_time_ms);
return static_cast<double>(cycleclock::Now() - start_ticks);
}
std::vector<double> GetLoadAvg()
{
#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || defined BENCHMARK_OS_MACOSX || \
defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD) && \
!defined(__ANDROID__)
constexpr int kMaxSamples = 3;
std::vector<double> res(kMaxSamples, 0.0);
const int nelem = getloadavg(res.data(), kMaxSamples);
if (nelem < 1)
{
res.clear();
}
else
{
res.resize(nelem);
}
return res;
#else
return {};
#endif
}
} // end namespace
const CPUInfo &CPUInfo::Get()
{
static const CPUInfo *info = new CPUInfo();
return *info;
}
CPUInfo::CPUInfo()
: num_cpus(GetNumCPUs()), cycles_per_second(GetCPUCyclesPerSecond()), caches(GetCacheSizes()),
scaling_enabled(CpuScalingEnabled(num_cpus)), load_avg(GetLoadAvg())
{
}
const SystemInfo &SystemInfo::Get()
{
static const SystemInfo *info = new SystemInfo();
return *info;
}
SystemInfo::SystemInfo() : name(GetSystemName())
{
}
} // end namespace benchmark
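CountSetBitsInCPUMap above splits a shared_cpu_map value on commas and popcounts each hex chunk. A self-contained sketch of the same parsing, with a hypothetical mask ("ff,00000001" has 8 + 1 = 9 bits set):
#include <bitset>
#include <cstdio>
#include <string>
int main()
{
    std::string val = "ff,00000001"; // hypothetical shared_cpu_map contents
    int total = 0;
    while (!val.empty())
    {
        size_t pos = val.find(',');
        std::string part = val.substr(0, pos);
        total += static_cast<int>(std::bitset<64>(std::stoul(part, nullptr, 16)).count());
        val = (pos == std::string::npos) ? "" : val.substr(pos + 1);
    }
    std::printf("%d CPUs share this cache\n", total); // prints 9
    return 0;
}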

View File

@ -1,73 +0,0 @@
#ifndef BENCHMARK_THREAD_MANAGER_H
#define BENCHMARK_THREAD_MANAGER_H
#include <atomic>
#include "benchmark/benchmark.h"
#include "mutex.h"
namespace benchmark
{
namespace internal
{
class ThreadManager
{
public:
explicit ThreadManager(int num_threads) : alive_threads_(num_threads), start_stop_barrier_(num_threads)
{
}
Mutex &GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_)
{
return benchmark_mutex_;
}
bool StartStopBarrier() EXCLUDES(end_cond_mutex_)
{
return start_stop_barrier_.wait();
}
void NotifyThreadComplete() EXCLUDES(end_cond_mutex_)
{
start_stop_barrier_.removeThread();
if (--alive_threads_ == 0)
{
MutexLock lock(end_cond_mutex_);
end_condition_.notify_all();
}
}
void WaitForAllThreads() EXCLUDES(end_cond_mutex_)
{
MutexLock lock(end_cond_mutex_);
end_condition_.wait(lock.native_handle(), [this]() { return alive_threads_ == 0; });
}
public:
struct Result
{
IterationCount iterations = 0;
double real_time_used = 0;
double cpu_time_used = 0;
double manual_time_used = 0;
int64_t complexity_n = 0;
std::string report_label_;
std::string error_message_;
bool has_error_ = false;
UserCounters counters;
};
GUARDED_BY(GetBenchmarkMutex()) Result results;
private:
mutable Mutex benchmark_mutex_;
std::atomic<int> alive_threads_;
Barrier start_stop_barrier_;
Mutex end_cond_mutex_;
Condition end_condition_;
};
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_MANAGER_H
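The shutdown handshake above — each worker decrements alive_threads_ and the last one signals end_condition_ — can be sketched with standard-library primitives (this analogue uses std::mutex and std::condition_variable instead of the library's own Mutex/Barrier wrappers):
#include <atomic>
#include <condition_variable>
#include <mutex>
#include <thread>
#include <vector>
int main()
{
    const int num_threads = 4;
    std::atomic<int> alive(num_threads);
    std::mutex end_mutex;
    std::condition_variable end_condition;
    std::vector<std::thread> workers;
    for (int i = 0; i < num_threads; ++i)
        workers.emplace_back([&] {
            // ... per-thread benchmark work would run here ...
            if (--alive == 0) // the last thread out wakes the waiter
            {
                std::lock_guard<std::mutex> lock(end_mutex);
                end_condition.notify_all();
            }
        });
    std::unique_lock<std::mutex> lock(end_mutex);
    end_condition.wait(lock, [&] { return alive == 0; });
    for (auto &t : workers)
        t.join();
    return 0;
}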

View File

@ -1,104 +0,0 @@
#ifndef BENCHMARK_THREAD_TIMER_H
#define BENCHMARK_THREAD_TIMER_H
#include "check.h"
#include "timers.h"
namespace benchmark
{
namespace internal
{
class ThreadTimer
{
explicit ThreadTimer(bool measure_process_cpu_time_) : measure_process_cpu_time(measure_process_cpu_time_)
{
}
public:
static ThreadTimer Create()
{
return ThreadTimer(/*measure_process_cpu_time_=*/false);
}
static ThreadTimer CreateProcessCpuTime()
{
return ThreadTimer(/*measure_process_cpu_time_=*/true);
}
// Called by each thread
void StartTimer()
{
running_ = true;
start_real_time_ = ChronoClockNow();
start_cpu_time_ = ReadCpuTimerOfChoice();
}
// Called by each thread
void StopTimer()
{
CHECK(running_);
running_ = false;
real_time_used_ += ChronoClockNow() - start_real_time_;
// Floating point error can result in the subtraction producing a negative
// time. Guard against that.
cpu_time_used_ += std::max<double>(ReadCpuTimerOfChoice() - start_cpu_time_, 0);
}
// Called by each thread
void SetIterationTime(double seconds)
{
manual_time_used_ += seconds;
}
bool running() const
{
return running_;
}
// REQUIRES: timer is not running
double real_time_used() const
{
CHECK(!running_);
return real_time_used_;
}
// REQUIRES: timer is not running
double cpu_time_used() const
{
CHECK(!running_);
return cpu_time_used_;
}
// REQUIRES: timer is not running
double manual_time_used() const
{
CHECK(!running_);
return manual_time_used_;
}
private:
double ReadCpuTimerOfChoice() const
{
if (measure_process_cpu_time)
return ProcessCPUUsage();
return ThreadCPUUsage();
}
// Should thread time or process time be measured?
const bool measure_process_cpu_time;
bool running_ = false; // Is the timer running
double start_real_time_ = 0; // If running_
double start_cpu_time_ = 0; // If running_
// Accumulated time so far (does not contain current slice if running_)
double real_time_used_ = 0;
double cpu_time_used_ = 0;
// Manually set iteration time. User sets this with SetIterationTime(seconds).
double manual_time_used_ = 0;
};
} // namespace internal
} // namespace benchmark
#endif // BENCHMARK_THREAD_TIMER_H
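The timer's lifecycle, as the benchmark runner drives it (a hypothetical driver; assumes the header above and its timers.h dependencies):
#include <cstdio>
#include "thread_timer.h" // hypothetical include path
int main()
{
    auto timer = benchmark::internal::ThreadTimer::Create();
    timer.StartTimer();
    volatile double sink = 0;
    for (int i = 0; i < 1000000; ++i)
        sink += i; // measured work
    timer.StopTimer();
    // The accessors CHECK that the timer is stopped before reading.
    std::printf("real %.6f s, cpu %.6f s\n", timer.real_time_used(), timer.cpu_time_used());
    return 0;
}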

View File

@ -1,228 +0,0 @@
// Copyright 2015 Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "timers.h"
#include "internal_macros.h"
#ifdef BENCHMARK_OS_WINDOWS
#include <shlwapi.h>
#undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA
#include <versionhelpers.h>
#include <windows.h>
#else
#include <fcntl.h>
#ifndef BENCHMARK_OS_FUCHSIA
#include <sys/resource.h>
#endif
#include <sys/time.h>
#include <sys/types.h> // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD
#include <unistd.h>
#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX
#include <sys/sysctl.h>
#endif
#if defined(BENCHMARK_OS_MACOSX)
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/thread_act.h>
#endif
#endif
#ifdef BENCHMARK_OS_EMSCRIPTEN
#include <emscripten.h>
#endif
#include <cerrno>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <ctime>
#include <iostream>
#include <limits>
#include <mutex>
#include "check.h"
#include "log.h"
#include "sleep.h"
#include "string_util.h"
namespace benchmark
{
// Suppress unused warnings on helper functions.
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wunused-function"
#endif
namespace
{
#if defined(BENCHMARK_OS_WINDOWS)
double MakeTime(FILETIME const &kernel_time, FILETIME const &user_time)
{
ULARGE_INTEGER kernel;
ULARGE_INTEGER user;
kernel.HighPart = kernel_time.dwHighDateTime;
kernel.LowPart = kernel_time.dwLowDateTime;
user.HighPart = user_time.dwHighDateTime;
user.LowPart = user_time.dwLowDateTime;
return (static_cast<double>(kernel.QuadPart) + static_cast<double>(user.QuadPart)) * 1e-7;
}
#elif !defined(BENCHMARK_OS_FUCHSIA)
double MakeTime(struct rusage const &ru)
{
return (static_cast<double>(ru.ru_utime.tv_sec) + static_cast<double>(ru.ru_utime.tv_usec) * 1e-6 +
static_cast<double>(ru.ru_stime.tv_sec) + static_cast<double>(ru.ru_stime.tv_usec) * 1e-6);
}
#endif
#if defined(BENCHMARK_OS_MACOSX)
double MakeTime(thread_basic_info_data_t const &info)
{
return (static_cast<double>(info.user_time.seconds) + static_cast<double>(info.user_time.microseconds) * 1e-6 +
static_cast<double>(info.system_time.seconds) + static_cast<double>(info.system_time.microseconds) * 1e-6);
}
#endif
#if defined(CLOCK_PROCESS_CPUTIME_ID) || defined(CLOCK_THREAD_CPUTIME_ID)
double MakeTime(struct timespec const &ts)
{
return ts.tv_sec + (static_cast<double>(ts.tv_nsec) * 1e-9);
}
#endif
BENCHMARK_NORETURN static void DiagnoseAndExit(const char *msg)
{
std::cerr << "ERROR: " << msg << std::endl;
std::exit(EXIT_FAILURE);
}
} // end namespace
double ProcessCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE proc = GetCurrentProcess();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, &user_time))
return MakeTime(kernel_time, user_time);
DiagnoseAndExit("GetProccessTimes() failed");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten.
// Use Emscripten-specific API. Reported CPU time would be exactly the
// same as total time, but this is ok because there aren't long-latency
// synchronous system calls in Emscripten.
return emscripten_get_now() * 1e-3;
#elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but it's not available in macOS 10.11. See
// https://github.com/google/benchmark/pull/292
struct timespec spec;
if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0)
return MakeTime(spec);
DiagnoseAndExit("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) failed");
#else
struct rusage ru;
if (getrusage(RUSAGE_SELF, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_SELF, ...) failed");
#endif
}
double ThreadCPUUsage()
{
#if defined(BENCHMARK_OS_WINDOWS)
HANDLE this_thread = GetCurrentThread();
FILETIME creation_time;
FILETIME exit_time;
FILETIME kernel_time;
FILETIME user_time;
GetThreadTimes(this_thread, &creation_time, &exit_time, &kernel_time, &user_time);
return MakeTime(kernel_time, user_time);
#elif defined(BENCHMARK_OS_MACOSX)
// FIXME We want to use clock_gettime, but it's not available in macOS 10.11. See
// https://github.com/google/benchmark/pull/292
mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT;
thread_basic_info_data_t info;
mach_port_t thread = pthread_mach_thread_np(pthread_self());
if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == KERN_SUCCESS)
{
return MakeTime(info);
}
DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info");
#elif defined(BENCHMARK_OS_EMSCRIPTEN)
// Emscripten doesn't support traditional threads
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_RTEMS)
// RTEMS doesn't support CLOCK_THREAD_CPUTIME_ID. See
// https://github.com/RTEMS/rtems/blob/master/cpukit/posix/src/clockgettime.c
return ProcessCPUUsage();
#elif defined(BENCHMARK_OS_SOLARIS)
struct rusage ru;
if (getrusage(RUSAGE_LWP, &ru) == 0)
return MakeTime(ru);
DiagnoseAndExit("getrusage(RUSAGE_LWP, ...) failed");
#elif defined(CLOCK_THREAD_CPUTIME_ID)
struct timespec ts;
if (clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts) == 0)
return MakeTime(ts);
DiagnoseAndExit("clock_gettime(CLOCK_THREAD_CPUTIME_ID, ...) failed");
#else
#error Per-thread timing is not available on your system.
#endif
}
namespace
{
std::string DateTimeString(bool local)
{
typedef std::chrono::system_clock Clock;
std::time_t now = Clock::to_time_t(Clock::now());
const std::size_t kStorageSize = 128;
char storage[kStorageSize];
std::size_t written;
if (local)
{
#if defined(BENCHMARK_OS_WINDOWS)
written = std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now));
#else
std::tm timeinfo;
::localtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
}
else
{
#if defined(BENCHMARK_OS_WINDOWS)
written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now));
#else
std::tm timeinfo;
::gmtime_r(&now, &timeinfo);
written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo);
#endif
}
CHECK(written < kStorageSize);
((void)written); // prevent unused variable in optimized mode.
return std::string(storage);
}
} // end namespace
std::string LocalDateTimeString()
{
return DateTimeString(true);
}
} // end namespace benchmark
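In the Windows path above, FILETIME counts 100 ns ticks, so MakeTime's 1e-7 factor converts the summed kernel and user tick counts to seconds. A worked conversion with hypothetical tick counts:
#include <cstdint>
#include <cstdio>
int main()
{
    uint64_t kernel_ticks = 10000000; // hypothetical: 1.0 s of kernel time
    uint64_t user_ticks = 15000000;   // hypothetical: 1.5 s of user time
    double seconds = (static_cast<double>(kernel_ticks) + static_cast<double>(user_ticks)) * 1e-7;
    std::printf("%.1f s CPU time\n", seconds); // prints 2.5 s
    return 0;
}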

View File

@ -1,51 +0,0 @@
#ifndef BENCHMARK_TIMERS_H
#define BENCHMARK_TIMERS_H
#include <chrono>
#include <string>
namespace benchmark
{
// Return the CPU usage of the current process
double ProcessCPUUsage();
// Return the CPU usage of the children of the current process
double ChildrenCPUUsage();
// Return the CPU usage of the current thread
double ThreadCPUUsage();
#if defined(HAVE_STEADY_CLOCK)
template <bool HighResIsSteady = std::chrono::high_resolution_clock::is_steady> struct ChooseSteadyClock
{
typedef std::chrono::high_resolution_clock type;
};
template <> struct ChooseSteadyClock<false>
{
typedef std::chrono::steady_clock type;
};
#endif
struct ChooseClockType
{
#if defined(HAVE_STEADY_CLOCK)
typedef ChooseSteadyClock<>::type type;
#else
typedef std::chrono::high_resolution_clock type;
#endif
};
inline double ChronoClockNow()
{
typedef ChooseClockType::type ClockType;
using FpSeconds = std::chrono::duration<double, std::chrono::seconds::period>;
return FpSeconds(ClockType::now().time_since_epoch()).count();
}
std::string LocalDateTimeString();
} // end namespace benchmark
#endif // BENCHMARK_TIMERS_H
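ChronoClockNow above prefers high_resolution_clock only when it is steady, falling back to steady_clock otherwise; usage is a plain double-seconds stopwatch (a hypothetical driver, assuming the header above):
#include <cstdio>
#include "timers.h" // hypothetical include path
int main()
{
    double t0 = benchmark::ChronoClockNow();
    // ... work to be measured ...
    double elapsed = benchmark::ChronoClockNow() - t0;
    std::printf("%.9f s elapsed\n", elapsed);
    return 0;
}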

View File

@ -1,46 +0,0 @@
include(split_list)
set(ASM_TEST_FLAGS "")
check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
if (BENCHMARK_HAS_O3_FLAG)
list(APPEND ASM_TEST_FLAGS -O3)
endif()
check_cxx_compiler_flag(-g0 BENCHMARK_HAS_G0_FLAG)
if (BENCHMARK_HAS_G0_FLAG)
list(APPEND ASM_TEST_FLAGS -g0)
endif()
check_cxx_compiler_flag(-fno-stack-protector BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
if (BENCHMARK_HAS_FNO_STACK_PROTECTOR_FLAG)
list(APPEND ASM_TEST_FLAGS -fno-stack-protector)
endif()
split_list(ASM_TEST_FLAGS)
string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER)
macro(add_filecheck_test name)
cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV})
add_library(${name} OBJECT ${name}.cc)
set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}")
set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s")
add_custom_target(copy_${name} ALL
COMMAND ${PROJECT_SOURCE_DIR}/tools/strip_asm.py
$<TARGET_OBJECTS:${name}>
${ASM_OUTPUT_FILE}
BYPRODUCTS ${ASM_OUTPUT_FILE})
add_dependencies(copy_${name} ${name})
if (NOT ARG_CHECK_PREFIXES)
set(ARG_CHECK_PREFIXES "CHECK")
endif()
foreach(prefix ${ARG_CHECK_PREFIXES})
add_test(NAME run_${name}_${prefix}
COMMAND
${LLVM_FILECHECK_EXE} ${name}.cc
--input-file=${ASM_OUTPUT_FILE}
--check-prefixes=CHECK,CHECK-${ASM_TEST_COMPILER}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR})
endforeach()
endmacro()

View File

@ -1,260 +0,0 @@
# Enable the tests
find_package(Threads REQUIRED)
include(CheckCXXCompilerFlag)
# NOTE: Some tests use `<cassert>` to perform the test. Therefore we must
# strip -DNDEBUG from the default CMake flags in DEBUG mode.
string(TOUPPER "${CMAKE_BUILD_TYPE}" uppercase_CMAKE_BUILD_TYPE)
if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" )
add_definitions( -UNDEBUG )
add_definitions(-DTEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS)
# Also remove /D NDEBUG to avoid MSVC warnings about conflicting defines.
foreach (flags_var_to_scrub
CMAKE_CXX_FLAGS_RELEASE
CMAKE_CXX_FLAGS_RELWITHDEBINFO
CMAKE_CXX_FLAGS_MINSIZEREL
CMAKE_C_FLAGS_RELEASE
CMAKE_C_FLAGS_RELWITHDEBINFO
CMAKE_C_FLAGS_MINSIZEREL)
string (REGEX REPLACE "(^| )[/-]D *NDEBUG($| )" " "
"${flags_var_to_scrub}" "${${flags_var_to_scrub}}")
endforeach()
endif()
check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
set(BENCHMARK_O3_FLAG "")
if (BENCHMARK_HAS_O3_FLAG)
set(BENCHMARK_O3_FLAG "-O3")
endif()
# NOTE: These flags must be added after find_package(Threads REQUIRED) otherwise
# they will break the configuration check.
if (DEFINED BENCHMARK_CXX_LINKER_FLAGS)
list(APPEND CMAKE_EXE_LINKER_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS})
endif()
add_library(output_test_helper STATIC output_test_helper.cc output_test.h)
macro(compile_benchmark_test name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark::benchmark ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_benchmark_test)
macro(compile_benchmark_test_with_main name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark::benchmark_main)
endmacro(compile_benchmark_test_with_main)
macro(compile_output_test name)
add_executable(${name} "${name}.cc" output_test.h)
target_link_libraries(${name} output_test_helper benchmark::benchmark
${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_output_test)
# Demonstration executable
compile_benchmark_test(benchmark_test)
add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(filter_test)
macro(add_filter_test name filter expect)
add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect})
add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect})
endmacro(add_filter_test)
add_filter_test(filter_simple "Foo" 3)
add_filter_test(filter_simple_negative "-Foo" 2)
add_filter_test(filter_suffix "BM_.*" 4)
add_filter_test(filter_suffix_negative "-BM_.*" 1)
add_filter_test(filter_regex_all ".*" 5)
add_filter_test(filter_regex_all_negative "-.*" 0)
add_filter_test(filter_regex_blank "" 5)
add_filter_test(filter_regex_blank_negative "-" 0)
add_filter_test(filter_regex_none "monkey" 0)
add_filter_test(filter_regex_none_negative "-monkey" 5)
add_filter_test(filter_regex_wildcard ".*Foo.*" 3)
add_filter_test(filter_regex_wildcard_negative "-.*Foo.*" 2)
add_filter_test(filter_regex_begin "^BM_.*" 4)
add_filter_test(filter_regex_begin_negative "-^BM_.*" 1)
add_filter_test(filter_regex_begin2 "^N" 1)
add_filter_test(filter_regex_begin2_negative "-^N" 4)
add_filter_test(filter_regex_end ".*Ba$" 1)
add_filter_test(filter_regex_end_negative "-.*Ba$" 4)
compile_benchmark_test(options_test)
add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01)
compile_benchmark_test(basic_test)
add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01)
compile_benchmark_test(diagnostics_test)
add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01)
compile_benchmark_test(skip_with_error_test)
add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01)
compile_benchmark_test(donotoptimize_test)
# Some of the issues with DoNotOptimize only occur when optimization is enabled
check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG)
if (BENCHMARK_HAS_O3_FLAG)
set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3")
endif()
add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01)
compile_benchmark_test(fixture_test)
add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01)
compile_benchmark_test(register_benchmark_test)
add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01)
compile_benchmark_test(map_test)
add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01)
compile_benchmark_test(multiple_ranges_test)
add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01)
compile_benchmark_test_with_main(link_main_test)
add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01)
compile_output_test(reporter_output_test)
add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01)
compile_output_test(templated_fixture_test)
add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01)
compile_output_test(user_counters_test)
add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01)
compile_output_test(internal_threading_test)
add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01)
compile_output_test(report_aggregates_only_test)
add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01)
compile_output_test(display_aggregates_only_test)
add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01)
compile_output_test(user_counters_tabular_test)
add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01)
compile_output_test(user_counters_thousands_test)
add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01)
compile_output_test(memory_manager_test)
add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01)
check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG)
if (BENCHMARK_HAS_CXX03_FLAG)
compile_benchmark_test(cxx03_test)
set_target_properties(cxx03_test
PROPERTIES
CXX_STANDARD 98
CXX_STANDARD_REQUIRED YES)
# libstdc++ provides different definitions within <map> between dialects. When
# LTO is enabled and -Werror is specified, GCC diagnoses this ODR violation,
# causing the test to fail to compile. To prevent this we explicitly disable
# the warning.
check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR)
if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR)
set_target_properties(cxx03_test
PROPERTIES
LINK_FLAGS "-Wno-odr")
endif()
add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01)
endif()
# Attempt to work around flaky test failures when running on Appveyor servers.
if (DEFINED ENV{APPVEYOR})
set(COMPLEXITY_MIN_TIME "0.5")
else()
set(COMPLEXITY_MIN_TIME "0.01")
endif()
compile_output_test(complexity_test)
add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME})
###############################################################################
# GoogleTest Unit Tests
###############################################################################
if (BENCHMARK_ENABLE_GTEST_TESTS)
macro(compile_gtest name)
add_executable(${name} "${name}.cc")
target_link_libraries(${name} benchmark::benchmark
gmock_main ${CMAKE_THREAD_LIBS_INIT})
endmacro(compile_gtest)
macro(add_gtest name)
compile_gtest(${name})
add_test(NAME ${name} COMMAND ${name})
endmacro()
add_gtest(benchmark_gtest)
add_gtest(benchmark_name_gtest)
add_gtest(commandlineflags_gtest)
add_gtest(statistics_gtest)
add_gtest(string_util_gtest)
endif(BENCHMARK_ENABLE_GTEST_TESTS)
###############################################################################
# Assembly Unit Tests
###############################################################################
if (BENCHMARK_ENABLE_ASSEMBLY_TESTS)
if (NOT LLVM_FILECHECK_EXE)
message(FATAL_ERROR "LLVM FileCheck is required when including this file")
endif()
include(AssemblyTests.cmake)
add_filecheck_test(donotoptimize_assembly_test)
add_filecheck_test(state_assembly_test)
add_filecheck_test(clobber_memory_assembly_test)
endif()
###############################################################################
# Code Coverage Configuration
###############################################################################
# Add the coverage command(s)
if(CMAKE_BUILD_TYPE)
string(TOLOWER ${CMAKE_BUILD_TYPE} CMAKE_BUILD_TYPE_LOWER)
endif()
# Quote the variable so the comparison also works when no build type was set.
if ("${CMAKE_BUILD_TYPE_LOWER}" MATCHES "coverage")
find_program(GCOV gcov)
find_program(LCOV lcov)
find_program(GENHTML genhtml)
find_program(CTEST ctest)
if (GCOV AND LCOV AND GENHTML AND CTEST AND HAVE_CXX_FLAG_COVERAGE)
add_custom_command(
OUTPUT ${CMAKE_BINARY_DIR}/lcov/index.html
COMMAND ${LCOV} -q -z -d .
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o before.lcov -i
COMMAND ${CTEST} --force-new-ctest-process
COMMAND ${LCOV} -q --no-external -c -b "${CMAKE_SOURCE_DIR}" -d . -o after.lcov
COMMAND ${LCOV} -q -a before.lcov -a after.lcov --output-file final.lcov
COMMAND ${LCOV} -q -r final.lcov "'${CMAKE_SOURCE_DIR}/test/*'" -o final.lcov
COMMAND ${GENHTML} final.lcov -o lcov --demangle-cpp --sort -p "${CMAKE_BINARY_DIR}" -t benchmark
DEPENDS filter_test benchmark_test options_test basic_test fixture_test cxx03_test complexity_test
WORKING_DIRECTORY ${CMAKE_BINARY_DIR}
COMMENT "Running LCOV"
)
add_custom_target(coverage
DEPENDS ${CMAKE_BINARY_DIR}/lcov/index.html
COMMENT "LCOV report at lcov/index.html"
)
message(STATUS "Coverage command added")
else()
if (HAVE_CXX_FLAG_COVERAGE)
set(CXX_FLAG_COVERAGE_MESSAGE supported)
else()
set(CXX_FLAG_COVERAGE_MESSAGE unavailable)
endif()
message(WARNING
"Coverage not available:\n"
" gcov: ${GCOV}\n"
" lcov: ${LCOV}\n"
" genhtml: ${GENHTML}\n"
" ctest: ${CTEST}\n"
" --coverage flag: ${CXX_FLAG_COVERAGE_MESSAGE}")
endif()
endif()

@@ -1,167 +0,0 @@
#include "benchmark/benchmark.h"
#define BASIC_BENCHMARK_TEST(x) BENCHMARK(x)->Arg(8)->Arg(512)->Arg(8192)
void BM_empty(benchmark::State &state)
{
for (auto _ : state)
{
benchmark::DoNotOptimize(state.iterations());
}
}
BENCHMARK(BM_empty);
BENCHMARK(BM_empty)->ThreadPerCpu();
void BM_spin_empty(benchmark::State &state)
{
for (auto _ : state)
{
for (int x = 0; x < state.range(0); ++x)
{
benchmark::DoNotOptimize(x);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_empty);
BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
void BM_spin_pause_before(benchmark::State &state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before);
BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
void BM_spin_pause_during(benchmark::State &state)
{
for (auto _ : state)
{
state.PauseTiming();
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
state.ResumeTiming();
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_during);
BASIC_BENCHMARK_TEST(BM_spin_pause_during)->ThreadPerCpu();
void BM_pause_during(benchmark::State &state)
{
for (auto _ : state)
{
state.PauseTiming();
state.ResumeTiming();
}
}
BENCHMARK(BM_pause_during);
BENCHMARK(BM_pause_during)->ThreadPerCpu();
BENCHMARK(BM_pause_during)->UseRealTime();
BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
void BM_spin_pause_after(benchmark::State &state)
{
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
void BM_spin_pause_before_and_after(benchmark::State &state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
for (int i = 0; i < state.range(0); ++i)
{
benchmark::DoNotOptimize(i);
}
}
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after);
BASIC_BENCHMARK_TEST(BM_spin_pause_before_and_after)->ThreadPerCpu();
void BM_empty_stop_start(benchmark::State &state)
{
for (auto _ : state)
{
}
}
BENCHMARK(BM_empty_stop_start);
BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
void BM_KeepRunning(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0;
assert(iter_count == state.iterations());
while (state.KeepRunning())
{
++iter_count;
}
assert(iter_count == state.iterations());
}
BENCHMARK(BM_KeepRunning);
void BM_KeepRunningBatch(benchmark::State &state)
{
// Choose a prime batch size to avoid evenly dividing max_iterations.
const benchmark::IterationCount batch_size = 101;
benchmark::IterationCount iter_count = 0;
while (state.KeepRunningBatch(batch_size))
{
iter_count += batch_size;
}
assert(state.iterations() == iter_count);
}
BENCHMARK(BM_KeepRunningBatch);
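// The benchmark ranged-for loop must execute exactly max_iterations times.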
void BM_RangedFor(benchmark::State &state)
{
benchmark::IterationCount iter_count = 0;
for (auto _ : state)
{
++iter_count;
}
assert(iter_count == state.max_iterations);
}
BENCHMARK(BM_RangedFor);
// Ensure that StateIterator provides all the necessary typedefs required to
// instantiate std::iterator_traits.
static_assert(std::is_same<typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
typename benchmark::State::StateIterator::value_type>::value,
"");
BENCHMARK_MAIN();

@@ -1,144 +0,0 @@
#include <cstdint>
#include <limits>
#include <vector>
#include "../src/benchmark_register.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
namespace benchmark
{
namespace internal
{
namespace
{
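// AddRange(dst, lo, hi, mult) appends lo, every power of mult strictly between
// lo and hi, and hi itself; negative sub-ranges are built from mirrored powers,
// and -1, 0, 1 are included wherever they fall inside [lo, hi]. The tests below
// pin down that contract.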
TEST(AddRangeTest, Simple)
{
std::vector<int> dst;
AddRange(&dst, 1, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Simple64)
{
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(1), static_cast<int64_t>(2), 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2));
}
TEST(AddRangeTest, Advanced)
{
std::vector<int> dst;
AddRange(&dst, 5, 15, 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, Advanced64)
{
std::vector<int64_t> dst;
AddRange(&dst, static_cast<int64_t>(5), static_cast<int64_t>(15), 2);
EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
}
TEST(AddRangeTest, FullRange8)
{
std::vector<int8_t> dst;
AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
}
TEST(AddRangeTest, FullRange64)
{
std::vector<int64_t> dst;
AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
EXPECT_THAT(dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1152921504606846976LL, 9223372036854775807LL));
}
TEST(AddRangeTest, NegativeRanges)
{
std::vector<int> dst;
AddRange(&dst, -8, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
}
TEST(AddRangeTest, StrictlyNegative)
{
std::vector<int> dst;
AddRange(&dst, -8, -1, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
}
TEST(AddRangeTest, SymmetricNegativeRanges)
{
std::vector<int> dst;
AddRange(&dst, -8, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
}
TEST(AddRangeTest, SymmetricNegativeRangesOddMult)
{
std::vector<int> dst;
AddRange(&dst, -30, 32, 5);
EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
}
TEST(AddRangeTest, NegativeRangesAsymmetric)
{
std::vector<int> dst;
AddRange(&dst, -3, 5, 2);
EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
}
TEST(AddRangeTest, NegativeRangesLargeStep)
{
// Always include -1, 0, 1 when crossing zero.
std::vector<int> dst;
AddRange(&dst, -8, 8, 10);
EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
}
TEST(AddRangeTest, ZeroOnlyRange)
{
std::vector<int> dst;
AddRange(&dst, 0, 0, 2);
EXPECT_THAT(dst, testing::ElementsAre(0));
}
TEST(AddRangeTest, NegativeRange64)
{
std::vector<int64_t> dst;
AddRange<int64_t>(&dst, -4, 4, 2);
EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
}
TEST(AddRangeTest, NegativeRangePreservesExistingOrder)
{
// If elements already exist in the destination vector, ensure that adding
// a negative range does not change their ordering.
std::vector<int64_t> dst = {1, 2, 3};
AddRange<int64_t>(&dst, -2, 2, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2));
}
TEST(AddRangeTest, FullNegativeRange64)
{
std::vector<int64_t> dst;
const auto min = std::numeric_limits<int64_t>::min();
const auto max = std::numeric_limits<int64_t>::max();
AddRange(&dst, min, max, 1024);
EXPECT_THAT(dst, testing::ElementsAreArray(std::vector<int64_t>{
min, -1152921504606846976LL, -1125899906842624LL, -1099511627776LL, -1073741824LL, -1048576LL,
-1024LL, -1LL, 0LL, 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, 1125899906842624LL,
1152921504606846976LL, max}));
}
TEST(AddRangeTest, Simple8)
{
std::vector<int8_t> dst;
AddRange<int8_t>(&dst, 1, 8, 2);
EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8));
}
} // namespace
} // namespace internal
} // namespace benchmark

@@ -1,84 +0,0 @@
#include "benchmark/benchmark.h"
#include "gtest/gtest.h"
namespace
{
using namespace benchmark;
using namespace benchmark::internal;
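// BenchmarkName::str() should join the components that were set with '/',
// skipping any that are empty.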
TEST(BenchmarkNameTest, Empty)
{
const auto name = BenchmarkName();
EXPECT_EQ(name.str(), std::string());
}
TEST(BenchmarkNameTest, FunctionName)
{
auto name = BenchmarkName();
name.function_name = "function_name";
EXPECT_EQ(name.str(), "function_name");
}
TEST(BenchmarkNameTest, FunctionNameAndArgs)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4/5";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/5");
}
TEST(BenchmarkNameTest, MinTime)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.args = "some_args:3/4";
name.min_time = "min_time:3.4s";
EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s");
}
TEST(BenchmarkNameTest, Iterations)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.iterations = "iterations:42";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42");
}
TEST(BenchmarkNameTest, Repetitions)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.repetitions = "repetitions:24";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24");
}
TEST(BenchmarkNameTest, TimeType)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.time_type = "hammer_time";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time");
}
TEST(BenchmarkNameTest, Threads)
{
auto name = BenchmarkName();
name.function_name = "function_name";
name.min_time = "min_time:3.4s";
name.threads = "threads:256";
EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256");
}
TEST(BenchmarkNameTest, TestEmptyFunctionName)
{
auto name = BenchmarkName();
name.args = "first:3/second:4";
name.threads = "threads:22";
EXPECT_EQ(name.str(), "first:3/second:4/threads:22");
}
} // end namespace

@@ -1,278 +0,0 @@
#include "benchmark/benchmark.h"
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
#include <limits>
#include <list>
#include <map>
#include <mutex>
#include <set>
#include <sstream>
#include <string>
#include <thread>
#include <utility>
#include <vector>
#if defined(__GNUC__)
#define BENCHMARK_NOINLINE __attribute__((noinline))
#else
#define BENCHMARK_NOINLINE
#endif
namespace
{
int BENCHMARK_NOINLINE Factorial(uint32_t n)
{
return (n == 1) ? 1 : n * Factorial(n - 1);
}
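// Approximates pi via a Leibniz-style series; the spurious i == 0 term
// contributes an extra 1 that the final "(pi - 1.0) * 4" removes.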
double CalculatePi(int depth)
{
double pi = 0.0;
for (int i = 0; i < depth; ++i)
{
double numerator = static_cast<double>(((i % 2) * 2) - 1);
double denominator = static_cast<double>((2 * i) - 1);
pi += numerator / denominator;
}
return (pi - 1.0) * 4;
}
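// Despite the name, this fills the set with the sequential values 0..size-1.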
std::set<int64_t> ConstructRandomSet(int64_t size)
{
std::set<int64_t> s;
for (int i = 0; i < size; ++i)
s.insert(s.end(), i);
return s;
}
std::mutex test_vector_mu;
std::vector<int> *test_vector = nullptr;
} // end namespace
static void BM_Factorial(benchmark::State &state)
{
int fac_8 = 0;
for (auto _ : state)
fac_8 = Factorial(8);
// Prevent compiler optimizations by consuming the result in the label.
std::stringstream ss;
ss << fac_8;
state.SetLabel(ss.str());
}
BENCHMARK(BM_Factorial);
BENCHMARK(BM_Factorial)->UseRealTime();
static void BM_CalculatePiRange(benchmark::State &state)
{
double pi = 0.0;
for (auto _ : state)
pi = CalculatePi(static_cast<int>(state.range(0)));
std::stringstream ss;
ss << pi;
state.SetLabel(ss.str());
}
BENCHMARK_RANGE(BM_CalculatePiRange, 1, 1024 * 1024);
static void BM_CalculatePi(benchmark::State &state)
{
static const int depth = 1024;
for (auto _ : state)
{
benchmark::DoNotOptimize(CalculatePi(depth));
}
}
BENCHMARK(BM_CalculatePi)->Threads(8);
BENCHMARK(BM_CalculatePi)->ThreadRange(1, 32);
BENCHMARK(BM_CalculatePi)->ThreadPerCpu();
static void BM_SetInsert(benchmark::State &state)
{
std::set<int64_t> data;
for (auto _ : state)
{
state.PauseTiming();
data = ConstructRandomSet(state.range(0));
state.ResumeTiming();
for (int j = 0; j < state.range(1); ++j)
data.insert(rand());
}
state.SetItemsProcessed(state.iterations() * state.range(1));
state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int));
}
// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower,
// non-timed part of each iteration will make the benchmark take forever.
BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}});
template <typename Container, typename ValueType = typename Container::value_type>
static void BM_Sequential(benchmark::State &state)
{
ValueType v = 42;
for (auto _ : state)
{
Container c;
// Perform exactly range(0) insertions so the counters set below are accurate.
for (int64_t i = state.range(0); i--;)
c.push_back(v);
}
const int64_t items_processed = state.iterations() * state.range(0);
state.SetItemsProcessed(items_processed);
state.SetBytesProcessed(items_processed * sizeof(v));
}
BENCHMARK_TEMPLATE2(BM_Sequential, std::vector<int>, int)->Range(1 << 0, 1 << 10);
BENCHMARK_TEMPLATE(BM_Sequential, std::list<int>)->Range(1 << 0, 1 << 10);
// Test the variadic version of BENCHMARK_TEMPLATE in C++11 and beyond.
#ifdef BENCHMARK_HAS_CXX11
BENCHMARK_TEMPLATE(BM_Sequential, std::vector<int>, int)->Arg(512);
#endif
static void BM_StringCompare(benchmark::State &state)
{
size_t len = static_cast<size_t>(state.range(0));
std::string s1(len, '-');
std::string s2(len, '-');
for (auto _ : state)
benchmark::DoNotOptimize(s1.compare(s2));
}
BENCHMARK(BM_StringCompare)->Range(1, 1 << 20);
static void BM_SetupTeardown(benchmark::State &state)
{
if (state.thread_index == 0)
{
// No need to lock test_vector_mu here as this is running single-threaded.
test_vector = new std::vector<int>();
}
int i = 0;
for (auto _ : state)
{
std::lock_guard<std::mutex> l(test_vector_mu);
if (i % 2 == 0)
test_vector->push_back(i);
else
test_vector->pop_back();
++i;
}
if (state.thread_index == 0)
{
delete test_vector;
}
}
BENCHMARK(BM_SetupTeardown)->ThreadPerCpu();
static void BM_LongTest(benchmark::State &state)
{
double tracker = 0.0;
for (auto _ : state)
{
for (int i = 0; i < state.range(0); ++i)
benchmark::DoNotOptimize(tracker += i);
}
}
BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28);
static void BM_ParallelMemset(benchmark::State &state)
{
int64_t size = state.range(0) / static_cast<int64_t>(sizeof(int));
int thread_size = static_cast<int>(size) / state.threads;
int from = thread_size * state.thread_index;
int to = from + thread_size;
if (state.thread_index == 0)
{
test_vector = new std::vector<int>(static_cast<size_t>(size));
}
for (auto _ : state)
{
for (int i = from; i < to; i++)
{
// No need to lock test_vector_mu as ranges
// do not overlap between threads.
benchmark::DoNotOptimize(test_vector->at(i) = 1);
}
}
if (state.thread_index == 0)
{
delete test_vector;
}
}
BENCHMARK(BM_ParallelMemset)->Arg(10 << 20)->ThreadRange(1, 4);
static void BM_ManualTiming(benchmark::State &state)
{
int64_t slept_for = 0;
int64_t microseconds = state.range(0);
std::chrono::duration<double, std::micro> sleep_duration{static_cast<double>(microseconds)};
for (auto _ : state)
{
auto start = std::chrono::high_resolution_clock::now();
// Simulate some useful workload with a sleep
std::this_thread::sleep_for(std::chrono::duration_cast<std::chrono::nanoseconds>(sleep_duration));
auto end = std::chrono::high_resolution_clock::now();
auto elapsed = std::chrono::duration_cast<std::chrono::duration<double>>(end - start);
state.SetIterationTime(elapsed.count());
slept_for += microseconds;
}
state.SetItemsProcessed(slept_for);
}
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseRealTime();
BENCHMARK(BM_ManualTiming)->Range(1, 1 << 14)->UseManualTime();
#ifdef BENCHMARK_HAS_CXX11
template <class... Args> void BM_with_args(benchmark::State &state, Args &&...)
{
for (auto _ : state)
{
}
}
BENCHMARK_CAPTURE(BM_with_args, int_test, 42, 43, 44);
BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), std::pair<int, double>(42, 3.8));
void BM_non_template_args(benchmark::State &state, int, double)
{
while (state.KeepRunning())
{
}
}
BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0);
#endif // BENCHMARK_HAS_CXX11
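// Each registration below should produce exactly the thread counts asserted
// for its case number.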
static void BM_DenseThreadRanges(benchmark::State &st)
{
switch (st.range(0))
{
case 1:
assert(st.threads == 1 || st.threads == 2 || st.threads == 3);
break;
case 2:
assert(st.threads == 1 || st.threads == 3 || st.threads == 4);
break;
case 3:
assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || st.threads == 14);
break;
default:
assert(false && "Invalid test case number");
}
while (st.KeepRunning())
{
}
}
BENCHMARK(BM_DenseThreadRanges)->Arg(1)->DenseThreadRange(1, 3);
BENCHMARK(BM_DenseThreadRanges)->Arg(2)->DenseThreadRange(1, 4, 2);
BENCHMARK(BM_DenseThreadRanges)->Arg(3)->DenseThreadRange(5, 14, 3);
BENCHMARK_MAIN();
