add jerryscript source code

This commit is contained in:
wgzAIIT
2023-11-20 09:05:58 +08:00
parent d1d846184b
commit 516b8627f7
2062 changed files with 302866 additions and 0 deletions

View File

@@ -0,0 +1,361 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import fnmatch
import json
import logging
import os
import re
import shutil
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
JERRY_CORE = os.path.join(ROOT_DIR, 'jerry-core')
JERRY_PORT = os.path.join(ROOT_DIR, 'jerry-port', 'default')
JERRY_MATH = os.path.join(ROOT_DIR, 'jerry-math')
class Amalgamator(object):
    """Merge C sources and headers into a single amalgamated file.

    Local '#include "x.h"' lines whose target is a known header are replaced
    inline with the contents of that header (recursively); optional '#line'
    directives keep the generated file traceable back to the originals.
    """
    # pylint: disable=too-many-instance-attributes
    # Matches an include directive; group 1 is the opening delimiter
    # (distinguishing local '"' from global '<' includes), group 2 the name.
    _RE_INCLUDE = re.compile(r'\s*#include ("|<)(.*?)("|>)\n$')
    def __init__(self, h_files, extra_includes=(), remove_includes=(), add_lineinfo=False):
        """
        :param h_files: dict mapping header base name -> full path
        :param extra_includes: header names to emit as includes before the body
        :param remove_includes: header names whose include lines are dropped
        :param add_lineinfo: if True, emit '#line' directives into the output
        """
        self._h_files = h_files
        self._extra_includes = extra_includes
        self._remove_includes = remove_includes
        self._add_lineinfo = add_lineinfo
        # Header name recorded by the last '#define BUILTIN_INC_HEADER_NAME'.
        self._last_builtin = None
        # Base names of files already merged (guards against double inclusion).
        self._processed = []
        # Accumulated output lines of the amalgamated file.
        self._output = []
        # The copyright will be loaded from the first input file
        self._copyright = {'lines': [], 'loaded': False}
    def _process_non_include(self, line, file_level):
        """Handle a line that is not an ordinary include directive.

        Recognizes the BUILTIN_INC_HEADER_NAME define/usage pair; any other
        line is appended to the output unchanged.
        """
        # Special case #2: Builtin include header name usage
        if line.strip() == "#include BUILTIN_INC_HEADER_NAME":
            assert self._last_builtin is not None, 'No previous BUILTIN_INC_HEADER_NAME definition'
            logging.debug('[%d] Detected usage of BUILTIN_INC_HEADER_NAME, including: "%s"',
                          file_level, self._last_builtin)
            self.add_file(self._h_files[self._last_builtin])
            # return from the function as we have processed the included file
            return
        # Special case #1: Builtin include header name definition
        if line.startswith('#define BUILTIN_INC_HEADER_NAME '):
            # the line is in this format: #define BUILTIN_INC_HEADER_NAME "<filename>"
            self._last_builtin = line.split('"', 2)[1]
            logging.debug('[%d] Detected definition of BUILTIN_INC_HEADER_NAME: "%s"', file_level, self._last_builtin)
        # the line is not anything special, just push it into the output
        self._output.append(line)
    def _emit_lineinfo(self, line_number, filename):
        """Append a '#line <n> "<file>"' directive if line info is enabled."""
        if not self._add_lineinfo:
            return
        # repr() escapes backslashes (Windows paths) so the path is a valid
        # C string literal; the surrounding quotes of repr are stripped.
        normalized_path = repr(os.path.normpath(filename))[1:-1]
        line_info = '#line %d "%s"\n' % (line_number, normalized_path)
        if self._output and self._output[-1].startswith('#line'):
            # Avoid emitting multiple line infos in sequence, just overwrite the last one
            self._output[-1] = line_info
        else:
            self._output.append(line_info)
    def add_file(self, filename, file_level=0):
        """Merge the contents of *filename* into the output.

        Local includes referring to known headers are expanded recursively;
        the first copyright block encountered is captured once and stripped
        from every file.

        :param filename: path of the file to process
        :param file_level: recursion depth (0 for top-level input files)
        """
        if os.path.basename(filename) in self._processed:
            # Fixed log message: the original read "Tried to to process".
            logging.warning('Tried to process an already processed file: "%s"', filename)
            return
        if not file_level:
            logging.debug('Adding file: "%s"', filename)
        file_level += 1
        # mark the start of the new file in the output
        self._emit_lineinfo(1, filename)
        line_idx = 0
        with open(filename, 'r') as input_file:
            in_copyright = False
            for line in input_file:
                line_idx += 1
                if not in_copyright and line.startswith('/* Copyright '):
                    in_copyright = True
                    if not self._copyright['loaded']:
                        self._copyright['lines'].append(line)
                    continue
                if in_copyright:
                    if not self._copyright['loaded']:
                        self._copyright['lines'].append(line)
                    if line.strip().endswith('*/'):
                        in_copyright = False
                        self._copyright['loaded'] = True
                        # emit a line info so the line numbering can be tracked correctly
                        self._emit_lineinfo(line_idx + 1, filename)
                    continue
                # check if the line is an '#include' line
                match = self._RE_INCLUDE.match(line)
                if not match:
                    # the line is not a header
                    self._process_non_include(line, file_level)
                    continue
                if match.group(1) == '<':
                    # found a "global" include
                    self._output.append(line)
                    continue
                name = match.group(2)
                if name in self._remove_includes:
                    logging.debug('[%d] Removing include line (%s:%d): %s',
                                  file_level, filename, line_idx, line.strip())
                    # emit a line info so the line numbering can be tracked correctly
                    self._emit_lineinfo(line_idx + 1, filename)
                    continue
                if name not in self._h_files:
                    logging.warning('[%d] Include not found (%s:%d): "%s"', file_level, filename, line_idx, name)
                    self._output.append(line)
                    continue
                if name in self._processed:
                    logging.debug('[%d] Already included: "%s"', file_level, name)
                    # emit a line info so the line numbering can be tracked correctly
                    self._emit_lineinfo(line_idx + 1, filename)
                    continue
                logging.debug('[%d] Including: "%s"', file_level, self._h_files[name])
                self.add_file(self._h_files[name], file_level)
                # mark the continuation of the current file in the output
                self._emit_lineinfo(line_idx + 1, filename)
                if not name.endswith('.inc.h'):
                    # if the included file is not a "*.inc.h" file mark it as processed
                    self._processed.append(name)
        file_level -= 1
        if not filename.endswith('.inc.h'):
            self._processed.append(os.path.basename(filename))
    def write_output(self, out_fp):
        """Write the captured copyright, extra includes, and merged body."""
        for line in self._copyright['lines']:
            out_fp.write(line)
        for include in self._extra_includes:
            out_fp.write('#include "%s"\n' % include)
        for line in self._output:
            out_fp.write(line)
def match_files(base_dir, pattern):
    """Yield paths of files under *base_dir* whose name matches *pattern*.

    :param base_dir: directory to search in
    :param pattern: fnmatch-style file pattern to use
    :returns generator: the generator which iterates the matching file names
    """
    for dirpath, _, filenames in os.walk(base_dir):
        for filename in fnmatch.filter(filenames, pattern):
            yield os.path.join(dirpath, filename)
def collect_files(base_dir, pattern):
    """Recursively collect files under *base_dir* matching *pattern*.

    When two files share a base name, the first one found wins and the
    later duplicate is reported with a warning.

    :param base_dir: directory to search in
    :param pattern: file pattern to use
    :returns dictionary: a dictionary file base name -> file path mapping
    """
    mapping = {}
    for path in match_files(base_dir, pattern):
        base_name = os.path.basename(path)
        if base_name not in mapping:
            mapping[base_name] = path
        else:
            logging.warning('Duplicate name detected: "%s" and "%s"', path, mapping[base_name])
    return mapping
def amalgamate(base_dir, input_files=(), output_file=None,
               append_c_files=False, remove_includes=(), extra_includes=(),
               add_lineinfo=False):
    """Amalgamate the sources under *base_dir* into a single output file.

    :param base_dir: directory to collect the .c/.h files from
    :param input_files: Main input source/header files
    :param output_file: Output source/header file
    :param append_c_files: Enable auto inclusion of c files under the base-dir
    :param remove_includes: include names dropped from the sources and from
        the collected file mappings
    :param extra_includes: include names emitted at the top of the output
    :param add_lineinfo: Enable #line macro insertion into the generated sources
    """
    logging.debug('Starting merge with args: %s', json.dumps(locals(), indent=4, sort_keys=True))
    h_files = collect_files(base_dir, '*.h')
    c_files = collect_files(base_dir, '*.c')
    # Removed includes must not be picked up from the file system either.
    for name in remove_includes:
        c_files.pop(name, '')
        h_files.pop(name, '')
    amalgam = Amalgamator(h_files, extra_includes, remove_includes, add_lineinfo)
    for input_file in input_files:
        amalgam.add_file(input_file)
    if append_c_files:
        # if the input file is in the C files list it should be removed to avoid
        # double inclusion of the file
        for input_file in input_files:
            input_name = os.path.basename(input_file)
            c_files.pop(input_name, '')
        # Add the C files in reverse order to make sure that builtins are
        # not at the beginning.
        for fname in sorted(c_files.values(), reverse=True):
            amalgam.add_file(fname)
    with open(output_file, 'w') as output:
        amalgam.write_output(output)
def amalgamate_jerry_core(output_dir):
    """Generate amalgamated jerryscript.c/.h and the config header into *output_dir*."""
    amalgamate(
        base_dir=JERRY_CORE,
        input_files=[
            os.path.join(JERRY_CORE, 'api', 'jerry.c'),
            # Add the global built-in by default to include some common items
            # to avoid problems with common built-in headers
            os.path.join(JERRY_CORE, 'ecma', 'builtin-objects', 'ecma-builtins.c'),
        ],
        output_file=os.path.join(output_dir, 'jerryscript.c'),
        append_c_files=True,
        # 'jerryscript-port.h' was listed twice here; once is sufficient.
        remove_includes=[
            'jerryscript.h',
            'jerryscript-port.h',
            'jerryscript-compiler.h',
            'jerryscript-core.h',
            'jerryscript-debugger.h',
            'jerryscript-debugger-transport.h',
            'jerryscript-snapshot.h',
            'config.h',
        ],
        extra_includes=['jerryscript.h'],
    )
    amalgamate(
        base_dir=JERRY_CORE,
        input_files=[
            os.path.join(JERRY_CORE, 'include', 'jerryscript.h'),
            os.path.join(JERRY_CORE, 'include', 'jerryscript-debugger-transport.h'),
        ],
        output_file=os.path.join(output_dir, 'jerryscript.h'),
        remove_includes=['config.h'],
        extra_includes=['jerryscript-config.h'],
    )
    # The configuration header is shipped verbatim next to the amalgam.
    shutil.copyfile(os.path.join(JERRY_CORE, 'config.h'),
                    os.path.join(output_dir, 'jerryscript-config.h'))
def amalgamate_jerry_port_default(output_dir):
    """Generate the amalgamated default port .c/.h files into *output_dir*."""
    amalgamate(
        base_dir=JERRY_PORT,
        output_file=os.path.join(output_dir, 'jerryscript-port-default.c'),
        append_c_files=True,
        remove_includes=[
            'jerryscript-port.h',
            'jerryscript-port-default.h',
            'jerryscript-debugger.h',
        ],
        extra_includes=[
            'jerryscript.h',
            'jerryscript-port-default.h',
        ],
    )
    amalgamate(
        base_dir=JERRY_PORT,
        input_files=[os.path.join(JERRY_PORT, 'include', 'jerryscript-port-default.h')],
        output_file=os.path.join(output_dir, 'jerryscript-port-default.h'),
        remove_includes=[
            'jerryscript-port.h',
            'jerryscript.h',
        ],
        extra_includes=['jerryscript.h'],
    )
def amalgamate_jerry_math(output_dir):
    """Generate the amalgamated math library source into *output_dir*."""
    amalgamate(
        base_dir=JERRY_MATH,
        output_file=os.path.join(output_dir, 'jerryscript-math.c'),
        append_c_files=True,
    )
    # The public math header is copied unchanged next to the amalgam.
    shutil.copyfile(os.path.join(JERRY_MATH, 'include', 'math.h'),
                    os.path.join(output_dir, 'math.h'))
def main():
    """Parse command line arguments and generate the requested amalgams."""
    parser = argparse.ArgumentParser(description='Generate amalgamated sources.')
    parser.add_argument('--jerry-core', action='store_true',
                        help='amalgamate jerry-core files')
    parser.add_argument('--jerry-port-default', action='store_true',
                        help='amalgamate jerry-port-default files')
    parser.add_argument('--jerry-math', action='store_true',
                        help='amalgamate jerry-math files')
    parser.add_argument('--output-dir', metavar='DIR', default='amalgam',
                        help='output dir (default: %(default)s)')
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='increase verbosity')
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    try:
        os.makedirs(args.output_dir)
    except OSError:
        # Only an already-existing directory is fine; the original code
        # silently swallowed every OSError (e.g. permission problems) here.
        if not os.path.isdir(args.output_dir):
            raise
    if args.jerry_core:
        amalgamate_jerry_core(args.output_dir)
    if args.jerry_port_default:
        amalgamate_jerry_port_default(args.output_dir)
    if args.jerry_math:
        amalgamate_jerry_math(args.output_dir)
if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,26 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install the Debian/Ubuntu packages needed to build and check JerryScript
# (compilers, cmake, doxygen, and the static-analysis/lint tooling).

# Prefix commands with sudo only when not already running as root
# (CI containers are often root and may not have sudo installed).
if [ "$(whoami)" != "root" ]; then
  SUDO=sudo
fi

# ${SUDO} is intentionally unquoted so it expands to nothing for root.
${SUDO} apt-get update -q
${SUDO} apt-get install -q -y \
    make cmake \
    gcc gcc-multilib \
    doxygen \
    cppcheck vera++ python pylint python-serial

View File

@@ -0,0 +1,24 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install the packages needed to cross-compile JerryScript for ARM
# (hard-float cross toolchain) and run the results under QEMU user mode.

# Prefix commands with sudo only when not already running as root.
if [ "$(whoami)" != "root" ]; then
  SUDO=sudo
fi

# ${SUDO} is intentionally unquoted so it expands to nothing for root.
${SUDO} apt-get update -q
${SUDO} apt-get install -q -y \
    gcc-arm-linux-gnueabihf libc6-dev-armhf-cross \
    qemu-user-static

View File

@@ -0,0 +1,17 @@
{
"plugins": [
"@babel/plugin-transform-function-name",
"@babel/plugin-proposal-object-rest-spread",
"@babel/plugin-transform-block-scoping",
"@babel/plugin-transform-destructuring",
"@babel/plugin-transform-block-scoped-functions",
"@babel/plugin-transform-unicode-regex",
"@babel/plugin-transform-sticky-regex",
"@babel/plugin-transform-spread",
"@babel/plugin-transform-parameters",
"@babel/plugin-transform-object-super",
"@babel/plugin-transform-new-target",
"@babel/plugin-transform-literals",
"@babel/plugin-transform-instanceof"
]
}

View File

@@ -0,0 +1,65 @@
# Converting incompatible features with Babel
To run ES6 sources with JerryScript that use unsupported language features, you can use Babel to transpile your code, which will output a semantically equivalent source file, where the unsupported features are replaced with ES5.1 code.
Babel is a JavaScript compiler that is used to convert ES2015+ code into a backward-compatible version. You can find more information [here](https://babeljs.io/).
## Example
```javascript
//Before
const fn = () => 1;
//After conversion
var fn = function fn() {
return 1;
};
```
## Table of Contents
* **[Getting ready](#getting-ready)**
* Installing node.js and npm
* **[Using babel](#using-babel)**
* **[Missing features/Polyfill](#missing-features)**
## Getting ready [](#getting-ready)
### 1. **Node.js and npm**
Start by updating the packages with
`$ sudo apt update`
Install `nodejs` using the apt package manager
`$ sudo apt install nodejs`
Check the version of **node.js** to verify that it was installed
```bash
$ nodejs --version
Output: v8.10.0
```
Next up is installing **npm** with the following command
`$ sudo apt install npm`
Verify installation by typing:
```bash
$ npm --version
Output: 6.10.2
```
### 2. Using babel [](#using-babel)
Assuming you're in the tools/babel folder,
`$ sudo npm install`
After installing the dependencies it is ready to use.
Place the files/directories you want transpiled in the babel folder and run
`$ npm run transpile [name of input file/directory] [(OPTIONAL)name of output file/directory]`
If you want to use the same name, then only give the name once.

View File

@@ -0,0 +1,30 @@
{
"name": "jerryscript",
"description": "Ultra-lightweight JavaScript engine for the Internet of Things.",
"scripts": {
"transpile": "scripty"
},
"repository": {
"type": "git",
"url": "git+https://github.com/jerryscript-project/jerryscript.git"
},
"license": "Apache-2.0",
"devDependencies": {
"@babel/cli": "^7.4.4",
"@babel/core": "^7.4.5",
"@babel/plugin-proposal-object-rest-spread": "^7.5.5",
"@babel/plugin-transform-block-scoped-functions": "^7.2.0",
"@babel/plugin-transform-block-scoping": "^7.4.4",
"@babel/plugin-transform-destructuring": "^7.4.4",
"@babel/plugin-transform-function-name": "^7.4.4",
"@babel/plugin-transform-instanceof": "^7.2.0",
"@babel/plugin-transform-literals": "^7.2.0",
"@babel/plugin-transform-new-target": "^7.4.4",
"@babel/plugin-transform-object-super": "^7.2.0",
"@babel/plugin-transform-parameters": "^7.4.4",
"@babel/plugin-transform-spread": "^7.2.2",
"@babel/plugin-transform-sticky-regex": "^7.2.0",
"@babel/plugin-transform-unicode-regex": "^7.4.4",
"scripty": "^1.9.1"
}
}

View File

@@ -0,0 +1,38 @@
#!/usr/bin/env bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Transpile a file or directory with the locally installed Babel.
#   $1 - input file/directory
#   $2 - optional output file/directory (defaults to overwriting the input)

if [[ "$#" -gt 2 || "$#" -lt 1 ]]; then
  echo "Usage: $0 input [output]"
  echo "* input: name of input directory/file"
  echo "* output: name of output directory/file (same as input if not given)"
  exit 1
fi

if [[ ! -d $1 && ! -f $1 ]]; then
  echo "Error: $1 is not a file or directory"
  exit 1
fi

# babel needs --out-dir for directories and --out-file for single files.
FLAG='--out-file'
if [[ -d $1 ]]; then
  FLAG='--out-dir'
fi

# Quote the path arguments so inputs containing spaces survive word
# splitting (the original invocation passed $1/$2 unquoted).
if [[ "$#" -eq 1 ]]; then
  ./node_modules/.bin/babel "$1" "$FLAG" "$1" --verbose
else
  ./node_modules/.bin/babel "$1" "$FLAG" "$2" --verbose
fi

View File

@@ -0,0 +1,30 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Install the Homebrew packages needed to build and check JerryScript on macOS.
brew update

# Packages to make sure are present (whitespace-separated list).
PKGS="
cmake
cppcheck vera++
"
for pkg in $PKGS
do
  # Install a package only if `brew list` does not already show it.
  if ! ( brew list -1 | grep -q "^${pkg}\$" )
  then
    brew install $pkg
  fi
done

View File

@@ -0,0 +1,300 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import multiprocessing
import os
import shutil
import subprocess
import sys
import settings
def default_toolchain():
    """Return the path of the host-matching CMake toolchain file, or None."""
    # We don't have default toolchain on Windows and os.uname() isn't supported.
    if sys.platform == 'win32':
        return None
    sysname, _, _, _, machine = os.uname()
    candidate = os.path.join(
        settings.PROJECT_DIR, 'cmake',
        'toolchain_%s_%s.cmake' % (sysname.lower(), machine.lower()))
    if os.path.isfile(candidate):
        return candidate
    return None
def get_arguments():
    """Parse the command line and return the populated arguments namespace.

    A separate pre-parser consumes --devhelp first so the main parser knows
    whether developer-only options should be visible in the help text; when
    --devhelp is given, the extended help is printed and the process exits.
    """
    devhelp_preparser = argparse.ArgumentParser(add_help=False)
    devhelp_preparser.add_argument('--devhelp', action='store_true', default=False,
                                   help='show help with all options '
                                        '(including those, which are useful for developers only)')
    devhelp_arguments, args = devhelp_preparser.parse_known_args()
    if devhelp_arguments.devhelp:
        # keep the flag among the remaining arguments for the main parser
        args.append('--devhelp')
    def devhelp(helpstring):
        # Developer-only options are hidden unless --devhelp was given.
        return helpstring if devhelp_arguments.devhelp else argparse.SUPPRESS
    parser = argparse.ArgumentParser(parents=[devhelp_preparser], epilog="""
This tool is a thin wrapper around cmake and make to help build the
project easily. All the real build logic is in the CMakeLists.txt files.
For most of the options, the defaults are also defined there.
""")
    buildgrp = parser.add_argument_group('general build options')
    buildgrp.add_argument('--builddir', metavar='DIR', default=os.path.join(settings.PROJECT_DIR, 'build'),
                          help='specify build directory (default: %(default)s)')
    buildgrp.add_argument('--clean', action='store_true', default=False,
                          help='clean build')
    buildgrp.add_argument('--cmake-param', metavar='OPT', action='append', default=[],
                          help='add custom argument to CMake')
    buildgrp.add_argument('--compile-flag', metavar='OPT', action='append', default=[],
                          help='add custom compile flag')
    buildgrp.add_argument('--build-type', metavar='TYPE', default='MinSizeRel',
                          help='set build type (default: %(default)s)')
    buildgrp.add_argument('--debug', dest='build_type', action='store_const', const='Debug', default=argparse.SUPPRESS,
                          help='debug build (alias for --build-type %(const)s)')
    buildgrp.add_argument('--install', metavar='DIR', nargs='?', default=None, const=False,
                          help='install after build (default: don\'t install; '
                               'default directory if install: OS-specific)')
    buildgrp.add_argument('-j', '--jobs', metavar='N', type=int, default=multiprocessing.cpu_count() + 1,
                          help='number of parallel build jobs (default: %(default)s)')
    buildgrp.add_argument('--link-lib', metavar='OPT', action='append', default=[],
                          help='add custom library to be linked')
    buildgrp.add_argument('--linker-flag', metavar='OPT', action='append', default=[],
                          help='add custom linker flag')
    buildgrp.add_argument('--amalgam', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                          help='enable amalgamated build (%(choices)s)')
    buildgrp.add_argument('--lto', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                          help='enable link-time optimizations (%(choices)s)')
    buildgrp.add_argument('--shared-libs', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                          help='enable building of shared libraries (%(choices)s)')
    buildgrp.add_argument('--strip', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                          help='strip release binaries (%(choices)s)')
    buildgrp.add_argument('--toolchain', metavar='FILE', default=default_toolchain(),
                          help='specify toolchain file (default: %(default)s)')
    buildgrp.add_argument('-v', '--verbose', action='store_const', const='ON',
                          help='increase verbosity')
    compgrp = parser.add_argument_group('optional components')
    compgrp.add_argument('--doctests', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('build doctests (%(choices)s)'))
    compgrp.add_argument('--jerry-cmdline', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='build jerry command line tool (%(choices)s)')
    compgrp.add_argument('--jerry-cmdline-snapshot', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='build snapshot command line tool (%(choices)s)')
    compgrp.add_argument('--jerry-cmdline-test', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('build test version of the jerry command line tool (%(choices)s)'))
    compgrp.add_argument('--libfuzzer', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('build jerry with libfuzzer support (%(choices)s)'))
    compgrp.add_argument('--jerry-ext', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='build jerry-ext (%(choices)s)')
    compgrp.add_argument('--jerry-math', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='build and use jerry-math (%(choices)s)')
    compgrp.add_argument('--jerry-port-default', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='build default jerry port implementation (%(choices)s)')
    compgrp.add_argument('--unittests', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('build unittests (%(choices)s)'))
    coregrp = parser.add_argument_group('jerry-core options')
    coregrp.add_argument('--cpointer-32bit', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable 32 bit compressed pointers (%(choices)s)')
    coregrp.add_argument('--error-messages', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable error messages (%(choices)s)')
    coregrp.add_argument('--external-context', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable external context (%(choices)s)')
    coregrp.add_argument('--jerry-debugger', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable the jerry debugger (%(choices)s)')
    coregrp.add_argument('--js-parser', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable js-parser (%(choices)s)')
    coregrp.add_argument('--line-info', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='provide line info (%(choices)s)')
    coregrp.add_argument('--logging', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable logging (%(choices)s)')
    coregrp.add_argument('--mem-heap', metavar='SIZE', type=int,
                         help='size of memory heap (in kilobytes)')
    coregrp.add_argument('--gc-limit', metavar='SIZE', type=int,
                         help='memory usage limit to trigger garbage collection (in bytes)')
    coregrp.add_argument('--stack-limit', metavar='SIZE', type=int,
                         help='maximum stack usage (in kilobytes)')
    coregrp.add_argument('--gc-mark-limit', metavar='SIZE', type=int,
                         help='maximum depth of recursion during GC mark phase')
    coregrp.add_argument('--mem-stats', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable memory statistics (%(choices)s)'))
    coregrp.add_argument('--mem-stress-test', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable mem-stress test (%(choices)s)'))
    coregrp.add_argument('--profile', metavar='FILE',
                         help='specify profile file')
    coregrp.add_argument('--regexp-strict-mode', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable regexp strict mode (%(choices)s)'))
    coregrp.add_argument('--show-opcodes', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable parser byte-code dumps (%(choices)s)'))
    coregrp.add_argument('--show-regexp-opcodes', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable regexp byte-code dumps (%(choices)s)'))
    coregrp.add_argument('--snapshot-exec', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable executing snapshot files (%(choices)s)')
    coregrp.add_argument('--snapshot-save', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable saving snapshot files (%(choices)s)')
    coregrp.add_argument('--system-allocator', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable system allocator (%(choices)s)')
    coregrp.add_argument('--valgrind', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable Valgrind support (%(choices)s)'))
    coregrp.add_argument('--vm-exec-stop', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help='enable VM execution stopping (%(choices)s)')
    maingrp = parser.add_argument_group('jerry-main options')
    maingrp.add_argument('--link-map', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable the generation of link map for jerry command line tool (%(choices)s)'))
    maingrp.add_argument('--compile-commands', metavar='X', choices=['ON', 'OFF'], type=str.upper,
                         help=devhelp('enable the generation of compile_commands.json (%(choices)s)'))
    arguments = parser.parse_args(args)
    if arguments.devhelp:
        parser.print_help()
        sys.exit(0)
    return arguments
def generate_build_options(arguments):
    """Translate the parsed command line arguments into '-D' CMake defines."""
    # (CMake variable, value) pairs in emission order; falsy values mean
    # "not given on the command line, keep the CMake default".
    option_map = [
        # general build options
        ('CMAKE_BUILD_TYPE', arguments.build_type),
        ('EXTERNAL_COMPILE_FLAGS', ' '.join(arguments.compile_flag)),
        ('EXTERNAL_LINK_LIBS', ' '.join(arguments.link_lib)),
        ('EXTERNAL_LINKER_FLAGS', ' '.join(arguments.linker_flag)),
        ('ENABLE_AMALGAM', arguments.amalgam),
        ('ENABLE_LTO', arguments.lto),
        ('BUILD_SHARED_LIBS', arguments.shared_libs),
        ('ENABLE_STRIP', arguments.strip),
        ('CMAKE_TOOLCHAIN_FILE', arguments.toolchain),
        ('CMAKE_VERBOSE_MAKEFILE', arguments.verbose),
        # optional components
        ('DOCTESTS', arguments.doctests),
        ('JERRY_CMDLINE', arguments.jerry_cmdline),
        ('JERRY_CMDLINE_SNAPSHOT', arguments.jerry_cmdline_snapshot),
        ('JERRY_CMDLINE_TEST', arguments.jerry_cmdline_test),
        ('JERRY_LIBFUZZER', arguments.libfuzzer),
        ('JERRY_EXT', arguments.jerry_ext),
        ('JERRY_MATH', arguments.jerry_math),
        ('JERRY_PORT_DEFAULT', arguments.jerry_port_default),
        ('UNITTESTS', arguments.unittests),
        # jerry-core options
        ('JERRY_CPOINTER_32_BIT', arguments.cpointer_32bit),
        ('JERRY_ERROR_MESSAGES', arguments.error_messages),
        ('JERRY_EXTERNAL_CONTEXT', arguments.external_context),
        ('JERRY_DEBUGGER', arguments.jerry_debugger),
        ('JERRY_PARSER', arguments.js_parser),
        ('JERRY_LINE_INFO', arguments.line_info),
        ('JERRY_LOGGING', arguments.logging),
        ('JERRY_GLOBAL_HEAP_SIZE', arguments.mem_heap),
        ('JERRY_GC_LIMIT', arguments.gc_limit),
        ('JERRY_STACK_LIMIT', arguments.stack_limit),
        ('JERRY_MEM_STATS', arguments.mem_stats),
        ('JERRY_MEM_GC_BEFORE_EACH_ALLOC', arguments.mem_stress_test),
        ('JERRY_PROFILE', arguments.profile),
        ('JERRY_REGEXP_STRICT_MODE', arguments.regexp_strict_mode),
        ('JERRY_PARSER_DUMP_BYTE_CODE', arguments.show_opcodes),
        ('JERRY_REGEXP_DUMP_BYTE_CODE', arguments.show_regexp_opcodes),
        ('JERRY_SNAPSHOT_EXEC', arguments.snapshot_exec),
        ('JERRY_SNAPSHOT_SAVE', arguments.snapshot_save),
        ('JERRY_SYSTEM_ALLOCATOR', arguments.system_allocator),
        ('JERRY_VALGRIND', arguments.valgrind),
        ('JERRY_VM_EXEC_STOP', arguments.vm_exec_stop),
    ]
    build_options = ['-D%s=%s' % (name, value) for name, value in option_map if value]
    # --gc-mark-limit legitimately accepts 0, so test against None explicitly.
    if arguments.gc_mark_limit is not None:
        build_options.append('-DJERRY_GC_MARK_LIMIT=%s' % arguments.gc_mark_limit)
    # jerry-main options
    if arguments.link_map:
        build_options.append('-DENABLE_LINK_MAP=%s' % arguments.link_map)
    if arguments.compile_commands:
        build_options.append('-DENABLE_COMPILE_COMMANDS=%s' % arguments.compile_commands)
    # raw CMake parameters are forwarded untouched at the very end
    if arguments.cmake_param:
        build_options.extend(arguments.cmake_param)
    return build_options
def configure_output_dir(arguments):
    """Resolve the build directory to an absolute path and make sure it exists."""
    builddir = arguments.builddir
    if not os.path.isabs(builddir):
        # Relative build directories live under the project root, not the CWD.
        builddir = os.path.join(settings.PROJECT_DIR, builddir)
        arguments.builddir = builddir
    # For clean builds throw away any previous build tree first.
    if arguments.clean and os.path.exists(builddir):
        shutil.rmtree(builddir)
    if not os.path.exists(builddir):
        os.makedirs(builddir)
def configure_jerry(arguments):
    """Run the CMake configure step and return its exit status."""
    configure_output_dir(arguments)
    # -B/-H spell out the build and source trees (works with older CMake too).
    cmake_cmd = ['cmake', '-B' + arguments.builddir, '-H' + settings.PROJECT_DIR]
    if arguments.install:
        cmake_cmd.append('-DCMAKE_INSTALL_PREFIX=%s' % arguments.install)
    cmake_cmd += generate_build_options(arguments)
    return subprocess.call(cmake_cmd)
def make_jerry(arguments):
    """Run the native build through CMake and return its exit status."""
    make_cmd = ['cmake', '--build', arguments.builddir, '--config', arguments.build_type]
    # MAKEFLAGS is a workaround for CMake < 3.12, which ignores
    # CMAKE_BUILD_PARALLEL_LEVEL.
    build_env = dict(os.environ,
                     CMAKE_BUILD_PARALLEL_LEVEL=str(arguments.jobs),
                     MAKEFLAGS='-j%d' % (arguments.jobs))
    return subprocess.call(make_cmd, env=build_env)
def install_jerry(arguments):
    """Run the install target of the generated build system; return its exit status."""
    # Visual Studio generators name the target 'INSTALL', everything else 'install'.
    if sys.platform == 'win32':
        install_target = 'INSTALL'
    else:
        install_target = 'install'
    make_cmd = ['cmake', '--build', arguments.builddir,
                '--config', arguments.build_type,
                '--target', install_target]
    return subprocess.call(make_cmd)
def print_result(ret):
    """Print a framed success/failure banner for exit code *ret*."""
    separator = '=' * 30
    print(separator)
    if not ret:
        print('Build succeeded!')
    else:
        print('Build failed with exit code: %s' % (ret))
    print(separator)
def main():
    """Entry point: configure, build, and optionally install JerryScript."""
    arguments = get_arguments()
    ret = configure_jerry(arguments)
    if not ret:
        ret = make_jerry(arguments)
        # arguments.install is None unless --install was given on the command line.
        if not ret and arguments.install is not None:
            ret = install_jerry(arguments)
    print_result(ret)
    sys.exit(ret)
if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,47 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [[ "$OSTYPE" == "linux"* ]]; then
CPPCHECK_JOBS=${CPPCHECK_JOBS:=$(nproc)}
elif [[ "$OSTYPE" == "darwin"* ]]; then
CPPCHECK_JOBS=${CPPCHECK_JOBS:=$(sysctl -n hw.ncpu)}
else
CPPCHECK_JOBS=${CPPCHECK_JOBS:=1}
fi
JERRY_CORE_DIRS=`find jerry-core -type d`
JERRY_EXT_DIRS=`find jerry-ext -type d`
JERRY_PORT_DIRS=`find jerry-port -type d`
JERRY_MATH_DIRS=`find jerry-math -type d`
INCLUDE_DIRS=()
for DIR in $JERRY_CORE_DIRS $JERRY_EXT_DIRS $JERRY_PORT_DIRS $JERRY_MATH_DIRS
do
INCLUDE_DIRS=("${INCLUDE_DIRS[@]}" "-I$DIR")
done
cppcheck -j$CPPCHECK_JOBS --force \
--language=c --std=c99 \
--quiet \
--enable=warning,style,performance,portability,information \
--template="{file}:{line}: {severity}({id}): {message}" \
--error-exitcode=1 \
--inline-suppr \
--exitcode-suppressions=tools/cppcheck/suppressions-list \
--suppressions-list=tools/cppcheck/suppressions-list \
"${INCLUDE_DIRS[@]}" \
jerry-core jerry-ext jerry-port jerry-math jerry-main tests/unit-*

View File

@@ -0,0 +1,25 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Run doxygen and keep only its diagnostics: `2>&1 >/dev/null` duplicates
# stderr onto the pipe while discarding stdout, so doxygen.log collects
# warnings/errors only (capped at the first 1000 lines).
doxygen 2>&1 >/dev/null | head -n 1000 | tee doxygen.log

# Any diagnostic output means failure: a non-empty log yields exit code 1.
if [ -s doxygen.log ]
then
  EXIT=1
else
  EXIT=0
fi

rm -f doxygen.log
exit $EXIT

View File

@@ -0,0 +1,91 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import io
import os
import re
import sys
import settings
# Regular expression matching the project's Apache 2.0 license header; the
# \2 backreference keeps the comment marker (#, // or *) consistent across
# every line of the matched header.
LICENSE = re.compile(
    r'((#|//|\*) Copyright .*\n'
    r')+\s?\2\n'
    r'\s?\2 Licensed under the Apache License, Version 2.0 \(the "License"\);\n'
    r'\s?\2 you may not use this file except in compliance with the License.\n'
    r'\s?\2 You may obtain a copy of the License at\n'
    r'\s?\2\n'
    r'\s?\2 http://www.apache.org/licenses/LICENSE-2.0\n'
    r'\s?\2\n'
    r'\s?\2 Unless required by applicable law or agreed to in writing, software\n'
    r'\s?\2 distributed under the License is distributed on an "AS IS" BASIS\n'
    r'\s?\2 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n'
    r'\s?\2 See the License for the specific language governing permissions and\n'
    r'\s?\2 limitations under the License.\n'
)

# Directory trees (relative to the project root) whose files are checked.
INCLUDE_DIRS = [
    'cmake',
    'jerry-core',
    'jerry-ext',
    'jerry-math',
    'jerry-main',
    'jerry-port',
    'targets',
    'tests',
    'tools',
]

# Subtrees exempt from the check (imported / third-party content).
EXCLUDE_DIRS = [
    'targets/esp8266',
    os.path.relpath(settings.TEST262_TEST_SUITE_DIR, settings.PROJECT_DIR),
]

# File extensions that are expected to carry a license header.
EXTENSIONS = [
    '.c',
    '.cpp',
    '.h',
    '.S',
    '.js',
    '.py',
    '.sh',
    '.tcl',
    '.cmake',
]
def main():
    """Scan the configured directory trees and report files whose content
    does not match the expected license header; exit non-zero on failure."""
    failures = 0
    for dname in INCLUDE_DIRS:
        for root, _, files in os.walk(dname):
            # Skip excluded subtrees entirely.
            if root.startswith(tuple(EXCLUDE_DIRS)):
                continue
            for fname in files:
                if not fname.endswith(tuple(EXTENSIONS)):
                    continue
                fpath = os.path.join(root, fname)
                with io.open(fpath, 'r', errors='ignore') as curr_file:
                    content = curr_file.read()
                if not LICENSE.search(content):
                    print('%s: incorrect license' % fpath)
                    failures += 1
    if failures:
        sys.exit(1)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
MAGIC_STRINGS_GEN="tools/gen-magic-strings.py"
MAGIC_STRINGS_INC_H="jerry-core/lit/lit-magic-strings.inc.h"
MAGIC_STRINGS_TEMP=`mktemp lit-magic-strings.inc.h.XXXXXXXXXX`
cp $MAGIC_STRINGS_INC_H $MAGIC_STRINGS_TEMP
$MAGIC_STRINGS_GEN
DIFF_RESULT=$?
if [ $DIFF_RESULT -eq 0 ]
then
diff -q $MAGIC_STRINGS_INC_H $MAGIC_STRINGS_TEMP
DIFF_RESULT=$?
if [ $DIFF_RESULT -ne 0 ]
then
echo -e "\e[1;33m$MAGIC_STRINGS_INC_H must be re-generated. Run $MAGIC_STRINGS_GEN\e[0m"
fi
fi
mv $MAGIC_STRINGS_TEMP $MAGIC_STRINGS_INC_H
exit $DIFF_RESULT

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ANSI color codes for error reporting.
TERM_NORMAL='\033[0m'
TERM_RED='\033[1;31m'

# Bail out with a clear message when pylint is not installed.
pylint --version &>/dev/null
if [ $? -ne 0 ]
then
  echo -e "${TERM_RED}Can't run check-pylint because pylint isn't installed.${TERM_NORMAL}\n"
  exit 1
fi

# Lint every Python script in the tooling and debugger directories with the
# project-wide pylint configuration.
find ./tools ./jerry-debugger -name "*.py" \
    | xargs pylint --rcfile=tools/pylint/pylintrc

View File

@@ -0,0 +1,110 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage

function print_usage
{
  echo "Usage: $0 [--help] [--tolerant] [--gh-actions]"
}

function print_help
{
  echo "$0: Check Signed-off-by message of the latest commit"
  echo ""
  print_usage
  echo ""
  echo "Optional arguments:"
  echo "  --help          print this help message"
  echo "  --tolerant      check the existence of the message only but don't"
  echo "                  require the name and email address to match the author"
  echo "                  of the commit"
  echo "  --gh-actions    perform check in tolerant mode if on GitHub-Actions and not"
  echo "                  checking a pull request, perform strict check otherwise"
  echo ""
  echo "The last line of every commit message must follow the form of:"
  echo "'JerryScript-DCO-1.0-Signed-off-by: NAME EMAIL', where NAME and EMAIL must"
  echo "match the name and email address of the author of the commit (unless in"
  echo "tolerant mode)."
}

# Processing command line

TOLERANT="no"
while [ "$#" -gt 0 ]
do
  if [ "$1" == "--help" ]
  then
    print_help
    exit 0
  elif [ "$1" == "--tolerant" ]
  then
    TOLERANT="yes"
    shift
  elif [ "$1" == "--gh-actions" ]
  then
    # On GitHub Actions, only pull requests get the strict check; other
    # events (e.g. pushes of merge commits) are checked tolerantly.
    if [ "$GITHUB_EVENT_NAME" == "" ]
    then
      echo -e "\e[1;33mWarning! GitHub-Actions-tolerant mode requested but not running on GitHub-Actions! \e[0m"
    elif [ "$GITHUB_EVENT_NAME" == "pull_request" ]
    then
      TOLERANT="no"
    else
      TOLERANT="yes"
    fi
    shift
  else
    print_usage
    exit 1
  fi
done

# Determining latest commit

parent_hashes=(`git show -s --format=%p HEAD | head -1`)

if [ "${#parent_hashes[@]}" -eq 1 ]
then
  commit_hash=`git show -s --format=%h HEAD | head -1`
elif [ "${#parent_hashes[@]}" -eq 2 ]
then
  # HEAD is a merge commit: check the tip of the merged branch instead.
  commit_hash=${parent_hashes[1]}
else
  echo "$0: cannot handle commit with ${#parent_hashes[@]} parents ${parent_hashes[@]}"
  exit 1
fi

# Checking the last line

actual_signed_off_by_line=`git show -s --format=%B $commit_hash | sed '/^$/d' | tr -d '\015' | tail -n 1`

if [ "$TOLERANT" == "no" ]
then
  author_name=`git show -s --format=%an $commit_hash`
  author_email=`git show -s --format=%ae $commit_hash`
  required_signed_off_by_line="JerryScript-DCO-1.0-Signed-off-by: $author_name $author_email"

  if [ "$actual_signed_off_by_line" != "$required_signed_off_by_line" ]
  then
    echo -e "\e[1;33mSigned-off-by message is incorrect. The following line should be at the end of the $commit_hash commit's message: '$required_signed_off_by_line'. \e[0m"
    exit 1
  fi
else
  echo -e "\e[1;33mWarning! The name and email address of the author of the $commit_hash commit is not checked in tolerant mode! \e[0m"

  # Bug fix: the original message expanded $required_signed_off_by_line here,
  # but that variable is only assigned in the strict branch above, so the
  # "expected" line was printed empty. Describe the generic required format
  # instead.
  if echo "$actual_signed_off_by_line" | grep -q -v '^JerryScript-DCO-1.0-Signed-off-by:'
  then
    echo -e "\e[1;33mSigned-off-by message is incorrect. The following line should be at the end of the $commit_hash commit's message: 'JerryScript-DCO-1.0-Signed-off-by: NAME EMAIL'. \e[0m"
    exit 1
  fi
fi

exit 0

View File

@@ -0,0 +1,47 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
if [[ "${TRAVIS_REPO_SLUG}" == "jerryscript-project/jerryscript"
&& ${TRAVIS_BRANCH} == "master"
&& ${TRAVIS_EVENT_TYPE} == "push" ]]
then
git fetch --unshallow
build-wrapper-linux-x86-64 --out-dir bw-output \
./tools/build.py --error-messages=on \
--jerry-cmdline-snapshot=on \
--jerry-debugger=on \
--line-info=on \
--mem-stats=on \
--profile=es.next \
--snapshot-save=on \
--snapshot-exec=on \
--valgrind=on \
--vm-exec-stop=on
sonar-scanner -Dsonar.projectVersion="${TRAVIS_COMMIT}"
else
# SonarQube analysis works only on the master branch.
# Ensure the build works with the options used for the analysis.
./tools/build.py --error-messages=on \
--jerry-cmdline-snapshot=on \
--jerry-debugger=on \
--line-info=on \
--mem-stats=on \
--profile=es.next \
--snapshot-save=on \
--snapshot-exec=on \
--valgrind=on \
--vm-exec-stop=on
fi

View File

@@ -0,0 +1,31 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Collect every C source and header of the project trees; an optional first
# argument adds an extra set of files to check.
JERRY_CORE_FILES=`find ./jerry-core -name "*.c" -or -name "*.h"`
JERRY_EXT_FILES=`find ./jerry-ext -name "*.c" -or -name "*.h"`
JERRY_PORT_FILES=`find ./jerry-port -name "*.c" -or -name "*.h"`
JERRY_MATH_FILES=`find ./jerry-math -name "*.c" -or -name "*.h"`
JERRY_MAIN_FILES=`find ./jerry-main -name "*.c" -or -name "*.h"`
UNIT_TEST_FILES=`find ./tests/unit-* -name "*.c" -or -name "*.h"`

if [ -n "$1" ]
then
  MANUAL_CHECK_FILES=`find $1 -name "*.c" -or -name "*.h"`
fi

# Run the vera++ style checker with the project's rule profile; duplicate
# reports of the same issue are suppressed.
vera++ -r tools/vera++ -p jerry \
       -e --no-duplicate \
       $MANUAL_CHECK_FILES $JERRY_CORE_FILES $JERRY_EXT_FILES $JERRY_PORT_FILES $JERRY_MATH_FILES $JERRY_MAIN_FILES $UNIT_TEST_FILES
View File

@@ -0,0 +1,9 @@
wrongmathcall:tests/unit-math/test-math.inc.h
variableScope:jerry-math/*.c
invalidPointerCast:jerry-math/*.c
ConfigurationNotChecked:jerry-core/ecma/builtin-objects/ecma-builtin-global.inc.h
ConfigurationNotChecked:jerry-core/ecma/builtin-objects/ecma-builtin-helpers-macro-defines.inc.h
ConfigurationNotChecked:jerry-core/ecma/builtin-objects/ecma-builtin-function-prototype.inc.h
ConfigurationNotChecked:jerry-core/ecma/builtin-objects/ecma-builtin-object.inc.h
ConfigurationNotChecked:jerry-core/ecma/builtin-objects/ecma-builtin-helpers-macro-undefs.inc.h

View File

@@ -0,0 +1,180 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import fileinput
import os
import re
import shlex
import sys
class DoctestExtractor(object):
    """
    An extractor to process Markdown files and find doctests inside.
    """

    def __init__(self, outdir, dry):
        """
        :param outdir: path to the directory where to write the found doctests.
        :param dry: if True, don't create the doctest files but print the file
            names only.
        """
        self._outdir = outdir
        self._dry = dry
        # Attributes actually initialized by process()
        self._infile = None        # path of the Markdown file being processed
        self._outname_base = None  # default basename for unnamed doctests
        self._outname_cnt = None   # counter for auto-numbered doctest names

    def _warning(self, message, lineno):
        """
        Print a warning to the standard error.

        :param message: a description of the problem.
        :param lineno: the location that triggered the warning.
        """
        print('%s:%d: %s' % (self._infile, lineno, message), file=sys.stderr)

    def _process_decl(self, params):
        """
        Process a doctest declaration (`[doctest]: # (name="test.c", ...)`).

        :param params: the parameter string of the declaration (the string
            between the parentheses).
        :return: a tuple of a dictionary (of keys and values taken from the
            `params` string) and the line number of the declaration.
        """
        tokens = list(shlex.shlex(params))
        decl = {}
        # Parameters arrive as `key = "value" ,` sequences, i.e. 4 tokens per
        # parameter (the trailing comma is absent after the last one).
        for i in range(0, len(tokens), 4):
            if i + 2 >= len(tokens) or tokens[i + 1] != '=' or (i + 3 < len(tokens) and tokens[i + 3] != ','):
                self._warning('incorrect parameter list for test (key="value", ...)', fileinput.filelineno())
                decl = {}
                break
            decl[tokens[i]] = tokens[i + 2].strip('\'"')
        if 'name' not in decl:
            # No explicit name: fall back to an auto-numbered file name.
            decl['name'] = '%s%d.c' % (self._outname_base, self._outname_cnt)
            self._outname_cnt += 1
        if 'test' not in decl:
            decl['test'] = 'run'
        return decl, fileinput.filelineno()

    def _process_code_start(self):
        """
        Process the beginning of a fenced code block (` ```c `).

        :return: a tuple of a list (of the first line(s) of the doctest) and the
            line number of the start of the code block.
        """
        # The #line directive makes C compiler diagnostics point back at the
        # original Markdown file.
        return ['#line %d "%s"\n' % (fileinput.filelineno() + 1, self._infile)], fileinput.filelineno()

    def _process_code_end(self, decl, code):
        """
        Process the end of a fenced code block (` ``` `).

        :param decl: the dictionary of the declaration parameters.
        :param code: the list of lines of the doctest.
        """
        outname = os.path.join(self._outdir, decl['name']).replace('\\', '/')
        action = decl['test']
        if self._dry:
            print('%s %s' % (action, outname))
        else:
            with open(outname, 'w') as outfile:
                outfile.writelines(code)

    def process(self, infile):
        """
        Find doctests in a Markdown file and process them according to the
        constructor parameters.

        :param infile: path to the input file.
        """
        self._infile = infile
        self._outname_base = os.path.splitext(os.path.basename(infile))[0]
        self._outname_cnt = 1

        # Simple state machine:
        #   TEXT - outside any doctest construct,
        #   NL   - a `[doctest]` declaration was seen, waiting for its code block,
        #   CODE - inside a fenced code block that belongs to a declaration.
        mode = 'TEXT'
        decl, decl_lineno = {}, 0
        code, code_lineno = [], 0

        for line in fileinput.input(infile):
            decl_match = re.match(r'^\[doctest\]:\s+#\s+\((.*)\)\s*$', line)
            nl_match = re.match(r'^\s*$', line)
            start_match = re.match(r'^```c\s*$', line)
            end_match = re.match(r'^```\s*', line)

            if mode == 'TEXT':
                if decl_match is not None:
                    decl, decl_lineno = self._process_decl(decl_match.group(1))
                    mode = 'NL'
            elif mode == 'NL':
                if decl_match is not None:
                    # A second declaration before any code block: warn and
                    # continue with the newer one.
                    self._warning('test without code block', decl_lineno)
                    decl, decl_lineno = self._process_decl(decl_match.group(1))
                elif start_match is not None:
                    code, code_lineno = self._process_code_start()
                    mode = 'CODE'
                elif nl_match is None:
                    # Anything but blank lines between the declaration and
                    # the code block invalidates the declaration.
                    self._warning('test without code block', decl_lineno)
                    mode = 'TEXT'
            elif mode == 'CODE':
                if end_match is not None:
                    self._process_code_end(decl, code)
                    mode = 'TEXT'
                else:
                    code.append(line)

        # Handle constructs left open at end of file.
        if mode == 'NL':
            self._warning('test without code block', decl_lineno)
        elif mode == 'CODE':
            self._warning('unterminated code block', code_lineno)
def _build_parser():
    """Construct the command-line argument parser for the extractor tool."""
    parser = argparse.ArgumentParser(description='Markdown doctest extractor', epilog="""
The tool extracts specially marked fenced C code blocks from the input Markdown files
and writes them to the file system. The annotations recognized by the tool are special
but valid Markdown links/comments that must be added before the fenced code blocks:
`[doctest]: # (name="test.c", ...)`. For now, two parameters are valid:
`name` determines the filename for the extracted code block (overriding the default
auto-numbered naming scheme), and `test` determines the test action to be performed on
the extracted code (valid options are "compile", "link", and the default "run").
""")
    parser.add_argument('-d', '--dir', metavar='NAME', default=os.getcwd(),
                        help='output directory name (default: %(default)s)')
    parser.add_argument('--dry', action='store_true',
                        help='don\'t generate files but print file names that would be generated '
                             'and what test action to perform on them')
    parser.add_argument('file', nargs='+',
                        help='input Markdown file(s)')
    return parser


def main():
    """Entry point: extract doctests from every input Markdown file."""
    options = _build_parser().parse_args()
    extractor = DoctestExtractor(options.dir, options.dry)
    for markdown_path in options.file:
        extractor.process(markdown_path)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,287 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
try:
from configparser import ConfigParser
except ImportError:
from ConfigParser import ConfigParser
import argparse
import fileinput
import json
import os
import re
from settings import PROJECT_DIR
MAGIC_STRINGS_INI = os.path.join(PROJECT_DIR, 'jerry-core', 'lit', 'lit-magic-strings.ini')
MAGIC_STRINGS_INC_H = os.path.join(PROJECT_DIR, 'jerry-core', 'lit', 'lit-magic-strings.inc.h')
def debug_dump(obj):
    """Return a JSON-formatted dump of *obj* for debug output.

    Sets are rendered as lists of repr strings and dictionary keys are
    repr'd, so containers that are not JSON-serializable can still be
    dumped.
    """
    def jsonable(value):
        # Recursively convert containers into JSON-serializable shapes.
        if isinstance(value, (list, tuple)):
            return [jsonable(item) for item in value]
        if isinstance(value, set):
            return [repr(item) for item in value]
        if isinstance(value, dict):
            return {repr(key): jsonable(item) for key, item in value.items()}
        return value
    return json.dumps(jsonable(obj), indent=4)
def read_magic_string_defs(debug=False):
    """Load the magic string definitions from lit-magic-strings.ini.

    The ini file holds a single [LIT_MAGIC_STRINGS] section of
    `LIT_MAGIC_STRING_xxx = "vvv"` entries; the result is a list of
    (reference, value) pairs sorted by value length, then alphabetically.
    """
    ini_parser = ConfigParser()
    # Keep option names case sensitive: they are the magic string IDs.
    ini_parser.optionxform = str
    ini_parser.read(MAGIC_STRINGS_INI)

    definitions = []
    for str_ref, raw_value in ini_parser.items('LIT_MAGIC_STRINGS'):
        definitions.append((str_ref, json.loads(raw_value) if raw_value != '' else ''))
    definitions.sort(key=lambda ref_value: (len(ref_value[1]), ref_value[1]))

    if debug:
        print('debug: magic string definitions: {dump}'
              .format(dump=debug_dump(definitions)))

    return definitions
def extract_magic_string_refs(debug=False):
    """Scan jerry-core sources for references to LIT_MAGIC_STRING_xxx IDs.

    Returns a dictionary mapping each referenced magic string ID to the
    preprocessor guard combinations it is used under, together with the
    (file, line) locations of the references.
    """
    results = {}

    def process_line(fname, lnum, line, guard_stack):
        # Build `results` dictionary as
        #   results['LIT_MAGIC_STRING_xxx'][('!defined (CONFIG_DISABLE_yyy_BUILTIN)', ...)]
        #       = [('zzz.c', 123), ...]
        # meaning that the given literal is referenced under the given guards at
        # the listed (file, line number) locations.
        for str_ref in re.findall('LIT_MAGIC_STRING_[a-zA-Z0-9_]+', line):
            # Skip the framework macros that only look like magic string IDs.
            if str_ref in ['LIT_MAGIC_STRING_DEF',
                           'LIT_MAGIC_STRING_FIRST_STRING_WITH_SIZE',
                           'LIT_MAGIC_STRING_LENGTH_LIMIT',
                           'LIT_MAGIC_STRING__COUNT']:
                continue

            # Flatten the stack of nested conditionals into one sorted guard tuple.
            guard_set = set()
            for guards in guard_stack:
                guard_set.update(guards)
            guard_tuple = tuple(sorted(guard_set))

            if str_ref not in results:
                results[str_ref] = {}
            str_guards = results[str_ref]

            if guard_tuple not in str_guards:
                str_guards[guard_tuple] = []
            file_list = str_guards[guard_tuple]

            file_list.append((fname, lnum))

    def process_guard(guard):
        # Transform `#ifndef MACRO` to `#if !defined (MACRO)` and
        # `#ifdef MACRO` to `#if defined (MACRO)` to enable or-ing/and-ing the
        # conditions later on.
        if guard.startswith('ndef '):
            guard = guard.replace('ndef ', '!defined (', 1) + ')'
        elif guard.startswith('def '):
            guard = guard.replace('def ', 'defined (', 1) + ')'
        return guard

    def process_file(fname):
        # Builds `guard_stack` list for each line of a file as
        #   [['!defined (CONFIG_DISABLE_yyy_BUILTIN)', ...], ...]
        # meaning that all the listed guards (conditionals) have to hold for the
        # line to be kept by the preprocessor.
        guard_stack = []

        for line in fileinput.input(fname):
            if_match = re.match('^ *# *if(.*)', line)
            elif_match = re.match('^ *# *elif(.*)', line)
            else_match = re.match('^ *# *else', line)
            endif_match = re.match('^ *# *endif', line)
            if if_match is not None:
                guard_stack.append([process_guard(if_match.group(1))])
            elif elif_match is not None:
                # An #elif implies the negation of the previous branch.
                guards = guard_stack[-1]
                guards[-1] = '!(%s)' % guards[-1].strip()
                guards.append(process_guard(elif_match.group(1)))
            elif else_match is not None:
                # An #else likewise negates the previous branch.
                guards = guard_stack[-1]
                guards[-1] = '!(%s)' % guards[-1].strip()
            elif endif_match is not None:
                guard_stack.pop()

            lnum = fileinput.filelineno()
            process_line(fname, lnum, line, guard_stack)

        if guard_stack:
            print('warning: {fname}: unbalanced preprocessor conditional '
                  'directives (analysis finished with no closing `#endif` '
                  'for {guard_stack})'
                  .format(fname=fname, guard_stack=guard_stack))

    # The generated header itself is excluded to avoid self-references.
    for root, _, files in os.walk(os.path.join(PROJECT_DIR, 'jerry-core')):
        for fname in files:
            if (fname.endswith('.c') or fname.endswith('.h')) \
               and fname != 'lit-magic-strings.inc.h':
                process_file(os.path.join(root, fname))

    if debug:
        print('debug: magic string references: {dump}'
              .format(dump=debug_dump(results)))

    return results
def calculate_magic_string_guards(defs, uses, debug=False):
    """Attach the most compact guard set to each referenced magic string.

    Unused magic strings are dropped (with a warning). For the rest, if a
    string is referenced under several guard combinations, any guard that
    is a strict superset of another is redundant, because
    `guard or sub_guard == sub_guard`.
    Returns a list of (reference, value, guards) triples.
    """
    extended_defs = []

    for str_ref, str_value in defs:
        if str_ref not in uses:
            print('warning: unused magic string {str_ref}'
                  .format(str_ref=str_ref))
            continue

        candidates = [set(guard_tuple) for guard_tuple in uses[str_ref].keys()]
        # Mark every guard that strictly contains another one as redundant
        # (None); a `<` proper-subset test picks the more generic guard.
        total = len(candidates)
        for i in range(total):
            if candidates[i] is None:
                continue
            for j in range(total):
                if j == i or candidates[j] is None:
                    continue
                if candidates[i] < candidates[j]:
                    candidates[j] = None
        guards = {tuple(sorted(guard)) for guard in candidates if guard is not None}

        extended_defs.append((str_ref, str_value, guards))

    if debug:
        print('debug: magic string definitions (with guards): {dump}'
              .format(dump=debug_dump(extended_defs)))

    return extended_defs
def guards_to_str(guards):
    """Render a set of guard tuples as one C preprocessor condition.

    The guards within a tuple are and-ed, the tuples are or-ed, with a
    backslash-newline continuation between the or-ed alternatives.
    """
    clauses = []
    for guard in sorted(guards):
        clauses.append(' && '.join(part.strip() for part in sorted(guard)))
    return ' \\\n|| '.join(clauses)
def generate_header(gen_file):
    """Print the license banner and an auto-generation notice to gen_file."""
    # NOTE: the banner below must stay a verbatim copy of the project license
    # header; only the generating script and ini file names are substituted.
    header = \
"""/* Copyright JS Foundation and other contributors, http://js.foundation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file is automatically generated by the %s script
 * from %s. Do not edit! */
""" % (os.path.basename(__file__), os.path.basename(MAGIC_STRINGS_INI))
    print(header, file=gen_file)
def generate_magic_string_defs(gen_file, defs):
    """Emit LIT_MAGIC_STRING_DEF entries, wrapping guarded runs in #if/#endif.

    Consecutive entries that share the same guard set are emitted under one
    conditional block; an empty guard tuple () means "unconditional".
    """
    previous_guards = {()}
    for str_ref, str_value, guards in defs:
        if guards != previous_guards:
            # Close the preceding conditional block, if any.
            if () not in previous_guards:
                gen_file.write('#endif\n')
            # Open a new conditional block when this entry is guarded.
            if () not in guards:
                gen_file.write('#if %s\n' % guards_to_str(guards))
        gen_file.write('LIT_MAGIC_STRING_DEF (%s, %s)\n' % (str_ref, json.dumps(str_value)))
        previous_guards = guards
    if () not in previous_guards:
        gen_file.write('#endif\n')
def generate_first_magic_strings(gen_file, defs):
    """Emit LIT_MAGIC_STRING_FIRST_STRING_WITH_SIZE entries for every size.

    For each possible string size, the first definition at least that long
    is selected; when candidates are conditionally compiled, an
    #if/#elif/#else chain covers the alternatives.
    """
    print(file=gen_file) # empty line separator
    # defs are sorted by value length, so the last entry is the longest.
    max_size = len(defs[-1][1])
    for size in range(max_size + 1):
        last_guards = set([()])
        for str_ref, str_value, guards in defs:
            if len(str_value) >= size:
                # Open/continue/close the conditional chain depending on
                # whether the previous and current candidates are guarded.
                if () not in guards and () in last_guards:
                    print('#if {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
                elif () not in guards and () not in last_guards:
                    if guards == last_guards:
                        # Same condition as the previous branch: already covered.
                        continue
                    print('#elif {guards}'.format(guards=guards_to_str(guards)), file=gen_file)
                elif () in guards and () not in last_guards:
                    print('#else', file=gen_file)
                print('LIT_MAGIC_STRING_FIRST_STRING_WITH_SIZE ({size}, {str_ref})'
                      .format(size=size, str_ref=str_ref), file=gen_file)
                if () in guards:
                    # An unconditional entry ends the search for this size.
                    break
                last_guards = guards
        if () not in last_guards:
            print('#endif', file=gen_file)
def main():
    """Regenerate lit-magic-strings.inc.h from the ini definitions."""
    arg_parser = argparse.ArgumentParser(description='lit-magic-strings.inc.h generator')
    arg_parser.add_argument('--debug', action='store_true', help='enable debug output')
    options = arg_parser.parse_args()

    definitions = read_magic_string_defs(debug=options.debug)
    references = extract_magic_string_refs(debug=options.debug)
    guarded_defs = calculate_magic_string_guards(definitions, references, debug=options.debug)

    with open(MAGIC_STRINGS_INC_H, 'w') as out_file:
        generate_header(out_file)
        generate_magic_string_defs(out_file, guarded_defs)
        generate_first_magic_strings(out_file, guarded_defs)


if __name__ == '__main__':
    main()

View File

@@ -0,0 +1,19 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Rebuild the generator, refresh the expected math results, then clean up.
# Bug fix: the original script ignored every exit status and always exited 0,
# so a failing build or generator could silently leave a truncated
# test-math.inc.h behind while the check "succeeded". Propagate failures.
make -C tools/unit-tests build || exit $?
tools/unit-tests/gen-test-math >tests/unit-math/test-math.inc.h
RESULT=$?
make -C tools/unit-tests clean
exit $RESULT

View File

@@ -0,0 +1,803 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import csv
import itertools
import os
import re
import warnings
from gen_c_source import LICENSE, format_code
from settings import PROJECT_DIR
# Unicode character database input files (as published by unicode.org).
UNICODE_DATA_FILE = 'UnicodeData.txt'
SPECIAL_CASING_FILE = 'SpecialCasing.txt'
DERIVED_PROPS_FILE = 'DerivedCoreProperties.txt'
PROP_LIST_FILE = 'PropList.txt'
CASE_FOLDING_FILE = 'CaseFolding.txt'

# Generated C headers: one set for the Basic Multilingual Plane and one
# ("-sup") for the supplementary planes (code points above 0xFFFF).
RANGES_C_SOURCE = os.path.join(PROJECT_DIR, 'jerry-core/lit/lit-unicode-ranges.inc.h')
RANGES_SUP_C_SOURCE = os.path.join(PROJECT_DIR, 'jerry-core/lit/lit-unicode-ranges-sup.inc.h')
CONVERSIONS_C_SOURCE = os.path.join(PROJECT_DIR, 'jerry-core/lit/lit-unicode-conversions.inc.h')
CONVERSIONS_SUP_C_SOURCE = os.path.join(PROJECT_DIR, 'jerry-core/lit/lit-unicode-conversions-sup.inc.h')
FOLDING_C_SOURCE = os.path.join(PROJECT_DIR, 'jerry-core/lit/lit-unicode-folding.inc.h')
FOLDING_SUP_C_SOURCE = os.path.join(PROJECT_DIR, 'jerry-core/lit/lit-unicode-folding-sup.inc.h')

# Plane selector values used by the generators below.
UNICODE_PLANE_TYPE_BASIC = 0
UNICODE_PLANE_TYPE_SUPPLEMENTARY = 1

# common code generation
class UnicodeBasicSource(object):
    # pylint: disable=too-many-instance-attributes
    """Collects and writes the C tables of one generated Unicode header.

    The basic variant targets the Basic Multilingual Plane, so code points
    fit uint16_t and interval lengths fit uint8_t by default.
    """

    def __init__(self, filepath, character_type="uint16_t", length_type="uint8_t"):
        """
        :param filepath: path of the C source file to generate.
        :param character_type: C element type for code point tables.
        :param length_type: C element type for interval length tables.
        """
        self._filepath = filepath
        self._header = [LICENSE, ""]
        self._data = []
        # Appended to generated table names (e.g. "_sup" in the subclass).
        self._table_name_suffix = ""
        self.character_type = character_type
        self.length_type = length_type
        # Element types, name suffixes and doc comments for the three
        # parallel range tables emitted by add_range().
        self._range_table_types = [self.character_type,
                                   self.length_type,
                                   self.character_type]
        self._range_table_names = ["interval_starts",
                                   "interval_lengths",
                                   "chars"]
        self._range_table_descriptions = ["Character interval starting points for",
                                          "Character interval lengths for",
                                          "Non-interval characters for"]
        # Element types and default names for the conversion range tables.
        self._conversion_range_types = [self.character_type,
                                        self.length_type]
        self._conversion_range_names = ["ranges",
                                        "range_lengths"]

    def complete_header(self, completion):
        """Append a final line (plus an empty line) to the file header."""
        self._header.append(completion)
        self._header.append("") # for an extra empty line

    def add_whitepace_range(self, category, categorizer, units):
        # (sic: "whitepace") Add the range tables of a whitespace-like category.
        self.add_range(category, categorizer.create_tables(units))

    def add_range(self, category, tables):
        """Add the three range tables (interval starts/lengths, single chars)
        of one category."""
        idx = 0
        for table in tables:
            self.add_table(table,
                           "/**\n * %s %s.\n */" % (self._range_table_descriptions[idx], category),
                           self._range_table_types[idx],
                           category,
                           self._range_table_names[idx])
            idx += 1

    def add_conversion_range(self, category, tables, descriptions):
        """Add conversion tables using the default table names."""
        self.add_named_conversion_range(category, tables, self._conversion_range_names, descriptions)

    def add_named_conversion_range(self, category, tables, table_names, descriptions):
        """Add conversion tables with explicit per-table names."""
        idx = 0
        for table in tables:
            self.add_table(table,
                           descriptions[idx],
                           self._conversion_range_types[idx],
                           category,
                           table_names[idx])
            idx += 1

    def add_table(self, table, description, table_type, category, table_name):
        """Append one C array definition; empty or all-zero tables are skipped."""
        if table and sum(table) != 0:
            self._data.append(description)
            self._data.append("static const %s lit_unicode_%s%s%s[] JERRY_ATTR_CONST_DATA ="
                              % (table_type,
                                 category.lower(),
                                 "_" + table_name if table_name else "",
                                 self._table_name_suffix))
            self._data.append("{")
            self._data.append(format_code(table, 1, 6 if self._table_name_suffix else 4))
            self._data.append("};")
            self._data.append("") # for an extra empty line

    def generate(self):
        """Write the collected header and table data to the target file."""
        with open(self._filepath, 'w') as generated_source:
            generated_source.write("\n".join(self._header))
            generated_source.write("\n".join(self._data))
class UnicodeSupplementarySource(UnicodeBasicSource):
    """C source writer for supplementary-plane tables (32-bit code points)."""

    def __init__(self, filepath):
        # Supplementary-plane code points need 32-bit storage, and their
        # intervals can be longer than 255 entries, hence the wider types.
        UnicodeBasicSource.__init__(self, filepath,
                                    character_type="uint32_t",
                                    length_type="uint16_t")
        self._table_name_suffix = "_sup"

    def add_whitepace_range(self, category, categorizer, units):
        """Identical to the base implementation; kept to mirror the original API."""
        self.add_range(category, categorizer.create_tables(units))
class UnicodeBasicCategorizer(object):
    """
    Extracts code point sets from Unicode Character Database files and groups
    them into tables, restricted to the Basic Multilingual Plane.
    """
    def __init__(self):
        # Interval lengths are emitted as uint8_t in the generated tables.
        self._length_limit = 0xff
        # ZWNJ (U+200C) and ZWJ (U+200D) are treated as extra ID_Continue
        # characters on top of the UCD data.
        self.extra_id_continue_units = set([0x200C, 0x200D])

    #pylint: disable=no-self-use
    def in_range(self, i):
        """Return True for code points this categorizer handles (non-ASCII BMP)."""
        return i >= 0x80 and i < 0x10000

    def _group_ranges(self, units):
        """
        Convert an increasing list of integers into a range list

        :return: List of ranges.
        """
        # Consecutive values share a constant (value - index) difference, so
        # grouping by that difference yields the maximal consecutive runs.
        for _, group in itertools.groupby(enumerate(units), lambda q: (q[1] - q[0])):
            group = list(group)
            yield group[0][1], group[-1][1]

    def create_tables(self, units):
        """
        Split list of ranges into intervals and single char lists.

        :return: A tuple containing the following info:
            - list of interval starting points
            - list of interval lengths
            - list of single chars
        """
        interval_sps = []
        interval_lengths = []
        chars = []
        for element in self._group_ranges(units):
            interval_length = element[1] - element[0]
            if interval_length == 0:
                # A run of one: goes into the single-character table.
                chars.append(element[0])
            elif interval_length > self._length_limit:
                # Too long for the length type: split into limit-sized chunks;
                # the final chunk takes whatever length remains.
                for i in range(element[0], element[1], self._length_limit + 1):
                    length = min(self._length_limit, element[1] - i)
                    interval_sps.append(i)
                    interval_lengths.append(length)
            else:
                interval_sps.append(element[0])
                interval_lengths.append(interval_length)
        return interval_sps, interval_lengths, chars

    def read_units(self, file_path, categories, subcategories=None):
        """
        Read a Unicode property file (e.g. "DerivedCoreProperties.txt" or
        "PropList.txt") and extract the code point ranges for the given categories.

        :param file_path: Path to the Unicode property data file.
        :param categories: A list of category strings to extract from the Unicode file.
        :param subcategories: A list of subcategory strings to restrict categories.
        :return: A dictionary each string from the :param categories: is a key and for each
                 key list of code points are stored.
        """
        # Create a dictionary in the format: { category[0]: [ ], ..., category[N]: [ ] }
        units = {}
        for category in categories:
            units[category] = []
        # Formats to match:
        #   <HEX> ; <category> #
        #   <HEX>..<HEX> ; <category> # <subcategory>
        matcher = r"(?P<start>[\dA-F]+)(?:\.\.(?P<end>[\dA-F]+))?\s+; (?P<category>[\w]+) # (?P<subcategory>[\w&]{2})"
        with open(file_path, "r") as src_file:
            for line in src_file:
                match = re.match(matcher, line)
                if (match
                        and match.group("category") in categories
                        and (not subcategories or match.group("subcategory") in subcategories)):
                    start = int(match.group("start"), 16)
                    # if no "end" found use the "start"
                    end = int(match.group("end") or match.group("start"), 16)
                    matching_code_points = [
                        code_point for code_point in range(start, end + 1) if self.in_range(code_point)
                    ]
                    units[match.group("category")].extend(matching_code_points)
        return units

    def read_case_mappings(self, unicode_data_file, special_casing_file):
        """
        Read the corresponding unicode values of lower and upper case letters and store these in tables.

        :param unicode_data_file: Contains the default case mappings (one-to-one mappings).
        :param special_casing_file: Contains additional informative case mappings that are either not one-to-one
                                    or which are context-sensitive.
        :return: Upper and lower case mappings.
        """
        lower_case_mapping = {}
        upper_case_mapping = {}
        # Add one-to-one mappings
        with open(unicode_data_file) as unicode_data:
            reader = csv.reader(unicode_data, delimiter=';')
            for line in reader:
                letter_id = int(line[0], 16)
                if not self.in_range(letter_id):
                    continue
                # UnicodeData.txt fields 12/13 hold the simple upper/lower mappings.
                capital_letter = line[12]
                small_letter = line[13]
                if capital_letter:
                    upper_case_mapping[letter_id] = parse_unicode_sequence(capital_letter)
                if small_letter:
                    lower_case_mapping[letter_id] = parse_unicode_sequence(small_letter)
        # Update the conversion tables with the special cases
        with open(special_casing_file) as special_casing:
            reader = csv.reader(special_casing, delimiter=';')
            for line in reader:
                # Skip comment sections and empty lines
                if not line or line[0].startswith('#'):
                    continue
                # Replace '#' character with empty string
                for idx, fragment in enumerate(line):
                    if fragment.find('#') >= 0:
                        line[idx] = ''
                letter_id = int(line[0], 16)
                # Context-sensitive (conditional) mappings are skipped.
                condition_list = line[4]
                if not self.in_range(letter_id) or condition_list:
                    continue
                original_letter = parse_unicode_sequence(line[0])
                small_letter = parse_unicode_sequence(line[1])
                capital_letter = parse_unicode_sequence(line[3])
                # Only store mappings that actually change the character.
                if small_letter != original_letter:
                    lower_case_mapping[letter_id] = small_letter
                if capital_letter != original_letter:
                    upper_case_mapping[letter_id] = capital_letter
        return lower_case_mapping, upper_case_mapping
class UnicodeSupplementaryCategorizer(UnicodeBasicCategorizer):
    """Categorizer for code points outside the Basic Multilingual Plane."""

    def __init__(self):
        UnicodeBasicCategorizer.__init__(self)
        # The extra ECMAScript ID_Continue characters all live in the BMP,
        # so there is nothing to add for the supplementary planes.
        self.extra_id_continue_units = set()
        # Interval lengths are stored as uint16_t in supplementary tables.
        self._length_limit = 0xffff

    def in_range(self, i):
        """Accept only supplementary-plane code points."""
        return i >= 0x10000
def generate_ranges(script_args, plane_type):
    """
    Generate lit-unicode-ranges(-sup).inc.h: identifier and whitespace range tables.

    :param script_args: parsed command line arguments (provides unicode_dir).
    :param plane_type: UNICODE_PLANE_TYPE_BASIC or UNICODE_PLANE_TYPE_SUPPLEMENTARY.
    """
    if plane_type == UNICODE_PLANE_TYPE_SUPPLEMENTARY:
        c_source = UnicodeSupplementarySource(RANGES_SUP_C_SOURCE)
        categorizer = UnicodeSupplementaryCategorizer()
    else:
        c_source = UnicodeBasicSource(RANGES_C_SOURCE)
        categorizer = UnicodeBasicCategorizer()
    header_completion = ["/* This file is automatically generated by the %s script" % os.path.basename(__file__),
                         " * from %s. Do not edit! */" % (DERIVED_PROPS_FILE),
                         ""]
    c_source.complete_header("\n".join(header_completion))
    derived_props_path = os.path.join(script_args.unicode_dir, DERIVED_PROPS_FILE)
    units = categorizer.read_units(derived_props_path, ["ID_Start", "ID_Continue"])
    # ID_Continue is stored as a delta: add the script-specific extras and
    # drop everything that is already covered by ID_Start.
    units["ID_Continue"] = sorted(set(units["ID_Continue"]).union(categorizer.extra_id_continue_units)
                                  - set(units["ID_Start"]))
    for category, unit in units.items():
        c_source.add_range(category, categorizer.create_tables(unit))
    prop_list_path = os.path.join(script_args.unicode_dir, PROP_LIST_FILE)
    # Only the "Zs" (space separator) subset of White_Space is extracted.
    white_space_units = categorizer.read_units(prop_list_path, ["White_Space"], ["Zs"])["White_Space"]
    c_source.add_whitepace_range("White_Space", categorizer, white_space_units)
    c_source.generate()
# functions for unicode conversions
def make_char(hex_val):
    """
    Create a unicode character from a hex value

    :param hex_val: Hex value of the character.
    :return: Unicode character corresponding to the value.
    """
    # Python 2 needs unichr() for non-ASCII code points; on Python 3 the name
    # does not exist and plain chr() already handles the full Unicode range.
    converter = chr
    try:
        converter = unichr  # pylint: disable=undefined-variable
    except NameError:
        pass
    return converter(hex_val)
def parse_unicode_sequence(raw_data):
    """
    Parse unicode sequence from raw data.

    :param raw_data: Space-separated hex code point values (without 0x prefix).
    :return: The parsed unicode sequence as a string.
    """
    # Split on single spaces, skip empty tokens, and convert each remaining
    # hex token to its corresponding unicode character.
    tokens = [token for token in raw_data.split(' ') if token != '']
    return ''.join(make_char(int(token, 16)) for token in tokens)
def extract_ranges(letter_case, reverse_letter_case=None):
    """
    Extract ranges from case mappings
    (the second param is optional, if it's not empty, a range will contains bidirectional conversions only).

    :param letter_case: case mappings dictionary which contains the conversions;
                        extracted entries are removed from it.
    :param reverse_letter_case: Comparable case mapping table which contains the return direction of the conversion.
    :return: A table with the start points and their mapped value, and another table with the lengths of the ranges.
    """
    in_range = False
    range_position = -1
    ranges = []
    range_lengths = []
    for letter_id in sorted(letter_case.keys()):
        prev_letter_id = letter_id - 1
        # One-way conversions
        if reverse_letter_case is None:
            # Only one-to-one mappings can participate in a range.
            if len(letter_case[letter_id]) > 1:
                in_range = False
                continue
            if prev_letter_id not in letter_case or len(letter_case[prev_letter_id]) > 1:
                in_range = False
                continue
        # Two way conversions
        else:
            if not is_bidirectional_conversion(letter_id, letter_case, reverse_letter_case):
                in_range = False
                continue
            if not is_bidirectional_conversion(prev_letter_id, letter_case, reverse_letter_case):
                in_range = False
                continue
        # A range may only continue while the conversion offset stays constant.
        conv_distance = calculate_conversion_distance(letter_case, letter_id)
        prev_conv_distance = calculate_conversion_distance(letter_case, prev_letter_id)
        if conv_distance != prev_conv_distance:
            in_range = False
            continue
        if in_range:
            range_lengths[range_position] += 1
        else:
            in_range = True
            range_position += 1
            # Add the start point of the range and its mapped value
            ranges.extend([prev_letter_id, ord(letter_case[prev_letter_id])])
            # The new range starts at prev_letter_id and already covers letter_id too.
            range_lengths.append(2)
    # Remove all ranges from the case mapping table.
    for idx in range(0, len(ranges), 2):
        range_length = range_lengths[idx // 2]
        for incr in range(range_length):
            del letter_case[ranges[idx] + incr]
            if reverse_letter_case is not None:
                del reverse_letter_case[ranges[idx + 1] + incr]
    return ranges, range_lengths
def extract_character_pair_ranges(letter_case, reverse_letter_case):
    """
    Extract two or more character pairs from the case mapping tables.

    :param letter_case: case mappings dictionary which contains the conversions.
    :param reverse_letter_case: Comparable case mapping table which contains the return direction of the conversion.
    :return: A table with the start points, and another table with the lengths of the ranges.
    """
    start_points = []
    lengths = []
    in_range = False
    element_counter = -1
    for letter_id in sorted(letter_case.keys()):
        # Only extract character pairs
        if not is_bidirectional_conversion(letter_id, letter_case, reverse_letter_case):
            in_range = False
            continue
        # A "pair" maps a code point to its immediate successor (and back).
        if ord(letter_case[letter_id]) == letter_id + 1:
            prev_letter_id = letter_id - 2
            # The previous pair starts two code points lower; a gap ends the range.
            if not is_bidirectional_conversion(prev_letter_id, letter_case, reverse_letter_case):
                in_range = False
            if in_range:
                lengths[element_counter] += 2
            else:
                element_counter += 1
                start_points.append(letter_id)
                lengths.append(2)
                in_range = True
        else:
            in_range = False
    # Remove all found case mapping from the conversion tables after the scanning method
    for idx, letter_id in enumerate(start_points):
        conv_length = lengths[idx]
        for incr in range(0, conv_length, 2):
            del letter_case[letter_id + incr]
            del reverse_letter_case[letter_id + 1 + incr]
    return start_points, lengths
def extract_character_pairs(letter_case, reverse_letter_case):
    """
    Extract character pairs. Check that two unicode values are also a mapping value of each other.

    :param letter_case: case mappings dictionary which contains the conversions.
    :param reverse_letter_case: Comparable case mapping table which contains the return direction of the conversion.
    :return: A table with character pairs.
    """
    character_pairs = []
    for code_point in sorted(letter_case.keys()):
        if not is_bidirectional_conversion(code_point, letter_case, reverse_letter_case):
            continue
        partner = ord(letter_case[code_point])
        character_pairs.extend([code_point, partner])
        # Drop both directions so later extraction passes skip this pair.
        del letter_case[code_point]
        del reverse_letter_case[partner]
    return character_pairs
def extract_special_ranges(letter_case):
    """
    Extract special ranges. It contains start points of one-to-two letter case ranges
    where the second character is always the same.

    :param letter_case: case mappings dictionary which contains the conversions;
                        extracted range members are removed from it.
    :return: A table with the start points and their mapped values, and a table with the lengths of the ranges.
    """
    special_ranges = []
    special_range_lengths = []
    range_position = -1
    # Bug fix: 'in_range' must be initialized before the loop. Previously it
    # was only assigned on some continue-paths, so the first code point that
    # directly qualified as a range start raised UnboundLocalError.
    in_range = False
    for letter_id in sorted(letter_case.keys()):
        mapped_value = letter_case[letter_id]
        # Only one-to-two mappings are considered.
        if len(mapped_value) != 2:
            continue
        prev_letter_id = letter_id - 1
        if prev_letter_id not in letter_case:
            in_range = False
            continue
        prev_mapped_value = letter_case[prev_letter_id]
        if len(prev_mapped_value) != 2:
            continue
        # The trailing character must be identical across the range.
        if prev_mapped_value[1] != mapped_value[1]:
            continue
        # The offset of the leading character must stay constant.
        if (ord(prev_mapped_value[0]) - prev_letter_id) != (ord(mapped_value[0]) - letter_id):
            in_range = False
            continue
        if in_range:
            special_range_lengths[range_position] += 1
        else:
            range_position += 1
            in_range = True
            special_ranges.extend([prev_letter_id, ord(prev_mapped_value[0]), ord(prev_mapped_value[1])])
            special_range_lengths.append(1)
    # Remove special ranges from the conversion table
    for idx in range(0, len(special_ranges), 3):
        range_length = special_range_lengths[idx // 3]
        for incr in range(range_length):
            del letter_case[special_ranges[idx] + incr]
    return special_ranges, special_range_lengths
def extract_conversions(letter_case):
    """
    Extract conversions. It provides the full (or remaining) case mappings from the table.
    The counter table tells how many one-to-one, one-to-two and one-to-three mappings
    exist successively in the conversion table.

    :param letter_case: case mappings dictionary; extracted entries (mappings of
                        one to three characters) are removed from it.
    :return: A table with conversions, and a table with counters.
    """
    unicodes = [[], [], []]
    # Group the remaining mappings by target length (1, 2, then 3 chars);
    # each group stores flattened [code_point, mapped chars...] records.
    # (The original repeated this loop three times and also had a dead
    # 'unicode_lengths = [0, 0, 0]' initialization.)
    for target_length in (1, 2, 3):
        for letter_id in sorted(letter_case.keys()):
            mapped_value = letter_case[letter_id]
            if len(mapped_value) != target_length:
                continue
            unicodes[target_length - 1].extend([letter_id] + [ord(char) for char in mapped_value])
            del letter_case[letter_id]
    # A record of group N occupies N + 1 table slots.
    unicode_lengths = [len(unicodes[0]) // 2, len(unicodes[1]) // 3, len(unicodes[2]) // 4]
    return list(itertools.chain.from_iterable(unicodes)), unicode_lengths
def is_bidirectional_conversion(letter_id, letter_case, reverse_letter_case):
    """
    Check that two unicode values are also mapping values of each other.

    :param letter_id: An integer, representing the unicode code point of the character.
    :param letter_case: Case mapping table with the forward direction of the conversion.
    :param reverse_letter_case: Case mapping table with the return direction of the conversion.
    :return: True, if it is a reversible one-to-one conversion, False otherwise.
    """
    if letter_id not in letter_case:
        return False
    mapped_value = letter_case[letter_id]
    # Multi-character mappings can never form a reversible one-to-one pair.
    if len(mapped_value) > 1:
        return False
    partner = ord(mapped_value)
    if partner not in reverse_letter_case or len(reverse_letter_case[partner]) > 1:
        return False
    # The reverse table must map the partner back to exactly this code point.
    return ord(reverse_letter_case[partner]) == letter_id
def calculate_conversion_distance(letter_case, letter_id):
    """
    Calculate the distance between the unicode character and its mapped value
    (only needed for, and works with, one-to-one mappings).

    :param letter_case: case mappings dictionary which contains the conversions.
    :param letter_id: An integer, representing the unicode code point of the character.
    :return: The conversion distance, or None when there is no one-to-one mapping.
    """
    mapped_value = letter_case.get(letter_id)
    # Multi-character mappings have no meaningful single offset.
    if mapped_value is None or len(mapped_value) > 1:
        return None
    return ord(mapped_value) - letter_id
def generate_conversions(script_args, plane_type):
    """
    Generate lit-unicode-conversions(-sup).inc.h from UnicodeData.txt and SpecialCasing.txt.

    NOTE: every extract_* helper consumes (deletes) the mappings it emitted from
    lower_case/upper_case, so the order of the calls below is significant.
    """
    if plane_type == UNICODE_PLANE_TYPE_SUPPLEMENTARY:
        c_source = UnicodeSupplementarySource(CONVERSIONS_SUP_C_SOURCE)
        categorizer = UnicodeSupplementaryCategorizer()
    else:
        c_source = UnicodeBasicSource(CONVERSIONS_C_SOURCE)
        categorizer = UnicodeBasicCategorizer()
    header_completion = ["/* This file is automatically generated by the %s script" % os.path.basename(__file__),
                         " * from %s and %s files. Do not edit! */" % (UNICODE_DATA_FILE, SPECIAL_CASING_FILE),
                         ""]
    c_source.complete_header("\n".join(header_completion))
    unicode_data_path = os.path.join(script_args.unicode_dir, UNICODE_DATA_FILE)
    special_casing_path = os.path.join(script_args.unicode_dir, SPECIAL_CASING_FILE)
    # Read the corresponding unicode values of lower and upper case letters and store these in tables
    lower_case, upper_case = categorizer.read_case_mappings(unicode_data_path, special_casing_path)
    c_source.add_conversion_range("character_case",
                                  extract_ranges(lower_case, upper_case),
                                  [("/* Contains start points of character case ranges "
                                    "(these are bidirectional conversions). */"),
                                   "/* Interval lengths of start points in `character_case_ranges` table. */"])
    c_source.add_conversion_range("character_pair",
                                  extract_character_pair_ranges(lower_case, upper_case),
                                  ["/* Contains the start points of bidirectional conversion ranges. */",
                                   "/* Interval lengths of start points in `character_pair_ranges` table. */"])
    c_source.add_table(extract_character_pairs(lower_case, upper_case),
                       "/* Contains lower/upper case bidirectional conversion pairs. */",
                       c_source.character_type,
                       "character_pairs",
                       "")
    c_source.add_conversion_range("upper_case_special",
                                  extract_special_ranges(upper_case),
                                  [("/* Contains start points of one-to-two uppercase ranges where the "
                                    "second character\n"
                                    " * is always the same.\n"
                                    " */"),
                                   "/* Interval lengths for start points in `upper_case_special_ranges` table. */"])
    c_source.add_conversion_range("lower_case",
                                  extract_ranges(lower_case),
                                  ["/* Contains start points of lowercase ranges. */",
                                   "/* Interval lengths for start points in `lower_case_ranges` table. */"])
    c_source.add_named_conversion_range("lower_case",
                                        extract_conversions(lower_case),
                                        ["conversions", "conversion_counters"],
                                        [("/* The remaining lowercase conversions. The lowercase variant can "
                                          "be one-to-three character long. */"),
                                         ("/* Number of one-to-one, one-to-two, and one-to-three lowercase "
                                          "conversions. */")])
    c_source.add_named_conversion_range("upper_case",
                                        extract_conversions(upper_case),
                                        ["conversions", "conversion_counters"],
                                        [("/* The remaining uppercase conversions. The uppercase variant can "
                                          "be one-to-three character long. */"),
                                         ("/* Number of one-to-one, one-to-two, and one-to-three uppercase "
                                          "conversions. */")])
    # Anything left over could not be represented by the extraction passes above.
    if lower_case:
        warnings.warn('Not all elements extracted from the lowercase table!')
    if upper_case:
        warnings.warn('Not all elements extracted from the uppercase table!')
    c_source.generate()
def generate_folding(script_args, plane_type):
    """
    Generate lit-unicode-folding(-sup).inc.h: tables of code points whose case
    folding (simple/common entries of CaseFolding.txt) differs from the simple
    lowercase conversion.
    """
    if plane_type == UNICODE_PLANE_TYPE_SUPPLEMENTARY:
        c_source = UnicodeSupplementarySource(FOLDING_SUP_C_SOURCE)
        categorizer = UnicodeSupplementaryCategorizer()
    else:
        c_source = UnicodeBasicSource(FOLDING_C_SOURCE)
        categorizer = UnicodeBasicCategorizer()
    header_completion = ["/* This file is automatically generated by the %s script" % os.path.basename(__file__),
                         " * from the %s file. Do not edit! */" % (CASE_FOLDING_FILE),
                         ""]
    c_source.complete_header("\n".join(header_completion))
    unicode_data_path = os.path.join(script_args.unicode_dir, UNICODE_DATA_FILE)
    special_casing_path = os.path.join(script_args.unicode_dir, SPECIAL_CASING_FILE)
    case_folding_path = os.path.join(script_args.unicode_dir, CASE_FOLDING_FILE)
    # Read the corresponding unicode values of lower and upper case letters and store these in tables
    lower_case, upper_case = categorizer.read_case_mappings(unicode_data_path, special_casing_path)
    folding = {}
    with open(case_folding_path, 'r') as case_folding:
        case_folding_re = re.compile(r'(?P<code_point>[^;]*);\s*(?P<type>[^;]*);\s*(?P<folding>[^;]*);')
        for line in case_folding:
            match = case_folding_re.match(line)
            # Only simple (S) and common (C) foldings are relevant here.
            if match and match.group('type') in ('S', 'C'):
                code_point = int(match.group('code_point'), 16)
                if categorizer.in_range(code_point):
                    folding[code_point] = parse_unicode_sequence(match.group('folding'))
    should_to_upper = []
    should_skip_to_lower = []
    # Code points that lowercase but have no folding entry must keep their case when folding.
    for code_point in lower_case:
        if code_point not in folding:
            should_skip_to_lower.append(code_point)
    for code_point, folded in folding.items():
        # Folding differs from simple lowercasing: route the fold through uppercasing.
        if lower_case.get(code_point, make_char(code_point)) != folded:
            should_to_upper.append(code_point)
            if upper_case.get(code_point, '') == folded:
                should_skip_to_lower.append(code_point)
    # NOTE(review): these lists are built in dict iteration order, while
    # create_tables expects an increasing sequence; this relies on the UCD
    # input files being sorted by code point - TODO confirm.
    c_source.add_range('folding_skip_to_lower', categorizer.create_tables(should_skip_to_lower))
    c_source.add_range('folding_to_upper', categorizer.create_tables(should_to_upper))
    c_source.generate()
# entry point
def main():
    """Parse the command line and regenerate every lit-unicode-*.inc.h file."""
    parser = argparse.ArgumentParser(description='lit-unicode-{conversions,ranges}-{sup}.inc.h generator',
                                     epilog='''
                                     The input data must be retrieved from
                                     http://www.unicode.org/Public/<VERSION>/ucd/UCD.zip.
                                     The last known good version is 13.0.0.
                                     ''')

    def check_dir(path):
        # argparse "type" callback: validates that the directory is readable.
        if not os.path.isdir(path) or not os.access(path, os.R_OK):
            raise argparse.ArgumentTypeError('The %s directory does not exist or is not readable!' % path)
        return path

    parser.add_argument('--unicode-dir', metavar='DIR', action='store', required=True,
                        type=check_dir, help='specify the unicode data directory')
    script_args = parser.parse_args()
    generate_ranges(script_args, UNICODE_PLANE_TYPE_BASIC)
    generate_ranges(script_args, UNICODE_PLANE_TYPE_SUPPLEMENTARY)
    generate_conversions(script_args, UNICODE_PLANE_TYPE_BASIC)
    generate_conversions(script_args, UNICODE_PLANE_TYPE_SUPPLEMENTARY)
    generate_folding(script_args, UNICODE_PLANE_TYPE_BASIC)
    # There are currently no code points in the supplementary planes that require special folding
    # generate_folding(script_args, UNICODE_PLANE_TYPE_SUPPLEMENTARY)


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,51 @@
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LICENSE = """/* Copyright JS Foundation and other contributors, http://js.foundation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/"""
def format_code(code, indent, digit_number=4):
    """
    Format a sequence of characters/integers as indented lines of hex literals.

    :param code: iterable of ints or single-character strings.
    :param indent: number of leading spaces on every emitted line.
    :param digit_number: zero-padded width of each hex value.
    :return: the formatted lines joined with newlines.
    """
    def to_hex(value):
        # Accept both raw integers and single-character strings.
        if isinstance(value, str):
            value = ord(value)
        return ("0x{:0%sx}" % digit_number).format(value)

    # Each entry occupies "0x" + digits + ", ", so this chunk width keeps
    # exactly ten hex numbers per line.
    chunk_width = 10 * (digit_number + 4)
    joined = ", ".join(to_hex(item) for item in code)
    prefix = ' ' * indent
    lines = [prefix + joined[start:start + chunk_width].strip()
             for start in range(0, len(joined), chunk_width)]
    return "\n".join(lines)

View File

@@ -0,0 +1,119 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
# force // operator to be integer division in Python 2
from __future__ import division
import argparse
import json
import os
import subprocess
import sys
# Repository layout and the tools/build.py flags used by this script.
TOOLS_PATH = os.path.dirname(os.path.realpath(__file__))
BASE_PATH = os.path.join(TOOLS_PATH, '..')
FLAG_CLEAN = '--clean'
FLAG_DEBUG = '--debug'
FLAG_HEAPLIMIT = '--mem-heap'
JERRY_BUILDER = os.path.join(BASE_PATH, 'tools', 'build.py')
# Path of the jerry binary produced by the builder.
JERRY_BIN = os.path.join(BASE_PATH, 'build', 'bin', 'jerry')
TEST_DIR = os.path.join(BASE_PATH, 'tests')
def get_args():
    """ Parse input arguments. """
    description = 'Finds the smallest possible JerryHeap size without failing to run the given js file'
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument('testfile')
    parser.add_argument('--heapsize', type=int, default=512,
                        help='set the limit of the first heapsize (default: %(default)d)')
    parser.add_argument('--buildtype', choices=['release', 'debug'], default='release',
                        help='select build type (default: %(default)s)')
    return parser.parse_args()
def check_files(opts):
    """Abort with an error message when a required input file is missing."""
    for required in (JERRY_BUILDER, opts.testfile):
        if not os.path.isfile(required):
            sys.exit("File not found: %s" % required)
def build_bin(heapsize, opts):
    """ Run tools/build.py script """
    command = [JERRY_BUILDER, FLAG_CLEAN, FLAG_HEAPLIMIT, str(heapsize)]
    if opts.buildtype == 'debug':
        command.append(FLAG_DEBUG)
    print('Building JerryScript with: %s' % (' '.join(command)))
    # check_output raises CalledProcessError if the build fails.
    subprocess.check_output(command)
def run_test(opts):
    """ Run the testfile to get the exitcode. """
    testfile = os.path.abspath(opts.testfile)
    try:
        # check_output raises CalledProcessError on a non-zero exit code.
        subprocess.check_output([JERRY_BIN, testfile], cwd=TEST_DIR)
    except subprocess.CalledProcessError as err:
        return err.returncode
    return 0
def heap_limit(opts):
    """ Find the minimal size of jerryheap to pass """
    # Binary search between 0 and opts.heapsize; 'goodheap' tracks the
    # smallest heap size known to pass so far.
    goodheap = opts.heapsize
    lowheap = 0
    hiheap = opts.heapsize
    while lowheap < hiheap:
        build_bin(hiheap, opts)
        assert os.path.isfile(JERRY_BIN), 'Jerry binary file does not exists'
        exitcode = run_test(opts)
        if exitcode != 0:
            # Failed: the passing threshold lies above hiheap.
            lowheap = hiheap
            hiheap = (lowheap + goodheap) // 2
        else:
            # Passed: remember it and search below.
            goodheap = hiheap
            hiheap = (lowheap + hiheap) // 2
    # NOTE(review): if the test fails even at the initial opts.heapsize, the
    # loop exits with lowheap == hiheap == opts.heapsize and the initial size
    # is still reported as passing - TODO confirm this is intended.
    return {
        'testfile': opts.testfile,
        'heaplimit to pass': goodheap
    }
def main(options):
    """Validate the inputs, run the heap-limit search and print the result as JSON."""
    check_files(options)
    print(json.dumps(heap_limit(options), indent=4))


if __name__ == "__main__":
    main(get_args())

View File

@@ -0,0 +1,126 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file converts ./js/*.js to a C-array in ./source/jerry-targetjs.h file
import argparse
import glob
import os
import re
from gen_c_source import LICENSE, format_code
HEADER = '''#ifndef JERRY_TARGETJS_H
#define JERRY_TARGETJS_H
'''
FOOTER = '''
#endif
'''
NATIVE_STRUCT = '''
struct js_source_all {
const char* name;
const char* source;
const int length;
};
#define DECLARE_JS_CODES \\
struct js_source_all js_codes[] = \\
{ \\'''
def extract_name(path):
    """
    Derive a C identifier from a script path: the basename without extension,
    with characters that are invalid in identifiers replaced by underscores.
    """
    base_name = os.path.splitext(os.path.basename(path))[0]
    return re.sub(r'[-\\?\'".]', '_', base_name)
def reduce_code(code):
    """Strip comments and squeeze whitespace to shrink the embedded JS source."""
    # NOTE(review): these regexes are not string-literal aware; a "//" or "/*"
    # inside a JS string (e.g. a URL) would be stripped as a comment - confirm
    # that the embedded scripts avoid such constructs.
    code = re.sub(r"/\*.*?\*/", "", code, flags=re.DOTALL)  # remove all occurrences of streamed comments
    code = re.sub(r"//.*?\n", "", code)  # remove all occurrences of single-line comments
    code = re.sub('\n+', '\n', re.sub('\n +', '\n', code))  # remove white spaces
    return code
def js_to_native_code(path, name, build_type):
    """
    Convert one JavaScript file into C definitions: <name>_n (script name),
    <name>_s (source bytes as a hex char array) and <name>_l (source length).
    """
    with open(path, 'r') as js_source:
        code = js_source.read()
    # Debug builds keep the source readable; release builds minify it first.
    if build_type != 'debug':
        code = reduce_code(code)
    # Two-digit hex values, one space of indentation inside the array.
    data = format_code(code, 1, 2)
    native_code = """const static char {0}_n[] = "{0}";
const static char {0}_s[] =
{{
{1}
}};
const static int {0}_l = {2};
""".format(name, data, len(code))
    return native_code
def main():
    """Collect *.js files and emit them as C arrays into jerry-targetjs.h."""
    parser = argparse.ArgumentParser(description="js2c")
    parser.add_argument('--build-type', help='build type', default='release', choices=['release', 'debug'])
    parser.add_argument('--ignore', help='files to ignore', dest='ignore_files', default=[], action='append')
    parser.add_argument('--no-main',
                        help="don't require a 'main.js' file",
                        dest='main',
                        action='store_false',
                        default=True)
    parser.add_argument('--js-source',
                        dest='js_source_path',
                        default='./js',
                        # Bug fix: the help text contained a stray '"' after "files".
                        help='Source directory of JavaScript files (default: %(default)s)')
    parser.add_argument('--dest',
                        dest='output_path',
                        default='./source',
                        help="Destination directory of 'jerry-targetjs.h' (default: %(default)s)")
    script_args = parser.parse_args()

    gen_line = "/* This file is generated by %s. Please do not modify. */" % os.path.basename(__file__)
    gen_output = [LICENSE, "", gen_line, "", HEADER]
    gen_structs = [NATIVE_STRUCT]
    # 'main.js' is registered first so it is always js_codes[0].
    if script_args.main:
        gen_structs.append(' {{ {0}_n, {0}_s, {0}_l }}, \\'.format("main"))
    files = glob.glob(os.path.join(script_args.js_source_path, '*.js'))
    for path in files:
        if os.path.basename(path) not in script_args.ignore_files:
            name = extract_name(path)
            gen_output.append(js_to_native_code(path, name, script_args.build_type))
            if name != 'main':
                gen_structs.append(' {{ {0}_n, {0}_s, {0}_l }}, \\'.format(name))
    # Terminate the js_codes array with a NULL sentinel.
    gen_structs.append(' { NULL, NULL, 0 } \\\n};')
    gen_output.append("\n".join(gen_structs))
    gen_output.append(FOOTER)
    with open(os.path.join(script_args.output_path, 'jerry-targetjs.h'), 'w') as gen_file:
        gen_file.write("\n".join(gen_output))


if __name__ == "__main__":
    main()

View File

@@ -0,0 +1,22 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage: $0 <jerry-binary> <test-file>
# Prints the peak allocated memory reported by jerry's --mem-stats output.
JERRY="$1"
TEST="$2"

# Use $(...) instead of deprecated backticks, and quote every expansion so
# paths containing spaces survive word splitting.
MEM_PEAK=$("$JERRY" "$TEST" --mem-stats | grep "Peak allocated =" | awk '{print $4}')
echo "$MEM_PEAK"

View File

@@ -0,0 +1,195 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Positional parameters: iteration count, engine binary, benchmark file,
# and an optional "-min" flag that selects minimum instead of trimmed-mean
# reporting.
ITERS="$1"
ENGINE="$2"
BENCHMARK="$3"
PRINT_MIN="$4"
# Lowercased OS name; the BSD sed shipped with macOS does not understand
# \t inside a bracket expression, hence the two regexp variants below.
OS=`uname -s | tr [:upper:] [:lower:]`
if [ "$OS" == "darwin" ]
then
time_regexp='s/user[ ]*\([0-9]*\)m\([0-9.]*\)s/\1 \2/g'
else
time_regexp='s/user[ \t]*\([0-9]*\)m\([0-9.]*\)s/\1 \2/g'
fi
# Run the benchmark ITERS times under the shell's "time" builtin, pull the
# "user" line out of time's stderr output, and convert each "NmM.Ss"
# stamp to plain seconds (one value per line). Any failing engine run
# aborts the whole measurement via the PIPESTATUS check.
perf_values=$( (( for i in `seq 1 1 $ITERS`; do time $ENGINE "$BENCHMARK"; if [ $? -ne 0 ]; then exit 1; fi; done ) 2>&1 ) | \
grep user | \
sed "$time_regexp" | \
awk '{ print ($1 * 60 + $2); }';
if [ ${PIPESTATUS[0]} -ne 0 ]; then exit 1; fi; );
# With "-min", report only the fastest iteration.
if [ "$PRINT_MIN" == "-min" ]
then
# The awk program needs no shell interpolation, so it is single-quoted.
# Previously it was double-quoted with an UNESCAPED $1 (compare the
# escaped \$1 forms used elsewhere in this script), so the shell expanded
# $1 to the script's first argument (ITERS) instead of letting awk see
# its first-field reference - the reported "minimum" was just ITERS.
perf_values=$( echo "$perf_values" | \
awk 'BEGIN {
min_v = -1;
}
{
if (min_v == -1 || $1 < min_v) {
min_v = $1;
}
}
END {
print min_v
}' || exit 1;
);
calc_status=$?
else
# Default mode: drop the 20% lowest and 20% highest samples, then report
# the trimmed mean and - when more than one sample remains - a 99%
# confidence half-width based on Student's t-distribution.
perf_values=$( echo "$perf_values" | \
awk "BEGIN {
n = 0
}
{
n++
a[n] = \$1
}
END {
#
# Values of 99% quantiles of two-sided t-distribution for given number of degrees of freedom
#
t_gamma_n_m1 [1] = 63.657
t_gamma_n_m1 [2] = 9.9248
t_gamma_n_m1 [3] = 5.8409
t_gamma_n_m1 [4] = 4.6041
t_gamma_n_m1 [5] = 4.0321
t_gamma_n_m1 [6] = 3.7074
t_gamma_n_m1 [7] = 3.4995
t_gamma_n_m1 [8] = 3.3554
t_gamma_n_m1 [9] = 3.2498
t_gamma_n_m1 [10] = 3.1693
t_gamma_n_m1 [11] = 3.1058
t_gamma_n_m1 [12] = 3.0545
t_gamma_n_m1 [13] = 3.0123
t_gamma_n_m1 [14] = 2.9768
t_gamma_n_m1 [15] = 2.9467
t_gamma_n_m1 [16] = 2.9208
t_gamma_n_m1 [17] = 2.8982
t_gamma_n_m1 [18] = 2.8784
t_gamma_n_m1 [19] = 2.8609
t_gamma_n_m1 [20] = 2.8453
t_gamma_n_m1 [21] = 2.8314
t_gamma_n_m1 [22] = 2.8188
t_gamma_n_m1 [23] = 2.8073
t_gamma_n_m1 [24] = 2.7969
t_gamma_n_m1 [25] = 2.7874
t_gamma_n_m1 [26] = 2.7787
t_gamma_n_m1 [27] = 2.7707
t_gamma_n_m1 [28] = 2.7633
t_gamma_n_m1 [29] = 2.7564
t_gamma_n_m1 [30] = 2.75
t_gamma_n_m1 [31] = 2.744
t_gamma_n_m1 [32] = 2.7385
t_gamma_n_m1 [33] = 2.7333
t_gamma_n_m1 [34] = 2.7284
t_gamma_n_m1 [35] = 2.7238
t_gamma_n_m1 [36] = 2.7195
t_gamma_n_m1 [37] = 2.7154
t_gamma_n_m1 [38] = 2.7116
t_gamma_n_m1 [39] = 2.7079
t_gamma_n_m1 [40] = 2.7045
t_gamma_n_m1 [41] = 2.7012
t_gamma_n_m1 [42] = 2.6981
t_gamma_n_m1 [43] = 2.6951
t_gamma_n_m1 [44] = 2.6923
t_gamma_n_m1 [45] = 2.6896
t_gamma_n_m1 [46] = 2.687
t_gamma_n_m1 [47] = 2.6846
t_gamma_n_m1 [48] = 2.6822
t_gamma_n_m1 [49] = 2.68
t_gamma_n_m1 [50] = 2.6778
#
# Sort array of measurements
#
for (i = 2; i <= n; i++) {
j = i
k = a [j]
while (j > 1 && a [j - 1] > k) {
a [j] = a [j - 1]
j--
}
a [j] = k
}
#
# Remove 20% of lowest and 20% of highest values
#
n_20_percent = int (n / 5)
for (i = 1; i <= n_20_percent; i++) {
delete a[n]
n--
}
for (i = 1; i <= n - n_20_percent; i++) {
a[i] = a[i + n_20_percent]
}
n -= n_20_percent
#
# Calculate average
#
sum = 0
for (i = 1; i <= n; i++) {
sum += a[i]
}
avg = sum / n
if (n > 1) {
if (n - 1 <= 50) {
t_coef = t_gamma_n_m1 [n - 1]
} else {
# For greater degrees of freedom, values of corresponding quantiles
# are insignificantly less than the value.
#
# For example, the value for infinite number of freedoms is 2.5758
#
# So, to reduce table size, we take this, greater value,
# overestimating inaccuracy for no more than 4%.
#
t_coef = t_gamma_n_m1 [50]
}
#
# Calculate inaccuracy estimation
#
sum_delta_squares = 0
for (i = 1; i <= n; i++) {
sum_delta_squares += (avg - a[i]) ^ 2
}
delta = t_coef * sqrt (sum_delta_squares / (n * (n - 1)))
print avg, delta
} else {
print avg
}
}
" || exit 1;
);
calc_status=$?
fi
echo "$perf_values"
# Propagate the status captured right after the awk computation: testing
# $? here checked the exit status of the echo above, which is always 0,
# so measurement failures were silently reported as success.
if [ $calc_status -ne 0 ];
then
exit 1;
fi;

View File

@@ -0,0 +1,378 @@
[MASTER]
# Specify a configuration file.
#rcfile=
# Python code to execute, usually for sys.path manipulation such as
# pygtk.require().
#init-hook=
# Add files or directories to the blacklist. They should be base names, not
# paths.
ignore=CVS
# Pickle collected data for later comparisons.
persistent=yes
# List of plugins (as comma separated values of python modules names) to load,
# usually to register additional checkers.
load-plugins=
# Use multiple processes to speed up Pylint.
jobs=1
# Allow loading of arbitrary C extensions. Extensions are imported into the
# active Python interpreter and may run arbitrary code.
unsafe-load-any-extension=no
# A comma-separated list of package or module names from where C extensions may
# be loaded. Extensions are loading into the active Python interpreter and may
# run arbitrary code
extension-pkg-whitelist=
# Allow optimization of some AST trees. This will activate a peephole AST
# optimizer, which will apply various small optimizations. For instance, it can
# be used to obtain the result of joining multiple strings with the addition
# operator. Joining a lot of strings can lead to a maximum recursion error in
# Pylint and this flag can prevent that. It has one side effect, the resulting
# AST will be different than the one from reality.
optimize-ast=no
[MESSAGES CONTROL]
# Only show warnings with the listed confidence levels. Leave empty to show
# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED
confidence=
# Enable the message, report, category or checker with the given id(s). You can
# either give multiple identifier separated by comma (,) or put this option
# multiple time. See also the "--disable" option for examples.
#enable=
# Disable the message, report, category or checker with the given id(s). You
# can either give multiple identifiers separated by comma (,) or put this
# option multiple times (only on the command line, not in the configuration
# file where it should appear only once).You can also use "--disable=all" to
# disable everything first and then reenable specific checks. For example, if
# you want to run only the similarities checker, you can use "--disable=all
# --enable=similarities". If you want to run only the classes checker, but have
# no Warning level messages displayed, use"--disable=all --enable=classes
# --disable=W"
disable=import-star-module-level,old-octal-literal,oct-method,unpacking-in-except,parameter-unpacking,backtick,old-raise-syntax,old-ne-operator,long-suffix,dict-view-method,dict-iter-method,metaclass-assignment,next-method-called,raising-string,indexing-exception,raw_input-builtin,long-builtin,file-builtin,execfile-builtin,coerce-builtin,cmp-builtin,buffer-builtin,basestring-builtin,apply-builtin,filter-builtin-not-iterating,using-cmp-argument,useless-suppression,range-builtin-not-iterating,suppressed-message,no-absolute-import,old-division,cmp-method,reload-builtin,zip-builtin-not-iterating,intern-builtin,unichr-builtin,reduce-builtin,standarderror-builtin,unicode-builtin,xrange-builtin,coerce-method,delslice-method,getslice-method,setslice-method,input-builtin,round-builtin,hex-method,nonzero-method,map-builtin-not-iterating,missing-docstring,locally-disabled
[REPORTS]
# Set the output format. Available formats are text, parseable, colorized, msvs
# (visual studio) and html. You can also give a reporter class, eg
# mypackage.mymodule.MyReporterClass.
output-format=text
# Put messages in a separate file for each module / package specified on the
# command line instead of printing them on stdout. Reports (if any) will be
# written in a file name "pylint_global.[txt|html]".
files-output=no
# Tells whether to display a full report or only the messages
reports=no
# Python expression which should return a note less than 10 (10 is the highest
# note). You have access to the variables errors warning, statement which
# respectively contain the number of errors / warnings messages and the total
# number of statements analyzed. This is used by the global evaluation report
# (RP0004).
evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
# Template used to display messages. This is a python new-style format string
# used to format the message information. See doc for all details
#msg-template=
[TYPECHECK]
# Tells whether missing members accessed in mixin class should be ignored. A
# mixin class is detected if its name ends with "mixin" (case insensitive).
ignore-mixin-members=yes
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
# and thus existing member attributes cannot be deduced by static analysis. It
# supports qualified module names, as well as Unix pattern matching.
ignored-modules=
# List of classes names for which member attributes should not be checked
# (useful for classes with attributes dynamically set). This supports can work
# with qualified names.
ignored-classes=
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
generated-members=
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=120
# Regexp for a line that is allowed to be longer than the limit.
ignore-long-lines=^\s*(# )?<?https?://\S+>?$
# Allow the body of an if to be on the same line as the test if there is no
# else.
single-line-if-stmt=no
# List of optional constructs for which whitespace checking is disabled. `dict-
# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}.
# `trailing-comma` allows a space between comma and closing bracket: (a, ).
# `empty-line` allows space-only lines.
no-space-check=trailing-comma,dict-separator
# Maximum number of lines in a module
max-module-lines=2000
# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1
# tab).
indent-string=' '
# Number of spaces of indent required inside a hanging or continued line.
indent-after-paren=4
# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
expected-line-ending-format=
[MISCELLANEOUS]
# List of note tags to take in consideration, separated by a comma.
notes=FIXME,XXX,TODO
[SPELLING]
# Spelling dictionary name. Available dictionaries: none. To make it working
# install python-enchant package.
spelling-dict=
# List of comma separated words that should not be checked.
spelling-ignore-words=
# A path to a file that contains private dictionary; one word per line.
spelling-private-dict-file=
# Tells whether to store unknown words to indicated private dictionary in
# --spelling-private-dict-file option instead of raising a message.
spelling-store-unknown-words=no
[VARIABLES]
# Tells whether we should check for unused import in __init__ files.
init-import=no
# A regular expression matching the name of dummy variables (i.e. expectedly
# not used).
dummy-variables-rgx=_$|dummy
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
additional-builtins=
# List of strings which can identify a callback function by name. A callback
# name must start or end with one of those strings.
callbacks=cb_,_cb
[BASIC]
# List of builtins function names that should not be used, separated by a comma
bad-functions=map,filter,input
# Good variable names which should always be accepted, separated by a comma
good-names=i,j,k,ex,Run,_
# Bad variable names which should always be refused, separated by a comma
bad-names=foo,bar,baz,toto,tutu,tata
# Colon-delimited sets of names that determine each other's naming style when
# the name regexes allow several styles.
name-group=
# Include a hint for the correct naming format with invalid-name
include-naming-hint=no
# Regular expression matching correct function names
function-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for function names
function-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct variable names
variable-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for variable names
variable-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct constant names
const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Naming hint for constant names
const-name-hint=(([A-Z_][A-Z0-9_]*)|(__.*__))$
# Regular expression matching correct attribute names
attr-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for attribute names
attr-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct argument names
argument-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for argument names
argument-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression matching correct class attribute names
class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Naming hint for class attribute names
class-attribute-name-hint=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$
# Regular expression matching correct inline iteration names
inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$
# Naming hint for inline iteration names
inlinevar-name-hint=[A-Za-z_][A-Za-z0-9_]*$
# Regular expression matching correct class names
class-rgx=[A-Z_][a-zA-Z0-9]+$
# Naming hint for class names
class-name-hint=[A-Z_][a-zA-Z0-9]+$
# Regular expression matching correct module names
module-rgx=(([a-z_][a-z0-9_-]*)|([A-Z][a-zA-Z0-9]+))$
# Naming hint for module names
module-name-hint=(([a-z_][a-z0-9_-]*)|([A-Z][a-zA-Z0-9]+))$
# Regular expression matching correct method names
method-rgx=[a-z_][a-z0-9_]{2,30}$
# Naming hint for method names
method-name-hint=[a-z_][a-z0-9_]{2,30}$
# Regular expression which should only match function or class names that do
# not require a docstring.
no-docstring-rgx=^_
# Minimum line length for functions/classes that require docstrings, shorter
# ones are exempt.
docstring-min-length=-1
[ELIF]
# Maximum number of nested blocks for function / method body
max-nested-blocks=7
[LOGGING]
# Logging modules to check that the string format arguments are in logging
# function parameter format
logging-modules=logging
[SIMILARITIES]
# Minimum lines number of a similarity.
min-similarity-lines=4
# Ignore comments when computing similarities.
ignore-comments=yes
# Ignore docstrings when computing similarities.
ignore-docstrings=yes
# Ignore imports when computing similarities.
ignore-imports=no
[DESIGN]
# Maximum number of arguments for function / method
max-args=10
# Argument names that match this expression will be ignored. Default to name
# with leading underscore
ignored-argument-names=_.*
# Maximum number of locals for function / method body
max-locals=20
# Maximum number of return / yield for function / method body
max-returns=6
# Maximum number of branch for function / method body
max-branches=25
# Maximum number of statements in function / method body
max-statements=75
# Maximum number of parents for a class (see R0901).
max-parents=7
# Maximum number of attributes for a class (see R0902).
max-attributes=10
# Minimum number of public methods for a class (see R0903).
min-public-methods=0
# Maximum number of public methods for a class (see R0904).
max-public-methods=25
# Maximum number of boolean expressions in a if statement
max-bool-expr=5
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=regsub,TERMIOS,Bastion,rexec
# Create a graph of every (i.e. internal and external) dependencies in the
# given file (report RP0402 must not be disabled)
import-graph=
# Create a graph of external dependencies in the given file (report RP0402 must
# not be disabled)
ext-import-graph=
# Create a graph of internal dependencies in the given file (report RP0402 must
# not be disabled)
int-import-graph=
[CLASSES]
# List of method names used to declare (i.e. assign) instance attributes.
defining-attr-methods=__init__,__new__,setUp
# List of valid names for the first argument in a class method.
valid-classmethod-first-arg=cls
# List of valid names for the first argument in a metaclass class method.
valid-metaclass-classmethod-first-arg=mcs
# List of member names, which should be excluded from the protected access
# warning.
exclude-protected=_asdict,_fields,_replace,_source,_make
[EXCEPTIONS]
# Exceptions that will emit a warning when being caught. Defaults to
# "Exception"
overgeneral-exceptions=Exception

View File

@@ -0,0 +1,67 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# $1: engine binary, $2: JS test to run.
JERRY=$1
TEST=$2
# Sampling period in seconds between RSS snapshots.
SLEEP=0.1
# Lowercased OS name; RSS collection differs between macOS and Linux.
OS=`uname -s | tr [:upper:] [:lower:]`
# Newline-separated accumulator of the samples taken by collect_entry
# for the "Rss" entry (see the <name>_OUT convention below).
Rss_OUT=""
# Take one RSS sample of process $PID and append it to the variable named
# "<entry>_OUT", where the entry name is passed as $1 (e.g. "Rss"). The
# eval indirection lets several entry kinds share one collector.
function collect_entry()
{
OUT_NAME="$1_OUT";
OUT=$OUT_NAME;
if [ "$OS" == "darwin" ]
then
# macOS: ps reports the resident set size directly.
SUM=`ps -o rss $PID | grep [0-9]`
else
# Linux: sum the Rss fields of the writable (r?-p / rw-p) private
# mappings listed in /proc/<pid>/smaps.
SUM=$(grep -o -e "^[0-9a-f][0-9a-f]*.*" -e "^Rss.*" /proc/$PID/smaps 2>/dev/null | grep -A 1 -- "r[w-]-p " | grep "^Rss"|awk '{s += $2;} END {print s;}')
fi;
# Record only non-empty samples - the process may have exited between
# the liveness check and the read above.
if [ "$SUM" != "" ];
then
eval "$OUT=\"\$$OUT $SUM\\n\"";
fi;
}
# Print the maximum of the samples collected for entry $1; prints nothing
# when no samples were recorded.
# NOTE(review): the awk variable "entry" is passed in but never used by
# the program - presumably a leftover label; confirm before removing.
function print_entry()
{
OUT_NAME="$1_OUT";
OUT=$OUT_NAME;
eval "echo -e \"\$$OUT\"" | awk -v entry="$1" '{ if ($1 != "") { n += 1; if ($1 > max) { max = $1; } } } END { if (n == 0) { exit; }; printf "%d\n", max; }';
}
# Start the engine on the test in the background and sample its RSS until
# it exits; a non-zero engine exit code aborts this script.
function run_test()
{
$JERRY $TEST &
PID=$!
# kill -0 sends no signal - it only probes that the process still exists.
while kill -0 "$PID" > /dev/null 2>&1;
do
collect_entry Rss
sleep $SLEEP
done
wait "$PID" || exit 1
}
run_test
print_entry Rss

View File

@@ -0,0 +1,84 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Choosing table or semicolon-separated output mode:
# "-d" selects machine-readable, semicolon-separated output; the default
# is a human-readable, column-aligned table. The per-row awk formatters
# are stored in variables and applied in the loop at the bottom of the
# script. The third total field is reported in KB and scaled to bytes.
if [ "$1" == "-d" ]
then
TABLE="no"
PRINT_TEST_NAME_AWK_SCRIPT='{printf "%s;", $1}'
PRINT_TOTAL_AWK_SCRIPT='{printf "%d;%d;%d\n", $1, $2, $3 * 1024}'
shift
else
PRINT_TEST_NAME_AWK_SCRIPT='{printf "%30s", $1}'
PRINT_TOTAL_AWK_SCRIPT='{printf "%25d%25d%25d\n", $1, $2, $3 * 1024}'
TABLE="yes"
fi
# Report the given message on stdout and abort the script with status 1.
function fail_msg
{
printf '%s\n' "$1"
exit 1
}
# Engine
# Check if the specified build supports memory statistics options.
# NOTE: the sense is inverted relative to the name - the function
# succeeds (exit 0) when the engine prints the "Ignoring ... !JMEM_STATS"
# notice, i.e. when the build has NO memory statistics support; callers
# below rely on exactly this behavior.
function is_mem_stats_build
{
[ -x "$1" ] || fail_msg "Engine '$1' is not executable"
tmpfile=`mktemp`
"$1" --mem-stats $tmpfile 2>&1 | grep -- "Ignoring JERRY_INIT_MEM_STATS flag because of !JMEM_STATS configuration." 2>&1 > /dev/null
code=$?
# Quote the temp path so an unusual TMPDIR (spaces, glob characters)
# cannot break the cleanup.
rm "$tmpfile"
return $code
}
# First engine: must be a plain build (no memory statistics support).
JERRY="$1"
shift
is_mem_stats_build "$JERRY" || fail_msg "First engine specified should be built without memory statistics support"
# Second engine: must be a build with memory statistics support.
JERRY_MEM_STATS="$1"
shift
is_mem_stats_build "$JERRY_MEM_STATS" && fail_msg "Second engine specified should be built with memory statistics support"
# Benchmarks list
BENCHMARKS=""
while [ $# -ne 0 ]
do
BENCHMARKS="$BENCHMARKS $1"
shift
done
# Running
if [ "$TABLE" == "yes" ]
then
awk 'BEGIN {printf "%30s%25s%25s%25s\n", "Test name", "Peak Heap (parser)", "Peak Heap (execution)", "Maximum RSS"}'
echo
fi
# For each benchmark: peak heap numbers come from the stats-enabled
# build, the RSS maximum from the plain build via rss-measure.sh.
for bench in $BENCHMARKS
do
test=`basename $bench .js`
echo "$test" | awk "$PRINT_TEST_NAME_AWK_SCRIPT"
MEM_STATS=$("$JERRY_MEM_STATS" --mem-stats --mem-stats-separate $bench | grep -e "Peak allocated =" | grep -o "[0-9]*")
RSS=$(./tools/rss-measure.sh "$JERRY" $bench | tail -n 1 | grep -o "[0-9]*")
echo $MEM_STATS $RSS | xargs | awk "$PRINT_TOTAL_AWK_SCRIPT"
done

View File

@@ -0,0 +1,348 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make Ctrl-C abort the whole benchmark run, not just the current child.
trap "exit 2" INT

# Print an error message highlighted in red.
function pr_err() {
# "$*" joins all arguments with single spaces; the original used the
# unquoted-$@-inside-a-string pitfall (shellcheck SC2145), which also
# left the words open to glob expansion.
echo -e "\e[91mError: $*\e[39m"
}
# Print an error message and terminate the script with exit status 1.
function exit_err() {
# Quote "$@" so argument boundaries survive the hand-off and message
# words containing glob characters are not expanded.
pr_err "$@"
exit 1
}
# Check if the specified build supports memory statistics options.
# Inverted sense: succeeds (exit 0) when the engine IGNORES --mem-stats,
# i.e. when the build was compiled without JMEM_STATS support.
function is_mem_stats_build() {
# Fix: this script defines exit_err, not fail_msg (fail_msg only exists
# in mem-stats-vs-rss.sh), so the original error path could never run.
[ -x "$1" ] || exit_err "Engine '$1' is not executable"
tmpfile=`mktemp`
"$1" --mem-stats $tmpfile 2>&1 | grep -- "Ignoring JERRY_INIT_MEM_STATS flag because of !JMEM_STATS configuration." 2>&1 > /dev/null
code=$?
# Quoted so an unusual TMPDIR cannot break the cleanup.
rm "$tmpfile"
return $code
}
USAGE="Usage:\n tools/run-perf-test.sh OLD_ENGINE NEW_ENGINE REPEATS TIMEOUT BENCH_FOLDER [-m result-file-name.md]"
# The first five arguments are mandatory; "-m <file>" optionally selects
# an additional markdown report.
if [ "$#" -lt 5 ]
then
echo -e "${USAGE}"
exit_err "Argument number mismatch..."
fi
ENGINE_OLD="$1"
ENGINE_NEW="$2"
REPEATS="$3"
TIMEOUT="$4"
BENCH_FOLDER="$5"
OUTPUT_FORMAT="$6"
OUTPUT_FILE="$7"
if [ "$#" -gt 5 ]
then
if [ "${OUTPUT_FORMAT}" != "-m" ]
then
exit_err "Please, use '-m result-file-name.md' as last arguments"
fi
if [ -z "${OUTPUT_FILE}" ]
then
exit_err "Missing md file name. Please, define the filename. Ex.: '-m result-file-name.md'"
fi
# Start the markdown report from scratch.
rm -rf "${OUTPUT_FILE}"
fi
if [ "${REPEATS}" -lt 1 ]
then
exit_err "REPEATS must be greater than 0"
fi
if [ "${TIMEOUT}" -lt 1 ]
then
exit_err "TIMEOUT must be greater than 0"
fi
# Global accumulators updated by run-compare via eval: per-metric sample
# count, running product of relative ratios (for the geometric mean), and
# the running sum used for the inaccuracy estimate. mem starts at -1 as a
# "no inaccuracy expected" sentinel checked at the end of the script.
perf_n=0
mem_n=0
perf_rel_mult=1.0
perf_rel_inaccuracy_tmp=0
mem_rel_mult=1.0
mem_rel_inaccuracy_tmp="-1"
# Unicode "figure space" character
FIGURE_SPACE=$(echo -e -n "\xE2\x80\x87")
# Unicode "approximately equal" character
APPROXIMATELY_EQUAL=$(echo -n -e "\xE2\x89\x88")
# Measure one metric for one test with both engines and print the
# comparison (also appended to ${OUTPUT_FILE} in "-m" mode).
#   $1: measuring command   $2: metric prefix ("mem"/"perf") selecting the
#   global accumulators via eval   $3: test file   $4: printf precision
#   $5: unit suffix
# The measuring command prints "<value>" or "<value> <inaccuracy>"; the
# two cases take different formatting branches below.
function run-compare()
{
COMMAND=$1
PRE=$2
TEST=$3
PRECISION=$4
UNIT=$5
ABS_FP_FMT="%$((PRECISION + 4)).$((PRECISION))f$UNIT"
REL_FP_FMT="%0.3f"
REL_SHOW_PLUS_SIGN_FP_FMT="%+0.3f"
OLD=$(timeout "${TIMEOUT}" ${COMMAND} "${ENGINE_OLD}" "${TEST}") || return 1
NEW=$(timeout "${TIMEOUT}" ${COMMAND} "${ENGINE_NEW}" "${TEST}") || return 1
#check result
# NOTE(review): this executes the captured outputs as commands; it only
# "passes" because a numeric string is not a valid command name. Confirm
# the intended check before touching it.
! $OLD || ! $NEW || return 1
OLD_value=$(echo "$OLD " | cut -d ' ' -f 1)
OLD_inaccuracy=$(echo "$OLD " | cut -d ' ' -f 2)
NEW_value=$(echo "$NEW " | cut -d ' ' -f 1)
NEW_inaccuracy=$(echo "$NEW " | cut -d ' ' -f 2)
#calc relative speedup
eval "rel_mult=\$${PRE}_rel_mult"
rel=$(echo "${OLD_value}" "${NEW_value}" | awk '{ print $2 / $1; }')
#increment n
((${PRE}_n++))
#calc percent to display
PERCENT=$(echo "$rel" | awk '{print (1.0 - $1) * 100; }')
# Branch 1: both measurements carry an inaccuracy estimate - propagate it
# through the ratio and classify the change as [+], [-] or [~].
if [[ "$OLD_inaccuracy" != "" && "$NEW_inaccuracy" != "" ]]
then
DIFF=$(printf "$ABS_FP_FMT -> $ABS_FP_FMT" $OLD_value $NEW_value)
rel_inaccuracy=$(echo "$OLD_value $OLD_inaccuracy $NEW_value $NEW_inaccuracy" | \
awk "{
OLD_value=\$1
OLD_inaccuracy=\$2
NEW_value=\$3
NEW_inaccuracy=\$4
rel_inaccuracy = (NEW_value / OLD_value) * sqrt ((OLD_inaccuracy / OLD_value) ^ 2 + (NEW_inaccuracy / NEW_value) ^ 2)
if (rel_inaccuracy < 0) {
rel_inaccuracy = -rel_inaccuracy
}
print rel_inaccuracy
}")
PERCENT_inaccuracy=$(echo "$rel_inaccuracy" | awk '{ print $1 * 100.0 }')
ext=$(echo "$PERCENT $PERCENT_inaccuracy" | \
awk "{
PERCENT=\$1
PERCENT_inaccuracy=\$2
if (PERCENT > 0.0 && PERCENT > PERCENT_inaccuracy) {
print \"[+]\"
} else if (PERCENT < 0 && -PERCENT > PERCENT_inaccuracy) {
print \"[-]\"
} else {
print \"[$APPROXIMATELY_EQUAL]\"
}
}")
# NOTE(review): this tests rel_inaccuracy_tmp BEFORE the eval below
# refreshes it from the per-metric accumulator, so it sees the previous
# iteration's value - presumably the check was meant to come after.
if [[ $rel_inaccuracy_tmp -lt 0 ]]
then
return 1
fi
eval "rel_inaccuracy_tmp=\$${PRE}_rel_inaccuracy_tmp"
rel_inaccuracy_tmp=$(echo "$rel $rel_inaccuracy $rel_inaccuracy_tmp" | \
awk "{
rel=\$1
rel_inaccuracy=\$2
rel_inaccuracy_tmp=\$3
print rel_inaccuracy_tmp + (rel_inaccuracy / rel) ^ 2
}")
eval "${PRE}_rel_inaccuracy_tmp=\$rel_inaccuracy_tmp"
PERCENT=$(printf "%8s %11s" $(printf "$REL_SHOW_PLUS_SIGN_FP_FMT%%" $PERCENT) $(printf "(+-$REL_FP_FMT%%)" $PERCENT_inaccuracy))
PERCENT="$PERCENT : $ext"
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
WIDTH=42
MD_DIFF=$(printf "%s%s" "$DIFF" "$(printf "%$(($WIDTH - ${#DIFF}))s")")
MD_PERCENT=$(printf "%s%s" "$(printf "%$(($WIDTH - ${#PERCENT}))s")" "$PERCENT")
MD_FORMAT="\`%s\`<br>\`%s\`"
fi
CONSOLE_FORMAT="%20s : %19s"
# Branch 2: plain single-value measurements; a lone inaccuracy on only
# one side is treated as a measurement error.
else
ext=""
if [[ "$OLD_inaccuracy" != "" || "$NEW_inaccuracy" != "" ]]
then
return 1;
fi
DIFF=$(printf "$ABS_FP_FMT -> $ABS_FP_FMT" $OLD_value $NEW_value)
PERCENT=$(printf "$REL_SHOW_PLUS_SIGN_FP_FMT%%" $PERCENT)
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
WIDTH=20
MD_DIFF=$(printf "%s%s" "$DIFF" "$(printf "%$(($WIDTH - ${#DIFF}))s")")
MD_PERCENT=$(printf "%s%s" "$(printf "%$(($WIDTH - ${#PERCENT}))s")" "$PERCENT")
MD_FORMAT="\`%s\`<br>\`%s\`"
fi
CONSOLE_FORMAT="%14s : %8s"
fi
# Fold this test's ratio into the running product for the geometric mean.
rel_mult=$(echo "$rel_mult" "$rel" | awk '{print $1 * $2;}')
eval "${PRE}_rel_mult=\$rel_mult"
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
printf "$MD_FORMAT" "$MD_DIFF" "$MD_PERCENT" | sed "s/ /$FIGURE_SPACE/g" >> "${OUTPUT_FILE}"
fi
printf "$CONSOLE_FORMAT" "$DIFF" "$PERCENT"
}
# Run both comparisons (memory, then performance) for one test file and
# emit one console row; in "-m" mode the same row is appended to the
# markdown report. The memory metric uses mem-stats when available
# (IS_MEM_STAT set at the bottom of the script), otherwise RSS.
function run-test()
{
TEST=$1
# print only filename
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
printf "%s | " "${TEST##*/}" >> "${OUTPUT_FILE}"
fi
printf "%50s | " "${TEST##*/}"
if [ "$IS_MEM_STAT" -ne 0 ]
then
run-compare "./tools/mem-stats-measure.sh" "mem" "${TEST}" 0 || return 1
else
run-compare "./tools/rss-measure.sh" "mem" "${TEST}" 0 k || return 1
fi
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
printf " | " >> "${OUTPUT_FILE}"
fi
printf " | "
run-compare "./tools/perf.sh ${REPEATS}" "perf" "${TEST}" 3 s || return 1
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
printf "\n" >> "${OUTPUT_FILE}"
fi
printf "\n"
}
# Run every *.js benchmark in the given folder; a failing benchmark is
# reported but does not abort the suite.
function run-suite()
{
FOLDER=$1
for BENCHMARK in ${FOLDER}/*.js
do
# Fix: the failure message previously used a format string with no %s
# conversion, so the benchmark name argument was silently dropped.
run-test "${BENCHMARK}" 2> /dev/null || printf "%s <FAILED>\n" "${BENCHMARK}";
done
}
date
# IS_MEM_STAT is non-zero only when BOTH engines were built with memory
# statistics support (is_mem_stats_build succeeds for builds WITHOUT it).
is_mem_stats_build "${ENGINE_OLD}" || is_mem_stats_build "${ENGINE_NEW}"
IS_MEM_STAT=$?
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
if [ "$IS_MEM_STAT" -ne 0 ]
then
echo "Benchmark | Peak alloc.<br>(+ is better) | Perf<br>(+ is better)" >> "${OUTPUT_FILE}"
else
echo "Benchmark | RSS<br>(+ is better) | Perf<br>(+ is better)" >> "${OUTPUT_FILE}"
fi
echo "---------: | --------- | ---------" >> "${OUTPUT_FILE}"
fi
if [ "$IS_MEM_STAT" -ne 0 ]
then
printf "%50s | %25s | %35s\n" "Benchmark" "Peak alloc.(+ is better)" "Perf(+ is better)"
else
printf "%50s | %25s | %35s\n" "Benchmark" "RSS(+ is better)" "Perf(+ is better)"
fi
run-suite "${BENCH_FOLDER}"
# Geometric mean of the per-test memory ratios collected by run-compare.
mem_rel_gmean=$(echo "$mem_rel_mult" "$mem_n" | awk '{print $1 ^ (1.0 / $2);}')
mem_percent_gmean=$(echo "$mem_rel_gmean" | awk '{print (1.0 - $1) * 100;}')
# Sanity check: memory measurements are expected to carry no inaccuracy,
# so the -1 sentinel set at the top must still be intact.
if [[ $mem_rel_inaccuracy_tmp != "-1" ]]
then
exit_err "Incorrect inaccuracy calculation for memory consumption geometric mean"
fi
# Geometric mean of the per-test performance ratios, plus the propagated
# 99% inaccuracy and its [+]/[-]/[~] classification.
perf_rel_gmean=$(echo "$perf_rel_mult" "$perf_n" | awk '{print $1 ^ (1.0 / $2);}')
perf_percent_gmean=$(echo "$perf_rel_gmean" | awk '{print (1.0 - $1) * 100;}')
if [[ "$perf_rel_inaccuracy_tmp" == "-1" ]]
then
exit_err "Incorrect inaccuracy calculation for performance geometric mean"
else
perf_percent_inaccuracy=$(echo "$perf_rel_gmean $perf_rel_inaccuracy_tmp $perf_n" | \
awk "{
perf_rel_gmean=\$1
perf_rel_inaccuracy_tmp=\$2
perf_n=\$3
print 100.0 * (perf_rel_gmean ^ (1.0 / perf_n) * sqrt (perf_rel_inaccuracy_tmp) / perf_n)
}")
perf_ext=$(echo "$perf_percent_gmean $perf_percent_inaccuracy" | \
awk "{
perf_percent_gmean=\$1
perf_percent_inaccuracy=\$2
if (perf_percent_gmean > 0.0 && perf_percent_gmean > perf_percent_inaccuracy) {
print \"[+]\"
} else if (perf_percent_gmean < 0 && -perf_percent_gmean > perf_percent_inaccuracy) {
print \"[-]\"
} else {
print \"[$APPROXIMATELY_EQUAL]\"
}
}")
perf_percent_inaccuracy=$(printf "(+-%0.3f%%) : $perf_ext" $perf_percent_inaccuracy)
fi
gmean_label_text="Geometric mean:"
if [ "${OUTPUT_FORMAT}" == "-m" ]
then
mem_percent_gmean_text=$(printf "RSS reduction: \`%0.3f%%\`" "$mem_percent_gmean")
perf_percent_gmean_text=$(printf "Speed up: \`%0.3f%% %s\`" "$perf_percent_gmean" "$perf_percent_inaccuracy")
printf "%s | %s | %s\n" "$gmean_label_text" "$mem_percent_gmean_text" "$perf_percent_gmean_text" >> "${OUTPUT_FILE}"
fi
mem_percent_gmean_text=$(printf "RSS reduction: %0.3f%%" "$mem_percent_gmean")
perf_percent_gmean_text=$(printf "Speed up: %0.3f%% %s" "$perf_percent_gmean" "$perf_percent_inaccuracy")
printf "%50s | %25s | %51s\n" "$gmean_label_text" "$mem_percent_gmean_text" "$perf_percent_gmean_text"
date

View File

@@ -0,0 +1,530 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import collections
import hashlib
import os
import platform
import subprocess
import sys
import settings
# Directory under which every test build variant is placed.
OUTPUT_DIR = os.path.join(settings.PROJECT_DIR, 'build', 'tests')

# One test configuration: its name, extra build/test arguments, and an
# optional skip reason (False means "do not skip").
Options = collections.namedtuple('Options', ['name', 'build_args', 'test_args', 'skip'])
# Defaults for build_args, test_args and skip, respectively.
# NOTE(review): the [] defaults are shared between every Options instance
# that does not override them - harmless while nobody mutates them, but
# worth confirming before any in-place list modification is added.
Options.__new__.__defaults__ = ([], [], False)
def skip_if(condition, desc):
    """Return *desc* as the skip reason when *condition* holds, else False."""
    if condition:
        return desc
    return False
# Reusable build-argument fragments, concatenated into the job tables below.
OPTIONS_COMMON = ['--lto=off']
OPTIONS_PROFILE_MIN = ['--profile=minimal']
OPTIONS_PROFILE_ES51 = ['--profile=es5.1']
OPTIONS_PROFILE_ESNEXT = ['--profile=es.next']
OPTIONS_STACK_LIMIT = ['--stack-limit=96']
OPTIONS_GC_MARK_LIMIT = ['--gc-mark-limit=16']
OPTIONS_MEM_STRESS = ['--mem-stress-test=on']
OPTIONS_DEBUG = ['--debug']
OPTIONS_SNAPSHOT = ['--snapshot-save=on', '--snapshot-exec=on', '--jerry-cmdline-snapshot=on']
# Unittests/doctests replace the normal jerry command-line tool with the
# unit/doc test binaries and enable the features those tests exercise.
OPTIONS_UNITTESTS = ['--unittests=on', '--jerry-cmdline=off', '--error-messages=on',
                     '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on',
                     '--line-info=on', '--mem-stats=on']
OPTIONS_DOCTESTS = ['--doctests=on', '--jerry-cmdline=off', '--error-messages=on',
                    '--snapshot-save=on', '--snapshot-exec=on', '--vm-exec-stop=on']
# Test options for unittests (and doctests): each profile is built in both
# release and --debug configurations.
JERRY_UNITTESTS_OPTIONS = [
    Options('unittests-es.next',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ESNEXT),
    Options('unittests-es.next-debug',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ESNEXT + OPTIONS_DEBUG),
    Options('doctests-es.next',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ESNEXT),
    Options('doctests-es.next-debug',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ESNEXT + OPTIONS_DEBUG),
    Options('unittests-es5.1',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51),
    Options('unittests-es5.1-debug',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
    Options('doctests-es5.1',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51),
    Options('doctests-es5.1-debug',
            OPTIONS_COMMON + OPTIONS_DOCTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG),
    Options('unittests-es5.1-debug-init-fini',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG
            + ['--cmake-param=-DFEATURE_INIT_FINI=ON'],
            skip=skip_if((sys.platform == 'win32'), 'FEATURE_INIT_FINI build flag isn\'t supported on Windows,' +
                         ' because Microsoft Visual C/C++ Compiler doesn\'t support' +
                         ' library constructors and destructors.')),
    Options('unittests-es5.1-debug-math',
            OPTIONS_COMMON + OPTIONS_UNITTESTS + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG
            + ['--jerry-math=on']),
]
# Test options for jerry-tests: JS test suite configurations; entries with a
# third element pass extra arguments (e.g. --snapshot) to the test runner.
JERRY_TESTS_OPTIONS = [
    Options('jerry_tests-es.next-debug',
            OPTIONS_COMMON + OPTIONS_PROFILE_ESNEXT + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + OPTIONS_MEM_STRESS),
    Options('jerry_tests-es5.1',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT),
    Options('jerry_tests-es5.1-snapshot',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT,
            ['--snapshot']),
    Options('jerry_tests-es5.1-debug',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + OPTIONS_MEM_STRESS),
    Options('jerry_tests-es5.1-debug-snapshot',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_SNAPSHOT + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT
            + OPTIONS_GC_MARK_LIMIT, ['--snapshot']),
    Options('jerry_tests-es5.1-debug-cpointer_32bit',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + ['--cpointer-32bit=on', '--mem-heap=1024']),
    Options('jerry_tests-es5.1-debug-external_context',
            OPTIONS_COMMON + OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG + OPTIONS_STACK_LIMIT + OPTIONS_GC_MARK_LIMIT
            + ['--external-context=on']),
]
# Test options for test262 (ES5.1 version of the suite)
TEST262_TEST_SUITE_OPTIONS = [
    Options('test262_tests', OPTIONS_PROFILE_ES51),
    Options('test262_tests-debug', OPTIONS_PROFILE_ES51 + OPTIONS_DEBUG)
]
# Test options for test262-es2015
TEST262_ES2015_TEST_SUITE_OPTIONS = [
    Options('test262_tests_es2015', OPTIONS_PROFILE_ESNEXT + ['--line-info=on', '--error-messages=on']),
]
# Test options for test262-esnext (needs a larger heap than the default)
TEST262_ESNEXT_TEST_SUITE_OPTIONS = [
    Options('test262_tests_esnext', OPTIONS_PROFILE_ESNEXT
            + ['--line-info=on', '--error-messages=on', '--mem-heap=20480']),
]
# Test options for jerry-debugger
DEBUGGER_TEST_OPTIONS = [
    Options('jerry_debugger_tests',
            OPTIONS_DEBUG + ['--jerry-debugger=on'])
]
# Test options for buildoption-test: each entry only verifies that the given
# configuration builds; no tests are executed on the resulting binary.
JERRY_BUILDOPTIONS = [
    Options('buildoption_test-lto',
            ['--lto=on']),
    Options('buildoption_test-error_messages',
            ['--error-messages=on']),
    Options('buildoption_test-logging',
            ['--logging=on']),
    Options('buildoption_test-amalgam',
            ['--amalgam=on']),
    Options('buildoption_test-valgrind',
            ['--valgrind=on']),
    Options('buildoption_test-mem_stats',
            ['--mem-stats=on']),
    Options('buildoption_test-show_opcodes',
            ['--show-opcodes=on']),
    Options('buildoption_test-show_regexp_opcodes',
            ['--show-regexp-opcodes=on']),
    Options('buildoption_test-cpointer_32bit',
            ['--compile-flag=-m32', '--cpointer-32bit=on', '--system-allocator=on'],
            skip=skip_if(
                platform.system() != 'Linux' or (platform.machine() != 'i386' and platform.machine() != 'x86_64'),
                '-m32 is only supported on x86[-64]-linux')
           ),
    Options('buildoption_test-jerry_math',
            ['--jerry-math=on']),
    # NOTE(review): "PROPRETY" looks misspelled but presumably matches the
    # actual macro name in the engine sources — verify before "fixing".
    Options('buildoption_test-no_lcache_prophashmap',
            ['--compile-flag=-DJERRY_LCACHE=0', '--compile-flag=-DJERRY_PROPRETY_HASHMAP=0']),
    Options('buildoption_test-external_context',
            ['--external-context=on']),
    Options('buildoption_test-shared_libs',
            ['--shared-libs=on'],
            skip=skip_if((sys.platform == 'win32'), 'Not yet supported, link failure on Windows')),
    Options('buildoption_test-cmdline_test',
            ['--jerry-cmdline-test=on'],
            skip=skip_if((sys.platform == 'win32'), 'rand() can\'t be overriden on Windows (benchmarking.c)')),
    Options('buildoption_test-cmdline_snapshot',
            ['--jerry-cmdline-snapshot=on']),
    Options('buildoption_test-recursion_limit',
            OPTIONS_STACK_LIMIT),
    Options('buildoption_test-gc-mark_limit',
            OPTIONS_GC_MARK_LIMIT),
    Options('buildoption_test-jerry-debugger',
            ['--jerry-debugger=on']),
    Options('buildoption_test-module-off',
            ['--compile-flag=-DJERRY_MODULE_SYSTEM=0', '--lto=off']),
    Options('buildoption_test-builtin-proxy-off',
            ['--compile-flag=-DJERRY_BUILTIN_PROXY=0']),
]
def get_arguments():
    """Parse command-line options for the test driver.

    Prints help and exits when invoked without arguments, and rejects
    --test262-test-list unless one of the test262 suites is selected.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--toolchain', metavar='FILE',
                        help='Add toolchain file')
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Only print out failing tests')
    parser.add_argument('--buildoptions', metavar='LIST',
                        help='Add a comma separated list of extra build options to each test')
    parser.add_argument('--skip-list', metavar='LIST',
                        help='Add a comma separated list of patterns of the excluded JS-tests')
    parser.add_argument('--outdir', metavar='DIR', default=OUTPUT_DIR,
                        help='Specify output directory (default: %(default)s)')
    parser.add_argument('--check-signed-off', metavar='TYPE', nargs='?',
                        choices=['strict', 'tolerant', 'gh-actions'], const='strict',
                        help='Run signed-off check (%(choices)s; default type if not given: %(const)s)')
    parser.add_argument('--check-cppcheck', action='store_true',
                        help='Run cppcheck')
    parser.add_argument('--check-doxygen', action='store_true',
                        help='Run doxygen')
    parser.add_argument('--check-pylint', action='store_true',
                        help='Run pylint')
    parser.add_argument('--check-vera', action='store_true',
                        help='Run vera check')
    parser.add_argument('--check-license', action='store_true',
                        help='Run license check')
    parser.add_argument('--check-magic-strings', action='store_true',
                        help='Run "magic string source code generator should be executed" check')
    parser.add_argument('--jerry-debugger', action='store_true',
                        help='Run jerry-debugger tests')
    parser.add_argument('--jerry-tests', action='store_true',
                        help='Run jerry-tests')
    parser.add_argument('--test262', action='store_true',
                        help='Run test262 - ES5.1')
    parser.add_argument('--test262-es2015', default=False, const='default',
                        nargs='?', choices=['default', 'all', 'update'],
                        help='Run test262 - ES2015. default: all tests except excludelist, ' +
                        'all: all tests, update: all tests and update excludelist')
    parser.add_argument('--test262-esnext', default=False, const='default',
                        nargs='?', choices=['default', 'all', 'update'],
                        help='Run test262 - ESnext. default: all tests except excludelist, ' +
                        'all: all tests, update: all tests and update excludelist')
    parser.add_argument('--test262-test-list', metavar='LIST',
                        help='Add a comma separated list of tests or directories to run in test262 test suite')
    parser.add_argument('--unittests', action='store_true',
                        help='Run unittests (including doctests)')
    parser.add_argument('--buildoption-test', action='store_true',
                        help='Run buildoption-test')
    parser.add_argument('--all', '--precommit', action='store_true',
                        help='Run all tests')
    # With no arguments at all there is nothing to do: show usage and bail out.
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    script_args = parser.parse_args()
    # --test262-test-list only makes sense together with a test262 suite.
    if script_args.test262_test_list and not \
       (script_args.test262 or script_args.test262_es2015 or script_args.test262_esnext):
        print("--test262-test-list is only allowed with --test262 or --test262-es2015 or --test262-esnext\n")
        parser.print_help()
        sys.exit(1)
    return script_args
# Cache of build results keyed by the sorted build-argument tuple, so
# identical configurations are built only once per driver run.
BINARY_CACHE = {}
# ANSI colour escape sequences for terminal output.
TERM_NORMAL = '\033[0m'
TERM_YELLOW = '\033[1;33m'
TERM_BLUE = '\033[1;34m'
TERM_RED = '\033[1;31m'
def report_command(cmd_type, cmd, env=None):
    """Echo a labelled command (and optional env overrides) to stderr in blue."""
    pieces = ['%s%s%s\n' % (TERM_BLUE, cmd_type, TERM_NORMAL)]
    if env is not None:
        for var, val in sorted(env.items()):
            pieces.append('%s%s=%r \\%s\n' % (TERM_BLUE, var, val, TERM_NORMAL))
    joiner = ' \\%s\n\t%s' % (TERM_NORMAL, TERM_BLUE)
    pieces.append('%s%s%s\n' % (TERM_BLUE, joiner.join(cmd), TERM_NORMAL))
    sys.stderr.write(''.join(pieces))
def report_skip(job):
    """Print a yellow notice on stderr that *job* is being skipped, with its reason."""
    reason = ' (%s)' % job.skip if job.skip else ''
    sys.stderr.write('%sSkipping: %s%s%s\n' % (TERM_YELLOW, job.name, reason, TERM_NORMAL))
def get_platform_cmd_prefix():
    """Return the command prefix needed to execute scripts on this platform."""
    # Windows cannot exec a script directly; route it through cmd.exe.
    return ['cmd', '/S', '/C'] if sys.platform == 'win32' else []
def create_binary(job, options):
    """Build the binary for *job*, honouring extra --buildoptions.

    Results are memoized in BINARY_CACHE keyed by the sorted build arguments,
    so identical configurations are built only once.

    Returns:
        (returncode, build_dir_path) tuple; returncode is 0 on success.
    """
    build_args = job.build_args[:]
    # Merge user-supplied extra build options, avoiding duplicates.
    if options.buildoptions:
        for option in options.buildoptions.split(','):
            if option not in build_args:
                build_args.append(option)
    build_cmd = get_platform_cmd_prefix()
    build_cmd.append(settings.BUILD_SCRIPT)
    build_cmd.extend(build_args)
    build_dir_path = os.path.join(options.outdir, job.name)
    build_cmd.append('--builddir=%s' % build_dir_path)
    install_dir_path = os.path.join(build_dir_path, 'local')
    build_cmd.append('--install=%s' % install_dir_path)
    if options.toolchain:
        build_cmd.append('--toolchain=%s' % options.toolchain)
    # The command is reported even when the cached result is reused below.
    report_command('Build command:', build_cmd)
    # Cache key deliberately ignores outdir/toolchain: only build_args matter.
    binary_key = tuple(sorted(build_args))
    if binary_key in BINARY_CACHE:
        ret, build_dir_path = BINARY_CACHE[binary_key]
        sys.stderr.write('(skipping: already built at %s with returncode %d)\n' % (build_dir_path, ret))
        return ret, build_dir_path
    try:
        subprocess.check_output(build_cmd)
        ret = 0
    except subprocess.CalledProcessError as err:
        # Surface the captured build output so failures are diagnosable.
        print(err.output)
        ret = err.returncode
    BINARY_CACHE[binary_key] = (ret, build_dir_path)
    return ret, build_dir_path
def get_binary_path(build_dir_path):
    """Return the installed jerry executable path under *build_dir_path*."""
    suffix = '.exe' if sys.platform == 'win32' else ''
    return os.path.join(build_dir_path, 'local', 'bin', 'jerry' + suffix)
def hash_binary(bin_path):
    """Return the SHA-1 hex digest of the file at *bin_path*, read in chunks."""
    digest = hashlib.sha1()
    with open(bin_path, 'rb') as bin_file:
        for chunk in iter(lambda: bin_file.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()
def iterate_test_runner_jobs(jobs, options):
    """Yield (job, build_result, test_cmd) triples for each test job.

    Builds every job, deduplicating by build directory and by binary hash so
    that equivalent configurations are tested only once.  On build failure,
    yields (job, nonzero, None).
    """
    tested_paths = set()
    tested_hashes = {}
    for job in jobs:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            yield job, ret_build, None
            # Fix: without this `continue`, a consumer that keeps iterating
            # after a build failure would make us hash a binary that was
            # never produced.
            continue
        if build_dir_path in tested_paths:
            sys.stderr.write('(skipping: already tested with %s)\n' % build_dir_path)
            continue
        tested_paths.add(build_dir_path)
        bin_path = get_binary_path(build_dir_path)
        bin_hash = hash_binary(bin_path)
        if bin_hash in tested_hashes:
            sys.stderr.write('(skipping: already tested with equivalent %s)\n' % tested_hashes[bin_hash])
            continue
        tested_hashes[bin_hash] = build_dir_path
        test_cmd = get_platform_cmd_prefix()
        test_cmd.extend([settings.TEST_RUNNER_SCRIPT, '--engine', bin_path])
        yield job, ret_build, test_cmd
def run_check(runnable, env=None):
    """Run *runnable* (with *env* merged over os.environ if given); return its exit code."""
    report_command('Test command:', runnable, env=env)
    if env is not None:
        merged = dict(os.environ)
        merged.update(env)
        env = merged
    return subprocess.Popen(runnable, env=env).wait()
def run_jerry_debugger_tests(options):
    """Build the debugger-enabled engine and run every *.cmd debugger test
    over both the websocket and rawpacket channels.

    Returns:
        Non-zero if the build or any test failed.
    """
    ret_build = ret_test = 0
    for job in DEBUGGER_TEST_OPTIONS:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break
        for channel in ["websocket", "rawpacket"]:
            for test_file in os.listdir(settings.DEBUGGER_TESTS_DIR):
                # Each test case is identified by its .cmd command script.
                if test_file.endswith(".cmd"):
                    test_case, _ = os.path.splitext(test_file)
                    test_case_path = os.path.join(settings.DEBUGGER_TESTS_DIR, test_case)
                    test_cmd = [
                        settings.DEBUGGER_TEST_RUNNER_SCRIPT,
                        get_binary_path(build_dir_path),
                        channel,
                        settings.DEBUGGER_CLIENT_SCRIPT,
                        os.path.relpath(test_case_path, settings.PROJECT_DIR)
                    ]
                    if job.test_args:
                        test_cmd.extend(job.test_args)
                    # Accumulate failures but keep running the remaining tests.
                    ret_test |= run_check(test_cmd)
    return ret_build | ret_test
def run_jerry_tests(options):
    """Build each jerry-tests configuration and run the JS test suite on it.

    Returns:
        Non-zero if any build or any test run failed.
    """
    ret_build = ret_test = 0
    for job, ret_build, test_cmd in iterate_test_runner_jobs(JERRY_TESTS_OPTIONS, options):
        if ret_build:
            break
        test_cmd.append('--test-dir')
        test_cmd.append(settings.JERRY_TESTS_DIR)
        if options.quiet:
            test_cmd.append("-q")
        skip_list = []
        # Profile-specific tests live in es5.1/ and es.next/ subdirectories;
        # skip whichever directory does not match the built profile.
        if '--profile=es.next' in job.build_args:
            skip_list.append(os.path.join('es5.1', ''))
        else:
            skip_list.append(os.path.join('es.next', ''))
        if options.skip_list:
            skip_list.append(options.skip_list)
        if skip_list:
            test_cmd.append("--skip-list=" + ",".join(skip_list))
        if job.test_args:
            test_cmd.extend(job.test_args)
        # Tests assume a fixed timezone.
        ret_test |= run_check(test_cmd, env=dict(TZ='UTC'))
    return ret_build | ret_test
def run_test262_test_suite(options):
    """Build and run the selected test262 suite variants (ES5.1/ES2015/ESnext).

    Returns:
        Non-zero if any build or test run failed.
    """
    ret_build = ret_test = 0
    jobs = []
    if options.test262:
        jobs.extend(TEST262_TEST_SUITE_OPTIONS)
    if options.test262_es2015:
        jobs.extend(TEST262_ES2015_TEST_SUITE_OPTIONS)
    if options.test262_esnext:
        jobs.extend(TEST262_ESNEXT_TEST_SUITE_OPTIONS)
    for job in jobs:
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break
        test_cmd = get_platform_cmd_prefix() + [
            settings.TEST262_RUNNER_SCRIPT,
            # --test262-object is passed to the engine itself, not the runner.
            '--engine', get_binary_path(build_dir_path) + " --test262-object",
            '--test-dir', settings.TEST262_TEST_SUITE_DIR
        ]
        # Select the suite flavour based on the job name suffix; the suite
        # mode ('default'/'all'/'update') is forwarded from the CLI option.
        if job.name.endswith('es2015'):
            test_cmd.append('--es2015')
            test_cmd.append(options.test262_es2015)
        elif job.name.endswith('esnext'):
            test_cmd.append('--esnext')
            test_cmd.append(options.test262_esnext)
        else:
            test_cmd.append('--es51')
        if job.test_args:
            test_cmd.extend(job.test_args)
        if options.test262_test_list:
            test_cmd.append('--test262-test-list')
            test_cmd.append(options.test262_test_list)
        # test262 assumes the Pacific timezone.
        ret_test |= run_check(test_cmd, env=dict(TZ='America/Los_Angeles'))
    return ret_build | ret_test
def run_unittests(options):
    """Build and execute every unittest/doctest configuration.

    Returns:
        Non-zero if any build or test run failed.
    """
    ret_build = ret_test = 0
    for job in JERRY_UNITTESTS_OPTIONS:
        if job.skip:
            report_skip(job)
            continue
        ret_build, build_dir_path = create_binary(job, options)
        if ret_build:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break
        if sys.platform == 'win32':
            # MSVC multi-config generators place binaries in a per-config
            # subdirectory; pick the one matching the build type.
            if "--debug" in job.build_args:
                build_config = "Debug"
            else:
                build_config = "MinSizeRel"
        else:
            build_config = ""
        ret_test |= run_check(
            get_platform_cmd_prefix() +
            [settings.UNITTEST_RUNNER_SCRIPT] +
            [os.path.join(build_dir_path, 'tests', build_config)] +
            (["-q"] if options.quiet else [])
        )
    return ret_build | ret_test
def run_buildoption_test(options):
    """Build every JERRY_BUILDOPTIONS configuration (build-only smoke test).

    Returns:
        0 on success (or if every job was skipped), the first failing build's
        return code otherwise.
    """
    # Fix: initialize ret so the function cannot raise NameError when every
    # job is skipped (previously `ret` was first bound inside the loop).
    ret = 0
    for job in JERRY_BUILDOPTIONS:
        if job.skip:
            report_skip(job)
            continue
        ret, _ = create_binary(job, options)
        if ret:
            print("\n%sBuild failed%s\n" % (TERM_RED, TERM_NORMAL))
            break
    return ret
# One runnable check: 'enabled' gates it, 'runner' is called with 'arg'.
Check = collections.namedtuple('Check', ['enabled', 'runner', 'arg'])
def main(options):
    """Run every enabled check/test suite in order; exit with the first failure.

    With --all/--precommit, every check runs regardless of individual flags.
    """
    checks = [
        # --check-signed-off's value selects the mode; 'strict' adds no flag.
        Check(options.check_signed_off, run_check, [settings.SIGNED_OFF_SCRIPT]
              + {'tolerant': ['--tolerant'], 'gh-actions': ['--gh-actions']}.get(options.check_signed_off, [])),
        Check(options.check_cppcheck, run_check, [settings.CPPCHECK_SCRIPT]),
        Check(options.check_doxygen, run_check, [settings.DOXYGEN_SCRIPT]),
        Check(options.check_pylint, run_check, [settings.PYLINT_SCRIPT]),
        Check(options.check_vera, run_check, [settings.VERA_SCRIPT]),
        Check(options.check_license, run_check, [settings.LICENSE_SCRIPT]),
        Check(options.check_magic_strings, run_check, [settings.MAGIC_STRINGS_SCRIPT]),
        Check(options.jerry_debugger, run_jerry_debugger_tests, options),
        Check(options.jerry_tests, run_jerry_tests, options),
        Check(options.test262 or options.test262_es2015 or options.test262_esnext, run_test262_test_suite, options),
        Check(options.unittests, run_unittests, options),
        Check(options.buildoption_test, run_buildoption_test, options),
    ]
    for check in checks:
        if check.enabled or options.all:
            ret = check.runner(check.arg)
            if ret:
                # Stop at the first failing check and propagate its status.
                sys.exit(ret)
if __name__ == "__main__":
    # Entry point: parse CLI options and run the selected checks/suites.
    main(get_arguments())

View File

@@ -0,0 +1,56 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Fix: quote expansions so an engine path (or test name) containing spaces or
# glob characters is passed as a single argument instead of being word-split.
ENGINE="$1"

# Run one benchmark: 5 timed iterations via perf.sh plus an RSS measurement.
function run ()
{
  echo "Running test: $1.js"
  ./tools/perf.sh 5 "$ENGINE" "./tests/benchmarks/$1.js"
  ./tools/rss-measure.sh "$ENGINE" "./tests/benchmarks/$1.js"
}

echo "Running Sunspider:"
#run jerry/sunspider/3d-morph // too fast
run jerry/sunspider/bitops-3bit-bits-in-byte
run jerry/sunspider/bitops-bits-in-byte
run jerry/sunspider/bitops-bitwise-and
run jerry/sunspider/controlflow-recursive
run jerry/sunspider/math-cordic
run jerry/sunspider/math-partial-sums
run jerry/sunspider/math-spectral-norm

echo "Running Jerry:"
run jerry/cse
run jerry/cse_loop
run jerry/cse_ready_loop
run jerry/empty_loop
run jerry/function_loop
run jerry/loop_arithmetics_10kk
run jerry/loop_arithmetics_1kk

echo "Running UBench:"
run ubench/function-closure
run ubench/function-empty
run ubench/function-correct-args
run ubench/function-excess-args
run ubench/function-missing-args
run ubench/function-sum
run ubench/loop-empty-resolve
run ubench/loop-empty
run ubench/loop-sum

View File

@@ -0,0 +1,63 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Usage: <script> JERRY CHANNEL DEBUGGER_CLIENT TEST_CASE
#   JERRY           - jerry engine binary
#   CHANNEL         - debugger transport: websocket or rawpacket
#   DEBUGGER_CLIENT - debugger client script
#   TEST_CASE       - test case path without extension (.cmd/.js/.expected)
JERRY=$1
CHANNEL=$2
DEBUGGER_CLIENT=$3
TEST_CASE=$4
CLIENT_ARGS=""

TERM_NORMAL='\033[0m'
TERM_RED='\033[1;31m'
TERM_GREEN='\033[1;32m'

# "client_source" tests let the client send the source; otherwise the server
# loads the JS file itself.  CLIENT_ARGS is intentionally left unquoted later
# so its embedded spaces split into separate client arguments.
if [[ $TEST_CASE == *"client_source"* ]]; then
  START_DEBUG_SERVER="${JERRY} --start-debug-server --debug-channel ${CHANNEL} --debugger-wait-source &"
  if [[ $TEST_CASE == *"client_source_multiple"* ]]; then
    CLIENT_ARGS="--client-source ${TEST_CASE}_2.js ${TEST_CASE}_1.js"
  else
    CLIENT_ARGS="--client-source ${TEST_CASE}.js"
  fi
else
  START_DEBUG_SERVER="${JERRY} ${TEST_CASE}.js --start-debug-server --debug-channel ${CHANNEL} &"
fi

echo "$START_DEBUG_SERVER"
# The server command is built as a string (with trailing '&') and started in
# the background via eval; give it a moment to open its listening socket.
eval "$START_DEBUG_SERVER"
sleep 1s

# Feed the recorded debugger commands to the client and capture its output.
RESULT_TEMP=`mktemp ${TEST_CASE}.out.XXXXXXXXXX`

(cat "${TEST_CASE}.cmd" | ${DEBUGGER_CLIENT} --channel ${CHANNEL} --non-interactive ${CLIENT_ARGS}) >${RESULT_TEMP} 2>&1

# "restart" tests reconnect with the matching "continue" command file and
# append that second session's output before diffing.
if [[ $TEST_CASE == *"restart"* ]]; then
  CONTINUE_CASE=$(sed "s/restart/continue/g" <<< "$TEST_CASE")
  (cat "${CONTINUE_CASE}.cmd" | ${DEBUGGER_CLIENT} --channel ${CHANNEL} --non-interactive ${CLIENT_ARGS}) >>${RESULT_TEMP} 2>&1
fi

# Pass/fail is decided by diffing the captured output against the recording.
diff -U0 ${TEST_CASE}.expected ${RESULT_TEMP}
STATUS_CODE=$?

rm -f ${RESULT_TEMP}

if [ ${STATUS_CODE} -ne 0 ]
then
  echo -e "${TERM_RED}FAIL: ${TEST_CASE}${TERM_NORMAL}\n"
else
  echo -e "${TERM_GREEN}PASS: ${TEST_CASE}${TERM_NORMAL}\n"
fi

exit ${STATUS_CODE}

View File

@@ -0,0 +1,244 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import shutil
import subprocess
import sys
import util
def get_platform_cmd_prefix():
    """Return the interpreter/shell prefix used to launch the test262 harness."""
    # The official test262.py isn't python3 compatible, but has a python shebang.
    return ['cmd', '/S', '/C'] if sys.platform == 'win32' else ['python2']
def get_arguments():
    """Parse command-line options for the test262 runner.

    Exactly one of --es51/--es2015/--esnext must be given; the chosen suite
    fixes the sub-directory, harness location, pinned git revision and (for
    the newer suites) the excludelist path.

    Returns:
        argparse.Namespace, extended with test262_harness_dir,
        test262_git_hash, optionally excludelist_path, and mode.
    """
    execution_runtime = os.environ.get('RUNTIME', '')
    parser = argparse.ArgumentParser()
    parser.add_argument('--runtime', metavar='FILE', default=execution_runtime,
                        help='Execution runtime (e.g. qemu)')
    parser.add_argument('--engine', metavar='FILE', required=True,
                        help='JerryScript binary to run tests with')
    parser.add_argument('--test-dir', metavar='DIR', required=True,
                        help='Directory contains test262 test suite')
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--es51', action='store_true',
                       help='Run test262 ES5.1 version')
    group.add_argument('--es2015', default=False, const='default',
                       nargs='?', choices=['default', 'all', 'update'],
                       help='Run test262 - ES2015. default: all tests except excludelist, ' +
                       'all: all tests, update: all tests and update excludelist')
    group.add_argument('--esnext', default=False, const='default',
                       nargs='?', choices=['default', 'all', 'update'],
                       help='Run test262 - ES.next. default: all tests except excludelist, ' +
                       'all: all tests, update: all tests and update excludelist')
    parser.add_argument('--test262-test-list', metavar='LIST',
                        help='Add a comma separated list of tests or directories to run in test262 test suite')
    args = parser.parse_args()
    if args.es2015:
        args.test_dir = os.path.join(args.test_dir, 'es2015')
        args.test262_harness_dir = os.path.abspath(os.path.dirname(__file__))
        # Pinned test262 revision for reproducible results.
        args.test262_git_hash = 'fd44cd73dfbce0b515a2474b7cd505d6176a9eb5'
        args.excludelist_path = os.path.join('tests', 'test262-es6-excludelist.xml')
    elif args.esnext:
        args.test_dir = os.path.join(args.test_dir, 'esnext')
        args.test262_harness_dir = os.path.abspath(os.path.dirname(__file__))
        args.test262_git_hash = '281eb10b2844929a7c0ac04527f5b42ce56509fd'
        args.excludelist_path = os.path.join('tests', 'test262-esnext-excludelist.xml')
    else:
        args.test_dir = os.path.join(args.test_dir, 'es51')
        # The ES5.1 branch ships its own harness inside the suite checkout.
        args.test262_harness_dir = args.test_dir
        args.test262_git_hash = 'es5-tests'
    # mode is 'default'/'all'/'update' for the newer suites, False for ES5.1.
    args.mode = args.es2015 or args.esnext
    return args
def prepare_test262_test_suite(args):
    """Clone test262 into args.test_dir and check out the pinned revision.

    A checkout that already exists (a '.git' directory is present) is reused
    as-is.  For the ES5.1 suite the bestPractice and intl402 chapters are
    removed.

    Returns:
        0 on success, the failing git command's return code otherwise.
    """
    if os.path.isdir(os.path.join(args.test_dir, '.git')):
        return 0
    return_code = subprocess.call(['git', 'clone', '--no-checkout',
                                   'https://github.com/tc39/test262.git', args.test_dir])
    if return_code:
        print('Cloning test262 repository failed.')
        return return_code
    return_code = subprocess.call(['git', 'checkout', args.test262_git_hash], cwd=args.test_dir)
    if return_code:
        # Fix: this was an `assert`, which is stripped under `python -O` and
        # crashes with AssertionError otherwise; report and propagate instead.
        print('Checking out the test262 repository failed - invalid git revision.')
        return return_code
    if args.es51:
        path_to_remove = os.path.join(args.test_dir, 'test', 'suite', 'bestPractice')
        if os.path.isdir(path_to_remove):
            shutil.rmtree(path_to_remove)
        path_to_remove = os.path.join(args.test_dir, 'test', 'suite', 'intl402')
        if os.path.isdir(path_to_remove):
            shutil.rmtree(path_to_remove)
    return 0
def update_exclude_list(args):
    """Rewrite the excludelist XML from the freshly generated test262.report.

    Tests marked '===' in the report failed unexpectedly and are appended;
    excluded tests that now pass are dropped.

    Returns:
        1 if the excludelist changed, 0 if it was already up-to-date.
    """
    print("=== Summary - updating excludelist ===\n")
    passing_tests = set()
    failing_tests = set()
    new_passing_tests = set()
    with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'r') as report_file:
        for line in report_file:
            # Lines starting with '=== ' mark unexpected results.
            match = re.match('(=== )?(.*) (?:failed|passed) in (?:non-strict|strict)', line)
            if match:
                (unexpected, test) = match.groups()
                # Normalize Windows path separators to the XML's forward slashes.
                test = test.replace('\\', '/')
                if unexpected:
                    failing_tests.add(test + '.js')
                else:
                    passing_tests.add(test + '.js')
    # Tests pass in strict-mode but fail in non-strict-mode (or vice versa) should be considered as failures
    passing_tests = passing_tests - failing_tests
    with open(args.excludelist_path, 'r+') as exclude_file:
        lines = exclude_file.readlines()
        exclude_file.seek(0)
        exclude_file.truncate()
        # Skip the last line "</excludeList>" to be able to insert new failing tests.
        for line in lines[:-1]:
            match = re.match(r" <test id=\"(\S*)\">", line)
            if match:
                test = match.group(1)
                if test in failing_tests:
                    # Still failing: keep the entry, and don't re-append it below.
                    failing_tests.remove(test)
                    exclude_file.write(line)
                elif test in passing_tests:
                    # Now passing: drop the entry (do not write it back).
                    new_passing_tests.add(test)
                else:
                    exclude_file.write(line)
            else:
                exclude_file.write(line)
        if failing_tests:
            print("New failing tests added to the excludelist")
            for test in sorted(failing_tests):
                exclude_file.write('  <test id="' + test + '"><reason></reason></test>\n')
                print("  " + test)
            print("")
        exclude_file.write('</excludeList>\n')
    if new_passing_tests:
        print("New passing tests removed from the excludelist")
        for test in sorted(new_passing_tests):
            print("  " + test)
        print("")
    if failing_tests or new_passing_tests:
        print("Excludelist was updated succesfully.")
        return 1
    print("Excludelist was already up-to-date.")
    return 0
def main(args):
    """Prepare the suite, run the test262 harness, and summarize the output.

    The harness output is streamed both to test262.report (next to the
    engine binary) and, from the '=== Summary ===' marker on, to stdout;
    before that a progress dot is printed per 100 executed tests.

    Returns:
        0 when all tests succeeded (or in 'update' mode, the
        update_exclude_list result), non-zero otherwise.
    """
    return_code = prepare_test262_test_suite(args)
    if return_code:
        return return_code
    if sys.platform == 'win32':
        # test262 needs a fixed timezone; restore the original on exit/signal.
        original_timezone = util.get_timezone()
        util.set_sighdl_to_reset_timezone(original_timezone)
        util.set_timezone('Pacific Standard Time')
    command = (args.runtime + ' ' + args.engine).strip()
    kwargs = {}
    if sys.version_info.major >= 3:
        kwargs['errors'] = 'ignore'
    if args.es51:
        test262_harness_path = os.path.join(args.test262_harness_dir, 'tools/packaging/test262.py')
    else:
        test262_harness_path = os.path.join(args.test262_harness_dir, 'test262-harness.py')
    test262_command = get_platform_cmd_prefix() + \
                      [test262_harness_path,
                       '--command', command,
                       '--tests', args.test_dir,
                       '--summary']
    # Only the default mode honours the excludelist; 'all'/'update' run everything.
    if 'excludelist_path' in args and args.mode == 'default':
        test262_command.extend(['--exclude-list', args.excludelist_path])
    if args.test262_test_list:
        test262_command.extend(args.test262_test_list.split(','))
    proc = subprocess.Popen(test262_command,
                            universal_newlines=True,
                            stdout=subprocess.PIPE,
                            **kwargs)
    return_code = 1
    with open(os.path.join(os.path.dirname(args.engine), 'test262.report'), 'w') as output_file:
        counter = 0
        summary_found = False
        summary_end_found = False
        while True:
            output = proc.stdout.readline()
            if not output:
                break
            output_file.write(output)
            if output.startswith('=== Summary ==='):
                summary_found = True
                print('')
            if summary_found:
                if not summary_end_found:
                    print(output, end='')
                    # A blank line terminates the summary block.
                    if not output.strip():
                        summary_end_found = True
                if 'All tests succeeded' in output:
                    return_code = 0
            elif re.search('in (non-)?strict mode', output):
                # Pre-summary: one result line per executed test; show progress.
                counter += 1
                if (counter % 100) == 0:
                    print(".", end='')
                if (counter % 5000) == 0:
                    print(" Executed %d tests." % counter)
    proc.wait()
    if sys.platform == 'win32':
        util.set_timezone(original_timezone)
    if args.mode == 'update':
        return_code = update_exclude_list(args)
    return return_code
if __name__ == "__main__":
    # Exit with the harness result so CI can detect failures.
    sys.exit(main(get_arguments()))

View File

@@ -0,0 +1,221 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import subprocess
import sys
import util
def get_arguments():
    """Parse command-line options for the JS test runner.

    Returns:
        argparse.Namespace; skip_list is normalized to a (possibly empty)
        list of substring patterns.
    """
    execution_runtime = os.environ.get('RUNTIME')
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Only print out failing tests')
    parser.add_argument('--runtime', metavar='FILE', default=execution_runtime,
                        help='Execution runtime (e.g. qemu)')
    parser.add_argument('--engine', metavar='FILE',
                        help='JerryScript binary to run tests with')
    parser.add_argument('--test-list', metavar='FILE',
                        help='File contains test paths to run')
    parser.add_argument('--skip-list', metavar='LIST',
                        help='Add a comma separated list of patterns of the excluded JS-tests')
    parser.add_argument('--test-dir', metavar='DIR',
                        help='Directory contains tests to run')
    parser.add_argument('--snapshot', action='store_true',
                        help='Snapshot test')
    script_args = parser.parse_args()
    # Normalize the comma-separated CLI string into a list for get_tests().
    if script_args.skip_list:
        script_args.skip_list = script_args.skip_list.split(',')
    else:
        script_args.skip_list = []
    return script_args
def get_tests(test_dir, test_list, skip_list):
    """Collect the JS test files to execute.

    Args:
        test_dir: directory walked recursively for *.js / *.mjs files (optional).
        test_list: file listing test paths relative to its own directory (optional).
        skip_list: substring patterns; tests whose path contains one are dropped.

    Returns:
        Sorted, filtered list of test file paths.
    """
    tests = []
    # Fix: `tests` was redundantly re-initialized inside the branch, and a
    # single path was appended via extend([...]); use append directly.
    if test_dir:
        for root, _, files in os.walk(test_dir):
            for test_file in files:
                if test_file.endswith(('.js', '.mjs')):
                    tests.append(os.path.join(root, test_file))
    if test_list:
        dirname = os.path.dirname(test_list)
        with open(test_list, "r") as test_list_fd:
            for test in test_list_fd:
                tests.append(os.path.normpath(os.path.join(dirname, test.rstrip())))
    tests.sort()
    return [test for test in tests
            if not any(skipped in test for skipped in skip_list)]
def get_platform_cmd_prefix():
    """Return the command prefix required to execute scripts on this OS."""
    # Windows cannot exec a script directly; route it through cmd.exe.
    return ['cmd', '/S', '/C'] if sys.platform == 'win32' else []
def execute_test_command(test_cmd):
    """Run *test_cmd* with stderr folded into stdout.

    Returns:
        (returncode, combined_output) tuple.
    """
    extra = {}
    if sys.version_info.major >= 3:
        extra['encoding'] = 'unicode_escape'
    proc = subprocess.Popen(test_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                            universal_newlines=True, **extra)
    output = proc.communicate()[0]
    return (proc.returncode, output)
def main(args):
    """Run the collected tests (normal or snapshot mode) and print a summary.

    Returns:
        Truthy (1/True) when any test failed or none were found, falsy otherwise.
    """
    tests = get_tests(args.test_dir, args.test_list, args.skip_list)
    total = len(tests)
    if total == 0:
        print("No test to execute.")
        return 1
    if sys.platform == 'win32':
        # Tests assume UTC; restore the original timezone on exit/signal.
        original_timezone = util.get_timezone()
        util.set_sighdl_to_reset_timezone(original_timezone)
        util.set_timezone('UTC')
    if args.snapshot:
        passed = run_snapshot_tests(args, tests)
    else:
        passed = run_normal_tests(args, tests)
    if sys.platform == 'win32':
        util.set_timezone(original_timezone)
    failed = total - passed
    summary_list = [os.path.relpath(args.engine)]
    if args.snapshot:
        summary_list.append('--snapshot')
    if args.test_dir:
        summary_list.append(os.path.relpath(args.test_dir))
    if args.test_list:
        summary_list.append(os.path.relpath(args.test_list))
    util.print_test_summary(' '.join(summary_list), total, passed, failed)
    return bool(failed)
def run_normal_tests(args, tests):
    """Execute each test directly with the engine and count the passes.

    Tests under a 'fail' directory are expected to exit with code 1 (XFAIL).

    Returns:
        Number of passing tests.
    """
    test_cmd = get_platform_cmd_prefix()
    if args.runtime:
        test_cmd.append(args.runtime)
    # __checkAsync verifies pending async work completed before exit.
    test_cmd.extend([args.engine, '--call-on-exit', '__checkAsync'])
    total = len(tests)
    tested = 0
    passed = 0
    for test in tests:
        tested += 1
        test_path = os.path.relpath(test)
        is_expected_to_fail = os.path.join(os.path.sep, 'fail', '') in test
        test_argument = []
        # ES modules need the engine's module flag.
        if test.endswith('.mjs'):
            test_argument.extend(['-m'])
        (returncode, stdout) = execute_test_command(test_cmd + test_argument + [test])
        if (returncode == 0 and not is_expected_to_fail) or (returncode == 1 and is_expected_to_fail):
            passed += 1
            if not args.quiet:
                passed_string = 'PASS' + (' (XFAIL)' if is_expected_to_fail else '')
                util.print_test_result(tested, total, True, passed_string, test_path)
        else:
            passed_string = 'FAIL%s (%d)' % (' (XPASS)' if returncode == 0 and is_expected_to_fail else '', returncode)
            util.print_test_result(tested, total, False, passed_string, test_path)
            print("================================================")
            print(stdout)
            print("================================================")
    return passed
def run_snapshot_tests(args, tests):
    """Run the tests in two phases: generate a snapshot, then execute it.

    Returns the number of tests that passed.  A test counts as passed when
    generation (or execution) behaves as expected, including expected
    failures for tests in a .../fail/... directory.
    """
    execute_snapshot_cmd = get_platform_cmd_prefix()
    generate_snapshot_cmd = get_platform_cmd_prefix()
    if args.runtime:
        execute_snapshot_cmd.append(args.runtime)
        generate_snapshot_cmd.append(args.runtime)
    execute_snapshot_cmd.extend([args.engine, '--exec-snapshot', 'js.snapshot'])
    execute_snapshot_cmd.extend(['--call-on-exit', '__checkAsync'])
    # engine: jerry[.exe] -> snapshot generator: jerry-snapshot[.exe]
    engine = os.path.splitext(args.engine)
    generate_snapshot_cmd.append(engine[0] + '-snapshot' + engine[1])
    generate_snapshot_cmd.append('generate')
    total = len(tests)
    tested = 0
    passed = 0
    for test in tests:
        tested += 1
        test_path = os.path.relpath(test)
        # Tests under a .../fail/... directory are expected to exit with 1.
        is_expected_to_fail = os.path.join(os.path.sep, 'fail', '') in test
        # Phase 1: generate the snapshot from the JS source.
        (returncode, stdout) = execute_test_command(generate_snapshot_cmd + [test])
        if (returncode == 0) or (returncode == 1 and is_expected_to_fail):
            if not args.quiet:
                passed_string = 'PASS' + (' (XFAIL)' if returncode else '')
                util.print_test_result(tested, total, True, passed_string, test_path, True)
        else:
            util.print_test_result(tested, total, False, 'FAIL (%d)' % (returncode), test_path, True)
            print("================================================")
            print(stdout)
            print("================================================")
        # A failed generation ends the test here; it still counts as passed
        # when the failure was expected.
        if returncode:
            if is_expected_to_fail:
                passed += 1
            continue
        # Phase 2: execute the generated snapshot, then remove it.
        (returncode, stdout) = execute_test_command(execute_snapshot_cmd)
        os.remove('js.snapshot')
        if (returncode == 0 and not is_expected_to_fail) or (returncode == 1 and is_expected_to_fail):
            passed += 1
            if not args.quiet:
                passed_string = 'PASS' + (' (XFAIL)' if is_expected_to_fail else '')
                util.print_test_result(tested, total, True, passed_string, test_path, False)
        else:
            passed_string = 'FAIL%s (%d)' % (' (XPASS)' if returncode == 0 and is_expected_to_fail else '', returncode)
            util.print_test_result(tested, total, False, passed_string, test_path, False)
            print("================================================")
            print(stdout)
            print("================================================")
    return passed
# Script entry point: the exit status is truthy (1) when any test failed.
if __name__ == "__main__":
    sys.exit(main(get_arguments()))

View File

@@ -0,0 +1,87 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import glob
import os
import subprocess
import sys
import util
def get_arguments():
    """Parse the command line arguments of the unittest runner."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-q', '--quiet', action='store_true',
                        help='Only print out failing tests')
    parser.add_argument('--runtime', metavar='FILE',
                        default=os.environ.get('RUNTIME'),
                        help='Execution runtime (e.g. qemu)')
    parser.add_argument('path',
                        help='Path of test binaries')
    return parser.parse_args()
def get_unittests(path):
    """Return the sorted list of executable unit-* binaries found in *path*."""
    def is_unittest(candidate):
        # On Windows only .exe files are runnable test binaries.
        if sys.platform == 'win32' and not candidate.endswith(".exe"):
            return False
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)
    candidates = glob.glob(os.path.join(path, 'unit-*'))
    return sorted(candidate for candidate in candidates if is_unittest(candidate))
def main(args):
    """Run every unit test binary found under args.path and print a summary.

    Returns 1 when there is nothing to run or any test failed, 0 otherwise.
    """
    unittests = get_unittests(args.path)
    total = len(unittests)
    if total == 0:
        # Bug fix: this used print("...%s...", args.path), which printed the
        # literal '%s' and the path as a separate argument; %-format it.
        print("%s: no unit-* test to execute" % args.path)
        return 1
    test_cmd = [args.runtime] if args.runtime else []
    tested = 0
    passed = 0
    failed = 0
    for test in unittests:
        tested += 1
        test_path = os.path.relpath(test)
        try:
            # check_output raises CalledProcessError on a non-zero exit code.
            subprocess.check_output(test_cmd + [test], stderr=subprocess.STDOUT, universal_newlines=True)
            passed += 1
            if not args.quiet:
                util.print_test_result(tested, total, True, 'PASS', test_path)
        except subprocess.CalledProcessError as err:
            failed += 1
            util.print_test_result(tested, total, False, 'FAIL (%d)' % err.returncode, test_path)
            print("================================================")
            print(err.output)
            print("================================================")
    util.print_test_summary(os.path.join(os.path.relpath(args.path), "unit-*"), total, passed, failed)
    if failed > 0:
        return 1
    return 0
# Script entry point: exit status 1 signals "no tests" or failures.
if __name__ == "__main__":
    sys.exit(main(get_arguments()))

View File

@@ -0,0 +1,950 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is based on work under the following copyright and permission notice:
# https://github.com/test262-utils/test262-harness-py
# test262.py, _monkeyYaml.py, parseTestRecord.py
# license of test262.py:
# Copyright 2009 the Sputnik authors. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
# This is derived from sputnik.py, the Sputnik console test runner,
# with elements from packager.py, which is separately
# copyrighted. TODO: Refactor so there is less duplication between
# test262.py and packager.py.
# license of _packager.py:
# Copyright (c) 2012 Ecma International. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
# license of _monkeyYaml.py:
# Copyright 2014 by Sam Mikes. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
# license of parseTestRecord.py:
# Copyright 2011 by Google, Inc. All rights reserved.
# This code is governed by the BSD license found in the LICENSE file.
from __future__ import print_function
import logging
import optparse
import os
from os import path
import platform
import re
import subprocess
import sys
import tempfile
import xml.dom.minidom
from collections import Counter
import signal
import threading
import multiprocessing
#######################################################################
# based on _monkeyYaml.py
#######################################################################
# Matches a YAML flow list such as "[a, b]"; group 1 is the comma separated body.
M_YAML_LIST_PATTERN = re.compile(r"^\[(.*)\]$")
# Matches one "- item" entry of a YAML block list; group 1 is the item text.
M_YAML_MULTILINE_LIST = re.compile(r"^ *- (.*)$")
# The timeout of each test case (seconds).
TEST262_CASE_TIMEOUT = 5
def yaml_load(string):
    """Parse *string* with the minimal (monkey) YAML parser; return a dict."""
    (_, parsed) = my_read_dict(string.splitlines())
    return parsed
def my_read_dict(lines, indent=""):
    """Parse *lines* as a YAML mapping at the given *indent* level.

    Consumes lines from the front of the list and returns the tuple
    (remaining lines, parsed dictionary or None when nothing was parsed).
    """
    dictionary = {}
    key = None
    empty_lines = 0
    while lines:
        # A line outside our indentation level ends this mapping.
        if not lines[0].startswith(indent):
            break
        line = lines.pop(0)
        if my_is_all_spaces(line):
            empty_lines += 1
            continue
        result = re.match(r"(.*?):(.*)", line)
        if result:
            if not dictionary:
                dictionary = {}
            key = result.group(1).strip()
            value = result.group(2).strip()
            (lines, value) = my_read_value(lines, value, indent)
            dictionary[key] = value
        else:
            # Not a "key: value" line: treat it as the continuation of the
            # previous key's value, joined by a space or the blank lines seen.
            if dictionary and key and key in dictionary:
                char = " " if empty_lines == 0 else "\n" * empty_lines
                dictionary[key] += char + line.strip()
            else:
                raise Exception("monkeyYaml is confused at " + line)
        empty_lines = 0
    if not dictionary:
        dictionary = None
    return lines, dictionary
def my_read_value(lines, value, indent):
    """Interpret the value part of one "key: value" line.

    Handles YAML block scalars (">" folded and "|" literal), block lists,
    nested mappings and plain one-line scalars.  Returns the tuple
    (remaining lines, parsed value).
    """
    if value == ">" or value == "|":
        (lines, value) = my_multiline(lines, value == "|")
        value = value + "\n"
        return (lines, value)
    # An empty value may be continued on the following, deeper-indented lines.
    if lines and not value:
        if my_maybe_list(lines[0]):
            return my_multiline_list(lines, value)
        indent_match = re.match("(" + indent + r"\s+)", lines[0])
        if indent_match:
            if ":" in lines[0]:
                return my_read_dict(lines, indent_match.group(1))
            return my_multiline(lines, False)
    return lines, my_read_one_line(value)
def my_maybe_list(value):
    """Return a truthy match object when *value* looks like a "- item" entry."""
    match = M_YAML_MULTILINE_LIST.match(value)
    return match
def my_multiline_list(lines, value):
    """Parse a YAML block list ("- item" lines); return (remaining lines, list)."""
    # assume no explicit indentor (otherwise have to parse value)
    value = []
    indent = 0
    while lines:
        line = lines.pop(0)
        leading = my_leading_spaces(line)
        if my_is_all_spaces(line):
            pass
        elif leading < indent:
            # A shallower indent ends the list; push the line back for the caller.
            lines.insert(0, line)
            break
        else:
            # The first real entry fixes the list's indentation level.
            indent = indent or leading
            value += [my_read_one_line(my_remove_list_header(indent, line))]
    return (lines, value)
def my_remove_list_header(indent, line):
    """Strip *indent* leading characters and the "- " marker from *line*."""
    dedented = line[indent:]
    return M_YAML_MULTILINE_LIST.match(dedented).group(1)
def my_read_one_line(value):
    """Convert one YAML scalar: a flow list, int, float or (quoted) string."""
    if M_YAML_LIST_PATTERN.match(value):
        return my_flow_list(value)
    elif re.match(r"^[-0-9]*$", value):
        try:
            value = int(value)
        except ValueError:
            # e.g. "-" alone: keep it as a string.
            pass
    elif re.match(r"^[-.0-9eE]*$", value):
        try:
            value = float(value)
        except ValueError:
            pass
    elif re.match(r"^('|\").*\1$", value):
        # Strip a matching pair of single or double quotes.
        value = value[1:-1]
    return value
def my_flow_list(value):
    """Parse a flow list like "[a, b]" into a Python list of scalars."""
    body = M_YAML_LIST_PATTERN.match(value).group(1)
    return [my_read_one_line(item.strip()) for item in body.split(",")]
def my_multiline(lines, preserve_newlines=False):
    """Parse a YAML block scalar; return (remaining lines, text).

    With preserve_newlines (the "|" literal style) line breaks are kept;
    otherwise (the ">" folded style) lines are joined with spaces and blank
    lines become newlines.
    """
    # assume no explicit indentor (otherwise have to parse value)
    value = ""
    indent = my_leading_spaces(lines[0])
    was_empty = None
    while lines:
        line = lines.pop(0)
        is_empty = my_is_all_spaces(line)
        if is_empty:
            if preserve_newlines:
                value += "\n"
        elif my_leading_spaces(line) < indent:
            # A dedent ends the scalar; push the line back for the caller.
            lines.insert(0, line)
            break
        else:
            if preserve_newlines:
                # Idiom fix: compare with None via "is not", not "!=".
                # was_empty is None only before the first content line.
                if was_empty is not None:
                    value += "\n"
            else:
                if was_empty:
                    value += "\n"
                elif was_empty is False:
                    value += " "
            value += line[(indent):]
        was_empty = is_empty
    return (lines, value)
def my_is_all_spaces(line):
    """Return True when *line* contains nothing but whitespace."""
    return not line.strip()
def my_leading_spaces(line):
    """Return the number of leading space characters (not tabs) of *line*."""
    count = 0
    for char in line:
        if char != ' ':
            break
        count += 1
    return count
#######################################################################
# based on parseTestRecord.py
#######################################################################
# Matches trailing whitespace and any following blank lines.
_BLANK_LINES = r"([ \t]*[\r\n]{1,2})*"
# Matches the YAML frontmatter block (/*--- ... ---*/) plus trailing blanks.
# It must be non-greedy because test262-es2015/built-ins/Object/assign/Override.js contains a comment like yaml pattern
_YAML_PATTERN = re.compile(r"/\*---(.*?)---\*/" + _BLANK_LINES, re.DOTALL)
# Matches all known variants for the license block.
# https://github.com/tc39/test262/blob/705d78299cf786c84fa4df473eff98374de7135a/tools/lint/lib/checks/license.py
_LICENSE_PATTERN = re.compile(
    r'// Copyright( \([C]\))? (\w+) .+\. {1,2}All rights reserved\.[\r\n]{1,2}' +
    r'(' +
    r'// This code is governed by the( BSD)? license found in the LICENSE file\.' +
    r'|' +
    r'// See LICENSE for details.' +
    r'|' +
    r'// Use of this source code is governed by a BSD-style license that can be[\r\n]{1,2}' +
    r'// found in the LICENSE file\.' +
    r'|' +
    r'// See LICENSE or https://github\.com/tc39/test262/blob/master/LICENSE' +
    r')' + _BLANK_LINES, re.IGNORECASE)
def yaml_attr_parser(test_record, attrs, name, onerror=print):
    """Parse the YAML attribute block *attrs* into *test_record* in place."""
    parsed = yaml_load(attrs)
    if parsed is None:
        onerror("Failed to parse yaml in name %s" % name)
        return
    for key in parsed:
        # Historical alias: the frontmatter calls this field "info".
        target_key = "commentary" if key == "info" else key
        test_record[target_key] = parsed[key]
    # Every flag also becomes a (empty valued) key for easy membership tests.
    for flag in test_record.get('flags', []):
        test_record[flag] = ""
def find_license(src):
    """Return the license header text of *src*, or None when absent."""
    match = _LICENSE_PATTERN.search(src)
    return match.group(0) if match else None
def find_attrs(src):
    """Return (raw frontmatter block, stripped YAML body) or (None, None)."""
    match = _YAML_PATTERN.search(src)
    if match:
        return (match.group(0), match.group(1).strip())
    return (None, None)
def parse_test_record(src, name, onerror=print):
    """Split a test262 source file into license, YAML metadata and test body.

    Returns a dict with 'header' and 'test' keys plus one key per YAML
    frontmatter attribute; *onerror* is invoked with a message for every
    structural problem found (it does not abort parsing).
    """
    # Find the license block.
    header = find_license(src)
    # Find the YAML frontmatter.
    (frontmatter, attrs) = find_attrs(src)
    # YAML frontmatter is required for all tests.
    if frontmatter is None:
        onerror("Missing frontmatter: %s" % name)
    # The license should be placed before the frontmatter and there shouldn't be
    # any extra content between the license and the frontmatter.
    if header is not None and frontmatter is not None:
        header_idx = src.index(header)
        frontmatter_idx = src.index(frontmatter)
        if header_idx > frontmatter_idx:
            onerror("Unexpected license after frontmatter: %s" % name)
        # Search for any extra test content, but ignore whitespace only or comment lines.
        extra = src[header_idx + len(header): frontmatter_idx]
        if extra and any(line.strip() and not line.lstrip().startswith("//") for line in extra.split("\n")):
            onerror(
                "Unexpected test content between license and frontmatter: %s" % name)
    # Remove the license and YAML parts from the actual test content.
    test = src
    if frontmatter is not None:
        test = test.replace(frontmatter, '')
    if header is not None:
        test = test.replace(header, '')
    test_record = {}
    test_record['header'] = header.strip() if header else ''
    test_record['test'] = test
    if attrs:
        yaml_attr_parser(test_record, attrs, name, onerror)
    # Report if the license block is missing in non-generated tests.
    if header is None and "generated" not in test_record and "hashbang" not in name:
        onerror("No license found in: %s" % name)
    return test_record
#######################################################################
# based on test262.py
#######################################################################
class Test262Error(Exception):
    """Harness-level error; carries a human readable message."""

    def __init__(self, message):
        super(Test262Error, self).__init__()
        self.message = message
def report_error(error_string):
    """Abort the harness by raising a Test262Error with *error_string*."""
    raise Test262Error(error_string)
def build_options():
    """Build and return the optparse parser for the harness command line."""
    parser = optparse.OptionParser()
    parser.add_option("--command", default=None,
                      help="The command-line to run")
    parser.add_option("--tests", default=path.abspath('.'),
                      help="Path to the tests")
    parser.add_option("--exclude-list", default=None,
                      help="Path to the excludelist.xml file")
    parser.add_option("--cat", default=False, action="store_true",
                      help="Print packaged test code that would be run")
    parser.add_option("--summary", default=False, action="store_true",
                      help="Print summary after running tests")
    parser.add_option("--full-summary", default=False, action="store_true",
                      help="Print summary and test output after running tests")
    parser.add_option("--strict_only", default=False, action="store_true",
                      help="Test only strict mode")
    parser.add_option("--non_strict_only", default=False, action="store_true",
                      help="Test only non-strict mode")
    parser.add_option("--unmarked_default", default="both",
                      help="default mode for tests of unspecified strictness")
    parser.add_option("-j", "--job-count", default=None, action="store", type=int,
                      help="Number of parallel test jobs to run. In case of '0' cpu count is used.")
    parser.add_option("--logname", help="Filename to save stdout to")
    parser.add_option("--loglevel", default="warning",
                      help="sets log level to debug, info, warning, error, or critical")
    parser.add_option("--print-handle", default="print",
                      help="Command to print from console")
    parser.add_option("--list-includes", default=False, action="store_true",
                      help="List includes required by tests")
    # NOTE(review): the help text below duplicates --list-includes; it looks
    # like a copy-paste slip but is kept verbatim to preserve --help output.
    parser.add_option("--module-flag", default="-m",
                      help="List includes required by tests")
    return parser
def validate_options(options):
    """Abort with a harness error when mandatory options are missing/invalid."""
    if not options.command:
        report_error("A --command must be specified.")
    if not path.exists(options.tests):
        report_error("Couldn't find test path '%s'" % options.tests)
def is_windows():
    """Return True when running on a Windows platform."""
    return platform.system() in ('Windows', 'Microsoft')
class TempFile(object):
    """Named temporary file with an explicitly managed lifetime.

    The file is created immediately; call close() to release the OS level
    descriptor and dispose() to close and delete the file from disk.
    """

    def __init__(self, suffix="", prefix="tmp", text=False):
        self.suffix = suffix
        self.prefix = prefix
        self.text = text
        self.file_desc = None
        self.name = None
        self.is_closed = False
        self.open_file()

    def open_file(self):
        # mkstemp returns an already-open low level descriptor plus the path.
        (self.file_desc, self.name) = tempfile.mkstemp(
            suffix=self.suffix,
            prefix=self.prefix,
            text=self.text)

    def write(self, string):
        """Append *string* (str or bytes) to the file."""
        # Bug fix: os.write() requires bytes on Python 3; encode str input.
        if isinstance(string, str):
            string = string.encode('utf8', 'ignore')
        os.write(self.file_desc, string)

    def read(self):
        """Return the whole current contents of the file as text."""
        # Bug fix: this used the Python 2 only builtin file(), which raises
        # NameError on Python 3; open() works on both and is closed reliably.
        with open(self.name) as file_desc:
            return file_desc.read()

    def close(self):
        """Close the descriptor exactly once; later calls are no-ops."""
        if not self.is_closed:
            self.is_closed = True
            os.close(self.file_desc)

    def dispose(self):
        """Close (if still open) and remove the temporary file."""
        try:
            self.close()
            os.unlink(self.name)
        except OSError as exception:
            logging.error("Error disposing temp file: %s", str(exception))
class TestResult(object):
    """Outcome of one test execution: exit code plus captured output."""

    def __init__(self, exit_code, stdout, stderr, case):
        self.exit_code = exit_code
        self.stdout = stdout
        self.stderr = stderr
        self.case = case

    def report_outcome(self, long_format):
        """Print a human readable report; verbose when *long_format* is set."""
        name = self.case.get_name()
        mode = self.case.get_mode()
        if not self.has_unexpected_outcome():
            if self.case.is_negative():
                print("%s failed in %s as expected" % (name, mode))
            else:
                print("%s passed in %s" % (name, mode))
            return
        if self.case.is_negative():
            print("=== %s passed in %s, but was expected to fail ===" % (name, mode))
            print("--- expected error: %s ---\n" % self.case.get_negative_type())
            return
        if long_format:
            print("=== %s failed in %s ===" % (name, mode))
        else:
            print("%s in %s: " % (name, mode))
        self.write_output(sys.stdout)
        if long_format:
            print("===")

    def write_output(self, target):
        """Write captured stdout/stderr and the exit code to *target*."""
        out = self.stdout.strip()
        if out:
            target.write("--- output --- \n %s" % out)
        error = self.stderr.strip()
        if error:
            target.write("--- errors --- \n %s" % error)
        target.write("\n--- exit code: %d ---\n" % self.exit_code)

    def has_failed(self):
        return self.exit_code != 0

    def async_has_failed(self):
        # Async tests must print this marker to count as completed.
        return 'Test262:AsyncTestComplete' not in self.stdout

    def has_unexpected_outcome(self):
        """True when the result contradicts the test's expectations."""
        if self.case.is_async_test():
            return self.has_failed() or self.async_has_failed()
        if self.case.is_negative():
            expected_failure = self.has_failed() and self.case.negative_match(self.get_error_output())
            return not expected_failure
        return self.has_failed()

    def get_error_output(self):
        """Prefer stderr; fall back to stdout when stderr is empty."""
        return self.stderr if self.stderr else self.stdout
class TestCase(object):
    """One test262 test file, bound to strict or non-strict execution mode.

    Loads the file, parses its YAML frontmatter (via parse_test_record),
    assembles the final source (harness includes + test body) and runs it
    through the configured engine command template.
    """

    def __init__(self, suite, name, full_path, strict_mode, command_template, module_flag):
        self.suite = suite
        self.name = name
        self.full_path = full_path
        self.strict_mode = strict_mode
        with open(self.full_path) as file_desc:
            self.contents = file_desc.read()
        test_record = parse_test_record(self.contents, name)
        self.test = test_record["test"]
        del test_record["test"]
        del test_record["header"]
        test_record.pop("commentary", None) # do not throw if missing
        self.test_record = test_record
        self.command_template = command_template
        self.module_flag = module_flag
        self.validate()

    def negative_match(self, stderr):
        """Return a match when *stderr* mentions the expected error type."""
        neg = re.compile(self.get_negative_type())
        return re.search(neg, stderr)

    def get_negative(self):
        if not self.is_negative():
            return None
        return self.test_record["negative"]

    def get_negative_type(self):
        """Expected error type of a negative test (dict or legacy scalar form)."""
        negative = self.get_negative()
        if isinstance(negative, dict) and "type" in negative:
            return negative["type"]
        return negative

    def get_negative_phase(self):
        """Phase ("parse", "early", ...) in which the failure is expected."""
        negative = self.get_negative()
        return negative and "phase" in negative and negative["phase"]

    def get_name(self):
        return path.join(*self.name)

    def get_mode(self):
        if self.strict_mode:
            return "strict mode"
        return "non-strict mode"

    def get_path(self):
        return self.name

    def is_negative(self):
        return 'negative' in self.test_record

    def is_only_strict(self):
        return 'onlyStrict' in self.test_record

    def is_no_strict(self):
        return 'noStrict' in self.test_record or self.is_raw()

    def is_raw(self):
        return 'raw' in self.test_record

    def is_async_test(self):
        return 'async' in self.test_record or '$DONE' in self.test

    def is_module(self):
        return 'module' in self.test_record

    def get_include_list(self):
        if self.test_record.get('includes'):
            return self.test_record['includes']
        return []

    def get_additional_includes(self):
        return '\n'.join([self.suite.get_include(include) for include in self.get_include_list()])

    def get_source(self):
        """Assemble the full JS source: harness files + includes + test body."""
        if self.is_raw():
            return self.test
        source = self.suite.get_include("sta.js") + \
            self.suite.get_include("assert.js")
        if self.is_async_test():
            source = source + \
                self.suite.get_include("timer.js") + \
                self.suite.get_include("doneprintHandle.js").replace(
                    'print', self.suite.print_handle)
        source = source + \
            self.get_additional_includes() + \
            self.test + '\n'
        if self.get_negative_phase() == "early":
            # An early-phase negative test must fail before execution starts.
            source = ("throw 'Expected an early error, but code was executed.';\n" +
                      source)
        if self.strict_mode:
            source = '"use strict";\nvar strict_mode = true;\n' + source
        else:
            # add comment line so line numbers match in both strict and non-strict version
            source = '//"no strict";\nvar strict_mode = false;\n' + source
        return source

    @staticmethod
    def instantiate_template(template, params):
        """Substitute {{name}} placeholders in *template* from *params*."""
        def get_parameter(match):
            key = match.group(1)
            return params.get(key, match.group(0))
        return re.sub(r"\{\{(\w+)\}\}", get_parameter, template)

    @staticmethod
    def execute(command):
        """Run *command* under a watchdog timer; return (code, out, err)."""
        if is_windows():
            args = '%s' % command
        else:
            args = command.split(" ")
        stdout = TempFile(prefix="test262-out-")
        stderr = TempFile(prefix="test262-err-")
        try:
            logging.info("exec: %s", str(args))
            process = subprocess.Popen(
                args,
                shell=False,
                stdout=stdout.file_desc,
                stderr=stderr.file_desc
            )
            # Kill the child when it exceeds the per-test timeout.
            timer = threading.Timer(TEST262_CASE_TIMEOUT, process.kill)
            timer.start()
            code = process.wait()
            timer.cancel()
            out = stdout.read()
            err = stderr.read()
        finally:
            stdout.dispose()
            stderr.dispose()
        return (code, out, err)

    def run_test_in(self, tmp):
        """Write the assembled source into *tmp* and execute the engine on it."""
        tmp.write(self.get_source())
        tmp.close()
        if self.is_module():
            arg = self.module_flag + ' ' + tmp.name
        else:
            arg = tmp.name
        command = TestCase.instantiate_template(self.command_template, {
            'path': arg
        })
        (code, out, err) = TestCase.execute(command)
        return TestResult(code, out, err, self)

    def run(self):
        """Run the test in a throw-away temp file; the file is always removed."""
        tmp = TempFile(suffix=".js", prefix="test262-", text=True)
        try:
            result = self.run_test_in(tmp)
        finally:
            tmp.dispose()
        return result

    def print_source(self):
        print(self.get_source())

    def validate(self):
        """Sanity check the frontmatter flags; raises TypeError when invalid."""
        flags = self.test_record.get("flags")
        phase = self.get_negative_phase()
        if phase not in [None, False, "parse", "early", "runtime", "resolution"]:
            raise TypeError("Invalid value for negative phase: " + phase)
        if not flags:
            return
        if 'raw' in flags:
            if 'noStrict' in flags:
                raise TypeError("The `raw` flag implies the `noStrict` flag")
            elif 'onlyStrict' in flags:
                raise TypeError(
                    "The `raw` flag is incompatible with the `onlyStrict` flag")
            elif self.get_include_list():
                raise TypeError(
                    "The `raw` flag is incompatible with the `includes` tag")
def pool_init():
    """Worker-process initializer: ignore CTRL+C so the parent handles it."""
    signal.signal(signal.SIGINT, signal.SIG_IGN)
def test_case_run_process(case):
return case.run()
class ProgressIndicator(object):
    """Accumulates pass/fail counts while results arrive."""

    def __init__(self, count):
        self.count = count
        self.succeeded = 0
        self.failed = 0
        self.failed_tests = []

    def has_run(self, result):
        """Record one finished test and echo its outcome."""
        result.report_outcome(True)
        if not result.has_unexpected_outcome():
            self.succeeded += 1
            return
        self.failed += 1
        self.failed_tests.append(result)
def make_plural(num):
    """Return (num, suffix) where suffix pluralizes the word it follows."""
    return (num, "" if num == 1 else "s")
def percent_format(partial, total):
    """Format *partial* of *total* as "N test(s) (P.P%)"."""
    (num, suffix) = make_plural(partial)
    return "%i test%s (%.1f%%)" % (num, suffix, (100.0 * partial) / total)
class TestSuite(object):
    """Discovers, filters, runs and reports the test262 suite for an engine."""

    def __init__(self, options):
        self.test_root = path.join(options.tests, 'test')
        self.lib_root = path.join(options.tests, 'harness')
        self.strict_only = options.strict_only
        self.non_strict_only = options.non_strict_only
        self.unmarked_default = options.unmarked_default
        self.print_handle = options.print_handle
        self.include_cache = {}
        self.exclude_list_path = options.exclude_list
        self.module_flag = options.module_flag
        self.logf = None

    def _load_excludes(self):
        """Return the set of test ids listed in the exclude-list XML file."""
        if self.exclude_list_path and os.path.exists(self.exclude_list_path):
            xml_document = xml.dom.minidom.parse(self.exclude_list_path)
            xml_tests = xml_document.getElementsByTagName("test")
            return {x.getAttribute("id") for x in xml_tests}
        return set()

    def validate(self):
        """Abort when the test or harness directory is missing."""
        if not path.exists(self.test_root):
            report_error("No test repository found")
        if not path.exists(self.lib_root):
            report_error("No test library found")

    @staticmethod
    def is_hidden(test_path):
        return test_path.startswith('.') or test_path == 'CVS'

    @staticmethod
    def is_test_case(test_path):
        # *_FIXTURE.js files are modules imported by tests, not tests.
        return test_path.endswith('.js') and not test_path.endswith('_FIXTURE.js')

    @staticmethod
    def should_run(rel_path, tests):
        """True when *rel_path* matches any of the requested substrings."""
        if not tests:
            return True
        for test in tests:
            if test in rel_path:
                return True
        return False

    def get_include(self, name):
        """Return (and cache) the contents of a harness include file."""
        # Idiom fix: "name not in" instead of "not name in".
        if name not in self.include_cache:
            static = path.join(self.lib_root, name)
            if path.exists(static):
                with open(static) as file_desc:
                    contents = file_desc.read()
                    contents = re.sub(r'\r\n', '\n', contents)
                    self.include_cache[name] = contents + "\n"
            else:
                report_error("Can't find: " + static)
        return self.include_cache[name]

    def enumerate_tests(self, tests, command_template):
        """Walk test_root and build the list of TestCase objects to run."""
        exclude_list = self._load_excludes()
        logging.info("Listing tests in %s", self.test_root)
        cases = []
        for root, dirs, files in os.walk(self.test_root):
            for hidden_dir in [x for x in dirs if self.is_hidden(x)]:
                dirs.remove(hidden_dir)
            dirs.sort()
            for test_path in filter(TestSuite.is_test_case, sorted(files)):
                full_path = path.join(root, test_path)
                if full_path.startswith(self.test_root):
                    rel_path = full_path[len(self.test_root)+1:]
                else:
                    logging.warning("Unexpected path %s", full_path)
                    rel_path = full_path
                if self.should_run(rel_path, tests):
                    basename = path.basename(full_path)[:-3]
                    name = rel_path.split(path.sep)[:-1] + [basename]
                    if rel_path in exclude_list:
                        print('Excluded: ' + rel_path)
                    else:
                        # A test may run in strict, non-strict or both modes,
                        # depending on its flags and the configured default.
                        if not self.non_strict_only:
                            strict_case = TestCase(self, name, full_path, True, command_template, self.module_flag)
                            if not strict_case.is_no_strict():
                                if strict_case.is_only_strict() or self.unmarked_default in ['both', 'strict']:
                                    cases.append(strict_case)
                        if not self.strict_only:
                            non_strict_case = TestCase(self, name, full_path, False, command_template, self.module_flag)
                            if not non_strict_case.is_only_strict():
                                if non_strict_case.is_no_strict() or self.unmarked_default in ['both', 'non_strict']:
                                    cases.append(non_strict_case)
        logging.info("Done listing tests")
        return cases

    def print_summary(self, progress, logfile):
        """Print the aggregated results; mirror them into the log file."""
        def write(string):
            if logfile:
                self.logf.write(string + "\n")
            print(string)
        print("")
        write("=== Summary ===")
        count = progress.count
        succeeded = progress.succeeded
        failed = progress.failed
        write(" - Ran %i test%s" % make_plural(count))
        if progress.failed == 0:
            write(" - All tests succeeded")
        else:
            write(" - Passed " + percent_format(succeeded, count))
            write(" - Failed " + percent_format(failed, count))
        positive = [c for c in progress.failed_tests if not c.case.is_negative()]
        negative = [c for c in progress.failed_tests if c.case.is_negative()]
        if positive:
            print("")
            write("Failed Tests")
            for result in positive:
                write("  %s in %s" % (result.case.get_name(), result.case.get_mode()))
        if negative:
            print("")
            write("Expected to fail but passed ---")
            for result in negative:
                write("  %s in %s" % (result.case.get_name(), result.case.get_mode()))

    def print_failure_output(self, progress, logfile):
        """Dump the captured output of every failed test."""
        for result in progress.failed_tests:
            if logfile:
                self.write_log(result)
            print("")
            result.report_outcome(False)

    def run(self, command_template, tests, print_summary, full_summary, logname, job_count=1):
        """Enumerate and run the tests, optionally in parallel.

        Returns the number of failed tests.
        """
        if "{{path}}" not in command_template:
            command_template += " {{path}}"
        cases = self.enumerate_tests(tests, command_template)
        if not cases:
            report_error("No tests to run")
        progress = ProgressIndicator(len(cases))
        if logname:
            self.logf = open(logname, "w")
        if job_count == 1:
            for case in cases:
                result = case.run()
                if logname:
                    self.write_log(result)
                progress.has_run(result)
        else:
            if job_count == 0:
                job_count = None # uses multiprocessing.cpu_count()
            pool = multiprocessing.Pool(processes=job_count, initializer=pool_init)
            try:
                for result in pool.imap(test_case_run_process, cases):
                    if logname:
                        self.write_log(result)
                    progress.has_run(result)
            except KeyboardInterrupt:
                pool.terminate()
                pool.join()
        if print_summary:
            self.print_summary(progress, logname)
        if full_summary:
            self.print_failure_output(progress, logname)
        else:
            print("")
            print("Use --full-summary to see output from failed tests")
            print("")
        return progress.failed

    def write_log(self, result):
        """Append one result to the log file, mirroring report_outcome."""
        name = result.case.get_name()
        mode = result.case.get_mode()
        if result.has_unexpected_outcome():
            if result.case.is_negative():
                self.logf.write(
                    "=== %s passed in %s, but was expected to fail === \n" % (name, mode))
                # Bug fix: this called the nonexistent GetNegativeType();
                # the actual TestCase method is get_negative_type(), so the
                # old code raised AttributeError on an unexpected negative pass.
                self.logf.write("--- expected error: %s ---\n" % result.case.get_negative_type())
                result.write_output(self.logf)
            else:
                self.logf.write("=== %s failed in %s === \n" % (name, mode))
                result.write_output(self.logf)
            self.logf.write("===\n")
        elif result.case.is_negative():
            self.logf.write("%s failed in %s as expected \n" % (name, mode))
        else:
            self.logf.write("%s passed in %s \n" % (name, mode))

    def print_source(self, tests):
        """Print the assembled source of the first matching test."""
        cases = self.enumerate_tests(tests, "")
        if cases:
            cases[0].print_source()

    def list_includes(self, tests):
        """Print a histogram of harness includes required by *tests*."""
        cases = self.enumerate_tests(tests, "")
        includes_dict = Counter()
        for case in cases:
            includes = case.get_include_list()
            includes_dict.update(includes)
        print(includes_dict)
def main():
    """Entry point: parse options, configure logging, dispatch the action."""
    parser = build_options()
    (options, args) = parser.parse_args()
    validate_options(options)
    test_suite = TestSuite(options)
    test_suite.validate()
    # Map the textual --loglevel option onto the logging module's levels;
    # unknown values leave logging unconfigured, as before.
    log_levels = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL,
    }
    if options.loglevel in log_levels:
        logging.basicConfig(level=log_levels[options.loglevel])
    if options.cat:
        test_suite.print_source(args)
        return 0
    if options.list_includes:
        test_suite.list_includes(args)
        return 0
    return test_suite.run(options.command, args,
                          options.summary or options.full_summary,
                          options.full_summary,
                          options.logname,
                          options.job_count)
# Script entry point: harness-level errors are reported as a short message
# plus exit status 1 instead of an unhandled traceback.
if __name__ == '__main__':
    try:
        sys.exit(main())
    except Test262Error as exception:
        print("Error: %s" % exception.message)
        sys.exit(1)

View File

@@ -0,0 +1,67 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import signal
import subprocess
import sys
# ANSI escape sequences used to colorize the PASS/FAIL output.
TERM_NORMAL = '\033[0m'
TERM_RED = '\033[1;31m'
TERM_GREEN = '\033[1;32m'
def set_timezone(timezone):
    """Switch the system timezone via tzutil (Windows only)."""
    assert sys.platform == 'win32', "set_timezone is Windows only function"
    tzutil_cmd = ['cmd', '/S', '/C', 'tzutil', '/s', timezone]
    subprocess.call(tzutil_cmd)
def set_timezone_and_exit(timezone):
    """Restore *timezone*, then terminate with exit status 1 (Windows only)."""
    assert sys.platform == 'win32', "set_timezone_and_exit is Windows only function"
    set_timezone(timezone)
    sys.exit(1)
def get_timezone():
    """Return the current system timezone name reported by tzutil (Windows only)."""
    assert sys.platform == 'win32', "get_timezone is Windows only function"
    tzutil_cmd = ['cmd', '/S', '/C', 'tzutil', '/g']
    return subprocess.check_output(tzutil_cmd, universal_newlines=True)
def set_sighdl_to_reset_timezone(timezone):
    """Make CTRL+C restore *timezone* before exiting (Windows only)."""
    assert sys.platform == 'win32', "install_signal_handler_to_restore_timezone is Windows only function"
    def restore_and_exit(signum, frame):
        set_timezone_and_exit(timezone)
    signal.signal(signal.SIGINT, restore_and_exit)
def print_test_summary(summary_string, total, passed, failed):
    """Print colored TOTAL/PASS/FAIL totals and the success percentage.

    *total* must be non-zero (callers guard against an empty test set).
    """
    lines = [
        "\n[summary] %s\n" % summary_string,
        "TOTAL: %d" % total,
        "%sPASS: %d%s" % (TERM_GREEN, passed, TERM_NORMAL),
        "%sFAIL: %d%s\n" % (TERM_RED, failed, TERM_NORMAL),
    ]
    success_color = TERM_GREEN if passed == total else TERM_RED
    lines.append("%sSuccess: %d%%%s" % (success_color, passed * 100 / total, TERM_NORMAL))
    for line in lines:
        print(line)
def print_test_result(tested, total, is_passed, passed_string, test_path, is_snapshot_generation=None):
    """Print one colored "[ n/total] STATUS: path" result line.

    is_snapshot_generation: None for normal runs, True/False to tag the
    snapshot generation/execution phase respectively.
    """
    if is_snapshot_generation is None:
        snapshot_string = ''
    else:
        snapshot_string = ' (generate snapshot)' if is_snapshot_generation else ' (execute snapshot)'
    color = TERM_GREEN if is_passed else TERM_RED
    print("[%4d/%4d] %s%s: %s%s%s" % (tested, total, color, passed_string, test_path, snapshot_string, TERM_NORMAL))

View File

@@ -0,0 +1,37 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from os import path
# Base directories of the project tree, derived from this file's location.
TOOLS_DIR = path.dirname(path.abspath(__file__))
PROJECT_DIR = path.normpath(path.join(TOOLS_DIR, '..'))

# Test suite locations.
DEBUGGER_TESTS_DIR = path.join(PROJECT_DIR, 'tests/debugger')
JERRY_TESTS_DIR = path.join(PROJECT_DIR, 'tests/jerry')
TEST262_TEST_SUITE_DIR = path.join(PROJECT_DIR, 'tests/test262')

# Helper scripts invoked by the runners and the CI checks.
BUILD_SCRIPT = path.join(TOOLS_DIR, 'build.py')
CPPCHECK_SCRIPT = path.join(TOOLS_DIR, 'check-cppcheck.sh')
DEBUGGER_CLIENT_SCRIPT = path.join(PROJECT_DIR, 'jerry-debugger/jerry_client.py')
DEBUGGER_TEST_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-debugger-test.sh')
DOXYGEN_SCRIPT = path.join(TOOLS_DIR, 'check-doxygen.sh')
LICENSE_SCRIPT = path.join(TOOLS_DIR, 'check-license.py')
MAGIC_STRINGS_SCRIPT = path.join(TOOLS_DIR, 'check-magic-strings.sh')
PYLINT_SCRIPT = path.join(TOOLS_DIR, 'check-pylint.sh')
SIGNED_OFF_SCRIPT = path.join(TOOLS_DIR, 'check-signed-off.sh')
TEST_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-test-suite.py')
TEST262_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-test-suite-test262.py')
VERA_SCRIPT = path.join(TOOLS_DIR, 'check-vera.sh')
UNITTEST_RUNNER_SCRIPT = path.join(TOOLS_DIR, 'runners/run-unittests.py')

View File

@@ -0,0 +1,28 @@
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Build settings for the math test-vector generator.
CC=gcc
LDFLAGS=-lm
GENS=gen-test-math

# Default target: build every generator listed in GENS.
.PHONY: build
build: $(GENS)

.PHONY: clean
clean:
	rm -f $(GENS)

# Compile the generator from its single C source, linking against libm.
gen-test-math: gen-test-math.c
	$(CC) $< -o $@ $(LDFLAGS)

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,119 @@
#!/bin/bash
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Require exactly one argument: the local clone of the gh-pages branch.
if [ $# -ne 1 ]; then
  echo "Please, specify your gh-pages clone directory: update-webpage <gh-pages clone dir>"
  exit 1
fi

gh_pages_dir=$1
# Markdown sources live in ../docs relative to this script.
docs_dir=`dirname $(readlink -f $0)`"/../docs"

# Known documentation files, by their fixed names in docs/.
GETTING_STARTED_MD="00.GETTING-STARTED.md"
CONFIGURATION_MD="01.CONFIGURATION.md"
API_REFERENCE_MD="02.API-REFERENCE.md"
API_EXAMPLES_MD="03.API-EXAMPLE.md"
INTERNALS_MD="04.INTERNALS.md"
PORT_API_MD="05.PORT-API.md"
REFERENCE_COUNTING_MD="06.REFERENCE-COUNTING.md"
DEBUGGER_MD="07.DEBUGGER.md"
CODING_STANDARDS_MD="08.CODING-STANDARDS.md"
EXT_REFERENCE_ARG_MD="09.EXT-REFERENCE-ARG.md"
EXT_REFERENCE_HANDLER_MD="10.EXT-REFERENCE-HANDLER.md"
EXT_REFERENCE_AUTORELEASE_MD="11.EXT-REFERENCE-AUTORELEASE.md"
EXT_REFERENCE_MODULE_MD="12.EXT-REFERENCE-MODULE.md"
DEBUGGER_TRANSPORT_MD="13.DEBUGGER-TRANSPORT.md"
EXT_REFERENCE_HANDLE_SCOPE_MD="14.EXT-REFERENCE-HANDLE-SCOPE.md"
MODULE_SYSTEM_MD="15.MODULE-SYSTEM.md"
MIGRATION_GUIDE_MD="16.MIGRATION-GUIDE.md"

# Map each document file name to its page title on the website.
declare -A titles
titles[$GETTING_STARTED_MD]="Getting Started"
titles[$CONFIGURATION_MD]="Configuration"
titles[$API_REFERENCE_MD]="API Reference"
titles[$API_EXAMPLES_MD]="API Examples"
titles[$INTERNALS_MD]="Internals"
titles[$PORT_API_MD]="Port API"
titles[$REFERENCE_COUNTING_MD]="Reference Counting"
titles[$DEBUGGER_MD]="Debugger"
titles[$CODING_STANDARDS_MD]="Coding Standards"
titles[$EXT_REFERENCE_ARG_MD]="'Extension API: Argument Validation'"
titles[$EXT_REFERENCE_HANDLER_MD]="'Extension API: External Function Handlers'"
titles[$EXT_REFERENCE_AUTORELEASE_MD]="'Extension API: Autorelease Values'"
titles[$EXT_REFERENCE_MODULE_MD]="'Extension API: Module Support'"
titles[$DEBUGGER_TRANSPORT_MD]="'Debugger Transport'"
titles[$EXT_REFERENCE_HANDLE_SCOPE_MD]="'Extension API: Handle Scope'"
titles[$MODULE_SYSTEM_MD]="'Module System (EcmaScript2015)'"
titles[$MIGRATION_GUIDE_MD]="Migration Guide"

for docfile in $docs_dir/*.md; do
  docfile_base=`basename $docfile`
  # Permalink is the middle part of the file name, lower-cased (e.g. getting-started).
  permalink=`echo $docfile_base | cut -d'.' -f 2 | tr '[:upper:]' '[:lower:]'`
  # Fallback title when the file is not listed in the titles map.
  missing_title=`echo $permalink | tr '-' ' '`
  # documents 00-03 belong to the navigation bar; the rest are plain documents
  category=$([[ $docfile_base =~ ^0[0-3] ]] && echo "navbar" || echo "documents")
  # generate appropriate Jekyll front-matter header for each *.md
  echo "---" > $gh_pages_dir/$docfile_base
  echo "layout: page" >> $gh_pages_dir/$docfile_base
  echo "title: ${titles[$docfile_base]:-$missing_title}" >> $gh_pages_dir/$docfile_base
  echo "category: ${category}" >> $gh_pages_dir/$docfile_base
  echo "permalink: /$permalink/" >> $gh_pages_dir/$docfile_base
  echo "---" >> $gh_pages_dir/$docfile_base
  echo >> $gh_pages_dir/$docfile_base
  echo "* toc" >> $gh_pages_dir/$docfile_base
  echo "{:toc}" >> $gh_pages_dir/$docfile_base
  echo >> $gh_pages_dir/$docfile_base
  # the file itself, removing underscores inside intra-page anchor links
  gawk \
  '
  !/\[.*\]\(#/ {
    print $0
  }
  /\[.*\]\(#/ {
    link_start_pos = index($0, "](#");
    line_beg = substr($0, 1, link_start_pos+2);
    line_remain = substr($0, link_start_pos+3);
    link_end_pos = index(line_remain, ")")
    link = substr(line_remain, 1, link_end_pos-1);
    line_end = substr(line_remain, link_end_pos)
    printf "%s%s%s\n", line_beg, link, line_end
  }
  ' $docfile >> $gh_pages_dir/$docfile_base
  # fix image links so they resolve under the site URL
  sed -i -r -e 's/^!\[.*\]\(/&{{ site.github.url }}\//' $gh_pages_dir/$docfile_base
  sed -i -r -e 's/^!\[.*\]\(\{\{ site\.github\.url \}\}\/img.*$/&{: class="thumbnail center-block img-responsive" }/' $gh_pages_dir/$docfile_base
  # turn filenames into permalinks
  sed -i -r -e 's/docs\/[0-9]+\.(.*)\.md/\L\1/g' $gh_pages_dir/$docfile_base
  # replace span tags to div
  sed -i 's/<span class=/<div class=/g' $gh_pages_dir/$docfile_base
  sed -i 's/<\/span>/<\/div>/g' $gh_pages_dir/$docfile_base
  # remove table header separators
  sed -i '/^| ---/d' $gh_pages_dir/$docfile_base
  # update images
  cp -Ru $docs_dir/img $gh_pages_dir
done

View File

@@ -0,0 +1,19 @@
# vera++ profile: the list of jerry_* rule scripts to run over the sources.
# NOTE: the braced body is list data — each element names a rule script;
# do not place comments inside the braces.
set rules {
  jerry_always_curly
  jerry_braces_on_separate_line
  jerry_braces_same_line_or_column
  jerry_comment_function_end
  jerry_funcname_space_parentheses
  jerry_identifier_no_space_bracket
  jerry_indentation
  jerry_max_line_length
  jerry_no_space_after_opening_parentheses
  jerry_no_space_before_closing_parentheses
  jerry_no_tabs
  jerry_no_trailing_spaces
  jerry_no_leading_or_trailing_empty_line
  jerry_no_consecutive_empty_lines
  jerry_pointer_declarator_space
  jerry_switch_case
  jerry_typecast_space_parentheses
}

View File

@@ -0,0 +1,87 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: every if/else/for/while/do body must be enclosed in braces.
# Token-driven state machine: "expression" state consumes the parenthesized
# control expression; "control" state then expects an opening brace.
foreach file_name [getSourceFileNames] {
  set state "control"
  set do_marks {}              ;# stack: 1 marks a brace opened by a do-block
  set prev_tok_type ""
  set prev_ctrl ""             ;# token of the control keyword awaiting its brace
  set expect_while false
  set expect_open_brace false
  set paren_count 0
  foreach token [getTokens $file_name 1 0 -1 -1 {if do while else for leftparen rightparen semicolon leftbrace rightbrace}] {
    set tok_val [lindex $token 0]
    set line_num [lindex $token 1]
    set col_num [lindex $token 2]
    set tok_type [lindex $token 3]
    if {$state == "expression"} {
      # Inside the control expression: track parentheses until they balance.
      # puts "expression $paren_count $tok_type ($line_num , $col_num)"
      if {$tok_type == "leftparen"} {
        incr paren_count
      } elseif {$tok_type == "rightparen"} {
        incr paren_count -1
        if {$paren_count == 0} {
          set state "control"
          set expect_open_brace true
        } elseif {$paren_count < 0 } {
          report $file_name $line_num "unexpected right parentheses"
        }
      } elseif {$tok_type != "semicolon"} {
        report $file_name $line_num "unexpected token: $tok_type"
      }
    } else {
      if {$expect_open_brace == true} {
        # "else if" is the one allowed brace-less continuation.
        if {$tok_type == "if" && $prev_tok_type == "else"} {
          # empty
        } elseif {$tok_type != "leftbrace"} {
          report $file_name [lindex $prev_ctrl 1] "brace after \'[lindex $prev_ctrl 3]\' required"
        }
        set expect_open_brace false
      }
      if {$tok_type == "while" && ($expect_while == true || [lindex $prev_ctrl 3] == "do")} {
        # The trailing "while" of a do-while needs no brace of its own.
        set expect_while false
        set prev_ctrl ""
      } elseif {$tok_type in {if for while}} {
        set state "expression"
        set prev_ctrl $token
      } elseif {$tok_type in {do else}} {
        set expect_open_brace true
        set prev_ctrl $token
      } elseif {$tok_type == "leftbrace"} {
        # Remember whether this brace belongs to a do-block.
        if {[lindex $prev_ctrl 3] == "do"} {
          lappend do_marks 1
        } else {
          lappend do_marks 0
        }
        set prev_ctrl ""
      } elseif {$tok_type == "rightbrace"} {
        if {[llength $do_marks] > 0} {
          if {[lindex $do_marks end] == 1} {
            set expect_while true
          }
          set do_marks [lreplace $do_marks end end]
        } else {
          report $file_name $line_num "unmatched brace"
        }
      }
    }
    set prev_tok_type $tok_type
  }
}

View File

@@ -0,0 +1,115 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: braces must stand on their own lines, except for
# struct/union/enum type names after the closing brace and for
# brace initializers (preceded by an assignment).
foreach file_name [getSourceFileNames] {
  set state "normal"
  set lines {}               ;# stack of leftbrace line numbers
  set cols {}                ;# stack of leftbrace columns
  set struct_marks {}        ;# stack: 1 = type definition, 2 = initializer, 0 = other
  set expect_struct_name false
  set prev_tok ""
  set def_start false
  set expect_newline false
  set check_newline true
  foreach token [getTokens $file_name 1 0 -1 -1 {}] {
    set tok_val [lindex $token 0]
    set line_num [lindex $token 1]
    set col_num [lindex $token 2]
    set tok_type [lindex $token 3]
    # Skip the whole body of a #define: macro code is not checked.
    if {$state == "macro"} {
      if {$col_num == 0} {
        set state "normal"
      } else {
        continue
      }
    }
    if {$tok_type in {space ccomment cppcomment newline}} {
      continue
    }
    if {$tok_type == "pp_define"} {
      set state "macro"
      set prev_tok ""
      set def_start false
      continue
    }
    if {$expect_struct_name == true} {
      if {$tok_type == "identifier" && $line_num != [lindex $prev_tok 1]} {
        report $file_name $line_num "type name should be on the same line with the rightbrace"
      }
      set expect_struct_name false
    }
    # check that rightbrace and typename (in struct, union and enum definitons) are on the same line
    if {$tok_type in {struct enum union}} {
      set def_start true
    } elseif {$tok_type == "semicolon"} {
      set def_start false
    } elseif {$tok_type == "leftbrace"} {
      lappend cols $col_num
      lappend lines $line_num
      if {$def_start == true} {
        lappend struct_marks 1
        set def_start false
      } elseif {[lindex $prev_tok 3] == "assign"} {
        lappend struct_marks 2
        set check_newline false
      } else {
        lappend struct_marks 0
      }
    } elseif {$tok_type == "rightbrace"} {
      if {[llength $lines] > 0} {
        if {[lindex $struct_marks end] == 1} {
          set expect_struct_name true
          set check_newline false
        } elseif {[lindex $struct_marks end] == 2} {
          set check_newline false
        }
        set lines [lreplace $lines end end]
        set cols [lreplace $cols end end]
        set struct_marks [lreplace $struct_marks end end]
      } else {
        report $file_name $line_num "unmatched brace"
      }
    }
    # check that braces are on separate lines
    if {$check_newline == true} {
      if {$expect_newline == true} {
        if {$tok_type == "semicolon"} {
          # empty
        } elseif {[lindex $prev_tok 1] == $line_num} {
          report $file_name $line_num "brace should be placed on a separate line"
        } else {
          set expect_newline false
        }
      } elseif {$tok_type in {leftbrace rightbrace}} {
        if {[lindex $prev_tok 1] == $line_num} {
          report $file_name $line_num "brace should be placed on a separate line"
        }
        set expect_newline true
      }
    } else {
      set check_newline true
    }
    set prev_tok $token
  }
}

View File

@@ -0,0 +1,61 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: a closing brace must share either the line or the column
# of its matching opening brace.
foreach file_name [getSourceFileNames] {
  set state "normal"
  set lines {}    ;# stack of leftbrace line numbers
  set cols {}     ;# stack of leftbrace columns
  foreach token [getTokens $file_name 1 0 -1 -1 {}] {
    set tok_val [lindex $token 0]
    set line_num [lindex $token 1]
    set col_num [lindex $token 2]
    set tok_type [lindex $token 3]
    # Skip the whole body of a #define: macro code is not checked.
    if {$state == "macro"} {
      if {$col_num == 0} {
        set state "normal"
      } else {
        set prev_tok_line $line_num
        continue
      }
    }
    if {$tok_type in {space ccomment cppcomment newline}} {
      continue
    }
    if {$tok_type == "pp_define"} {
      set state "macro"
      continue
    }
    if {$tok_type == "leftbrace"} {
      lappend cols $col_num
      lappend lines $line_num
    } elseif {$tok_type == "rightbrace"} {
      if {[llength $lines] > 0} {
        # Violation only when neither the line nor the column matches.
        if {[lindex $lines end] != $line_num && [lindex $cols end] != $col_num} {
          report $file_name $line_num "matching braces should be on the same line or column"
        }
        set lines [lreplace $lines end end]
        set cols [lreplace $cols end end]
      } else {
        report $file_name $line_num "unmatched brace"
      }
    }
  }
}

View File

@@ -0,0 +1,52 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: every function must end with a "} /* function_name */"
# comment, and the comment must match the function's name.
# Works line-by-line: a signature-looking line records the name, a lone
# "{" marks the function start, a lone "}" (optionally with comment)
# marks the end.
foreach fileName [getSourceFileNames] {
  set funcStart 0
  set funcName ""
  set lineNumber 1
  foreach line [getAllLines $fileName] {
    # Looks like a function signature: capture the function name.
    if {[regexp {^((static |const )*\w+ )*\w+ \(.*[,\)]} $line]} {
      set type {}
      set modifier {}
      if {$funcStart == 0} {
        regexp {^((static |const )*\w+ )*(\w+) \(} $line matched type modifier funcName
      }
    }
    if {[regexp {^\{$} $line]} {
      set funcStart 1
    }
    if {$funcStart == 1} {
      if {[regexp {^\}$} $line] && [string length $funcName] != 0} {
        # Bare closing brace: the end-of-function comment is missing.
        report $fileName $lineNumber "missing comment at the end of function: /* $funcName */"
        set funcStart 0
      } elseif {[regexp {^\} /\*\s*\w+\s*\*/$} $line] && [string length $funcName] != 0} {
        # Closing brace with comment: the name inside must match.
        set comment {}
        regexp {^\} /\*\s*(\w+)\s*\*/$} $line -> comment
        if {$comment != $funcName} {
          report $fileName $lineNumber "comment missmatch. (Current: $comment, Expected: $funcName) "
        }
        set funcStart 0
      } elseif {[regexp {^\}.*;?$} $line]} {
        set funcStart 0
      }
    }
    incr lineNumber
  }
}

View File

@@ -0,0 +1,60 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Check one span of a source line (comments/strings excluded by the caller)
# for a function name that is not followed by exactly one space before "(".
proc check_part_of_the_file {file line_num col_start col_end} {
  if {$col_start == $col_end} {
    return
  }
  set line [getLine $file $line_num]
  # Preprocessor #define lines are exempt from this rule.
  if {[regexp {^\s*#[ ]*define} $line]} {
    return
  }
  set line [string range $line $col_start $col_end]
  # Violation: name directly followed by "(" or separated by 2+ spaces.
  if {[regexp {([[:alnum:]][\s]{2,}\()|([[:alnum:]]\()} $line]} {
    report $file $line_num "there should be exactly one space before left parentheses"
  }
}

# vera++ rule driver: walk the token stream and feed the checker only the
# code portions of each line, skipping comments and string literals.
foreach fileName [getSourceFileNames] {
  set checkLine 1
  set checkColStart 0
  set seenOmitToken false
  foreach token [getTokens $fileName 1 0 -1 -1 {}] {
    set lineNumber [lindex $token 1]
    set colNumber [lindex $token 2]
    set tokenType [lindex $token 3]
    if {$checkLine != $lineNumber} {
      # Line changed: check the remainder of the previous line.
      if {!$seenOmitToken} {
        check_part_of_the_file $fileName $checkLine $checkColStart end
      }
      set checkColStart $colNumber
      set checkLine $lineNumber
    } elseif {$seenOmitToken} {
      set checkColStart $colNumber
    }
    if {$tokenType in {ccomment cppcomment stringlit}} {
      # Check the code before the comment/string, then skip over it.
      check_part_of_the_file $fileName $checkLine $checkColStart $colNumber
      set seenOmitToken true
    } else {
      set seenOmitToken false
    }
  }
}

View File

@@ -0,0 +1,56 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Check one span of a source line (comments/strings excluded by the caller)
# for whitespace between an identifier and a following "[".
proc check_part_of_the_file {file line_num col_start col_end} {
  if {$col_start == $col_end} {
    return
  }
  set line [getLine $file $line_num]
  set line [string range $line $col_start $col_end]
  if {[regexp {[[:alnum:]_][\s]+\[} $line]} {
    report $file $line_num "there should be no spaces between identifier and left bracket"
  }
}

# vera++ rule driver: walk the token stream and feed the checker only the
# code portions of each line, skipping comments and string literals.
foreach fileName [getSourceFileNames] {
  set checkLine 1
  set checkColStart 0
  set seenOmitToken false
  foreach token [getTokens $fileName 1 0 -1 -1 {}] {
    set lineNumber [lindex $token 1]
    set colNumber [lindex $token 2]
    set tokenType [lindex $token 3]
    if {$checkLine != $lineNumber} {
      # Line changed: check the remainder of the previous line.
      if {!$seenOmitToken} {
        check_part_of_the_file $fileName $checkLine $checkColStart end
      }
      set checkColStart $colNumber
      set checkLine $lineNumber
    } elseif {$seenOmitToken} {
      set checkColStart $colNumber
    }
    if {$tokenType in {ccomment cppcomment stringlit}} {
      # Check the code before the comment/string, then skip over it.
      check_part_of_the_file $fileName $checkLine $checkColStart $colNumber
      set seenOmitToken true
    } else {
      set seenOmitToken false
    }
  }
}

View File

@@ -0,0 +1,126 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Indentation
# vera++ rule: enforce two-space indentation tracked by brace depth, and
# require braces to be the only non-space character on their line (with
# exceptions for brace initializers, labels, access specifiers and
# 'extern "C"' blocks). Preprocessor lines, comments, template argument
# lists and code inside parentheses are not indentation-checked.
foreach fileName [getSourceFileNames] {
  set indent 0
  set lastCheckedLineNumber -1
  set is_in_comment "no"
  set is_in_pp_define "no"
  set is_in_class "no"
  set is_in_template "no"
  set parentheses_level 0
  set template_brackets_level 0
  foreach token [getTokens $fileName 1 0 -1 -1 {}] {
    set type [lindex $token 3]
    set lineNumber [lindex $token 1]
    # A ccomment only shields the token that immediately follows it.
    if {$is_in_comment == "yes"} {
      set is_in_comment "no"
    }
    if {$type == "newline"} {
      set is_in_pp_define "no"
    } elseif {$type == "class"} {
      set is_in_class "yes"
    } elseif {$type == "template"} {
      set is_in_template "yes"
    } elseif {$is_in_class == "yes" && $type == "semicolon" && $indent == 0} {
      set is_in_class "no"
    } elseif {$type == "ccomment"} {
      set is_in_comment "yes"
    } elseif {[string first "pp_" $type] == 0} {
      # Any preprocessor directive: mark its line as already checked.
      if {$type == "pp_define"} {
        set is_in_pp_define "yes"
      }
      set lastCheckedLineNumber $lineNumber
    } elseif {$type == "space"} {
    } elseif {$type != "eof"} {
      # A closing brace drops the expected indentation before the check.
      if {$type == "rightbrace" && $indent > 0} {
        incr indent -2
      }
      if {$is_in_pp_define == "no" && $is_in_comment == "no" && $parentheses_level == 0 &&
          $is_in_template == "no"} {
        set line [getLine $fileName $lineNumber]
        if {$lineNumber != $lastCheckedLineNumber} {
          # First token on this line: compare real vs. expected indentation.
          if {[regexp {^[[:blank:]]*} $line match]} {
            set real_indent [string length $match]
            if {$indent != $real_indent} {
              if {[regexp {^[[:blank:]]*(private:|public:|protected:)} $line]} {
                # Access specifiers sit one level (2 spaces) left of members.
                if {$indent != $real_indent + 2} {
                  set exp_indent [expr {$indent - 2}]
                  report $fileName $lineNumber "Indentation: $real_indent -> $exp_indent. Line: '$line'"
                }
              } elseif {![regexp {^[[:alnum:]_]{1,}:$} $line] || $real_indent != 0} {
                # Labels at column 0 are allowed; everything else is reported.
                report $fileName $lineNumber "Indentation: $real_indent -> $indent. Line: '$line'"
              }
            }
          }
        }
        if {$lineNumber == $lastCheckedLineNumber} {
          # Brace appearing after other tokens on the same line.
          if {$type == "leftbrace"} {
            if {![regexp {^[[:blank:]]*\{[[:blank:]]*$} $line]
                && ![regexp {[^\{=]=[^\{=]\{.*\},?} $line]} {
              report $fileName $lineNumber "Left brace is not the only non-space character in the line: '$line'"
            }
          }
          if {$type == "rightbrace"} {
            if {![regexp {^.* = .*\{.*\}[,;]?$} $line]
                && ![regexp {[^\{=]=[^\{=]\{.*\}[,;]?} $line]} {
              report $fileName $lineNumber "Right brace is not first non-space character in the line: '$line'"
            }
          }
        }
        if {$type == "rightbrace"} {
          if {![regexp {^[[:blank:]]*\};?((( [a-z_\(][a-z0-9_\(\)]{0,}){1,})?;| /\*.*\*/| //.*)?$} $line]
              && ![regexp {[^\{=]=[^\{=]\{.*\}[,;]?} $line]} {
            report $fileName $lineNumber "Right brace is not the only non-space character in the line and \
is not single right brace followed by \[a-z0-9_() \] string and single semicolon character: '$line'"
          }
        }
      }
      if {$type == "leftbrace"} {
        # 'extern "C"' blocks do not add an indentation level.
        if {![regexp {^extern "C"} [getLine $fileName [expr {$lineNumber - 1}]]]} {
          incr indent 2
        }
      } elseif {$type == "leftparen"} {
        incr parentheses_level 1
      } elseif {$type == "rightparen"} {
        incr parentheses_level -1
      }
      if {$is_in_template == "yes"} {
        # Track template angle brackets so their contents are skipped.
        if {$type == "less"} {
          incr template_brackets_level
        } elseif {$type == "greater"} {
          incr template_brackets_level -1
          if {$template_brackets_level == 0} {
            set is_in_template "no"
          }
        }
      }
      set lastCheckedLineNumber $lineNumber
    }
  }
}

View File

@@ -0,0 +1,27 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: report every source line longer than maxLen characters.
set maxLen 120
foreach sourceFile [getSourceFileNames] {
  set num 0
  foreach text [getAllLines $sourceFile] {
    incr num
    if {[string length $text] > $maxLen} {
      report $sourceFile $num "line is longer than ${maxLen} characters"
    }
  }
}

View File

@@ -0,0 +1,36 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: report a run of more than maxEmptyLines consecutive empty
# lines, exactly once per run (at the first offending line).
set maxEmptyLines 1
foreach sourceFile [getSourceFileNames] {
  set num 0
  set run 0
  foreach text [getAllLines $sourceFile] {
    incr num
    if {[string trim $text] == ""} {
      incr run
      # Report only at the moment the run first exceeds the limit.
      if {$run == $maxEmptyLines + 1} {
        report $sourceFile $num "too many consecutive empty lines"
      }
    } else {
      set run 0
    }
  }
}

View File

@@ -0,0 +1,30 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: a file must not start or end with an empty line.
foreach sourceFile [getSourceFileNames] {
  set total [getLineCount $sourceFile]
  if {$total == 0} {
    continue
  }
  if {[string trim [getLine $sourceFile 1]] == ""} {
    report $sourceFile 1 "leading empty line(s)"
  }
  if {[string trim [getLine $sourceFile $total]] == ""} {
    report $sourceFile $total "trailing empty line(s)"
  }
}

View File

@@ -0,0 +1,56 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Check one span of a source line (comments/strings excluded by the caller)
# for blank characters directly after an opening parenthesis.
proc check_part_of_the_file {file line_num col_start col_end} {
  if {$col_start == $col_end} {
    return
  }
  set line [getLine $file $line_num]
  set line [string range $line $col_start $col_end]
  if {[regexp {\(+[[:blank:]]} $line]} {
    report $file $line_num "there should be no blank characters after opening parentheses"
  }
}

# vera++ rule driver: walk the token stream and feed the checker only the
# code portions of each line, skipping comments and string literals.
foreach fileName [getSourceFileNames] {
  set checkLine 1
  set checkColStart 0
  set seenOmitToken false
  foreach token [getTokens $fileName 1 0 -1 -1 {}] {
    set lineNumber [lindex $token 1]
    set colNumber [lindex $token 2]
    set tokenType [lindex $token 3]
    if {$checkLine != $lineNumber} {
      # Line changed: check the remainder of the previous line.
      if {!$seenOmitToken} {
        check_part_of_the_file $fileName $checkLine $checkColStart end
      }
      set checkColStart $colNumber
      set checkLine $lineNumber
    } elseif {$seenOmitToken} {
      set checkColStart $colNumber
    }
    if {$tokenType in {ccomment cppcomment stringlit}} {
      # Check the code before the comment/string, then skip over it.
      check_part_of_the_file $fileName $checkLine $checkColStart $colNumber
      set seenOmitToken true
    } else {
      set seenOmitToken false
    }
  }
}

View File

@@ -0,0 +1,56 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Check one span of a source line (comments/strings excluded by the caller)
# for blank characters directly before a closing parenthesis.
proc check_part_of_the_file {file line_num col_start col_end} {
  if {$col_start == $col_end} {
    return
  }
  set line [getLine $file $line_num]
  set line [string range $line $col_start $col_end]
  if {[regexp {[[:graph:]][[:blank:]]+\)} $line]} {
    report $file $line_num "there should be no blank characters before closing parentheses"
  }
}

# vera++ rule driver: walk the token stream and feed the checker only the
# code portions of each line, skipping comments and string literals.
foreach fileName [getSourceFileNames] {
  set checkLine 1
  set checkColStart 0
  set seenOmitToken false
  foreach token [getTokens $fileName 1 0 -1 -1 {}] {
    set lineNumber [lindex $token 1]
    set colNumber [lindex $token 2]
    set tokenType [lindex $token 3]
    if {$checkLine != $lineNumber} {
      # Line changed: check the remainder of the previous line.
      if {!$seenOmitToken} {
        check_part_of_the_file $fileName $checkLine $checkColStart end
      }
      set checkColStart $colNumber
      set checkLine $lineNumber
    } elseif {$seenOmitToken} {
      set checkColStart $colNumber
    }
    if {$tokenType in {ccomment cppcomment stringlit}} {
      # Check the code before the comment/string, then skip over it.
      check_part_of_the_file $fileName $checkLine $checkColStart $colNumber
      set seenOmitToken true
    } else {
      set seenOmitToken false
    }
  }
}

View File

@@ -0,0 +1,25 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: tab characters are forbidden anywhere in the sources.
foreach sourceFile [getSourceFileNames] {
  set num 0
  foreach text [getAllLines $sourceFile] {
    incr num
    if {[string match "*\t*" $text]} {
      report $sourceFile $num "tabs are not allowed"
    }
  }
}

View File

@@ -0,0 +1,25 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# vera++ rule: lines must not end with blank (space or tab) characters.
foreach sourceFile [getSourceFileNames] {
  set num 0
  foreach text [getAllLines $sourceFile] {
    incr num
    if {[regexp {[[:blank:]]$} $text]} {
      report $sourceFile $num "trailing space is not allowed"
    }
  }
}

View File

@@ -0,0 +1,58 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Report a missing space between a referenced type and the '*' pointer
# declarator within the given column range of one source line.
# An empty range (col_start == col_end) is ignored.
proc check_part_of_the_file {file line_num col_start col_end} {
    if {$col_start == $col_end} {
        return
    }
    set fragment [string range [getLine $file $line_num] $col_start $col_end]
    # Any one of these shapes is a violation; report at most once.
    foreach pattern {{\w\*\s\w+} {\w\*\)} {\w\*$}} {
        if {[regexp $pattern $fragment]} {
            report $file $line_num "there should be a space between the referenced type and the pointer declarator."
            return
        }
    }
}
# Walk every token of every file and run check_part_of_the_file on the code
# portions of each line, skipping comment and string-literal tokens so that
# '*' inside comments or strings never triggers a report.
foreach fileName [getSourceFileNames] {
    set checkLine 1
    set checkColStart 0
    set seenOmitToken false
    foreach token [getTokens $fileName 1 0 -1 -1 {}] {
        lassign $token tokenValue lineNumber colNumber tokenType
        if {$lineNumber != $checkLine} {
            # New line started: finish checking the previous one unless its
            # tail was a comment/string we must skip.
            if {!$seenOmitToken} {
                check_part_of_the_file $fileName $checkLine $checkColStart end
            }
            set checkLine $lineNumber
            set checkColStart $colNumber
        } elseif {$seenOmitToken} {
            # Resume checking right after the skipped token.
            set checkColStart $colNumber
        }
        set seenOmitToken [expr {$tokenType in {ccomment cppcomment stringlit}}]
        if {$seenOmitToken} {
            # Check only the code that precedes the comment/string token.
            check_part_of_the_file $fileName $checkLine $checkColStart $colNumber
        }
    }
}

View File

@@ -0,0 +1,282 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# switch-case
# vera++ rule: validate the layout and fall-through discipline of switch
# statements.  For every 'switch' token it replays the following token
# stream through a hand-written state machine and reports when
#   - 'case' is not followed by exactly one space before its label,
#   - control can reach the next case/default without a terminating
#     break/continue/return/goto, a JERRY_UNREACHABLE/JERRY_UNIMPLEMENTED
#     call, or a comment containing "FALL" (e.g. /* FALLTHRU */),
#   - anything other than 'case'/'default' follows a closed case block.
# Preprocessor-heavy code cannot be parsed reliably, so some states skip
# directives or bail out entirely ("Macros magic: give up").
foreach fileName [getSourceFileNames] {
# Track whether we are inside a #define so that switches occurring in
# macro bodies are skipped.
set is_in_comment "no"
set is_in_pp_define "no"
foreach token [getTokens $fileName 1 0 -1 -1 {}] {
set type [lindex $token 3]
set lineNumber [lindex $token 1]
if {$is_in_comment == "yes"} {
set is_in_comment "no"
}
if {$type == "newline"} {
# A newline terminates a (non-continued) preprocessor definition.
set is_in_pp_define "no"
} elseif {$type == "ccomment"} {
set is_in_comment "yes"
} elseif {[string first "pp_" $type] == 0} {
if {$type == "pp_define"} {
set is_in_pp_define "yes"
}
} elseif {$type == "space"} {
} elseif {$type != "eof"} {
if {$is_in_pp_define == "no" && $type == "switch"} {
# Found a switch outside a #define: scan forward starting with the
# token right after the 'switch' keyword.
set next_token_start [lindex $token 2]
incr next_token_start 1
set line_num 0
set state "switch"
set case_block "no"
set seen_braces 0
foreach next_token [getTokens $fileName $lineNumber $next_token_start -1 -1 {}] {
set next_token_type [lindex $next_token 3]
set next_token_value [lindex $next_token 0]
if {$state == "switch"} {
# Skip ahead to the opening brace of the switch body.
if {$next_token_type == "ccomment" || $next_token_type == "space" || $next_token_type == "newline"} {
continue
} elseif {$next_token_type == "leftbrace"} {
set state "first-case"
continue
} else {
# TODO: check switch
continue
}
} elseif {$state == "first-case"} {
# Immediately inside the switch body only 'case'/'default' (or
# whitespace and comments) is expected.
if {$next_token_type == "ccomment" || $next_token_type == "space" || $next_token_type == "newline"} {
continue
} elseif {$next_token_type == "case"} {
set state "case"
continue
} elseif {$next_token_type == "default"} {
set state "default"
continue
} else {
# Macros magic: give up
break
}
} elseif {$state == "case"} {
# Enforce exactly one space between 'case' and its label.
if {$next_token_type == "space"} {
set state "space-after-case"
continue
} else {
report $fileName [lindex $next_token 1] "There should be single space character after 'case' keyword (state $state)"
}
} elseif {$state == "space-after-case"} {
# The label itself must start right after that single space.
if {$next_token_type != "identifier" && $next_token_type != "intlit" && $next_token_type != "charlit" && $next_token_type != "sizeof"} {
report $fileName [lindex $next_token 1] "There should be single space character after 'case' keyword (state $state, next_token_type $next_token_type)"
} else {
set state "case-label"
continue
}
} elseif {$state == "case-label" || $state == "default"} {
# Consume the remainder of the label until the colon.
set case_block "no"
if {$next_token_type != "colon"} {
continue
} else {
set state "colon"
continue
}
} elseif {$state == "after-colon-preprocessor"} {
# Skip a preprocessor directive placed right after the colon.
if {$next_token_type == "newline"} {
set state "colon"
}
} elseif {$state == "colon"} {
# Classify the first significant token of the case body.
if {$next_token_type == "space" || $next_token_type == "newline"} {
continue
} elseif {$next_token_type == "ccomment"} {
if {[string match "*FALL*" $next_token_value]} {
# An explicit fall-through comment counts as a terminator.
set state "fallthru"
set line_num [lindex $next_token 1]
continue
} else {
continue
}
} elseif {$next_token_type == "case"} {
set state "case"
continue
} elseif {$next_token_type == "default"} {
set state "default"
continue
} elseif {$next_token_type == "leftbrace"} {
# The whole case body is wrapped in braces.
set case_block "yes"
set state "wait-for-break"
continue
} elseif {$next_token_type == "identifier"} {
if {[string compare "JERRY_UNREACHABLE" $next_token_value] == 0
|| [string first "JERRY_UNIMPLEMENTED" $next_token_value] == 0} {
# These macros never return, so only a ';' needs to follow.
set state "wait-for-semicolon"
continue
} else {
set state "wait-for-break"
continue
}
} elseif {$next_token_type == "break"
|| $next_token_type == "continue"
|| $next_token_type == "return"} {
set state "wait-for-semicolon"
continue
} elseif {[string first "pp_" $next_token_type] == 0} {
set state "after-colon-preprocessor"
} else {
set state "wait-for-break"
continue
}
} elseif {$state == "wait-for-semicolon"} {
# A terminating statement was seen; swallow tokens up to its ';'.
if {$next_token_type == "semicolon"} {
set state "break"
}
continue
} elseif {$state == "wait-for-break"} {
# Inside a case body: a new case/default must not be reached
# before a terminating statement or FALLTHRU comment appears.
if {$next_token_type == "case" || $next_token_type == "default"} {
report $fileName [lindex $next_token 1] "Missing break, continue or FALLTHRU comment before case (state $state)"
} elseif {$next_token_type == "leftbrace"} {
set state "inside-braces"
incr seen_braces 1
continue
} elseif {$next_token_type == "rightbrace"} {
if {$case_block == "yes"} {
set state "case-blocks-end"
continue
} else {
# Closing brace of the switch itself: this switch is done.
break
}
} elseif {[string compare "JERRY_UNREACHABLE" $next_token_value] == 0
|| [string first "JERRY_UNIMPLEMENTED" $next_token_value] == 0} {
set state "wait-for-semicolon"
continue
} elseif {$next_token_type == "ccomment" && [string match "*FALL*" $next_token_value]} {
set state "fallthru"
set line_num [lindex $next_token 1]
continue
} elseif {$next_token_type == "break"
|| $next_token_type == "continue"
|| $next_token_type == "return"
|| $next_token_type == "goto"} {
set state "wait-for-semicolon"
continue
}
continue
} elseif {$state == "break" || $state == "fallthru"} {
# A case terminated properly; expect the next case/default or the
# end of the switch.
if {$case_block == "no"} {
if {$next_token_type == "ccomment" || $next_token_type == "space" || $next_token_type == "newline"} {
continue
} elseif {$next_token_type == "case"} {
set state "case"
continue
} elseif {$next_token_type == "default"} {
set state "default"
continue
} elseif {$next_token_type == "leftbrace"} {
set state "inside-braces"
incr seen_braces 1
continue
} elseif {$next_token_type == "rightbrace"} {
# NOTE(review): switch_ends is appended but never read in this
# script; presumably consumed by a sibling rule script - verify.
lappend switch_ends [lindex $next_token 1]
break
} elseif {$next_token_type == "break"
|| $next_token_type == "continue"
|| $next_token_type == "return"} {
set state "wait-for-semicolon"
continue
} else {
set state "wait-for-break"
continue
}
} else {
if {$next_token_type == "ccomment" || $next_token_type == "space" || $next_token_type == "newline"} {
continue
} elseif {$next_token_type == "case"} {
set state "case"
continue
} elseif {$next_token_type == "default"} {
set state "default"
continue
} elseif {$next_token_type == "leftbrace"} {
set state "inside-braces"
incr seen_braces 1
continue
} elseif {$next_token_type == "rightbrace"} {
set state "after-rightbrace"
continue
} elseif {$next_token_type == "break"
|| $next_token_type == "continue"
|| $next_token_type == "return"} {
set state "wait-for-semicolon"
continue
} else {
set state "wait-for-break"
continue
}
}
} elseif {$state == "inside-braces"} {
# Skip over a nested brace-balanced block inside the case body.
if {$next_token_type == "rightbrace"} {
incr seen_braces -1
if {$seen_braces == 0} {
set state "wait-for-break"
continue
}
} elseif {$next_token_type == "leftbrace"} {
incr seen_braces 1
}
continue
} elseif {$state == "after-rightbrace-preprocessor"} {
# Skip a preprocessor directive after a closing brace.
if {$next_token_type == "newline"} {
set state "after-rightbrace"
}
} elseif {$state == "after-rightbrace"} {
# After the closing brace of a braced case body only another
# case/default or the end of the switch may follow.
if {$next_token_type == "ccomment" || $next_token_type == "space" || $next_token_type == "newline"} {
continue
} elseif {$next_token_type == "case"} {
set state "case"
continue
} elseif {$next_token_type == "default"} {
set state "default"
continue
} elseif {$next_token_type == "rightbrace"} {
lappend switch_ends [lindex $next_token 1]
break
} elseif {[string first "pp_" $next_token_type] == 0} {
set state "after-rightbrace-preprocessor"
} else {
report $fileName [lindex $next_token 1] "There should be 'case' or 'default' (state $state)"
}
} elseif {$state == "case-blocks-end-preprocessor"} {
if {$next_token_type == "newline"} {
set state "case-blocks-end"
}
} elseif {$state == "case-blocks-end"} {
# A braced case body ended; only the switch's closing brace is
# acceptable here without a new terminator.
if {$next_token_type == "ccomment" || $next_token_type == "space" || $next_token_type == "newline"} {
continue
} elseif {$next_token_type == "rightbrace"} {
lappend switch_ends [lindex $next_token 1]
break
} elseif {[string first "pp_" $next_token_type] == 0} {
set state "case-blocks-end-preprocessor"
} else {
report $fileName [lindex $next_token 1] "Missing break, continue or FALLTHRU comment before rightbrace (state $state)"
}
} else {
# Defensive: every state must be handled by a branch above.
report $fileName [lindex $next_token 1] "Unknown state: $state"
}
}
}
}
}
}

View File

@@ -0,0 +1,56 @@
#!/usr/bin/tclsh
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Report when a right parenthesis is immediately followed by a character
# that should be separated from it by exactly one space, within the given
# column range of one source line.  An empty range is ignored.
proc check_part_of_the_file {file line_num col_start col_end} {
    if {$col_start == $col_end} {
        return
    }
    set fragment [string range [getLine $file $line_num] $col_start $col_end]
    if {[regexp {\)[\w\(&~=]} $fragment]} {
        report $file $line_num "there should be exactly one space after right parentheses"
    }
}
# Walk every token of every file and run check_part_of_the_file on the code
# portions of each line, skipping comment and string-literal tokens so that
# ')' inside comments or strings never triggers a report.
foreach fileName [getSourceFileNames] {
    set checkLine 1
    set checkColStart 0
    set seenOmitToken false
    foreach token [getTokens $fileName 1 0 -1 -1 {}] {
        lassign $token tokenValue lineNumber colNumber tokenType
        if {$lineNumber != $checkLine} {
            # New line started: finish checking the previous one unless its
            # tail was a comment/string we must skip.
            if {!$seenOmitToken} {
                check_part_of_the_file $fileName $checkLine $checkColStart end
            }
            set checkLine $lineNumber
            set checkColStart $colNumber
        } elseif {$seenOmitToken} {
            # Resume checking right after the skipped token.
            set checkColStart $colNumber
        }
        set seenOmitToken [expr {$tokenType in {ccomment cppcomment stringlit}}]
        if {$seenOmitToken} {
            # Check only the code that precedes the comment/string token.
            check_part_of_the_file $fileName $checkLine $checkColStart $colNumber
        }
    }
}

View File

@@ -0,0 +1,47 @@
#!/usr/bin/env python
# Copyright JS Foundation and other contributors, http://js.foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import os
import re
import settings
def main():
    """Print JerryScript's version as MAJOR.MINOR.PATCH.

    The version numbers are extracted with a regular expression from the
    JERRY_API_*_VERSION defines in jerryscript-core.h, so no compiler or
    preprocessor run is required.
    """
    arg_parser = argparse.ArgumentParser(
        description='Display version of JerryScript',
        epilog="""
        Extract version information from sources without relying on
        compiler or preprocessor features.
        """
    )
    arg_parser.parse_args()  # no options besides --help; parse for usage only

    define_re = re.compile(
        r'\s*#define\s+JERRY_API_(?P<key>MAJOR|MINOR|PATCH)_VERSION\s+(?P<value>\S+)')
    header_path = os.path.join(settings.PROJECT_DIR,
                               'jerry-core', 'include', 'jerryscript-core.h')

    components = {}
    with open(header_path, 'r') as header:
        for line in header:
            match = define_re.match(line)
            if match is not None:
                components[match.group('key')] = match.group('value')

    # KeyError here means the header no longer carries all three defines.
    print('%(MAJOR)s.%(MINOR)s.%(PATCH)s' % components)
# Entry point: run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()