diff --git a/.lgtm.yml b/.lgtm.yml new file mode 100644 index 0000000000..fbcedead43 --- /dev/null +++ b/.lgtm.yml @@ -0,0 +1,402 @@ +########################################################################################## +# Customize file classifications. # +# Results from files under any classifier will be excluded from LGTM # +# statistics. # +########################################################################################## + +########################################################################################## +# Use the `path_classifiers` block to define changes to the default classification of # +# files. # +########################################################################################## + +path_classifiers: + # docs: + # Identify the top-level file called `generate_javadoc.py` as documentation-related. + test: + # Override LGTM's default classification of test files by excluding all files. + - exclude: / + # Classify all files in the top-level directories tests/ and testsuites/ as test code. + - tests + # - testsuites + # Classify all files with suffix `.test` as test code. + # Note: use only forward slash / as a path separator. + # Use ** to indicate an arbitrary parent path. + # Use * to indicate any sequence of characters excluding /. + # Always enclose the expression in double quotes if it includes *. + # - "**/*.test" + # Refine the classifications above by excluding files in test/util/. + # - exclude: test/util + # The default behavior is to tag all files created during the + # build as `generated`. Results are hidden for generated code. You can tag + # further files as being generated by adding them to the `generated` section. + generated: + # Exclude all `*.c` files under the `ui/` directory from classification as + # generated code. + # - exclude: ui/**/*.c + # By default, all files not checked into the repository are considered to be + # 'generated'. + # The default behavior is to tag library code as `library`. Results are hidden + # for library code. You can tag further files as being library code by adding them + # to the `library` section. + library: + - exclude: deps/ + # The default behavior is to tag template files as `template`. Results are hidden + # for template files. You can tag further files as being template files by adding + # them to the `template` section. + template: + #- exclude: path/to/template/code/**/*.c + # Define your own category, for example: 'some_custom_category'. + some_custom_category: + # Classify all files in the top-level directory tools/ (or the top-level file + # called tools). + # - tools + +######################################################################################### +# Use the `queries` block to change the default display of query results. # +######################################################################################### + + # queries: + # Start by hiding the results of all queries. + # - exclude: "*" + # Then include all queries tagged 'security' and 'correctness', and with a severity of + # 'error'. + # - include: + # tags: + # - "security" + # - "correctness" + # severity: "error" + # Specifically hide the results of two queries. + # - exclude: cpp/use-of-goto + # - exclude: java/equals-on-unrelated-types + # Refine by including the `java/command-line-injection` query. + # - include: java/command-line-injection + +######################################################################################### +# Define changes to the default code extraction process. 
# +# Each block configures the extraction of a single language, and modifies actions in a # +# named step. Every named step includes automatic default actions, # +# except for the 'prepare' step. The steps are performed in the following sequence: # +# prepare # +# after_prepare # +# configure (C/C++ only) # +# python_setup (Python only) # +# before_index # +# index # +########################################################################################## + +######################################################################################### +# Environment variables available to the steps: # +######################################################################################### + +# LGTM_SRC +# The root of the source tree. +# LGTM_WORKSPACE +# An existing (initially empty) folder outside the source tree. +# Used for temporary download and setup commands. + +######################################################################################### +# Use the extraction block to define changes to the default code extraction process # +# for one or more languages. The settings for each language are defined in a child # +# block, with one or more steps. # +######################################################################################### + +extraction: + # Define settings for C/C++ analysis + ##################################### + cpp: + # The `prepare` step exists for customization on LGTM.com only. + prepare: + # # The `packages` section is valid for LGTM.com only. It names Ubuntu packages to + # # be installed. + packages: + - cmake + # Add an `after-prepare` step if you need to run commands after the prepare step. + # Each command should be listed on a separate line. + # This step is useful for C/C++ analysis where you want to prepare the environment + # for the `configure` step without changing the default behavior for that step. + # after_prepare: + #- export GNU_MAKE=make + #- export GIT=true + # The `configure` step generates build configuration files which the `index` step + # then uses to build the codebase. + configure: + command: + - mkdir build + - cd build + - cmake .. + # - ./prepare_deps + # Optional step. You should add a `before_index` step if you need to run commands + # before the `index` step. + # before_index: + # - export BOOST_DIR=$LGTM_SRC/boost + # - export GTEST_DIR=$LGTM_SRC/googletest + # - export HUNSPELL_DIR=$LGTM_SRC/hunspell + # - export CRYPTOPP_DIR=$LGTM_SRC/cryptopp + # The `index` step builds the code and extracts information during the build + # process. + index: + # Override the autobuild process by specifying a list of custom build commands + # to use instead. + build_command: + - cd build + - make + # - $GNU_MAKE -j2 -s + # Specify that all project or solution files should be used for extraction. + # Default: false. + # all_solutions: true + # Specify a list of one or more project or solution files for extraction. + # Default: LGTM chooses the file closest to the root of the repository (this may + # fail if there are multiple candidates). + # solution: + # - myProject.sln + # Specify MSBuild settings + # msbuild: + # Specify a list of additional arguments to MSBuild. Default: empty. + # arguments: /p:Platform=x64 /p:Configuration=Release + # Specify the MSBuild configuration to use, for example, debug or release. + # Default: read from the solution file or files. + # configuration: + # Specify the platform to target, for example: x86, x64, or Any CPU. + # Default: read from the solution file or files. 
+ # platform: + # Specify the MSBuild target. Default: rebuild. + # target: + # Specify whether or not to perform a NuGet restore for extraction. Default: true. + # nuget_restore: false + # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom + # build commands (build_command). For example: + # 10 for Visual Studio 2010 + # 12 for Visual Studio 2012 + # 14 for Visual Studio 2015 + # 15 for Visual Studio 2017 + # Default: read from project files. + # vstools_version: 10 + + # Define settings for C# analysis + ################################## + # csharp: + # The `prepare` step exists for customization on LGTM.com only. + # prepare: + # packages: + # - example_package + # Add an `after-prepare` step if you need to run commands after the `prepare` step. + # Each command should be listed on a separate line. + # after_prepare: + # - export PATH=$LGTM_WORKSPACE/tools:$PATH + # The `index` step builds the code and extracts information during the build + # process. + #index: + # Specify that all project or solution files should be used for extraction. + # Default: false. + # all_solutions: true + # Specify a list of one or more project or solution files for extraction. + # Default: LGTM chooses the file closest to the root of the repository (this may + # fail if there are multiple candidates). + # solution: + # - myProject.sln + # Override the autobuild process by specifying a list of custom build commands + # to use instead. + # build_command: + # - ./example-compile-all.sh + # By default, LGTM analyzes the code by building it. You can override this, + # and tell LGTM not to build the code. Beware that this can lead + # to less accurate results. + # buildless: true + # Specify .NET Core settings. + # dotnet: + # Specify additional arguments to `dotnet build`. + # Default: empty. + # arguments: "example_arg" + # Specify the version of .NET Core SDK to use. + # Default: The version installed on the build machine. + # version: 2.1 + # Specify MSBuild settings. + # msbuild: + # Specify a list of additional arguments to MSBuild. Default: empty. + # arguments: /P:WarningLevel=2 + # Specify the MSBuild configuration to use, for example, debug or release. + # Default: read from the solution file or files. + # configuration: release + # Specify the platform to target, for example: x86, x64, or Any CPU. + # Default: read from the solution file or files. + # platform: x86 + # Specify the MSBuild target. Default: rebuild. + # target: notest + # Specify whether or not to perform a NuGet restore for extraction. Default: true. + # nuget_restore: false + # Specify a version of Microsoft Visual Studio to use for MSBuild or any custom + # build commands (build_command). For example: + # 10 for Visual Studio 2010 + # 12 for Visual Studio 2012 + # 14 for Visual Studio 2015 + # 15 for Visual Studio 2017 + # Default: read from project files + # vstools_version: 10 + # Specify additional options for the extractor, + # for example --fast to perform a faster extraction that produces a smaller + # database. + # extractor: "--fast" + + # Define settings for Go analysis + ################################## + # go: + # The `prepare` step exists for customization on LGTM.com only. + # prepare: + # packages: + # - example_package + # Add an `after-prepare` step if you need to run commands after the `prepare` step. + # Each command should be listed on a separate line. 
+ # after_prepare: + # - export PATH=$LGTM_WORKSPACE/tools:$PATH + # The `index` step builds the code and extracts information during the build + # process. + # index: + # Override the autobuild process by specifying a list of custom build commands + # to use instead. + # build_command: + # - ./compile-all.sh + + # Define settings for Java analysis + #################################### + # java: + # The `prepare` step exists for customization on LGTM.com only. + # prepare: + # packages: + # - example_package + # Add an `after-prepare` step if you need to run commands after the prepare step. + # Each command should be listed on a separate line. + # after_prepare: + # - export PATH=$LGTM_WORKSPACE/tools:$PATH + # The `index` step extracts information from the files in the codebase. + # index: + # Specify Gradle settings. + # gradle: + # Specify the required Gradle version. + # Default: determined automatically. + # version: 4.4 + # Override the autobuild process by specifying a list of custom build commands + # to use instead. + # build_command: ./compile-all.sh + # Specify the Java version required to build the project. + # java_version: 11 + # Specify whether to extract Java .properties files + # Default: false + # properties_files: true + # Specify Maven settings. + # maven: + # Specify the path (absolute or relative) of a Maven settings file to use. + # Default: Maven uses a settings file in the default location, if it exists. + # settings_file: /opt/share/settings.xml + # Specify the path of a Maven toolchains file. + # Default: Maven uses a toolchains file in the default location, if it exists. + # toolchains_file: /opt/share/toolchains.xml + # Specify the required Maven version. + # Default: the Maven version is determined automatically, where feasible. + # version: 3.5.2 + # Specify how XML files should be extracted: + # all = extract all XML files. + # default = only extract XML files named `AndroidManifest.xml`, `pom.xml`, and `web.xml`. + # disabled = do not extract any XML files. + # xml_mode: all + + # Define settings for JavaScript analysis + ########################################## + # javascript: + # The `prepare` step exists for customization on LGTM.com only. + # prepare: + # packages: + # - example_package + # Add an `after-prepare` step if you need to run commands after the prepare step. + # Each command should be listed on a separate line. + # after_prepare: + # - export PATH=$LGTM_WORKSPACE/tools:$PATH + # The `index` step extracts information from the files in the codebase. + # index: + # Specify a list of files and folders to extract. + # Default: The project root directory. + # include: + # - src/js + # Specify a list of files and folders to exclude from extraction. + # exclude: + # - thirdparty/lib + # You can add additional file types for LGTM to extract, by mapping file + # extensions (including the leading dot) to file types. The usual + # include/exclude patterns apply, so, for example, `.jsm` files under + # `thirdparty/lib` will not be extracted. + # filetypes: + # ".jsm": "js" + # ".tmpl": "html" + # Specify a list of glob patterns to include/exclude files from extraction; this + # is applied on top of the include/exclude paths from above; patterns are + # processed in the same way as for path classifiers above. + # Default: include all files with known extensions (such as .js, .ts and .html), + # but exclude files ending in `-min.js` or `.min.js` and folders named `node_modules` + # or `bower_components` + # filters: + # exclude any *.ts files anywhere. 
+ # - exclude: "**/*.ts" + # but include *.ts files under src/js/typescript. + # - include: "src/js/typescript/**/*.ts" + # Specify how TypeScript files should be extracted: + # none = exclude all TypeScript files. + # basic = extract syntactic information from TypeScript files. + # full = extract syntactic and type information from TypeScript files. + # Default: full. + # typescript: basic + # By default, LGTM doesn't extract any XML files. You can override this by + # using the `xml_mode` property and setting it to `all`. + # xml_mode: all + + # Define settings for Python analysis + ###################################### + # python: + # # The `prepare` step exists for customization on LGTM.com only. + # # prepare: + # # # The `packages` section is valid for LGTM.com only. It names packages to + # # # be installed. + # # packages: libpng-dev + # # This step is useful for Python analysis where you want to prepare the + # # environment for the `python_setup` step without changing the default behavior + # # for that step. + # after_prepare: + # - export PATH=$LGTM_WORKSPACE/tools:$PATH + # # This sets up the Python interpreter and virtual environment, ready for the + # # `index` step to extract the codebase. + # python_setup: + # # Specify packages that should NOT be installed despite being mentioned in the + # # requirements.txt file. + # # Default: no package marked for exclusion. + # exclude_requirements: + # - pywin32 + # # Specify a list of pip packages to install. + # # If any of these packages cannot be installed, the extraction will fail. + # requirements: + # - Pillow + # # Specify a list of requirements text files to use to set up the environment, + # # or false for none. Default: any requirements.txt, test-requirements.txt, + # # and similarly named files identified in the codebase are used. + # requirements_files: + # - required-packages.txt + # # Specify a setup.py file to use to set up the environment, or false for none. + # # Default: any setup.py files identified in the codebase are used in preference + # # to any requirements text files. + # setup_py: new-setup.py + # # Override the version of the Python interpreter used for setup and extraction + # # Default: Python 3. + # version: 2 + # # Optional step. You should add a `before_index` step if you need to run commands + # # before the `index` step. + # before_index: + # - antlr4 -Dlanguage=Python3 Grammar.g4 + # # The `index` step extracts information from the files in the codebase. + # index: + # # Specify a list of files and folders to exclude from extraction. + # # Default: Git submodules and Subversion externals. + # exclude: + # - legacy-implementation + # - thirdparty/libs + # filters: + # - exclude: "**/documentation/examples/snippets/*.py" + # - include: "**/documentation/examples/test_application/*" + # include: + # - example/to/include diff --git a/Jenkinsfile b/Jenkinsfile new file mode 100644 index 0000000000..53798c8db9 --- /dev/null +++ b/Jenkinsfile @@ -0,0 +1,70 @@ +pipeline { + agent any + stages { + stage('build TDengine') { + steps { + sh '''cd ${WORKSPACE} +export TZ=Asia/Harbin +date +rm -rf ${WORKSPACE}/debug +mkdir debug +cd debug +cmake .. 
> /dev/null
+make > /dev/null
+cd ${WORKSPACE}/debug'''
+      }
+    }
+
+    stage('test_tsim') {
+      parallel {
+        stage('test') {
+          steps {
+            sh '''cd ${WORKSPACE}/tests
+#./test-all.sh smoke
+sudo ./test-all.sh full'''
+          }
+        }
+
+        stage('test_crash_gen') {
+          steps {
+            sh '''cd ${WORKSPACE}/tests/pytest
+sudo ./crash_gen.sh -a -p -t 4 -s 2000'''
+          }
+        }
+
+        stage('test_valgrind') {
+          steps {
+            sh '''cd ${WORKSPACE}/tests/pytest
+# valgrind writes its report to stderr, so capture both streams in the log
+sudo ./valgrind-test.sh > mem-error-out.log 2>&1
+grep \'start to execute\\|ERROR SUMMARY\' mem-error-out.log|grep -v \'grep\'|uniq|tee uniq-mem-error-out.log
+
+for memError in `grep \'ERROR SUMMARY\' uniq-mem-error-out.log | awk \'{print $4}\'`
+do
+  if [ -n "$memError" ]; then
+    if [ "$memError" -gt 12 ]; then
+      echo -e "${RED} ## Memory errors number valgrind reports is $memError.\\
+        More than our threshold! ## ${NC}"
+      exit $memError
+    fi
+  fi
+done
+
+grep \'start to execute\\|definitely lost:\' mem-error-out.log|grep -v \'grep\'|uniq|tee uniq-definitely-lost-out.log
+for defiMemError in `grep \'definitely lost:\' uniq-definitely-lost-out.log | awk \'{print $7}\'`
+do
+  if [ -n "$defiMemError" ]; then
+    if [ "$defiMemError" -gt 13 ]; then
+      echo -e "${RED} ## Memory errors number valgrind reports \\
+        Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
+      exit $defiMemError
+    fi
+  fi
+done'''
+          }
+        }
+
+      }
+    }
+
+  }
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index 22984d8cfe..36436dd549 100644
--- a/README.md
+++ b/README.md
@@ -2,7 +2,6 @@
 [![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
 [![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
 [![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
-[![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine)
 [![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine)
 
 [![TDengine](TDenginelogo.png)](https://www.taosdata.com)
@@ -84,14 +83,22 @@ sudo dnf install -y maven
 
 ## Get the source codes
 
-- github:
+First of all, you can clone the source code from GitHub:
 ```bash
 git clone https://github.com/taosdata/TDengine.git
 cd TDengine
 ```
 
+The connectors for Go and Grafana have been moved to separate repositories,
+so you should run this command in the TDengine directory to download them:
+```bash
+git submodule update --init --recursive
+```
+
 ## Build TDengine
 
+### On Linux platform
+
 ```bash
 mkdir debug && cd debug
 cmake .. && cmake --build .
@@ -109,6 +116,34 @@ aarch32:
 cmake .. -DCPUTYPE=aarch32 && cmake --build .
 ```
 
+### On Windows platform
+
+If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
+When you execute vcvarsall.bat, please specify "x86_amd64" for 64-bit Windows or "x86" for 32-bit Windows.
+```
+mkdir debug && cd debug
+"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
+cmake .. -G "NMake Makefiles"
+nmake
+```
+
+If you use Visual Studio 2019, please open a command window by executing "cmd.exe".
+When you execute vcvarsall.bat, please specify "x64" for 64-bit Windows or "x86" for 32-bit Windows.
+```
+mkdir debug && cd debug
+"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
+cmake .. -G "NMake Makefiles"
+nmake
+```
+
+Alternatively, you can open a command window from the Visual Studio 2019 menu "Tools -> Command Line -> Developer Command Prompt" or "Tools -> Command Line -> Developer PowerShell", then execute the following commands:
+```
+mkdir debug && cd debug
+cmake .. -G "NMake Makefiles"
+nmake
+```
+
 # Quick Run
 To quickly start a TDengine server after building, run the command below in terminal:
 ```cmd
diff --git a/alert/cmd/alert/install_driver.sh b/alert/cmd/alert/install_driver.sh
old mode 100644
new mode 100755
index 1171a99538..c7d44786ec
--- a/alert/cmd/alert/install_driver.sh
+++ b/alert/cmd/alert/install_driver.sh
@@ -9,9 +9,7 @@ set -e
 script_dir=$(dirname $(readlink -f "$0"))
 
 # Dynamic directory
 lib_link_dir="/usr/lib"
-
-#install main path
-install_main_dir="/usr/local/taos"
+lib64_link_dir="/usr/lib64"
 
 # Color setting
 RED='\033[0;31m'
@@ -25,24 +23,23 @@ if command -v sudo > /dev/null; then
     csudo="sudo"
 fi
 
-function clean_driver() {
-    ${csudo} rm -f /usr/lib/libtaos.so || :
-}
-
 function install_driver() {
-    echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
-
-    #create install main dir and all sub dir
-    ${csudo} mkdir -p ${install_main_dir}
-    ${csudo} mkdir -p ${install_main_dir}/driver
-
-    ${csudo} rm -f ${lib_link_dir}/libtaos.* || :
-    ${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
-    ${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
-    ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
-    echo
-    echo -e "\033[44;32;1mTDengine client driver is successfully installed!${NC}"
+    # only create the symlinks when no client driver is present yet
+    if [[ -d ${lib_link_dir} && ! -e ${lib_link_dir}/libtaos.so ]]; then
+        echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
+        ${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 || :
+        ${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so || :
+
+        if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
+            ${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
+            ${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
+        fi
+
+        echo
+        echo -e "${GREEN}TDengine client driver is successfully installed!${NC}"
+    else
+        echo -e "${GREEN}TDengine client driver already exists; please confirm that the alert version matches the client driver version!${NC}"
+    fi
 }
 
 install_driver
diff --git a/alert/cmd/alert/main.go b/alert/cmd/alert/main.go
index f4c30e156a..1bd185b580 100644
--- a/alert/cmd/alert/main.go
+++ b/alert/cmd/alert/main.go
@@ -119,7 +119,7 @@ WantedBy=multi-user.target
 	return nil
 }
 
-const version = "TDengine alert v2.0.0.1"
+var version = "2.0.0.1s"
 
 func main() {
 	var (
@@ -133,7 +133,7 @@ func main() {
 	flag.Parse()
 
 	if showVersion {
-		fmt.Println(version)
+		fmt.Println("TDengine alert v" + version)
 		return
 	}
diff --git a/alert/release.sh b/alert/release.sh
index 3342b0e567..35eb4d677f 100755
--- a/alert/release.sh
+++ b/alert/release.sh
@@ -6,9 +6,9 @@ set -e
 # set parameters by default value
 cpuType=amd64   # [armv6l | arm64 | amd64 | 386]
 osType=linux    # [linux | darwin | windows]
-
+version=""
 declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
-while getopts "h:c:o:" arg
+while getopts "h:c:o:n:" arg
 do
   case $arg in
     c)
@@ -19,6 +19,10 @@ do
       #echo "osType=$OPTARG"
      osType=$(echo $OPTARG)
      ;;
+    n)
+      #echo "version=$OPTARG"
+      version=$(echo $OPTARG)
+      ;;
     h)
      echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
      exit 0
@@ -30,18 +34,27 @@ do
   esac
 done
 
+if [ "$version" == "" ]; then
+  echo "Please specify the version with the -n option!"
+  exit 1
+fi
 
 startdir=$(pwd)
 scriptdir=$(dirname $(readlink -f $0))
 cd ${scriptdir}/cmd/alert
-version=$(grep 'const version =' main.go | awk '{print $NF}')
-version=${version%\"}
-version=${version:1}
 
 echo "cpuType=${cpuType}"
 echo "osType=${osType}"
 echo "version=${version}"
 
-GOOS=${osType} GOARCH=${cpuType} go build
+GOOS=${osType} GOARCH=${cpuType} go build -ldflags '-X main.version='${version}
+
+mkdir -p TDengine-alert/driver
+
+cp alert alert.cfg install_driver.sh ./TDengine-alert/.
+cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
+chmod 777 ./TDengine-alert/install_driver.sh
+
+tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
+rm -rf ./TDengine-alert
 
-tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz alert alert.cfg install_driver.sh driver/
diff --git a/cmake/define.inc b/cmake/define.inc
index 28770be254..6e64c2709a 100755
--- a/cmake/define.inc
+++ b/cmake/define.inc
@@ -48,6 +48,7 @@ ENDIF ()
 IF (TD_LINUX_64)
   ADD_DEFINITIONS(-D_M_X64)
   ADD_DEFINITIONS(-D_TD_LINUX_64)
+  MESSAGE(STATUS "linux64 is defined")
   SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
   ADD_DEFINITIONS(-DUSE_LIBICONV)
 ENDIF ()
@@ -55,30 +56,38 @@ ENDIF ()
 IF (TD_LINUX_32)
   ADD_DEFINITIONS(-D_TD_LINUX_32)
   ADD_DEFINITIONS(-DUSE_LIBICONV)
-  SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+  MESSAGE(STATUS "linux32 is defined")
+  SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
 ENDIF ()
 
 IF (TD_ARM_64)
   ADD_DEFINITIONS(-D_M_X64)
-  ADD_DEFINITIONS(-D_TD_ARM_64_)
+  ADD_DEFINITIONS(-D_TD_ARM_64)
   ADD_DEFINITIONS(-D_TD_ARM_)
   ADD_DEFINITIONS(-DUSE_LIBICONV)
+  MESSAGE(STATUS "arm64 is defined")
   SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
 ENDIF ()
 
 IF (TD_ARM_32)
-  ADD_DEFINITIONS(-D_TD_ARM_32_)
+  ADD_DEFINITIONS(-D_TD_ARM_32)
   ADD_DEFINITIONS(-D_TD_ARM_)
-  SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
+  ADD_DEFINITIONS(-DUSE_LIBICONV)
+  MESSAGE(STATUS "arm32 is defined")
+  SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
 ENDIF ()
 
 IF (TD_MIPS_64)
   ADD_DEFINITIONS(-D_TD_MIPS_64_)
+  ADD_DEFINITIONS(-DUSE_LIBICONV)
+  MESSAGE(STATUS "mips64 is defined")
   SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
 ENDIF ()
 
 IF (TD_MIPS_32)
   ADD_DEFINITIONS(-D_TD_MIPS_32_)
+  ADD_DEFINITIONS(-DUSE_LIBICONV)
+  MESSAGE(STATUS "mips32 is defined")
   SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
 ENDIF ()
 
@@ -86,6 +95,7 @@ IF (TD_APLHINE)
   SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
   link_libraries(/usr/lib/libargp.a)
   ADD_DEFINITIONS(-D_ALPINE)
+  MESSAGE(STATUS "alpine is defined")
 ENDIF ()
 
 IF (TD_LINUX)
@@ -95,12 +105,12 @@ IF (TD_LINUX)
   ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
 
   IF (TD_NINGSI_60)
-    ADD_DEFINITIONS(-D_TD_NINGSI_60_)
+    ADD_DEFINITIONS(-D_TD_NINGSI_60)
     MESSAGE(STATUS "set ningsi macro to true")
   ENDIF ()
 
   SET(DEBUG_FLAGS "-O0 -DDEBUG")
-  SET(RELEASE_FLAGS "-O0")
+  SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wno-unused-but-set-variable")
 
   IF (${COVER} MATCHES "true")
     MESSAGE(STATUS "Test coverage mode, add extra flags")
@@ -118,6 +128,7 @@ IF (TD_DARWIN_64)
   ADD_DEFINITIONS(-DDARWIN)
   ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
   ADD_DEFINITIONS(-DUSE_LIBICONV)
+  MESSAGE(STATUS "darwin64 is defined")
   SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
   SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0") @@ -147,11 +158,13 @@ IF (TD_WINDOWS_64) ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_WINDOWS_64) ADD_DEFINITIONS(-DUSE_LIBICONV) + MESSAGE(STATUS "windows64 is defined") ENDIF () IF (TD_WINDOWS_32) ADD_DEFINITIONS(-D_TD_WINDOWS_32) ADD_DEFINITIONS(-DUSE_LIBICONV) + MESSAGE(STATUS "windows32 is defined") ENDIF () INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc) diff --git a/cmake/install.inc b/cmake/install.inc index 7a92a396e3..f5e01e2f1d 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -10,31 +10,27 @@ ELSEIF (TD_WINDOWS) SET(CMAKE_INSTALL_PREFIX C:/TDengine) ENDIF () - IF (NOT TD_GODLL) - #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) - #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector) - #INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) - INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) - INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .) + INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .) + INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver) - IF (TD_POWER) - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .) - ELSE () - INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) - ENDIF () + IF (TD_POWER) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .) + ELSE () + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .) + ENDIF () - #INSTALL(TARGETS taos RUNTIME DESTINATION driver) - #INSTALL(TARGETS shell RUNTIME DESTINATION .) - IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc) - ENDIF () - ELSE () - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver) + #INSTALL(TARGETS taos RUNTIME DESTINATION driver) + #INSTALL(TARGETS shell RUNTIME DESTINATION .) 
+ IF (TD_MVN_INSTALLED) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/version.inc b/cmake/version.inc index 52d62fca65..52fbe3ca58 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.3.0") + SET(TD_VER_NUMBER "2.0.4.0") ENDIF () IF (DEFINED VERCOMPATIBLE) @@ -42,6 +42,12 @@ IF (DEFINED CPUTYPE) ELSE () IF (TD_WINDOWS_32) SET(TD_VER_CPUTYPE "x86") + ELSEIF (TD_LINUX_32) + SET(TD_VER_CPUTYPE "x86") + ELSEIF (TD_ARM_32) + SET(TD_VER_CPUTYPE "x86") + ELSEIF (TD_MIPS_32) + SET(TD_VER_CPUTYPE "x86") ELSE () SET(TD_VER_CPUTYPE "x64") ENDIF () diff --git a/deps/zlib-1.2.11/CMakeLists.txt b/deps/zlib-1.2.11/CMakeLists.txt index 5502070819..f83aa70085 100644 --- a/deps/zlib-1.2.11/CMakeLists.txt +++ b/deps/zlib-1.2.11/CMakeLists.txt @@ -1,8 +1,11 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8) PROJECT(TDengine) -IF (TD_LINUX) - INCLUDE_DIRECTORIES(inc) - AUX_SOURCE_DIRECTORY(src SRC) - ADD_LIBRARY(z ${SRC}) -ENDIF () \ No newline at end of file +IF (TD_WINDOWS) + SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /WX-") + SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /WX-") +ENDIF() + +INCLUDE_DIRECTORIES(inc) +AUX_SOURCE_DIRECTORY(src SRC) +ADD_LIBRARY(z ${SRC}) diff --git a/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md b/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md index d62a525e28..4e55f3c5e8 100644 --- a/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md +++ b/documentation20/webdocs/markdowndocs/Connections with other Tools-ch.md @@ -11,7 +11,7 @@ TDengine能够与开源数据可视化系统[Grafana](https://www.grafana.com/) ### 配置Grafana -TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafana目录下。 +TDengine的Grafana插件在安装包的/usr/local/taos/connector/grafanaplugin目录下。 以CentOS 7.2操作系统为例,将tdengine目录拷贝到/var/lib/grafana/plugins目录下,重新启动grafana即可。 diff --git a/documentation20/webdocs/markdowndocs/Evaluation-ch.md b/documentation20/webdocs/markdowndocs/Evaluation-ch.md index 7d09d0dd33..9e7e0ec6aa 100644 --- a/documentation20/webdocs/markdowndocs/Evaluation-ch.md +++ b/documentation20/webdocs/markdowndocs/Evaluation-ch.md @@ -11,7 +11,7 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 * __全栈时序数据处理引擎__:将数据库、消息队列、缓存、流式计算等功能融为一体,应用无需再集成Kafka/Redis/HBase/Spark/HDFS等软件,大幅降低应用开发和维护的复杂度成本。 * __强大的分析功能__:无论是十年前还是一秒钟前的数据,指定时间范围即可查询。数据可在时间轴上或多个设备上进行聚合。即席查询可通过Shell, Python, R, Matlab随时进行。 * __与第三方工具无缝连接__:不用一行代码,即可与Telegraf, Grafana, EMQ, Prometheus, Matlab, R等集成。后续将支持OPC, Hadoop, Spark等, BI工具也将无缝连接。 -* __零运维成本、零学习成本__:安装、集群一秒搞定,无需分库分表,实时备份。标准SQL,支持JDBC, RESTful, 支持Python/Java/C/C++/Go, 与MySQL相似,零学习成本。 +* __零运维成本、零学习成本__:安装集群简单快捷,无需分库分表,实时备份。类似标准SQL,支持RESTful, 支持Python/Java/C/C++/C#/Go/Node.js, 与MySQL相似,零学习成本。 采用TDengine,可将典型的物联网、车联网、工业互联网大数据平台的总拥有成本大幅降低。但需要指出的是,因充分利用了物联网时序数据的特点,它无法用来处理网络爬虫、微博、微信、电商、ERP、CRM等通用型数据。 diff --git a/documentation20/webdocs/markdowndocs/Getting Started-ch.md b/documentation20/webdocs/markdowndocs/Getting Started-ch.md index 210f0921ba..beb0c639ae 100644 --- a/documentation20/webdocs/markdowndocs/Getting Started-ch.md +++ b/documentation20/webdocs/markdowndocs/Getting Started-ch.md @@ -30,13 +30,13 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版 - TDengine-alert-2.0.0-Linux-x64.tar.gz (8.1M) 
-目前,TDengine只支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装。其他linux系统的支持正在开发中。用`which`命令来检测系统中是否存在`systemd`:
+目前,TDengine只支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装。其他linux系统的支持正在开发中。用`which systemctl`命令来检测系统中是否存在`systemd`包:
 
 ```cmd
-which systemd
+which systemctl
 ```
 
-如果系统中不存在`systemd`命令,请考虑[通过源码安装](#通过源码安装)TDengine。
+如果系统中不存在`systemd`包,请考虑[通过源码安装](#通过源码安装)TDengine。
 
 具体的安装过程,请参见TDengine多种安装包的安装和卸载。
 
@@ -68,7 +68,7 @@ systemctl status taosd
 taos
 ```
 
-如果TDengine终端链接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端链接服务端失败的问题)。TDengine终端的提示符号如下:
+如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下:
 
 ```cmd
 taos>
 ```
@@ -99,8 +99,8 @@ Query OK, 2 row(s) in set (0.001700s)
 - -c, --config-dir: 指定配置文件目录,默认为_/etc/taos_
 - -h, --host: 指定服务的IP地址,默认为本地服务
 - -s, --commands: 在不进入终端的情况下运行TDengine命令
-- -u, -- user: 链接TDengine服务器的用户名,缺省为root
-- -p, --password: 链接TDengine服务器的密码,缺省为taosdata
+- -u, --user: 连接TDengine服务器的用户名,缺省为root
+- -p, --password: 连接TDengine服务器的密码,缺省为taosdata
 - -?, --help: 打印出所有命令行参数
 
 示例:
diff --git a/documentation20/webdocs/markdowndocs/Getting Started.md b/documentation20/webdocs/markdowndocs/Getting Started.md
index 00d97d3d9c..4d34cb49f4 100644
--- a/documentation20/webdocs/markdowndocs/Getting Started.md
+++ b/documentation20/webdocs/markdowndocs/Getting Started.md
@@ -16,13 +16,13 @@ Three different packages are provided, please pick up the one you like.
   • TDengine DEB package (1.7M)
   • TDengine Tarball (3.0M)
 
-For the time being, TDengine only supports installation on Linux systems using [`systemd`](https://en.wikipedia.org/wiki/Systemd) as the service manager. To check if your system has *systemd*, use the _which_ command.
+For the time being, TDengine only supports installation on Linux systems using [`systemd`](https://en.wikipedia.org/wiki/Systemd) as the service manager. To check if your system has the *systemd* package, use the _which systemctl_ command.
 
 ```cmd
-which systemd
+which systemctl
 ```
 
-If the `systemd` command is not found, please [install from source code](#Install-from-Source).
+If the `systemd` package is not found, please [install from source code](#Install-from-Source).
 
 ### Running TDengine
 
diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/webdocs/markdowndocs/Model-ch.md
index 31eb6b3744..d698e3daaf 100644
--- a/documentation20/webdocs/markdowndocs/Model-ch.md
+++ b/documentation20/webdocs/markdowndocs/Model-ch.md
@@ -19,7 +19,7 @@ CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4;
 USE power;
 ```
 
-就当前链接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。
+就当前连接里操作的库换为power,否则对具体表操作前,需要使用“库名.表名”来指定库的名字。
 
 **注意:**
@@ -56,4 +56,4 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
 ## 多列模型 vs 单列模型
 TDengine支持多列模型,只要物理量是一个数据采集点同时采集的(时间戳一致),这些量就可以作为不同列放在一张超级表里。但还有一种极限的设计,单列模型,每个采集的物理量都单独建表,因此每种类型的物理量都单独建立一超级表。比如电流、电压、相位,就建三张超级表。
 
-TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型就会显得简单。
+TDengine建议尽可能采用多列模型,因为插入效率以及存储效率更高。但对于有些场景,一个采集点的采集量的种类经常变化,这个时候,如果采用多列模型,就需要频繁修改超级表的结构定义,让应用变的复杂,这个时候,采用单列模型会显得简单。
diff --git a/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md b/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md
index 8e5eeee1c5..44d572268d 100644
--- a/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md
+++ b/documentation20/webdocs/markdowndocs/More on System Architecture-ch.md
@@ -196,7 +196,7 @@ TDengine是基于硬件、软件系统不可靠、一定会有故障的假设进
 
 **对外服务地址**:TDengine集群可以容纳单台、多台甚至几千台物理节点。应用只需要向集群中任何一个物理节点的publicIp发起连接即可。启动CLI应用taos时,选项-h需要提供的就是publicIp。
 
-**master/secondIp**:每一个dnode都需要配置一个masterIp。dnode启动后,将对配置的masterIp发起加入集群的连接请求。masterIp是已经创建的集群中的任何一个节点的privateIp,对于集群中的第一个节点,就是它自己的privateIp。为保证连接成功,每个dnode还可配置secondIp, 该IP地址也是已创建的集群中的任何一个节点的privateIp。如果一个节点连接masterIp失败,它将试图链接secondIp。
+**master/secondIp**:每一个dnode都需要配置一个masterIp。dnode启动后,将对配置的masterIp发起加入集群的连接请求。masterIp是已经创建的集群中的任何一个节点的privateIp,对于集群中的第一个节点,就是它自己的privateIp。为保证连接成功,每个dnode还可配置secondIp, 该IP地址也是已创建的集群中的任何一个节点的privateIp。如果一个节点连接masterIp失败,它将试图连接secondIp。
 
 dnode启动后,会获知集群的mnode IP列表,并且定时向mnode发送状态信息。
 
@@ -245,4 +245,4 @@ vnode(虚拟数据节点)保存采集的时序数据,而且查询、计算都
 
 
 
-**Note:**目前集群功能仅仅限于企业版
\ No newline at end of file
+**Note:**目前集群功能仅仅限于企业版
diff --git a/documentation20/webdocs/markdowndocs/Queries-ch.md b/documentation20/webdocs/markdowndocs/Queries-ch.md
index ed8342c88d..97383c78f7 100644
--- a/documentation20/webdocs/markdowndocs/Queries-ch.md
+++ b/documentation20/webdocs/markdowndocs/Queries-ch.md
@@ -29,23 +29,9 @@ Query OK, 2 row(s) in set (0.001100s)
 具体的查询语法请看TAOS SQL 。
 
 ## 多表聚合查询
+物联网场景中,往往同一个类型的数据采集点有多个。TDengine采用超级表(STable)的概念来描述某一个类型的数据采集点,一张普通的表来描述一个具体的数据采集点。同时TDengine使用标签来描述数据采集点的静态属性,一个具体的数据采集点有具体的标签值。通过指定标签的过滤条件,TDengine提供了一种高效的方法将超级表(某一类型的数据采集点)所属的子表进行聚合查询。对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样。
 
-TDengine对每个数据采集点单独建表,但在实际应用中经常需要对不同的采集点数据进行聚合。为高效的进行聚合操作,TDengine引入超级表(STable)的概念。超级表用来代表一特定类型的数据采集点,它是包含多张表的表集合,集合里每张表的模式(schema)完全一致,但每张表都带有自己的静态标签,标签可以多个,可以随时增加、删除和修改。
-
-应用可通过指定标签的过滤条件,对一个STable下的全部或部分表进行聚合或统计操作,这样大大简化应用的开发。其具体流程如下图所示:
-
-[图:多表聚合查询原理图]
-
-1:应用将一个查询条件发往系统;2: taosc将超级表的名字发往 Meta Node(管理节点);3:管理节点将超级表所拥有的 vnode 列表发回 taosc;4:taosc将计算的请求连同标签过滤条件发往这些vnode对应的多个数据节点;5:每个vnode先在内存里查找出自己节点里符合标签过滤条件的表的集合,然后扫描存储的时序数据,完成相应的聚合计算,将结果返回给taosc;6:taosc将多个数据节点返回的结果做最后的聚合,将其返回给应用。
-
-由于TDengine在vnode内将标签数据与时序数据分离存储,通过先在内存里过滤标签数据,将需要扫描的数据集大幅减少,大幅提升聚合计算速度。同时,由于数据分布在多个vnode/dnode,聚合计算操作在多个vnode里并发进行,又进一步提升了聚合的速度。
-
-对普通表的聚合函数以及绝大部分操作都适用于超级表,语法完全一样,细节请看 TAOS SQL。
-
-比如:在TAOS Shell,查找所有智能电表采集的电压平均值,并按照location分组
-
+**示例1**:在TAOS Shell,查找北京所有智能电表采集的电压平均值,并按照location分组
 ```mysql
 taos> SELECT AVG(voltage) FROM meters GROUP BY location;
       avg(voltage)       |            location            |
 =============================================================
 Query OK, 2 row(s) in set (0.002136s)
 ```
 
+**示例2**:在TAOS Shell, 查找groupId为2的所有智能电表过去24小时的记录条数,电流的最大值
+
+```mysql
+taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - 24h;
+     count(*)  |    max(current)  |
+==================================
+            5 |             13.4 |
+Query OK, 1 row(s) in set (0.002136s)
+```
+
+TDengine仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在TAOS SQL 一章,查询类操作都会注明是否支持超级表。
+
 ## 降采样查询、插值
 
 物联网场景里,经常需要通过降采样(down sampling)将采集的数据按时间段进行聚合。TDengine 提供了一个简便的关键词 interval 让按照时间窗口的查询操作变得极为简单。比如,将智能电表 d1001 采集的电流值每10秒钟求和
 ```mysql
 taos> SELECT sum(current) FROM d1001 INTERVAL(10s);
            ts            |       sum(current)       |
 ======================================================
  2018-10-03 14:38:00.000 |             10.300000191 |
  2018-10-03 14:38:10.000 |             24.900000572 |
 Query OK, 2 row(s) in set (0.000883s)
 ```
-降采样操作也适用于超级表,比如:将所有智能电表采集的电流值每秒钟求和
+降采样操作也适用于超级表,比如:将北京所有智能电表采集的电流值每秒钟求和
 ```mysql
-taos> SELECT SUM(current) FROM meters INTERVAL(1s);
+taos> SELECT SUM(current) FROM meters where location like "Beijing%" INTERVAL(1s);
            ts            |       sum(current)       |
 ======================================================
  2018-10-03 14:38:04.000 |             10.199999809 |
  2018-10-03 14:38:05.000 |             32.900000572 |
  2018-10-03 14:38:06.000 |             11.500000000 |
  2018-10-03 14:38:15.000 |             12.600000381 |
  2018-10-03 14:38:16.000 |             36.000000000 |
 Query OK, 5 row(s) in set (0.001538s)
 ```
+降采样操作也支持时间偏移,比如:将所有智能电表采集的电流值每秒钟求和,但要求每个时间窗口从 500 毫秒开始
+```mysql
+taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
+           ts            |       sum(current)       |
+======================================================
+ 2018-10-03 14:38:04.500 |             11.189999809 |
+ 2018-10-03 14:38:05.500 |             31.900000572 |
+ 2018-10-03 14:38:06.500 |             11.600000000 |
+ 2018-10-03 14:38:15.500 |             12.300000381 |
+ 2018-10-03 14:38:16.500 |             35.000000000 |
+Query OK, 5 row(s) in set (0.001521s)
+```
 
 物联网场景里,每个数据采集点采集数据的时间是难同步的,但很多分析算法(比如FFT)需要把采集的数据严格按照时间等间隔的对齐,在很多系统里,需要应用自己写程序来处理,但使用TDengine的降采样操作就轻松解决。如果一个时间间隔里,没有采集的数据,TDengine还提供插值计算的功能。
diff --git a/documentation20/webdocs/markdowndocs/Super Table-ch.md b/documentation20/webdocs/markdowndocs/Super Table-ch.md
index 14145cbb70..96e7104ab7 100644
--- a/documentation20/webdocs/markdowndocs/Super Table-ch.md
+++ b/documentation20/webdocs/markdowndocs/Super Table-ch.md
@@ -148,7 +148,7 @@ INSERT INTO <tb_name> USING <stb_name> TAGS (<tag1_value>, ...) VALUES (<value1>,…
      FROM <stable_name> WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
-     INTERVAL (
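
Read together with the Queries-ch.md examples above, the aggregation syntax at the end of this diff describes tag-filtered aggregation over a super table combined with INTERVAL windowing. For illustration only, a query of that shape (reusing the `meters` super table and the `location` tag that appear in the examples earlier in this diff; nothing else is assumed) might look like:

```mysql
-- illustrative sketch only: aggregate all sub-tables of the super table
-- `meters` whose `location` tag starts with "Beijing", in 1-minute windows
SELECT AVG(current), MAX(voltage) FROM meters
  where location like "Beijing%" and ts > now - 1h
  INTERVAL(1m);
```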