Merge pull request #3814 from taosdata/release/s103

Release/s103 merge to master
Shengliang Guan 2020-10-12 18:36:43 +08:00 committed by GitHub
commit d08caf9fa5
437 changed files with 17533 additions and 8219 deletions

.lgtm.yml Normal file

@@ -0,0 +1,402 @@
##########################################################################################
# Customize file classifications. #
# Results from files under any classifier will be excluded from LGTM #
# statistics. #
##########################################################################################
##########################################################################################
# Use the `path_classifiers` block to define changes to the default classification of #
# files. #
##########################################################################################
path_classifiers:
# docs:
# Identify the top-level file called `generate_javadoc.py` as documentation-related.
test:
# Override LGTM's default classification of test files by excluding all files.
- exclude: /
# Classify all files in the top-level directories tests/ and testsuites/ as test code.
- tests
# - testsuites
# Classify all files with suffix `.test` as test code.
# Note: use only forward slash / as a path separator.
# Use ** to indicate an arbitrary parent path.
# Use * to indicate any sequence of characters excluding /.
# Always enclose the expression in double quotes if it includes *.
# - "**/*.test"
# Refine the classifications above by excluding files in test/util/.
# - exclude: test/util
# The default behavior is to tag all files created during the
# build as `generated`. Results are hidden for generated code. You can tag
# further files as being generated by adding them to the `generated` section.
generated:
# Exclude all `*.c` files under the `ui/` directory from classification as
# generated code.
# - exclude: ui/**/*.c
# By default, all files not checked into the repository are considered to be
# 'generated'.
# The default behavior is to tag library code as `library`. Results are hidden
# for library code. You can tag further files as being library code by adding them
# to the `library` section.
library:
- exclude: deps/
# The default behavior is to tag template files as `template`. Results are hidden
# for template files. You can tag further files as being template files by adding
# them to the `template` section.
template:
#- exclude: path/to/template/code/**/*.c
# Define your own category, for example: 'some_custom_category'.
some_custom_category:
# Classify all files in the top-level directory tools/ (or the top-level file
# called tools).
# - tools
#########################################################################################
# Use the `queries` block to change the default display of query results. #
#########################################################################################
# queries:
# Start by hiding the results of all queries.
# - exclude: "*"
# Then include all queries tagged 'security' and 'correctness', and with a severity of
# 'error'.
# - include:
# tags:
# - "security"
# - "correctness"
# severity: "error"
# Specifically hide the results of two queries.
# - exclude: cpp/use-of-goto
# - exclude: java/equals-on-unrelated-types
# Refine by including the `java/command-line-injection` query.
# - include: java/command-line-injection
#########################################################################################
# Define changes to the default code extraction process. #
# Each block configures the extraction of a single language, and modifies actions in a #
# named step. Every named step includes automatic default actions, #
# except for the 'prepare' step. The steps are performed in the following sequence: #
# prepare #
# after_prepare #
# configure (C/C++ only) #
# python_setup (Python only) #
# before_index #
# index #
##########################################################################################
#########################################################################################
# Environment variables available to the steps: #
#########################################################################################
# LGTM_SRC
# The root of the source tree.
# LGTM_WORKSPACE
# An existing (initially empty) folder outside the source tree.
# Used for temporary download and setup commands.
#########################################################################################
# Use the extraction block to define changes to the default code extraction process #
# for one or more languages. The settings for each language are defined in a child #
# block, with one or more steps. #
#########################################################################################
extraction:
# Define settings for C/C++ analysis
#####################################
cpp:
# The `prepare` step exists for customization on LGTM.com only.
prepare:
# # The `packages` section is valid for LGTM.com only. It names Ubuntu packages to
# # be installed.
packages:
- cmake
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# This step is useful for C/C++ analysis where you want to prepare the environment
# for the `configure` step without changing the default behavior for that step.
# after_prepare:
#- export GNU_MAKE=make
#- export GIT=true
# The `configure` step generates build configuration files which the `index` step
# then uses to build the codebase.
configure:
command:
- mkdir build
- cd build
- cmake ..
# - ./prepare_deps
# Optional step. You should add a `before_index` step if you need to run commands
# before the `index` step.
# before_index:
# - export BOOST_DIR=$LGTM_SRC/boost
# - export GTEST_DIR=$LGTM_SRC/googletest
# - export HUNSPELL_DIR=$LGTM_SRC/hunspell
# - export CRYPTOPP_DIR=$LGTM_SRC/cryptopp
# The `index` step builds the code and extracts information during the build
# process.
index:
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
build_command:
- cd build
- make
# - $GNU_MAKE -j2 -s
# Specify that all project or solution files should be used for extraction.
# Default: false.
# all_solutions: true
# Specify a list of one or more project or solution files for extraction.
# Default: LGTM chooses the file closest to the root of the repository (this may
# fail if there are multiple candidates).
# solution:
# - myProject.sln
# Specify MSBuild settings
# msbuild:
# Specify a list of additional arguments to MSBuild. Default: empty.
# arguments: /p:Platform=x64 /p:Configuration=Release
# Specify the MSBuild configuration to use, for example, debug or release.
# Default: read from the solution file or files.
# configuration:
# Specify the platform to target, for example: x86, x64, or Any CPU.
# Default: read from the solution file or files.
# platform:
# Specify the MSBuild target. Default: rebuild.
# target:
# Specify whether or not to perform a NuGet restore for extraction. Default: true.
# nuget_restore: false
# Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
# build commands (build_command). For example:
# 10 for Visual Studio 2010
# 12 for Visual Studio 2012
# 14 for Visual Studio 2015
# 15 for Visual Studio 2017
# Default: read from project files.
# vstools_version: 10
# Define settings for C# analysis
##################################
# csharp:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the `prepare` step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step builds the code and extracts information during the build
# process.
#index:
# Specify that all project or solution files should be used for extraction.
# Default: false.
# all_solutions: true
# Specify a list of one or more project or solution files for extraction.
# Default: LGTM chooses the file closest to the root of the repository (this may
# fail if there are multiple candidates).
# solution:
# - myProject.sln
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command:
# - ./example-compile-all.sh
# By default, LGTM analyzes the code by building it. You can override this,
# and tell LGTM not to build the code. Beware that this can lead
# to less accurate results.
# buildless: true
# Specify .NET Core settings.
# dotnet:
# Specify additional arguments to `dotnet build`.
# Default: empty.
# arguments: "example_arg"
# Specify the version of .NET Core SDK to use.
# Default: The version installed on the build machine.
# version: 2.1
# Specify MSBuild settings.
# msbuild:
# Specify a list of additional arguments to MSBuild. Default: empty.
# arguments: /P:WarningLevel=2
# Specify the MSBuild configuration to use, for example, debug or release.
# Default: read from the solution file or files.
# configuration: release
# Specify the platform to target, for example: x86, x64, or Any CPU.
# Default: read from the solution file or files.
# platform: x86
# Specify the MSBuild target. Default: rebuild.
# target: notest
# Specify whether or not to perform a NuGet restore for extraction. Default: true.
# nuget_restore: false
# Specify a version of Microsoft Visual Studio to use for MSBuild or any custom
# build commands (build_command). For example:
# 10 for Visual Studio 2010
# 12 for Visual Studio 2012
# 14 for Visual Studio 2015
# 15 for Visual Studio 2017
# Default: read from project files
# vstools_version: 10
# Specify additional options for the extractor,
# for example --fast to perform a faster extraction that produces a smaller
# database.
# extractor: "--fast"
# Define settings for Go analysis
##################################
# go:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the `prepare` step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step builds the code and extracts information during the build
# process.
# index:
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command:
# - ./compile-all.sh
# Define settings for Java analysis
####################################
# java:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step extracts information from the files in the codebase.
# index:
# Specify Gradle settings.
# gradle:
# Specify the required Gradle version.
# Default: determined automatically.
# version: 4.4
# Override the autobuild process by specifying a list of custom build commands
# to use instead.
# build_command: ./compile-all.sh
# Specify the Java version required to build the project.
# java_version: 11
# Specify whether to extract Java .properties files
# Default: false
# properties_files: true
# Specify Maven settings.
# maven:
# Specify the path (absolute or relative) of a Maven settings file to use.
# Default: Maven uses a settings file in the default location, if it exists.
# settings_file: /opt/share/settings.xml
# Specify the path of a Maven toolchains file.
# Default: Maven uses a toolchains file in the default location, if it exists.
# toolchains_file: /opt/share/toolchains.xml
# Specify the required Maven version.
# Default: the Maven version is determined automatically, where feasible.
# version: 3.5.2
# Specify how XML files should be extracted:
# all = extract all XML files.
# default = only extract XML files named `AndroidManifest.xml`, `pom.xml`, and `web.xml`.
# disabled = do not extract any XML files.
# xml_mode: all
# Define settings for JavaScript analysis
##########################################
# javascript:
# The `prepare` step exists for customization on LGTM.com only.
# prepare:
# packages:
# - example_package
# Add an `after-prepare` step if you need to run commands after the prepare step.
# Each command should be listed on a separate line.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# The `index` step extracts information from the files in the codebase.
# index:
# Specify a list of files and folders to extract.
# Default: The project root directory.
# include:
# - src/js
# Specify a list of files and folders to exclude from extraction.
# exclude:
# - thirdparty/lib
# You can add additional file types for LGTM to extract, by mapping file
# extensions (including the leading dot) to file types. The usual
# include/exclude patterns apply, so, for example, `.jsm` files under
# `thirdparty/lib` will not be extracted.
# filetypes:
# ".jsm": "js"
# ".tmpl": "html"
# Specify a list of glob patterns to include/exclude files from extraction; this
# is applied on top of the include/exclude paths from above; patterns are
# processed in the same way as for path classifiers above.
# Default: include all files with known extensions (such as .js, .ts and .html),
# but exclude files ending in `-min.js` or `.min.js` and folders named `node_modules`
# or `bower_components`
# filters:
# exclude any *.ts files anywhere.
# - exclude: "**/*.ts"
# but include *.ts files under src/js/typescript.
# - include: "src/js/typescript/**/*.ts"
# Specify how TypeScript files should be extracted:
# none = exclude all TypeScript files.
# basic = extract syntactic information from TypeScript files.
# full = extract syntactic and type information from TypeScript files.
# Default: full.
# typescript: basic
# By default, LGTM doesn't extract any XML files. You can override this by
# using the `xml_mode` property and setting it to `all`.
# xml_mode: all
# Define settings for Python analysis
######################################
# python:
# # The `prepare` step exists for customization on LGTM.com only.
# # prepare:
# # # The `packages` section is valid for LGTM.com only. It names packages to
# # # be installed.
# # packages: libpng-dev
# # This step is useful for Python analysis where you want to prepare the
# # environment for the `python_setup` step without changing the default behavior
# # for that step.
# after_prepare:
# - export PATH=$LGTM_WORKSPACE/tools:$PATH
# # This sets up the Python interpreter and virtual environment, ready for the
# # `index` step to extract the codebase.
# python_setup:
# # Specify packages that should NOT be installed despite being mentioned in the
# # requirements.txt file.
# # Default: no package marked for exclusion.
# exclude_requirements:
# - pywin32
# # Specify a list of pip packages to install.
# # If any of these packages cannot be installed, the extraction will fail.
# requirements:
# - Pillow
# # Specify a list of requirements text files to use to set up the environment,
# # or false for none. Default: any requirements.txt, test-requirements.txt,
# # and similarly named files identified in the codebase are used.
# requirements_files:
# - required-packages.txt
# # Specify a setup.py file to use to set up the environment, or false for none.
# # Default: any setup.py files identified in the codebase are used in preference
# # to any requirements text files.
# setup_py: new-setup.py
# # Override the version of the Python interpreter used for setup and extraction
# # Default: Python 3.
# version: 2
# # Optional step. You should add a `before_index` step if you need to run commands
# # before the `index` step.
# before_index:
# - antlr4 -Dlanguage=Python3 Grammar.g4
# # The `index` step extracts information from the files in the codebase.
# index:
# # Specify a list of files and folders to exclude from extraction.
# # Default: Git submodules and Subversion externals.
# exclude:
# - legacy-implementation
# - thirdparty/libs
# filters:
# - exclude: "**/documentation/examples/snippets/*.py"
# - include: "**/documentation/examples/test_application/*"
# include:
# - example/to/include


@@ -13,6 +13,9 @@ ENDIF ()
SET(TD_ACCOUNT FALSE)
SET(TD_ADMIN FALSE)
SET(TD_GRANT FALSE)
SET(TD_SYNC TRUE)
SET(TD_MQTT TRUE)
SET(TD_TSDB_PLUGINS FALSE)
SET(TD_COVER FALSE)
SET(TD_MEM_CHECK FALSE)

Jenkinsfile vendored Normal file

@@ -0,0 +1,70 @@
pipeline {
agent any
stages {
stage('build TDengine') {
steps {
sh '''cd ${WORKSPACE}
export TZ=Asia/Harbin
date
rm -rf ${WORKSPACE}/debug
mkdir debug
cd debug
cmake .. > /dev/null
make > /dev/null
cd ${WORKSPACE}/debug'''
}
}
stage('test_tsim') {
parallel {
stage('test') {
steps {
sh '''cd ${WORKSPACE}/tests
#./test-all.sh smoke
sudo ./test-all.sh full'''
}
}
stage('test_crash_gen') {
steps {
sh '''cd ${WORKSPACE}/tests/pytest
sudo ./crash_gen.sh -a -p -t 4 -s 2000'''
}
}
stage('test_valgrind') {
steps {
sh '''cd ${WORKSPACE}/tests/pytest
sudo ./valgrind-test.sh > mem-error-out.log 2>&1
grep \'start to execute\\|ERROR SUMMARY\' mem-error-out.log|grep -v \'grep\'|uniq|tee uniq-mem-error-out.log
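# assumption: a valgrind summary line looks like '==PID== ERROR SUMMARY: N errors from M contexts',
# so awk field $4 below is the error count N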
for memError in `grep \'ERROR SUMMARY\' uniq-mem-error-out.log | awk \'{print $4}\'`
do
if [ -n "$memError" ]; then
if [ "$memError" -gt 12 ]; then
echo -e "${RED} ## Memory errors number valgrind reports is $memError.\\
More than our threshold! ## ${NC}"
travis_terminate $memError
fi
fi
done
grep \'start to execute\\|definitely lost:\' mem-error-out.log|grep -v \'grep\'|uniq|tee uniq-definitely-lost-out.log
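# assumption: a leak line looks like '==PID== definitely lost: X bytes in Y blocks',
# so awk field $7 below is the block count Y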
for defiMemError in `grep \'definitely lost:\' uniq-definitely-lost-out.log | awk \'{print $7}\'`
do
if [ -n "$defiMemError" ]; then
if [ "$defiMemError" -gt 13 ]; then
echo -e "${RED} ## Memory errors number valgrind reports \\
Definitely lost is $defiMemError. More than our threshold! ## ${NC}"
travis_terminate $defiMemError
fi
fi
done'''
}
}
}
}
}
}


@@ -2,7 +2,7 @@
[![Build status](https://ci.appveyor.com/api/projects/status/kf3pwh2or5afsgl9/branch/master?svg=true)](https://ci.appveyor.com/project/sangshuduo/tdengine-2n8ge/branch/master)
[![Coverage Status](https://coveralls.io/repos/github/taosdata/TDengine/badge.svg?branch=develop)](https://coveralls.io/github/taosdata/TDengine?branch=develop)
[![CII Best Practices](https://bestpractices.coreinfrastructure.org/projects/4201/badge)](https://bestpractices.coreinfrastructure.org/projects/4201)
[![Docker Pulls](https://img.shields.io/docker/pulls/tdengine/tdengine)](https://hub.docker.com/repository/docker/tdengine/tdengine)
[![tdengine](https://snapcraft.io//tdengine/badge.svg)](https://snapcraft.io/tdengine)
[![TDengine](TDenginelogo.png)](https://www.taosdata.com)
@@ -83,14 +83,22 @@ sudo dnf install -y maven
## Get the source code
- github:
First of all, you may clone the source code from GitHub:
```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
The connectors for go & grafana have been moved to separate repositories,
so you should run this command in the TDengine directory to install them:
```bash
git submodule update --init --recursive
```
## Build TDengine
### On Linux platform
```bash
mkdir debug && cd debug
cmake .. && cmake --build .
@@ -108,6 +116,34 @@ aarch32:
cmake .. -DCPUTYPE=aarch32 && cmake --build .
```
### On Windows platform
If you use Visual Studio 2013, please open a command window by executing "cmd.exe".
Specify "x86_amd64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
mkdir debug && cd debug
"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
If you use Visual Studio 2019, please open a command window by executing "cmd.exe".
Specify "x64" for 64-bit Windows or "x86" for 32-bit Windows when you execute vcvarsall.bat.
```
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
Alternatively, you can open a command window from the Visual Studio 2019 menu "Tools -> Command Line -> Developer Command Prompt" or "Tools -> Command Line -> Developer PowerShell" and then execute the following commands:
```
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
# Quick Run
To quickly start a TDengine server after building, run the command below in terminal:
```cmd


@@ -121,7 +121,21 @@ func (alert *Alert) refresh(rule *Rule, values map[string]interface{}) {
alert.Values = values
res := rule.Expr.Eval(func(key string) interface{} {
// ToLower is required as column name in result is in lower case
return alert.Values[strings.ToLower(key)]
i := alert.Values[strings.ToLower(key)]
switch v := i.(type) {
case int8:
return int64(v)
case int16:
return int64(v)
case int:
return int64(v)
case int32:
return int64(v)
case float32:
return float64(v)
default:
return v
}
})
val, ok := res.(bool)
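The widening above matters because Go compares interface values type-sensitively: an interface{} holding int8(42) is never equal to one holding int64(42), so an expression evaluator matching column values against int64 literals would silently mismatch. A minimal, self-contained sketch of the same normalization pattern (names here are illustrative, not the alert module's API):
```go
package main

import "fmt"

// normalize widens narrow integer and float types so that all numeric
// values carried in an interface{} compare under one dynamic type.
func normalize(i interface{}) interface{} {
	switch v := i.(type) {
	case int8:
		return int64(v)
	case int16:
		return int64(v)
	case int32:
		return int64(v)
	case int:
		return int64(v)
	case float32:
		return float64(v)
	default:
		return v
	}
}

func main() {
	var raw interface{} = int8(42)
	fmt.Println(raw == interface{}(int64(42)))            // false: dynamic types differ
	fmt.Println(normalize(raw) == interface{}(int64(42))) // true after widening
}
```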

alert/cmd/alert/install_driver.sh Normal file → Executable file

@@ -9,9 +9,7 @@ set -e
script_dir=$(dirname $(readlink -f "$0"))
# Dynamic directory
lib_link_dir="/usr/lib"
#install main path
install_main_dir="/usr/local/taos"
lib64_link_dir="/usr/lib64"
# Color setting
RED='\033[0;31m'
@@ -25,24 +23,23 @@ if command -v sudo > /dev/null; then
csudo="sudo"
fi
function clean_driver() {
${csudo} rm -f /usr/lib/libtaos.so || :
}
function install_driver() {
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
#create install main dir and all sub dir
${csudo} mkdir -p ${install_main_dir}
${csudo} mkdir -p ${install_main_dir}/driver
${csudo} rm -f ${lib_link_dir}/libtaos.* || :
${csudo} cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo} chmod 777 ${install_main_dir}/driver/*
${csudo} ln -s ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so
echo
echo -e "\033[44;32;1mTDengine client driver is successfully installed!${NC}"
if [[ -d ${lib_link_dir} && ! -e ${lib_link_dir}/libtaos.so ]]; then
echo -e "${GREEN}Start to install TDengine client driver ...${NC}"
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so || :
if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then
${csudo} ln -s ${script_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || :
${csudo} ln -s ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || :
fi
echo
echo -e "${GREEN}TDengine client driver is successfully installed!${NC}"
else
echo -e "${GREEN}TDengine client driver already exists, Please confirm whether the alert version matches the client driver version!${NC}"
fi
}
install_driver


@@ -119,7 +119,7 @@ WantedBy=multi-user.target
return nil
}
const version = "TDengine alert v2.0.0.0"
var version = "2.0.0.1s"
func main() {
var (
@@ -133,7 +133,7 @@ func main() {
flag.Parse()
if showVersion {
fmt.Println(version)
fmt.Println("TDengine alert v" + version)
return
}


@@ -6,9 +6,9 @@ set -e
# set parameters by default value
cpuType=amd64 # [armv6l | arm64 | amd64 | 386]
osType=linux # [linux | darwin | windows]
version=""
declare -A archMap=(["armv6l"]="arm" ["arm64"]="arm64" ["amd64"]="x64" ["386"]="x86")
while getopts "h:c:o:" arg
while getopts "h:c:o:n:" arg
do
case $arg in
c)
@@ -19,6 +19,10 @@ do
#echo "osType=$OPTARG"
osType=$(echo $OPTARG)
;;
n)
#echo "version=$OPTARG"
version=$(echo $OPTARG)
;;
h)
echo "Usage: `basename $0` -c [armv6l | arm64 | amd64 | 386] -o [linux | darwin | windows]"
exit 0
@@ -30,18 +34,27 @@ do
esac
done
if [ "$version" == "" ]; then
echo "Please input the correct version!"
exit 1
fi
startdir=$(pwd)
scriptdir=$(dirname $(readlink -f $0))
cd ${scriptdir}/cmd/alert
version=$(grep 'const version =' main.go | awk '{print $NF}')
version=${version%\"}
version=${version:1}
echo "cpuType=${cpuType}"
echo "osType=${osType}"
echo "version=${version}"
GOOS=${osType} GOARCH=${cpuType} go build
GOOS=${osType} GOARCH=${cpuType} go build -ldflags '-X main.version='${version}
mkdir -p TDengine-alert/driver
cp alert alert.cfg install_driver.sh ./TDengine-alert/.
cp ../../../debug/build/lib/libtaos.so.${version} ./TDengine-alert/driver/.
chmod 777 ./TDengine-alert/install_driver.sh
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz TDengine-alert/
rm -rf ./TDengine-alert
tar -I 'gzip -9' -cf ${startdir}/TDengine-alert-${version}-${osType^}-${archMap[${cpuType}]}.tar.gz alert alert.cfg install_driver.sh driver/
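The interplay between the main.go change above (const version to var version) and the build script's -ldflags flag is the standard Go link-time stamping pattern: the linker's -X option can overwrite a package-level string variable, but not a constant. A minimal sketch under that assumption (not the exact alert sources):
```go
// version.go: minimal sketch of link-time version stamping.
package main

import "fmt"

// Must be a package-level string variable: `go build -ldflags "-X main.version=..."`
// rewrites it at link time; a const could not be overwritten.
var version = "dev"

func main() {
	fmt.Println("TDengine alert v" + version)
}
```
Building with `go build -ldflags '-X main.version=2.0.0.1s'` then makes the binary print the injected version instead of "dev".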


@@ -13,6 +13,18 @@ IF (TD_GRANT)
ADD_DEFINITIONS(-D_GRANT)
ENDIF ()
IF (TD_SYNC)
ADD_DEFINITIONS(-D_SYNC)
ENDIF ()
IF (TD_MQTT)
ADD_DEFINITIONS(-D_MQTT)
ENDIF ()
IF (TD_TSDB_PLUGINS)
ADD_DEFINITIONS(-D_TSDB_PLUGINS)
ENDIF ()
IF (TD_GODLL)
ADD_DEFINITIONS(-D_TD_GO_DLL_)
ENDIF ()
@@ -36,6 +48,7 @@ ENDIF ()
IF (TD_LINUX_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_LINUX_64)
MESSAGE(STATUS "linux64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
ENDIF ()
@@ -43,30 +56,38 @@ ENDIF ()
IF (TD_LINUX_32)
ADD_DEFINITIONS(-D_TD_LINUX_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
MESSAGE(STATUS "linux32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_ARM_64_)
ADD_DEFINITIONS(-D_TD_ARM_64)
ADD_DEFINITIONS(-D_TD_ARM_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_32_)
ADD_DEFINITIONS(-D_TD_ARM_32)
ADD_DEFINITIONS(-D_TD_ARM_)
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "arm32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ")
ENDIF ()
IF (TD_MIPS_64)
ADD_DEFINITIONS(-D_TD_MIPS_64_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
IF (TD_MIPS_32)
ADD_DEFINITIONS(-D_TD_MIPS_32_)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "mips32 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
ENDIF ()
@@ -74,6 +95,7 @@ IF (TD_APLHINE)
SET(COMMON_FLAGS "${COMMON_FLAGS} -largp")
link_libraries(/usr/lib/libargp.a)
ADD_DEFINITIONS(-D_ALPINE)
MESSAGE(STATUS "aplhine is defined")
ENDIF ()
IF (TD_LINUX)
@@ -83,12 +105,12 @@ IF (TD_LINUX)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
IF (TD_NINGSI_60)
ADD_DEFINITIONS(-D_TD_NINGSI_60_)
ADD_DEFINITIONS(-D_TD_NINGSI_60)
MESSAGE(STATUS "set ningsi macro to true")
ENDIF ()
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable")
IF (${COVER} MATCHES "true")
MESSAGE(STATUS "Test coverage mode, add extra flags")
@@ -106,6 +128,7 @@ IF (TD_DARWIN_64)
ADD_DEFINITIONS(-DDARWIN)
ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "darwin64 is defined")
SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE")
SET(DEBUG_FLAGS "-O0 -DDEBUG")
SET(RELEASE_FLAGS "-O0")
@@ -135,11 +158,13 @@ IF (TD_WINDOWS_64)
ADD_DEFINITIONS(-D_M_X64)
ADD_DEFINITIONS(-D_TD_WINDOWS_64)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "windows64 is defined")
ENDIF ()
IF (TD_WINDOWS_32)
ADD_DEFINITIONS(-D_TD_WINDOWS_32)
ADD_DEFINITIONS(-DUSE_LIBICONV)
MESSAGE(STATUS "windows32 is defined")
ENDIF ()
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/inc)


@@ -42,6 +42,16 @@ IF (${MEM_CHECK} MATCHES "true")
MESSAGE(STATUS "build with memory check")
ENDIF ()
IF (${MQTT} MATCHES "false")
SET(TD_MQTT FALSE)
MESSAGE(STATUS "build without mqtt module")
ENDIF ()
IF (${SYNC} MATCHES "false")
SET(TD_SYNC FALSE)
MESSAGE(STATUS "build without sync module")
ENDIF ()
IF (${RANDOM_FILE_FAIL} MATCHES "true")
SET(TD_RANDOM_FILE_FAIL TRUE)
MESSAGE(STATUS "build with random-file-fail enabled")


@@ -10,31 +10,27 @@ ELSEIF (TD_WINDOWS)
SET(CMAKE_INSTALL_PREFIX C:/TDengine)
ENDIF ()
IF (NOT TD_GODLL)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/grafana DESTINATION connector)
#INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/go DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/nodejs DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/src/connector/python DESTINATION connector)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/tests/examples DESTINATION .)
INSTALL(DIRECTORY ${TD_COMMUNITY_DIR}/packaging/cfg DESTINATION .)
INSTALL(FILES ${TD_COMMUNITY_DIR}/src/inc/taos.h DESTINATION include)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.lib DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.exp DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos.dll DESTINATION driver)
IF (TD_POWER)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
ENDIF ()
IF (TD_POWER)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/power.exe DESTINATION .)
ELSE ()
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .)
INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosdemo.exe DESTINATION .)
ENDIF ()
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSE ()
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll DESTINATION driver)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/libtaos.dll.a DESTINATION driver)
#INSTALL(TARGETS taos RUNTIME DESTINATION driver)
#INSTALL(TARGETS shell RUNTIME DESTINATION .)
IF (TD_MVN_INSTALLED)
INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.0-dist.jar DESTINATION connector/jdbc)
ENDIF ()
ELSEIF (TD_DARWIN)
SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh")


@@ -114,6 +114,9 @@ ELSEIF (${OSTYPE} MATCHES "Ningsi80")
MESSAGE(STATUS "input osType: Ningsi80")
ELSEIF (${OSTYPE} MATCHES "Linux")
MESSAGE(STATUS "input osType: Linux")
ELSEIF (${OSTYPE} MATCHES "Alpine")
MESSAGE(STATUS "input osType: Alpine")
SET(TD_APLHINE TRUE)
ELSE ()
MESSAGE(STATUS "input osType unknown: " ${OSTYPE})
ENDIF ()
ENDIF ()


@@ -4,7 +4,7 @@ PROJECT(TDengine)
IF (DEFINED VERNUMBER)
SET(TD_VER_NUMBER ${VERNUMBER})
ELSE ()
SET(TD_VER_NUMBER "2.0.3.0")
SET(TD_VER_NUMBER "2.0.5.1")
ENDIF ()
IF (DEFINED VERCOMPATIBLE)
@@ -42,6 +42,12 @@ IF (DEFINED CPUTYPE)
ELSE ()
IF (TD_WINDOWS_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_LINUX_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_ARM_32)
SET(TD_VER_CPUTYPE "x86")
ELSEIF (TD_MIPS_32)
SET(TD_VER_CPUTYPE "x86")
ELSE ()
SET(TD_VER_CPUTYPE "x64")
ENDIF ()

deps/CMakeLists.txt vendored

@@ -10,6 +10,6 @@ ADD_SUBDIRECTORY(cJson)
ADD_SUBDIRECTORY(wepoll)
ADD_SUBDIRECTORY(MsvcLibX)
IF (TD_LINUX)
IF (TD_LINUX AND TD_MQTT)
ADD_SUBDIRECTORY(MQTT-C)
ENDIF ()


@@ -1,5 +1,4 @@
cmake_minimum_required(VERSION 3.5)
project(MQTT-C VERSION 1.1.2 LANGUAGES C)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
# MQTT-C build options
option(MQTT_C_OpenSSL_SUPPORT "Build MQTT-C with OpenSSL support?" OFF)


@@ -1,8 +1,11 @@
CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
IF (TD_LINUX)
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(z ${SRC})
ENDIF ()
IF (TD_WINDOWS)
SET(CMAKE_C_FLAGS_DEBUG "${CMAKE_C_FLAGS_DEBUG} /WX-")
SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} /WX-")
ENDIF()
INCLUDE_DIRECTORIES(inc)
AUX_SOURCE_DIRECTORY(src SRC)
ADD_LIBRARY(z ${SRC})


@@ -128,24 +128,84 @@ When a new dnode joins a TDengine cluster, several cluster-related parameters are involved:
- maxTablesPerVnode: the maximum number of tables that can be created in each vnode. Default: 1000000.
- maxVgroupsPerDb: the maximum number of vnodes each database may use.
- arbitrator: the end point of the arbitrator in the system; empty by default.
- timezone: the time zone, obtained dynamically from the system.
- locale: the system locale and encoding format, obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.
- charset: the character-set encoding, obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.
- For the configuration of timezone, locale and charset, see the client configuration below.
## Client configuration
The interactive client application of the TDengine system is taos; it shares the configuration file taos.cfg with taosd. When running taos, use the -c parameter to specify the configuration file directory, e.g. taos -c /home/cfg means the parameters in the taos.cfg file under /home/cfg/ are used; the default directory is /etc/taos. For more on using taos, see [Shell command-line program](#_TDengine_Shell命令行程序). This section mainly explains the parameters that the taos client application uses in the taos.cfg configuration file.
The interactive client application of the TDengine system is taos; it shares the configuration file taos.cfg with taosd. When running taos, use the -c parameter to specify the configuration file directory, e.g. taos -c /home/cfg means the parameters in the taos.cfg file under /home/cfg/ are used; the default directory is /etc/taos. This section mainly describes the parameters that the taos client application uses in the taos.cfg configuration file.
List of client configuration parameters and their explanations:
- firstEp: the end point of the first taosd instance in the cluster that taos actively connects to at startup; default: localhost:6030.
- secondEp: the end point of the second taosd instance in the cluster that taos tries if the first cannot be reached at startup; empty by default.
- charset: the character-set encoding, obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.
- locale: the system locale and encoding format, obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API.
The log configuration parameters are exactly the same as those of the server.
- locale
When starting taos, the end point of a taosd instance can also be given on the command line; otherwise it is read from taos.cfg.
> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API
TDengine provides a dedicated field type, nchar, for storing wide characters in non-ASCII encodings such as Chinese, Japanese and Korean. Data written to an nchar field is uniformly encoded in UCS4-LE format and sent to the server. Note that it is the client that guarantees the correctness of the encoding. Therefore, to use nchar fields to store non-ASCII characters such as Chinese, Japanese or Korean, the client's encoding format must be set correctly.
Characters entered on the client always use the operating system's current default encoding, mostly UTF-8 on Linux, though some Chinese system encodings may be GB18030 or GBK. In a Docker environment the default encoding is POSIX, and on Chinese-language Windows systems it is CP936. The client must set its character set correctly, i.e. to the current encoding of the operating system it runs on, for the data in nchar fields to be converted correctly to UCS4-LE.
On Linux, locale names follow the rule <language>_<region>.<charset encoding>, e.g. zh_CN.UTF-8, where zh stands for Chinese, CN for mainland China, and UTF-8 for the character set. The charset encoding tells the client how to convert encodings when parsing local strings. Linux and Mac OSX can determine the system's character encoding from the locale; since the locale format Windows uses is not the POSIX standard, Windows relies on another configuration parameter, charset, to specify the character encoding. charset can also be used on Linux.
- charset
> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or through the API
If charset is not set in the configuration file, then on Linux taos reads the system's current locale at startup and extracts the charset encoding from it. If reading the locale fails, it tries to read the charset setting, and if that also fails, startup is aborted.
On Linux the locale already contains the character-encoding information, so once the system locale is set correctly there is no need to set charset separately. For example:
```
locale zh_CN.UTF-8
```
On Windows the current system encoding cannot be obtained from the locale. If the character encoding cannot be read from the configuration file, taos defaults to CP936, which is equivalent to adding the following to the configuration file:
```
charset CP936
```
If you need to adjust the character encoding, check the encoding used by the current operating system and set it correctly in the configuration file.
On Linux, if both locale and charset are set and they are inconsistent, the value set later overrides the earlier one.
```
locale zh_CN.UTF-8
charset GBK
```
Here the effective charset is GBK.
```
charset GBK
locale zh_CN.UTF-8
```
Here the effective charset is UTF-8.
The log configuration parameters are exactly the same as those of the server.
- timezone
Default: the current time-zone setting, obtained dynamically from the system
The time zone of the system the client runs on. To handle data writes and queries across multiple time zones, TDengine uses Unix timestamps to record and store time. The nature of a Unix timestamp guarantees that, for any given moment, the timestamp produced in any time zone is identical. Note that the conversion to a Unix timestamp is performed on the client, so the correct time zone must be set for other forms of time on the client to be converted to the correct Unix timestamp.
On Linux the client automatically reads the system's time-zone setting. The time zone can also be set in the configuration file in several ways. For example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
are all valid ways to set the UTC+8 time zone.
The time-zone setting affects how the non-Unix-timestamp contents of SQL statements (timestamp strings, the keyword now) are parsed in queries and writes. For example:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In the UTC+8 time zone this SQL statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC time zone it is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the uncertainty of string time formats, Unix timestamps can be used directly. Timestamp strings with an explicit time zone can also be used in SQL statements, e.g. RFC3339-format strings (2013-04-12T15:52:01.123+08:00) or ISO-8601-format strings (2013-04-12T15:52:01.123+0800); converting these two to Unix timestamps is unaffected by the system's time zone.
## User management


@@ -11,7 +11,7 @@ TDengine can work with the open-source data visualization system [Grafana](https://www.grafana.com/)
### Configuring Grafana
The TDengine Grafana plugin is in the /usr/local/taos/connector/grafana directory of the installation package.
The TDengine Grafana plugin is in the /usr/local/taos/connector/grafanaplugin directory of the installation package.
Taking CentOS 7.2 as an example, copy the tdengine directory into /var/lib/grafana/plugins and restart grafana.


@@ -95,6 +95,8 @@ TDengine is an efficient platform for storing, querying and analyzing time-series big data
- [Data query](https://www.taosdata.com/cn/documentation20/taos-sql/#数据查询): supports time ranges, value filtering, sorting, manual paging of query results, etc.
- [SQL functions](https://www.taosdata.com/cn/documentation20/taos-sql/#SQL函数): supports various aggregation, selection and computation functions such as avg, min and diff
- [Time-dimension aggregation](https://www.taosdata.com/cn/documentation20/taos-sql/#时间维度聚合): slices table data by time range before aggregating, for dimensionality reduction
- [Boundary limits](https://www.taosdata.com/cn/documentation20/taos-sql/#TAOS-SQL-边界限制): the boundary limits of TAOS SQL
- [Error codes](https://www.taosdata.com/cn/documentation20/Taos-Error-Code): TDengine 2.0 error codes and their corresponding decimal codes
## The technical design of TDengine


@@ -68,7 +68,7 @@ systemctl status taosd
taos
```
If the TDengine shell connects to the server successfully, it prints a welcome message and version information; if it fails, it prints an error message (see the [FAQ](https://www.taosdata.com/cn/faq/) for resolving failures to connect to the server). The TDengine shell prompt is:
```cmd
taos>
@@ -99,8 +99,8 @@ Query OK, 2 row(s) in set (0.001700s)
- -c, --config-dir: specify the configuration file directory; the default is _/etc/taos_
- -h, --host: specify the IP address of the server to connect to; the default is the local server
- -s, --commands: run a TDengine command without entering the shell
- -u, --user: the user name to use when connecting to the TDengine server; default: root
- -p, --password: the password to use when connecting to the TDengine server; default: taosdata
- -?, --help: print all command-line parameters
Example:


@@ -19,7 +19,7 @@ CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4;
USE power;
```
switches the database operated on by the current connection to power; otherwise, before operating on a specific table, you would have to specify the database with "database_name.table_name".
**Note:**
@@ -56,4 +56,4 @@ INSERT INTO d1001 USING METERS TAGS ("Beijng.Chaoyang", 2) VALUES (now, 10.2, 21
## Multi-column model vs. single-column model
TDengine supports the multi-column model: as long as the physical quantities are collected at the same data collection point at the same time (i.e. with identical timestamps), they can be placed as different columns in one super table. There is also an extreme design, the single-column model: each collected physical quantity gets its own table, so a separate super table is built for each type of physical quantity; e.g. current, voltage and phase would each get a super table.
TDengine recommends the multi-column model wherever possible, because insertion and storage are more efficient. For some scenarios, however, the set of quantities collected at a point changes frequently; the multi-column model would then require frequent changes to the super table's schema, complicating the application, and the single-column model is simpler.


@@ -196,7 +196,7 @@ TDengine is designed on the assumption that hardware and software are unreliable and failures are inevitable
**External service address**: a TDengine cluster can contain one, several, or even thousands of physical nodes. An application only needs to connect to the publicIp of any physical node in the cluster. When starting the CLI application taos, the -h option takes this publicIp.
**master/secondIp**: every dnode must be configured with a masterIp. After a dnode starts, it sends a join-cluster request to the configured masterIp, which is the privateIp of any node in the already-created cluster; for the first node of a cluster it is its own privateIp. To improve the chance of a successful connection, each dnode can also configure a secondIp, likewise the privateIp of any node in the cluster. If a node fails to connect to the masterIp, it will try the secondIp.
After a dnode starts, it obtains the cluster's mnode IP list and periodically sends its status to the mnodes.
@@ -245,4 +245,4 @@ vnodes (virtual data nodes) store the collected time-series data, and queries and computations run on them as well
**Note:** the cluster feature is currently limited to the Enterprise Edition.


@@ -78,6 +78,18 @@ taos> SELECT SUM(current) FROM meters INTERVAL(1s);
2018-10-03 14:38:16.000 | 36.000000000 |
Query OK, 5 row(s) in set (0.001538s)
```
Down-sampling also supports a time offset; for example, to sum the current collected by all smart meters every second, with each time window starting at the 500-millisecond mark:
```mysql
taos> SELECT SUM(current) FROM meters INTERVAL(1s, 500a);
ts | sum(current) |
======================================================
2018-10-03 14:38:04.500 | 11.189999809 |
2018-10-03 14:38:05.500 | 31.900000572 |
2018-10-03 14:38:06.500 | 11.600000000 |
2018-10-03 14:38:15.500 | 12.300000381 |
2018-10-03 14:38:16.500 | 35.000000000 |
Query OK, 5 row(s) in set (0.001521s)
```
In IoT scenarios it is hard to synchronize the collection times of different data collection points, yet many analysis algorithms (e.g. FFT) require the data to be aligned strictly at equal time intervals. In many systems the application has to do this itself; with TDengine's down-sampling it is easily solved. If a time interval has no collected data, TDengine also provides interpolation.


@@ -148,7 +148,7 @@ INSERT INTO <tb1_name> USING <stb1_name> TAGS (<tag1_value1>, ...) VALUES (<fiel
SELECT function<field_name>,…
FROM <stable_name>
WHERE <tag_name> <[=|<=|>=|<>] values..> ([AND|OR] …)
INTERVAL (<time range>)
INTERVAL (<interval> [, offset])
GROUP BY <tag_name>, <tag_name>
ORDER BY <tag_name> <asc|desc>
SLIMIT <group_limit>


@@ -33,8 +33,7 @@ taos> DESCRIBE meters;
- The internal function now returns the current time of the server
- When inserting a record, if the timestamp is now, the server's current time is used
- Epoch Time: a timestamp can also be a long integer, the number of milliseconds since 1970-01-01 08:00:00.000
- Times can be added and subtracted; e.g. now-2h means the query moment minus 2 hours (the last 2 hours). Time units after the number: a (milliseconds), s (seconds), m (minutes), h (hours), d (days), w (weeks), n (months), y (years). For example, select * from t1 where ts > now-2w and ts <= now-1w queries the data of the whole week before last
- TDengine does not yet support slicing time windows by natural year or natural month. The conversion of time-window units in a WHERE condition is: interval(1y) is equivalent to interval(365d), interval(1n) to interval(30d), and interval(1w) to interval(7d)
- Times can be added and subtracted; e.g. now-2h means the query moment minus 2 hours (the last 2 hours). The time unit after the number can be a (milliseconds), s (seconds), m (minutes), h (hours), d (days) or w (weeks). For example, select * from t1 where ts > now-2w and ts <= now-1w queries the data of the whole week before last. When specifying the time window (interval) of a down-sampling operation, the time units n (natural month) and y (natural year) can also be used.
TDengine's default timestamp precision is milliseconds, but microseconds can be enabled by changing the configuration parameter enableMicrosecond.
@@ -82,13 +81,40 @@ TDengine's default timestamp precision is milliseconds, but microseconds can be enabled by changing the configuration parameter enableMicrosecond
```
Deletes the database. All of its tables will be deleted. Use with caution.
- **Modify database parameters**
```mysql
ALTER DATABASE db_name COMP 2;
```
The COMP parameter changes the database file compression flag, with values in [0, 2]: 0 means no compression, 1 one-stage compression, and 2 two-stage compression.
```mysql
ALTER DATABASE db_name REPLICA 2;
```
The REPLICA parameter changes the number of replicas of the database, with values in [1, 3]. In a cluster, the number of replicas must be less than the number of dnodes.
```mysql
ALTER DATABASE db_name KEEP 365;
```
The KEEP parameter changes the number of days data files are retained; the default is 3650 and the range is [days, 365000]. It must be greater than or equal to the days parameter.
```mysql
ALTER DATABASE db_name QUORUM 2;
```
The QUORUM parameter is the number of confirmations required for a write to be considered successful, with values in [1, 3]. For asynchronous replication, quorum is set to 1: the virtual node in the master role confirms by itself. For synchronous replication it must be at least 2. In principle, Quorum >= 1 and Quorum <= replica (the number of replicas); this parameter must be provided when starting a synchronization module instance.
```mysql
ALTER DATABASE db_name BLOCKS 100;
```
The BLOCKS parameter is the number of cache-sized memory blocks in each VNODE (TSDB), so the memory used by a VNODE is roughly cache * blocks. Values are in [3, 1000].
**Tips**: after any of the above parameters is changed, show databases can be used to confirm whether the change took effect.
- **Show all databases in the system**
```mysql
SHOW DATABASES;
```
## Table management
- **Create a table**
@@ -272,7 +298,7 @@ TDengine's default timestamp precision is milliseconds, but microseconds can be enabled by changing the configuration parameter enableMicrosecond
SELECT select_expr [, select_expr ...]
FROM {tb_name_list}
[WHERE where_condition]
[INTERVAL [interval_offset,] interval_val]
[INTERVAL (interval_val [, interval_offset])]
[FILL fill_val]
[SLIDING fill_val]
[GROUP BY col_list]
@@ -945,17 +971,17 @@ TDengine supports aggregation by time window; table data can be sliced by time window and then aggregated
```mysql
SELECT function_list FROM tb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({NONE | VALUE | PREV | NULL | LINEAR})]
SELECT function_list FROM stb_name
[WHERE where_condition]
INTERVAL (interval)
INTERVAL (interval [, offset])
[FILL ({ VALUE | PREV | NULL | LINEAR})]
[GROUP BY tags]
```
- The length of the aggregation window is specified by the INTERVAL keyword, with a minimum interval of 10 milliseconds (10a). The aggregation and selection functions usable in an aggregation query are limited to functions with a single output: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff) and arithmetic expressions cannot be used
- The length of the aggregation window is specified by the INTERVAL keyword, with a minimum interval of 10 milliseconds (10a), and an offset is supported (the offset must be smaller than the interval). The aggregation and selection functions usable in an aggregation query are limited to functions with a single output: count, avg, sum, stddev, leastsquares, percentile, min, max, first, last; functions with multi-row output (e.g. top, bottom, diff) and arithmetic expressions cannot be used
- The WHERE clause can specify the start and end times of the query and other filter conditions
- The FILL clause specifies the fill mode when data is missing in a time range. The fill modes are:
1. No filling: NONE (the default).
@@ -993,5 +1019,5 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER
- The maximum length of a table name is 193; each row of data is at most 16k characters
- The maximum length of a column name is 65; at most 1024 columns are allowed and at least 2 are required, the first of which must be the timestamp
- At most 128 tags are allowed (0 is also allowed); the total tag length must not exceed 16k characters
- The maximum length of a SQL statement is 65480 characters, but it can be changed via the system configuration parameter maxSQLLength, up to 8M
- The maximum length of a SQL statement is 65480 characters, but it can be changed via the system configuration parameter maxSQLLength, up to 1M
- The numbers of databases, super tables and tables are not limited by the system, only by system resources


@@ -0,0 +1,173 @@
# TDengine 2.0 error codes and their corresponding decimal codes
| Status Code | Module | Error Code (hex) | Error Description | Error Code (decimal) |
|-----------------------| :---: | :---------: | :------------------------ | ---------------- |
|TSDB_CODE_RPC_ACTION_IN_PROGRESS| 0 | 0x0001| "Action in progress"| -2147483647|
|TSDB_CODE_RPC_AUTH_REQUIRED| 0 | 0x0002 | "Authentication required"| -2147483646|
|TSDB_CODE_RPC_AUTH_FAILURE| 0| 0x0003 | "Authentication failure"| -2147483645|
|TSDB_CODE_RPC_REDIRECT |0 | 0x0004| "Redirect"| -2147483644|
|TSDB_CODE_RPC_NOT_READY| 0 | 0x0005 | "System not ready"| -2147483643|
|TSDB_CODE_RPC_ALREADY_PROCESSED| 0 | 0x0006 |"Message already processed"| -2147483642|
|TSDB_CODE_RPC_LAST_SESSION_NOT_FINISHED| 0 |0x0007| "Last session not finished"| -2147483641|
|TSDB_CODE_RPC_MISMATCHED_LINK_ID| 0| 0x0008 | "Mismatched meter id"| -2147483640|
|TSDB_CODE_RPC_TOO_SLOW| 0 | 0x0009 | "Processing of request timed out"| -2147483639|
|TSDB_CODE_RPC_MAX_SESSIONS| 0 | 0x000A | "Number of sessions reached limit"| -2147483638|
|TSDB_CODE_RPC_NETWORK_UNAVAIL| 0 |0x000B | "Unable to establish connection" |-2147483637|
|TSDB_CODE_RPC_APP_ERROR| 0| 0x000C | "Unexpected generic error in RPC"| -2147483636|
|TSDB_CODE_RPC_UNEXPECTED_RESPONSE| 0 |0x000D | "Unexpected response"| -2147483635|
|TSDB_CODE_RPC_INVALID_VALUE| 0 | 0x000E | "Invalid value"| -2147483634|
|TSDB_CODE_RPC_INVALID_TRAN_ID| 0 | 0x000F | "Invalid transaction id"| -2147483633|
|TSDB_CODE_RPC_INVALID_SESSION_ID| 0| 0x0010 | "Invalid session id"| -2147483632|
|TSDB_CODE_RPC_INVALID_MSG_TYPE| 0| 0x0011| "Invalid message type"| -2147483631|
|TSDB_CODE_RPC_INVALID_RESPONSE_TYPE| 0 | 0x0012| "Invalid response type"| -2147483630|
|TSDB_CODE_RPC_INVALID_TIME_STAMP| 0| 0x0013| "Invalid timestamp"| -2147483629|
|TSDB_CODE_COM_OPS_NOT_SUPPORT| 0 | 0x0100| "Operation not supported"| -2147483392|
|TSDB_CODE_COM_MEMORY_CORRUPTED |0| 0x0101 | "Memory corrupted"| -2147483391|
|TSDB_CODE_COM_OUT_OF_MEMORY| 0| 0x0102| "Out of memory"| -2147483390|
|TSDB_CODE_COM_INVALID_CFG_MSG| 0 | 0x0103| "Invalid config message"| -2147483389|
|TSDB_CODE_COM_FILE_CORRUPTED| 0| 0x0104| "Data file corrupted" |-2147483388|
|TSDB_CODE_TSC_INVALID_SQL| 0| 0x0200 | "Invalid SQL statement"| -2147483136|
|TSDB_CODE_TSC_INVALID_QHANDLE| 0 | 0x0201 | "Invalid qhandle"| -2147483135|
|TSDB_CODE_TSC_INVALID_TIME_STAMP| 0 | 0x0202 | "Invalid combination of client/service time"| -2147483134|
|TSDB_CODE_TSC_INVALID_VALUE| 0 | 0x0203| "Invalid value in client"| -2147483133|
|TSDB_CODE_TSC_INVALID_VERSION| 0 | 0x0204 | "Invalid client version" |-2147483132|
|TSDB_CODE_TSC_INVALID_IE| 0 | 0x0205 | "Invalid client ie" |-2147483131|
|TSDB_CODE_TSC_INVALID_FQDN| 0 | 0x0206| "Invalid host name"| -2147483130|
|TSDB_CODE_TSC_INVALID_USER_LENGTH| 0 | 0x0207| "Invalid user name"| -2147483129|
|TSDB_CODE_TSC_INVALID_PASS_LENGTH| 0 | 0x0208 | "Invalid password"| -2147483128|
|TSDB_CODE_TSC_INVALID_DB_LENGTH| 0 | 0x0209| "Database name too long"| -2147483127|
|TSDB_CODE_TSC_INVALID_TABLE_ID_LENGTH| 0 | 0x020A | "Table name too long"| -2147483126|
|TSDB_CODE_TSC_INVALID_CONNECTION| 0 | 0x020B| "Invalid connection"| -2147483125|
|TSDB_CODE_TSC_OUT_OF_MEMORY| 0 | 0x020C | "System out of memory" |-2147483124|
|TSDB_CODE_TSC_NO_DISKSPACE| 0 | 0x020D | "System out of disk space"| -2147483123|
|TSDB_CODE_TSC_QUERY_CACHE_ERASED| 0 | 0x020E| "Query cache erased"| -2147483122|
|TSDB_CODE_TSC_QUERY_CANCELLED| 0 | 0x020F |"Query terminated"| -2147483121|
|TSDB_CODE_TSC_SORTED_RES_TOO_MANY| 0 |0x0210 | "Result set too large to be sorted"| -2147483120|
|TSDB_CODE_TSC_APP_ERROR| 0 | 0x0211 | "Application error"| -2147483119|
|TSDB_CODE_TSC_ACTION_IN_PROGRESS| 0 |0x0212 | "Action in progress"| -2147483118|
|TSDB_CODE_TSC_DISCONNECTED| 0 | 0x0213 |"Disconnected from service" |-2147483117|
|TSDB_CODE_TSC_NO_WRITE_AUTH| 0 | 0x0214 | "No write permission" |-2147483116|
|TSDB_CODE_MND_MSG_NOT_PROCESSED| 0| 0x0300| "Message not processed"| -2147482880|
|TSDB_CODE_MND_ACTION_IN_PROGRESS| 0 | 0x0301 |"Message is progressing"| -2147482879|
|TSDB_CODE_MND_ACTION_NEED_REPROCESSED| 0 | 0x0302 |"Messag need to be reprocessed"| -2147482878|
|TSDB_CODE_MND_NO_RIGHTS| 0 | 0x0303| "Insufficient privilege for operation"| -2147482877|
|TSDB_CODE_MND_APP_ERROR| 0 | 0x0304 | "Unexpected generic error in mnode"| -2147482876|
|TSDB_CODE_MND_INVALID_CONNECTION| 0 | 0x0305 | "Invalid message connection"| -2147482875|
|TSDB_CODE_MND_INVALID_MSG_VERSION| 0 | 0x0306 | "Incompatible protocol version"| -2147482874|
|TSDB_CODE_MND_INVALID_MSG_LEN| 0| 0x0307 | "Invalid message length"| -2147482873|
|TSDB_CODE_MND_INVALID_MSG_TYPE| 0 | 0x0308 | "Invalid message type" |-2147482872|
|TSDB_CODE_MND_TOO_MANY_SHELL_CONNS| 0 |0x0309 | "Too many connections"| -2147482871|
|TSDB_CODE_MND_OUT_OF_MEMORY| 0 |0x030A | "Out of memory in mnode"| -2147482870|
|TSDB_CODE_MND_INVALID_SHOWOBJ| 0 | 0x030B |"Data expired"| -2147482869|
|TSDB_CODE_MND_INVALID_QUERY_ID |0 | 0x030C |"Invalid query id" |-2147482868|
|TSDB_CODE_MND_INVALID_STREAM_ID| 0 |0x030D | "Invalid stream id"| -2147482867|
|TSDB_CODE_MND_INVALID_CONN_ID| 0| 0x030E | "Invalid connection id" |-2147482866|
|TSDB_CODE_MND_SDB_OBJ_ALREADY_THERE| 0 | 0x0320| "Object already there"| -2147482848|
|TSDB_CODE_MND_SDB_ERROR| 0 |0x0321 | "Unexpected generic error in sdb" |-2147482847|
|TSDB_CODE_MND_SDB_INVALID_TABLE_TYPE| 0 | 0x0322| "Invalid table type" |-2147482846|
|TSDB_CODE_MND_SDB_OBJ_NOT_THERE| 0 | 0x0323 |"Object not there" |-2147482845|
|TSDB_CODE_MND_SDB_INVAID_META_ROW| 0 | 0x0324| "Invalid meta row" |-2147482844|
|TSDB_CODE_MND_SDB_INVAID_KEY_TYPE| 0 | 0x0325 |"Invalid key type" |-2147482843|
|TSDB_CODE_MND_DNODE_ALREADY_EXIST| 0 | 0x0330 | "DNode already exists"| -2147482832|
|TSDB_CODE_MND_DNODE_NOT_EXIST| 0 | 0x0331| "DNode does not exist" |-2147482831|
|TSDB_CODE_MND_VGROUP_NOT_EXIST| 0 | 0x0332 |"VGroup does not exist"| -2147482830|
|TSDB_CODE_MND_NO_REMOVE_MASTER |0 | 0x0333 | "Master DNode cannot be removed"| -2147482829|
|TSDB_CODE_MND_NO_ENOUGH_DNODES |0 | 0x0334| "Out of DNodes"| -2147482828|
|TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT |0 | 0x0335 | "Cluster cfg inconsistent"| -2147482827|
|TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION| 0 | 0x0336 | "Invalid dnode cfg option"| -2147482826|
|TSDB_CODE_MND_BALANCE_ENABLED| 0 | 0x0337 | "Balance already enabled" |-2147482825|
|TSDB_CODE_MND_VGROUP_NOT_IN_DNODE| 0 |0x0338 | "Vgroup not in dnode"| -2147482824|
|TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE| 0 | 0x0339 | "Vgroup already in dnode"| -2147482823|
|TSDB_CODE_MND_DNODE_NOT_FREE |0 | 0x033A |"Dnode not avaliable"| -2147482822|
|TSDB_CODE_MND_INVALID_CLUSTER_ID |0 |0x033B | "Cluster id not match"| -2147482821|
|TSDB_CODE_MND_NOT_READY| 0 | 0x033C |"Cluster not ready"| -2147482820|
|TSDB_CODE_MND_ACCT_ALREADY_EXIST| 0 | 0x0340 | "Account already exists" |-2147482816|
|TSDB_CODE_MND_INVALID_ACCT| 0 | 0x0341| "Invalid account"| -2147482815|
|TSDB_CODE_MND_INVALID_ACCT_OPTION| 0 | 0x0342 | "Invalid account options"| -2147482814|
|TSDB_CODE_MND_USER_ALREADY_EXIST| 0 | 0x0350 | "User already exists"| -2147482800|
|TSDB_CODE_MND_INVALID_USER |0 | 0x0351 | "Invalid user" |-2147482799|
|TSDB_CODE_MND_INVALID_USER_FORMAT| 0 |0x0352 |"Invalid user format" |-2147482798|
|TSDB_CODE_MND_INVALID_PASS_FORMAT| 0| 0x0353 | "Invalid password format"| -2147482797|
|TSDB_CODE_MND_NO_USER_FROM_CONN| 0 | 0x0354 | "Can not get user from conn"| -2147482796|
|TSDB_CODE_MND_TOO_MANY_USERS| 0 | 0x0355| "Too many users"| -2147482795|
|TSDB_CODE_MND_TABLE_ALREADY_EXIST| 0| 0x0360| "Table already exists"| -2147482784|
|TSDB_CODE_MND_INVALID_TABLE_ID| 0| 0x0361| "Table name too long"| -2147482783|
|TSDB_CODE_MND_INVALID_TABLE_NAME| 0| 0x0362 | "Table does not exist"| -2147482782|
|TSDB_CODE_MND_INVALID_TABLE_TYPE| 0| 0x0363 | "Invalid table type in tsdb"| -2147482781|
|TSDB_CODE_MND_TOO_MANY_TAGS| 0 | 0x0364| "Too many tags"| -2147482780|
|TSDB_CODE_MND_TOO_MANY_TIMESERIES| 0| 0x0366| "Too many time series"| -2147482778|
|TSDB_CODE_MND_NOT_SUPER_TABLE| 0 |0x0367| "Not super table"| -2147482777|
|TSDB_CODE_MND_COL_NAME_TOO_LONG| 0| 0x0368| "Tag name too long"| -2147482776|
|TSDB_CODE_MND_TAG_ALREAY_EXIST| 0| 0x0369| "Tag already exists"| -2147482775|
|TSDB_CODE_MND_TAG_NOT_EXIST| 0 |0x036A | "Tag does not exist" |-2147482774|
|TSDB_CODE_MND_FIELD_ALREAY_EXIST| 0 | 0x036B| "Field already exists"| -2147482773|
|TSDB_CODE_MND_FIELD_NOT_EXIST| 0 | 0x036C | "Field does not exist"| -2147482772|
|TSDB_CODE_MND_INVALID_STABLE_NAME |0 | 0x036D |"Super table does not exist" |-2147482771|
|TSDB_CODE_MND_DB_NOT_SELECTED| 0 | 0x0380 | "Database not specified or available"| -2147482752|
|TSDB_CODE_MND_DB_ALREADY_EXIST| 0 | 0x0381 | "Database already exists"| -2147482751|
|TSDB_CODE_MND_INVALID_DB_OPTION| 0 | 0x0382 | "Invalid database options"| -2147482750|
|TSDB_CODE_MND_INVALID_DB| 0 | 0x0383 | "Invalid database name"| -2147482749|
|TSDB_CODE_MND_MONITOR_DB_FORBIDDEN| 0 | 0x0384 | "Cannot delete monitor database"| -2147482748|
|TSDB_CODE_MND_TOO_MANY_DATABASES| 0| 0x0385 | "Too many databases for account"| -2147482747|
|TSDB_CODE_MND_DB_IN_DROPPING| 0 | 0x0386| "Database not available" |-2147482746|
|TSDB_CODE_DND_MSG_NOT_PROCESSED| 0| 0x0400 | "Message not processed"| -2147482624|
|TSDB_CODE_DND_OUT_OF_MEMORY |0 | 0x0401 | "Dnode out of memory"| -2147482623|
|TSDB_CODE_DND_NO_WRITE_ACCESS| 0 | 0x0402 | "No permission for disk files in dnode"| -2147482622|
|TSDB_CODE_DND_INVALID_MSG_LEN| 0 | 0x0403 | "Invalid message length"| -2147482621|
|TSDB_CODE_VND_ACTION_IN_PROGRESS |0 |0x0500| "Action in progress" |-2147482368|
|TSDB_CODE_VND_MSG_NOT_PROCESSED| 0 |0x0501 | "Message not processed" |-2147482367|
|TSDB_CODE_VND_ACTION_NEED_REPROCESSED |0 |0x0502| "Action need to be reprocessed"| -2147482366|
|TSDB_CODE_VND_INVALID_VGROUP_ID |0 | 0x0503| "Invalid Vgroup ID"| -2147482365|
|TSDB_CODE_VND_INIT_FAILED| 0 | 0x0504 | "Vnode initialization failed"| -2147482364|
|TSDB_CODE_VND_NO_DISKSPACE| 0 |0x0505| "System out of disk space" |-2147482363|
|TSDB_CODE_VND_NO_DISK_PERMISSIONS| 0 | 0x0506| "No write permission for disk files" |-2147482362|
|TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR| 0 | 0x0507 | "Missing data file"| -2147482361|
|TSDB_CODE_VND_OUT_OF_MEMORY |0| 0x0508 | "Out of memory"| -2147482360|
|TSDB_CODE_VND_APP_ERROR| 0| 0x0509 | "Unexpected generic error in vnode"| -2147482359|
|TSDB_CODE_VND_INVALID_STATUS |0| 0x0510 | "Database not ready"| -2147482352|
|TSDB_CODE_VND_NOT_SYNCED| 0 | 0x0511 | "Database suspended"| -2147482351|
|TSDB_CODE_VND_NO_WRITE_AUTH| 0 | 0x0512| "Write operation denied" |-2147482350|
|TSDB_CODE_TDB_INVALID_TABLE_ID |0 | 0x0600 | "Invalid table ID"| -2147482112|
|TSDB_CODE_TDB_INVALID_TABLE_TYPE| 0| 0x0601 |"Invalid table type"| -2147482111|
|TSDB_CODE_TDB_IVD_TB_SCHEMA_VERSION| 0| 0x0602| "Invalid table schema version"| -2147482110|
|TSDB_CODE_TDB_TABLE_ALREADY_EXIST| 0 | 0x0603| "Table already exists"| -2147482109|
|TSDB_CODE_TDB_INVALID_CONFIG| 0 | 0x0604| "Invalid configuration"| -2147482108|
|TSDB_CODE_TDB_INIT_FAILED| 0 | 0x0605| "Tsdb init failed"| -2147482107|
|TSDB_CODE_TDB_NO_DISKSPACE| 0 | 0x0606| "No diskspace for tsdb"| -2147482106|
|TSDB_CODE_TDB_NO_DISK_PERMISSIONS| 0 | 0x0607| "No permission for disk files"| -2147482105|
|TSDB_CODE_TDB_FILE_CORRUPTED| 0 | 0x0608| "Data file(s) corrupted"| -2147482104|
|TSDB_CODE_TDB_OUT_OF_MEMORY| 0 | 0x0609| "Out of memory"| -2147482103|
|TSDB_CODE_TDB_TAG_VER_OUT_OF_DATE| 0 | 0x060A| "Tag too old"| -2147482102|
|TSDB_CODE_TDB_TIMESTAMP_OUT_OF_RANGE |0| 0x060B | "Timestamp data out of range"| -2147482101|
|TSDB_CODE_TDB_SUBMIT_MSG_MSSED_UP| 0| 0x060C| "Submit message is messed up"| -2147482100|
|TSDB_CODE_TDB_INVALID_ACTION| 0 | 0x060D | "Invalid operation"| -2147482099|
|TSDB_CODE_TDB_INVALID_CREATE_TB_MSG| 0 | 0x060E| "Invalid creation of table"| -2147482098|
|TSDB_CODE_TDB_NO_TABLE_DATA_IN_MEM| 0 | 0x060F| "No table data in memory skiplist" |-2147482097|
|TSDB_CODE_TDB_FILE_ALREADY_EXISTS| 0 | 0x0610| "File already exists"| -2147482096|
|TSDB_CODE_TDB_TABLE_RECONFIGURE| 0 | 0x0611| "Need to reconfigure table"| -2147482095|
|TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO| 0 | 0x0612| "Invalid information to create table"| -2147482094|
|TSDB_CODE_QRY_INVALID_QHANDLE| 0 | 0x0700| "Invalid handle"| -2147481856|
|TSDB_CODE_QRY_INVALID_MSG| 0 | 0x0701| "Invalid message"| -2147481855|
|TSDB_CODE_QRY_NO_DISKSPACE| 0 | 0x0702 | "No diskspace for query"| -2147481854|
|TSDB_CODE_QRY_OUT_OF_MEMORY| 0 | 0x0703 | "System out of memory"| -2147481853|
|TSDB_CODE_QRY_APP_ERROR| 0 | 0x0704 | "Unexpected generic error in query"| -2147481852|
|TSDB_CODE_QRY_DUP_JOIN_KEY| 0 | 0x0705| "Duplicated join key"| -2147481851|
|TSDB_CODE_QRY_EXCEED_TAGS_LIMIT| 0 | 0x0706 | "Tag conditon too many"| -2147481850|
|TSDB_CODE_QRY_NOT_READY |0| 0x0707 | "Query not ready" |-2147481849|
|TSDB_CODE_QRY_HAS_RSP| 0 | 0x0708| "Query should response"| -2147481848|
|TSDB_CODE_GRANT_EXPIRED| 0 | 0x0800| "License expired"| -2147481600|
|TSDB_CODE_GRANT_DNODE_LIMITED| 0 | 0x0801 | "DNode creation limited by licence"| -2147481599|
|TSDB_CODE_GRANT_ACCT_LIMITED |0| 0x0802 |"Account creation limited by license"| -2147481598|
|TSDB_CODE_GRANT_TIMESERIES_LIMITED| 0 | 0x0803 | "Table creation limited by license"| -2147481597|
|TSDB_CODE_GRANT_DB_LIMITED| 0 | 0x0804 | "DB creation limited by license"| -2147481596|
|TSDB_CODE_GRANT_USER_LIMITED| 0 | 0x0805 | "User creation limited by license"| -2147481595|
|TSDB_CODE_GRANT_CONN_LIMITED| 0| 0x0806 | "Conn creation limited by license" |-2147481594|
|TSDB_CODE_GRANT_STREAM_LIMITED| 0 | 0x0807 | "Stream creation limited by license"| -2147481593|
|TSDB_CODE_GRANT_SPEED_LIMITED| 0 | 0x0808 | "Write speed limited by license" |-2147481592|
|TSDB_CODE_GRANT_STORAGE_LIMITED| 0 |0x0809 | "Storage capacity limited by license"| -2147481591|
|TSDB_CODE_GRANT_QUERYTIME_LIMITED| 0 | 0x080A | "Query time limited by license" |-2147481590|
|TSDB_CODE_GRANT_CPU_LIMITED| 0 |0x080B |"CPU cores limited by license"| -2147481589|
|TSDB_CODE_SYN_INVALID_CONFIG| 0 | 0x0900| "Invalid Sync Configuration"| -2147481344|
|TSDB_CODE_SYN_NOT_ENABLED| 0 | 0x0901 | "Sync module not enabled" |-2147481343|
|TSDB_CODE_WAL_APP_ERROR| 0| 0x1000 | "Unexpected generic error in wal" |-2147479552|


@ -47,6 +47,8 @@ Raw DataSize = numOfTables * rowSizePerTable * rowsPerTable
Because TDengine scales out well, it is easy to determine, from the total data volume and the resources of a single physical machine or VM, how many physical machines or VMs need to be purchased.
**To size CPU, memory, and storage right away, see the <a href='https://www.taosdata.com/config/config.html'>resource estimation method</a>.**
## Fault Tolerance and Disaster Recovery
### Fault Tolerance
@ -91,6 +93,7 @@ The TDengine backend service is provided by taosd and can be configured in taos.cfg
- role: the dnode's optional role. 0 - any: may serve as an mnode and may be assigned vnodes; 1 - mgmt: may only serve as an mnode and may not be assigned vnodes; 2 - dnode: may not serve as an mnode and may only be assigned vnodes
- debugFlag: runtime log switch. 131: output error and warning logs; 135: output error, warning, and debug logs; 143: output error, warning, debug, and trace logs. Default: 131 or 135 (different modules have different defaults)
- numOfLogLines: maximum number of lines allowed in a single log file. Default: 10,000,000 lines.
- logKeepDays: maximum retention time of log files. When greater than 0, a log file is renamed to taosdlog.xxx, where xxx is the timestamp, in seconds, of the file's last modification. Default: 0 days.
- maxSQLLength: maximum allowed length of a single SQL statement. Default: 65380 bytes.
- telemetryReporting: whether TDengine is allowed to collect and report basic usage information; 0 means not allowed, 1 means allowed. Default: 1.
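As a minimal taos.cfg sketch of the logging-related parameters above (the values are illustrative, not recommendations):
```
# maximum lines per log file (default 10,000,000)
numOfLogLines   10000000
# keep log files for 30 days instead of forever (default 0)
logKeepDays     30
# maximum length of one SQL statement, in bytes (default 65380)
maxSQLLength    65380
```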
@ -127,24 +130,100 @@ When a new dnode joins a TDengine cluster, several cluster-related parameters
- statusInterval: interval at which a dnode reports its status to the mnode, in seconds. Default: 1.
- maxTablesPerVnode: maximum number of tables that can be created in each vnode. Default: 1000000.
- maxVgroupsPerDb: maximum number of vnodes each database may use.
- arbitrator: end point of the arbitrator in the system; empty by default.
- For the timezone, locale, and charset settings, see the client configuration.
For debugging convenience, the log configuration of each dnode can be adjusted temporarily via SQL (the change is lost after a system restart):
```mysql
ALTER DNODE <dnode_id> <config>
```
- dnode_id: obtained with the SQL command "SHOW DNODES"
- config: the log parameter to adjust, taken from the following list:
> resetlog: truncate the old log file and create a new one
> debugFlag < 131 | 135 | 143 >: set debugFlag to 131, 135, or 143
For example:
```
alter dnode 1 debugFlag 135;
```
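Likewise, a sketch using the resetlog value listed above, which truncates dnode 1's old log file and starts a fresh one:
```mysql
alter dnode 1 resetlog;
```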
## Client Configuration
The interactive client application of TDengine is taos, which shares the configuration file taos.cfg with taosd. When running taos, the -c option specifies the configuration directory: taos -c /home/cfg uses the parameters in /home/cfg/taos.cfg, and the default directory is /etc/taos. This section explains the parameters the taos client uses in taos.cfg.
Client configuration parameters:
- firstEp: end point of the first taosd instance in the cluster that taos connects to at startup. Default: localhost:6030.
- secondEp: end point of the second taosd instance in the cluster that taos tries if firstEp cannot be reached. Empty by default.
- charset: character set encoding. Obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API.
- locale: system locale and encoding format. Obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API.
- maxBinaryDisplayWidth: maximum display width of binary and nchar fields in the shell; anything beyond this limit is hidden. Default: 30. It can be changed at runtime with the shell command set max_binary_display_width *nn*.
- locale
> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API
TDengine provides a dedicated field type, nchar, for storing wide characters in non-ASCII encodings such as Chinese, Japanese, and Korean. Data written to an nchar field is uniformly encoded as UCS4-LE before being sent to the server. Note that encoding correctness is the client's responsibility, so to store non-ASCII characters such as Chinese, Japanese, or Korean in nchar fields, the client's encoding format must be set correctly.
Characters entered on the client use the operating system's current default encoding, mostly UTF-8 on Linux, though some Chinese systems may use GB18030 or GBK. The default encoding in a Docker environment is POSIX. On Chinese-language Windows the encoding is CP936. The client must make sure its configured character set matches the current encoding of the operating system it runs on, so that data in nchar fields is converted correctly to UCS4-LE.
On Linux, locale names follow the rule <language>_<region>.<charset>, e.g. zh_CN.UTF-8: zh means Chinese, CN means mainland China, and UTF-8 is the character set. The character set tells the client how to perform encoding conversion when parsing local strings. Linux and Mac OSX can determine the system's character encoding by setting the locale. Because the locale format Windows uses is not the POSIX standard one, Windows needs the separate parameter charset to specify the character encoding; charset can also be used on Linux.
- charset
> Default: obtained dynamically from the system; if automatic detection fails, it must be set in the configuration file or via the API
If charset is not set in the configuration file, taos on Linux reads the system's current locale at startup and extracts the charset encoding from it. If reading the locale fails, it tries to read the charset setting; if that also fails, startup is aborted.
On Linux, locale information includes the character encoding, so once the system locale is set correctly, charset need not be set separately. For example:
```
locale zh_CN.UTF-8
```
On Windows, the current encoding cannot be obtained from the locale. If the character encoding cannot be read from the configuration file, taos defaults to CP936, which is equivalent to adding the following to the configuration file:
```
charset CP936
```
To adjust the character encoding, look up the encoding your operating system uses and set it correctly in the configuration file.
On Linux, if both locale and charset are set and they disagree, the value set later overrides the one set earlier.
```
locale zh_CN.UTF-8
charset GBK
```
Here the effective charset is GBK.
```
charset GBK
locale zh_CN.UTF-8
```
Here the effective charset is UTF-8.
The log configuration parameters are exactly the same as on the server.
- timezone
Default: the current timezone setting, obtained dynamically from the system
The timezone of the system the client runs on. To handle data writes and queries across multiple timezones, TDengine uses Unix timestamps to record and store time. Since a Unix timestamp for any given instant is identical in every timezone, the conversion to a Unix timestamp is performed on the client. To guarantee that other time formats are converted to the correct Unix timestamp, the correct timezone needs to be set.
On Linux, the client automatically reads the timezone configured on the system. The timezone can also be set in the configuration file in several ways. For example:
```
timezone UTC-8
timezone GMT-8
timezone Asia/Shanghai
```
are all valid formats for setting the UTC+8 timezone (note that the first form uses the POSIX TZ sign convention, in which UTC-8 denotes UTC+8).
The timezone setting affects how the non-Unix-timestamp parts of write and query SQL statements (timestamp strings and the keyword now) are parsed. For example:
```
SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08';
```
In UTC+8, the statement is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554955268000;
```
In the UTC timezone, it is equivalent to:
```
SELECT count(*) FROM table_name WHERE TS<1554984068000;
```
To avoid the uncertainty introduced by string time formats, Unix timestamps can also be used directly. Moreover, timestamp strings with an explicit timezone can be used in SQL statements, e.g. RFC 3339 strings such as 2013-04-12T15:52:01.123+08:00 or ISO 8601 strings such as 2013-04-12T15:52:01.123+0800; the conversion of these two string forms to Unix timestamps is not affected by the system's timezone.
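For example, the following two statements (a sketch reusing the table_name from above) select the same rows regardless of the client's timezone setting, because the first literal carries an explicit offset and the second is an absolute Unix timestamp:
```
SELECT count(*) FROM table_name WHERE TS < '2019-04-11T12:01:08.000+08:00';
SELECT count(*) FROM table_name WHERE TS < 1554955268000;
```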
When starting taos, a taosd instance's end point can also be specified on the command line; otherwise it is read from taos.cfg.
@ -170,6 +249,12 @@ ALTER USER <user_name> PASS <'password'>;
Changes a user's password. To prevent conversion to lower case, the password must be wrapped in single quotes (ASCII single quotes).
```
ALTER USER <user_name> PRIVILEDGE <super|write|read>;
```
Changes a user's privilege to super/write/read. Single quotes are not needed.
```
SHOW USERS;
```
@ -323,5 +408,5 @@ All TDengine executables are located in the _/usr/local/taos/bin_ directory by default
You can configure different data and log directories by modifying the system configuration file taos.cfg.


@ -39,7 +39,7 @@ create table D1002 using meters tags ("Beijing.Haidian", 2);
We already know that the following SQL statement computes the average voltage of these meters with one-minute time windows and a 30-second forward increment.
```sql
select avg(voltage) from meters interval(1m) sliding(30s);
```
Every time this statement executes, it recomputes over all the data.
@ -47,14 +47,14 @@ select avg(voltage) from meters interval(1m) sliding(30s)
The statement above can be improved as follows, executed periodically with a different `startTime` each time:
```sql
select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s);
```
This works, but TDengine offers a simpler approach:
just prefix the original query with `create table {tableName} as `, for example:
```sql
create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s);
```
This automatically creates a new table named `avg_vol`; thereafter, every 30 seconds, TDengine incrementally executes the SQL statement after `as`,
@ -80,7 +80,7 @@ taos> select * from avg_vol;
For example, the continuous query created with the following SQL will stop automatically after running for one hour.
```sql
create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s);
```
Note that `now` in the example above refers to the time the continuous query is created, not the time it executes; otherwise the query could never stop automatically.
@ -396,7 +396,7 @@ ts: 1597465200000 current: 11.2 voltage: 220 phase: 1 location: Beijing.Haidian
```sql
# taos
taos> use power;
taos> insert into d1001 values("2020-08-15 12:40:00.000", 12.4, 220, 1);
```


@ -67,7 +67,7 @@ The logical structure of TDengine's distributed architecture is shown below:
<center> Figure 1: TDengine architecture </center>
A complete TDengine system runs on one or more physical nodes. Logically, it consists of data nodes (dnode), the TDengine client (taosc), and applications (app). The system contains one or more data nodes, which form a cluster. Applications interact with the TDengine cluster through taosc's API. Each logical unit is introduced briefly below.
**Physical node (pnode):** A pnode is an independently running computer with its own compute, storage, and network capabilities; it can be a physical machine with an OS installed, a virtual machine, or a container. A physical node is identified by its configured FQDN (Fully Qualified Domain Name). TDengine relies entirely on FQDNs for network communication; if you are unfamiliar with FQDNs, see the blog post [All about TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).
**Data node (dnode):** A dnode is a running instance of taosd, the TDengine server-side executable, on a physical node; a working system must have at least one data node. A dnode contains zero or more logical virtual nodes (VNODE) and zero or at most one logical management node (mnode). A dnode is uniquely identified in the system by the instance's End Point (EP): the combination of the FQDN of the physical node where the dnode runs and the configured network port. By configuring different ports, a single physical node (a physical machine, VM, or container) can run multiple instances, i.e. host multiple data nodes.
@ -84,17 +84,17 @@ The logical structure of TDengine's distributed architecture is shown below:
**FQDN configuration:** A data node has one or more FQDNs, which can be specified with the parameter "fqdn" in taos.cfg; if not specified, the system obtains the FQDN automatically. If the node has no FQDN configured, the fqdn parameter can be set directly to its IP address. Using an IP is not recommended, however, because IP addresses can change, and a change would break the cluster. A data node's EP (End Point) consists of FQDN + Port. When using FQDNs, make sure the DNS service works properly, or configure the hosts file on the node and on the nodes where applications run.
**Port configuration:** A data node's externally facing port is determined by the system parameter serverPort; the port for intra-cluster communication is serverPort+5. Data replication between data nodes in the cluster occupies one more TCP port, serverPort+10. To handle UDP data efficiently with multiple threads, each internal and external UDP channel needs 5 consecutive ports, so a data node's total port range runs from serverPort to serverPort+10, 11 TCP/UDP ports in all. Make sure the firewall keeps these ports open. Each data node can be configured with a different serverPort.
**External cluster connection:** A TDengine cluster can hold one, several, or even thousands of data nodes. An application only needs to initiate a connection to any one data node in the cluster. The only network parameter required to connect is a data node's End Point (FQDN plus the configured port). When starting the CLI application taos, the data node's FQDN can be specified with the -h option and its configured port with -P; if the port is not configured, the system parameter serverPort is used.
**Intra-cluster communication:** Data nodes communicate with each other over TCP/UDP. When a data node starts, it obtains the EP information of the dnode hosting the mnode, then establishes a connection with the mnode in the system and exchanges information. Obtaining the mnode's EP information takes three steps: 1. check whether the mnodeEpList file exists; if it does not exist or cannot be opened to obtain the mnode EP information, go to step 2; 2. check the configuration file taos.cfg for the mnode EP parameters first and second; if they are missing from taos.cfg or invalid, go to step 3; 3. set the node's own EP as the mnode EP and run independently. After obtaining the mnode EP list, the data node initiates a connection; if it succeeds, the node joins the working cluster; if not, it tries the next entry in the mnode EP list. If every attempt fails, it sleeps for a few seconds and then tries again.
**Mnode selection:** TDengine logically has a management node, but there is no separate executable for it; the server side has only one executable, taosd. So which data node acts as the management node? This is decided automatically by the system, with no manual intervention. The rule is: when a data node starts, it checks its own End Point against the acquired mnode EP List; if its EP appears there, the data node starts the mnode module and becomes an mnode; otherwise it does not. During operation, the mnode may migrate to a new dnode because of load balancing, outages, and so on, all transparently and without manual intervention; configuration changes are decisions the mnode makes itself based on resources.
**Joining of new data nodes:** Once the system has one data node, it is already a working system. Adding a new node to the cluster takes two steps: first, use the TDengine CLI to connect to an existing working data node and run the command "create dnode" to add the new data node's End Point; second, in the new data node's configuration file taos.cfg, set the first and second parameters to the EPs of any two data nodes in the existing cluster. See the user manual for the detailed steps. In this way the cluster is built up step by step.
**Redirection:** Whether dnode or taosc, the first connection to be established is always to the mnode. But the mnode is created and maintained automatically by the system, so the user does not know which dnode is running the mnode. TDengine only requires initiating a connection to any working dnode in the system, because every running dnode maintains the list of currently running mnode EPs. On receiving a connection request from a newly started dnode or from taosc, a dnode that is not itself an mnode replies with the mnode EP List; on receiving this list, the taosc or newly started dnode retries the connection against it. When the mnode EP List changes, the message exchange between nodes quickly propagates the latest list to every data node, which in turn notify taosc.
### A Typical Message Flow
To explain the relationship among vnode, mnode, taosc, and applications, and the roles each plays, the flow of a typical operation, writing data, is analyzed below.
@ -197,7 +197,7 @@ A Master Vnode follows the write flow below
### Master/Slave Election
A vnode keeps a data version number (Version), which is persisted together with the in-memory data. Every data update, whether collected time-series data or metadata, increments this version number by one.
When a vnode starts, its role (master or slave) is undetermined and its data is in an unsynchronized state; it must establish TCP connections with the other nodes of its virtual node group and exchange status with them, including version and its own role. Through this status exchange the system enters the master-election flow, with the following rules:
1. If there is only one replica, that replica is always the master;
2. When all replicas are online, the one with the highest version is elected master;
@ -276,14 +276,14 @@ SQL parsing and validation are done on the client. Parsing a SQL statement and generating
TDengine introduces the keyword interval to split the time axis into fixed-length time windows and aggregate data by window, applying the requested aggregations to the data inside each window. For example:
```mysql
select count(*) from d1001 interval(1h);
```
For the data collected by device d1001, this returns the number of records stored per hour, in one-hour time windows.
In applications that need continuous query results, missing data in a given time range would make the results for that range missing as well. TDengine provides a strategy for interpolating the results of time-axis aggregations: with the keyword Fill, the aggregated results along the time axis can be interpolated. For example:
```mysql
select count(*) from d1001 interval(1h) fill(prev);
```
This counts the records per hour for device d1001; if a given hour has no data, the previous hour's statistic is returned. TDengine provides forward interpolation (prev), linear interpolation (linear), NULL filling (NULL), and filling with a specific value (value).
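As a sketch of the value-filling variant, the following returns 0 instead of a gap for any hour without data:
```mysql
select count(*) from d1001 interval(1h) fill(value, 0);
```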


@ -1,46 +1,70 @@
# TDengine Cluster Installation and Management
Multiple TDengine servers, that is, multiple running instances of taosd, can form a cluster to guarantee highly reliable operation of TDengine and provide horizontal scalability. To understand TDengine 2.0 cluster management, some familiarity with basic cluster concepts is needed; see the chapter on the overall architecture of TDengine 2.0. And before installing a cluster, install and try out single-node operation by following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started20/).
Each data node of the cluster is uniquely identified by its End Point, which consists of an FQDN (Fully Qualified Domain Name) plus a Port, e.g. h1.taosdata.com:6030. The FQDN is usually the server's hostname, obtainable with the Linux command `hostname -f` (for configuring FQDNs, see [All about TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)). The port is the port this data node serves externally, 6030 by default, modifiable through the parameter serverPort in taos.cfg. A physical node may have multiple hostnames configured; TDengine automatically takes the first one, but it can also be specified with the fqdn parameter in taos.cfg. If you are used to direct IP access, set fqdn to this node's IP address.
TDengine cluster management is extremely simple: apart from adding and removing nodes, which require manual intervention, everything else is fully automatic, minimizing the operations workload. This chapter describes the cluster management operations in detail.
## Preparation
**Step 0:** If no DNS service is deployed, plan the FQDNs of all physical nodes of the cluster, then follow the steps in [All about TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) to add the IP-to-FQDN mappings of all the cluster's physical nodes;
**Step 1:** If the physical nodes used to build the cluster hold earlier test data, or ever had a 1.x version or another version of TDengine installed, delete it first and wipe all data; for the detailed steps see the blog post [Installation and Uninstallation of Various TDengine Packages](https://www.taosdata.com/blog/2019/08/09/566.html);
**Note 1:** Because FQDN information gets written into files, if FQDN was not configured or was changed while TDengine was already started, make sure to clean up the previous data (rm -rf /var/lib/taos/), but only once the data is confirmed useless or backed up;
**Note 2:** The client also needs configuration so that it can resolve each node's FQDN correctly, whether through a DNS service or the hosts file.
**Step 2:** It is recommended to close the firewall on all physical nodes, or at least make sure TCP and UDP ports 6030-6042 are open. It is **strongly recommended** to close the firewall first and configure the ports only after the cluster is built;
**Step 3:** Install TDengine on all physical nodes, and the version must be identical, **but do not start taosd**. During installation, when prompted whether to join an existing TDengine cluster, press Enter on the first physical node to create a new cluster; on each subsequent physical node, enter the FQDN:port (default 6030) of any online physical node of that cluster;
**Step 4:** Check the network settings of all data nodes and of the physical nodes where applications will run:
1. On every physical node run the command `hostname -f` and confirm that all nodes' hostnames are distinct (nodes that only host the application driver need not be checked);
2. On every physical node run `ping host`, where host is another physical node's hostname, and check that the other physical nodes can be pinged; if not, check the network settings, the /etc/hosts file (on Windows the default path is C:\Windows\system32\drivers\etc\hosts), or the DNS configuration. If the nodes cannot ping each other, the cluster cannot be formed;
3. From the physical node where the application runs, ping the data nodes running taosd; if they cannot be pinged, the application cannot connect to taosd; check the DNS settings or hosts file of the application's physical node;
4. Each data node's End Point is the output hostname plus the port, e.g. h1.taosdata.com:6030
**Step 5:** Modify TDengine's configuration file (/etc/taos/taos.cfg must be modified on every node). Assume the first data node to be started has End Point h1.taosdata.com:6030; its cluster-related parameters are as follows:
```
// firstEp is the first data node each data node connects to after its first start
firstEp               h1.taosdata.com:6030
// the data node's FQDN; must be configured; if the host has only one hostname, this line can be commented out
fqdn                  h1.taosdata.com
// port of this data node, default 6030
serverPort            6030
// required when the number of server-side replicas is even; see the section "Using an Arbitrator"
arbitrator            ha.taosdata.com:6042
```
The parameters that must be modified are firstEp and fqdn. On every data node, firstEp must be configured identically, **but fqdn must be set to the value of the data node it resides on**. The other parameters need not be changed unless you clearly know why they should be.
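For instance, a second data node h2.taosdata.com (a hypothetical host) would keep the same firstEp but use its own fqdn:
```
firstEp               h1.taosdata.com:6030
fqdn                  h2.taosdata.com
serverPort            6030
```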
**A data node (dnode) joining a cluster must have the 11 cluster-related parameters in the table below identical to the cluster's; otherwise it cannot join the cluster.**
| **#** | **Parameter**      | **Meaning**                                                    |
| ----- | ------------------ | -------------------------------------------------------------- |
| 1     | numOfMnodes        | number of management nodes in the system                        |
| 2     | mnodeEqualVnodeNum | the number of vnodes an mnode is counted as consuming           |
| 3     | offlineThreshold   | dnode offline threshold; a dnode exceeding it is taken offline  |
| 4     | statusInterval     | interval at which a dnode reports its status to the mnode       |
| 5     | arbitrator         | end point of the arbitrator in the system                       |
| 6     | timezone           | timezone                                                        |
| 7     | locale             | system locale and encoding format                               |
| 8     | charset            | character set encoding                                          |
| 9     | balance            | whether automatic load balancing is enabled                     |
| 10    | maxTablesPerVnode  | maximum number of tables that can be created in each vnode      |
| 11    | maxVgroupsPerDb    | maximum number of vnodes each DB may use                        |
## Starting the First Data Node
Follow the instructions in [Getting Started](https://www.taosdata.com/cn/getting-started20/) to start the first data node, e.g. h1.taosdata.com, then run taos to start the taos shell and execute the command "show dnodes;" in the shell, as follows:
```
Welcome to the TDengine shell from Linux, Client Version:2.0.0.0
Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.
@ -53,74 +77,88 @@ Query OK, 1 row(s) in set (0.006385s)
taos>
```
In the output above you can see that the End Point of this newly started data node is h1.taos.com:6030; this is the firstEP of the new cluster.
## Starting Subsequent Data Nodes
To add subsequent data nodes to the existing cluster, take the following steps:
1. Start taosd on every physical node following the chapter [Getting Started](https://www.taosdata.com/cn/getting-started/);
2. On the first data node, use the CLI program taos to log in to TDengine and execute the command:
```
CREATE DNODE "h2.taos.com:6030";
```
   This adds the new data node's End Point (obtained in Step 4 of the preparation) to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. Note: replace the example "h2.taos.com:6030" with the End Point of this new data node.
3. Then execute the command
```
SHOW DNODES;
```
   to check whether the new node was joined successfully. If the newly added data node is offline, then check:
   - whether taosd on that data node is running normally; if not, find out why first;
   - the first few lines of that data node's taosd log file taosdlog.0 (usually under /var/log/taos) to see whether the fqdn and port the log reports match the End Point just added; if they differ, add the correct End Point.
Following the steps above, new data nodes can be added to the cluster one after another.
**Tips:**
- firstEp only takes effect the first time a data node joins the cluster; after joining, the data node saves the latest mnode End Point list and no longer depends on this parameter.
- Any online data node that has already joined the cluster can serve as the firstEP of nodes joining later.
- Two data nodes (dnodes) started without the firstEp parameter will run independently. In that case it is impossible to join one data node to the other to form a cluster. **Two independent clusters can never be merged into a new cluster.**
## Data Node Management
The above shows how to build a cluster from scratch. Once the cluster is formed, new data nodes can be added at any time to scale out, data nodes can be removed, and the current state of the cluster can be inspected.
### Adding a Data Node
Run the CLI program taos, log in to the system as root, and execute:
```
CREATE DNODE "fqdn:port";
```
This adds the new data node's End Point to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. The fqdn and port a data node serves externally can be configured in taos.cfg; by default they are obtained automatically. (Relying on automatic detection for the FQDN is strongly discouraged: it may produce an End Point for the data node that is not what you expect.)
### Removing a Data Node
Run the CLI program taos, log in to TDengine as root, and execute:
```
DROP DNODE "fqdn:port";
```
where fqdn is the FQDN of the data node being removed, and port is the port of its external service.
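For example, to remove the data node added earlier (a sketch; substitute the actual End Point reported by SHOW DNODES):
```
DROP DNODE "h2.taos.com:6030";
```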
### Listing Data Nodes
Run the CLI program taos, log in to TDengine as root, and execute:
```
SHOW DNODES;
```
This lists all dnodes in the cluster, with each dnode's fqdn:port, status (ready, offline, etc.), number of vnodes, number of still-unused vnodes, and so on. This command can be used to check the cluster after adding or removing a data node.
### Listing Virtual Node Groups
To make full use of multi-core hardware and provide scalability, data needs to be sharded. TDengine therefore splits a DB's data into multiple parts stored in multiple vnodes. The vnodes may be distributed across multiple data nodes (dnodes), which is how horizontal scale-out is achieved. A vnode belongs to exactly one DB, but a DB can have multiple vnodes. vnodes are allocated automatically by the mnode based on the current system resources, with no manual intervention required.
Run the CLI program taos, log in to TDengine as root, and execute:
```
SHOW VGROUPS;
```
## High Availability of vnodes
TDengine provides high availability of the system, covering both vnodes and the mnode, through a multi-replica mechanism.
The replica count of vnodes is associated with a DB; a cluster can contain multiple DBs, and each DB can be configured with its own replica count according to operational needs. When creating a database, the parameter replica specifies the replica count (default 1). With a single replica, reliability cannot be guaranteed: as soon as the node holding the data goes down, service stops. The number of nodes in the cluster must be greater than or equal to the replica count, otherwise creating a table returns the error "more dnodes are needed". For example, the following command creates database demo with 3 replicas:
@ -128,21 +166,25 @@ The replica count of vnodes is associated with a DB; a cluster can contain multiple DBs, each
```
CREATE DATABASE demo replica 3;
```
The data in a DB is partitioned into multiple vnode groups; the number of vnodes in a vnode group equals the DB's replica count, and the data of the vnodes within one vnode group is fully identical. To guarantee high availability, the vnodes of a vnode group must be distributed across different data nodes (dnodes) (and, in actual deployments, on different physical machines). As long as more than half of the vnodes in a vgroup are in working state, the vgroup can serve externally.
A data node (dnode) may hold the data of multiple DBs, so when a dnode goes offline, multiple DBs may be affected. If half or more of the vnodes in a vnode group are not working, that vnode group cannot serve externally and data can be neither inserted nor read, which affects the read and write operations of some of the tables of the DB it belongs to.
Because of the introduction of vnodes, it is not possible to simply conclude that "the cluster works as long as more than half of the data nodes (dnodes) work". But for simple cases the conclusion is easy. For example, with a replica count of 3 and only three dnodes, the whole cluster can still work if just one node stops working; if two data nodes stop working, however, the whole cluster stops working.
## High Availability of the Mnode
The TDengine cluster is managed by the mnode (a module of taosd, the management node). To guarantee high availability of the mnode, multiple mnode replicas can be configured; the replica count is set by the system parameter numOfMnodes, with a valid range of 1-3. To guarantee strong consistency of the metadata, the mnode replicas replicate data synchronously.
A cluster has multiple data nodes (dnodes), but a dnode runs at most one mnode instance. With multiple dnodes, which dnode acts as an mnode? This is decided entirely automatically by the system, based on the resource situation of the whole system. The user can execute the following command in TDengine's console via the CLI program taos:
```
SHOW MNODES;
```
This shows the mnode list, which gives the End Point of the dnode hosting each mnode and its role (master, slave, unsynced, or offline).
When the first data node of the cluster starts, that data node is certain to run an mnode instance; otherwise that data node (dnode) cannot work properly, since a system must have at least one mnode. If numOfMnodes is configured as 2, then when the second dnode starts, it also runs an mnode instance.
To guarantee high availability of the mnode service, numOfMnodes must be set to 2 or greater. Because the metadata kept by the mnode must be strongly consistent, when numOfMnodes is greater than 2 the replication parameter quorum is automatically set to 2, meaning at least two replicas must confirm a successful write before the client application is notified that the write succeeded.
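A minimal taos.cfg sketch enabling two mnode replicas; per the 11-parameter table above, it must be set identically on every data node:
```
numOfMnodes           2
```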
@ -152,22 +194,25 @@ SHOW MNODES;
Three situations trigger load balancing, and none of them requires manual intervention.
- When a new data node is added to the cluster, the system automatically triggers load balancing, and some of the data on other nodes is automatically migrated to the new data node.
- When a data node is removed from the cluster, the system automatically migrates that data node's data to other data nodes.
- If a data node is overloaded (holding too much data), the system automatically rebalances and moves some of that data node's vnodes to other nodes.
When any of the above happens, the system starts a load computation over each data node and decides how to migrate accordingly.
**[Tip] Load balancing is controlled by the parameter balance, which determines whether automatic load balancing is enabled.**
## Handling Offline Data Nodes
When a data node goes offline, the TDengine cluster detects it automatically. There are two cases:
- The data node stays offline longer than a threshold (the parameter offlineThreshold in taos.cfg controls the duration): the system then automatically removes the data node, raises a system alert, and triggers the load-balancing flow. If the removed data node comes back online afterwards, it can no longer join the cluster; the system administrator must add it to the cluster again before it starts working.
- The node comes back online within offlineThreshold: the system automatically starts the data-recovery flow, and the node resumes normal work once its data is fully recovered.
**Note:** If every data node belonging to a virtual node group (including the mnode group) is offline or unsynced, a Master can only be elected after all the data nodes of that virtual node group come back online and can exchange status information; only then can the virtual node group serve externally. For example, if the whole cluster has 3 data nodes and the replica count is 3, then if all 3 data nodes go down and only 2 restart, the system cannot work; only after all 3 data nodes restart successfully can it serve again.
## Using an Arbitrator
If the replica count is even, a master cannot be elected when half or more of the vnodes in a vnode group are not working. Likewise, when half or more of the mnodes are not working, the mnode master cannot be elected, because of the "split brain" problem. To solve this, TDengine introduces the concept of an arbitrator. The arbitrator emulates a vnode or mnode at work, but is only responsible for the network connection and does not process any data insertion or access. As long as more than half of the vnodes or mnodes, counting the arbitrator, are working, the vnode group or mnode group can provide data insertion or query service normally. For example, in the two-replica case, if one node A is offline but the other node B works normally and can connect to the arbitrator, then node B can work normally.
The TDengine package ships with an executable named tarbitrator; run it on any Linux server. The program requires almost nothing in system resources, only network connectivity. Its command-line option `-p` specifies the port it serves on, 6042 by default. When configuring each taosd instance, the parameter arbitrator in taos.cfg can be set to the arbitrator's End Point. When this parameter is configured and the replica count is even, the system automatically connects to the configured arbitrator.
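A sketch of both halves of the setup, using the example end point ha.taosdata.com:6042 from the configuration above:
```
# on the arbitrator host (any Linux server; default port is 6042)
tarbitrator -p 6042

# in taos.cfg on every data node
arbitrator            ha.taosdata.com:6042
```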


@ -46,7 +46,7 @@ taos>
5. On the first node, use the CLI program taos to log in to TDengine and run the command:
```
CREATE DNODE "h2.taos.com:6030"
CREATE DNODE "h2.taos.com:6030";
```
将新节点的End Point添加进集群的EP列表。**"fqdn:port"需要用双引号引起来**否则出错。请注意将示例的“h2.taos.com:6030" 替换为你自己第一个节点的End Point
@ -54,7 +54,7 @@ taos>
6. Run the command
```
SHOW DNODES;
```
   to check whether the new node joined successfully.
@ -71,7 +71,7 @@ taos>
### Adding a Node
Run the CLI program taos, log in to the system as root, and execute:
```
CREATE DNODE "fqdn:port"
CREATE DNODE "fqdn:port";
```
This adds the new node's End Point to the cluster's EP list. **"fqdn:port" must be enclosed in double quotes**, otherwise an error occurs. The fqdn and port a node serves externally can be configured in taos.cfg; by default they are obtained automatically.


@ -142,7 +142,7 @@ The C/C++ API is similar to MySQL's C API. To use it, an application must include the TDengine header
Gets the reason for the most recent failed API call; the return value is an error code.
**Note:** For a single database connection, only one thread at a time may use that connection to call the API; otherwise undefined behavior may occur and the client may crash. A client application can perform multithreaded data writes or query processing by establishing multiple connections.
### Asynchronous Query API


@ -228,7 +228,8 @@ resultSet.close();
stmt.close();
conn.close();
```
> `Note: be sure to close the connection`; otherwise connections will leak.
## Using with Connection Pools
**HikariCP**


@ -21,7 +21,7 @@
## 5. What should I do when I hit the error "Unable to establish connection"?
When the client hits a connection failure, check the following step by step:
1. Check the network environment
   * Cloud server: check whether the cloud server's security group opens access to TCP/UDP ports 6030-6042
@ -32,7 +32,7 @@
3. On the server, run `systemctl status taosd` to check the running state of *taosd*. If it is not running, start *taosd*.
4. Confirm that the client connected with the correct server FQDN (Fully Qualified Domain Name; it can be obtained by running the Linux command hostname -f on the server); for FQDN configuration, see [All about TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).
5. ping the server FQDN; if there is no response, check your network, the DNS settings, or the system hosts file of the computer the client runs on.
@ -45,74 +45,19 @@
9. If the connection failure still cannot be isolated, use the command-line tool nc to check whether the TCP and UDP connections on the specified ports are open:
   Check whether a UDP port connection works: `nc -vuz {hostIP} {port}`
   Check whether the server-side TCP port connection works: `nc -l {port}`
   Check whether the client-side TCP port connection works: `nc {hostIP} {port}`
10. You can also use the network connectivity check built into the taos program to verify whether the specified TCP and UDP port connections between server and client are open; see also [A Guide to TDengine's Embedded Network Checker](https://www.taosdata.com/blog/2020/09/08/1816.html).
    taos selects between the server-side and client-side check with the -n parameter: -n server runs the server-side check, -n client runs the client-side check.
    1) First stop the taosd service on the server;
    2) On the server, run the server-side network check built into taos: taos -n server -P 6030 -e 6042 -l 1000
    3) On the client, run the client-side network check built into taos: taos -n client -h host -P 6030 -e 6042 -l 1000
    -n: selects the server-side or client-side network check; empty by default, meaning no check is run;
    -h: the server name, as an IP address or FQDN, e.g. 192.168.1.160, 192.168.1.160:6030, hostname1, or hostname1:6030; default 127.0.0.1;
    -P: first port to check; default 6030;
    -e: last port to check (must be greater than or equal to the first port); default 6042;
    -l: packet length used for the port check, at most 64000 bytes; default 1000 bytes; server and client must specify the same value.
    The first and last port numbers set on the server must contain the first and last port numbers set on the client;
    the first port number can be set in three ways: default, via -h, or via -P, with priority -P > -h > default.
    Sample client output:
```
sum@sum-virtualBox /home/sum $ taos -n client -h ubuntu-vbox6
host: ubuntu-vbox6  start port: 6030  end port: 6042  packet len: 1000
tcp port:6030 test ok.  udp port:6030 test ok.
tcp port:6031 test ok.  udp port:6031 test ok.
tcp port:6032 test ok.  udp port:6032 test ok.
tcp port:6033 test ok.  udp port:6033 test ok.
tcp port:6034 test ok.  udp port:6034 test ok.
tcp port:6035 test ok.  udp port:6035 test ok.
tcp port:6036 test ok.  udp port:6036 test ok.
tcp port:6037 test ok.  udp port:6037 test ok.
tcp port:6038 test ok.  udp port:6038 test ok.
tcp port:6039 test ok.  udp port:6039 test ok.
tcp port:6040 test ok.  udp port:6040 test ok.
tcp port:6041 test ok.  udp port:6041 test ok.
tcp port:6042 test ok.  udp port:6042 test ok.
```
    If a port is not open, a message like `port xxxx test fail` is printed.
## 6. What should I do when I hit the error "Unexpected generic error in RPC" or "TDengine Error: Unable to resolve FQDN"?
This error occurs because the client or a data node cannot resolve an FQDN (Fully Qualified Domain Name). For the TAOS shell or client applications, check the following:
1. Check that the FQDN of the server being connected to is correct; for FQDN configuration, see [All about TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html).
2. If the network is configured with a DNS server, check that it works properly;
3. If the network has no DNS server, check the hosts file of the client machine to see whether the FQDN is configured and maps to the correct IP address;
4. If the network configuration is fine, the machine the client runs on must be able to ping the connected FQDN; otherwise the client cannot connect to the server.
## 7. Why do I still get an "Invalid SQL" error when the syntax is correct?
@ -163,4 +108,8 @@ Connection = DriverManager.getConnection(url, properties);
Attach the necessary problem description, the operations performed when it occurred, its symptoms, and the approximate time, and file an issue on <a href='https://github.com/taosdata/TDengine'> GitHub</a>.
To guarantee enough debug information, if the problem can be reproduced, edit /etc/taos/taos.cfg, append the line "debugFlag 135" (without the quotes) at the end, restart taosd, reproduce the problem, and then submit it. The log level of taosd can also be set temporarily with the following SQL statement:
```
alter dnode <dnode_id> debugFlag 135;
```
But when the system is running normally, be sure to set debugFlag back to 131; otherwise large volumes of log output will reduce system performance.


@ -25,6 +25,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6,
- To improve write efficiency, write in batches. The more records written in one batch, the higher the insert efficiency. But one record must not exceed 16 KB, and the total length of one SQL statement must not exceed 64 KB (configurable via the parameter maxSQLLength, up to 8 MB).
- TDengine supports concurrent writes from multiple threads. To raise write speed further, a client should write from 20 or more threads concurrently. Beyond a certain thread count, though, speed stops improving and may even drop, because frequent thread switching brings extra overhead.
- For a given table, if a newly inserted record's timestamp already exists, the new record is silently discarded; within one table, timestamps must be unique. If an application generates records automatically, it is quite likely to generate identical timestamps, so the number of records successfully inserted may be smaller than the number the application submitted.
- A written record's timestamp must be later than the current time minus the parameter keep: if keep is configured as 3650 days, data older than 3650 days cannot be written. The timestamp also must not exceed the current time plus the parameter days: if days is configured as 2, data more than 2 days later than the current time cannot be written. (A minimal sketch follows this list.)
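A minimal sketch of the writable window, assuming keep=3650 and days=2 as above, and the demo table d1001 used earlier in this chapter:
```sql
-- accepted: the timestamp lies inside (now - keep, now + days]
INSERT INTO d1001 VALUES (now, 10.3, 219, 0.31);
-- a timestamp earlier than now - 3650 days, or later than now + 2 days, is rejected
```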
## Direct Writes from Prometheus
[Prometheus](https://www.prometheus.io/), a graduated project of the Cloud Native Computing Foundation, is very widely used for performance monitoring, including K8S performance monitoring. TDengine provides a small tool, [Bailongma](https://github.com/taosdata/Bailongma): with only simple configuration in Prometheus and no code at all, the data collected by Prometheus can be written directly into TDengine, with the database and related tables created automatically in TDengine by rule. The blog post [Quickly Build a DevOps Monitoring Demo with Docker Containers](https://www.taosdata.com/blog/2020/02/03/1189.html) is an example of using bailongma to write Prometheus and Telegraf data into TDengine; it can serve as a reference.


@ -66,21 +66,21 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consensus
Real-time data replication has three main flows: master election, data forwarding, and data recovery, discussed in detail below.
## Network Connections Between Virtual Nodes
Virtual nodes connect to each other over TCP; state exchange between nodes and forwarding of data packets both happen over this TCP connection (peerFd). To avoid contention, the TCP connection between two virtual nodes is always initiated, as the TCP client, by the node with the smaller IP address (UINT32). Once the TCP connection breaks, a virtual node detects it automatically via the TCP socket and marks the peer offline. If any error is detected (for instance during the data-recovery flow), the virtual node proactively resets the connection.
Once a node acting as the client fails to connect, or its connection breaks, it retries the connection periodically, once per second. Because TCP itself has a heartbeat mechanism, virtual nodes do not provide an additional heartbeat between themselves.
If an unsynced node needs to start the data-recovery flow, it establishes a dedicated TCP connection (syncFd) with the Master. When data recovery completes, that connection is closed. Moreover, to limit resource usage, the system only allows a certain number (the parameter tsMaxSyncNum) of data-recovery sockets to exist; data-recovery requests beyond this number are deferred.
Any node, no matter how many virtual nodes it hosts, starts one and only one TCP server to accept the two kinds of TCP connection requests above from other virtual nodes. When a TCP socket is established, the message body the client side sends carries a vgId (globally unique vgroup ID); the TCP server side checks whether that vgId is up and running on this node. If it is, the request is accepted; if not, the connection request is simply closed. In the TDengine code, the vgId of the mnode group is set to 1.
## Master Election
When two virtual nodes of the same group (vnode A, vnode B) establish a connection, they exchange status messages. A status message contains the locally stored role and version of every virtual node in the same virtual node group.
If a virtual node (vnode A) detects that the connection to another virtual node of the same virtual node group (vnode B) has broken, vnode A immediately sets vnode B's role to offline. Whether it receives a status message from another virtual node or detects a broken connection to one, the virtual node enters the state-handling flow. The rules of the state-handling flow are:
1. If it sees that fewer than half of the nodes are online, it sets its own state to unsynced.
2. If more than half of the virtual nodes are online, it checks whether a master node exists; if one exists, it decides whether to change its own state to slave or to start the data-recovery flow.
@ -118,7 +118,7 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consensus
9. If quorum is 1, steps 6, 7, and 8 above do not happen.
10. If the slave's acknowledgment must be awaited, the master starts a 2-second timer (configurable); on timeout, the write is considered failed.
For acknowledgments, the sync module provides an asynchronous callback, so after calling syncForwardToPeer the APP need not wait and can process the next operation. In the TCP connection pipe between Master and Slave there may be multiple Forward messages, strictly ordered as the application provided them. The same holds for Forward Responses: the TCP pipe may hold several, all in order. The SYNC module does nothing special to achieve this ordering; it is guaranteed by the APP's single-threaded sequential writes (in TDengine, each vnode's data writes are single-threaded).
## Data Recovery
@ -142,9 +142,9 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consensus
<center> <img src="../assets/replica-restore.png"> </center>
1. Send a sync req to the master node over the already established TCP connection;
2. On receiving the sync req, the master, acting as a client, proactively establishes a new TCP connection (syncFd) to vnode B, dedicated to synchronization;
3. After the new TCP connection is established successfully, the master starts the retrieve flow and the corresponding vnode B simultaneously starts the restore flow;
4. The Retrieve/Restore flow first processes all archived data (the data, head, and last files in the vnode), then the WAL data.
5. For archived data, the master obtains the basic information of the data files, including file name, magic, and file size, via the callback getFileInfo.
6. The master sends the obtained file name, magic, and file size to vnode B;
@ -157,7 +157,7 @@ TDengine uses a Master-Slave model for synchronization; compared with the popular RAFT consensus
1. The master node obtains the WAL file name via the callback getWalInfo.
2. If getWalInfo returns a value greater than 0, the file is not the last WAL, so the master uses sendfile to send the whole file to vnode B in one go;
3. If getWalInfo returns 0, the file is the last WAL; because the file may still be in the process of being written, the sync module reads records one by one according to the WAL Head definition and sends them to vnode B.
4. vnode A reads the data arriving over the TCP connection and parses it record by record according to the WAL Head; if a record's version number is greater than the existing one, the callback writeToCache hands it to the application for processing; if smaller, it is simply dropped.
5. The flow above loops until all WAL files have been processed. Afterwards, the master forwards newly arriving data packets to the slave via Forward messages.
From the moment file synchronization starts, the sync module watches every processed file and the WAL via inotify. As soon as a processed file is found to have changed, the synchronization flow aborts and restarts: a flush may have been in progress (for example importing historical data, or flushing in-memory data to disk) and modified already-processed files, which then have to be synchronized again.
@ -194,15 +194,15 @@ The sync module watches the LastWal file's update and close operations via inotify. Moreover, at
Because the write failed, the client writes the data again, and for TDengine that is fine: all time-series data carries a timestamp, and for data with identical timestamps, the update executes the first time but is automatically dropped the second time. Operations on Meta Data (creating or dropping databases, tables, and so on) are fine too: a table or database that has already been created or dropped will not be created or dropped again.
In TDengine's design, there is one TCP connection between two virtual nodes, and it is a pipeline: data blocks wait in it for processing one after another, in order. As soon as the processing of some data block fails, the connection is reset and the processing of all subsequent blocks fails too. So it can never happen that one block's update fails in the pipeline while the next block's succeeds.
## The Split-Brain Problem
The election flow has a hard requirement: more than half of the virtual nodes must be online. But if the replica count happens to be even, the split-brain problem can well arise.
To solve it, TDengine provides the Arbitrator solution. The Arbitrator is a node whose only task is to accept connection requests from any virtual node and keep them alive.
When starting a replication-module instance, the application can supply the Arbitrator's IP address in the configuration parameters. With an odd number of replicas, the replication module does not connect to this arbitrator; with an even number, it proactively establishes the connection.
The Arbitrator's program, tarbitrator.c, lives in the same directory as the replication module and is built into the bin directory when the whole system is compiled. The command-line flag "-" lists the configurable parameters, such as the bound IP address and the listening port.


@ -13,7 +13,7 @@ taosd's startup entry point is the dnode module; dnode then starts the other modules, including the optional
This module handles communication between taosd and taosc, and among data nodes. TDengine does not adopt standard third-party tools such as HTTP or gRPC; it implements its own communication module, RPC.
Considering that data-write packets are generally small in IoT scenarios, RPC supports UDP connections in addition to TCP. When a data packet is smaller than 15 KB, RPC connects over UDP; otherwise it connects over TCP. For query-type messages, RPC always uses TCP regardless of packet size. For UDP connections, RPC implements its own timeout, retransmission, and ordering checks to guarantee reliable data transfer.
The RPC module also provides data compression: if a data packet's byte count exceeds the system configuration parameter compressMsgSize, RPC automatically compresses the data in transit to save bandwidth.
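A sketch of the corresponding taos.cfg entry (semantics as described above: packets larger than the threshold are compressed, while -1 disables compression, as noted in the sample configuration file later in this changeset):
```
# compress RPC messages longer than 1000 bytes; -1 means no compression
compressMsgSize       1000
```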
@ -25,7 +25,7 @@ The RPC module also provides data compression: if a data packet's byte count exceeds the system
- System initialization, including:
  - reading the system configuration parameters from taos.cfg and the data node's configuration from dnodeCfg.json;
  - starting the RPC module and establishing the server connections for communicating with taosc and with other data nodes;
  - starting and initializing the dnode's internal management, which scans the data node's existing vnodes and opens them;
  - initializing the configurable modules, such as mnode, http, and monitor.
- Data node management, including:


@ -122,11 +122,14 @@
# number of replications, for cluster only
# replica 1
# mqtt uri
# mqttBrokerAddress mqtt://username:password@hostname:1883/taos/
# mqtt hostname
# mqttHostName test.mosquitto.org
# mqtt client name
# mqttBrokerClientId taos_mqtt
# mqtt port
# mqttPort 1883
# mqtt topic
# mqttTopic /test
# the compressed rpc message, option:
# -1 (no compression)
@ -186,6 +189,9 @@
# max number of rows per log filters
# numOfLogLines 10000000
# time of keeping log files, days
# logKeepDays 0
# enable/disable async log
# asyncLog 1


@ -57,6 +57,7 @@ cp -r ${top_dir}/tests/examples/* ${pkg_dir}${install_home_pat
cp -r ${top_dir}/src/connector/grafanaplugin ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/python ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/go ${pkg_dir}${install_home_path}/connector
cp -r ${top_dir}/src/connector/nodejs ${pkg_dir}${install_home_path}/connector
cp ${compile_dir}/build/lib/taos-jdbcdriver*dist.* ${pkg_dir}${install_home_path}/connector
cp -r ${compile_dir}/../packaging/deb/DEBIAN ${pkg_dir}/


@ -2,9 +2,11 @@ FROM centos:7
WORKDIR /root
ARG version
RUN echo $version
COPY tdengine.tar.gz /root/
RUN tar -zxf tdengine.tar.gz
WORKDIR /root/TDengine-server/
WORKDIR /root/TDengine-server-$version/
RUN sh install.sh -e no


@ -1,5 +1,5 @@
#!/bin/bash
set -x
docker build --rm -f "Dockerfile" -t tdengine/tdengine:$1 "."
docker build --rm -f "Dockerfile" -t tdengine/tdengine:$1 "." --build-arg version=$1
docker login -u tdengine -p $2 #replace the docker registry username and password
docker push tdengine/tdengine:$1


@ -64,6 +64,7 @@ cp %{_compiledir}/../src/inc/taoserror.h %{buildroot}%{homepath}/incl
cp -r %{_compiledir}/../src/connector/grafanaplugin %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/python %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/go %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../src/connector/nodejs %{buildroot}%{homepath}/connector
cp %{_compiledir}/build/lib/taos-jdbcdriver*dist.* %{buildroot}%{homepath}/connector
cp -r %{_compiledir}/../tests/examples/* %{buildroot}%{homepath}/examples


@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-arbitrator"
install_dir="${release_dir}/TDengine-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/TDengine-arbitrator"
install_dir="${release_dir}/TDengine-arbitrator-${version}"
fi
# Directories and files.
@ -48,9 +48,9 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1


@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-arbitrator"
install_dir="${release_dir}/PowerDB-enterprise-arbitrator-${version}"
else
install_dir="${release_dir}/PowerDB-arbitrator"
install_dir="${release_dir}/PowerDB-arbitrator-${version}"
fi
# Directories and files.
@ -48,9 +48,9 @@ mkdir -p ${install_dir}/init.d && cp ${init_file_tarbitrator_rpm} ${install_dir}
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1


@ -32,9 +32,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-client"
install_dir="${release_dir}/TDengine-enterprise-client-${version}"
else
install_dir="${release_dir}/TDengine-client"
install_dir="${release_dir}/TDengine-client-${version}"
fi
# Directories and files.
@ -97,6 +97,8 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
@ -111,8 +113,9 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
fi
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
@ -122,9 +125,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1


@ -32,9 +32,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-client"
install_dir="${release_dir}/PowerDB-enterprise-client-${version}"
else
install_dir="${release_dir}/PowerDB-client"
install_dir="${release_dir}/PowerDB-client-${version}"
fi
# Directories and files.
@ -164,9 +164,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1


@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/TDengine-enterprise-server"
install_dir="${release_dir}/TDengine-enterprise-server-${version}"
else
install_dir="${release_dir}/TDengine-server"
install_dir="${release_dir}/TDengine-server-${version}"
fi
# Directories and files.
@ -113,6 +113,8 @@ if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp -r ${examples_dir}/python ${install_dir}/examples
cp -r ${examples_dir}/R ${install_dir}/examples
cp -r ${examples_dir}/go ${install_dir}/examples
cp -r ${examples_dir}/nodejs ${install_dir}/examples
cp -r ${examples_dir}/C# ${install_dir}/examples
fi
# Copy driver
mkdir -p ${install_dir}/driver
@ -122,10 +124,11 @@ cp ${lib_files} ${install_dir}/driver
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp ${build_dir}/lib/*.jar ${install_dir}/connector
cp -r ${connector_dir}/grafanaplugin ${install_dir}/connector/
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/python ${install_dir}/connector/
cp -r ${connector_dir}/go ${install_dir}/connector
cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi
# Copy release note
# cp ${script_dir}/release_note ${install_dir}
@ -135,9 +138,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1


@ -25,9 +25,9 @@ release_dir="${top_dir}/release"
#package_name='linux'
if [ "$verMode" == "cluster" ]; then
install_dir="${release_dir}/PowerDB-enterprise-server"
install_dir="${release_dir}/PowerDB-enterprise-server-${version}"
else
install_dir="${release_dir}/PowerDB-server"
install_dir="${release_dir}/PowerDB-server-${version}"
fi
# Directories and files.
@ -184,9 +184,9 @@ fi
cd ${release_dir}
if [ "$verMode" == "cluster" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
elif [ "$verMode" == "edge" ]; then
pkg_name=${install_dir}-${version}-${osType}-${cpuType}
pkg_name=${install_dir}-${osType}-${cpuType}
else
echo "unknow verMode, nor cluster or edge"
exit 1


@ -1,6 +1,6 @@
name: tdengine
base: core18 # the base snap is the execution environment for this snap
version: '2.0.2.0' # just for humans, typically '1.2+git' or '1.3.2'
base: core18
version: 'RELEASE_VERSION'
icon: snap/gui/t-dengine.svg
summary: an open-source big data platform designed and optimized for IoT.
description: |
@ -18,41 +18,18 @@ apps:
- network
- network-bind
- system-observe
- systemfiles
taos:
command: taoswrapper.sh
plugs:
- network
- system-observe
- systemfiles
- historyfile
taosdemo:
command: usr/bin/taosdemo
plugs:
- network
plugs:
historyfile:
interface: personal-files
read:
- $HOME/.taos_history
write:
- $HOME/.taos_history
systemfiles:
interface: system-files
read:
- /etc/taos
- /var/lib/taos
- /var/log/taos
- /tmp
write:
- /var/log/taos
- /var/lib/taos
- /tmp
parts:
script:
plugin: dump
@ -95,7 +72,7 @@ parts:
- usr/bin/taosd
- usr/bin/taos
- usr/bin/taosdemo
- usr/lib/libtaos.so.2.0.2.0
- usr/lib/libtaos.so.RELEASE_VERSION
- usr/lib/libtaos.so.1
- usr/lib/libtaos.so
@ -115,8 +92,3 @@ layout:
bind: $SNAP_DATA/var/log/taos
/etc/taos:
bind: $SNAP_DATA/etc/taos
hooks:
install:
plugs: [systemfiles, historyfile]


@ -10,7 +10,9 @@ ADD_SUBDIRECTORY(client)
ADD_SUBDIRECTORY(query)
ADD_SUBDIRECTORY(kit)
ADD_SUBDIRECTORY(plugins)
ADD_SUBDIRECTORY(sync)
IF (TD_SYNC)
ADD_SUBDIRECTORY(sync)
ENDIF ()
ADD_SUBDIRECTORY(balance)
ADD_SUBDIRECTORY(mnode)
ADD_SUBDIRECTORY(vnode)


@ -389,6 +389,7 @@ void balanceReset() {
pDnode->lastAccess = 0;
if (pDnode->status != TAOS_DN_STATUS_DROPPING) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_NOT_RECEIVED;
}
mnodeDecDnodeRef(pDnode);
@ -551,6 +552,7 @@ static void balanceCheckDnodeAccess() {
if (tsAccessSquence - pDnode->lastAccess > 3) {
if (pDnode->status != TAOS_DN_STATUS_DROPPING && pDnode->status != TAOS_DN_STATUS_OFFLINE) {
pDnode->status = TAOS_DN_STATUS_OFFLINE;
pDnode->offlineReason = TAOS_DN_OFF_STATUS_MSG_TIMEOUT;
mInfo("dnode:%d, set to offline state", pDnode->dnodeId);
balanceSetVgroupOffline(pDnode);
}


@ -69,17 +69,13 @@ typedef struct SJoinSupporter {
SSubqueryState* pState;
SSqlObj* pObj; // parent SqlObj
int32_t subqueryIndex; // index of sub query
char intervalTimeUnit;
char slidingTimeUnit;
int64_t intervalTime; // interval time
int64_t slidingTime; // sliding time
SInterval interval;
SLimitVal limit; // limit info
uint64_t uid; // query meter uid
uint64_t uid; // query table uid
SArray* colList; // previous query information, no need to use this attribute, and the corresponding attribution
SArray* exprList;
SFieldInfo fieldsInfo;
STagCond tagCond;
SSqlGroupbyExpr groupbyExpr;
struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array
FILE* f; // temporary file in order to create TSBuf
char path[PATH_MAX]; // temporary file path, todo dynamic allocate memory
@ -195,6 +191,7 @@ SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex);
SArray* tscColumnListClone(const SArray* src, int16_t tableIndex);
void tscColumnListDestroy(SArray* pColList);
void tscDequoteAndTrimToken(SStrToken* pToken);
int32_t tscValidateName(SStrToken* pToken);
void tscIncStreamExecutionCount(void* pStream);
@ -261,6 +258,8 @@ void tscDoQuery(SSqlObj* pSql);
*/
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd);
void registerSqlObj(SSqlObj* pSql);
SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, int32_t cmd, SSqlObj* pPrevSql);
void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t subClauseIndex, int32_t tableIndex);


@ -29,6 +29,7 @@ extern "C" {
#include "tglobal.h"
#include "tsqlfunction.h"
#include "tutil.h"
#include "tcache.h"
#include "qExecutor.h"
#include "qSqlparser.h"
@ -88,7 +89,7 @@ typedef struct STableComInfo {
typedef struct SCMCorVgroupInfo {
int32_t version;
int8_t inUse;
int8_t inUse;
int8_t numOfEps;
SEpAddr epAddr[TSDB_MAX_REPLICA];
} SCMCorVgroupInfo;
@ -98,6 +99,7 @@ typedef struct STableMeta {
uint8_t tableType;
int16_t sversion;
int16_t tversion;
char sTableId[TSDB_TABLE_FNAME_LEN];
SCMVgroupInfo vgroupInfo;
SCMCorVgroupInfo corVgroupInfo;
STableId id;
@ -105,7 +107,7 @@ typedef struct STableMeta {
} STableMeta;
typedef struct STableMetaInfo {
STableMeta * pTableMeta; // table meta, cached in client side and acquired by name
STableMeta *pTableMeta; // table meta, cached in client side and acquired by name
SVgroupsInfo *vgroupList;
SArray *pVgroupTables; // SArray<SVgroupTableInfo>
@ -225,13 +227,8 @@ typedef struct SQueryInfo {
int16_t command; // the command may be different for each subclause, so keep it seperately.
uint32_t type; // query/insert type
// TODO refactor
char intervalTimeUnit;
char slidingTimeUnit;
STimeWindow window; // query time window
int64_t intervalTime; // aggregation time window range
int64_t slidingTime; // sliding window in mseconds
int64_t intervalOffset;// start offset of each time window
int32_t tz; // query client timezone
SInterval interval;
SSqlGroupbyExpr groupbyExpr; // group by tags info
SArray * colList; // SArray<SColumn*>
@ -263,7 +260,7 @@ typedef struct {
};
int32_t insertType;
int32_t clauseIndex; // index of multiple subclause query
int32_t clauseIndex; // index of multiple subclause query
char * curSql; // current sql, resume position of sql after parsing paused
int8_t parseFinished;
@ -278,7 +275,8 @@ typedef struct {
int32_t numOfParams;
int8_t dataSourceType; // load data from file or not
int8_t submitSchema; // submit block is built with table schema
int8_t submitSchema; // submit block is built with table schema
STagData tagData;
SHashObj *pTableList; // referred table involved in sql
SArray *pDataBlocks; // SArray<STableDataBlocks*> submit data blocks after parsing sql
} SSqlCmd;
@ -333,6 +331,7 @@ typedef struct STscObj {
struct SSqlStream *streamList;
void* pDnodeConn;
pthread_mutex_t mutex;
T_REF_DECLARE()
} STscObj;
typedef struct SSqlObj {
@ -359,6 +358,8 @@ typedef struct SSqlObj {
uint16_t numOfSubs;
struct SSqlObj **pSubs;
struct SSqlObj * prev, *next;
struct SSqlObj **self;
} SSqlObj;
typedef struct SSqlStream {
@ -366,8 +367,6 @@ typedef struct SSqlStream {
uint32_t streamId;
char listed;
bool isProject;
char intervalTimeUnit;
char slidingTimeUnit;
int16_t precision;
int64_t num; // number of computing count
@ -381,8 +380,7 @@ typedef struct SSqlStream {
int64_t ctime; // stream created time
int64_t stime; // stream next executed time
int64_t etime; // stream end query time, when time is larger then etime, the stream will be closed
int64_t intervalTime;
int64_t slidingTime;
SInterval interval;
void * pTimer;
void (*fp)();
@ -413,7 +411,6 @@ int32_t tscTansformSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
void tscRestoreSQLFuncForSTableQuery(SQueryInfo *pQueryInfo);
int32_t tscCreateResPointerInfo(SSqlRes *pRes, SQueryInfo *pQueryInfo);
void tscDestroyResPointerInfo(SSqlRes *pRes);
void tscResetSqlCmdObj(SSqlCmd *pCmd, bool removeFromCache);
@ -425,17 +422,19 @@ void tscFreeSqlResult(SSqlObj *pSql);
/**
* only free part of resources allocated during query.
* TODO remove it later
* Note: this function is multi-thread safe.
* @param pObj
*/
void tscPartiallyFreeSqlObj(SSqlObj *pObj);
void tscPartiallyFreeSqlObj(SSqlObj *pSql);
/**
* free sql object, release allocated resource
* @param pObj Free metric/meta information, dynamically allocated payload, and
* response buffer, object itself
* @param pObj
*/
void tscFreeSqlObj(SSqlObj *pObj);
void tscFreeSqlObj(SSqlObj *pSql);
void tscFreeSqlObjInCache(void *pSql);
void tscCloseTscObj(STscObj *pObj);
@ -451,9 +450,6 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen)
bool tscIsUpdateQuery(SSqlObj* pSql);
bool tscHasReachLimitation(SQueryInfo *pQueryInfo, SSqlRes *pRes);
// todo remove this function.
bool tscResultsetFetchCompleted(TAOS_RES *result);
char *tscGetErrorMsgPayload(SSqlCmd *pCmd);
int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql);
@ -468,7 +464,7 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
int32_t type = pInfo->pSqlExpr->resType;
int32_t bytes = pInfo->pSqlExpr->resBytes;
char* pData = pRes->data + pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row;
char* pData = pRes->data + (int32_t)(pInfo->pSqlExpr->offset * pRes->numOfRows + bytes * pRes->row);
// user defined constant value output columns
if (TSDB_COL_IS_UD_COL(pInfo->pSqlExpr->colInfo.flag)) {
@ -502,7 +498,8 @@ static FORCE_INLINE void tscGetResultColumnChr(SSqlRes* pRes, SFieldInfo* pField
}
}
extern void * tscCacheHandle;
extern SCacheObj* tscMetaCache;
extern SCacheObj* tscObjCache;
extern void * tscTmr;
extern void * tscQhandle;
extern int tscKeepConn[];


@ -149,7 +149,7 @@ JNIEXPORT void JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_initImp(JNIEnv *e
JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv *env, jobject jobj, jint optionIndex,
jstring optionValue) {
if (optionValue == NULL) {
jniDebug("option index:%d value is null", optionIndex);
jniDebug("option index:%d value is null", (int32_t)optionIndex);
return 0;
}
@ -183,7 +183,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setOptions(JNIEnv
}
(*env)->ReleaseStringUTFChars(env, optionValue, tz1);
} else {
jniError("option index:%d is not found", optionIndex);
jniError("option index:%d is not found", (int32_t)optionIndex);
}
return res;
@ -227,10 +227,10 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_connectImp(JNIEn
ret = (jlong)taos_connect((char *)host, (char *)user, (char *)pass, (char *)dbname, (uint16_t)jport);
if (ret == 0) {
jniError("jobj:%p, conn:%p, connect to database failed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, jport);
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
} else {
jniDebug("jobj:%p, conn:%p, connect to database succeed, host=%s, user=%s, dbname=%s, port=%d", jobj, (void *)ret,
(char *)host, (char *)user, (char *)dbname, jport);
(char *)host, (char *)user, (char *)dbname, (int32_t)jport);
}
if (host != NULL) (*env)->ReleaseStringUTFChars(env, jhost, host);
@ -385,7 +385,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getAffectedRowsIm
}
jint ret = taos_affected_rows((SSqlObj *)res);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, ret);
jniDebug("jobj:%p, conn:%p, sql:%p, res: %p, affect rows:%d", jobj, tscon, (void *)con, (void *)res, (int32_t)ret);
return ret;
}
@ -490,13 +490,13 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_fetchRowImp(JNIEn
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetBooleanFp, i, (jboolean)(*((char *)row[i]) == 1));
break;
case TSDB_DATA_TYPE_TINYINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((char *)row[i]));
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetByteFp, i, (jbyte) * ((int8_t *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((short *)row[i]));
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetShortFp, i, (jshort) * ((int16_t *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int *)row[i]);
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetIntFp, i, (jint) * (int32_t *)row[i]);
break;
case TSDB_DATA_TYPE_BIGINT:
(*env)->CallVoidMethod(env, rowobj, g_rowdataSetLongFp, i, (jlong) * ((int64_t *)row[i]));


@ -18,6 +18,7 @@
#include "tnote.h"
#include "trpc.h"
#include "tcache.h"
#include "tscLog.h"
#include "tscSubquery.h"
#include "tscLocalMerge.h"
@ -40,6 +41,8 @@ static void tscAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOfRows);
static void tscAsyncFetchSingleRowProxy(void *param, TAOS_RES *tres, int numOfRows);
void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const char* sqlstr, size_t sqlLen) {
SSqlCmd* pCmd = &pSql->cmd;
pSql->signature = pSql;
pSql->param = param;
pSql->pTscObj = pObj;
@ -48,6 +51,8 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
pSql->fp = fp;
pSql->fetchFp = fp;
registerSqlObj(pSql);
pSql->sqlstr = calloc(1, sqlLen + 1);
if (pSql->sqlstr == NULL) {
tscError("%p failed to malloc sql string buffer", pSql);
@ -59,7 +64,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
strntolower(pSql->sqlstr, sqlstr, (int32_t)sqlLen);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
pSql->cmd.curSql = pSql->sqlstr;
pCmd->curSql = pSql->sqlstr;
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return;
@ -69,7 +74,7 @@ void doAsyncQuery(STscObj* pObj, SSqlObj* pSql, void (*fp)(), void* param, const
tscQueueAsyncRes(pSql);
return;
}
tscDoQuery(pSql);
}
@ -504,7 +509,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) {
goto _error;
}
if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STMT_INSERT)) {
if (pCmd->insertType == TSDB_QUERY_TYPE_STMT_INSERT) {
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
code = tscGetTableMeta(pSql, pTableMetaInfo);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {


@ -117,6 +117,10 @@ typedef struct SFirstLastInfo {
typedef struct SFirstLastInfo SLastrowInfo;
typedef struct SPercentileInfo {
tMemBucket *pMemBucket;
int32_t stage;
double minval;
double maxval;
int64_t numOfElems;
} SPercentileInfo;
typedef struct STopBotInfo {
@ -302,7 +306,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else if (functionId == TSDB_FUNC_PERCT) {
*type = (int16_t)TSDB_DATA_TYPE_DOUBLE;
*bytes = (int16_t)sizeof(double);
- *interBytes = (int16_t)sizeof(double);
+ *interBytes = (int16_t)sizeof(SPercentileInfo);
} else if (functionId == TSDB_FUNC_LEASTSQR) {
*type = TSDB_DATA_TYPE_BINARY;
*bytes = TSDB_AVG_FUNCTION_INTER_BUFFER_SIZE; // string
@ -322,7 +326,7 @@ int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionI
} else if (functionId == TSDB_FUNC_LAST_ROW) {
*type = (int16_t)dataType;
*bytes = (int16_t)dataBytes;
- *interBytes = dataBytes + sizeof(SLastrowInfo);
+ *interBytes = dataBytes;
} else {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -521,7 +525,7 @@ static void do_sum(SQLFunctionCtx *pCtx) {
*retVal += pCtx->preAggVals.statis.sum;
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
double *retVal = (double*) pCtx->aOutputBuf;
- *retVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.sum));
+ *retVal += GET_DOUBLE_VAL((const char*)&(pCtx->preAggVals.statis.sum));
}
} else { // computing based on the true data block
void *pData = GET_INPUT_CHAR(pCtx);
@ -707,13 +711,16 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en
if (pCtx->aOutputBuf == NULL) {
return BLK_DATA_ALL_NEEDED;
}
- SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
- if (pInfo->hasResult != DATA_SET_FLAG) {
-   return BLK_DATA_ALL_NEEDED;
- } else { // data in current block is not earlier than current result
-   return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
- }
+ return BLK_DATA_ALL_NEEDED;
+ // TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
+ // is invalid
+ // SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
+ // if (pInfo->hasResult != DATA_SET_FLAG) {
+ //   return BLK_DATA_ALL_NEEDED;
+ // } else { // data in current block is not earlier than current result
+ //   return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
+ // }
}
static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) {
@ -726,12 +733,16 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end
return BLK_DATA_ALL_NEEDED;
}
- SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
- if (pInfo->hasResult != DATA_SET_FLAG) {
-   return BLK_DATA_ALL_NEEDED;
- } else {
-   return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
- }
+ return BLK_DATA_ALL_NEEDED;
+ // TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter
+ // is invalid
+ // SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes);
+ // if (pInfo->hasResult != DATA_SET_FLAG) {
+ //   return BLK_DATA_ALL_NEEDED;
+ // } else {
+ //   return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED;
+ // }
}
//////////////////////////////////////////////////////////////////////////////////////////////
@ -757,7 +768,7 @@ static void avg_function(SQLFunctionCtx *pCtx) {
if (pCtx->inputType >= TSDB_DATA_TYPE_TINYINT && pCtx->inputType <= TSDB_DATA_TYPE_BIGINT) {
*pVal += pCtx->preAggVals.statis.sum;
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- *pVal += GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.sum));
+ *pVal += GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.sum));
}
} else {
void *pData = GET_INPUT_CHAR(pCtx);
@ -1832,8 +1843,10 @@ static void last_row_function(SQLFunctionCtx *pCtx) {
pInfo1->hasResult = DATA_SET_FLAG;
DO_UPDATE_TAG_COLUMNS(pCtx, pInfo1->ts);
} else {
DO_UPDATE_TAG_COLUMNS(pCtx, pCtx->ptsList[pCtx->size - 1]);
}
SET_VAL(pCtx, pCtx->size, 1);
}
@ -2428,12 +2441,14 @@ static bool percentile_function_setup(SQLFunctionCtx *pCtx) {
if (!function_setup(pCtx)) {
return false;
}
- SResultInfo *pResInfo = GET_RES_INFO(pCtx);
- ((SPercentileInfo *)(pResInfo->interResultBuf))->pMemBucket =
-     tMemBucketCreate(pCtx->inputBytes, pCtx->inputType);
+ // in the first round, get the min-max value of all involved data
+ SResultInfo *pResInfo = GET_RES_INFO(pCtx);
+ SPercentileInfo *pInfo = pResInfo->interResultBuf;
+ pInfo->minval = DBL_MAX;
+ pInfo->maxval = -DBL_MAX;
+ pInfo->numOfElems = 0;
return true;
}
@ -2442,7 +2457,65 @@ static void percentile_function(SQLFunctionCtx *pCtx) {
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
// the first stage, only acquire the min/max value
if (pInfo->stage == 0) {
if (pCtx->preAggVals.isSet) {
if (pInfo->minval > pCtx->preAggVals.statis.min) {
pInfo->minval = (double)pCtx->preAggVals.statis.min;
}
if (pInfo->maxval < pCtx->preAggVals.statis.max) {
pInfo->maxval = (double)pCtx->preAggVals.statis.max;
}
pInfo->numOfElems += (pCtx->size - pCtx->preAggVals.statis.numOfNull);
} else {
for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
continue;
}
// TODO extract functions
double v = 0;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
v = GET_INT8_VAL(data);
break;
case TSDB_DATA_TYPE_SMALLINT:
v = GET_INT16_VAL(data);
break;
case TSDB_DATA_TYPE_BIGINT:
v = (double)(GET_INT64_VAL(data));
break;
case TSDB_DATA_TYPE_FLOAT:
v = GET_FLOAT_VAL(data);
break;
case TSDB_DATA_TYPE_DOUBLE:
v = GET_DOUBLE_VAL(data);
break;
default:
v = GET_INT32_VAL(data);
break;
}
if (v < pInfo->minval) {
pInfo->minval = v;
}
if (v > pInfo->maxval) {
pInfo->maxval = v;
}
pInfo->numOfElems += 1;
}
}
return;
}
// the second stage, calculate the true percentile value
for (int32_t i = 0; i < pCtx->size; ++i) {
char *data = GET_INPUT_CHAR_INDEX(pCtx, i);
if (pCtx->hasNull && isNull(data, pCtx->inputType)) {
@ -2462,10 +2535,47 @@ static void percentile_function_f(SQLFunctionCtx *pCtx, int32_t index) {
if (pCtx->hasNull && isNull(pData, pCtx->inputType)) {
return;
}
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = (SPercentileInfo *)pResInfo->interResultBuf;
if (pInfo->stage == 0) {
// TODO extract functions
double v = 0;
switch (pCtx->inputType) {
case TSDB_DATA_TYPE_TINYINT:
v = GET_INT8_VAL(pData);
break;
case TSDB_DATA_TYPE_SMALLINT:
v = GET_INT16_VAL(pData);
break;
case TSDB_DATA_TYPE_BIGINT:
v = (double)(GET_INT64_VAL(pData));
break;
case TSDB_DATA_TYPE_FLOAT:
v = GET_FLOAT_VAL(pData);
break;
case TSDB_DATA_TYPE_DOUBLE:
v = GET_DOUBLE_VAL(pData);
break;
default:
v = GET_INT32_VAL(pData);
break;
}
if (v < pInfo->minval) {
pInfo->minval = v;
}
if (v > pInfo->maxval) {
pInfo->maxval = v;
}
pInfo->numOfElems += 1;
return;
}
tMemBucketPut(pInfo->pMemBucket, pData, 1);
SET_VAL(pCtx, 1, 1);
@ -2488,6 +2598,23 @@ static void percentile_finalizer(SQLFunctionCtx *pCtx) {
doFinalizer(pCtx);
}
static void percentile_next_step(SQLFunctionCtx *pCtx) {
SResultInfo * pResInfo = GET_RES_INFO(pCtx);
SPercentileInfo *pInfo = pResInfo->interResultBuf;
if (pInfo->stage == 0) {
// all data are null, set it completed
if (pInfo->numOfElems == 0) {
pResInfo->complete = true;
}
pInfo->stage += 1;
pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, pInfo->minval, pInfo->maxval);
} else {
pResInfo->complete = true;
}
}
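Taken together, these hunks turn PERCENTILE into a two-stage aggregate: stage 0 only collects min/max and the element count, percentile_next_step then sizes the memory bucket to exactly that range, and stage 1 distributes the values. A self-contained sketch of the idea, with a plain equal-width histogram standing in for tMemBucket (an assumption; the real bucket is more elaborate):

    #include <stdio.h>
    #include <string.h>

    #define NBINS 8

    static double two_stage_percentile(const double *v, int n, double fraction) {
      /* stage 0: min/max scan (what percentile_function does when stage == 0) */
      double mn = v[0], mx = v[0];
      for (int i = 1; i < n; ++i) {
        if (v[i] < mn) mn = v[i];
        if (v[i] > mx) mx = v[i];
      }
      if (mx == mn) return mn;

      /* stage 1: histogram over [mn,mx]; tMemBucketCreate now takes
       * minval/maxval so bins cover exactly the observed range */
      int bins[NBINS];
      memset(bins, 0, sizeof(bins));
      double width = (mx - mn) / NBINS;
      for (int i = 0; i < n; ++i) {
        int b = (int)((v[i] - mn) / width);
        if (b == NBINS) b = NBINS - 1;
        bins[b]++;
      }

      /* walk bins to the requested rank; report the bin midpoint (coarse) */
      int rank = (int)(fraction * (n - 1)), seen = 0;
      for (int b = 0; b < NBINS; ++b) {
        seen += bins[b];
        if (seen > rank) return mn + (b + 0.5) * width;
      }
      return mx;
    }

    int main(void) {
      double v[] = {3, 9, 1, 7, 5, 2, 8, 4, 6, 0};
      printf("approx p50 = %f\n", two_stage_percentile(v, 10, 0.5));
      return 0;
    }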
//////////////////////////////////////////////////////////////////////////////////
static SAPercentileInfo *getAPerctInfo(SQLFunctionCtx *pCtx) {
SResultInfo *pResInfo = GET_RES_INFO(pCtx);
@ -3389,12 +3516,12 @@ static void spread_function(SQLFunctionCtx *pCtx) {
pInfo->max = (double)pCtx->preAggVals.statis.max;
}
} else if (pCtx->inputType == TSDB_DATA_TYPE_DOUBLE || pCtx->inputType == TSDB_DATA_TYPE_FLOAT) {
- if (pInfo->min > GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.min))) {
-   pInfo->min = GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.min));
+ if (pInfo->min > GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.min))) {
+   pInfo->min = GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.min));
  }
- if (pInfo->max < GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.max))) {
-   pInfo->max = GET_DOUBLE_VAL(&(pCtx->preAggVals.statis.max));
+ if (pInfo->max < GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.max))) {
+   pInfo->max = GET_DOUBLE_VAL((const char *)&(pCtx->preAggVals.statis.max));
}
}
@ -4513,7 +4640,7 @@ SQLAggFuncElem aAggs[] = {{
percentile_function_setup,
percentile_function,
percentile_function_f,
- no_next_step,
+ percentile_next_step,
percentile_finalizer,
noop1,
noop1,


@ -16,7 +16,6 @@
#include "os.h"
#include "taosmsg.h"
#include "qExtbuffer.h"
#include "taosdef.h"
#include "tcache.h"
#include "tname.h"
@ -24,7 +23,30 @@
#include "tscUtil.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "taos.h"
#include "tscSubquery.h"
#define STR_NOCASE_EQUAL(str1, len1, str2, len2) ((len1 == len2) && 0 == strncasecmp(str1, str2, len1))
typedef enum BuildType {
SCREATE_BUILD_TABLE = 1,
SCREATE_BUILD_DB = 2,
} BuildType;
typedef enum Stage {
SCREATE_CALLBACK_QUERY = 1,
SCREATE_CALLBACK_RETRIEVE = 2,
} Stage;
// support 'show create table'
typedef struct SCreateBuilder {
char sTableName[TSDB_TABLE_FNAME_LEN];
char buf[TSDB_TABLE_FNAME_LEN];
SSqlObj *pParentSql;
SSqlObj *pInterSql;
int32_t (*fp)(void *para, char* result);
Stage callStage;
} SCreateBuilder;
static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength);
static int32_t getToStringLength(const char *pData, int32_t length, int32_t type) {
@ -273,14 +295,511 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) {
tscFieldInfoUpdateOffset(pQueryInfo);
return tscSetValueToResObj(pSql, rowLen);
}
static int32_t tscGetNthFieldResult(TAOS_ROW row, TAOS_FIELD* fields, int *lengths, int idx, char *result) {
const char *val = row[idx];
if (val == NULL) {
sprintf(result, "%s", TSDB_DATA_NULL_STR);
return -1;
}
uint8_t type = fields[idx].type;
int32_t length = lengths[idx];
switch (type) {
case TSDB_DATA_TYPE_BOOL:
sprintf(result, "%s", ((((int32_t)(*((char *)val))) == 1) ? "true" : "false"));
break;
case TSDB_DATA_TYPE_TINYINT:
sprintf(result, "%d", *((int8_t *)val));
break;
case TSDB_DATA_TYPE_SMALLINT:
sprintf(result, "%d", *((int16_t *)val));
break;
case TSDB_DATA_TYPE_INT:
sprintf(result, "%d", *((int32_t *)val));
break;
case TSDB_DATA_TYPE_BIGINT:
sprintf(result, "%"PRId64, *((int64_t *)val));
break;
case TSDB_DATA_TYPE_FLOAT:
sprintf(result, "%f", GET_FLOAT_VAL(val));
break;
case TSDB_DATA_TYPE_DOUBLE:
sprintf(result, "%f", GET_DOUBLE_VAL(val));
break;
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_BINARY:
memcpy(result, val, length);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
// formatTimestamp(buf, *(int64_t*)val, TSDB_TIME_PRECISION_MICRO);
// memcpy(result, val, strlen(buf));
sprintf(result, "%"PRId64, *((int64_t *)val));
break;
default:
break;
}
return 0;
}
void tscSCreateCallBack(void *param, TAOS_RES *tres, int code) {
if (param == NULL || tres == NULL) {
return;
}
SCreateBuilder *builder = (SCreateBuilder *)(param);
SSqlObj *pParentSql = builder->pParentSql;
SSqlObj *pSql = (SSqlObj *)tres;
SSqlRes *pRes = &pParentSql->res;
pRes->code = taos_errno(pSql);
if (pRes->code != TSDB_CODE_SUCCESS) {
taos_free_result(pSql);
free(builder);
tscQueueAsyncRes(pParentSql);
return;
}
if (builder->callStage == SCREATE_CALLBACK_QUERY) {
taos_fetch_rows_a(tres, tscSCreateCallBack, param);
builder->callStage = SCREATE_CALLBACK_RETRIEVE;
} else {
char *result = calloc(1, TSDB_MAX_BINARY_LEN);
pRes->code = builder->fp(builder, result);
taos_free_result(pSql);
free(builder);
free(result);
if (pRes->code == TSDB_CODE_SUCCESS) {
(*pParentSql->fp)(pParentSql->param, pParentSql, code);
} else {
tscQueueAsyncRes(pParentSql);
}
}
}
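tscSCreateCallBack services two stages with one function: the first invocation (query completed) schedules the row fetch, the second (rows retrieved) builds the result and wakes the parent statement. A compressed sketch of that state machine, with invented names and the async hop collapsed to a direct call:

    #include <stdio.h>

    typedef enum { STAGE_QUERY, STAGE_RETRIEVE } Stage;
    typedef struct { Stage stage; } Builder;

    static void on_event(void *param, void *res, int code);

    static void fetch_rows_async(void *res, void (*cb)(void *, void *, int), void *param) {
      cb(param, res, 0); /* stand-in: the fetch "completes" immediately */
    }

    /* one callback services both stages, branching on recorded state */
    static void on_event(void *param, void *res, int code) {
      Builder *b = param;
      if (b->stage == STAGE_QUERY) {
        b->stage = STAGE_RETRIEVE;          /* advance state, then fetch rows */
        fetch_rows_async(res, on_event, param);
      } else {
        printf("rows ready: build DDL, free inner result, wake parent\n");
      }
    }

    int main(void) {
      Builder b = { STAGE_QUERY };
      on_event(&b, NULL, 0);                /* the initial query completing */
      return 0;
    }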
TAOS_ROW tscFetchRow(void *param) {
SCreateBuilder *builder = (SCreateBuilder *)param;
if (builder == NULL) {
return NULL;
}
SSqlObj *pSql = builder->pInterSql;
if (pSql == NULL || pSql->signature != pSql) {
terrno = TSDB_CODE_TSC_DISCONNECTED;
return NULL;
}
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
if (pRes->qhandle == 0 ||
pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
pCmd->command == TSDB_SQL_INSERT) {
return NULL;
}
// set the sql object owner
tscSetSqlOwner(pSql);
// current data set are exhausted, fetch more data from node
if (pRes->row >= pRes->numOfRows && (pRes->completed != true || hasMoreVnodesToTry(pSql) || hasMoreClauseToTry(pSql)) &&
(pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_RETRIEVE_LOCALMERGE ||
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
pCmd->command == TSDB_SQL_SERV_STATUS ||
pCmd->command == TSDB_SQL_CURRENT_DB ||
pCmd->command == TSDB_SQL_SERV_VERSION ||
pCmd->command == TSDB_SQL_CLI_VERSION ||
pCmd->command == TSDB_SQL_CURRENT_USER )) {
taos_fetch_rows_a(pSql, tscSCreateCallBack, param);
return NULL;
}
void* data = doSetResultRowData(pSql, true);
tscClearSqlOwner(pSql);
return data;
}
static int32_t tscGetTableTagValue(SCreateBuilder *builder, char *result) {
TAOS_ROW row = tscFetchRow(builder);
SSqlObj* pSql = builder->pInterSql;
if (row == NULL) {
return TSDB_CODE_TSC_INVALID_TABLE_NAME;
}
int32_t* lengths = taos_fetch_lengths(pSql);
int num_fields = taos_num_fields(pSql);
TAOS_FIELD *fields = taos_fetch_fields(pSql);
char buf[TSDB_COL_NAME_LEN + 16];
for (int i = 0; i < num_fields; i++) {
memset(buf, 0, sizeof(buf));
int32_t ret = tscGetNthFieldResult(row, fields, lengths, i, buf);
if (i == 0) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s", "(");
}
if ((fields[i].type == TSDB_DATA_TYPE_NCHAR
|| fields[i].type == TSDB_DATA_TYPE_BINARY
|| fields[i].type == TSDB_DATA_TYPE_TIMESTAMP) && 0 == ret) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "\"%s\",", buf);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s,", buf);
}
if (i == num_fields - 1) {
sprintf(result + strlen(result) - 1, "%s", ")");
}
}
if (0 == strlen(result)) {
return TSDB_CODE_TSC_INVALID_TABLE_NAME;
}
return TSDB_CODE_SUCCESS;
}
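The quoting rule above — wrap NCHAR/BINARY/TIMESTAMP values in double quotes, leave numerics bare, and overwrite the final comma with the closing parenthesis — is the core of the TAGS clause reconstruction. A tiny standalone rendition of it (hypothetical values):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      char result[128] = "";
      const char *vals[]   = {"beijing", "3"};
      int         quoted[] = {1, 0};        /* strings/timestamps get quotes */

      for (int i = 0; i < 2; ++i) {
        if (i == 0) strcat(result, "(");
        if (quoted[i])
          snprintf(result + strlen(result), sizeof(result) - strlen(result), "\"%s\",", vals[i]);
        else
          snprintf(result + strlen(result), sizeof(result) - strlen(result), "%s,", vals[i]);
        if (i == 1) sprintf(result + strlen(result) - 1, ")"); /* last ',' -> ')' */
      }
      printf("%s\n", result);               /* ("beijing",3) */
      return 0;
    }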
// build 'show create table/database' result fields
static int32_t tscSCreateBuildResultFields(SSqlObj *pSql, BuildType type, const char *ddl) {
int32_t rowLen = 0;
int16_t ddlLen = (int16_t)strlen(ddl);
SColumnIndex index = {0};
pSql->cmd.numOfCols = 2;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
pQueryInfo->order.order = TSDB_ORDER_ASC;
TAOS_FIELD f;
if (type == SCREATE_BUILD_TABLE) {
f.type = TSDB_DATA_TYPE_BINARY;
f.bytes = (TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
tstrncpy(f.name, "Table", sizeof(f.name));
} else {
f.type = TSDB_DATA_TYPE_BINARY;
f.bytes = (TSDB_DB_NAME_LEN - 1) + VARSTR_HEADER_SIZE;
tstrncpy(f.name, "Database", sizeof(f.name));
}
SFieldSupInfo* pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
f.bytes, f.bytes - VARSTR_HEADER_SIZE, false);
rowLen += f.bytes;
f.bytes = (int16_t)(ddlLen + VARSTR_HEADER_SIZE);
f.type = TSDB_DATA_TYPE_BINARY;
if (type == SCREATE_BUILD_TABLE) {
tstrncpy(f.name, "Create Table", sizeof(f.name));
} else {
tstrncpy(f.name, "Create Database", sizeof(f.name));
}
pInfo = tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
pInfo->pSqlExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_TS_DUMMY, &index, TSDB_DATA_TYPE_BINARY,
(int16_t)(ddlLen + VARSTR_HEADER_SIZE), ddlLen, false);
rowLen += ddlLen + VARSTR_HEADER_SIZE;
return rowLen;
}
static int32_t tscSCreateSetValueToResObj(SSqlObj *pSql, int32_t rowLen, const char *tableName, const char *ddl) {
SSqlRes *pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
int32_t numOfRows = 1;
if (strlen(ddl) == 0) {
}
tscInitResObjForLocalQuery(pSql, numOfRows, rowLen);
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
char* dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 0) * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(dst, tableName, pField->bytes);
pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 1);
dst = pRes->data + tscFieldInfoGetOffset(pQueryInfo, 1) * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(dst, ddl, pField->bytes);
return 0;
}
static int32_t tscSCreateBuildResult(SSqlObj *pSql, BuildType type, const char *str, const char *result) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
int32_t rowLen = tscSCreateBuildResultFields(pSql, type, result);
tscFieldInfoUpdateOffset(pQueryInfo);
return tscSCreateSetValueToResObj(pSql, rowLen, str, result);
}
int32_t tscRebuildCreateTableStatement(void *param,char *result) {
SCreateBuilder *builder = (SCreateBuilder *)param;
int32_t code = TSDB_CODE_SUCCESS;
char *buf = calloc(1,TSDB_MAX_BINARY_LEN);
if (buf == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
code = tscGetTableTagValue(builder, buf);
if (code == TSDB_CODE_SUCCESS) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE TABLE %s USING %s TAGS %s", builder->buf, builder->sTableName, buf);
code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_TABLE, builder->buf, result);
}
free(buf);
return code;
}
static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) {
TAOS_ROW row = tscFetchRow(builder);
if (row == NULL) {
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
const char *showColumns[] = {"REPLICA", "QUORUM", "DAYS", "KEEP", "BLOCKS", NULL};
SSqlObj *pSql = builder->pInterSql;
TAOS_FIELD *fields = taos_fetch_fields(pSql);
int num_fields = taos_num_fields(pSql);
char buf[TSDB_DB_NAME_LEN + 64] = {0};
do {
int32_t* lengths = taos_fetch_lengths(pSql);
int32_t ret = tscGetNthFieldResult(row, fields, lengths, 0, buf);
if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "CREATE DATABASE %s", buf);
for (int i = 1; i < num_fields; i++) {
for (int j = 0; showColumns[j] != NULL; j++) {
if (STR_NOCASE_EQUAL(fields[i].name, strlen(fields[i].name), showColumns[j], strlen(showColumns[j]))) {
memset(buf, 0, sizeof(buf));
ret = tscGetNthFieldResult(row, fields, lengths, i, buf);
if (ret == 0) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), " %s %s", showColumns[j], buf);
}
}
}
}
break;
}
row = tscFetchRow(builder);
} while (row != NULL);
if (0 == strlen(result)) {
return TSDB_CODE_TSC_DB_NOT_SELECTED;
}
return TSDB_CODE_SUCCESS;
}
int32_t tscRebuildCreateDBStatement(void *param,char *result) {
SCreateBuilder *builder = (SCreateBuilder *)param;
int32_t code = TSDB_CODE_SUCCESS;
char *buf = calloc(1, TSDB_MAX_BINARY_LEN);
if (buf == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
code = tscGetDBInfo(param, buf);
if (code == TSDB_CODE_SUCCESS) {
code = tscSCreateBuildResult(builder->pParentSql, SCREATE_BUILD_DB, builder->buf, buf);
}
free(buf);
return code;
}
static int32_t tscGetTableTagColumnName(SSqlObj *pSql, char **result) {
char *buf = (char *)malloc(TSDB_MAX_BINARY_LEN);
if (buf == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
buf[0] = 0;
STableMeta *pMeta = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0)->pTableMeta;
if (pMeta->tableType == TSDB_SUPER_TABLE || pMeta->tableType == TSDB_NORMAL_TABLE ||
pMeta->tableType == TSDB_STREAM_TABLE) {
free(buf);
return TSDB_CODE_TSC_INVALID_VALUE;
}
SSchema *pTagsSchema = tscGetTableTagSchema(pMeta);
int32_t numOfTags = tscGetNumOfTags(pMeta);
for (int32_t i = 0; i < numOfTags; i++) {
if (i != numOfTags - 1) {
snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "%s,", pTagsSchema[i].name);
} else {
snprintf(buf + strlen(buf), TSDB_MAX_BINARY_LEN - strlen(buf), "%s", pTagsSchema[i].name);
}
}
*result = buf;
return TSDB_CODE_SUCCESS;
}
static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, char *ddl) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
SSqlObj *pInterSql = (SSqlObj *)calloc(1, sizeof(SSqlObj));
if (pInterSql == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder));
if (param == NULL) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
char fullName[TSDB_TABLE_FNAME_LEN] = {0};
extractDBName(pTableMetaInfo->name, fullName);
extractTableName(pMeta->sTableId, param->sTableName);
snprintf(fullName + strlen(fullName), TSDB_TABLE_FNAME_LEN - strlen(fullName), ".%s", param->sTableName);
extractTableName(pTableMetaInfo->name, param->buf);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
param->fp = tscRebuildCreateTableStatement;
param->callStage = SCREATE_CALLBACK_QUERY;
char *query = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
if (query == NULL) {
free(param);
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
char *columns = NULL;
int32_t code = tscGetTableTagColumnName(pSql, &columns) ;
if (code != TSDB_CODE_SUCCESS) {
free(param);
free(pInterSql);
free(query);
return code;
}
snprintf(query + strlen(query), TSDB_MAX_BINARY_LEN - strlen(query), "SELECT %s FROM %s WHERE TBNAME IN(\'%s\')", columns, fullName, param->buf);
doAsyncQuery(pSql->pTscObj, pInterSql, tscSCreateCallBack, param, query, strlen(query));
free(query);
free(columns);
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
static int32_t tscRebuildDDLForNormalTable(SSqlObj *pSql, const char *tableName, char *ddl) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
int32_t numOfRows = tscGetNumOfColumns(pMeta);
SSchema *pSchema = tscGetTableSchema(pMeta);
char *result = ddl;
sprintf(result, "create table %s (", tableName);
for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[pSchema[i].type].aName);
}
}
sprintf(result + strlen(result) - 1, "%s", ")");
return TSDB_CODE_SUCCESS;
}
static int32_t tscRebuildDDLForSuperTable(SSqlObj *pSql, const char *tableName, char *ddl) {
char *result = ddl;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableMeta * pMeta = pTableMetaInfo->pTableMeta;
int32_t numOfRows = tscGetNumOfColumns(pMeta);
int32_t totalRows = numOfRows + tscGetNumOfTags(pMeta);
SSchema *pSchema = tscGetTableSchema(pMeta);
sprintf(result, "create table %s (", tableName);
for (int32_t i = 0; i < numOfRows; ++i) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result),"%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
}
}
snprintf(result + strlen(result) - 1, TSDB_MAX_BINARY_LEN - strlen(result), "%s %s", ")", "TAGS (");
for (int32_t i = numOfRows; i < totalRows; i++) {
uint8_t type = pSchema[i].type;
if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s(%d),", pSchema[i].name,tDataTypeDesc[pSchema[i].type].aName,pSchema->bytes);
} else {
snprintf(result + strlen(result), TSDB_MAX_BINARY_LEN - strlen(result), "%s %s,", pSchema[i].name, tDataTypeDesc[type].aName);
}
}
sprintf(result + strlen(result) - 1, "%s", ")");
return TSDB_CODE_SUCCESS;
}
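All three rebuilders share the same string-assembly pattern: append "name TYPE," per column, then let sprintf(result + strlen(result) - 1, ...) overwrite the trailing comma with the closing token. A compact standalone version for a hypothetical super table st:

    #include <stdio.h>
    #include <string.h>

    typedef struct { const char *name; const char *type; int isTag; } Col;

    int main(void) {
      Col schema[] = {{"ts", "TIMESTAMP", 0}, {"v", "DOUBLE", 0}, {"t1", "INT", 1}};
      char ddl[160];

      snprintf(ddl, sizeof(ddl), "create table st (");
      for (int i = 0; i < 3 && !schema[i].isTag; ++i)   /* ordinary columns */
        snprintf(ddl + strlen(ddl), sizeof(ddl) - strlen(ddl), "%s %s,", schema[i].name, schema[i].type);
      sprintf(ddl + strlen(ddl) - 1, ") TAGS (");       /* close cols, open tags */
      for (int i = 0; i < 3; ++i)
        if (schema[i].isTag)
          snprintf(ddl + strlen(ddl), sizeof(ddl) - strlen(ddl), "%s %s,", schema[i].name, schema[i].type);
      sprintf(ddl + strlen(ddl) - 1, ")");              /* last ',' -> ')' */

      printf("%s\n", ddl);  /* create table st (ts TIMESTAMP,v DOUBLE) TAGS (t1 INT) */
      return 0;
    }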
static int32_t tscProcessShowCreateTable(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
assert(pTableMetaInfo->pTableMeta != NULL);
char tableName[TSDB_TABLE_NAME_LEN] = {0};
extractTableName(pTableMetaInfo->name, tableName);
char *result = (char *)calloc(1, TSDB_MAX_BINARY_LEN);
int32_t code = TSDB_CODE_SUCCESS;
if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) {
code = tscRebuildDDLForSuperTable(pSql, tableName, result);
} else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo)) {
code = tscRebuildDDLForNormalTable(pSql, tableName, result);
} else if (UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) {
code = tscRebuildDDLForSubTable(pSql, tableName, result);
} else {
code = TSDB_CODE_TSC_INVALID_VALUE;
}
if (code == TSDB_CODE_SUCCESS) {
code = tscSCreateBuildResult(pSql, SCREATE_BUILD_TABLE, tableName, result);
}
free(result);
return code;
}
static int32_t tscProcessShowCreateDatabase(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
SSqlObj *pInterSql = (SSqlObj *)calloc(1, sizeof(SSqlObj));
if (pInterSql == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
SCreateBuilder *param = (SCreateBuilder *)malloc(sizeof(SCreateBuilder));
if (param == NULL) {
free(pInterSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
extractTableName(pTableMetaInfo->name, param->buf);
param->pParentSql = pSql;
param->pInterSql = pInterSql;
param->fp = tscRebuildCreateDBStatement;
param->callStage = SCREATE_CALLBACK_QUERY;
const char *query = "show databases";
doAsyncQuery(pSql->pTscObj, pInterSql, tscSCreateCallBack, param, query, strlen(query));
return TSDB_CODE_TSC_ACTION_IN_PROGRESS;
}
static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resBytes = TSDB_USER_LEN + TSDB_DATA_TYPE_BINARY;
pExpr->resType = TSDB_DATA_TYPE_BINARY;
char* vx = calloc(1, pExpr->resBytes);
if (vx == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -288,7 +807,7 @@ static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
size_t size = sizeof(pSql->pTscObj->user);
STR_WITH_MAXSIZE_TO_VARSTR(vx, pSql->pTscObj->user, size);
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(vx);
@ -298,15 +817,15 @@ static int32_t tscProcessCurrentUser(SSqlObj *pSql) {
static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
char db[TSDB_DB_NAME_LEN] = {0};
extractDBName(pSql->pTscObj->db, db);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resType = TSDB_DATA_TYPE_BINARY;
size_t t = strlen(db);
pExpr->resBytes = TSDB_DB_NAME_LEN + VARSTR_HEADER_SIZE;
char* vx = calloc(1, pExpr->resBytes);
if (vx == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
@ -317,7 +836,7 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
} else {
STR_WITH_SIZE_TO_VARSTR(vx, db, (VarDataLenT)t);
}
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(vx);
@ -327,49 +846,53 @@ static int32_t tscProcessCurrentDB(SSqlObj *pSql) {
static int32_t tscProcessServerVer(SSqlObj *pSql) {
const char* v = pSql->pTscObj->sversion;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resType = TSDB_DATA_TYPE_BINARY;
size_t t = strlen(v);
pExpr->resBytes = (int16_t)(t + VARSTR_HEADER_SIZE);
char* vx = calloc(1, pExpr->resBytes);
if (vx == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
STR_WITH_SIZE_TO_VARSTR(vx, v, (VarDataLenT)t);
tscSetLocalQueryResult(pSql, vx, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(vx);
return TSDB_CODE_SUCCESS;
}
static int32_t tscProcessClientVer(SSqlObj *pSql) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
pExpr->resType = TSDB_DATA_TYPE_BINARY;
size_t t = strlen(version);
pExpr->resBytes = (int16_t)(t + VARSTR_HEADER_SIZE);
char* v = calloc(1, pExpr->resBytes);
if (v == NULL) {
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
STR_WITH_SIZE_TO_VARSTR(v, version, (VarDataLenT)t);
tscSetLocalQueryResult(pSql, v, pExpr->aliasName, pExpr->resType, pExpr->resBytes);
free(v);
return TSDB_CODE_SUCCESS;
}
static int32_t tscProcessServStatus(SSqlObj *pSql) {
STscObj* pObj = pSql->pTscObj;
if (pObj->pHb != NULL) {
if (pObj->pHb->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) {
pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL;
@ -380,9 +903,9 @@ static int32_t tscProcessServStatus(SSqlObj *pSql) {
return pSql->res.code;
}
}
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0);
int32_t val = 1;
tscSetLocalQueryResult(pSql, (char*) &val, pExpr->aliasName, TSDB_DATA_TYPE_INT, sizeof(int32_t));
@ -394,23 +917,23 @@ void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnNa
SSqlRes *pRes = &pSql->res;
pCmd->numOfCols = 1;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pQueryInfo->order.order = TSDB_ORDER_ASC;
tscFieldInfoClear(&pQueryInfo->fieldsInfo);
pQueryInfo->fieldsInfo.pFields = taosArrayInit(1, sizeof(TAOS_FIELD));
pQueryInfo->fieldsInfo.pSupportInfo = taosArrayInit(1, sizeof(SFieldSupInfo));
TAOS_FIELD f = tscCreateField((int8_t)type, columnName, (int16_t)valueLength);
tscFieldInfoAppend(&pQueryInfo->fieldsInfo, &f);
tscInitResObjForLocalQuery(pSql, 1, (int32_t)valueLength);
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, 0);
SFieldSupInfo* pInfo = tscFieldInfoGetSupp(&pQueryInfo->fieldsInfo, 0);
pInfo->pSqlExpr = taosArrayGetP(pQueryInfo->exprList, 0);
memcpy(pRes->data, val, pField->bytes);
}
@ -429,8 +952,12 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
*/
pRes->qhandle = 0x1;
pRes->numOfRows = 0;
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) {
pRes->code = tscProcessShowCreateTable(pSql);
} else if (pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE) {
pRes->code = tscProcessShowCreateDatabase(pSql);
} else if (pCmd->command == TSDB_SQL_RESET_CACHE) {
- taosCacheEmpty(tscCacheHandle);
+ taosCacheEmpty(tscMetaCache);
pRes->code = TSDB_CODE_SUCCESS;
} else if (pCmd->command == TSDB_SQL_SERV_VERSION) {
pRes->code = tscProcessServerVer(pSql);
@ -448,12 +975,13 @@ int tscProcessLocalCmd(SSqlObj *pSql) {
}
// keep the code in local variable in order to avoid invalid read in case of async query
int32_t code = pRes->code;
if (code == TSDB_CODE_SUCCESS) {
(*pSql->fp)(pSql->param, pSql, code);
} else if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS){
} else {
tscQueueAsyncRes(pSql);
}
return code;
}


@ -368,13 +368,12 @@ void tscCreateLocalReducer(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrd
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
TSKEY stime = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey : pQueryInfo->window.ekey;
- int64_t revisedSTime =
-     taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
+ int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
SFillColInfo* pFillCol = createFillColInfo(pQueryInfo);
pReducer->pFillInfo = taosInitFillInfo(pQueryInfo->order.order, revisedSTime, pQueryInfo->groupbyExpr.numOfGroupCols,
-     4096, (int32_t)numOfCols, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit,
+     4096, (int32_t)numOfCols, pQueryInfo->interval.sliding, pQueryInfo->interval.slidingUnit,
tinfo.precision, pQueryInfo->fillType, pFillCol);
}
}
@ -472,10 +471,8 @@ void tscDestroyLocalReducer(SSqlObj *pSql) {
return;
}
tscDebug("%p start to free local reducer", pSql);
SSqlRes *pRes = &(pSql->res);
if (pRes->pLocalReducer == NULL) {
tscDebug("%p local reducer has been freed, abort", pSql);
return;
}
@ -553,7 +550,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
}
// primary timestamp column is involved in final result
- if (pQueryInfo->intervalTime != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
+ if (pQueryInfo->interval.interval != 0 || tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) {
numOfGroupByCols++;
}
@ -570,7 +567,7 @@ static int32_t createOrderDescriptor(tOrderDescriptor **pOrderDesc, SSqlCmd *pCm
orderIdx[i] = startCols++;
}
- if (pQueryInfo->intervalTime != 0) {
+ if (pQueryInfo->interval.interval != 0) {
// the first column is the timestamp, handles queries like "interval(10m) group by tags"
orderIdx[numOfGroupByCols - 1] = PRIMARYKEY_TIMESTAMP_COL_INDEX;
}
@ -614,12 +611,12 @@ bool isSameGroup(SSqlCmd *pCmd, SLocalReducer *pReducer, char *pPrev, tFilePage
* super table interval query
* if the order columns is the primary timestamp, all result data belongs to one group
*/
- assert(pQueryInfo->intervalTime > 0);
+ assert(pQueryInfo->interval.interval > 0);
if (numOfCols == 1) {
return true;
}
} else { // simple group by query
- assert(pQueryInfo->intervalTime == 0);
+ assert(pQueryInfo->interval.interval == 0);
}
// only one row exists
@ -827,8 +824,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
if (pFillInfo != NULL) {
int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
- int64_t revisedSTime =
-     taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
+ int64_t revisedSTime = taosTimeTruncate(stime, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pFillInfo, revisedSTime);
}
@ -841,7 +837,7 @@ void savePrevRecordAndSetupFillInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQ
}
static void genFinalResWithoutFill(SSqlRes* pRes, SLocalReducer *pLocalReducer, SQueryInfo* pQueryInfo) {
- assert(pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
+ assert(pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE);
tFilePage * pBeforeFillData = pLocalReducer->pResultBuf;
@ -1222,7 +1218,7 @@ bool genFinalResults(SSqlObj *pSql, SLocalReducer *pLocalReducer, bool noMoreCur
#endif
// no interval query, no fill operation
- if (pQueryInfo->intervalTime == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
+ if (pQueryInfo->interval.interval == 0 || pQueryInfo->fillType == TSDB_FILL_NONE) {
genFinalResWithoutFill(pRes, pLocalReducer, pQueryInfo);
} else {
SFillInfo* pFillInfo = pLocalReducer->pFillInfo;
@ -1260,13 +1256,10 @@ static void resetEnvForNewResultset(SSqlRes *pRes, SSqlCmd *pCmd, SLocalReducer
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
int8_t precision = tinfo.precision;
// for group result interpolation, do not return if not data is generated
if (pQueryInfo->fillType != TSDB_FILL_NONE) {
TSKEY skey = (pQueryInfo->order.order == TSDB_ORDER_ASC)? pQueryInfo->window.skey:pQueryInfo->window.ekey;//MIN(pQueryInfo->window.skey, pQueryInfo->window.ekey);
- int64_t newTime =
-     taosGetIntervalStartTimestamp(skey, pQueryInfo->slidingTime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, precision);
+ int64_t newTime = taosTimeTruncate(skey, &pQueryInfo->interval, tinfo.precision);
taosResetFillInfo(pLocalReducer->pFillInfo, newTime);
}
}


@ -142,7 +142,7 @@ int tsParseTime(SStrToken *pToken, int64_t *time, char **next, char *error, int1
return tscInvalidSQLErrMsg(error, "value expected in timestamp", sToken.z);
}
- if (getTimestampInUsFromStr(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
+ if (parseAbsoluteDuration(valueToken.z, valueToken.n, &interval) != TSDB_CODE_SUCCESS) {
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -406,7 +406,7 @@ static int32_t tsCheckTimestamp(STableDataBlocks *pDataBlocks, const char *start
return TSDB_CODE_SUCCESS;
}
- int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, char *error,
+ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[], SParsedDataColInfo *spd, SSqlCmd* pCmd,
int16_t timePrec, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
SStrToken sToken = {0};
@ -426,12 +426,17 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
*str += index;
if (sToken.type == TK_QUESTION) {
if (pCmd->insertType != TSDB_QUERY_TYPE_STMT_INSERT) {
*code = tscSQLSyntaxErrMsg(pCmd->payload, "? only allowed in binding insertion", *str);
return -1;
}
uint32_t offset = (uint32_t)(start - pDataBlocks->pData);
if (tscAddParamToDataBlock(pDataBlocks, pSchema->type, (uint8_t)timePrec, pSchema->bytes, offset) != NULL) {
continue;
}
strcpy(error, "client out of memory");
strcpy(pCmd->payload, "client out of memory");
*code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return -1;
}
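The new TK_QUESTION branch rejects '?' outside prepared-statement inserts and otherwise records the placeholder instead of parsing a value. The essential bookkeeping is a (type, offset) pair pointing into the row buffer, roughly like this sketch (illustrative struct, not the client's internal one):

    #include <stdio.h>

    /* sketch: a '?' token is not parsed as a value; its (type, offset) is
     * queued so a later bind call can patch the row buffer in place */
    typedef struct { int type; unsigned offset; } BoundParam;

    int main(void) {
      char rowbuf[64];
      BoundParam params[8];
      int numOfParams = 0;

      /* pretend column 1 (an INT at byte 8 of the row) was a '?' */
      params[numOfParams].type = 4;   /* assumed: TSDB_DATA_TYPE_INT == 4 */
      params[numOfParams].offset = 8;
      numOfParams++;

      printf("recorded %d param(s); first at offset %u of %zu-byte row\n",
             numOfParams, params[0].offset, sizeof(rowbuf));
      return 0;
    }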
@ -439,8 +444,7 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
int16_t type = sToken.type;
if ((type != TK_NOW && type != TK_INTEGER && type != TK_STRING && type != TK_FLOAT && type != TK_BOOL &&
type != TK_NULL && type != TK_HEX && type != TK_OCT && type != TK_BIN) || (sToken.n == 0) || (type == TK_RP)) {
tscSQLSyntaxErrMsg(error, "invalid data or symbol", sToken.z);
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
*code = tscSQLSyntaxErrMsg(pCmd->payload, "invalid data or symbol", sToken.z);
return -1;
}
@ -470,14 +474,14 @@ int tsParseOneRowData(char **str, STableDataBlocks *pDataBlocks, SSchema schema[
}
bool isPrimaryKey = (colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX);
- int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, error, str, isPrimaryKey, timePrec);
+ int32_t ret = tsParseOneColumnData(pSchema, &sToken, start, pCmd->payload, str, isPrimaryKey, timePrec);
if (ret != TSDB_CODE_SUCCESS) {
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1; // NOTE: here 0 mean error!
}
if (isPrimaryKey && tsCheckTimestamp(pDataBlocks, start) != TSDB_CODE_SUCCESS) {
- tscInvalidSQLErrMsg(error, "client time/server time can not be mixed up", sToken.z);
+ tscInvalidSQLErrMsg(pCmd->payload, "client time/server time can not be mixed up", sToken.z);
*code = TSDB_CODE_TSC_INVALID_TIME_STAMP;
return -1;
}
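A recurring change in this file: error text no longer goes into a caller-supplied char *error but straight into pCmd->payload, so every parsing layer reports through the command object it already carries. The pattern in miniature (invented names, a sketch only):

    #include <stdio.h>

    typedef struct Cmd { char payload[64]; } Cmd;

    /* the context owns the error buffer; callers stop threading a char* */
    static int parse_row(Cmd *cmd, const char *tok) {
      if (tok[0] != '(') {
        snprintf(cmd->payload, sizeof(cmd->payload), "( expected near: %s", tok);
        return -1;
      }
      return 0;
    }

    int main(void) {
      Cmd cmd;
      if (parse_row(&cmd, "VALUES") < 0) printf("%s\n", cmd.payload);
      return 0;
    }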
@ -522,11 +526,11 @@ static int32_t rowDataCompar(const void *lhs, const void *rhs) {
}
int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMeta, int maxRows,
-                  SParsedDataColInfo *spd, char *error, int32_t *code, char *tmpTokenBuf) {
+                  SParsedDataColInfo *spd, SSqlCmd* pCmd, int32_t *code, char *tmpTokenBuf) {
int32_t index = 0;
SStrToken sToken;
- int16_t numOfRows = 0;
+ int32_t numOfRows = 0;
SSchema *pSchema = tscGetTableSchema(pTableMeta);
STableComInfo tinfo = tscGetTableInfo(pTableMeta);
@ -534,8 +538,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
int32_t precision = tinfo.precision;
if (spd->hasVal[0] == false) {
strcpy(error, "primary timestamp column can not be null");
*code = TSDB_CODE_TSC_INVALID_SQL;
*code = tscInvalidSQLErrMsg(pCmd->payload, "primary timestamp column can not be null", *str);
return -1;
}
@ -547,17 +550,17 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
*str += index;
if (numOfRows >= maxRows || pDataBlock->size + tinfo.rowSize >= pDataBlock->nAllocSize) {
int32_t tSize;
- int32_t retcode = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
- if (retcode != TSDB_CODE_SUCCESS) {  //TODO pass the correct error code to client
-   strcpy(error, "client out of memory");
-   *code = retcode;
+ *code = tscAllocateMemIfNeed(pDataBlock, tinfo.rowSize, &tSize);
+ if (*code != TSDB_CODE_SUCCESS) {  //TODO pass the correct error code to client
+   strcpy(pCmd->payload, "client out of memory");
return -1;
}
ASSERT(tSize > maxRows);
maxRows = tSize;
}
- int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, error, precision, code, tmpTokenBuf);
+ int32_t len = tsParseOneRowData(str, pDataBlock, pSchema, spd, pCmd, precision, code, tmpTokenBuf);
if (len <= 0) { // error message has been set in tsParseOneRowData
return -1;
}
@ -568,7 +571,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
sToken = tStrGetToken(*str, &index, false, 0, NULL);
*str += index;
if (sToken.n == 0 || sToken.type != TK_RP) {
tscSQLSyntaxErrMsg(error, ") expected", *str);
tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str);
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
}
@ -577,7 +580,7 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, STableMeta *pTableMe
}
if (numOfRows <= 0) {
strcpy(error, "no any data points");
strcpy(pCmd->payload, "no any data points");
*code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
return -1;
} else {
@ -704,7 +707,7 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
- int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd->payload, &code, tmpTokenBuf);
+ int32_t numOfRows = tsParseValues(str, dataBuf, pTableMeta, maxNumOfRows, spd, pCmd, &code, tmpTokenBuf);
free(tmpTokenBuf);
if (numOfRows <= 0) {
return code;
@ -724,10 +727,6 @@ static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **st
dataBuf->vgId = pTableMeta->vgroupInfo.vgId;
dataBuf->numOfTables = 1;
- /*
-  * the value of pRes->numOfRows does not affect the true result of AFFECTED ROWS,
-  * which is actually returned from server.
-  */
*totalNum += numOfRows;
return TSDB_CODE_SUCCESS;
}
@ -791,7 +790,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
sql += index;
tscAllocPayload(pCmd, sizeof(STagData));
- STagData *pTag = (STagData *) pCmd->payload;
+ STagData *pTag = &pCmd->tagData;
memset(pTag, 0, sizeof(STagData));
@ -946,7 +945,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", sToken.z);
}
- pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen) + pTag->dataLen;
pTag->dataLen = htonl(pTag->dataLen);
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
@ -1192,8 +1190,7 @@ int tsParseInsertSql(SSqlObj *pSql) {
str += index;
if (TK_STRING == sToken.type) {
- strdequote(sToken.z);
- sToken.n = (uint32_t)strtrim(sToken.z);
+ tscDequoteAndTrimToken(&sToken);
}
if (sToken.type == TK_RP) {
@ -1460,8 +1457,7 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) {
char *lineptr = line;
strtolower(line, line);
- int32_t len =
-     tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd->payload, tinfo.precision, &code, tokenBuf);
+ int32_t len = tsParseOneRowData(&lineptr, pTableDataBlock, pSchema, &spd, pCmd, tinfo.precision, &code, tokenBuf);
if (len <= 0 || pTableDataBlock->numOfParams > 0) {
pSql->res.code = code;
break;


@ -43,10 +43,6 @@ typedef struct SNormalStmt {
tVariant* params;
} SNormalStmt;
- //typedef struct SInsertStmt {
- //
- //} SInsertStmt;
typedef struct STscStmt {
bool isInsert;
STscObj* taos;
@ -54,7 +50,6 @@ typedef struct STscStmt {
SNormalStmt normal;
} STscStmt;
static int normalStmtAddPart(SNormalStmt* stmt, bool isParam, char* str, uint32_t len) {
uint16_t size = stmt->numParts + 1;
if (size > stmt->sizeParts) {
@ -514,7 +509,6 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
SSqlObj* pSql = pStmt->pSql;
size_t sqlLen = strlen(sql);
- //doAsyncQuery(pObj, pSql, waitForQueryRsp, taos, sqlstr, sqlLen);
SSqlCmd *pCmd = &pSql->cmd;
SSqlRes *pRes = &pSql->res;
pSql->param = (void*) pSql;
@ -545,7 +539,9 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
pSql->cmd.numOfParams = 0;
pSql->cmd.batchSize = 0;
registerSqlObj(pSql);
int32_t code = tsParseSql(pSql, true);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
// wait for the callback function to post the semaphore
@ -574,7 +570,7 @@ int taos_stmt_close(TAOS_STMT* stmt) {
free(normal->sql);
}
- tscFreeSqlObj(pStmt->pSql);
+ taos_free_result(pStmt->pSql);
free(pStmt);
return TSDB_CODE_SUCCESS;
}


@ -259,11 +259,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) {
pSdesc->num = htobe64(pStream->num);
pSdesc->useconds = htobe64(pStream->useconds);
- pSdesc->stime = htobe64(pStream->stime - pStream->intervalTime);
+ pSdesc->stime = htobe64(pStream->stime - pStream->interval.interval);
pSdesc->ctime = htobe64(pStream->ctime);
- pSdesc->slidingTime = htobe64(pStream->slidingTime);
- pSdesc->interval = htobe64(pStream->intervalTime);
+ pSdesc->slidingTime = htobe64(pStream->interval.sliding);
+ pSdesc->interval = htobe64(pStream->interval.interval);
pHeartbeat->numOfStreams++;
pSdesc++;
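Each 64-bit field still passes through htobe64 after the move into the interval struct — serialization stays byte-order aware even though the fields were regrouped. A small sketch of that conversion (the fallback below relies on __builtin_bswap64, a gcc/clang builtin, which is an assumption about the toolchain):

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t my_htobe64(uint64_t x) {
      const union { uint16_t u; uint8_t b; } probe = { 1 };
      if (probe.b == 0) return x;            /* host already big-endian */
      return __builtin_bswap64(x);           /* little-endian: swap bytes */
    }

    int main(void) {
      int64_t interval = 10 * 60 * 1000;     /* a 10m interval in ms */
      printf("wire=%016llx\n", (unsigned long long)my_htobe64((uint64_t)interval));
      return 0;
    }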

(File diff suppressed because it is too large.)


@ -170,6 +170,7 @@ STableMeta* tscCreateTableMetaFromMsg(STableMetaMsg* pTableMetaMsg, size_t* size
pTableMeta->sversion = pTableMetaMsg->sversion;
pTableMeta->tversion = pTableMetaMsg->tversion;
tstrncpy(pTableMeta->sTableId, pTableMetaMsg->sTableId, TSDB_TABLE_FNAME_LEN);
memcpy(pTableMeta->schema, pTableMetaMsg->schema, schemaSize);


@ -27,10 +27,7 @@
#include "tutil.h"
#include "tlockfree.h"
#define TSC_MGMT_VNODE 999
SRpcCorEpSet tscMgmtEpSet;
SRpcEpSet tscDnodeEpSet;
int (*tscBuildMsg[TSDB_SQL_MAX])(SSqlObj *pSql, SSqlInfo *pInfo) = {0};
@ -56,7 +53,10 @@ static void tscSetDnodeEpSet(SSqlObj* pSql, SCMVgroupInfo* pVgroupInfo) {
assert(pSql != NULL && pVgroupInfo != NULL && pVgroupInfo->numOfEps > 0);
SRpcEpSet* pEpSet = &pSql->epSet;
- pEpSet->inUse = 0;
+ // Issue the query to one of the vnode among a vgroup randomly.
+ // change the inUse property would not affect the isUse attribute of STableMeta
+ pEpSet->inUse = rand() % pVgroupInfo->numOfEps;
// apply the FQDN string length check here
bool hasFqdn = false;
@ -131,6 +131,7 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse);
taosCorEndWrite(&pVgroupInfo->version);
}
void tscPrintMgmtEp() {
SRpcEpSet dump;
tscDumpMgmtEpSet(&dump);
@ -146,12 +147,13 @@ void tscPrintMgmtEp() {
void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
STscObj *pObj = (STscObj *)param;
if (pObj == NULL) return;
if (pObj != pObj->signature) {
tscError("heart beat msg, pObj:%p, signature:%p invalid", pObj, pObj->signature);
return;
}
- SSqlObj *pSql = pObj->pHb;
+ SSqlObj *pSql = tres;
SSqlRes *pRes = &pSql->res;
if (code == 0) {
@ -172,10 +174,17 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) {
if (pRsp->streamId) tscKillStream(pObj, htonl(pRsp->streamId));
}
} else {
tscDebug("heart beat failed, code:%s", tstrerror(code));
tscDebug("heartbeat failed, code:%s", tstrerror(code));
}
- taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);
+ if (pObj->pHb != NULL) {
+   int32_t waitingDuring = tsShellActivityTimer * 500;
+   tscDebug("%p start heartbeat in %dms", pSql, waitingDuring);
+   taosTmrReset(tscProcessActivityTimer, waitingDuring, pObj, tscTmr, &pObj->pTimer);
+ } else {
+   tscDebug("%p start to close tscObj:%p, not send heartbeat again", pSql, pObj);
+ }
}
void tscProcessActivityTimer(void *handle, void *tmrId) {
@ -191,8 +200,8 @@ void tscProcessActivityTimer(void *handle, void *tmrId) {
if (tscShouldFreeHeartBeat(pHB)) {
tscDebug("%p free HB object and release connection", pHB);
- tscFreeSqlObj(pHB);
- tscCloseTscObj(pObj);
+ pObj->pHb = 0;
+ taos_free_result(pHB);
} else {
int32_t code = tscProcessSql(pHB);
if (code != TSDB_CODE_SUCCESS) {
@ -236,43 +245,51 @@ int tscSendMsgToServer(SSqlObj *pSql) {
}
void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
- SSqlObj *pSql = (SSqlObj *)rpcMsg->ahandle;
- if (pSql == NULL || pSql->signature != pSql) {
-   tscError("%p sql is already released", pSql);
+ TSDB_CACHE_PTR_TYPE handle = (TSDB_CACHE_PTR_TYPE) rpcMsg->ahandle;
+ void** p = taosCacheAcquireByKey(tscObjCache, &handle, sizeof(TSDB_CACHE_PTR_TYPE));
+ if (p == NULL) {
    rpcFreeCont(rpcMsg->pCont);
    return;
  }
+ SSqlObj* pSql = *p;
+ assert(pSql != NULL);
STscObj *pObj = pSql->pTscObj;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
assert(*pSql->self == pSql);
pSql->pRpcCtx = NULL;
if (pObj->signature != pObj) {
tscDebug("%p DB connection is closed, cmd:%d pObj:%p signature:%p", pSql, pCmd->command, pObj, pObj->signature);
- tscFreeSqlObj(pSql);
+ taosCacheRelease(tscObjCache, (void**) &p, true);
rpcFreeCont(rpcMsg->pCont);
return;
}
pSql->pRpcCtx = NULL; // clear the rpcCtx
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if (pQueryInfo != NULL && pQueryInfo->type == TSDB_QUERY_TYPE_FREE_RESOURCE) {
tscDebug("%p sqlObj needs to be released or DB connection is closed, cmd:%d type:%d, pObj:%p signature:%p",
pSql, pCmd->command, pQueryInfo->type, pObj, pObj->signature);
- tscFreeSqlObj(pSql);
+ void** p1 = p;
+ taosCacheRelease(tscObjCache, (void**) &p1, false);
+ taosCacheRelease(tscObjCache, (void**) &p, true);
rpcFreeCont(rpcMsg->pCont);
return;
}
if (pEpSet) {
  if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
    if (pCmd->command < TSDB_SQL_MGMT) {
      tscUpdateVgroupInfo(pSql, pEpSet);
} else {
tscUpdateMgmtEpSet(pEpSet);
}
}
}
}
@ -294,7 +311,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
if (pSql->retry > pSql->maxRetry) {
tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry);
} else {
- // wait for a little bit moment and then retry
+ // wait for a little bit moment and then retry, todo do not sleep in rpc callback thread
if (rpcMsg->code == TSDB_CODE_APP_NOT_READY || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
int32_t duration = getWaitingTimeInterval(pSql->retry);
taosMsleep(duration);
@ -304,6 +321,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
// if there is an error occurring, proceed to the following error handling procedure.
if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
taosCacheRelease(tscObjCache, (void**) &p, false);
rpcFreeCont(rpcMsg->pCont);
return;
}
@ -365,16 +383,18 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
rpcMsg->code = (*tscProcessMsgRsp[pCmd->command])(pSql);
}
+ bool shouldFree = tscShouldBeFreed(pSql);
  if (rpcMsg->code != TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
    rpcMsg->code = (pRes->code == TSDB_CODE_SUCCESS) ? (int32_t)pRes->numOfRows : pRes->code;
-   bool shouldFree = tscShouldBeFreed(pSql);
    (*pSql->fp)(pSql->param, pSql, rpcMsg->code);
  }
- if (shouldFree) {
-   tscDebug("%p sqlObj is automatically freed", pSql);
-   tscFreeSqlObj(pSql);
- }
+ void** p1 = p;
+ taosCacheRelease(tscObjCache, (void**) &p1, false);
+ if (shouldFree) { // in case of table-meta/vgrouplist query, automatically free it
+   taosCacheRelease(tscObjCache, (void **)&p, true);
+   tscDebug("%p sqlObj is automatically freed", pSql);
+ }
rpcFreeCont(rpcMsg->pCont);
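The rewritten response path no longer trusts the raw ahandle pointer; it looks the object up in tscObjCache and works under a reference, so a response that races with object teardown simply misses the lookup. A toy model of that ownership protocol (not the actual taosCache API):

    #include <stdio.h>

    /* sketch: an entry can only be freed when the last reference drops,
     * and an acquirer that arrives after eviction gets NULL back */
    typedef struct { int refs; int alive; } Entry;

    static Entry *cache_acquire(Entry *e) {
      if (!e->alive) return NULL;   /* already evicted: drop the message */
      e->refs++;
      return e;
    }

    static void cache_release(Entry *e, int evict) {
      if (evict) e->alive = 0;
      if (--e->refs == 0 && !e->alive) printf("object freed safely\n");
    }

    int main(void) {
      Entry obj = { .refs = 1, .alive = 1 };  /* creator's reference */
      Entry *p = cache_acquire(&obj);         /* response handler's reference */
      cache_release(&obj, 0);                 /* creator lets go */
      if (p) cache_release(p, 1);             /* handler finishes and evicts */
      return 0;
    }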
@ -406,7 +426,7 @@ int doProcessSql(SSqlObj *pSql) {
if (code != TSDB_CODE_SUCCESS) {
pRes->code = code;
tscQueueAsyncRes(pSql);
- return pRes->code;
+ return code;
}
return TSDB_CODE_SUCCESS;
@ -464,6 +484,7 @@ void tscKillSTableQuery(SSqlObj *pSql) {
pSub->res.code = TSDB_CODE_TSC_QUERY_CANCELLED;
if (pSub->pRpcCtx != NULL) {
rpcCancelRequest(pSub->pRpcCtx);
pSub->pRpcCtx = NULL;
}
tscQueueAsyncRes(pSub); // async res? not other functions?
@ -631,14 +652,14 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
size_t numOfSrcCols = taosArrayGetSize(pQueryInfo->colList);
if (numOfSrcCols <= 0 && !tscQueryTags(pQueryInfo)) {
tscError("%p illegal value of numOfCols in query msg: %"PRIu64", table cols:%d", pSql, numOfSrcCols,
tscError("%p illegal value of numOfCols in query msg: %" PRIu64 ", table cols:%d", pSql, (uint64_t)numOfSrcCols,
tscGetNumOfColumns(pTableMeta));
return TSDB_CODE_TSC_INVALID_SQL;
}
- if (pQueryInfo->intervalTime < 0) {
-   tscError("%p illegal value of aggregation time interval in query msg: %ld", pSql, pQueryInfo->intervalTime);
+ if (pQueryInfo->interval.interval < 0) {
+   tscError("%p illegal value of aggregation time interval in query msg: %" PRId64, pSql, (int64_t)pQueryInfo->interval.interval);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -665,10 +686,12 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
pQueryMsg->limit = htobe64(pQueryInfo->limit.limit);
pQueryMsg->offset = htobe64(pQueryInfo->limit.offset);
pQueryMsg->numOfCols = htons((int16_t)taosArrayGetSize(pQueryInfo->colList));
- pQueryMsg->intervalTime = htobe64(pQueryInfo->intervalTime);
- pQueryMsg->slidingTime = htobe64(pQueryInfo->slidingTime);
- pQueryMsg->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
- pQueryMsg->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
+ pQueryMsg->interval.interval = htobe64(pQueryInfo->interval.interval);
+ pQueryMsg->interval.sliding = htobe64(pQueryInfo->interval.sliding);
+ pQueryMsg->interval.offset = htobe64(pQueryInfo->interval.offset);
+ pQueryMsg->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
+ pQueryMsg->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
+ pQueryMsg->interval.offsetUnit = pQueryInfo->interval.offsetUnit;
pQueryMsg->numOfGroupCols = htons(pQueryInfo->groupbyExpr.numOfGroupCols);
pQueryMsg->numOfTags = htonl(numOfTags);
pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType);
@ -733,7 +756,6 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i);
if (!tscValidateColumnId(pTableMetaInfo, pExpr->colInfo.colId, pExpr->numOfParams)) {
/* column id is not valid according to the cached table meta, the table meta is expired */
tscError("%p table schema is not matched with parsed sql", pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
@ -1483,43 +1505,29 @@ int tscBuildConnectMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
- SCMTableInfoMsg *pInfoMsg;
- char *pMsg;
- int msgLen = 0;
- char *tmpData = NULL;
- uint32_t len = pSql->cmd.payloadLen;
- if (len > 0) {
-   if ((tmpData = calloc(1, len)) == NULL) {
-     return TSDB_CODE_TSC_OUT_OF_MEMORY;
-   }
-   // STagData is in binary format, strncpy is not available
-   memcpy(tmpData, pSql->cmd.payload, len);
- }
  SSqlCmd * pCmd = &pSql->cmd;
  SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
  STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
- pInfoMsg = (SCMTableInfoMsg *)pCmd->payload;
+ SCMTableInfoMsg* pInfoMsg = (SCMTableInfoMsg *)pCmd->payload;
  strcpy(pInfoMsg->tableId, pTableMetaInfo->name);
  pInfoMsg->createFlag = htons(pSql->cmd.autoCreated ? 1 : 0);
- pMsg = (char*)pInfoMsg + sizeof(SCMTableInfoMsg);
+ char* pMsg = (char*)pInfoMsg + sizeof(SCMTableInfoMsg);
- if (pSql->cmd.autoCreated && len > 0) {
-   memcpy(pInfoMsg->tags, tmpData, len);
-   pMsg += len;
- }
+ size_t len = htonl(pCmd->tagData.dataLen);
+ if (pSql->cmd.autoCreated) {
+   if (len > 0) {
+     len += sizeof(pCmd->tagData.name) + sizeof(pCmd->tagData.dataLen);
+     memcpy(pInfoMsg->tags, &pCmd->tagData, len);
+     pMsg += len;
+   }
+ }
  pCmd->payloadLen = (int32_t)(pMsg - (char*)pInfoMsg);
  pCmd->msgType = TSDB_MSG_TYPE_CM_TABLE_META;
- taosTFree(tmpData);
- assert(msgLen + minMsgSize() <= (int32_t)pCmd->allocSize);
return TSDB_CODE_SUCCESS;
}
@ -1667,8 +1675,10 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
pMetaMsg->contLen = htons(pMetaMsg->contLen);
pMetaMsg->numOfColumns = htons(pMetaMsg->numOfColumns);
- if (pMetaMsg->sid < 0 || pMetaMsg->vgroup.numOfEps < 0) {
-   tscError("invalid meter vgId:%d, sid%d", pMetaMsg->vgroup.numOfEps, pMetaMsg->sid);
+ if ((pMetaMsg->tableType != TSDB_SUPER_TABLE) &&
+     (pMetaMsg->sid <= 0 || pMetaMsg->vgroup.vgId < 2 || pMetaMsg->vgroup.numOfEps <= 0)) {
+   tscError("invalid value in table numOfEps:%d, vgId:%d tid:%d, name:%s", pMetaMsg->vgroup.numOfEps, pMetaMsg->vgroup.vgId,
+            pMetaMsg->sid, pMetaMsg->tableId);
return TSDB_CODE_TSC_INVALID_VALUE;
}
@ -1708,7 +1718,7 @@ int tscProcessTableMetaRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
assert(pTableMetaInfo->pTableMeta == NULL);
- pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscCacheHandle, pTableMetaInfo->name,
+ pTableMetaInfo->pTableMeta = (STableMeta *) taosCachePut(tscMetaCache, pTableMetaInfo->name,
strlen(pTableMetaInfo->name), pTableMeta, size, tsTableMetaKeepTimer * 1000);
// todo handle out of memory case
@ -1820,7 +1830,7 @@ int tscProcessMultiMeterMetaRsp(SSqlObj *pSql) {
// int32_t size = (int32_t)(rsp - ((char *)pMeta)); // Consistent with STableMeta in cache
//
// pMeta->index = 0;
- //    (void)taosCachePut(tscCacheHandle, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
+ //    (void)taosCachePut(tscMetaCache, pMeta->tableId, (char *)pMeta, size, tsTableMetaKeepTimer);
// }
}
@ -1907,12 +1917,14 @@ int tscProcessShowRsp(SSqlObj *pSql) {
key[0] = pCmd->msgType + 'a';
strcpy(key + 1, "showlist");
taosCacheRelease(tscCacheHandle, (void *)&(pTableMetaInfo->pTableMeta), false);
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void *)&(pTableMetaInfo->pTableMeta), false);
}
size_t size = 0;
STableMeta* pTableMeta = tscCreateTableMetaFromMsg(pMetaMsg, &size);
pTableMetaInfo->pTableMeta = taosCachePut(tscCacheHandle, key, strlen(key), (char *)pTableMeta, size,
pTableMetaInfo->pTableMeta = taosCachePut(tscMetaCache, key, strlen(key), (char *)pTableMeta, size,
tsTableMetaKeepTimer * 1000);
SSchema *pTableSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta);
@ -1943,6 +1955,7 @@ int tscProcessShowRsp(SSqlObj *pSql) {
return 0;
}
// TODO: potential multithreading problem
static void createHBObj(STscObj* pObj) {
if (pObj->pHb != NULL) {
return;
@ -1970,10 +1983,11 @@ static void createHBObj(STscObj* pObj) {
pSql->param = pObj;
pSql->pTscObj = pObj;
pSql->signature = pSql;
pObj->pHb = pSql;
tscAddSubqueryInfo(&pObj->pHb->cmd);
tscDebug("%p HB is allocated, pObj:%p", pObj->pHb, pObj);
registerSqlObj(pSql);
tscDebug("%p HB is allocated, pObj:%p", pSql, pObj);
pObj->pHb = pSql;
}
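Note the ordering in createHBObj after this change: the heartbeat SSqlObj is fully set up and registered first, and pObj->pHb is assigned last, so a concurrent reader of pObj->pHb never observes a half-built object. The publish-last idiom in miniature (an illustrative sketch, not the TDengine code; a production version would use an atomic or release-fenced store for the final assignment):

struct Work;
typedef struct Obj  { struct Work *hb; } Obj;
typedef struct Work { int ready; } Work;

/* Illustrative sketch of publish-after-init ordering. */
static void publish_hb(Obj *o, Work *w) {
  w->ready = 1;   /* finish every bit of initialization first */
  o->hb    = w;   /* ...then publish; readers see it whole    */
}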
int tscProcessConnectRsp(SSqlObj *pSql) {
@ -1999,7 +2013,6 @@ int tscProcessConnectRsp(SSqlObj *pSql) {
pObj->connId = htonl(pConnect->connId);
createHBObj(pObj);
taosTmrReset(tscProcessActivityTimer, tsShellActivityTimer * 500, pObj, tscTmr, &pObj->pTimer);
return 0;
@ -2013,15 +2026,16 @@ int tscProcessUseDbRsp(SSqlObj *pSql) {
return 0;
}
int tscProcessDropDbRsp(SSqlObj *UNUSED_PARAM(pSql)) {
taosCacheEmpty(tscCacheHandle);
int tscProcessDropDbRsp(SSqlObj *pSql) {
pSql->pTscObj->db[0] = 0;
taosCacheEmpty(tscMetaCache);
return 0;
}
int tscProcessDropTableRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
@ -2034,10 +2048,10 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
* instead.
*/
tscDebug("%p force release table meta after drop table:%s", pSql, pTableMetaInfo->name);
taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);
if (pTableMetaInfo->pTableMeta) {
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
}
return 0;
@ -2046,21 +2060,21 @@ int tscProcessDropTableRsp(SSqlObj *pSql) {
int tscProcessAlterTableMsgRsp(SSqlObj *pSql) {
STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, 0, 0);
STableMeta *pTableMeta = taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
STableMeta *pTableMeta = taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMeta == NULL) { /* not in cache, abort */
return 0;
}
tscDebug("%p force release metermeta in cache after alter-table: %s", pSql, pTableMetaInfo->name);
taosCacheRelease(tscCacheHandle, (void **)&pTableMeta, true);
taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true);
if (pTableMetaInfo->pTableMeta) {
bool isSuperTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo);
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
if (isSuperTable) { // if it is a super table, reset whole query cache
tscDebug("%p reset query cache since table:%s is stable", pSql, pTableMetaInfo->name);
taosCacheEmpty(tscCacheHandle);
taosCacheEmpty(tscMetaCache);
}
}
@ -2071,6 +2085,9 @@ int tscProcessAlterDbMsgRsp(SSqlObj *pSql) {
UNUSED(pSql);
return 0;
}
int tscProcessShowCreateRsp(SSqlObj *pSql) {
return tscLocalResultCommonBuilder(pSql, 1);
}
int tscProcessQueryRsp(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
@ -2089,6 +2106,10 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) {
SSqlCmd *pCmd = &pSql->cmd;
SRetrieveTableRsp *pRetrieve = (SRetrieveTableRsp *)pRes->pRsp;
if (pRetrieve == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
return pRes->code;
}
pRes->numOfRows = htonl(pRetrieve->numOfRows);
pRes->precision = htons(pRetrieve->precision);
@ -2141,6 +2162,8 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
pNew->signature = pNew;
pNew->cmd.command = TSDB_SQL_META;
registerSqlObj(pNew);
tscAddSubqueryInfo(&pNew->cmd);
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
@ -2157,8 +2180,7 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf
assert(pNew->cmd.numOfClause == 1 && pNewQueryInfo->numOfTables == 1);
tstrncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, sizeof(pNewMeterMetaInfo->name));
memcpy(pNew->cmd.payload, pSql->cmd.payload, pSql->cmd.payloadLen); // tag information if the table does not exist.
pNew->cmd.payloadLen = pSql->cmd.payloadLen;
memcpy(&pNew->cmd.tagData, &pSql->cmd.tagData, sizeof(pSql->cmd.tagData));
tscDebug("%p new pSqlObj:%p to get tableMeta, auto create:%d", pSql, pNew, pNew->cmd.autoCreated);
pNew->fp = tscTableMetaCallBack;
@ -2177,10 +2199,10 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) {
// If this STableMetaInfo owns a table meta, release it first
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), false);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), false);
}
pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscCacheHandle, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
pTableMetaInfo->pTableMeta = (STableMeta *)taosCacheAcquireByKey(tscMetaCache, pTableMetaInfo->name, strlen(pTableMetaInfo->name));
if (pTableMetaInfo->pTableMeta != NULL) {
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
tscDebug("%p retrieve table Meta from cache, the number of columns:%d, numOfTags:%d, %p", pSql, tinfo.numOfColumns,
@ -2215,7 +2237,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) {
tscGetNumOfTags(pTableMeta), tscGetNumOfColumns(pTableMeta), pTableMeta->id.uid, pTableMeta);
}
taosCacheRelease(tscCacheHandle, (void **)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true);
return getTableMetaFromMgmt(pSql, pTableMetaInfo);
}
@ -2245,7 +2267,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
pNew->signature = pNew;
pNew->cmd.command = TSDB_SQL_STABLEVGROUP;
// TODO TEST IT
SQueryInfo *pNewQueryInfo = tscGetQueryInfoDetailSafely(&pNew->cmd, 0);
if (pNewQueryInfo == NULL) {
tscFreeSqlObj(pNew);
@ -2255,7 +2278,7 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, clauseIndex);
for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) {
STableMetaInfo *pMInfo = tscGetMetaInfo(pQueryInfo, i);
STableMeta *pTableMeta = taosCacheAcquireByData(tscCacheHandle, pMInfo->pTableMeta);
STableMeta *pTableMeta = taosCacheAcquireByData(tscMetaCache, pMInfo->pTableMeta);
tscAddTableMetaInfo(pNewQueryInfo, pMInfo->name, pTableMeta, NULL, pMInfo->tagColList);
}
@ -2265,6 +2288,8 @@ int tscGetSTableVgroupInfo(SSqlObj *pSql, int32_t clauseIndex) {
}
pNewQueryInfo->numOfTables = pQueryInfo->numOfTables;
registerSqlObj(pNew);
tscDebug("%p new sqlObj:%p to get vgroupInfo, numOfTables:%d", pSql, pNew, pNewQueryInfo->numOfTables);
pNew->fp = tscTableMetaCallBack;
@ -2342,6 +2367,10 @@ void tscInitMsgsFp() {
tscProcessMsgRsp[TSDB_SQL_ALTER_TABLE] = tscProcessAlterTableMsgRsp;
tscProcessMsgRsp[TSDB_SQL_ALTER_DB] = tscProcessAlterDbMsgRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp;
tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp;
tscKeepConn[TSDB_SQL_SHOW] = 1;
tscKeepConn[TSDB_SQL_RETRIEVE] = 1;
tscKeepConn[TSDB_SQL_SELECT] = 1;

View File

@ -26,6 +26,7 @@
#include "tsclient.h"
#include "ttokendef.h"
#include "tutil.h"
#include "tscProfile.h"
static bool validImpl(const char* str, size_t maxsize) {
if (str == NULL) {
@ -100,6 +101,8 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
}
pObj->signature = pObj;
pObj->pDnodeConn = pDnodeConn;
T_REF_INIT_VAL(pObj, 1);
tstrncpy(pObj->user, user, sizeof(pObj->user));
secretEncryptLen = MIN(secretEncryptLen, sizeof(pObj->pass));
@ -132,20 +135,15 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
return NULL;
}
pSql->pTscObj = pObj;
pSql->pTscObj = pObj;
pSql->signature = pSql;
pSql->maxRetry = TSDB_MAX_REPLICA;
tsem_init(&pSql->rspSem, 0, 0);
pObj->pDnodeConn = pDnodeConn;
pSql->fp = fp;
pSql->param = param;
if (taos != NULL) {
*taos = pObj;
}
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->fp = fp;
pSql->param = param;
pSql->cmd.command = TSDB_SQL_CONNECT;
tsem_init(&pSql->rspSem, 0, 0);
if (TSDB_CODE_SUCCESS != tscAllocPayload(&pSql->cmd, TSDB_DEFAULT_PAYLOAD_SIZE)) {
terrno = TSDB_CODE_TSC_OUT_OF_MEMORY;
rpcClose(pDnodeConn);
@ -154,7 +152,13 @@ SSqlObj *taosConnectImpl(const char *ip, const char *user, const char *pass, con
return NULL;
}
if (taos != NULL) {
*taos = pObj;
}
registerSqlObj(pSql);
tsInsertHeadSize = sizeof(SMsgDesc) + sizeof(SSubmitMsg);
return pSql;
}
@ -229,13 +233,21 @@ TAOS *taos_connect_c(const char *ip, uint8_t ipLen, const char *user, uint8_t us
return taos_connect(ipBuf, userBuf, passBuf, dbBuf, port);
}
static void asyncConnCallback(void *param, TAOS_RES *tres, int code) {
SSqlObj *pSql = (SSqlObj *) tres;
assert(pSql != NULL);
pSql->fetchFp(pSql->param, tres, code);
}
TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
void *param, void **taos) {
SSqlObj* pSql = taosConnectImpl(ip, user, pass, NULL, db, port, fp, param, taos);
SSqlObj* pSql = taosConnectImpl(ip, user, pass, NULL, db, port, asyncConnCallback, param, taos);
if (pSql == NULL) {
return NULL;
}
pSql->fetchFp = fp;
pSql->res.code = tscProcessSql(pSql);
tscDebug("%p DB async connection is opening", taos);
return taos;
@ -248,15 +260,26 @@ void taos_close(TAOS *taos) {
return;
}
if (pObj->pHb != NULL) {
if (pObj->pHb->pRpcCtx != NULL) { // wait for rsp from dnode
rpcCancelRequest(pObj->pHb->pRpcCtx);
SSqlObj* pHb = pObj->pHb;
if (pHb != NULL && atomic_val_compare_exchange_ptr(&pObj->pHb, pHb, 0) == pHb) {
if (pHb->pRpcCtx != NULL) { // wait for rsp from dnode
rpcCancelRequest(pHb->pRpcCtx);
pHb->pRpcCtx = NULL;
}
tscSetFreeHeatBeat(pObj);
tscFreeSqlObj(pObj->pHb);
tscDebug("%p HB is freed", pHb);
taos_free_result(pHb);
}
int32_t ref = T_REF_DEC(pObj);
assert(ref >= 0);
if (ref > 0) {
tscDebug("%p %d remain sqlObjs, not free tscObj and dnodeConn:%p", pObj, ref, pObj->pDnodeConn);
return;
}
tscDebug("%p all sqlObj are freed, free tscObj and close dnodeConn:%p", pObj, pObj->pDnodeConn);
tscCloseTscObj(pObj);
}
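The taos_close path now pairs two idioms: a compare-and-swap so exactly one caller claims and frees the heartbeat object, and a reference count so the connection is destroyed only after the last SSqlObj is gone. A self-contained sketch of the claim-once part, written with C11 atomics instead of the internal atomic_val_compare_exchange_ptr helper (all names illustrative):

#include <stdatomic.h>
#include <stddef.h>

typedef struct Conn {
  _Atomic(void *) hb;   /* heartbeat object, owned by whoever claims it */
} Conn;

/* Returns the heartbeat object if this caller won the race, NULL otherwise. */
static void *claim_hb(Conn *c) {
  void *old = atomic_load(&c->hb);
  while (old != NULL) {
    if (atomic_compare_exchange_weak(&c->hb, &old, NULL)) {
      return old;   /* we own it now: safe to cancel the RPC and free it */
    }
    /* 'old' was refreshed by the failed CAS; retry */
  }
  return NULL;      /* somebody else already claimed it */
}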
@ -451,6 +474,8 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
pCmd->command == TSDB_SQL_TABLE_JOIN_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE ||
pCmd->command == TSDB_SQL_SHOW_CREATE_DATABASE ||
pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_DESCRIBE_TABLE ||
pCmd->command == TSDB_SQL_SERV_STATUS ||
@ -533,57 +558,51 @@ int taos_select_db(TAOS *taos, const char *db) {
}
// send free message to vnode to free qhandle and corresponding resources in vnode
static bool tscKillQueryInVnode(SSqlObj* pSql) {
static UNUSED_FUNC bool tscKillQueryInDnode(SSqlObj* pSql) {
SSqlCmd* pCmd = &pSql->cmd;
SSqlRes* pRes = &pSql->res;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && !tscIsTwoStageSTableQuery(pQueryInfo, 0) &&
(pCmd->command == TSDB_SQL_SELECT ||
pCmd->command == TSDB_SQL_SHOW ||
pCmd->command == TSDB_SQL_RETRIEVE ||
pCmd->command == TSDB_SQL_FETCH) &&
(pSql->pStream == NULL && pTableMetaInfo->pTableMeta != NULL)) {
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscDebug("%p send msg to dnode to free qhandle ASAP, command:%s, ", pSql, sqlCmd[pCmd->command]);
tscProcessSql(pSql);
if (pRes == NULL || pRes->qhandle == 0) {
return true;
}
return false;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
if ((pQueryInfo == NULL) || tscIsTwoStageSTableQuery(pQueryInfo, 0)) {
return true;
}
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
tscRemoveFromSqlList(pSql);
int32_t cmd = pCmd->command;
if (pRes->code == TSDB_CODE_SUCCESS && pRes->completed == false && pSql->pStream == NULL && (pTableMetaInfo->pTableMeta != NULL) &&
(cmd == TSDB_SQL_SELECT ||
cmd == TSDB_SQL_SHOW ||
cmd == TSDB_SQL_RETRIEVE ||
cmd == TSDB_SQL_FETCH)) {
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
pCmd->command = (pCmd->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH;
tscDebug("%p send msg to dnode to free qhandle ASAP before free sqlObj, command:%s", pSql, sqlCmd[pCmd->command]);
tscProcessSql(pSql);
return false;
}
return true;
}
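tscKillQueryInDnode, as rewritten, returns true when the object can be freed immediately and false when one last retrieve/fetch (typed TSDB_QUERY_TYPE_FREE_RESOURCE) must first let the dnode release the query handle. The decision, condensed into a standalone predicate with illustrative field names:

#include <stdbool.h>

typedef struct Q {        /* illustrative stand-in for SSqlObj state */
  long qhandle;           /* 0 means nothing is held on the dnode    */
  bool completed;         /* all rows already retrieved              */
  bool twoStage;          /* two-stage super-table query             */
  int  code;              /* 0 on success                            */
} Q;

static bool can_free_now(const Q *q) {
  if (q->qhandle == 0) return true;          /* no server-side handle        */
  if (q->twoStage)     return true;          /* subqueries own their handles */
  return !(q->code == 0 && !q->completed);   /* otherwise free after the trip */
}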
void taos_free_result(TAOS_RES *res) {
SSqlObj *pSql = (SSqlObj *)res;
SSqlObj* pSql = (SSqlObj*) res;
if (pSql == NULL || pSql->signature != pSql) {
tscDebug("%p sqlObj has been freed", pSql);
return;
}
// The semaphore can not be changed while freeing async sub query objects.
SSqlRes *pRes = &pSql->res;
if (pRes == NULL || pRes->qhandle == 0) {
tscFreeSqlObj(pSql);
tscDebug("%p SqlObj is freed by app, qhandle is null", pSql);
tscError("%p already released sqlObj", res);
return;
}
// set freeFlag to 1 in the retrieve message if there are un-retrieved result rows in the node
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo == NULL) {
tscFreeSqlObj(pSql);
tscDebug("%p SqlObj is freed by app", pSql);
return;
}
pQueryInfo->type = TSDB_QUERY_TYPE_FREE_RESOURCE;
if (!tscKillQueryInVnode(pSql)) {
tscFreeSqlObj(pSql);
tscDebug("%p sqlObj is freed by app", pSql);
bool freeNow = tscKillQueryInDnode(pSql);
if (freeNow) {
tscDebug("%p free sqlObj in cache", pSql);
SSqlObj** p = pSql->self;
taosCacheRelease(tscObjCache, (void**) &p, true);
}
}
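With this version of taos_free_result, the SSqlObj's lifetime belongs to tscObjCache: the caller only drops the cache reference stored in pSql->self, and the cache destructor (tscFreeSqlObjInCache, registered at cache init) performs the real free once the last reference is gone. The ownership shape in miniature, with a hypothetical slot type standing in for the cache internals:

#include <stdlib.h>

typedef struct Slot {            /* hypothetical stand-in for a cache slot */
  int   refs;
  void *item;
  void (*destroy)(void *item);   /* registered once, runs exactly once */
} Slot;

static void slot_release(Slot *s) {
  if (--s->refs == 0) {
    s->destroy(s->item);   /* the only place the object is actually freed */
    free(s);
  }
}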
@ -679,7 +698,10 @@ void taos_stop_query(TAOS_RES *res) {
tscKillSTableQuery(pSql);
} else {
if (pSql->cmd.command < TSDB_SQL_LOCAL) {
rpcCancelRequest(pSql->pRpcCtx);
if (pSql->pRpcCtx != NULL) {
rpcCancelRequest(pSql->pRpcCtx);
pSql->pRpcCtx = NULL;
}
}
}
@ -700,15 +722,15 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields)
switch (fields[i].type) {
case TSDB_DATA_TYPE_TINYINT:
len += sprintf(str + len, "%d", *((char *)row[i]));
len += sprintf(str + len, "%d", *((int8_t *)row[i]));
break;
case TSDB_DATA_TYPE_SMALLINT:
len += sprintf(str + len, "%d", *((short *)row[i]));
len += sprintf(str + len, "%d", *((int16_t *)row[i]));
break;
case TSDB_DATA_TYPE_INT:
len += sprintf(str + len, "%d", *((int *)row[i]));
len += sprintf(str + len, "%d", *((int32_t *)row[i]));
break;
case TSDB_DATA_TYPE_BIGINT:
@ -767,14 +789,17 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
}
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
pSql->pTscObj = taos;
pSql->signature = pSql;
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
pRes->numOfTotal = 0;
pRes->numOfClauseTotal = 0;
tscDebug("%p Valid SQL: %s pObj:%p", pSql, sql, pObj);
int32_t sqlLen = (int32_t)strlen(sql);
@ -810,11 +835,12 @@ int taos_validate_sql(TAOS *taos, const char *sql) {
tsem_wait(&pSql->rspSem);
code = pSql->res.code;
}
if (code != TSDB_CODE_SUCCESS) {
tscDebug("%p Valid SQL result:%d, %s pObj:%p", pSql, code, taos_errstr(taos), pObj);
}
taos_free_result(pSql);
taos_free_result(pSql);
return code;
}
@ -913,34 +939,32 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
SSqlObj* pSql = calloc(1, sizeof(SSqlObj));
pSql->pTscObj = taos;
pSql->signature = pSql;
SSqlRes *pRes = &pSql->res;
pRes->code = 0;
pRes->numOfTotal = 0; // the number of getting table meta from server
pRes->numOfClauseTotal = 0;
pRes->code = 0;
assert(pSql->fp == NULL);
tscDebug("%p tableNameList: %s pObj:%p", pSql, tableNameList, pObj);
int32_t tblListLen = (int32_t)strlen(tableNameList);
if (tblListLen > MAX_TABLE_NAME_LENGTH) {
tscError("%p tableNameList too long, length:%d, maximum allowed:%d", pSql, tblListLen, MAX_TABLE_NAME_LENGTH);
pRes->code = TSDB_CODE_TSC_INVALID_SQL;
taosTFree(pSql);
return pRes->code;
tscFreeSqlObj(pSql);
return TSDB_CODE_TSC_INVALID_SQL;
}
char *str = calloc(1, tblListLen + 1);
if (str == NULL) {
pRes->code = TSDB_CODE_TSC_OUT_OF_MEMORY;
tscError("%p failed to malloc sql string buffer", pSql);
taosTFree(pSql);
return pRes->code;
tscFreeSqlObj(pSql);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
strtolower(str, tableNameList);
pRes->code = (uint8_t)tscParseTblNameList(pSql, str, tblListLen);
int32_t code = (uint8_t) tscParseTblNameList(pSql, str, tblListLen);
/*
* set the qhandle to 0 before returning, in order to erase the qhandle value assigned in the previous successful query.
@ -950,17 +974,17 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) {
pRes->qhandle = 0;
free(str);
if (pRes->code != TSDB_CODE_SUCCESS) {
if (code != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pSql);
return pRes->code;
return code;
}
tscDoQuery(pSql);
tscDebug("%p load multi metermeta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
if (pRes->code != TSDB_CODE_SUCCESS) {
tscPartiallyFreeSqlObj(pSql);
tscDebug("%p load multi table meta result:%d %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj);
if ((code = pRes->code) != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pSql);
}
return pRes->code;
return code;
}

View File

@ -51,7 +51,7 @@ static int64_t tscGetRetryDelayTime(SSqlStream* pStream, int64_t slidingTime, in
int64_t retryDelta = (int64_t)(tsStreamCompRetryDelay * retryRangeFactor);
retryDelta = ((rand() % retryDelta) + tsStreamCompRetryDelay) * 1000L;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
// change to ms
if (prec == TSDB_TIME_PRECISION_MICRO) {
slidingTime = slidingTime / 1000;
@ -87,7 +87,7 @@ static void tscProcessStreamLaunchQuery(SSchedMsg *pMsg) {
// failed to get meter/metric meta, retry in 10sec.
if (code != TSDB_CODE_SUCCESS) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscDebug("%p stream:%p,get metermeta failed, retry in %" PRId64 "ms", pStream->pSql, pStream, retryDelayTime);
tscSetRetryTimer(pStream, pSql, retryDelayTime);
@ -132,15 +132,15 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
}
if (etime > pStream->etime) {
etime = pStream->etime;
} else if (pStream->intervalTimeUnit != 'y' && pStream->intervalTimeUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->intervalTime * pStream->intervalTime;
} else if (pStream->interval.intervalUnit != 'y' && pStream->interval.intervalUnit != 'n') {
etime = pStream->stime + (etime - pStream->stime) / pStream->interval.interval * pStream->interval.interval;
} else {
etime = taosGetIntervalStartTimestamp(etime, pStream->slidingTime, pStream->intervalTime, pStream->slidingTimeUnit, pStream->precision);
etime = taosTimeTruncate(etime, &pStream->interval, pStream->precision);
}
pQueryInfo->window.ekey = etime;
if (pQueryInfo->window.skey >= pQueryInfo->window.ekey) {
int64_t timer = pStream->slidingTime;
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
int64_t timer = pStream->interval.sliding;
if (pStream->interval.intervalUnit == 'y' || pStream->interval.intervalUnit == 'n') {
timer = 86400 * 1000l;
} else if (pStream->precision == TSDB_TIME_PRECISION_MICRO) {
timer /= 1000l;
@ -162,12 +162,12 @@ static void tscProcessStreamTimer(void *handle, void *tmrId) {
static void tscProcessStreamQueryCallback(void *param, TAOS_RES *tres, int numOfRows) {
SSqlStream *pStream = (SSqlStream *)param;
if (tres == NULL || numOfRows < 0) {
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelay = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, query data failed, code:0x%08x, retry in %" PRId64 "ms", pStream->pSql, pStream, numOfRows,
retryDelay);
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pStream->pSql->cmd, 0, 0);
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), true);
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), true);
taosTFree(pTableMetaInfo->vgroupList);
tscSetRetryTimer(pStream, pStream->pSql, retryDelay);
@ -223,7 +223,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
SSqlObj * pSql = (SSqlObj *)res;
if (pSql == NULL || numOfRows < 0) {
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->slidingTime, pStream->precision);
int64_t retryDelayTime = tscGetRetryDelayTime(pStream, pStream->interval.sliding, pStream->precision);
tscError("%p stream:%p, retrieve data failed, code:0x%08x, retry in %" PRId64 "ms", pSql, pStream, numOfRows, retryDelayTime);
tscSetRetryTimer(pStream, pStream->pSql, retryDelayTime);
@ -246,11 +246,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
}
if (!pStream->isProject) {
if (pStream->intervalTimeUnit == 'y' || pStream->intervalTimeUnit == 'n') {
pStream->stime = taosAddNatualInterval(pStream->stime, pStream->slidingTime, pStream->slidingTimeUnit, pStream->precision);
} else {
pStream->stime += pStream->slidingTime;
}
pStream->stime = taosTimeAdd(pStream->stime, pStream->interval.sliding, pStream->interval.slidingUnit, pStream->precision);
}
// actually only one row is returned. this following is not necessary
taos_fetch_rows_a(res, tscProcessStreamRetrieveResult, pStream);
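taosTimeAdd replaces the old two-way branch because calendar units ('n' months, 'y' years) cannot be advanced by adding a fixed number of milliseconds. A simplified UTC-only sketch of why (an assumption-laden illustration, not the TDengine implementation; timegm() is a common but non-standard extension):

#include <time.h>

/* Add n calendar months to a millisecond UTC timestamp (simplified sketch). */
static long long add_months_ms(long long ts_ms, int n) {
  time_t sec = (time_t)(ts_ms / 1000);
  struct tm tmv;
  gmtime_r(&sec, &tmv);
  tmv.tm_mon += n;                  /* timegm() normalizes month overflow */
  return (long long)timegm(&tmv) * 1000 + ts_ms % 1000;
}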
@ -275,7 +271,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf
// release the metric/meter meta information reference, so data in cache can be updated
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs);
pSql->numOfSubs = 0;
@ -310,7 +306,7 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
now + timer, timer, delay, pStream->stime, etime);
} else {
tscDebug("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->intervalTime, pStream->stime - 1);
pStream->stime, timer, delay, pStream->stime - pStream->interval.interval, pStream->stime - 1);
}
pSql->cmd.command = TSDB_SQL_SELECT;
@ -324,12 +320,12 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMaxStreamComputDelay * 1000L : tsMaxStreamComputDelay;
int64_t delayDelta = maxDelay;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
delayDelta = (int64_t)(pStream->slidingTime * tsStreamComputDelayRatio);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
delayDelta = (int64_t)(pStream->interval.sliding * tsStreamComputDelayRatio);
if (delayDelta > maxDelay) {
delayDelta = maxDelay;
}
int64_t remainTimeWindow = pStream->slidingTime - delayDelta;
int64_t remainTimeWindow = pStream->interval.sliding - delayDelta;
if (maxDelay > remainTimeWindow) {
maxDelay = (int64_t)(remainTimeWindow / 1.5f);
}
@ -337,8 +333,8 @@ static int64_t getLaunchTimeDelay(const SSqlStream* pStream) {
int64_t currentDelay = (rand() % maxDelay); // a random number
currentDelay += delayDelta;
if (pStream->intervalTimeUnit != 'n' && pStream->intervalTimeUnit != 'y') {
assert(currentDelay < pStream->slidingTime);
if (pStream->interval.intervalUnit != 'n' && pStream->interval.intervalUnit != 'y') {
assert(currentDelay < pStream->interval.sliding);
}
return currentDelay;
@ -353,7 +349,7 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
* for a projection query, no matter whether data is fetched successfully or not, the next
* launch is issued after more than the sliding time window
*/
timer = pStream->slidingTime;
timer = pStream->interval.sliding;
if (pStream->stime > pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
@ -366,7 +362,8 @@ static void tscSetNextLaunchTimer(SSqlStream *pStream, SSqlObj *pSql) {
return;
}
} else {
int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
int64_t stime = taosTimeTruncate(pStream->stime - 1, &pStream->interval, pStream->precision);
//int64_t stime = taosGetIntervalStartTimestamp(pStream->stime - 1, pStream->interval.interval, pStream->interval.interval, pStream->interval.intervalUnit, pStream->precision);
if (stime >= pStream->etime) {
tscDebug("%p stream:%p, stime:%" PRId64 " is larger than end time: %" PRId64 ", stop the stream", pStream->pSql, pStream,
pStream->stime, pStream->etime);
@ -400,43 +397,43 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->intervalTime < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.interval < minIntervalTime) {
tscWarn("%p stream:%p, original sample interval:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
(int64_t)pQueryInfo->interval.interval, minIntervalTime);
pQueryInfo->interval.interval = minIntervalTime;
}
pStream->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pStream->intervalTime = pQueryInfo->intervalTime; // it shall be derived from sql string
pStream->interval.intervalUnit = pQueryInfo->interval.intervalUnit;
pStream->interval.interval = pQueryInfo->interval.interval; // it shall be derived from sql string
if (pQueryInfo->slidingTime <= 0) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->slidingTimeUnit = pQueryInfo->intervalTimeUnit;
if (pQueryInfo->interval.sliding <= 0) {
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
pQueryInfo->interval.slidingUnit = pQueryInfo->interval.intervalUnit;
}
int64_t minSlidingTime =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsMinSlidingTime * 1000L : tsMinSlidingTime;
if (pQueryInfo->intervalTimeUnit != 'n' && pQueryInfo->intervalTimeUnit != 'y' && pQueryInfo->slidingTime < minSlidingTime) {
if (pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit!= 'y' && pQueryInfo->interval.sliding < minSlidingTime) {
tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
pQueryInfo->interval.sliding, minSlidingTime);
pQueryInfo->slidingTime = minSlidingTime;
pQueryInfo->interval.sliding = minSlidingTime;
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
if (pQueryInfo->interval.sliding > pQueryInfo->interval.interval) {
tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, pQueryInfo->intervalTime);
pQueryInfo->interval.sliding, pQueryInfo->interval.interval);
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
pQueryInfo->interval.sliding = pQueryInfo->interval.interval;
}
pStream->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pStream->slidingTime = pQueryInfo->slidingTime;
pStream->interval.slidingUnit = pQueryInfo->interval.slidingUnit;
pStream->interval.sliding = pQueryInfo->interval.sliding;
if (pStream->isProject) {
pQueryInfo->intervalTime = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->slidingTime = 0;
pQueryInfo->interval.interval = 0; // clear the interval value to avoid the force time window split by query processor
pQueryInfo->interval.sliding = 0;
}
}
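For fixed-duration units, tscSetSlidingWindowInfo enforces three clamps in order: the interval is raised to the minimum, the sliding value defaults to the interval when unset, and sliding is capped at the interval; calendar units ('n'/'y') skip the numeric clamps. The same rules as one compact reference function (types are an illustrative stand-in for the client's interval struct):

typedef struct Interval {   /* illustrative stand-in, not SInterval itself */
  long long interval;
  long long sliding;
  char      unit;           /* 'n' months, 'y' years, anything else fixed-duration */
} Interval;

static void clamp_interval(Interval *iv, long long minInterval, long long minSliding) {
  if (iv->unit == 'n' || iv->unit == 'y') return;   /* calendar units: no clamps */
  if (iv->interval < minInterval) iv->interval = minInterval;
  if (iv->sliding <= 0)           iv->sliding  = iv->interval;
  if (iv->sliding < minSliding)   iv->sliding  = minSliding;
  if (iv->sliding > iv->interval) iv->sliding  = iv->interval;
}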
@ -445,8 +442,8 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
if (pStream->isProject) {
// no data in table, flush all data till now to destination meter, 10sec delay
pStream->intervalTime = tsProjectExecInterval;
pStream->slidingTime = tsProjectExecInterval;
pStream->interval.interval = tsProjectExecInterval;
pStream->interval.sliding = tsProjectExecInterval;
if (stime != 0) { // first projection start from the latest event timestamp
assert(stime >= pQueryInfo->window.skey);
@ -456,15 +453,12 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
}
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = pQueryInfo->window.skey;
if (stime == INT64_MIN) {
stime = (int64_t)taosGetTimestamp(pStream->precision);
stime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
stime = taosGetIntervalStartTimestamp(stime - 1, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
if (pQueryInfo->window.skey != INT64_MIN) {
stime = pQueryInfo->window.skey;
}
stime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
} else {
int64_t newStime = taosGetIntervalStartTimestamp(stime, pStream->intervalTime, pStream->intervalTime, pStream->intervalTimeUnit, pStream->precision);
int64_t newStime = taosTimeTruncate(stime, &pStream->interval, pStream->precision);
if (newStime != stime) {
tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
@ -476,8 +470,10 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
}
static int64_t tscGetLaunchTimestamp(const SSqlStream *pStream) {
int64_t timer = pStream->stime - taosGetTimestamp(pStream->precision);
if (timer < 0) timer = 0;
int64_t timer = 0, now = taosGetTimestamp(pStream->precision);
if (pStream->stime > now) {
timer = pStream->stime - now;
}
int64_t startDelay =
(pStream->precision == TSDB_TIME_PRECISION_MICRO) ? tsStreamCompStartDelay * 1000L : tsStreamCompStartDelay;
@ -514,6 +510,8 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
return;
}
registerSqlObj(pSql);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
@ -534,7 +532,7 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) {
taosTmrReset(tscProcessStreamTimer, (int32_t)starttime, pStream, tscTmr, &pStream->pTimer);
tscDebug("%p stream:%p is opened, query on:%s, interval:%" PRId64 ", sliding:%" PRId64 ", first launched in:%" PRId64 ", sql:%s", pSql,
pStream, pTableMetaInfo->name, pStream->intervalTime, pStream->slidingTime, starttime, pSql->sqlstr);
pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr);
}
TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row),
@ -567,6 +565,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
pStream->pSql = pSql;
pSql->pStream = pStream;
pSql->param = pStream;
pSql->maxRetry = TSDB_MAX_REPLICA;
pSql->sqlstr = calloc(1, strlen(sqlstr) + 1);
if (pSql->sqlstr == NULL) {
@ -574,6 +573,7 @@ TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *p
tscFreeSqlObj(pSql);
return NULL;
}
strtolower(pSql->sqlstr, sqlstr);
tscDebugL("%p SQL: %s", pSql, pSql->sqlstr);
@ -614,10 +614,9 @@ void taos_close_stream(TAOS_STREAM *handle) {
tscDebug("%p stream:%p is closed", pSql, pStream);
// notify CQ to release the pStream object
pStream->fp(pStream->param, NULL, NULL);
tscFreeSqlObj(pSql);
pStream->pSql = NULL;
taos_free_result(pSql);
taosTFree(pStream);
}
}

View File

@ -105,6 +105,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
code = TAOS_SYSTEM_ERROR(errno);
goto fail;
}
tstrncpy(pSub->topic, topic, sizeof(pSub->topic));
pSub->progress = taosArrayInit(32, sizeof(SSubscriptionProgress));
if (pSub->progress == NULL) {
@ -119,6 +120,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto fail;
}
pSql->signature = pSql;
pSql->pTscObj = pObj;
pSql->pSubscription = pSub;
@ -142,6 +144,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
code = TSDB_CODE_TSC_OUT_OF_MEMORY;
goto fail;
}
strtolower(pSql->sqlstr, pSql->sqlstr);
pRes->qhandle = 0;
pRes->numOfRows = 1;
@ -152,11 +155,14 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
goto fail;
}
registerSqlObj(pSql);
code = tsParseSql(pSql, false);
if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) {
tsem_wait(&pSub->sem);
code = pSql->res.code;
}
if (code != TSDB_CODE_SUCCESS) {
line = __LINE__;
goto fail;
@ -173,9 +179,15 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char*
fail:
tscError("tscCreateSubscription failed at line %d, reason: %s", line, tstrerror(code));
if (pSql != NULL) {
tscFreeSqlObj(pSql);
if (pSql->self != NULL) {
taos_free_result(pSql);
} else {
tscFreeSqlObj(pSql);
}
pSql = NULL;
}
if (pSub != NULL) {
taosArrayDestroy(pSub->progress);
tsem_destroy(&pSub->sem);
@ -494,6 +506,10 @@ void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress) {
}
}
if (pSub->pSql != NULL) {
taos_free_result(pSub->pSql);
}
tscFreeSqlObj(pSub->pSql);
taosArrayDestroy(pSub->progress);
tsem_destroy(&pSub->sem);

View File

@ -23,6 +23,7 @@
#include "tscSubquery.h"
#include "tschemautil.h"
#include "tsclient.h"
#include "tscSubquery.h"
typedef struct SInsertSupporter {
SSubqueryState* pState;
@ -92,7 +93,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf);
#ifdef _DEBUG_VIEW
tscInfo("%" PRId64 ", tags:%d \t %" PRId64 ", tags:%d", elem1.ts, elem1.tag, elem2.ts, elem2.tag);
tscInfo("%" PRId64 ", tags:%"PRId64" \t %" PRId64 ", tags:%"PRId64, elem1.ts, elem1.tag.i64Key, elem2.ts, elem2.tag.i64Key);
#endif
int32_t res = tVariantCompare(&elem1.tag, &elem2.tag);
@ -113,7 +114,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ
* in case of a super table (stable) query, limit/offset is not applied here; it is applied to the
* final result set, which is acquired after the secondary merge in the client.
*/
if (pLimit->offset == 0 || pQueryInfo->intervalTime > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) {
if (win->skey > elem1.ts) {
win->skey = elem1.ts;
}
@ -178,10 +179,7 @@ SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, in
pSupporter->subqueryIndex = index;
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex);
pSupporter->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pSupporter->slidingTime = pQueryInfo->slidingTimeUnit;
pSupporter->intervalTime = pQueryInfo->intervalTime;
pSupporter->slidingTime = pQueryInfo->slidingTime;
memcpy(&pSupporter->interval, &pQueryInfo->interval, sizeof(pSupporter->interval));
pSupporter->limit = pQueryInfo->limit;
STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, index);
@ -281,7 +279,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
tscDebug("%p subIndex: %d, no need to launch query, ignore it", pSql, i);
tscDestroyJoinSupporter(pSupporter);
tscFreeSqlObj(pPrevSub);
taos_free_result(pPrevSub);
pSql->pSubs[i] = NULL;
continue;
@ -301,28 +299,24 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) {
success = false;
break;
}
tscClearSubqueryInfo(&pNew->cmd);
pSql->pSubs[i] = pNew;
SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0);
pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of timestamp comp-z data to the new created object
// set the second stage sub query for join process
TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE);
pQueryInfo->intervalTimeUnit = pSupporter->intervalTimeUnit;
pQueryInfo->slidingTimeUnit = pSupporter->slidingTimeUnit;
pQueryInfo->intervalTime = pSupporter->intervalTime;
pQueryInfo->slidingTime = pSupporter->slidingTime;
pQueryInfo->groupbyExpr = pSupporter->groupbyExpr;
memcpy(&pQueryInfo->interval, &pSupporter->interval, sizeof(pQueryInfo->interval));
tscTagCondCopy(&pQueryInfo->tagCond, &pSupporter->tagCond);
pQueryInfo->colList = pSupporter->colList;
pQueryInfo->exprList = pSupporter->exprList;
pQueryInfo->fieldsInfo = pSupporter->fieldsInfo;
pSupporter->exprList = NULL;
pSupporter->colList = NULL;
memset(&pSupporter->fieldsInfo, 0, sizeof(SFieldInfo));
@ -1214,14 +1208,13 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter
}
pNew->cmd.numOfCols = 0;
pNewQueryInfo->intervalTime = 0;
pNewQueryInfo->interval.interval = 0;
pSupporter->limit = pNewQueryInfo->limit;
pNewQueryInfo->limit.limit = -1;
pNewQueryInfo->limit.offset = 0;
// backup the data and clear it in the sqlcmd object
pSupporter->groupbyExpr = pNewQueryInfo->groupbyExpr;
memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr));
tscInitQueryInfo(pNewQueryInfo);
@ -1391,7 +1384,7 @@ static void doCleanupSubqueries(SSqlObj *pSql, int32_t numOfSubs, SSubqueryState
taosTFree(pSupport->localBuffer);
taosTFree(pSupport);
tscFreeSqlObj(pSub);
taos_free_result(pSub);
}
free(pState);
@ -1523,9 +1516,9 @@ static void tscFreeSubSqlObj(SRetrieveSupport *trsupport, SSqlObj *pSql) {
SSqlObj *pParentSql = trsupport->pParentSql;
assert(pSql == pParentSql->pSubs[index]);
pParentSql->pSubs[index] = NULL;
taos_free_result(pSql);
// pParentSql->pSubs[index] = NULL;
//
// taos_free_result(pSql);
taosTFree(trsupport->localBuffer);
taosTFree(trsupport);
}
@ -1739,10 +1732,6 @@ static void tscRetrieveFromDnodeCallBack(void *param, TAOS_RES *tres, int numOfR
assert(tres != NULL);
SSqlObj *pSql = (SSqlObj *)tres;
// if (pSql == NULL) { // sql object has been released in error process, return immediately
// tscDebug("%p subquery has been released, idx:%d, abort", pParentSql, idx);
// return;
// }
SSubqueryState* pState = trsupport->pState;
assert(pState->numOfRemain <= pState->numOfTotal && pState->numOfRemain >= 0 && pParentSql->numOfSubs == pState->numOfTotal);
@ -1918,9 +1907,7 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows)
pParentObj->res.code = pSql->res.code;
}
taos_free_result(tres);
taosTFree(pSupporter);
if (atomic_sub_fetch_32(&pState->numOfRemain, 1) > 0) {
return;
}
@ -1964,28 +1951,27 @@ int32_t tscHandleInsertRetry(SSqlObj* pSql) {
}
int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
SSqlRes *pRes = &pSql->res;
SSqlCmd *pCmd = &pSql->cmd;
size_t size = taosArrayGetSize(pCmd->pDataBlocks);
assert(size > 0);
SSqlRes *pRes = &pSql->res;
pSql->numOfSubs = (uint16_t)taosArrayGetSize(pCmd->pDataBlocks);
assert(pSql->numOfSubs > 0);
pRes->code = TSDB_CODE_SUCCESS;
// the number of already initialized subqueries
int32_t numOfSub = 0;
pSql->numOfSubs = (uint16_t)size;
pSql->pSubs = calloc(size, POINTER_BYTES);
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
pState->numOfTotal = pSql->numOfSubs;
pState->numOfRemain = pSql->numOfSubs;
pSql->pSubs = calloc(pSql->numOfSubs, POINTER_BYTES);
if (pSql->pSubs == NULL) {
goto _error;
}
tscDebug("%p submit data to %" PRIzu " vnode(s)", pSql, size);
SSubqueryState *pState = calloc(1, sizeof(SSubqueryState));
pState->numOfTotal = pSql->numOfSubs;
pState->numOfRemain = pSql->numOfSubs;
pRes->code = TSDB_CODE_SUCCESS;
tscDebug("%p submit data to %d vnode(s)", pSql, pSql->numOfSubs);
while(numOfSub < pSql->numOfSubs) {
SInsertSupporter* pSupporter = calloc(1, sizeof(SInsertSupporter));
@ -2016,8 +2002,8 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
tscDebug("%p sub:%p create subObj success. orderOfSub:%d", pSql, pNew, numOfSub);
numOfSub++;
} else {
tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%" PRIzu ", code:%s", pSql, numOfSub,
size, tstrerror(pRes->code));
tscDebug("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%s", pSql, numOfSub,
pSql->numOfSubs, tstrerror(pRes->code));
goto _error;
}
}
@ -2040,11 +2026,6 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) {
return TSDB_CODE_SUCCESS;
_error:
for(int32_t j = 0; j < numOfSub; ++j) {
taosTFree(pSql->pSubs[j]->param);
taos_free_result(pSql->pSubs[j]);
}
taosTFree(pState);
return TSDB_CODE_TSC_OUT_OF_MEMORY;
}
@ -2208,7 +2189,7 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
}
// primary key column cannot be null in interval query, no need to check
if (i == 0 && pQueryInfo->intervalTime > 0) {
if (i == 0 && pQueryInfo->interval.interval > 0) {
continue;
}
@ -2220,16 +2201,15 @@ void **doSetResultRowData(SSqlObj *pSql, bool finalResult) {
// calculate the result from several other columns
if (pSup->pArithExprInfo != NULL) {
if (pRes->pArithSup == NULL) {
SArithmeticSupport *sas = (SArithmeticSupport *) calloc(1, sizeof(SArithmeticSupport));
sas->offset = 0;
sas->pArithExpr = pSup->pArithExprInfo;
sas->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
sas->exprList = pQueryInfo->exprList;
sas->data = calloc(sas->numOfCols, POINTER_BYTES);
pRes->pArithSup = sas;
pRes->pArithSup = (SArithmeticSupport*)calloc(1, sizeof(SArithmeticSupport));
}
pRes->pArithSup->offset = 0;
pRes->pArithSup->pArithExpr = pSup->pArithExprInfo;
pRes->pArithSup->numOfCols = (int32_t)tscSqlExprNumOfExprs(pQueryInfo);
pRes->pArithSup->exprList = pQueryInfo->exprList;
pRes->pArithSup->data = calloc(pRes->pArithSup->numOfCols, POINTER_BYTES);
if (pRes->buffer[i] == NULL) {
TAOS_FIELD* field = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
pRes->buffer[i] = malloc(field->bytes);

View File

@ -30,7 +30,8 @@
#include "tlocale.h"
// global, not configurable
void * tscCacheHandle;
SCacheObj* tscMetaCache;
SCacheObj* tscObjCache;
void * tscTmr;
void * tscQhandle;
void * tscCheckDiskUsageTmr;
@ -121,11 +122,8 @@ void taos_init_imp(void) {
tscInitMsgsFp();
int queueSize = tsMaxConnections*2;
if (tscEmbedded == 0) {
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / 2.0);
} else {
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / 4.0);
}
double factor = (tscEmbedded == 0)? 2.0:4.0;
tscNumOfThreads = (int)(tsNumOfCores * tsNumOfThreadsPerCore / factor);
if (tscNumOfThreads < 2) tscNumOfThreads = 2;
@ -139,13 +137,11 @@ void taos_init_imp(void) {
if(0 == tscEmbedded){
taosTmrReset(tscCheckDiskUsage, 10, NULL, tscTmr, &tscCheckDiskUsageTmr);
}
int64_t refreshTime = tsTableMetaKeepTimer;
refreshTime = refreshTime > 10 ? 10 : refreshTime;
refreshTime = refreshTime < 10 ? 10 : refreshTime;
if (tscCacheHandle == NULL) {
tscCacheHandle = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta");
int64_t refreshTime = 10; // 10 seconds by default
if (tscMetaCache == NULL) {
tscMetaCache = taosCacheInit(TSDB_DATA_TYPE_BINARY, refreshTime, false, NULL, "tableMeta");
tscObjCache = taosCacheInit(TSDB_CACHE_PTR_KEY, refreshTime / 2, false, tscFreeSqlObjInCache, "sqlObj");
}
tscDebug("client is initialized successfully");
@ -154,9 +150,12 @@ void taos_init_imp(void) {
void taos_init() { pthread_once(&tscinit, taos_init_imp); }
void taos_cleanup() {
if (tscCacheHandle != NULL) {
taosCacheCleanup(tscCacheHandle);
tscCacheHandle = NULL;
if (tscMetaCache != NULL) {
taosCacheCleanup(tscMetaCache);
tscMetaCache = NULL;
taosCacheCleanup(tscObjCache);
tscObjCache = NULL;
}
if (tscQhandle != NULL) {

View File

@ -252,11 +252,11 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
if (pRes->tsrow == NULL) {
int32_t numOfOutput = pQueryInfo->fieldsInfo.numOfOutput;
pRes->numOfCols = numOfOutput;
pRes->tsrow = calloc(numOfOutput, POINTER_BYTES);
pRes->length = calloc(numOfOutput, sizeof(int32_t));
pRes->buffer = calloc(numOfOutput, POINTER_BYTES);
// not enough memory
if (pRes->tsrow == NULL || (pRes->buffer == NULL && pRes->numOfCols > 0)) {
taosTFree(pRes->tsrow);
@ -268,7 +268,7 @@ int32_t tscCreateResPointerInfo(SSqlRes* pRes, SQueryInfo* pQueryInfo) {
return TSDB_CODE_SUCCESS;
}
void tscDestroyResPointerInfo(SSqlRes* pRes) {
static void tscDestroyResPointerInfo(SSqlRes* pRes) {
if (pRes->buffer != NULL) { // free all buffers containing the multibyte string
for (int i = 0; i < pRes->numOfCols; i++) {
taosTFree(pRes->buffer[i]);
@ -344,8 +344,6 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
}
SSqlCmd* pCmd = &pSql->cmd;
STscObj* pObj = pSql->pTscObj;
int32_t cmd = pCmd->command;
if (cmd < TSDB_SQL_INSERT || cmd == TSDB_SQL_RETRIEVE_LOCALMERGE || cmd == TSDB_SQL_RETRIEVE_EMPTY_RESULT ||
cmd == TSDB_SQL_TABLE_JOIN_RETRIEVE) {
@ -353,26 +351,71 @@ void tscPartiallyFreeSqlObj(SSqlObj* pSql) {
}
// pSql->sqlstr will be used by tscBuildQueryStreamDesc
if (pObj->signature == pObj) {
// if (pObj->signature == pObj) {
//pthread_mutex_lock(&pObj->mutex);
taosTFree(pSql->sqlstr);
//pthread_mutex_unlock(&pObj->mutex);
}
// }
tscFreeSqlResult(pSql);
taosTFree(pSql->pSubs);
pSql->numOfSubs = 0;
pSql->self = 0;
tscResetSqlCmdObj(pCmd, false);
}
static UNUSED_FUNC void tscFreeSubobj(SSqlObj* pSql) {
if (pSql->numOfSubs == 0) {
return;
}
tscDebug("%p start to free sub SqlObj, numOfSub:%d", pSql, pSql->numOfSubs);
for(int32_t i = 0; i < pSql->numOfSubs; ++i) {
tscDebug("%p free sub SqlObj:%p, index:%d", pSql, pSql->pSubs[i], i);
taos_free_result(pSql->pSubs[i]);
pSql->pSubs[i] = NULL;
}
pSql->numOfSubs = 0;
}
/**
* The free operation removes pSql from the hash table, so freeing it again from
* processMsgFromServer is impossible in this case: its attempt to retrieve pSqlObj
* from the hash table will simply fail.
*
* @param pSql
*/
void tscFreeSqlObjInCache(void *pSql) {
assert(pSql != NULL);
SSqlObj** p = (SSqlObj**)pSql;
STscObj* pTscObj = (*p)->pTscObj;
assert((*p)->self != 0 && (*p)->self == (p));
tscFreeSqlObj(*p);
int32_t ref = T_REF_DEC(pTscObj);
assert(ref >= 0);
tscDebug("%p free sqlObj completed, tscObj:%p ref:%d", *p, pTscObj, ref);
if (ref == 0) {
tscDebug("%p all sqlObj freed, free tscObj:%p", *p, pTscObj);
tscCloseTscObj(pTscObj);
}
}
void tscFreeSqlObj(SSqlObj* pSql) {
if (pSql == NULL || pSql->signature != pSql) {
return;
}
tscDebug("%p start to free sql object", pSql);
tscDebug("%p start to free sqlObj", pSql);
tscFreeSubobj(pSql);
tscPartiallyFreeSqlObj(pSql);
pSql->signature = NULL;
@ -399,7 +442,10 @@ void tscDestroyDataBlock(STableDataBlocks* pDataBlock) {
taosTFree(pDataBlock->params);
// release the refcount held for the table meta
taosCacheRelease(tscCacheHandle, (void**)&(pDataBlock->pTableMeta), false);
if (pDataBlock->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pDataBlock->pTableMeta), false);
}
taosTFree(pDataBlock);
}
@ -454,9 +500,12 @@ int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) {
// set the correct table meta object, the table meta has been locked in pDataBlocks, so it must be in the cache
if (pTableMetaInfo->pTableMeta != pDataBlock->pTableMeta) {
tstrncpy(pTableMetaInfo->name, pDataBlock->tableId, sizeof(pTableMetaInfo->name));
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), false);
pTableMetaInfo->pTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pDataBlock->pTableMeta);
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), false);
}
pTableMetaInfo->pTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pDataBlock->pTableMeta);
} else {
assert(strncmp(pTableMetaInfo->name, pDataBlock->tableId, tListLen(pDataBlock->tableId)) == 0);
}
@ -527,7 +576,7 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff
* due to operations such as dropping a database. So here we add the reference count directly instead of invoking
* taosGetDataFromCache, which may return a NULL value.
*/
dataBuf->pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMeta);
dataBuf->pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMeta);
assert(initialSize > 0 && pTableMeta != NULL && dataBuf->pTableMeta != NULL);
*dataBlocks = dataBuf;
@ -718,18 +767,22 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) {
return TSDB_CODE_SUCCESS;
}
// TODO: all subqueries should be freed correctly before closing this connection.
void tscCloseTscObj(STscObj* pObj) {
assert(pObj != NULL);
pObj->signature = NULL;
taosTmrStopA(&(pObj->pTimer));
pthread_mutex_destroy(&pObj->mutex);
void* p = pObj->pDnodeConn;
if (pObj->pDnodeConn != NULL) {
rpcClose(pObj->pDnodeConn);
pObj->pDnodeConn = NULL;
}
tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, pObj->pDnodeConn);
pthread_mutex_destroy(&pObj->mutex);
tscDebug("%p DB connection is closed, dnodeConn:%p", pObj, p);
taosTFree(pObj);
}
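tscCloseTscObj now snapshots pDnodeConn into a local before rpcClose() clears the field, so the final debug line can report the real connection address. The pattern on its own (illustrative names; the saved value is used only for logging, never dereferenced):

#include <stdio.h>
#include <stdlib.h>

/* Illustrative sketch: log the old address after tearing the field down. */
static void shutdown_obj(void **conn) {
  void *p = *conn;    /* snapshot for logging before the field is cleared */
  if (*conn != NULL) {
    free(*conn);
    *conn = NULL;
  }
  printf("connection closed: %p\n", p);   /* log the address, not freed data */
}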
@ -1214,6 +1267,51 @@ static int32_t validateQuoteToken(SStrToken* pToken) {
return TSDB_CODE_SUCCESS;
}
void tscDequoteAndTrimToken(SStrToken* pToken) {
assert(pToken->type == TK_STRING);
uint32_t first = 0, last = pToken->n;
// trim leading spaces
while (first < last) {
char c = pToken->z[first];
if (c != ' ' && c != '\t') {
break;
}
first++;
}
// trim trailing spaces
while (first < last) {
char c = pToken->z[last - 1];
if (c != ' ' && c != '\t') {
break;
}
last--;
}
// there are still at least two characters
if (first < last - 1) {
char c = pToken->z[first];
// dequote
if ((c == '\'' || c == '"') && c == pToken->z[last - 1]) {
first++;
last--;
}
}
// left shift the string and pad spaces
for (uint32_t i = 0; i + first < last; i++) {
pToken->z[i] = pToken->z[first + i];
}
for (uint32_t i = last - first; i < pToken->n; i++) {
pToken->z[i] = ' ';
}
// adjust token length
pToken->n = last - first;
}
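tscDequoteAndTrimToken works entirely in place: trim surrounding spaces and tabs, strip one matching pair of quotes, shift the body left, and blank-pad the tail so the buffer never grows. A runnable replica of the same steps on a plain char buffer (a hypothetical helper, not the TDengine API):

#include <stdio.h>
#include <string.h>

/* Hypothetical helper mirroring the token logic above; returns new length. */
static unsigned dequote_trim(char *z, unsigned n) {
  unsigned first = 0, last = n;
  while (first < last && (z[first] == ' ' || z[first] == '\t')) first++;
  while (first < last && (z[last - 1] == ' ' || z[last - 1] == '\t')) last--;
  if (first + 1 < last && (z[first] == '\'' || z[first] == '"') &&
      z[first] == z[last - 1]) {
    first++;
    last--;
  }
  memmove(z, z + first, last - first);
  memset(z + (last - first), ' ', n - (last - first));
  return last - first;
}

int main(void) {
  char buf[] = "  'abc'  ";
  unsigned n = dequote_trim(buf, (unsigned)strlen(buf));
  printf("[%.*s] len=%u\n", (int)n, buf, n);   /* prints [abc] len=3 */
  return 0;
}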
int32_t tscValidateName(SStrToken* pToken) {
if (pToken->type != TK_STRING && pToken->type != TK_ID) {
return TSDB_CODE_TSC_INVALID_SQL;
@ -1529,8 +1627,8 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) {
pQueryInfo->fieldsInfo.pSupportInfo = taosArrayInit(4, sizeof(SFieldSupInfo));
assert(pQueryInfo->exprList == NULL);
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES);
pQueryInfo->udColumnId = TSDB_UD_COLUMN_INDEX;
}
@ -1552,6 +1650,8 @@ int32_t tscAddSubqueryInfo(SSqlCmd* pCmd) {
}
tscInitQueryInfo(pQueryInfo);
pQueryInfo->window = TSWINDOW_INITIALIZER;
pQueryInfo->msg = pCmd->payload; // pointer to the parent error message buffer
pCmd->pQueryInfo[pCmd->numOfClause++] = pQueryInfo;
@ -1663,7 +1763,10 @@ void tscClearTableMetaInfo(STableMetaInfo* pTableMetaInfo, bool removeFromCache)
return;
}
taosCacheRelease(tscCacheHandle, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
if (pTableMetaInfo->pTableMeta != NULL) {
taosCacheRelease(tscMetaCache, (void**)&(pTableMetaInfo->pTableMeta), removeFromCache);
}
taosTFree(pTableMetaInfo->vgroupList);
tscColumnListDestroy(pTableMetaInfo->tagColList);
@ -1679,6 +1782,16 @@ void tscResetForNextRetrieve(SSqlRes* pRes) {
pRes->numOfRows = 0;
}
void registerSqlObj(SSqlObj* pSql) {
int32_t DEFAULT_LIFE_TIME = 2 * 600 * 1000; // 1200 sec
int32_t ref = T_REF_INC(pSql->pTscObj);
tscDebug("%p add to tscObj:%p, ref:%d", pSql, pSql->pTscObj, ref);
TSDB_CACHE_PTR_TYPE p = (TSDB_CACHE_PTR_TYPE) pSql;
pSql->self = taosCachePut(tscObjCache, &p, sizeof(TSDB_CACHE_PTR_TYPE), &p, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_LIFE_TIME);
}
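registerSqlObj ties an SSqlObj's lifetime to two mechanisms at once: it bumps the owning STscObj's reference count and parks the object in tscObjCache keyed by its own address, with a 1200-second default TTL. Using a pointer value as a cache key, in isolation (an illustrative helper, not the cache API):

#include <inttypes.h>
#include <stdio.h>

/* Illustrative: the object's own address, widened to a fixed-size integer, is the key. */
static uint64_t ptr_key(const void *p) {
  return (uint64_t)(uintptr_t)p;
}

int main(void) {
  int obj = 0;
  printf("cache key for %p is %" PRIu64 "\n", (void *)&obj, ptr_key(&obj));
  return 0;
}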
SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cmd) {
SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj));
if (pNew == NULL) {
@ -1692,6 +1805,8 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
SSqlCmd* pCmd = &pNew->cmd;
pCmd->command = cmd;
pCmd->parseFinished = 1;
pCmd->autoCreated = pSql->cmd.autoCreated;
memcpy(&pCmd->tagData, &pSql->cmd.tagData, sizeof(pCmd->tagData));
if (tscAddSubqueryInfo(pCmd) != TSDB_CODE_SUCCESS) {
tscFreeSqlObj(pNew);
@ -1706,8 +1821,7 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
pNew->sqlstr = strdup(pSql->sqlstr);
if (pNew->sqlstr == NULL) {
tscError("%p new subquery failed", pSql);
free(pNew);
tscFreeSqlObj(pNew);
return NULL;
}
@ -1717,6 +1831,8 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm
STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0);
tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL);
registerSqlObj(pNew);
return pNew;
}
@ -1786,6 +1902,7 @@ static void doSetSqlExprAndResultFieldInfo(SQueryInfo* pQueryInfo, SQueryInfo* p
}
assert(matched);
(void)matched;
}
tscFieldInfoUpdateOffset(pNewQueryInfo);
@ -1835,10 +1952,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex);
pNewQueryInfo->command = pQueryInfo->command;
pNewQueryInfo->intervalTimeUnit = pQueryInfo->intervalTimeUnit;
pNewQueryInfo->slidingTimeUnit = pQueryInfo->slidingTimeUnit;
pNewQueryInfo->intervalTime = pQueryInfo->intervalTime;
pNewQueryInfo->slidingTime = pQueryInfo->slidingTime;
memcpy(&pNewQueryInfo->interval, &pQueryInfo->interval, sizeof(pNewQueryInfo->interval));
pNewQueryInfo->type = pQueryInfo->type;
pNewQueryInfo->window = pQueryInfo->window;
pNewQueryInfo->limit = pQueryInfo->limit;
@ -1909,14 +2023,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
STableMetaInfo* pFinalInfo = NULL;
if (pPrevSql == NULL) {
STableMeta* pTableMeta = taosCacheAcquireByData(tscCacheHandle, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
STableMeta* pTableMeta = taosCacheAcquireByData(tscMetaCache, pTableMetaInfo->pTableMeta); // get by name may failed due to the cache cleanup
assert(pTableMeta != NULL);
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList);
} else { // transfer the ownership of pTableMeta to the newly create sql object.
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, pPrevSql->cmd.clauseIndex, 0);
STableMeta* pPrevTableMeta = taosCacheTransfer(tscCacheHandle, (void**)&pPrevInfo->pTableMeta);
STableMeta* pPrevTableMeta = taosCacheTransfer(tscMetaCache, (void**)&pPrevInfo->pTableMeta);
SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList;
pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList);
@ -1956,6 +2070,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void
tscDebug("%p new sub insertion: %p, vnodeIdx:%d", pSql, pNew, pTableMetaInfo->vgroupIndex);
}
registerSqlObj(pNew);
return pNew;
_error:
@ -2067,6 +2182,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s
return TSDB_CODE_TSC_SQL_SYNTAX_ERROR;
}
int32_t tscInvalidSQLErrMsg(char* msg, const char* additionalInfo, const char* sql) {
const char* msgFormat1 = "invalid SQL: %s";
const char* msgFormat2 = "invalid SQL: \'%s\' (%s)";
@ -2097,11 +2213,6 @@ bool tscHasReachLimitation(SQueryInfo* pQueryInfo, SSqlRes* pRes) {
return (pQueryInfo->clauseLimit > 0 && pRes->numOfClauseTotal >= pQueryInfo->clauseLimit);
}
bool tscResultsetFetchCompleted(TAOS_RES *result) {
SSqlRes* pRes = result;
return pRes->completed;
}
char* tscGetErrorMsgPayload(SSqlCmd* pCmd) { return pCmd->payload; }
/**

View File

@ -78,6 +78,9 @@ enum {
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_LOCALMERGE, "retrieve-localmerge" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_TABLE_JOIN_RETRIEVE, "join-retrieve" )
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_TABLE, "show-create-table")
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SHOW_CREATE_DATABASE, "show-create-database")
/*
* build empty result instead of accessing dnode to fetch result
* reset the client cache

View File

@ -325,8 +325,6 @@ void tdResetKVRowBuilder(SKVRowBuilder *pBuilder);
SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder);
static FORCE_INLINE int tdAddColToKVRow(SKVRowBuilder *pBuilder, int16_t colId, int8_t type, void *value) {
ASSERT(pBuilder->nCols == 0 || colId > pBuilder->pColIdx[pBuilder->nCols - 1].colId);
if (pBuilder->nCols >= pBuilder->tCols) {
pBuilder->tCols *= 2;
pBuilder->pColIdx = (SColIdx *)realloc((void *)(pBuilder->pColIdx), sizeof(SColIdx) * pBuilder->tCols);

View File

@ -104,8 +104,12 @@ extern int32_t tsTelegrafUseFieldNum;
// mqtt
extern int32_t tsEnableMqttModule;
extern char tsMqttBrokerAddress[];
extern char tsMqttBrokerClientId[];
extern char tsMqttHostName[];
extern char tsMqttPort[];
extern char tsMqttUser[];
extern char tsMqttPass[];
extern char tsMqttClientId[];
extern char tsMqttTopic[];
// monitor
extern int32_t tsEnableMonitorModule;
@ -154,6 +158,7 @@ extern char buildinfo[];
// log
extern int32_t tsAsyncLog;
extern int32_t tsNumOfLogLines;
extern int32_t tsLogKeepDays;
extern int32_t dDebugFlag;
extern int32_t vDebugFlag;
extern int32_t mDebugFlag;
@ -169,6 +174,7 @@ extern int32_t rpcDebugFlag;
extern int32_t odbcDebugFlag;
extern int32_t qDebugFlag;
extern int32_t wDebugFlag;
extern int32_t cqDebugFlag;
extern int32_t debugFlag;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)

View File

@ -35,8 +35,6 @@ bool tscValidateTableNameLength(size_t len);
SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numOfFilters);
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision);
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision);
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
// int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision);
#endif // TDENGINE_NAME_H

View File

@ -313,13 +313,13 @@ void dataColSetOffset(SDataCol *pCol, int nEle) {
SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
SDataCols *pCols = (SDataCols *)calloc(1, sizeof(SDataCols));
if (pCols == NULL) {
uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCols), strerror(errno));
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCols), strerror(errno));
return NULL;
}
pCols->cols = (SDataCol *)calloc(maxCols, sizeof(SDataCol));
if (pCols->cols == NULL) {
uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, strerror(errno));
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno));
tdFreeDataCols(pCols);
return NULL;
}
@ -331,7 +331,7 @@ SDataCols *tdNewDataCols(int maxRowSize, int maxCols, int maxRows) {
pCols->buf = malloc(pCols->bufSize);
if (pCols->buf == NULL) {
uDebug("malloc failure, size:%"PRId64" failed, reason:%s", sizeof(SDataCol) * maxCols, strerror(errno));
uDebug("malloc failure, size:%" PRId64 " failed, reason:%s", (int64_t)sizeof(SDataCol) * maxCols, strerror(errno));
tdFreeDataCols(pCols);
return NULL;
}
@ -716,4 +716,4 @@ SKVRow tdGetKVRowFromBuilder(SKVRowBuilder *pBuilder) {
memcpy(kvRowValues(row), pBuilder->buf, pBuilder->size);
return row;
}
}

View File

@ -131,14 +131,18 @@ uint16_t tsHttpPort = 6041; // only tcp, range tcp[6041]
int32_t tsHttpCacheSessions = 1000;
int32_t tsHttpSessionExpire = 36000;
int32_t tsHttpMaxThreads = 2;
int32_t tsHttpEnableCompress = 0;
int32_t tsHttpEnableCompress = 1;
int32_t tsHttpEnableRecordSql = 0;
int32_t tsTelegrafUseFieldNum = 0;
// mqtt
int32_t tsEnableMqttModule = 0; // not finished yet, so not started by default
char tsMqttBrokerAddress[128] = {0};
char tsMqttBrokerClientId[128] = {0};
char tsMqttHostName[TSDB_MQTT_HOSTNAME_LEN] = "test.mosquitto.org";
char tsMqttPort[TSDB_MQTT_PORT_LEN] = "1883";
char tsMqttUser[TSDB_MQTT_USER_LEN] = {0};
char tsMqttPass[TSDB_MQTT_PASS_LEN] = {0};
char tsMqttClientId[TSDB_MQTT_CLIENT_ID_LEN] = "TDengineMqttSubscriber";
char tsMqttTopic[TSDB_MQTT_TOPIC_LEN] = "/test"; // #
// monitor
int32_t tsEnableMonitorModule = 1;
@ -199,6 +203,7 @@ int32_t debugFlag = 0;
int32_t sDebugFlag = 135;
int32_t wDebugFlag = 135;
int32_t tsdbDebugFlag = 131;
int32_t cqDebugFlag = 135;
int32_t (*monitorStartSystemFp)() = NULL;
void (*monitorStopSystemFp)() = NULL;
@ -218,12 +223,13 @@ void taosSetAllDebugFlag() {
httpDebugFlag = debugFlag;
mqttDebugFlag = debugFlag;
monitorDebugFlag = debugFlag;
qDebugFlag = debugFlag;
rpcDebugFlag = debugFlag;
uDebugFlag = debugFlag;
sDebugFlag = debugFlag;
wDebugFlag = debugFlag;
tsdbDebugFlag = debugFlag;
qDebugFlag = debugFlag;
cqDebugFlag = debugFlag;
uInfo("all debug flag are set to %d", debugFlag);
}
}
@ -247,8 +253,11 @@ bool taosCfgDynamicOptions(char *msg) {
for (int32_t i = 0; i < tsGlobalConfigNum; ++i) {
SGlobalCfg *cfg = tsGlobalConfig + i;
if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
//if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_LOG)) continue;
if (cfg->valType != TAOS_CFG_VTYPE_INT32) continue;
int32_t cfgLen = (int32_t)strlen(cfg->option);
if (cfgLen != olen) continue;
if (strncasecmp(option, cfg->option, olen) != 0) continue;
*((int32_t *)cfg->ptr) = vint;
@ -767,26 +776,36 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttBrokerAddress";
cfg.ptr = tsMqttBrokerAddress;
cfg.option = "mqttHostName";
cfg.ptr = tsMqttHostName;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = 126;
cfg.ptrLength = TSDB_MQTT_HOSTNAME_LEN;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttBrokerClientId";
cfg.ptr = tsMqttBrokerClientId;
cfg.option = "mqttPort";
cfg.ptr = tsMqttPort;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = 126;
cfg.ptrLength = TSDB_MQTT_PORT_LEN;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "mqttTopic";
cfg.ptr = tsMqttTopic;
cfg.valType = TAOS_CFG_VTYPE_STRING;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_NOT_PRINT;
cfg.minValue = 0;
cfg.maxValue = 0;
cfg.ptrLength = TSDB_MQTT_TOPIC_LEN;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "compressMsgSize";
cfg.ptr = &tsCompressMsgSize;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -940,17 +959,6 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
// http configs
cfg.option = "httpCacheSessions";
cfg.ptr = &tsHttpCacheSessions;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG;
cfg.minValue = 1;
cfg.maxValue = 100000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "httpEnableRecordSql";
cfg.ptr = &tsHttpEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -996,12 +1004,22 @@ static void doInitGlobalConfig(void) {
cfg.ptr = &tsNumOfLogLines;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = 10000;
cfg.minValue = 1000;
cfg.maxValue = 2000000000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "logKeepDays";
cfg.ptr = &tsLogKeepDays;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG | TSDB_CFG_CTYPE_B_CLIENT;
cfg.minValue = -365000;
cfg.maxValue = 365000;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "asyncLog";
cfg.ptr = &tsAsyncLog;
cfg.valType = TAOS_CFG_VTYPE_INT16;
@ -1193,6 +1211,16 @@ static void doInitGlobalConfig(void) {
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "cqDebugFlag";
cfg.ptr = &cqDebugFlag;
cfg.valType = TAOS_CFG_VTYPE_INT32;
cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_LOG;
cfg.minValue = 0;
cfg.maxValue = 255;
cfg.ptrLength = 0;
cfg.unitType = TAOS_CFG_UTYPE_NONE;
taosInitConfigOption(cfg);
cfg.option = "tscEnableRecordSql";
cfg.ptr = &tsTscEnableRecordSql;
cfg.valType = TAOS_CFG_VTYPE_INT32;
@ -1270,6 +1298,9 @@ void taosInitGlobalCfg() {
}
bool taosCheckGlobalCfg() {
char fqdn[TSDB_FQDN_LEN];
uint16_t port;
if (debugFlag & DEBUG_TRACE || debugFlag & DEBUG_DEBUG || debugFlag & DEBUG_DUMP) {
taosSetAllDebugFlag();
}
@ -1278,17 +1309,23 @@ bool taosCheckGlobalCfg() {
taosGetFqdn(tsLocalFqdn);
}
snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%d", tsLocalFqdn, tsServerPort);
snprintf(tsLocalEp, sizeof(tsLocalEp), "%s:%u", tsLocalFqdn, tsServerPort);
uInfo("localEp is: %s", tsLocalEp);
if (tsFirst[0] == 0) {
strcpy(tsFirst, tsLocalEp);
} else {
taosGetFqdnPortFromEp(tsFirst, fqdn, &port);
snprintf(tsFirst, sizeof(tsFirst), "%s:%u", fqdn, port);
}
if (tsSecond[0] == 0) {
strcpy(tsSecond, tsLocalEp);
} else {
taosGetFqdnPortFromEp(tsSecond, fqdn, &port);
snprintf(tsSecond, sizeof(tsSecond), "%s:%u", fqdn, port);
}
taosGetSystemInfo();
tsSetLocale();

View File

@ -62,10 +62,9 @@ SSchema tGetUserSpecifiedColumnSchema(tVariant* pVal, SStrToken* exprStr, const
if (name != NULL) {
tstrncpy(s.name, name, sizeof(s.name));
} else {
size_t len = strdequote(exprStr->z);
size_t tlen = MIN(sizeof(s.name), len + 1);
size_t tlen = MIN(sizeof(s.name), exprStr->n + 1);
tstrncpy(s.name, exprStr->z, tlen);
strdequote(s.name);
}
return s;
@ -100,62 +99,7 @@ SColumnFilterInfo* tscFilterInfoClone(const SColumnFilterInfo* src, int32_t numO
return pFilter;
}
int64_t taosAddNatualInterval(int64_t key, int64_t intervalTime, char timeUnit, int16_t precision) {
key /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key /= 1000;
}
struct tm tm;
time_t t = (time_t)key;
localtime_r(&t, &tm);
if (timeUnit == 'y') {
intervalTime *= 12;
}
int mon = (int)(tm.tm_year * 12 + tm.tm_mon + intervalTime);
tm.tm_year = mon / 12;
tm.tm_mon = mon % 12;
key = mktime(&tm) * 1000L;
if (precision == TSDB_TIME_PRECISION_MICRO) {
key *= 1000L;
}
return key;
}
int32_t taosCountNatualInterval(int64_t skey, int64_t ekey, int64_t intervalTime, char timeUnit, int16_t precision) {
skey /= 1000;
ekey /= 1000;
if (precision == TSDB_TIME_PRECISION_MICRO) {
skey /= 1000;
ekey /= 1000;
}
if (ekey < skey) {
int64_t tmp = ekey;
ekey = skey;
skey = tmp;
}
struct tm tm;
time_t t = (time_t)skey;
localtime_r(&t, &tm);
int smon = tm.tm_year * 12 + tm.tm_mon;
t = (time_t)ekey;
localtime_r(&t, &tm);
int emon = tm.tm_year * 12 + tm.tm_mon;
if (timeUnit == 'y') {
intervalTime *= 12;
}
return (emon - smon) / (int32_t)intervalTime;
}
#if 0
int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, int64_t intervalTime, char timeUnit, int16_t precision) {
if (slidingTime == 0) {
return startTime;
@ -220,6 +164,8 @@ int64_t taosGetIntervalStartTimestamp(int64_t startTime, int64_t slidingTime, in
return start;
}
#endif
/*
* tablePrefix.columnName
* extract table name and save it in pTable, with only column name in pToken

View File

@ -219,7 +219,7 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num
}
float fv = 0;
fv = GET_FLOAT_VAL(&(data[i]));
fv = GET_FLOAT_VAL((const char*)&(data[i]));
dsum += fv;
if (fmin > fv) {
fmin = fv;
@ -233,17 +233,12 @@ static void getStatics_f(const TSKEY *primaryKey, const void *pData, int32_t num
}
double csum = 0;
csum = GET_DOUBLE_VAL(sum);
csum = GET_DOUBLE_VAL((const char *)sum);
csum += dsum;
#ifdef _TD_ARM_32_
SET_DOUBLE_VAL_ALIGN(sum, &csum);
SET_DOUBLE_VAL_ALIGN(max, &fmax);
SET_DOUBLE_VAL_ALIGN(min, &fmin);
#else
*(double*)sum = csum;
*(double*)max = fmax;
*(double*)min = fmin;
#endif
SET_DOUBLE_VAL(sum, csum);
SET_DOUBLE_VAL(max, fmax);
SET_DOUBLE_VAL(min, fmin);
}
static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
@ -264,7 +259,7 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num
}
double dv = 0;
dv = GET_DOUBLE_VAL(&(data[i]));
dv = GET_DOUBLE_VAL((const char*)&(data[i]));
dsum += dv;
if (dmin > dv) {
dmin = dv;
@ -278,19 +273,12 @@ static void getStatics_d(const TSKEY *primaryKey, const void *pData, int32_t num
}
double csum = 0;
csum = GET_DOUBLE_VAL(sum);
csum = GET_DOUBLE_VAL((const char *)sum);
csum += dsum;
#ifdef _TD_ARM_32_
SET_DOUBLE_VAL_ALIGN(sum, &csum);
SET_DOUBLE_VAL_ALIGN(max, &dmax);
SET_DOUBLE_VAL_ALIGN(min, &dmin);
#else
*(double*) sum = csum;
*(double*) max = dmax;
*(double*) min = dmin;
#endif
SET_DOUBLE_PTR(sum, &csum);
SET_DOUBLE_PTR(max, &dmax);
SET_DOUBLE_PTR(min, &dmin);
}
static void getStatics_bin(const TSKEY *primaryKey, const void *pData, int32_t numOfRow, int64_t *min, int64_t *max,
@ -493,46 +481,29 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
*((int32_t *)val) = GET_INT32_VAL(src);
break;
}
case TSDB_DATA_TYPE_FLOAT: {
#ifdef _TD_ARM_32_
float fv = GET_FLOAT_VAL(src);
SET_FLOAT_VAL_ALIGN(val, &fv);
#else
*((float *)val) = GET_FLOAT_VAL(src);
#endif
case TSDB_DATA_TYPE_FLOAT:
SET_FLOAT_VAL(val, GET_FLOAT_VAL(src));
break;
};
case TSDB_DATA_TYPE_DOUBLE: {
#ifdef _TD_ARM_32_
double dv = GET_DOUBLE_VAL(src);
SET_DOUBLE_VAL_ALIGN(val, &dv);
#else
*((double *)val) = GET_DOUBLE_VAL(src);
#endif
case TSDB_DATA_TYPE_DOUBLE:
SET_DOUBLE_VAL(val, GET_DOUBLE_VAL(src));
break;
};
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT: {
case TSDB_DATA_TYPE_BIGINT:
*((int64_t *)val) = GET_INT64_VAL(src);
break;
};
case TSDB_DATA_TYPE_SMALLINT: {
case TSDB_DATA_TYPE_SMALLINT:
*((int16_t *)val) = GET_INT16_VAL(src);
break;
};
case TSDB_DATA_TYPE_BOOL:
case TSDB_DATA_TYPE_TINYINT: {
case TSDB_DATA_TYPE_TINYINT:
*((int8_t *)val) = GET_INT8_VAL(src);
break;
};
case TSDB_DATA_TYPE_BINARY: {
case TSDB_DATA_TYPE_BINARY:
varDataCopy(val, src);
break;
};
case TSDB_DATA_TYPE_NCHAR: {
case TSDB_DATA_TYPE_NCHAR:
varDataCopy(val, src);
break;
};
default: {
memcpy(val, src, len);
break;
@ -540,9 +511,7 @@ void assignVal(char *val, const char *src, int32_t len, int32_t type) {
}
}
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size) {
char tmpBuf[4096] = {0};
void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, void* buf) {
switch (type) {
case TSDB_DATA_TYPE_INT: {
SWAP(*(int32_t *)(pLeft), *(int32_t *)(pRight), int32_t);
@ -575,11 +544,10 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size) {
}
default: {
assert(size <= 4096);
memcpy(tmpBuf, pLeft, size);
memcpy(buf, pLeft, size);
memcpy(pLeft, pRight, size);
memcpy(pRight, tmpBuf, size);
memcpy(pRight, buf, size);
break;
}
}
}
}

View File

@ -709,46 +709,21 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
return -1;
}
#ifdef _TD_ARM_32_
//memcpy(&payload, &value, sizeof(float));
float fv = (float)value;
SET_FLOAT_VAL_ALIGN(payload, &fv);
#else
*((float *)payload) = (float)value;
#endif
SET_FLOAT_VAL(payload, value);
}
} else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
#ifdef _TD_ARM_32_
//memcpy(&payload, &pVariant->i64Key, sizeof(float));
float fv = (float)pVariant->i64Key;
SET_FLOAT_VAL_ALIGN(payload, &fv);
#else
*((float *)payload) = (float)pVariant->i64Key;
#endif
SET_FLOAT_VAL(payload, pVariant->i64Key);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
#ifdef _TD_ARM_32_
//memcpy(&payload, &pVariant->dKey, sizeof(float));
float fv = (float)pVariant->dKey;
SET_FLOAT_VAL_ALIGN(payload, &fv);
#else
*((float *)payload) = (float)pVariant->dKey;
#endif
SET_FLOAT_VAL(payload, pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*((int32_t *)payload) = TSDB_DATA_FLOAT_NULL;
return 0;
}
#ifdef _TD_ARM_32_
float fv = GET_FLOAT_VAL(payload);
if (isinf(fv) || isnan(fv) || fv > FLT_MAX || fv < -FLT_MAX) {
return -1;
}
#else
if (isinf(*((float *)payload)) || isnan(*((float *)payload)) || *((float *)payload) > FLT_MAX ||
*((float *)payload) < -FLT_MAX) {
return -1;
}
#endif
break;
}
case TSDB_DATA_TYPE_DOUBLE: {
@ -765,42 +740,21 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu
return -1;
}
#ifdef _TD_ARM_32_
SET_DOUBLE_VAL_ALIGN(payload, &value);
#else
*((double *)payload) = value;
#endif
SET_DOUBLE_VAL(payload, value);
}
} else if (pVariant->nType >= TSDB_DATA_TYPE_BOOL && pVariant->nType <= TSDB_DATA_TYPE_BIGINT) {
#ifdef _TD_ARM_32_
double dv = (double)(pVariant->i64Key);
SET_DOUBLE_VAL_ALIGN(payload, &dv);
#else
*((double *)payload) = (double)pVariant->i64Key;
#endif
SET_DOUBLE_VAL(payload, pVariant->i64Key);
} else if (pVariant->nType == TSDB_DATA_TYPE_DOUBLE || pVariant->nType == TSDB_DATA_TYPE_FLOAT) {
#ifdef _TD_ARM_32_
double dv = (double)(pVariant->dKey);
SET_DOUBLE_VAL_ALIGN(payload, &dv);
#else
*((double *)payload) = pVariant->dKey;
#endif
SET_DOUBLE_VAL(payload, pVariant->dKey);
} else if (pVariant->nType == TSDB_DATA_TYPE_NULL) {
*((int64_t *)payload) = TSDB_DATA_DOUBLE_NULL;
return 0;
}
#ifdef _TD_ARM_32_
double dv = GET_DOUBLE_VAL(payload);
if (isinf(dv) || isnan(dv) || dv > DBL_MAX || dv < -DBL_MAX) {
return -1;
}
#else
if (isinf(*((double *)payload)) || isnan(*((double *)payload)) || *((double *)payload) > DBL_MAX ||
*((double *)payload) < -DBL_MAX) {
return -1;
}
#endif
break;
}

@ -1 +1 @@
Subproject commit 8c58c512b6acda8bcdfa48fdc7140227b5221766
Subproject commit 8d7bf743852897110cbdcc7c4322cd7a74d4167b

View File

@ -5,10 +5,9 @@
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0-SNAPSHOT</version>
<version>2.0.6</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<description>TDengine JDBC Driver</description>
@ -93,14 +92,13 @@
<version>3.6.1</version>
<configuration>
<encoding>UTF-8</encoding>
<source>11</source>
<target>11</target>
<source>8</source>
<target>8</target>
<debug>true</debug>
<showDeprecation>true</showDeprecation>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-source-plugin</artifactId>

View File

@ -1,106 +1,103 @@
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
<url>https://github.com/taosdata/TDengine/blob/master/LICENSE</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git://github.com/taosdata/TDengine.git</connection>
<developerConnection>scm:git:git@github.com:taosdata/TDengine.git</developerConnection>
<modelVersion>4.0.0</modelVersion>
<groupId>com.taosdata.jdbc</groupId>
<artifactId>taos-jdbcdriver</artifactId>
<version>2.0.0</version>
<packaging>jar</packaging>
<name>JDBCDriver</name>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<tag>HEAD</tag>
</scm>
<developers>
<developer>
<name>taosdata</name>
<email>support@taosdata.com</email>
<organization>https://www.taosdata.com/</organization>
<organizationUrl>https://www.taosdata.com/</organizationUrl>
</developer>
</developers>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<java.version>1.8</java.version>
<maven-compiler-plugin.version>3.6.0</maven-compiler-plugin.version>
<commons-logging.version>1.1.2</commons-logging.version>
<commons-lang3.version>3.5</commons-lang3.version>
</properties>
<dependencies>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>${commons-lang3.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<descriptors>
<descriptor>src/main/assembly/assembly-jar.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<encoding>UTF-8</encoding>
<source>${java.version}</source>
<target>${java.version}</target>
<debug>true</debug>
<showDeprecation>true</showDeprecation>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.12.4</version>
<configuration>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
</plugin>
</plugins>
</build>
<description>TDengine JDBC Driver</description>
<licenses>
<license>
<name>GNU AFFERO GENERAL PUBLIC LICENSE Version 3</name>
<url>https://github.com/taosdata/TDengine/blob/master/LICENSE</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<connection>scm:git:git://github.com/taosdata/TDengine.git</connection>
<developerConnection>scm:git:git@github.com:taosdata/TDengine.git</developerConnection>
<url>https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc</url>
<tag>HEAD</tag>
</scm>
<developers>
<developer>
<name>taosdata</name>
<email>support@taosdata.com</email>
<organization>https://www.taosdata.com/</organization>
<organizationUrl>https://www.taosdata.com/</organizationUrl>
</developer>
</developers>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
<java.version>1.8</java.version>
<maven-compiler-plugin.version>3.6.0</maven-compiler-plugin.version>
<commons-logging.version>1.1.2</commons-logging.version>
<commons-lang3.version>3.5</commons-lang3.version>
</properties>
<dependencies>
<dependency>
<groupId>commons-logging</groupId>
<artifactId>commons-logging</artifactId>
<version>${commons-logging.version}</version>
<exclusions>
<exclusion>
<groupId>*</groupId>
<artifactId>*</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<version>4.13</version>
<scope>test</scope>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-assembly-plugin</artifactId>
<version>3.0.0</version>
<configuration>
<descriptors>
<descriptor>src/main/assembly/assembly-jar.xml</descriptor>
</descriptors>
</configuration>
<executions>
<execution>
<id>make-assembly</id>
<phase>package</phase>
<goals>
<goal>single</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
<version>${maven-compiler-plugin.version}</version>
<configuration>
<encoding>UTF-8</encoding>
<source>${java.version}</source>
<target>${java.version}</target>
<debug>true</debug>
<showDeprecation>true</showDeprecation>
</configuration>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<version>2.12.4</version>
<configuration>
<testFailureIgnore>true</testFailureIgnore>
</configuration>
</plugin>
</plugins>
</build>
</project>

View File

@ -67,14 +67,23 @@ public class DatabaseMetaDataResultSet implements ResultSet {
@Override
public boolean next() throws SQLException {
// boolean ret = false;
// if (rowDataList.size() > 0) {
// ret = rowDataList.iterator().hasNext();
// if (ret) {
// rowCursor = rowDataList.iterator().next();
// cursorRowNumber++;
// }
// }
// return ret;
/**** added by zyyang 2020-09-29 ****************/
boolean ret = false;
if (rowDataList.size() > 0) {
ret = rowDataList.iterator().hasNext();
if (ret) {
rowCursor = rowDataList.iterator().next();
cursorRowNumber++;
}
if (!rowDataList.isEmpty() && cursorRowNumber < rowDataList.size()) {
rowCursor = rowDataList.get(cursorRowNumber++);
ret = true;
}
return ret;
}
@ -91,7 +100,8 @@ public class DatabaseMetaDataResultSet implements ResultSet {
@Override
public String getString(int columnIndex) throws SQLException {
columnIndex--;
return rowCursor.getString(columnIndex, columnMetaDataList.get(columnIndex).getColType());
int colType = columnMetaDataList.get(columnIndex).getColType();
return rowCursor.getString(columnIndex, colType);
}
@Override

View File

@ -53,66 +53,12 @@ public class TSDBConnection implements Connection {
public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException {
this.dbMetaData = meta;
//load taos.cfg start
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()){
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST,endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT,endpoints.get(0).split(":")[1]);
}
//load taos.cfg end
connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST),
Integer.parseInt(info.getProperty(TSDBDriver.PROPERTY_KEY_PORT, "0")),
info.getProperty(TSDBDriver.PROPERTY_KEY_DBNAME), info.getProperty(TSDBDriver.PROPERTY_KEY_USER),
info.getProperty(TSDBDriver.PROPERTY_KEY_PASSWORD));
}
private List<String> loadConfigEndpoints(File cfgFile){
List<String> endpoints = new ArrayList<>();
try(BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine())!=null){
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")){
endpoints.add(line.substring(line.indexOf('p')+1).trim());
}
if (endpoints.size()>1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
/**
* @param cfgDirPath
* @return the config dir
* **/
private File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
/**
* @return the default config dir, or null if no default config dir exists
* */
private File loadDefaultConfigDir(){
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
private void connect(String host, int port, String dbName, String user, String password) throws SQLException {
this.connector = new TSDBJNIConnector();
this.connector.connect(host, port, dbName, user, password);
@ -132,7 +78,9 @@ public class TSDBConnection implements Connection {
public Statement createStatement() throws SQLException {
if (!this.connector.isClosed()) {
return new TSDBStatement(this.connector);
TSDBStatement statement = new TSDBStatement(this, this.connector);
statement.setConnection(this);
return statement;
} else {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
@ -153,7 +101,7 @@ public class TSDBConnection implements Connection {
public PreparedStatement prepareStatement(String sql) throws SQLException {
if (!this.connector.isClosed()) {
return new TSDBPreparedStatement(this.connector, sql);
return new TSDBPreparedStatement(this, this.connector, sql);
} else {
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}

View File

@ -14,26 +14,29 @@
*****************************************************************************/
package com.taosdata.jdbc;
import org.apache.commons.lang3.StringUtils;
import java.io.*;
import java.sql.*;
import java.util.ArrayList;
import java.util.List;
import java.util.Properties;
import java.util.logging.Logger;
/**
* The Java SQL framework allows for multiple database drivers. Each driver
* should supply a class that implements the Driver interface
*
*
* <p>
* The DriverManager will try to load as many drivers as it can find and then
* for any given connection request, it will ask each driver in turn to try to
* connect to the target URL.
*
*
* <p>
* It is strongly recommended that each Driver class should be small and stand
* alone so that the Driver class can be loaded and queried without bringing in
* vast quantities of supporting code.
*
*
* <p>
* When a Driver class is loaded, it should create an instance of itself and
* register it with the DriverManager. This means that a user can load and
@ -41,39 +44,41 @@ import java.util.logging.Logger;
*/
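// Usage sketch (illustrative, not part of this change; host, port, and credentials
// are assumptions): loading this class runs the static DriverManager registration
// further below, after which a standard JDBC connect against a "jdbc:TAOS://" URL works:
//
//     Class.forName("com.taosdata.jdbc.TSDBDriver");
//     Connection conn = DriverManager.getConnection(
//             "jdbc:TAOS://127.0.0.1:0/db?user=root&password=your_password");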
public class TSDBDriver implements java.sql.Driver {
@Deprecated
private static final String URL_PREFIX1 = "jdbc:TSDB://";
private static final String URL_PREFIX = "jdbc:TAOS://";
@Deprecated
private static final String URL_PREFIX1 = "jdbc:TSDB://";
/**
* Key used to retrieve the database value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_DBNAME = "dbname";
private static final String URL_PREFIX = "jdbc:TAOS://";
/**
* Key used to retrieve the host value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_HOST = "host";
/**
* Key used to retrieve the password value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_PASSWORD = "password";
/**
* Key used to retrieve the database value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_DBNAME = "dbname";
/**
* Key used to retrieve the port number value from the properties instance
* passed to the driver.
*/
public static final String PROPERTY_KEY_PORT = "port";
/**
* Key used to retrieve the host value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_HOST = "host";
/**
* Key used to retrieve the password value from the properties instance passed
* to the driver.
*/
public static final String PROPERTY_KEY_PASSWORD = "password";
/**
* Key used to retrieve the port number value from the properties instance
* passed to the driver.
*/
public static final String PROPERTY_KEY_PORT = "port";
/**
* Key used to retrieve the user value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_USER = "user";
/**
* Key used to retrieve the user value from the properties instance passed to
* the driver.
*/
public static final String PROPERTY_KEY_USER = "user";
/**
* Key for the configuration file directory of TSDB client in properties instance
@ -98,277 +103,319 @@ public class TSDBDriver implements java.sql.Driver {
public static final String PROPERTY_KEY_PROTOCOL = "protocol";
/**
* Index for port coming out of parseHostPortPair().
*/
public final static int PORT_NUMBER_INDEX = 1;
/**
* Index for host coming out of parseHostPortPair().
*/
public final static int HOST_NAME_INDEX = 0;
/**
* Index for port coming out of parseHostPortPair().
*/
public final static int PORT_NUMBER_INDEX = 1;
private TSDBDatabaseMetaData dbMetaData = null;
/**
* Index for host coming out of parseHostPortPair().
*/
public final static int HOST_NAME_INDEX = 0;
static {
try {
java.sql.DriverManager.registerDriver(new TSDBDriver());
} catch (SQLException E) {
throw new RuntimeException(TSDBConstants.WrapErrMsg("can't register tdengine jdbc driver!"));
}
}
private TSDBDatabaseMetaData dbMetaData = null;
public Connection connect(String url, Properties info) throws SQLException {
if (url == null) {
throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
}
static {
try {
java.sql.DriverManager.registerDriver(new TSDBDriver());
} catch (SQLException E) {
throw new RuntimeException(TSDBConstants.WrapErrMsg("can't register tdengine jdbc driver!"));
}
}
Properties props = null;
private List<String> loadConfigEndpoints(File cfgFile) {
List<String> endpoints = new ArrayList<>();
try (BufferedReader reader = new BufferedReader(new FileReader(cfgFile))) {
String line = null;
while ((line = reader.readLine()) != null) {
if (line.trim().startsWith("firstEp") || line.trim().startsWith("secondEp")) {
endpoints.add(line.substring(line.indexOf('p') + 1).trim());
}
if (endpoints.size() > 1)
break;
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
return endpoints;
}
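// Illustrative taos.cfg excerpt that this parser accepts (the endpoint values are
// assumptions):
//
//     firstEp    hn1.taosdata.com:6030
//     secondEp   hn2.taosdata.com:6030
//
// Each matching line is cut after the keyword's 'p' and trimmed, yielding endpoints
// such as "hn1.taosdata.com:6030"; collection stops once two endpoints are found.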
if ((props = parseURL(url, info)) == null) {
return null;
}
/**
* @param cfgDirPath
* @return the config dir
**/
private File loadConfigDir(String cfgDirPath) {
if (cfgDirPath == null)
return loadDefaultConfigDir();
File cfgDir = new File(cfgDirPath);
if (!cfgDir.exists())
return loadDefaultConfigDir();
return cfgDir;
}
try {
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET),
(String) props.get(PROPERTY_KEY_TIME_ZONE));
Connection newConn = new TSDBConnection(props, this.dbMetaData);
return newConn;
} catch (SQLWarning sqlWarning) {
sqlWarning.printStackTrace();
Connection newConn = new TSDBConnection(props, this.dbMetaData);
return newConn;
} catch (SQLException sqlEx) {
throw sqlEx;
} catch (Exception ex) {
SQLException sqlEx = new SQLException("SQLException:" + ex.toString());
sqlEx.initCause(ex);
throw sqlEx;
}
}
/**
* @return the default config dir, or null if no default config dir exists
*/
private File loadDefaultConfigDir() {
File cfgDir;
File cfgDir_linux = new File("/etc/taos");
cfgDir = cfgDir_linux.exists() ? cfgDir_linux : null;
File cfgDir_windows = new File("C:\\TDengine\\cfg");
cfgDir = (cfgDir == null && cfgDir_windows.exists()) ? cfgDir_windows : cfgDir;
return cfgDir;
}
/**
* Parses hostPortPair in the form of [host][:port] into an array, with the
* element of index HOST_NAME_INDEX being the host (or null if not specified),
* and the element of index PORT_NUMBER_INDEX being the port (or null if not
* specified).
*
* @param hostPortPair
* host and port in the form [host][:port]
*
* @return array containing host and port as Strings
*
* @throws SQLException
* if a parse error occurs
*/
protected static String[] parseHostPortPair(String hostPortPair) throws SQLException {
String[] splitValues = new String[2];
public Connection connect(String url, Properties info) throws SQLException {
if (url == null) {
throw new SQLException(TSDBConstants.WrapErrMsg("url is not set!"));
}
int portIndex = hostPortPair.indexOf(":");
Properties props = null;
if ((props = parseURL(url, info)) == null) {
return null;
}
String hostname = null;
//load taos.cfg start
if (info.getProperty(TSDBDriver.PROPERTY_KEY_HOST) == null && info.getProperty(TSDBDriver.PROPERTY_KEY_PORT) == null) {
File cfgDir = loadConfigDir(info.getProperty(TSDBDriver.PROPERTY_KEY_CONFIG_DIR));
File cfgFile = cfgDir.listFiles((dir, name) -> "taos.cfg".equalsIgnoreCase(name))[0];
List<String> endpoints = loadConfigEndpoints(cfgFile);
if (!endpoints.isEmpty()) {
info.setProperty(TSDBDriver.PROPERTY_KEY_HOST, endpoints.get(0).split(":")[0]);
info.setProperty(TSDBDriver.PROPERTY_KEY_PORT, endpoints.get(0).split(":")[1]);
}
}
if (portIndex != -1) {
if ((portIndex + 1) < hostPortPair.length()) {
String portAsString = hostPortPair.substring(portIndex + 1);
hostname = hostPortPair.substring(0, portIndex);
try {
TSDBJNIConnector.init((String) props.get(PROPERTY_KEY_CONFIG_DIR), (String) props.get(PROPERTY_KEY_LOCALE), (String) props.get(PROPERTY_KEY_CHARSET),
(String) props.get(PROPERTY_KEY_TIME_ZONE));
Connection newConn = new TSDBConnection(props, this.dbMetaData);
return newConn;
} catch (SQLWarning sqlWarning) {
sqlWarning.printStackTrace();
Connection newConn = new TSDBConnection(props, this.dbMetaData);
return newConn;
} catch (SQLException sqlEx) {
throw sqlEx;
} catch (Exception ex) {
SQLException sqlEx = new SQLException("SQLException:" + ex.toString());
sqlEx.initCause(ex);
throw sqlEx;
}
}
splitValues[HOST_NAME_INDEX] = hostname;
/**
* Parses hostPortPair in the form of [host][:port] into an array, with the
* element of index HOST_NAME_INDEX being the host (or null if not specified),
* and the element of index PORT_NUMBER_INDEX being the port (or null if not
* specified).
*
* @param hostPortPair host and port in the form [host][:port]
* @return array containing host and port as Strings
* @throws SQLException if a parse error occurs
*/
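// Worked example (hypothetical inputs): parseHostPortPair("node1:6030") returns
// {"node1", "6030"}; parseHostPortPair("node1") returns {"node1", null}; and a
// trailing colon with no port ("node1:") throws a SQLException ("port is not proper!").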
protected static String[] parseHostPortPair(String hostPortPair) throws SQLException {
String[] splitValues = new String[2];
splitValues[PORT_NUMBER_INDEX] = portAsString;
} else {
throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!"));
}
} else {
splitValues[HOST_NAME_INDEX] = hostPortPair;
splitValues[PORT_NUMBER_INDEX] = null;
}
int portIndex = hostPortPair.indexOf(":");
return splitValues;
}
String hostname = null;
public boolean acceptsURL(String url) throws SQLException {
return StringUtils.isNotBlank(url) && url.startsWith(URL_PREFIX);
}
if (portIndex != -1) {
if ((portIndex + 1) < hostPortPair.length()) {
String portAsString = hostPortPair.substring(portIndex + 1);
hostname = hostPortPair.substring(0, portIndex);
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
if (info == null) {
info = new Properties();
}
splitValues[HOST_NAME_INDEX] = hostname;
if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) {
info = parseURL(url, info);
}
splitValues[PORT_NUMBER_INDEX] = portAsString;
} else {
throw new SQLException(TSDBConstants.WrapErrMsg("port is not proper!"));
}
} else {
splitValues[HOST_NAME_INDEX] = hostPortPair;
splitValues[PORT_NUMBER_INDEX] = null;
}
DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
hostProp.required = true;
return splitValues;
}
DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT,
info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
portProp.required = false;
public boolean acceptsURL(String url) throws SQLException {
return (url != null && url.length() > 0 && url.trim().length() > 0) && url.startsWith(URL_PREFIX);
}
DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
dbProp.required = false;
dbProp.description = "Database name";
public DriverPropertyInfo[] getPropertyInfo(String url, Properties info) throws SQLException {
if (info == null) {
info = new Properties();
}
DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
userProp.required = true;
if ((url != null) && (url.startsWith(URL_PREFIX) || url.startsWith(URL_PREFIX1))) {
info = parseURL(url, info);
}
DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD,
info.getProperty(PROPERTY_KEY_PASSWORD));
passwordProp.required = true;
DriverPropertyInfo hostProp = new DriverPropertyInfo(PROPERTY_KEY_HOST, info.getProperty(PROPERTY_KEY_HOST));
hostProp.required = true;
DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
propertyInfo[0] = hostProp;
propertyInfo[1] = portProp;
propertyInfo[2] = dbProp;
propertyInfo[3] = userProp;
propertyInfo[4] = passwordProp;
DriverPropertyInfo portProp = new DriverPropertyInfo(PROPERTY_KEY_PORT, info.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
portProp.required = false;
return propertyInfo;
}
DriverPropertyInfo dbProp = new DriverPropertyInfo(PROPERTY_KEY_DBNAME, info.getProperty(PROPERTY_KEY_DBNAME));
dbProp.required = false;
dbProp.description = "Database name";
/**
* example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password
*/
DriverPropertyInfo userProp = new DriverPropertyInfo(PROPERTY_KEY_USER, info.getProperty(PROPERTY_KEY_USER));
userProp.required = true;
public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException {
Properties urlProps = (defaults != null) ? defaults : new Properties();
if (url == null) {
return null;
}
DriverPropertyInfo passwordProp = new DriverPropertyInfo(PROPERTY_KEY_PASSWORD, info.getProperty(PROPERTY_KEY_PASSWORD));
passwordProp.required = true;
if (!StringUtils.startsWithIgnoreCase(url, URL_PREFIX) && !StringUtils.startsWithIgnoreCase(url, URL_PREFIX1)) {
return null;
}
DriverPropertyInfo[] propertyInfo = new DriverPropertyInfo[5];
propertyInfo[0] = hostProp;
propertyInfo[1] = portProp;
propertyInfo[2] = dbProp;
propertyInfo[3] = userProp;
propertyInfo[4] = passwordProp;
String urlForMeta = url;
return propertyInfo;
}
String dbProductName = url.substring(url.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
int beginningOfSlashes = url.indexOf("//");
url = url.substring(beginningOfSlashes + 2);
/**
* example: jdbc:TSDB://127.0.0.1:0/db?user=root&password=your_password
*/
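// For the example URL above, the parsing below fills the Properties roughly as
// follows (a sketch, not an authoritative trace):
//
//     host     -> "127.0.0.1"
//     port     -> "0"
//     dbname   -> "db"
//     user     -> "root"
//     password -> "your_password"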
public Properties parseURL(String url, Properties defaults) throws java.sql.SQLException {
Properties urlProps = (defaults != null) ? defaults : new Properties();
if (url == null) {
return null;
}
String host = url.substring(0, url.indexOf(":"));
url = url.substring(url.indexOf(":") + 1);
urlProps.setProperty(PROPERTY_KEY_HOST, host);
if (!url.startsWith(URL_PREFIX) && !url.startsWith(URL_PREFIX1)) {
return null;
}
String port = url.substring(0, url.indexOf("/"));
urlProps.setProperty(PROPERTY_KEY_PORT, port);
url = url.substring(url.indexOf("/") + 1);
String urlForMeta = url;
if (url.indexOf("?") != -1) {
String dbName = url.substring(0, url.indexOf("?"));
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
url = url.trim().substring(url.indexOf("?") + 1);
} else {
// without user & password so return
if(!url.trim().isEmpty()) {
String dbName = url.trim();
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
}
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user"));
return urlProps;
}
String dbProductName = url.substring(url.indexOf(":") + 1);
dbProductName = dbProductName.substring(0, dbProductName.indexOf(":"));
int beginningOfSlashes = url.indexOf("//");
url = url.substring(beginningOfSlashes + 2);
String user = "";
String host = url.substring(0, url.indexOf(":"));
url = url.substring(url.indexOf(":") + 1);
urlProps.setProperty(PROPERTY_KEY_HOST, host);
if (url.indexOf("&") == -1) {
String[] kvPair = url.trim().split("=");
if (kvPair.length == 2) {
setPropertyValue(urlProps, kvPair);
return urlProps;
}
}
String port = url.substring(0, url.indexOf("/"));
urlProps.setProperty(PROPERTY_KEY_PORT, port);
url = url.substring(url.indexOf("/") + 1);
String[] queryStrings = url.trim().split("&");
for (String queryStr : queryStrings) {
String[] kvPair = queryStr.trim().split("=");
if (kvPair.length < 2){
continue;
}
setPropertyValue(urlProps, kvPair);
}
if (url.indexOf("?") != -1) {
String dbName = url.substring(0, url.indexOf("?"));
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
url = url.trim().substring(url.indexOf("?") + 1);
} else {
// without user & password so return
if (!url.trim().isEmpty()) {
String dbName = url.trim();
urlProps.setProperty(PROPERTY_KEY_DBNAME, dbName);
}
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, urlProps.getProperty("user"));
return urlProps;
}
user = urlProps.getProperty(PROPERTY_KEY_USER).toString();
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user);
String user = "";
return urlProps;
}
if (url.indexOf("&") == -1) {
String[] kvPair = url.trim().split("=");
if (kvPair.length == 2) {
setPropertyValue(urlProps, kvPair);
return urlProps;
}
}
public void setPropertyValue(Properties property, String[] keyValuePair) {
switch (keyValuePair[0].toLowerCase()) {
case PROPERTY_KEY_USER:
property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]);
break;
case PROPERTY_KEY_PASSWORD:
property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]);
break;
case PROPERTY_KEY_TIME_ZONE:
property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]);
break;
case PROPERTY_KEY_LOCALE:
property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]);
break;
case PROPERTY_KEY_CHARSET:
property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]);
break;
case PROPERTY_KEY_CONFIG_DIR:
property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]);
break;
}
}
String[] queryStrings = url.trim().split("&");
for (String queryStr : queryStrings) {
String[] kvPair = queryStr.trim().split("=");
if (kvPair.length < 2) {
continue;
}
setPropertyValue(urlProps, kvPair);
}
user = urlProps.getProperty(PROPERTY_KEY_USER).toString();
this.dbMetaData = new TSDBDatabaseMetaData(dbProductName, urlForMeta, user);
return urlProps;
}
public void setPropertyValue(Properties property, String[] keyValuePair) {
switch (keyValuePair[0].toLowerCase()) {
case PROPERTY_KEY_USER:
property.setProperty(PROPERTY_KEY_USER, keyValuePair[1]);
break;
case PROPERTY_KEY_PASSWORD:
property.setProperty(PROPERTY_KEY_PASSWORD, keyValuePair[1]);
break;
case PROPERTY_KEY_TIME_ZONE:
property.setProperty(PROPERTY_KEY_TIME_ZONE, keyValuePair[1]);
break;
case PROPERTY_KEY_LOCALE:
property.setProperty(PROPERTY_KEY_LOCALE, keyValuePair[1]);
break;
case PROPERTY_KEY_CHARSET:
property.setProperty(PROPERTY_KEY_CHARSET, keyValuePair[1]);
break;
case PROPERTY_KEY_CONFIG_DIR:
property.setProperty(PROPERTY_KEY_CONFIG_DIR, keyValuePair[1]);
break;
}
}
public int getMajorVersion() {
return 1;
}
public int getMajorVersion() {
return 1;
}
public int getMinorVersion() {
return 1;
}
public int getMinorVersion() {
return 1;
}
public boolean jdbcCompliant() {
return false;
}
public boolean jdbcCompliant() {
return false;
}
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
return null;
}
public Logger getParentLogger() throws SQLFeatureNotSupportedException {
return null;
}
/**
* Returns the host property
*
* @param props
* the java.util.Properties instance to retrieve the hostname from.
*
* @return the host
*/
public String host(Properties props) {
return props.getProperty(PROPERTY_KEY_HOST, "localhost");
}
/**
* Returns the host property
*
* @param props the java.util.Properties instance to retrieve the hostname from.
* @return the host
*/
public String host(Properties props) {
return props.getProperty(PROPERTY_KEY_HOST, "localhost");
}
/**
* Returns the port number property
*
* @param props
* the properties to get the port number from
*
* @return the port number
*/
public int port(Properties props) {
return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
}
/**
* Returns the port number property
*
* @param props the properties to get the port number from
* @return the port number
*/
public int port(Properties props) {
return Integer.parseInt(props.getProperty(PROPERTY_KEY_PORT, TSDBConstants.DEFAULT_PORT));
}
/**
* Returns the database property from <code>props</code>
*
* @param props
* the Properties to look for the database property.
*
* @return the database name.
*/
public String database(Properties props) {
return props.getProperty(PROPERTY_KEY_DBNAME);
}
/**
* Returns the database property from <code>props</code>
*
* @param props the Properties to look for the database property.
* @return the database name.
*/
public String database(Properties props) {
return props.getProperty(PROPERTY_KEY_DBNAME);
}
}

View File

@ -42,8 +42,8 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat
private SavedPreparedStatement savedPreparedStatement;
TSDBPreparedStatement(TSDBJNIConnector connecter, String sql) {
super(connecter);
TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) {
super(connection, connecter);
init(sql);
}

View File

@ -19,153 +19,164 @@ import java.util.ArrayList;
import java.util.List;
public class TSDBStatement implements Statement {
private TSDBJNIConnector connecter = null;
private TSDBJNIConnector connecter = null;
/** To store batched commands */
protected List<String> batchedArgs;
/**
* To store batched commands
*/
protected List<String> batchedArgs;
/** Timeout for a query */
protected int queryTimeout = 0;
/**
* Timeout for a query
*/
protected int queryTimeout = 0;
private Long pSql = 0l;
private Long pSql = 0l;
/**
* Status of current statement
*/
private boolean isClosed = true;
private int affectedRows = 0;
private boolean isClosed = true;
private int affectedRows = 0;
TSDBStatement(TSDBJNIConnector connecter) {
this.connecter = connecter;
this.isClosed = false;
}
private TSDBConnection connection;
public <T> T unwrap(Class<T> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setConnection(TSDBConnection connection) {
this.connection = connection;
}
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
TSDBStatement(TSDBConnection connection, TSDBJNIConnector connecter) {
this.connection = connection;
this.connecter = connecter;
this.isClosed = false;
}
public ResultSet executeQuery(String sql) throws SQLException {
public <T> T unwrap(Class<T> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean isWrapperFor(Class<?> iface) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public ResultSet executeQuery(String sql) throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
// TODO make sure it is not an update query
pSql = this.connecter.executeQuery(sql);
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
long resultSetPointer = this.connecter.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
// create/insert/update/delete/alter
if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
this.connecter.freeResultSet(pSql);
return null;
}
if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
this.connecter.freeResultSet(pSql);
return null;
}
if (!this.connecter.isUpdateQuery(pSql)) {
return new TSDBResultSet(this.connecter, resultSetPointer);
} else {
this.connecter.freeResultSet(pSql);
return null;
}
if (!this.connecter.isUpdateQuery(pSql)) {
return new TSDBResultSet(this.connecter, resultSetPointer);
} else {
this.connecter.freeResultSet(pSql);
return null;
}
}
}
public int executeUpdate(String sql) throws SQLException {
public int executeUpdate(String sql) throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
// TODO check whether the current query is an update query
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
}
this.affectedRows = this.connecter.getAffectedRows(pSql);
this.connecter.freeResultSet(pSql);
this.affectedRows = this.connecter.getAffectedRows(pSql);
this.connecter.freeResultSet(pSql);
return this.affectedRows;
}
return this.affectedRows;
}
public String getErrorMsg(long pSql) {
return this.connecter.getErrMsg(pSql);
}
public String getErrorMsg(long pSql) {
return this.connecter.getErrMsg(pSql);
}
public void close() throws SQLException {
public void close() throws SQLException {
if (!isClosed) {
if (!this.connecter.isResultsetClosed()) {
this.connecter.freeResultSet();
}
isClosed = true;
}
}
}
public int getMaxFieldSize() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getMaxFieldSize() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setMaxFieldSize(int max) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setMaxFieldSize(int max) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getMaxRows() throws SQLException {
// always set maxRows to zero, meaning unlimited rows in a resultSet
return 0;
}
public int getMaxRows() throws SQLException {
// always set maxRows to zero, meaning unlimited rows in a resultSet
return 0;
}
public void setMaxRows(int max) throws SQLException {
// always set maxRows to zero, meaning unlimited rows in a resultSet
}
public void setMaxRows(int max) throws SQLException {
// always set maxRows to zero, meaning unlimited rows in a resultSet
}
public void setEscapeProcessing(boolean enable) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setEscapeProcessing(boolean enable) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getQueryTimeout() throws SQLException {
return queryTimeout;
}
public int getQueryTimeout() throws SQLException {
return queryTimeout;
}
public void setQueryTimeout(int seconds) throws SQLException {
this.queryTimeout = seconds;
}
public void setQueryTimeout(int seconds) throws SQLException {
this.queryTimeout = seconds;
}
public void cancel() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void cancel() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public SQLWarning getWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public SQLWarning getWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void clearWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void clearWarnings() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setCursorName(String name) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setCursorName(String name) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean execute(String sql) throws SQLException {
public boolean execute(String sql) throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
boolean res = true;
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
boolean res = true;
pSql = this.connecter.executeQuery(sql);
long resultSetPointer = this.connecter.getResultSet();
if (resultSetPointer == TSDBConstants.JNI_CONNECTION_NULL) {
this.connecter.freeResultSet(pSql);
this.connecter.freeResultSet(pSql);
throw new SQLException(TSDBConstants.FixErrMsg(TSDBConstants.JNI_CONNECTION_NULL));
} else if (resultSetPointer == TSDBConstants.JNI_NULL_POINTER) {
// no result set is retrieved
@ -173,145 +184,147 @@ public class TSDBStatement implements Statement {
res = false;
}
return res;
}
return res;
}
public ResultSet getResultSet() throws SQLException {
public ResultSet getResultSet() throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
long resultSetPointer = connecter.getResultSet();
TSDBResultSet resSet = null;
long resultSetPointer = connecter.getResultSet();
TSDBResultSet resSet = null;
if (resultSetPointer != TSDBConstants.JNI_NULL_POINTER) {
resSet = new TSDBResultSet(connecter, resultSetPointer);
}
return resSet;
}
public int getUpdateCount() throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
return this.affectedRows;
}
public boolean getMoreResults() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void setFetchDirection(int direction) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getFetchDirection() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
/*
* used by spark
*/
public void setFetchSize(int rows) throws SQLException {
}
/*
* used by spark
*/
public int getFetchSize() throws SQLException {
return 4096;
}
public int getResultSetConcurrency() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getResultSetType() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void addBatch(String sql) throws SQLException {
if (batchedArgs == null) {
batchedArgs = new ArrayList<String>();
}
batchedArgs.add(sql);
}
public void addBatch(String sql) throws SQLException {
if (batchedArgs == null) {
batchedArgs = new ArrayList<>();
}
batchedArgs.add(sql);
}
public void clearBatch() throws SQLException {
batchedArgs.clear();
}
public int[] executeBatch() throws SQLException {
if (isClosed) {
throw new SQLException("Invalid method call on a closed statement.");
}
if (batchedArgs == null) {
throw new SQLException(TSDBConstants.WrapErrMsg("Batch is empty!"));
} else {
int[] res = new int[batchedArgs.size()];
for (int i = 0; i < batchedArgs.size(); i++) {
res[i] = executeUpdate(batchedArgs.get(i));
}
return res;
}
}
public Connection getConnection() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public Connection getConnection() throws SQLException {
if (this.connecter != null)
return this.connection;
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean getMoreResults(int current) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public ResultSet getGeneratedKeys() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int executeUpdate(String sql, int[] columnIndexes) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int executeUpdate(String sql, String[] columnNames) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean execute(String sql, int autoGeneratedKeys) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean execute(String sql, int[] columnIndexes) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean execute(String sql, String[] columnNames) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public int getResultSetHoldability() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean isClosed() throws SQLException {
return isClosed;
}
public void setPoolable(boolean poolable) throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean isPoolable() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public void closeOnCompletion() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
public boolean isCloseOnCompletion() throws SQLException {
throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG);
}
}
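Taken together, TSDBStatement supports plain execute/query/update calls, a stored per-statement query timeout, a fixed fetch size of 4096, and string-based batching, while most other JDBC hooks throw the unsupported-method exception. A minimal usage sketch, under stated assumptions: a TDengine server reachable on localhost, the driver jar on the classpath; the database name "demo", the table "tb", and the literal timestamps are placeholders, and connection properties such as user and password are omitted.

import java.sql.*;

public class StatementUsageSketch {
    public static void main(String[] args) throws Exception {
        // Hypothetical sketch, not part of this commit; names below are placeholders.
        Class.forName("com.taosdata.jdbc.TSDBDriver");
        Connection conn = DriverManager.getConnection("jdbc:TAOS://localhost:0/");
        Statement stmt = conn.createStatement();
        stmt.setQueryTimeout(10); // stored in the field that getQueryTimeout() reports
        stmt.executeUpdate("create database if not exists demo");
        stmt.executeUpdate("use demo");
        stmt.executeUpdate("create table if not exists tb (ts timestamp, v int)");
        // addBatch() buffers raw SQL strings; executeBatch() replays them via executeUpdate()
        stmt.addBatch("insert into tb values (1496732686000, 1)");
        stmt.addBatch("insert into tb values (1496732686001, 2)");
        int[] counts = stmt.executeBatch();
        System.out.println("batched statements: " + counts.length);
        // execute() returns true when a result set was produced
        if (stmt.execute("select * from tb")) {
            ResultSet rs = stmt.getResultSet();
            while (rs.next()) {
                // read generically; column accessors mirror the tests in this diff
                System.out.println(rs.getString(1) + " -> " + rs.getInt(2));
            }
            rs.close();
        }
        stmt.close();
        conn.close();
    }
}

Note that executeBatch() simply replays each buffered string through executeUpdate(), so the affected-row counts come back in submission order.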

View File

@ -23,6 +23,7 @@ import java.sql.SQLException;
public class SqlSyntaxValidator {
private TSDBConnection tsdbConnection;
public SqlSyntaxValidator(Connection connection) {
this.tsdbConnection = (TSDBConnection) connection;
}

View File

@ -8,8 +8,7 @@ import org.junit.BeforeClass;
public class BaseTest {
private static boolean testCluster = false;
private static TDNodes nodes = new TDNodes();
@BeforeClass
public static void setupEnv() {
@ -19,11 +18,9 @@ public class BaseTest {
nodes.getTDNode(1).setRunning(1);
nodes.stop(1);
}
nodes.setTestCluster(testCluster);
nodes.deploy(1);
nodes.start(1);
} catch (Exception e) {
e.printStackTrace();
}

View File

@ -7,13 +7,11 @@ import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
import java.util.concurrent.*;
import static org.junit.Assert.assertTrue;
public class BatchInsertTest extends BaseTest {

View File

@ -0,0 +1,81 @@
package com.taosdata.jdbc;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.*;
import java.util.Properties;
import java.util.Random;
import static org.junit.Assert.assertEquals;
import java.util.concurrent.Executors;
import java.util.concurrent.*;
import static org.junit.Assert.assertTrue;
public class QueryDataTest extends BaseTest {
static Connection connection = null;
static Statement statement = null;
static String dbName = "test";
static String stbName = "meters";
static String host = "localhost";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000L;
final static String tablePrefix = "t";
@Before
public void createDatabase() throws SQLException {
try {
Class.forName("com.taosdata.jdbc.TSDBDriver");
} catch (ClassNotFoundException e) {
return;
}
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
String createTableSql = "create table " + stbName + "(ts timestamp, name binary(6))";
statement.executeUpdate(createTableSql);
}
@Test
public void testQueryBinaryData() throws SQLException {
String insertSql = "insert into " + stbName + " values(now, 'taosda')";
System.out.println(insertSql);
statement.executeUpdate(insertSql);
String querySql = "select * from " + stbName;
ResultSet rs = statement.executeQuery(querySql);
while(rs.next()) {
String name = rs.getString(2) + "001";
System.out.println("name = " + name);
assertEquals("taosda001", name); // JUnit order: expected first, then actual
}
rs.close();
}
@After
public void close() throws Exception {
statement.close();
connection.close();
Thread.sleep(10);
}
}

View File

@ -34,7 +34,6 @@ public class SelectTest extends BaseTest {
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k int, v int)");
}
@Test
@ -66,6 +65,5 @@ public class SelectTest extends BaseTest {
statement.close();
connection.close();
Thread.sleep(10);
}
}

View File

@ -10,8 +10,6 @@ import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
import static org.junit.Assert.assertTrue;
public class SubscribeTest extends BaseTest {
Connection connection = null;
Statement statement = null;

View File

@ -0,0 +1,108 @@
package com.taosdata.jdbc.cases;
import com.taosdata.jdbc.lib.TSDBCommon;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertEquals;
public class BatchInsertTest {
static String host = "localhost";
static String dbName = "test";
static String stbName = "meters";
static int numOfTables = 30;
final static int numOfRecordsPerTable = 1000;
static long ts = 1496732686000L;
final static String tablePrefix = "t";
private Connection connection;
@Before
public void before() {
try {
connection = TSDBCommon.getConn(host);
TSDBCommon.createDatabase(connection, dbName);
TSDBCommon.createStable(connection, stbName);
TSDBCommon.createTables(connection, numOfTables, stbName, tablePrefix);
} catch (Exception e) {
e.printStackTrace();
}
}
@Test
public void testBatchInsert() {
ExecutorService executorService = Executors.newFixedThreadPool(numOfTables);
for (int i = 0; i < numOfTables; i++) {
final int index = i;
executorService.execute(new Runnable() {
@Override
public void run() {
try {
long startTime = System.currentTimeMillis();
Statement statement = connection.createStatement(); // get statement
StringBuilder sb = new StringBuilder();
sb.append("INSERT INTO " + tablePrefix + index + " VALUES");
Random rand = new Random();
for (int j = 1; j <= numOfRecordsPerTable; j++) {
sb.append("(" + (ts + j) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ", ");
sb.append(rand.nextInt(100) + ")");
}
statement.addBatch(sb.toString());
statement.executeBatch();
long endTime = System.currentTimeMillis();
System.out.println("Thread " + index + " takes " + (endTime - startTime) + " microseconds");
connection.commit();
statement.close();
} catch (Exception e) {
e.printStackTrace();
}
}
});
}
executorService.shutdown();
try {
executorService.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS);
} catch (InterruptedException e) {
e.printStackTrace();
}
try {
Statement statement = connection.createStatement();
ResultSet rs = statement.executeQuery("select * from meters");
int num = 0;
while (rs.next()) {
num++;
}
assertEquals(numOfTables * numOfRecordsPerTable, num);
rs.close();
} catch (Exception e) {
e.printStackTrace();
}
}
@After
public void after() {
try {
if (connection != null)
connection.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
}

View File

@ -0,0 +1,47 @@
package com.taosdata.jdbc.lib;
import com.taosdata.jdbc.TSDBDriver;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.Properties;
public class TSDBCommon {
public static Connection getConn(String host) throws SQLException, ClassNotFoundException {
Class.forName("com.taosdata.jdbc.TSDBDriver");
Properties properties = new Properties();
properties.setProperty(TSDBDriver.PROPERTY_KEY_HOST, host);
properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8");
properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8");
return DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties);
}
public static void createDatabase(Connection connection, String dbName) throws SQLException {
Statement statement = connection.createStatement();
statement.executeUpdate("drop database if exists " + dbName);
statement.executeUpdate("create database if not exists " + dbName);
statement.executeUpdate("use " + dbName);
statement.close();
}
public static void createStable(Connection connection, String stbName) throws SQLException {
Statement statement = connection.createStatement();
String createTableSql = "create table " + stbName + "(ts timestamp, f1 int, f2 int, f3 int) tags(areaid int, loc binary(20))";
statement.executeUpdate(createTableSql);
statement.close();
}
public static void createTables(Connection connection, int numOfTables, String stbName, String tablePrefix) throws SQLException {
Statement statement = connection.createStatement();
for (int i = 0; i < numOfTables; i++) {
String loc = i % 2 == 0 ? "beijing" : "shanghai";
String createSubTableSql = "create table " + tablePrefix + i + " using " + stbName + " tags(" + i + ", '" + loc + "')";
statement.executeUpdate(createSubTableSql);
}
statement.close();
}
}
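TSDBCommon centralizes the connect-and-create boilerplate that the earlier tests repeat inline. A minimal sketch of a caller, under the same assumptions as the tests above (a TDengine server on localhost); the table count, row values, and sub-table name "t0" are placeholders.

import com.taosdata.jdbc.lib.TSDBCommon;

import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;

public class TSDBCommonUsageSketch {
    public static void main(String[] args) throws Exception {
        // getConn loads the driver and applies the charset/locale/timezone properties
        Connection conn = TSDBCommon.getConn("localhost");
        TSDBCommon.createDatabase(conn, "test");         // drop, create, use
        TSDBCommon.createStable(conn, "meters");         // schema: ts, f1-f3 + tags(areaid, loc)
        TSDBCommon.createTables(conn, 4, "meters", "t"); // t0..t3 with alternating location tags
        Statement stmt = conn.createStatement();
        stmt.executeUpdate("insert into t0 values (1496732686000, 1, 2, 3)");
        // query the super table and count rows, as BatchInsertTest does
        ResultSet rs = stmt.executeQuery("select * from meters");
        int rows = 0;
        while (rs.next()) {
            rows++;
        }
        System.out.println("rows: " + rows);
        rs.close();
        stmt.close();
        conn.close();
    }
}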

View File

@ -2,6 +2,8 @@ CMAKE_MINIMUM_REQUIRED(VERSION 2.8)
PROJECT(TDengine)
INCLUDE_DIRECTORIES(inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc)
INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/query/inc)
AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR}/src SRC)
IF (TD_LINUX)

View File

@ -21,6 +21,7 @@
#include <string.h>
#include "taos.h"
#include "tsclient.h"
#include "taosdef.h"
#include "taosmsg.h"
#include "ttimer.h"
@ -30,10 +31,12 @@
#include "tlog.h"
#include "twal.h"
#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("ERROR CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("WARN CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cFatal(...) { if (cqDebugFlag & DEBUG_FATAL) { taosPrintLog("CQ FATAL ", 255, __VA_ARGS__); }}
#define cError(...) { if (cqDebugFlag & DEBUG_ERROR) { taosPrintLog("CQ ERROR ", 255, __VA_ARGS__); }}
#define cWarn(...) { if (cqDebugFlag & DEBUG_WARN) { taosPrintLog("CQ WARN ", 255, __VA_ARGS__); }}
#define cInfo(...) { if (cqDebugFlag & DEBUG_INFO) { taosPrintLog("CQ ", 255, __VA_ARGS__); }}
#define cDebug(...) { if (cqDebugFlag & DEBUG_DEBUG) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cTrace(...) { if (cqDebugFlag & DEBUG_TRACE) { taosPrintLog("CQ ", cqDebugFlag, __VA_ARGS__); }}
#define cPrint(...) { taosPrintLog("CQ ", 255, __VA_ARGS__); }
typedef struct {
int vgId;
@ -63,8 +66,6 @@ typedef struct SCqObj {
SCqContext * pContext;
} SCqObj;
int cqDebugFlag = 135;
static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row);
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj);
@ -94,7 +95,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
pthread_mutex_init(&pContext->mutex, NULL);
cTrace("vgId:%d, CQ is opened", pContext->vgId);
cInfo("vgId:%d, CQ is opened", pContext->vgId);
return pContext;
}
@ -125,7 +126,7 @@ void cqClose(void *handle) {
taosTmrCleanUp(pContext->tmrCtrl);
pContext->tmrCtrl = NULL;
cTrace("vgId:%d, CQ is closed", pContext->vgId);
cInfo("vgId:%d, CQ is closed", pContext->vgId);
free(pContext);
}
@ -133,7 +134,7 @@ void cqStart(void *handle) {
SCqContext *pContext = handle;
if (pContext->dbConn || pContext->master) return;
cTrace("vgId:%d, start all CQs", pContext->vgId);
cInfo("vgId:%d, start all CQs", pContext->vgId);
pthread_mutex_lock(&pContext->mutex);
pContext->master = 1;
@ -149,7 +150,7 @@ void cqStart(void *handle) {
void cqStop(void *handle) {
SCqContext *pContext = handle;
cTrace("vgId:%d, stop all CQs", pContext->vgId);
cInfo("vgId:%d, stop all CQs", pContext->vgId);
if (pContext->dbConn == NULL || pContext->master == 0) return;
pthread_mutex_lock(&pContext->mutex);
@ -160,7 +161,7 @@ void cqStop(void *handle) {
if (pObj->pStream) {
taos_close_stream(pObj->pStream);
pObj->pStream = NULL;
cTrace("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is closed", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
taosTmrStop(pObj->tmrId);
pObj->tmrId = 0;
@ -188,7 +189,7 @@ void *cqCreate(void *handle, uint64_t uid, int tid, char *sqlStr, STSchema *pSch
pObj->pSchema = tdDupSchema(pSchema);
pObj->rowSize = schemaTLen(pSchema);
cTrace("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is created", pContext->vgId, pObj->tid, pObj->sqlStr);
pthread_mutex_lock(&pContext->mutex);
@ -228,7 +229,7 @@ void cqDrop(void *handle) {
pObj->tmrId = 0;
}
cTrace("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr);
tdFreeSchema(pObj->pSchema);
free(pObj->sqlStr);
free(pObj);
@ -236,24 +237,31 @@ void cqDrop(void *handle) {
pthread_mutex_unlock(&pContext->mutex);
}
static void doCreateStream(void *param, TAOS_RES *result, int code) {
SCqObj* pObj = (SCqObj*)param;
SCqContext* pContext = pObj->pContext;
SSqlObj* pSql = (SSqlObj*)result;
pContext->dbConn = pSql->pTscObj;
cqCreateStream(pContext, pObj);
}
static void cqProcessCreateTimer(void *param, void *tmrId) {
SCqObj* pObj = (SCqObj*)param;
SCqContext* pContext = pObj->pContext;
if (pContext->dbConn == NULL) {
pContext->dbConn = taos_connect("localhost", pContext->user, pContext->pass, pContext->db, 0);
if (pContext->dbConn == NULL) {
cError("vgId:%d, failed to connect to TDengine(%s)", pContext->vgId, tstrerror(terrno));
}
cDebug("vgId:%d, try connect to TDengine", pContext->vgId);
taos_connect_a(NULL, pContext->user, pContext->pass, pContext->db, 0, doCreateStream, param, NULL);
} else {
cqCreateStream(pContext, pObj);
}
cqCreateStream(pContext, pObj);
}
static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pContext = pContext;
if (pContext->dbConn == NULL) {
cDebug("vgId:%d, create dbConn after 1000 ms", pContext->vgId);
pObj->tmrId = taosTmrStart(cqProcessCreateTimer, 1000, pObj, pContext->tmrCtrl);
return;
}
@ -262,7 +270,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, pObj, NULL);
if (pObj->pStream) {
pContext->num++;
cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
cInfo("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
} else {
cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr);
}
@ -278,7 +286,7 @@ static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
STSchema *pSchema = pObj->pSchema;
if (pObj->pStream == NULL) return;
cTrace("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
cDebug("vgId:%d, id:%d CQ:%s stream result is ready", pContext->vgId, pObj->tid, pObj->sqlStr);
int size = sizeof(SWalHead) + sizeof(SSubmitMsg) + sizeof(SSubmitBlk) + TD_DATA_ROW_HEAD_SIZE + pObj->rowSize;
char *buffer = calloc(size, 1);

View File

@ -11,10 +11,12 @@ AUX_SOURCE_DIRECTORY(src SRC)
IF (TD_LINUX)
ADD_EXECUTABLE(taosd ${SRC})
TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4)
IF (TD_SOMODE_STATIC)
TARGET_LINK_LIBRARIES(taosd mnode taos_static monitor http mqtt tsdb twal vnode cJson lz4 balance sync)
TARGET_LINK_LIBRARIES(taosd taos_static)
ELSE ()
TARGET_LINK_LIBRARIES(taosd mnode taos monitor http mqtt tsdb twal vnode cJson lz4 balance sync)
TARGET_LINK_LIBRARIES(taosd taos)
ENDIF ()
IF (TD_ACCOUNT)
@ -25,6 +27,14 @@ IF (TD_LINUX)
TARGET_LINK_LIBRARIES(taosd grant)
ENDIF ()
IF (TD_MQTT)
TARGET_LINK_LIBRARIES(taosd mqtt)
ENDIF ()
IF (TD_SYNC)
TARGET_LINK_LIBRARIES(taosd balance sync)
ENDIF ()
SET(PREPARE_ENV_CMD "prepare_env_cmd")
SET(PREPARE_ENV_TARGET "prepare_env_target")
ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD}

View File

@ -464,7 +464,30 @@ void dnodeUpdateMnodeEpSetForPeer(SRpcEpSet *pEpSet) {
dInfo("mnode EP list for peer is changed, numOfEps:%d inUse:%d", pEpSet->numOfEps, pEpSet->inUse);
for (int i = 0; i < pEpSet->numOfEps; ++i) {
pEpSet->port[i] -= TSDB_PORT_DNODEDNODE;
dInfo("mnode index:%d %s:%u", i, pEpSet->fqdn[i], pEpSet->port[i])
dInfo("mnode index:%d %s:%u", i, pEpSet->fqdn[i], pEpSet->port[i]);
if (!mnodeIsRunning()) {
if (strcmp(pEpSet->fqdn[i], tsLocalFqdn) == 0 && pEpSet->port[i] == tsServerPort) {
dInfo("mnode index:%d %s:%u should work as mnode", i, pEpSet->fqdn[i], pEpSet->port[i]);
bool find = false;
for (int i = 0; i < tsDMnodeInfos.nodeNum; ++i) {
if (tsDMnodeInfos.nodeInfos[i].nodeId == dnodeGetDnodeId()) {
dInfo("localEp found in mnode infos");
find = true;
break;
}
}
if (!find) {
dInfo("localEp not found in mnode infos, will set into mnode infos");
tstrncpy(tsDMnodeInfos.nodeInfos[tsDMnodeInfos.nodeNum].nodeEp, tsLocalEp, TSDB_EP_LEN);
tsDMnodeInfos.nodeInfos[tsDMnodeInfos.nodeNum].nodeId = dnodeGetDnodeId();
tsDMnodeInfos.nodeNum++;
}
dnodeStartMnode();
}
}
}
tsDMnodeEpSet = *pEpSet;
@ -611,7 +634,7 @@ static bool dnodeReadMnodeInfos() {
}
for (int i = 0; i < size; ++i) {
cJSON* nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
cJSON *nodeInfo = cJSON_GetArrayItem(nodeInfos, i);
if (nodeInfo == NULL) continue;
cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "nodeId");
@ -627,7 +650,7 @@ static bool dnodeReadMnodeInfos() {
goto PARSE_OVER;
}
strncpy(tsDMnodeInfos.nodeInfos[i].nodeEp, nodeEp->valuestring, TSDB_EP_LEN);
}
}
ret = true;

Some files were not shown because too many files have changed in this diff.