diff --git a/.appveyor.yml b/.appveyor.yml index 559431e2f9..fe4816688b 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -3,6 +3,7 @@ os: Visual Studio 2015 environment: matrix: - ARCH: amd64 + - ARCH: x86 clone_folder: c:\dev\TDengine clone_depth: 1 @@ -23,6 +24,7 @@ notifications: - provider: Email to: - sangshuduo@gmail.com + on_build_success: true on_build_failure: true on_build_status_changed: true diff --git a/Jenkinsfile b/Jenkinsfile index edbe11d428..99e70407b1 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,92 +1,109 @@ +import hudson.model.Result +import jenkins.model.CauseOfInterruption +properties([pipelineTriggers([githubPush()])]) +node { + git url: 'https://github.com/taosdata/TDengine.git' +} + + +def abortPreviousBuilds() { + def currentJobName = env.JOB_NAME + def currentBuildNumber = env.BUILD_NUMBER.toInteger() + def jobs = Jenkins.instance.getItemByFullName(currentJobName) + def builds = jobs.getBuilds() + + for (build in builds) { + if (!build.isBuilding()) { + continue; + } + + if (currentBuildNumber == build.getNumber().toInteger()) { + continue; + } + + build.doKill() //doTerm(),doKill(),doTerm() + } +} +//abort previous build +abortPreviousBuilds() +def abort_previous(){ + def buildNumber = env.BUILD_NUMBER as int + if (buildNumber > 1) milestone(buildNumber - 1) + milestone(buildNumber) +} +def pre_test(){ + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + sudo rmtaos + ''' + } + sh ''' + + cd ${WKC} + rm -rf * + cd ${WK} + git reset --hard + git checkout develop + git pull + cd ${WKC} + rm -rf * + mv ${WORKSPACE}/* . + cd ${WK} + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake .. > /dev/null + make > /dev/null + make install > /dev/null + cd ${WKC}/tests + ''' + return 1 +} pipeline { agent none + environment{ WK = '/var/lib/jenkins/workspace/TDinternal' WKC= '/var/lib/jenkins/workspace/TDinternal/community' } - + stages { + + stage('Parallel test stage') { + //only build pr + when { + changeRequest() + } parallel { - stage('pytest') { - agent{label '184'} + stage('python') { + agent{label 'pytest'} steps { + + pre_test() sh ''' - date - cd ${WKC} - git reset --hard - git checkout develop - git pull - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - date - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. > /dev/null - make > /dev/null - make install > /dev/null cd ${WKC}/tests - #./test-all.sh smoke ./test-all.sh pytest date''' } } stage('test_b1') { - agent{label 'master'} - steps { + agent{label 'b1'} + steps { + pre_test() sh ''' - cd ${WKC} - git reset --hard - git checkout develop - git pull - - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - date - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. > /dev/null - make > /dev/null cd ${WKC}/tests - #./test-all.sh smoke ./test-all.sh b1 date''' } } stage('test_crash_gen') { - agent{label "185"} + agent{label "b2"} steps { - sh ''' - cd ${WKC} - git reset --hard - git checkout develop - git pull - - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. 
> /dev/null - make > /dev/null - cd ${WKC}/tests/pytest - ''' + pre_test() catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${WKC}/tests/pytest @@ -109,193 +126,28 @@ pipeline { } stage('test_valgrind') { - agent{label "186"} + agent{label "b3"} steps { + pre_test() + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./valgrind-test.sh 2>&1 > mem-error-out.log + ./handle_val_log.sh + ''' + } sh ''' - cd ${WKC} - git reset --hard - git checkout develop - git pull - - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - date - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. > /dev/null - make > /dev/null - cd ${WKC}/tests/pytest - ./valgrind-test.sh 2>&1 > mem-error-out.log - ./handle_val_log.sh - date cd ${WKC}/tests ./test-all.sh b3 date''' } } - stage('connector'){ - agent{label "release"} - steps{ - sh''' - cd ${WORKSPACE} - git checkout develop - ''' - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WORKSPACE}/tests/gotest - bash batchtest.sh - ''' - } - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker - python3 PythonChecker.py - ''' - } - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/ - mvn clean package assembly:single >/dev/null - java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 - ''' - } - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# - dotnet run - ''' - } - - } - } - stage('arm64_build'){ - agent{label 'arm64'} - steps{ - sh ''' - cd ${WK} - git fetch - git checkout develop - git pull - cd ${WKC} - git fetch - git checkout develop - git pull - git submodule update - cd ${WKC}/packaging - ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0 - - ''' - } - } - stage('arm32_build'){ - agent{label 'arm32'} - steps{ - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WK} - git fetch - git checkout develop - git pull - cd ${WKC} - git fetch - git checkout develop - git pull - git submodule update - cd ${WKC}/packaging - ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0 - - ''' - } - - } - } - } + + } - } - post { - success { - emailext ( - subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' - - - - - - - - - - - - -

- 构建信息
-
-   • 构建名称>>分支:${PROJECT_NAME}
-   • 构建结果: Successful
-   • 构建编号:${BUILD_NUMBER}
-   • 触发用户:${CAUSE}
-   • 变更概要:${CHANGES}
-   • 构建地址:${BUILD_URL}
-   • 构建日志:${BUILD_URL}console
-   • 变更集:${JELLY_SCRIPT}
-
- - ''', - to: "yqliu@taosdata.com,pxiao@taosdata.com", - from: "support@taosdata.com" - ) - } - failure { - emailext ( - subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' - - - - - - - - - - - - -

- 构建信息
-
-   • 构建名称>>分支:${PROJECT_NAME}
-   • 构建结果: Successful
-   • 构建编号:${BUILD_NUMBER}
-   • 触发用户:${CAUSE}
-   • 变更概要:${CHANGES}
-   • 构建地址:${BUILD_URL}
-   • 构建日志:${BUILD_URL}console
-   • 变更集:${JELLY_SCRIPT}
-
- - ''', - to: "yqliu@taosdata.com,pxiao@taosdata.com", - from: "support@taosdata.com" - ) - } - } -} \ No newline at end of file + } + +} diff --git a/cmake/define.inc b/cmake/define.inc index 7da0dfdea4..f22ccb3797 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -49,7 +49,7 @@ IF (TD_LINUX_64) ADD_DEFINITIONS(-D_M_X64) ADD_DEFINITIONS(-D_TD_LINUX_64) MESSAGE(STATUS "linux64 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ADD_DEFINITIONS(-DUSE_LIBICONV) ENDIF () @@ -57,7 +57,7 @@ IF (TD_LINUX_32) ADD_DEFINITIONS(-D_TD_LINUX_32) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "linux32 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -munaligned-access -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_ARM_64) @@ -66,7 +66,7 @@ IF (TD_ARM_64) ADD_DEFINITIONS(-D_TD_ARM_) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "arm64 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_ARM_32) @@ -74,21 +74,21 @@ IF (TD_ARM_32) ADD_DEFINITIONS(-D_TD_ARM_) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "arm32 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-pointer-to-int-cast -Wno-int-to-pointer-cast -Wno-incompatible-pointer-types ") ENDIF () IF (TD_MIPS_64) ADD_DEFINITIONS(-D_TD_MIPS_64_) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips64 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_MIPS_32) ADD_DEFINITIONS(-D_TD_MIPS_32_) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "mips32 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -g3 -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -fPIC -gdwarf-2 -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () IF (TD_APLHINE) @@ -109,8 +109,8 @@ IF (TD_LINUX) MESSAGE(STATUS "set ningsi macro to true") ENDIF () - SET(DEBUG_FLAGS "-O0 -DDEBUG") - SET(RELEASE_FLAGS "-O0 -Wno-unused-variable -Wunused-but-set-variable") + SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG") + SET(RELEASE_FLAGS "-O3 -Wno-error") IF (${COVER} MATCHES "true") MESSAGE(STATUS "Test coverage mode, add extra flags") @@ -129,9 +129,9 @@ IF (TD_DARWIN_64) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) ADD_DEFINITIONS(-DUSE_LIBICONV) MESSAGE(STATUS "darwin64 is defined") - SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") - SET(DEBUG_FLAGS "-O0 -DDEBUG") - SET(RELEASE_FLAGS "-O0") + SET(COMMON_FLAGS "-std=gnu99 -Wall -Werror -Wno-missing-braces -fPIC -msse4.2 
-D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(DEBUG_FLAGS "-O0 -g3 -DDEBUG") + SET(RELEASE_FLAGS "-Og") ENDIF () IF (TD_WINDOWS) @@ -144,7 +144,7 @@ IF (TD_WINDOWS) IF (NOT TD_GODLL) SET(COMMON_FLAGS "/nologo /WX /wd4018 /wd2220 /Oi /Oy- /Gm- /EHsc /MT /GS /Gy /fp:precise /Zc:wchar_t /Zc:forScope /Gd /errorReport:prompt /analyze-") SET(DEBUG_FLAGS "/Zi /W3 /GL") - SET(RELEASE_FLAGS "/W0 /GL") + SET(RELEASE_FLAGS "/W0 /O3 /GL") ENDIF () INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/deps/pthread) diff --git a/cmake/env.inc b/cmake/env.inc index 18a6fea51d..efcc996176 100755 --- a/cmake/env.inc +++ b/cmake/env.inc @@ -41,8 +41,10 @@ SET(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_FLAGS_RELEASE} ${COMMON_FLAGS} ${RELEASE_FL # SET(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} ${COMMON_CXX_FLAGS} ${RELEASE_FLAGS}") IF (${CMAKE_BUILD_TYPE} MATCHES "Debug") + SET(CMAKE_BUILD_TYPE "Debug") MESSAGE(STATUS "Build Debug Version") ELSEIF (${CMAKE_BUILD_TYPE} MATCHES "Release") + SET(CMAKE_BUILD_TYPE "Release") MESSAGE(STATUS "Build Release Version") ELSE () IF (TD_WINDOWS) diff --git a/cmake/version.inc b/cmake/version.inc index 948c7d2d0b..e11e782a78 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.9.0") + SET(TD_VER_NUMBER "2.0.11.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/documentation20/webdocs/assets/connector.png b/documentation20/webdocs/assets/connector.png new file mode 100644 index 0000000000..6030bd73f5 Binary files /dev/null and b/documentation20/webdocs/assets/connector.png differ diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md index f1f2d58f05..766a428f1b 100644 --- a/documentation20/webdocs/markdowndocs/Documentation-ch.md +++ b/documentation20/webdocs/markdowndocs/Documentation-ch.md @@ -84,6 +84,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 - [数据导出](https://www.taosdata.com/cn/documentation20/administrator/#数据导出):从shell按表导出,也可用taosdump工具做各种导出 - [系统监控](https://www.taosdata.com/cn/documentation20/administrator/#系统监控):检查系统现有的连接、查询、流式计算,日志和事件等 - [文件目录结构](https://www.taosdata.com/cn/documentation20/administrator/#文件目录结构):TDengine数据文件、配置文件等所在目录 +- [参数限制和保留关键字](https://www.taosdata.com/cn/documentation20/administrator/#参数限制和保留关键字):TDengine的参数限制和保留关键字列表 ## [TAOS SQL](https://www.taosdata.com/cn/documentation20/taos-sql) @@ -128,4 +129,4 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 ## [培训和FAQ](https://www.taosdata.com/cn/faq) - [FAQ](https://www.taosdata.com/cn/documentation20/faq):常见问题与答案 -- [应用案列](https://www.taosdata.com/cn/blog/?categories=4):一些使用实例来解释如何使用TDengine \ No newline at end of file +- [应用案列](https://www.taosdata.com/cn/blog/?categories=4):一些使用实例来解释如何使用TDengine diff --git a/documentation20/webdocs/markdowndocs/Evaluation-ch.md b/documentation20/webdocs/markdowndocs/Evaluation-ch.md index a92f97a8d9..fa6cec6e48 100644 --- a/documentation20/webdocs/markdowndocs/Evaluation-ch.md +++ b/documentation20/webdocs/markdowndocs/Evaluation-ch.md @@ -59,5 +59,3 @@ TDengine的模块之一是时序数据库。但除此之外,为减少研发的 |要求运维学习成本可控| | | √ |同上。| |要求市场有大量人才储备| √ | | |TDengine作为新一代产品,目前人才市场里面有经验的人员还有限。但是学习成本低,我们作为厂家也提供运维的培训和辅助服务。| -## TDengine 性能指标介绍和验证方法 - diff --git a/documentation20/webdocs/markdowndocs/Getting Started-ch.md b/documentation20/webdocs/markdowndocs/Getting Started-ch.md index beb0c639ae..9f9d3a2ec2 100644 --- a/documentation20/webdocs/markdowndocs/Getting Started-ch.md +++ 
b/documentation20/webdocs/markdowndocs/Getting Started-ch.md @@ -2,7 +2,7 @@ ## 快捷安装 -TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版仅能在Linux系统上安装和运行,后续会支持Windows、MAC OS等系统。如果应用需要在Windows或Mac上运行,目前只能使用TDengine的RESTful接口连接服务器。硬件支持X64,后续会支持ARM、龙芯等CPU系统。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。 +TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。客户端可以在Windows或Linux上安装和运行。任何OS的应用也可以选择RESTful接口连接服务器taosd。CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。 ### 通过源码安装 @@ -14,29 +14,11 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版 ### 通过安装包安装 -服务器部分,我们提供三种安装包,您可以根据需要选择。TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。 +TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。服务端安装包包含客户端和连接器,我们提供三种安装包,您可以根据需要选择: - - -客户端部分,Linux安装包如下: - -- TDengine-client-2.0.0.0-Linux-x64.tar.gz (3.4M) - -报警模块的Linux安装包如下(请参考[报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)): - -- TDengine-alert-2.0.0-Linux-x64.tar.gz (8.1M) - -目前,TDengine只支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装。其他linux系统的支持正在开发中。用`which systemctl`命令来检测系统中是否存在`systemd`包: - -```cmd -which systemctl -``` - -如果系统中不存在`systemd`包,请考虑[通过源码安装](#通过源码安装)TDengine。 +- TDengine-server-2.0.10.0-Linux-x64.rpm (4.2M) +- TDengine-server-2.0.10.0-Linux-x64.deb (2.7M) +- TDengine-server-2.0.10.0-Linux-x64.tar.gz (4.5M) 具体的安装过程,请参见TDengine多种安装包的安装和卸载。 @@ -44,28 +26,39 @@ which systemctl 安装成功后,用户可使用`systemctl`命令来启动TDengine的服务进程。 -```cmd -systemctl start taosd +```bash +$ systemctl start taosd ``` 检查服务是否正常工作。 -```cmd -systemctl status taosd +```bash +$ systemctl status taosd ``` 如果TDengine服务正常工作,那么您可以通过TDengine的命令行程序`taos`来访问并体验TDengine。 - + **注意:** - + - systemctl命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo - 为更好的获得产品反馈,改善产品,TDengine会采集基本的使用信息,但您可以修改系统配置文件taos.cfg里的配置参数telemetryReporting, 将其设为0,就可将其关闭。 +- TDengine采用FQDN(一般就是hostname)作为节点的ID,为保证正常运行,需要给运行taosd的服务器配置好hostname,在客户端应用运行的机器配置好DNS服务或hosts文件,保证FQDN能够解析。 + +* TDengine 支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装,用`which systemctl`命令来检测系统中是否存在`systemd`包: + + ```bash + $ which systemctl + ``` + + 如果系统中不支持systemd,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。 + + ## TDengine命令行程序 执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。 -```cmd -taos +```bash +$ taos ``` 如果TDengine终端连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印错误消息出来(请参考[FAQ](https://www.taosdata.com/cn/faq/)来解决终端连接服务端失败的问题)。TDengine终端的提示符号如下: @@ -105,15 +98,15 @@ Query OK, 2 row(s) in set (0.001700s) 示例: -```cmd -taos -h 192.168.0.1 -s "use db; show tables;" +```bash +$ taos -h 192.168.0.1 -s "use db; show tables;" ``` ### 运行SQL命令脚本 TDengine终端可以通过`source`命令来运行SQL命令脚本. 
-``` +```mysql taos> source ; ``` @@ -128,8 +121,8 @@ taos> source ; 启动TDengine的服务,在Linux终端执行taosdemo -``` -> taosdemo +```bash +$ taosdemo ``` 该命令将在数据库test下面自动创建一张超级表meters,该超级表下有1万张表,表名为"t0" 到"t9999",每张表有10万条记录,每条记录有 (f1, f2, f3)三个字段,时间戳从"2017-07-14 10:40:00 000" 到"2017-07-14 10:41:39 999",每张表带有标签areaid和loc, areaid被设置为1到10, loc被设置为"beijing"或者“shanghai"。 @@ -140,33 +133,92 @@ taos> source ; - 查询超级表下记录总条数: -``` -taos>select count(*) from test.meters; +```mysql +taos> select count(*) from test.meters; ``` - 查询10亿条记录的平均值、最大值、最小值等: -``` -taos>select avg(f1), max(f2), min(f3) from test.meters; +```mysql +taos> select avg(f1), max(f2), min(f3) from test.meters; ``` - 查询loc="beijing"的记录总条数: -``` -taos>select count(*) from test.meters where loc="beijing"; +```mysql +taos> select count(*) from test.meters where loc="beijing"; ``` - 查询areaid=10的所有记录的平均值、最大值、最小值等: -``` -taos>select avg(f1), max(f2), min(f3) from test.meters where areaid=10; +```mysql +taos> select avg(f1), max(f2), min(f3) from test.meters where areaid=10; ``` - 对表t10按10s进行平均值、最大值和最小值聚合统计: -``` -taos>select avg(f1), max(f2), min(f3) from test.t10 interval(10s); +```mysql +taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); ``` **Note:** taosdemo命令本身带有很多选项,配置表的数目、记录条数等等,请执行 `taosdemo --help`详细列出。您可以设置不同参数进行体验。 + + +## 客户端和报警模块 + +如果客户端和服务端运行在不同的电脑上,可以单独安装客户端。Linux和Windows安装包如下: + +- TDengine-client-2.0.10.0-Linux-x64.tar.gz(3.0M) +- TDengine-client-2.0.10.0-Windows-x64.exe(2.8M) +- TDengine-client-2.0.10.0-Windows-x86.exe(2.8M) + +报警模块的Linux安装包如下(请参考[报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)): + +- TDengine-alert-2.0.10.0-Linux-x64.tar.gz (8.1M) + + + +## **支持平台列表** + +### TDengine服务器支持的平台列表 + +| | **CentOS** **6/7/8** | **Ubuntu** **16/18/20** | **Other Linux** | **统信****UOS** | **银河****/****中标麒麟** | **凝思** **V60/V80** | +| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | +| X64 | ● | ● | | ○ | ● | ● | +| 树莓派ARM32 | | ● | ● | | | | +| 龙芯MIPS64 | | | ● | | | | +| 鲲鹏 ARM64 | | ○ | ○ | | ● | | +| 申威 Alpha64 | | | ○ | ● | | | +| 飞腾ARM64 | | ○优麒麟 | | | | | +| 海光X64 | ● | ● | ● | ○ | ● | ● | +| 瑞芯微ARM64/32 | | | ○ | | | | +| 全志ARM64/32 | | | ○ | | | | +| 炬力ARM64/32 | | | ○ | | | | +| TI ARM32 | | | ○ | | | | + +注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 + + + +### TDengine客户端和连接器支持的平台列表 + +目前TDengine的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。 + +对照矩阵如下: + +| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS ** **龙芯** | **Alpha ** **申威** | **X64 ** **海光** | +| ----------- | --------------- | --------- | --------- | --------------- | --------- | --------- | ------------------- | -------------------- | ------------------ | +| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | +| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | +| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | +| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● | +| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | +| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | +| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | +| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● | + +注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 + +请跳转到 [连接器 ](https://www.taosdata.com/cn/documentation/connector)查看更详细的信息。 + diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/webdocs/markdowndocs/Model-ch.md index 
c0b64bde6e..27105bdb90 100644 --- a/documentation20/webdocs/markdowndocs/Model-ch.md +++ b/documentation20/webdocs/markdowndocs/Model-ch.md @@ -6,16 +6,16 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个 ## 创建库 -不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: +不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: -```cmd -CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4; +```mysql +CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; ``` -上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4。详细的语法及参数请见TAOS SQL +上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见TAOS SQL 创建库之后,需要使用SQL命令USE将当前库切换过来,例如: -```cmd +```mysql USE power; ``` @@ -28,7 +28,7 @@ USE power; ## 创建超级表 一个物联网系统,往往存在多种类型的设备,比如对于电网,存在智能电表、变压器、母线、开关等等。为便于多表之间的聚合,使用TDengine, 需要对每个类型的数据采集点创建一超级表。以表一中的智能电表为例,可以使用如下的SQL命令创建超级表: -```cmd +```mysql CREATE TABLE meters (ts timestamp, current float, voltage int, phase float) TAGS (location binary(64), groupdId int); ``` 与创建普通表一样,创建表时,需要提供表名(示例中为meters),表结构Schema,即数据列的定义。第一列必须为时间戳(示例中为ts),其他列为采集的物理量(示例中为current, voltage, phase),数据类型可以为整型、浮点型、字符串等。除此之外,还需要提供标签的schema (示例中为location, groupId),标签的数据类型可以为整型、浮点型、字符串等。采集点的静态属性往往可以作为标签,比如采集点的地理位置、设备型号、设备组ID、管理员ID等等。标签的schema可以事后增加、删除、修改。具体定义以及细节请见 TAOS SQL 一节。 diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 760ebae4fc..8ed497fe21 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -2,7 +2,7 @@ 本文档说明TAOS SQL支持的语法规则、主要查询功能、支持的SQL查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的SQL语言的基础。 -TAOS SQL是用户对TDengine进行数据写入和查询的主要工具。TAOS SQL为了便于用户快速上手,在一定程度上提供类似于标准SQL类似的风格和模式。严格意义上,TAOS SQL并不是也不试图提供SQL标准的语法。此外,由于TDengine针对的时序性结构化数据不提供修改和更新功能,因此在TAO SQL中不提供数据更新和数据删除的相关功能。 +TAOS SQL是用户对TDengine进行数据写入和查询的主要工具。TAOS SQL为了便于用户快速上手,在一定程度上提供类似于标准SQL类似的风格和模式。严格意义上,TAOS SQL并不是也不试图提供SQL标准的语法。此外,由于TDengine针对的时序性结构化数据不提供删除功能,因此在TAO SQL中不提供数据删除的相关功能。 本章节SQL语法遵循如下约定: @@ -57,18 +57,29 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ## 数据库管理 - **创建数据库** - ```mysql - CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep]; - ``` - 说明: - - 1) `KEEP`是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据; - 2) 数据库名最大长度为33; - 3) 一条SQL 语句的最大长度为65480个字符; - 4) 数据库还有更多与存储相关的配置参数,请参见[系统管理](../administrator/#服务端配置)。 + + ```mysql + CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1]; + ``` + 说明: + + 1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据; + + 2) UPDATE 标志数据库支持更新相同时间戳数据; + + 3) 数据库名最大长度为33; + 4) 一条SQL 语句的最大长度为65480个字符; + 5) 数据库还有更多与存储相关的配置参数,请参见系统管理。 + +- **显示系统当前参数** + + ```mysql + SHOW VARIABLES; + ``` - **使用数据库** + ```mysql USE db_name; ``` @@ -143,6 +154,12 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic 显示当前数据库下的所有数据表信息。说明:可在like中使用通配符进行名称的匹配。 通配符匹配:1)’%’ (百分号)匹配0到任意个字符;2)’_’下划线匹配一个字符。 +- **在线修改显示字符宽度** + + ```mysql + SET MAX_BINARY_DISPLAY_WIDTH ; + ``` + - **获取表的结构信息** ```mysql @@ -1022,3 +1039,21 @@ SELECT AVG(current),MAX(current),LEASTSQUARES(current, start_val, step_val), PER - 标签最多允许128个,可以0个,标签总长度不超过16k个字符 - SQL语句最大长度65480个字符,但可通过系统配置参数maxSQLLength修改,最长可配置为1M - 库的数目,超级表的数目、表的数目,系统不做限制,仅受系统资源限制 + + + +## TAOS SQL其他约定 + +**group by的限制** + +TAOS SQL支持对标签、tbname进行group by操作,也支持普通列进行group 
by,前提是:仅限一列且该列的唯一值小于10万个。 + +**join操作的限制** + +TAOS SQL支持表之间按主键时间戳来join两张表的列,暂不支持两个表之间聚合后的四则运算。 + +**is not null与不为空的表达式适用范围** + +is not null支持所有类型的列。不为空的表达式为 <>"",仅对非数值类型的列适用。 + + diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index f54c6b91a1..2c470a270d 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -80,6 +80,12 @@ TDengine集群的节点数必须大于等于副本数,否则创建表时将报 TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修改配置参数,以满足不同场景的需求。配置文件的缺省位置在/etc/taos目录,可以通过taosd命令行执行参数-c指定配置文件目录。比如taosd -c /home/user来指定配置文件位于/home/user这个目录。 +另外可以使用 “-C” 显示当前服务器配置参数: + +``` +taosd -C +``` + 下面仅仅列出一些重要的配置参数,更多的参数请看配置文件里的说明。各个参数的详细介绍及作用请看前述章节,而且这些参数的缺省配置都是工作的,一般无需设置。**注意:配置修改后,需要重启*taosd*服务才能生效。** - firstEp: taosd启动时,主动连接的集群中首个dnode的end point, 默认值为localhost:6030。 @@ -96,6 +102,8 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 - maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。 - telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。 - stream: 是否启用连续查询(流计算功能),0表示不允许,1表示允许。 默认值:1。 +- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为字节。 +- ratioOfQueryCores: 设置查询线程的最大数量。最小值0 表示只有1个查询线程;最大值2表示最大建立2倍CPU核数的查询线程。默认为1,表示最大和CPU核数相等的查询线程。该值可以为小数,即0.5表示最大建立CPU核数一半的查询线程。 **注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030都6042共13个端口,而且必须TCP和UDP都打开。 @@ -151,11 +159,20 @@ ALTER DNODE ## 客户端配置 -TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 +TDengine系统的前台交互客户端应用程序为taos,以及应用驱动,它与taosd共享同一个配置文件taos.cfg。运行taos时,使用参数-c指定配置文件目录,如taos -c /home/cfg,表示使用/home/cfg/目录下的taos.cfg配置文件中的参数,缺省目录是/etc/taos。更多taos的使用方法请见[Shell命令行程序](https://www.taosdata.com/cn/documentation/administrator/#_TDengine_Shell命令行程序)。本节主要说明 taos 客户端应用在配置文件 taos.cfg 文件中使用到的参数。 + +**2.0.10.0 之后版本支持命令行以下参数显示当前客户端参数的配置** + +```bash +taos -C 或 taos --dump-config +``` 客户端配置参数 - firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。 + +- secondEp: taos 启动时,如果 firstEp 连不上,将尝试连接 secondEp。 + - locale 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 @@ -211,15 +228,15 @@ TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同 均是合法的设置东八区时区的格式。 时区的设置对于查询和写入SQL语句中非Unix时间戳的内容(时间戳字符串、关键词now的解析)产生影响。例如: - ``` + ```sql SELECT count(*) FROM table_name WHERE TS<'2019-04-11 12:01:08'; ``` 在东八区,SQL语句等效于 - ``` + ```sql SELECT count(*) FROM table_name WHERE TS<1554955268000; ``` 在UTC时区,SQL语句等效于 - ``` + ```sql SELECT count(*) FROM table_name WHERE TS<1554984068000; ``` 为了避免使用字符串时间格式带来的不确定性,也可以直接使用Unix时间戳。此外,还可以在SQL语句中使用带有时区的时间戳字符串,例如:RFC3339格式的时间戳字符串,2013-04-12T15:52:01.123+08:00或者ISO-8601格式时间戳字符串2013-04-12T15:52:01.123+0800。上述两个字符串转化为Unix时间戳不受系统所在时区的影响。 @@ -229,42 +246,42 @@ TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同 - maxBinaryDisplayWidth Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项。 - + ## 用户管理 系统管理员可以在CLI界面里添加、删除用户,也可以修改密码。CLI里SQL语法如下: -``` +```sql CREATE USER PASS <'password'>; ``` 创建用户,并指定用户名和密码,密码需要用单引号引起来,单引号为英文半角 -``` +```sql DROP USER ; ``` 删除用户,限root用户使用 -``` +```sql ALTER USER PASS <'password'>; ``` 修改用户密码, 为避免被转换为小写,密码需要用单引号引用,单引号为英文半角 -``` +```sql ALTER USER PRIVILEGE ; ``` 修改用户权限为:super/write/read,不需要添加单引号 -``` +```mysql SHOW USERS; ``` 显示所有用户 - + **注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身 ## 数据导入 @@ -312,12 +329,11 @@ taos> DESCRIBE d1001 ``` 那么可以用如下命令导入数据 -``` +```mysql taos> insert into d1001 
file '~/data.csv'; Query OK, 9 row(s) affected (0.004763s) ``` - **taosdump工具导入** TDengine提供了方便的数据库导入导出工具taosdump。用户可以将taosdump从一个系统导出的数据,导入到其他系统中。具体使用方法,请参见博客:TDengine DUMP工具使用指南 @@ -330,7 +346,7 @@ TDengine提供了方便的数据库导入导出工具taosdump。用户可以将t 如果用户需要导出一个表或一个STable中的数据,可在shell中运行 -``` +```mysql select * from >> data.csv; ``` @@ -344,37 +360,37 @@ TDengine提供了方便的数据库导出工具taosdump。用户可以根据需 系统管理员可以从CLI查询系统的连接、正在进行的查询、流式计算,并且可以关闭连接、停止正在进行的查询和流式计算。CLI里SQL语法如下: -``` +```mysql SHOW CONNECTIONS; ``` 显示数据库的连接,其中一列显示ip:port, 为连接的IP地址和端口号。 -``` +```mysql KILL CONNECTION ; ``` 强制关闭数据库连接,其中的connection-id是SHOW CONNECTIONS中显示的第一列的数字。 -``` +```mysql SHOW QUERIES; ``` 显示数据查询,其中第一列显示的以冒号隔开的两个数字为query-id,为发起该query应用连接的connection-id和查询次数。 -``` +```mysql KILL QUERY ; ``` 强制关闭数据查询,其中query-id是SHOW QUERIES中显示的 connection-id:query-no字串,如“105:2”,拷贝粘贴即可。 -``` +```mysql SHOW STREAMS; ``` 显示流式计算,其中第一列显示的以冒号隔开的两个数字为stream-id, 为启动该stream应用连接的connection-id和发起stream的次数。 -``` +```mysql KILL STREAM ; ``` @@ -413,3 +429,65 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。 + +## TDengine参数限制与保留关键字 + +- 数据库名:不能包含“.”以及特殊字符,不能超过32个字符 +- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过192个字符 +- 表的列名:不能包含特殊字符,不能超过64个字符 +- 表的列数:不能超过1024列 +- 记录的最大长度:包括时间戳8 byte,不能超过16KB +- 单条SQL语句默认最大字符串长度:65480 byte +- 数据库副本数:不能超过3 +- 用户名:不能超过20个byte +- 用户密码:不能超过15个byte +- 标签(Tags)数量:不能超过128个 +- 标签的总长度:不能超过16Kbyte +- 记录条数:仅受存储空间限制 +- 表的个数:仅受节点个数限制 +- 库的个数:仅受节点个数限制 +- 单个库上虚拟节点个数:不能超过64个 + + + +目前TDengine有将近200个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable名、数据列名及标签列名等。这些关键字列表如下: + +| 关键字列表 | | | | | +| ---------- | ----------- | ------------ | ---------- | --------- | +| ABLOCKS | CONNECTION | GT | MINUS | SHOW | +| ABORT | CONNECTIONS | ID | MNODES | SLASH | +| ACCOUNT | COPY | IF | MODULES | SLIDING | +| ACCOUNTS | COUNT | IGNORE | NCHAR | SMALLINT | +| ADD | CREATE | IMMEDIATE | NE | SPREAD | +| AFTER | CTIME | IMPORT | NONE | STAR | +| ALL | DATABASE | IN | NOT | STATEMENT | +| ALTER | DATABASES | INITIALLY | NOTNULL | STDDEV | +| AND | DAYS | INSERT | NOW | STREAM | +| AS | DEFERRED | INSTEAD | OF | STREAMS | +| ASC | DELIMITERS | INTEGER | OFFSET | STRING | +| ATTACH | DESC | INTERVAL | OR | SUM | +| AVG | DESCRIBE | INTO | ORDER | TABLE | +| BEFORE | DETACH | IP | PASS | TABLES | +| BEGIN | DIFF | IS | PERCENTILE | TAG | +| BETWEEN | DIVIDE | ISNULL | PLUS | TAGS | +| BIGINT | DNODE | JOIN | PRAGMA | TBLOCKS | +| BINARY | DNODES | KEEP | PREV | TBNAME | +| BITAND | DOT | KEY | PRIVILEGE | TIMES | +| BITNOT | DOUBLE | KILL | QUERIES | TIMESTAMP | +| BITOR | DROP | LAST | QUERY | TINYINT | +| BOOL | EACH | LE | RAISE | TOP | +| BOTTOM | END | LEASTSQUARES | REM | TRIGGER | +| BY | EQ | LIKE | REPLACE | UMINUS | +| CACHE | EXISTS | LIMIT | REPLICA | UPLUS | +| CASCADE | EXPLAIN | LINEAR | RESET | USE | +| CHANGE | FAIL | LOCAL | RESTRICT | USER | +| CLOG | FILL | LP | ROW | USERS | +| CLUSTER | FIRST | LSHIFT | ROWS | USING | +| COLON | FLOAT | LT | RP | VALUES | +| COLUMN | FOR | MATCH | RSHIFT | VARIABLE | +| COMMA | FROM | MAX | SCORES | VGROUPS | +| COMP | GE | METRIC | SELECT | VIEW | +| CONCAT | GLOB | METRICS | SEMI | WAVG | +| CONFIGS | GRANTS | MIN | SET | WHERE | +| CONFLICT | GROUP | | | | + diff --git a/documentation20/webdocs/markdowndocs/advanced features-ch.md b/documentation20/webdocs/markdowndocs/advanced features-ch.md index ed02af25f2..cdd9ee8104 100644 --- a/documentation20/webdocs/markdowndocs/advanced features-ch.md +++ b/documentation20/webdocs/markdowndocs/advanced features-ch.md @@ -60,7 +60,7 @@ create table avg_vol 
as select avg(voltage) from meters interval(1m) sliding(30s 会自动创建一个名为 `avg_vol` 的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句, 并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。 例如: -```shell +```mysql taos> select * from avg_vol; ts | avg_voltage_ | =================================================== @@ -72,14 +72,13 @@ taos> select * from avg_vol; 需要注意,查询时间窗口的最小值是10毫秒,没有时间窗口范围的上限。 - 此外,TDengine还支持用户指定连续查询的起止时间。 如果不输入开始时间,连续查询将从第一条原始数据所在的时间窗口开始; 如果没有输入结束时间,连续查询将永久运行; 如果用户指定了结束时间,连续查询在系统时间达到指定的时间以后停止运行。 比如使用下面的SQL创建的连续查询将运行一小时,之后会自动停止。 -```sql +```mysql create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s); ``` diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md index db479417c5..60ac6e4c2e 100644 --- a/documentation20/webdocs/markdowndocs/cluster-ch.md +++ b/documentation20/webdocs/markdowndocs/cluster-ch.md @@ -137,6 +137,16 @@ DROP DNODE "fqdn:port"; 其中fqdn是被删除的节点的FQDN,port是其对外服务器的端口号 +**【注意】** + + - 一个数据节点一旦被drop之后,不能重新加入集群。需要将此节点重新部署(清空数据文件夹)。集群在完成drop dnode操作之前,会将该dnode的数据迁移走。 + + - 请注意 drop dnode 和 停止taosd进程是两个不同的概念,不要混淆:因为删除dnode之前要执行迁移数据的操作,因此被删除的dnode必须保持在线状态。待删除操作结束之后,才能停止taosd进程。 + + - 一个数据节点被drop之后,其他节点都会感知到这个dnodeID的删除操作,任何集群中的节点都不会再接收此dnodeID的请求。 + + - dnodeID的是集群自动分配的,不得人工指定。它在生成时递增的,不会重复。 + ### 查看数据节点 执行CLI程序taos,使用root账号登录进TDengine系统,执行: diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index 69c560bbc4..c04dc9891e 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -1,8 +1,28 @@ # 连接器 -TDengine提供了丰富的应用程序开发接口,其中包括C/C++、JAVA、Python、RESTful、Go等,便于用户快速开发应用。 +TDengine提供了丰富的应用程序开发接口,其中包括C/C++、C# 、Java、Python、Go、Node.js、RESTful 等,便于用户快速开发应用。 -注意:所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 +![image-connecotr](../assets/connector.png) + +目前TDengine的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。对照矩阵如下: + +| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS 龙芯** | **Alpha 申威** | **X64 海光** | +| ----------- | --------------- | --------------- | --------------- | --------------- | --------- | --------- | --------------- | ---------------- | -------------- | +| **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | +| **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | +| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | +| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● | +| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | +| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | +| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | +| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● | ● | + +其中 ● 表示经过官方测试验证, ○ 表示非官方测试验证。 + +注意: + +* 所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 +* 升级到TDengine到2.0.8.0版本的用户,必须更新JDBC连接TDengine必须升级taos-jdbcdriver到2.0.12及以上。 ## C/C++ Connector @@ -285,7 +305,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 ### 安装准备 * 已安装TDengine, 如果客户端在Windows上,需要安装Windows 版本的TDengine客户端 [(Windows TDengine 客户端安装)][4] * 已安装python 2.7 or >= 3.4 -* 已安装pip +* 已安装pip 或 pip3 ### Python客户端安装 @@ -297,7 +317,7 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 或 -​ `pip install src/connector/python/linux/python3/` +​ `pip3 
install src/connector/python/linux/python3/` #### Windows 在已安装Windows TDengine 客户端的情况下, 将文件"C:\TDengine\driver\taos.dll" 拷贝到 "C:\windows\system32" 目录下, 然后进入Windwos cmd 命令行界面 @@ -457,13 +477,13 @@ HTTP请求的BODY里就是一个完整的SQL语句,SQL语句中的数据表应 使用curl通过自定义身份认证方式来发起一个HTTP Request,语法如下: -``` +```bash curl -H 'Authorization: Basic ' -d '' :/rest/sql ``` 或者 -``` +```bash curl -u username:password -d '' :/rest/sql ``` @@ -473,7 +493,7 @@ curl -u username:password -d '' :/rest/sql 返回值为JSON格式,如下: -``` +```json { "status": "succ", "head": ["Time Stamp","current", …], @@ -496,7 +516,7 @@ curl -u username:password -d '' :/rest/sql HTTP请求中需要带有授权码``,用于身份识别。授权码通常由管理员提供,可简单的通过发送`HTTP GET`请求来获取授权码,操作如下: -``` +```bash curl http://:6041/rest/login// ``` @@ -510,13 +530,13 @@ curl http://:6041/rest/login// 获取授权码示例: -``` +```bash curl http://192.168.0.1:6041/rest/login/root/taosdata ``` 返回值: -``` +```json { "status": "succ", "code": 0, @@ -528,12 +548,12 @@ curl http://192.168.0.1:6041/rest/login/root/taosdata - 在demo库里查询表d1001的所有记录: -``` +```bash curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sql ``` 返回值: -``` +```json { "status": "succ", "head": ["Time Stamp","current","voltage","phase"], @@ -547,12 +567,12 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001 - 创建库demo: -``` +```bash curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 192.168.0.1:6041/rest/sql ``` 返回值: -``` +```json { "status": "succ", "head": ["affected_rows"], @@ -567,13 +587,13 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'create database demo' 19 HTTP请求URL采用`sqlt`时,返回结果集的时间戳将采用Unix时间戳格式表示,例如 -``` +```bash curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001' 192.168.0.1:6041/rest/sqlt ``` 返回值: -``` +```json { "status": "succ", "head": ["column1","column2","column3"], @@ -588,13 +608,13 @@ curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.d1001 #### 结果集采用UTC时间字符串 HTTP请求URL采用`sqlutc`时,返回结果集的时间戳将采用UTC时间字符串表示,例如 -``` +```bash curl -H 'Authorization: Basic cm9vdDp0YW9zZGF0YQ==' -d 'select * from demo.t1' 192.168.0.1:6041/rest/sqlutc ``` 返回值: -``` +```json { "status": "succ", "head": ["column1","column2","column3"], @@ -664,30 +684,52 @@ import ( _ "github.com/taosdata/driver-go/taosSql" ) ``` +**建议Go版本是1.13或以上,并开启模块支持:** + +``` +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct +``` + ### 常用API -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - 该API用来打开DB,返回一个类型为*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine - - **注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。 另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中。所以,sql.Open本身特别轻量。 - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` + +- sql.Open(DRIVER_NAME string, dataSourceName string) *DB` + + 该API用来打开DB,返回一个类型为*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine + + **注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。 另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中。所以,sql.Open本身特别轻量。 + +- `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` sql.Open内置的方法,用来执行非查询相关SQL - - -* `func (db *DB) Query(query string, args 
...interface{}) (*Rows, error)` - + +- `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` + sql.Open内置的方法,用来执行查询语句 - - + +- `func (db *DB) Prepare(query string) (*Stmt, error)` + + sql.Open内置的方法,Prepare creates a prepared statement for later queries or executions. + +- `func (s *Stmt) Exec(args ...interface{}) (Result, error)` + + sql.Open内置的方法,executes a prepared statement with the given arguments and returns a Result summarizing the effect of the statement. + +- `func (s *Stmt) Query(args ...interface{}) (*Rows, error)` + + sql.Open内置的方法,Query executes a prepared query statement with the given arguments and returns the query results as a *Rows. + +- `func (s *Stmt) Close() error` + + sql.Open内置的方法,Close closes the statement. + ## Node.js Connector TDengine 同时也提供了node.js 的连接器。用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。[具体安装步骤如下](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs): 首先,通过[npm](https://www.npmjs.com/)安装node.js 连接器. -```cmd +```bash npm install td2.0-connector ``` 我们建议用户使用npm 安装node.js连接器。如果您没有安装npm, 可以将*src/connector/nodejs/*拷贝到您的nodejs 项目目录下 @@ -701,32 +743,6 @@ npm install td2.0-connector - `make` - c语言编译器比如[GCC](https://gcc.gnu.org) -### macOS - -- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) - -- Xcode - - - 然后通过Xcode安装 - - ``` - Command Line Tools - ``` - - 在 - ``` - Xcode -> Preferences -> Locations - ``` - - 目录下可以找到这个工具。或者在终端里执行 - - ``` - xcode-select --install - ``` - - - - 该步执行后 `gcc` 和 `make`就被安装上了 - ### Windows #### 安装方法1 diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/webdocs/markdowndocs/connector-java-ch.md index e3743f4f9d..f17c74bdf3 100644 --- a/documentation20/webdocs/markdowndocs/connector-java-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-java-ch.md @@ -1,5 +1,7 @@ # Java Connector +Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。 + TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 @@ -24,7 +26,8 @@ TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一 | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | --- | --- | --- | -| 2.0.4 | 2.0.0.x 及以上 | 1.8.x | +| 2.0.12 及以上 | 2.0.8.0 及以上 | 1.8.x | +| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | | 1.0.3 | 1.6.1.x 及以上 | 1.8.x | | 1.0.2 | 1.6.1.x 及以上 | 1.8.x | | 1.0.1 | 1.6.1.x 及以上 | 1.8.x | diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index 61f70c5962..d89cdd4c92 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -117,7 +117,17 @@ Connection = DriverManager.getConnection(url, properties); -## 16. 怎么报告问题? +## 16. 如何进行数据迁移? + +TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A移动机器B时,注意如下两件事: + +- 2.0.0.0 至 2.0.6.x 的版本,重新配置机器B的hostname为机器A的hostname +- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。 +- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 + + + +## 17. 怎么报告问题? 如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包: 1. 
/var/log/taos diff --git a/documentation20/webdocs/markdowndocs/insert-ch.md b/documentation20/webdocs/markdowndocs/insert-ch.md index 77ba596d4e..96e7a4613b 100644 --- a/documentation20/webdocs/markdowndocs/insert-ch.md +++ b/documentation20/webdocs/markdowndocs/insert-ch.md @@ -24,7 +24,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为1M)。 - TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程切频繁切换,带来额外开销。 -- 对同一张表,如果新插入记录的时间戳已经存在,新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。 +- 对同一张表,如果新插入记录的时间戳已经存在,默认(没有使用 UPDATE 1 创建数据库)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还老的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days配置为2,那么无法写入比当前时间还晚2天的数据。 ## Prometheus直接写入 @@ -37,7 +37,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 对应的TDengine版本。因为用到了TDengine的客户端动态链接库,因此需要安装好和服务端相同版本的TDengine程序;比如服务端版本是TDengine 2.0.0, 则在bailongma所在的linux服务器(可以与TDengine在同一台服务器,或者不同服务器) Bailongma项目中有一个文件夹blm_prometheus,存放了prometheus的写入API程序。编译过程如下: -``` +```bash cd blm_prometheus go build ``` @@ -79,7 +79,7 @@ blm_prometheus对prometheus提供服务的端口号。 ### 启动示例 通过以下命令启动一个blm_prometheus的API服务 -``` +```bash ./blm_prometheus -port 8088 ``` 假设blm_prometheus所在服务器的IP地址为"10.1.2.3",则在prometheus的配置文件中部分增加url为 @@ -107,7 +107,7 @@ prometheus产生的数据格式如下: } ``` 其中,apiserver_request_latencies_bucket为prometheus采集的时序数据的名称,后面{}中的为该时序数据的标签。blm_prometheus会以时序数据的名称在TDengine中自动创建一个超级表,并将{}中的标签转换成TDengine的tag值,Timestamp作为时间戳,value作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。 -``` +```mysql use prometheus; select * from apiserver_request_latencies_bucket; ``` @@ -124,7 +124,7 @@ select * from apiserver_request_latencies_bucket; Bailongma项目中有一个文件夹blm_telegraf,存放了Telegraf的写入API程序。编译过程如下: -``` +```bash cd blm_telegraf go build ``` @@ -175,7 +175,7 @@ blm_telegraf对telegraf提供服务的端口号。 ### 启动示例 通过以下命令启动一个blm_telegraf的API服务 -``` +```bash ./blm_telegraf -host 127.0.0.1 -port 8089 ``` @@ -213,7 +213,7 @@ telegraf产生的数据格式如下: 其中,name字段为telegraf采集的时序数据的名称,tags字段为该时序数据的标签。blm_telegraf会以时序数据的名称在TDengine中自动创建一个超级表,并将tags字段中的标签转换成TDengine的tag值,Timestamp作为时间戳,fields字段中的值作为该时序数据的值。因此在TDengine的客户端中,可以通过以下指令查到这个数据是否成功写入。 -``` +```mysql use telegraf; select * from cpu; ``` diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 8a68d02dfd..8c2ef19382 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -29,8 +29,12 @@ # number of threads per CPU core # numOfThreadsPerCore 1.0 -# the proportion of total threads responsible for query -# ratioOfQueryThreads 0.5 +# the proportion of total CPU cores available for query processing +# 2.0: the query threads will be set to double of the CPU cores. +# 1.0: all CPU cores are available for query processing [default]. +# 0.5: only half of the CPU cores are available for query. +# 0.0: only one core available. 
+# tsRatioOfQueryCores 1.0 # number of management nodes in the system # numOfMnodes 3 @@ -265,5 +269,5 @@ # enable/disable stream (continuous query) # stream 1 -# only 50% CPU resources will be used in query processing -# halfCoresForQuery 0 +# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode +# retrieveBlockingModel 0 diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index d6dccf7045..6f481a6d6c 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -9,6 +9,9 @@ set -e verMode=edge pagMode=full +iplist="" +serverFqdn="" + # -----------------------Variables definition--------------------- script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory @@ -227,6 +230,157 @@ function install_header() { ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" + return + fi + #echo -e -n "$(hostnamectl status --static)" + #echo -e -n "$(hostnamectl status --transient)" + #echo -e -n "$(hostnamectl status --pretty)" + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! 
-z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname -f) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi + done + fi +} + function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : @@ -247,7 +401,9 @@ function install_config() { if [ "$interactiveFqdn" == "no" ]; then return 0 - fi + fi + + local_fqdn_check #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" @@ -446,6 +602,9 @@ function install_service_on_systemd() { ${csudo} bash -c "echo '[Service]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'Type=simple' >> ${taosd_service_config}" ${csudo} bash -c "echo 'ExecStart=/usr/bin/taosd' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'ExecStartPre=/usr/local/taos/bin/setDelay.sh' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'ExecStartPost=/usr/local/taos/bin/resetDelay.sh' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'ExecStopPost=/usr/local/taos/bin/resetDelay.sh' >> ${taosd_service_config}" ${csudo} bash -c "echo 'LimitNOFILE=infinity' >> ${taosd_service_config}" ${csudo} bash -c "echo 'LimitNPROC=infinity' >> ${taosd_service_config}" ${csudo} bash -c "echo 'LimitCORE=infinity' >> ${taosd_service_config}" @@ -454,6 +613,7 @@ function install_service_on_systemd() { ${csudo} bash -c "echo 'Restart=always' >> ${taosd_service_config}" ${csudo} bash -c "echo 'StartLimitBurst=3' >> ${taosd_service_config}" ${csudo} bash -c "echo 'StartLimitInterval=60s' >> ${taosd_service_config}" + #${csudo} bash -c "echo 'StartLimitIntervalSec=60s' >> ${taosd_service_config}" ${csudo} bash -c "echo >> ${taosd_service_config}" ${csudo} bash -c "echo '[Install]' >> ${taosd_service_config}" ${csudo} bash -c "echo 'WantedBy=multi-user.target' >> ${taosd_service_config}" @@ -634,9 +794,9 @@ function update_TDengine() { fi if [ ${openresty_work} = 'true' ]; then - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" else - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos -h $serverFqdn${NC} in shell${NC}" fi echo @@ -655,8 +815,8 @@ function update_TDengine() { function install_TDengine() { # Start to install if [ ! 
-e taos.tar.gz ]; then - echo "File taos.tar.gz does not exist" - exit 1 + echo "File taos.tar.gz does not exist" + exit 1 fi tar -zxf taos.tar.gz @@ -702,41 +862,54 @@ function install_TDengine() { echo echo -e "${GREEN_DARK}To configure TDengine ${NC}: edit /etc/taos/taos.cfg" if ((${service_mod}==0)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} systemctl start taosd${NC}" elif ((${service_mod}==1)); then - echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" + echo -e "${GREEN_DARK}To start TDengine ${NC}: ${csudo} service taosd start${NC}" else - echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" + echo -e "${GREEN_DARK}To start TDengine ${NC}: taosd${NC}" fi - if [ ${openresty_work} = 'true' ]; then - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" - else - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - fi + #if [ ${openresty_work} = 'true' ]; then + # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + #else + # echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" + #fi if [ ! -z "$firstEp" ]; then - echo - echo -e "${GREEN_DARK}Please run${NC}: taos -h $firstEp${GREEN_DARK} to login into cluster, then${NC}" - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! 
-z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" + echo fi + echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" echo else # Only install client install_bin install_config - echo echo -e "\033[44;32;1mTDengine client is installed successfully!${NC}" fi touch ~/.taos_history - rm -rf $(tar -tf taos.tar.gz) } ## ==============================Main program starts from here============================ +serverFqdn=$(hostname -f) if [ "$verType" == "server" ]; then # Install server and client if [ -x ${bin_dir}/taosd ]; then diff --git a/packaging/tools/install_power.sh b/packaging/tools/install_power.sh index 5929b52afc..1e3cb81b7d 100755 --- a/packaging/tools/install_power.sh +++ b/packaging/tools/install_power.sh @@ -9,6 +9,8 @@ set -e verMode=edge pagMode=full +iplist="" +serverFqdn="" # -----------------------Variables definition--------------------- script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory @@ -172,7 +174,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/power || : ${csudo} rm -f ${bin_link_dir}/powerd || : ${csudo} rm -f ${bin_link_dir}/powerdemo || : - ${csudo} rm -f ${bin_link_dir}/powerdump || : ${csudo} rm -f ${bin_link_dir}/rmpower || : ${csudo} rm -f ${bin_link_dir}/tarbitrator || : ${csudo} rm -f ${bin_link_dir}/set_core || : @@ -183,7 +184,6 @@ function install_bin() { [ -x ${install_main_dir}/bin/power ] && ${csudo} ln -s ${install_main_dir}/bin/power ${bin_link_dir}/power || : [ -x ${install_main_dir}/bin/powerd ] && ${csudo} ln -s ${install_main_dir}/bin/powerd ${bin_link_dir}/powerd || : [ -x ${install_main_dir}/bin/powerdemo ] && ${csudo} ln -s ${install_main_dir}/bin/powerdemo ${bin_link_dir}/powerdemo || : - [ -x ${install_main_dir}/bin/powerdump ] && ${csudo} ln -s ${install_main_dir}/bin/powerdump ${bin_link_dir}/powerdump || : [ -x ${install_main_dir}/bin/remove_power.sh ] && ${csudo} ln -s ${install_main_dir}/bin/remove_power.sh ${bin_link_dir}/rmpower || : [ -x ${install_main_dir}/bin/set_core.sh ] && ${csudo} ln -s ${install_main_dir}/bin/set_core.sh ${bin_link_dir}/set_core || : [ -x ${install_main_dir}/bin/tarbitrator ] && ${csudo} ln -s ${install_main_dir}/bin/tarbitrator ${bin_link_dir}/tarbitrator || : @@ -227,6 +227,157 @@ function install_header() { ${csudo} ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h } +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" 
+ return + fi + #echo -e -n "$(hostnamectl status --static)" + #echo -e -n "$(hostnamectl status --transient)" + #echo -e -n "$(hostnamectl status --pretty)" + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! -z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname -f) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." 
+ ;; + esac + fi + done + fi +} + function install_config() { #${csudo} rm -f ${install_main_dir}/cfg/taos.cfg || : @@ -248,6 +399,8 @@ function install_config() { if [ "$interactiveFqdn" == "no" ]; then return 0 fi + + local_fqdn_check #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" #FQDN_FORMAT="(:[1-6][0-9][0-9][0-9][0-9]$)" @@ -611,9 +764,9 @@ function update_PowerDB() { fi if [ ${openresty_work} = 'true' ]; then - echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" else - echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}" + echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power -h $serverFqdn${NC} in shell${NC}" fi echo @@ -686,17 +839,30 @@ function install_PowerDB() { echo -e "${GREEN_DARK}To start PowerDB ${NC}: powerd${NC}" fi - if [ ${openresty_work} = 'true' ]; then - echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" - else - echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}" - fi + #if [ ${openresty_work} = 'true' ]; then + # echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${nginx_port}${NC}" + #else + # echo -e "${GREEN_DARK}To access PowerDB ${NC}: use ${GREEN_UNDERLINE}power${NC} in shell${NC}" + #fi if [ ! -z "$firstEp" ]; then - echo - echo -e "${GREEN_DARK}Please run${NC}: power -h $firstEp${GREEN_DARK} to login into cluster, then${NC}" + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" echo + elif [ ! -z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access PowerDB ${NC}: power -h $serverFqdn${GREEN_DARK} to login into PowerDB server${NC}" + echo fi echo -e "\033[44;32;1mPowerDB is installed successfully!${NC}" echo @@ -713,6 +879,7 @@ function install_PowerDB() { ## ==============================Main program starts from here============================ +serverFqdn=$(hostname -f) if [ "$verType" == "server" ]; then # Install server and client if [ -x ${bin_dir}/powerd ]; then diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 52919976ee..6b4d183764 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -3,6 +3,10 @@ # This file is used to install tdengine rpm package on centos systems. 
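# ----------------------------------------------------------------------------
# Editor's note: minimal, hedged reproduction (not part of the patch) of how the
# install_PowerDB() block above splits firstEp into an FQDN and an optional port
# using bash parameter expansion; post.sh below applies the same pattern for
# TDengine. The firstEp value here is a hypothetical example.
# ----------------------------------------------------------------------------
firstEp="h1.taosdata.com:6030"             # hypothetical example value
tmpFqdn=${firstEp%%:*}                     # everything before the first ':'
if [[ $firstEp == *:* ]]; then
  tmpPort=${firstEp#*:}                    # everything after the first ':'
else
  tmpPort=""
fi
echo "fqdn=$tmpFqdn port=${tmpPort:-none}" # -> fqdn=h1.taosdata.com port=6030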
The operating system # is required to use systemd to manage services at boot #set -x + +iplist="" +serverFqdn="" + # -----------------------Variables definition--------------------- script_dir=$(dirname $(readlink -f "$0")) # Dynamic directory @@ -92,7 +96,6 @@ function install_bin() { ${csudo} rm -f ${bin_link_dir}/taos || : ${csudo} rm -f ${bin_link_dir}/taosd || : ${csudo} rm -f ${bin_link_dir}/taosdemo || : - ${csudo} rm -f ${bin_link_dir}/taosdump || : ${csudo} rm -f ${bin_link_dir}/rmtaos || : ${csudo} rm -f ${bin_link_dir}/set_core || : @@ -102,10 +105,160 @@ function install_bin() { [ -x ${bin_dir}/taos ] && ${csudo} ln -s ${bin_dir}/taos ${bin_link_dir}/taos || : [ -x ${bin_dir}/taosd ] && ${csudo} ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosdemo ] && ${csudo} ln -s ${bin_dir}/taosdemo ${bin_link_dir}/taosdemo || : - [ -x ${bin_dir}/taosdump ] && ${csudo} ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : [ -x ${bin_dir}/set_core.sh ] && ${csudo} ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : } +function add_newHostname_to_hosts() { + localIp="127.0.0.1" + OLD_IFS="$IFS" + IFS=" " + iphost=$(cat /etc/hosts | grep $1 | awk '{print $1}') + arr=($iphost) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$localIp" ]]; then + return + fi + done + ${csudo} echo "127.0.0.1 $1" >> /etc/hosts ||: +} + +function set_hostname() { + echo -e -n "${GREEN}Please enter one hostname(must not be 'localhost')${NC}:" + read newHostname + while true; do + if [[ ! -z "$newHostname" && "$newHostname" != "localhost" ]]; then + break + else + read -p "Please enter one hostname(must not be 'localhost'):" newHostname + fi + done + + ${csudo} hostname $newHostname ||: + retval=`echo $?` + if [[ $retval != 0 ]]; then + echo + echo "set hostname fail!" + return + fi + #echo -e -n "$(hostnamectl status --static)" + #echo -e -n "$(hostnamectl status --transient)" + #echo -e -n "$(hostnamectl status --pretty)" + + #ubuntu/centos /etc/hostname + if [[ -e /etc/hostname ]]; then + ${csudo} echo $newHostname > /etc/hostname ||: + fi + + #debian: #HOSTNAME=yourname + if [[ -e /etc/sysconfig/network ]]; then + ${csudo} sed -i -r "s/#*\s*(HOSTNAME=\s*).*/\1$newHostname/" /etc/sysconfig/network ||: + fi + + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$newHostname/" ${cfg_install_dir}/taos.cfg + serverFqdn=$newHostname + + if [[ -e /etc/hosts ]]; then + add_newHostname_to_hosts $newHostname + fi +} + +function is_correct_ipaddr() { + newIp=$1 + OLD_IFS="$IFS" + IFS=" " + arr=($iplist) + IFS="$OLD_IFS" + for s in ${arr[@]} + do + if [[ "$s" == "$newIp" ]]; then + return 0 + fi + done + + return 1 +} + +function set_ipAsFqdn() { + iplist=$(ip address |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F "/" '{print $1}') ||: + if [ -z "$iplist" ]; then + iplist=$(ifconfig |grep inet |grep -v inet6 |grep -v 127.0.0.1 |awk '{print $2}' |awk -F ":" '{print $2}') ||: + fi + + if [ -z "$iplist" ]; then + echo + echo -e -n "${GREEN}Unable to get local ip, use 127.0.0.1${NC}" + localFqdn="127.0.0.1" + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + echo + return + fi + + echo -e -n "${GREEN}Please choose an IP from local IP list${NC}:" + echo + echo -e -n "${GREEN}$iplist${NC}" + echo + echo + echo -e -n "${GREEN}Notes: if IP is used as the node name, data can NOT be migrated to other machine directly${NC}:" + read localFqdn + while true; do + if [ ! 
-z "$localFqdn" ]; then + # Check if correct ip address + is_correct_ipaddr $localFqdn + retval=`echo $?` + if [[ $retval != 0 ]]; then + read -p "Please choose an IP from local IP list:" localFqdn + else + # Write the local FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(fqdn\s*).*/\1$localFqdn/" ${cfg_install_dir}/taos.cfg + serverFqdn=$localFqdn + break + fi + else + read -p "Please choose an IP from local IP list:" localFqdn + fi + done +} + +function local_fqdn_check() { + #serverFqdn=$(hostname -f) + echo + echo -e -n "System hostname is: ${GREEN}$serverFqdn${NC}" + echo + if [[ "$serverFqdn" == "" ]] || [[ "$serverFqdn" == "localhost" ]]; then + echo -e -n "${GREEN}It is strongly recommended to configure a hostname for this machine ${NC}" + echo + + while true + do + read -r -p "Set hostname now? [Y/n] " input + if [ ! -n "$input" ]; then + set_hostname + break + else + case $input in + [yY][eE][sS]|[yY]) + set_hostname + break + ;; + + [nN][oO]|[nN]) + set_ipAsFqdn + break + ;; + + *) + echo "Invalid input..." + ;; + esac + fi + done + fi +} + function install_config() { if [ ! -f ${cfg_install_dir}/taos.cfg ]; then ${csudo} ${csudo} mkdir -p ${cfg_install_dir} @@ -113,6 +266,8 @@ function install_config() { ${csudo} chmod 644 ${cfg_install_dir}/* fi + local_fqdn_check + ${csudo} mv ${cfg_dir}/taos.cfg ${cfg_dir}/taos.cfg.org ${csudo} ln -s ${cfg_install_dir}/taos.cfg ${cfg_dir} #FQDN_FORMAT="(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)" @@ -130,19 +285,19 @@ function install_config() { read firstEp; fi while true; do - if [ ! -z "$firstEp" ]; then - # check the format of the firstEp - #if [[ $firstEp == $FQDN_PATTERN ]]; then - # Write the first FQDN to configuration file - ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg - break - #else - # read -p "Please enter the correct FQDN:port: " firstEp - #fi - else + if [ ! -z "$firstEp" ]; then + # check the format of the firstEp + #if [[ $firstEp == $FQDN_PATTERN ]]; then + # Write the first FQDN to configuration file + ${csudo} sed -i -r "s/#*\s*(firstEp\s*).*/\1$firstEp/" ${cfg_install_dir}/taos.cfg break - fi - done + #else + # read -p "Please enter the correct FQDN:port: " firstEp + #fi + else + break + fi + done # user email #EMAIL_PATTERN='^[A-Za-z0-9\u4e00-\u9fa5]+@[a-zA-Z0-9_-]+(\.[a-zA-Z0-9_-]+)+$' @@ -300,18 +455,32 @@ function install_TDengine() { echo -e "${GREEN_DARK}To start TDengine ${NC}: ./taosd${NC}" fi - echo -e "${GREEN_DARK}To access TDengine ${NC}: use ${GREEN_UNDERLINE}taos${NC} in shell${NC}" - + + if [ ! -z "$firstEp" ]; then - echo - echo -e "${GREEN_DARK}Please run${NC}: taos -h $firstEp${GREEN_DARK} to login into cluster, then${NC}" - echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" - echo - fi + tmpFqdn=${firstEp%%:*} + substr=":" + if [[ $firstEp =~ $substr ]];then + tmpPort=${firstEp#*:} + else + tmpPort="" + fi + if [[ "$tmpPort" != "" ]];then + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + else + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + fi + echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" + echo + elif [ ! 
-z "$serverFqdn" ]; then + echo -e "${GREEN_DARK}To access TDengine ${NC}: taos -h $serverFqdn${GREEN_DARK} to login into TDengine server${NC}" + echo + fi echo echo -e "\033[44;32;1mTDengine is installed successfully!${NC}" } ## ==============================Main program starts from here============================ +serverFqdn=$(hostname -f) install_TDengine diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index b5d06a4adb..0b9c33950c 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.9.0' +version: '2.0.11.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.9.0 + - usr/lib/libtaos.so.2.0.11.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/client/inc/tscLog.h b/src/client/inc/tscLog.h index 9d01edae36..5bbf05aa5f 100644 --- a/src/client/inc/tscLog.h +++ b/src/client/inc/tscLog.h @@ -22,16 +22,16 @@ extern "C" { #include "tlog.h" -extern int32_t cDebugFlag; -extern int32_t tscEmbedded; +extern uint32_t cDebugFlag; +extern uint32_t tscEmbedded; -#define tscFatal(...) { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} -#define tscError(...) { if (cDebugFlag & DEBUG_ERROR) { taosPrintLog("TSC ERROR ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} -#define tscWarn(...) { if (cDebugFlag & DEBUG_WARN) { taosPrintLog("TSC WARN ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} -#define tscInfo(...) { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} -#define tscDebug(...) { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", cDebugFlag, __VA_ARGS__); }} -#define tscTrace(...) { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", cDebugFlag, __VA_ARGS__); }} -#define tscDebugL(...){ if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", cDebugFlag, __VA_ARGS__); }} +#define tscFatal(...) do { if (cDebugFlag & DEBUG_FATAL) { taosPrintLog("TSC FATAL ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0) +#define tscError(...) do { if (cDebugFlag & DEBUG_ERROR) { taosPrintLog("TSC ERROR ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0) +#define tscWarn(...) do { if (cDebugFlag & DEBUG_WARN) { taosPrintLog("TSC WARN ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0) +#define tscInfo(...) do { if (cDebugFlag & DEBUG_INFO) { taosPrintLog("TSC ", tscEmbedded ? 255 : cDebugFlag, __VA_ARGS__); }} while(0) +#define tscDebug(...) do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLog("TSC ", cDebugFlag, __VA_ARGS__); }} while(0) +#define tscTrace(...) do { if (cDebugFlag & DEBUG_TRACE) { taosPrintLog("TSC ", cDebugFlag, __VA_ARGS__); }} while(0) +#define tscDebugL(...) 
do { if (cDebugFlag & DEBUG_DEBUG) { taosPrintLongString("TSC ", cDebugFlag, __VA_ARGS__); }} while(0) #ifdef __cplusplus } diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f7832c9818..d3996ccf7f 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -36,7 +36,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql); int32_t tscHandleMultivnodeInsert(SSqlObj *pSql); -int32_t tscHandleInsertRetry(SSqlObj* pSql); +int32_t tscHandleInsertRetry(SSqlObj* parent, SSqlObj* child); void tscBuildResFromSubqueries(SSqlObj *pSql); TAOS_ROW doSetResultRowData(SSqlObj *pSql); diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 2c8641da76..37d05de731 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -110,11 +110,12 @@ SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint uint32_t offset); void* tscDestroyBlockArrayList(SArray* pDataBlockList); +void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable); + int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); -int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pDataList); -int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, - STableDataBlocks** dataBlocks); +int32_t tscMergeTableDataBlocks(SSqlObj* pSql); +int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, + STableDataBlocks** dataBlocks, SArray* pBlockList); /** * for the projection query on metric or point interpolation query on metric, @@ -233,7 +234,7 @@ void tscVgroupTableCopy(SVgroupTableInfo* info, SVgroupTableInfo* pInfo); int tscGetSTableVgroupInfo(SSqlObj* pSql, int32_t clauseIndex); int tscGetTableMeta(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo); -int tscGetMeterMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool createIfNotExists); +int tscGetTableMetaEx(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, bool createIfNotExists); void tscResetForNextRetrieve(SSqlRes* pRes); void tscDoQuery(SSqlObj* pSql); @@ -275,6 +276,8 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); bool hasMoreVnodesToTry(SSqlObj *pSql); bool hasMoreClauseToTry(SSqlObj* pSql); +void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache); + void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp); void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows); void tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp); @@ -284,6 +287,9 @@ bool tscSetSqlOwner(SSqlObj* pSql); void tscClearSqlOwner(SSqlObj* pSql); int32_t doArithmeticCalculate(SQueryInfo* pQueryInfo, tFilePage* pOutput, int32_t rowSize, int32_t finalRowSize); +char* serializeTagData(STagData* pTagData, char* pMsg); +int32_t copyTagData(STagData* dst, const STagData* src); + void* malloc_throw(size_t size); void* calloc_throw(size_t nmemb, size_t size); char* strdup_throw(const char* str); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index a1b6174de0..99ee62fa7f 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -37,40 +37,6 @@ extern "C" { #include "qTsbuf.h" #include "tcmdtype.h" -#if 0 -static UNUSED_FUNC void *u_malloc (size_t __size) { - uint32_t v = rand(); - - if (v % 5000 <= 0) { - return NULL; - } else { - return malloc(__size); - } -} - -static UNUSED_FUNC void* 
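/* -------------------------------------------------------------------------
 * Editor's note: illustrative sketch, not part of this patch. The tscLog.h
 * macros above were rewritten from bare `{ ... }` blocks into
 * `do { ... } while(0)`; this toy example shows why: with a bare block, the
 * semicolon the caller writes after the macro terminates the if-statement,
 * so a following `else` has no `if` left to bind to.
 * ------------------------------------------------------------------------- */
#include <stdio.h>

#define LOG_BARE(msg)    { printf("%s\n", (msg)); }
#define LOG_DOWHILE(msg) do { printf("%s\n", (msg)); } while (0)

static void demo(int ok) {
  /* if (ok) LOG_BARE("ok"); else LOG_BARE("bad");  <- fails to compile: the ';'
     after LOG_BARE("ok") closes the if, leaving the else dangling. */
  if (ok) LOG_DOWHILE("ok");
  else    LOG_DOWHILE("bad");   /* well-formed with the do/while(0) wrapper */
}

int main(void) { demo(1); return 0; }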
u_calloc(size_t num, size_t __size) { - uint32_t v = rand(); - if (v % 5000 <= 0) { - return NULL; - } else { - return calloc(num, __size); - } -} - -static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { - uint32_t v = rand(); - if (v % 5000 <= 0) { - return NULL; - } else { - return realloc(p, __size); - } -} - -#define calloc u_calloc -#define malloc u_malloc -#define realloc u_realloc -#endif - // forward declaration struct SSqlInfo; struct SLocalReducer; @@ -78,7 +44,7 @@ struct SLocalReducer; // data source from sql string or from file enum { DATA_FROM_SQL_STRING = 1, - DATA_FROM_DATA_FILE = 2, + DATA_FROM_DATA_FILE = 2, }; typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows); @@ -118,10 +84,10 @@ typedef struct STableMetaInfo { * 1. keep the vgroup index during the multi-vnode super table projection query * 2. keep the vgroup index for multi-vnode insertion */ - int32_t vgroupIndex; - char name[TSDB_TABLE_FNAME_LEN]; // (super) table name - char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql - SArray* tagColList; // SArray, involved tag columns + int32_t vgroupIndex; + char name[TSDB_TABLE_FNAME_LEN]; // (super) table name + char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql + SArray *tagColList; // SArray, involved tag columns } STableMetaInfo; /* the structure for sql function in select clause */ @@ -136,7 +102,7 @@ typedef struct SSqlExpr { int16_t numOfParams; // argument value of each function tVariant param[3]; // parameters are not more than 3 int32_t offset; // sub result column value of arithmetic expression. - int16_t resColId; // result column id + int16_t resColId; // result column id } SSqlExpr; typedef struct SColumnIndex { @@ -204,22 +170,17 @@ typedef struct SParamInfo { } SParamInfo; typedef struct STableDataBlocks { - char tableId[TSDB_TABLE_FNAME_LEN]; - int8_t tsSource; // where does the UNIX timestamp come from, server or client - bool ordered; // if current rows are ordered or not - int64_t vgId; // virtual group id - int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending - int32_t numOfTables; // number of tables in current submit block - int32_t rowSize; // row size for current table - uint32_t nAllocSize; - uint32_t headerSize; // header for table info (uid, tid, submit metadata) - uint32_t size; - - /* - * the table meta of table, the table meta will be used during submit, keep a ref - * to avoid it to be removed from cache - */ - STableMeta *pTableMeta; + char tableId[TSDB_TABLE_FNAME_LEN]; + int8_t tsSource; // where does the UNIX timestamp come from, server or client + bool ordered; // if current rows are ordered or not + int64_t vgId; // virtual group id + int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending + int32_t numOfTables; // number of tables in current submit block + int32_t rowSize; // row size for current table + uint32_t nAllocSize; + uint32_t headerSize; // header for table info (uid, tid, submit metadata) + uint32_t size; + STableMeta *pTableMeta; // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache char *pData; // for parameter ('?') binding @@ -252,7 +213,7 @@ typedef struct SQueryInfo { int64_t clauseLimit; // limit for current sub clause int64_t prjOffset; // offset value in the original sql expression, only applied at client side - int64_t tableLimit; // table limit in case of super table 
projection query + global order + limit + int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX int16_t resColumnId; // result column id @@ -268,7 +229,7 @@ typedef struct { int32_t numOfTablesInSubmit; }; - int32_t insertType; + uint32_t insertType; int32_t clauseIndex; // index of multiple subclause query char * curSql; // current sql, resume position of sql after parsing paused @@ -284,10 +245,14 @@ typedef struct { int32_t numOfParams; int8_t dataSourceType; // load data from file or not - int8_t submitSchema; // submit block is built with table schema - STagData *pTagData; // NOTE: pTagData->data is used as a variant length array - SHashObj *pTableList; // referred table involved in sql - SArray *pDataBlocks; // SArray submit data blocks after parsing sql + int8_t submitSchema; // submit block is built with table schema + STagData tagData; // NOTE: pTagData->data is used as a variant length array + + STableMeta **pTableMetaList; // all involved tableMeta list of current insert sql statement. + int32_t numOfTables; + + SHashObj *pTableBlockHashList; // data block for each table + SArray *pDataBlocks; // SArray. Merged submit block for each vgroup } SSqlCmd; typedef struct SResRec { @@ -320,8 +285,8 @@ typedef struct { char ** buffer; // Buffer used to put multibytes encoded using unicode (wchar_t) SColumnIndex* pColumnIndex; - SArithmeticSupport* pArithSup; // support the arithmetic expression calculation on agg functions - struct SLocalReducer* pLocalReducer; + SArithmeticSupport *pArithSup; // support the arithmetic expression calculation on agg functions + struct SLocalReducer *pLocalReducer; } SSqlRes; typedef struct STscObj { @@ -382,6 +347,7 @@ typedef struct SSqlObj { typedef struct SSqlStream { SSqlObj *pSql; + const char* dstTable; uint32_t streamId; char listed; bool isProject; @@ -408,6 +374,8 @@ typedef struct SSqlStream { struct SSqlStream *prev, *next; } SSqlStream; +void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable); + int32_t tscInitRpc(const char *user, const char *secret, void** pDnodeConn); void tscInitMsgsFp(); diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 6b2722b43f..97b962e53a 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -365,6 +365,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) { static void tscProcessAsyncError(SSchedMsg *pMsg) { void (*fp)() = pMsg->ahandle; terrno = *(int32_t*) pMsg->msg; + tfree(pMsg->msg); (*fp)(pMsg->thandle, NULL, *(int32_t*)pMsg->msg); } @@ -380,6 +381,7 @@ void tscQueueAsyncError(void(*fp), void *param, int32_t code) { taosScheduleTask(tscQhandle, &schedMsg); } + void tscQueueAsyncRes(SSqlObj *pSql) { if (pSql == NULL || pSql->signature != pSql) { tscDebug("%p SqlObj is freed, not add into queue async res", pSql); @@ -389,7 +391,10 @@ void tscQueueAsyncRes(SSqlObj *pSql) { tscError("%p add into queued async res, code:%s", pSql, tstrerror(pSql->res.code)); SSqlRes *pRes = &pSql->res; - assert(pSql->fp != NULL && pSql->fetchFp != NULL); + + if (pSql->fp == NULL || pSql->fetchFp == NULL){ + return; + } pSql->fp = pSql->fetchFp; (*pSql->fp)(pSql->param, pSql, pRes->code); @@ -410,52 +415,26 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (code != TSDB_CODE_SUCCESS) { tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code)); goto _error; - } else { - tscDebug("%p get 
%s successfully", pSql, msg); } + tscDebug("%p get %s successfully", pSql, msg); if (pSql->pStream == NULL) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); // check if it is a sub-query of super table query first, if true, enter another routine - if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY)) { - tscDebug("%p update table meta in local cache, continue to process sql and send corresponding subquery", pSql); + if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { + tscDebug("%p update table meta in local cache, continue to process sql and send the corresponding query", pSql); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); + assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; - } else { - assert(code == TSDB_CODE_SUCCESS); - } - - // param already freed by other routine and pSql in tscCache when ctrl + c - if (atomic_load_ptr(&pSql->param) == NULL) { - return; - } - assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); - - SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; - SSqlObj * pParObj = trs->pParentSql; - - // NOTE: the vgroupInfo for the queried super table must be existed here. - assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex && - pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL); - - // tscProcessSql can add error into async res - tscProcessSql(pSql); - return; - } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) { - tscDebug("%p update table meta in local cache, continue to process sql and send corresponding tid_tag query", pSql); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - code = tscGetTableMeta(pSql, pTableMetaInfo); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - return; - } else { - assert(code == TSDB_CODE_SUCCESS); } assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); + // tscProcessSql can add error into async res tscProcessSql(pSql); return; @@ -465,16 +444,15 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); + + assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; - } else { - assert(code == TSDB_CODE_SUCCESS); } - // in case of insert, redo parsing the sql string and build new submit data block for two reasons: - // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly. - // 2. vnode may need the schema information along with submit block to update its local table schema. - if (pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_SELECT) { + assert(pCmd->command != TSDB_SQL_INSERT); + + if (pCmd->command == TSDB_SQL_SELECT) { tscDebug("%p redo parse sql string and proceed", pSql); pCmd->parseFinished = false; tscResetSqlCmdObj(pCmd, false); @@ -486,17 +464,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { goto _error; } - if (pCmd->command == TSDB_SQL_INSERT) { - /* - * Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks, - * and send the required submit block according to index value in supporter to server. 
- */ - pSql->fp = pSql->fetchFp; // restore the fp - tscHandleInsertRetry(pSql); - } else if (pCmd->command == TSDB_SQL_SELECT) { // in case of other query type, continue - tscProcessSql(pSql); - } - }else { // in all other cases, simple retry + tscProcessSql(pSql); + } else { // in all other cases, simple retry tscProcessSql(pSql); } @@ -551,6 +520,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (!pSql->cmd.parseFinished) { tsParseSql(pSql, false); } + (*pSql->fp)(pSql->param, pSql, code); return; diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 7921399330..f313d355f1 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -48,7 +48,7 @@ break; \ } \ GET_RES_INFO(ctx)->numOfRes = (res); \ - } while (0); + } while (0) #define INC_INIT_VAL(ctx, res) (GET_RES_INFO(ctx)->numOfRes += (res)); @@ -482,17 +482,16 @@ int32_t no_data_info(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId DO_UPDATE_TAG_COLUMNS(ctx, k); \ (num) += 1; \ } \ - } while (0); + } while (0) #define DUPATE_DATA_WITHOUT_TS(ctx, left, right, num, sign) \ -do { \ - if (((left) < (right)) ^ (sign)) { \ - (left) = (right); \ + do { \ + if (((left) < (right)) ^ (sign)) { \ + (left) = (right); \ DO_UPDATE_TAG_COLUMNS_WITHOUT_TS(ctx); \ - (num) += 1; \ - } \ - } while (0); - + (num) += 1; \ + } \ + } while (0) #define LOOPCHECK_N(val, list, ctx, tsdbType, sign, num) \ for (int32_t i = 0; i < ((ctx)->size); ++i) { \ @@ -709,15 +708,14 @@ static int32_t firstDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY en return BLK_DATA_ALL_NEEDED; } - return BLK_DATA_ALL_NEEDED; - // TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter - // is invalid -// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); -// if (pInfo->hasResult != DATA_SET_FLAG) { -// return BLK_DATA_ALL_NEEDED; -// } else { // data in current block is not earlier than current result -// return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; -// } + // the pCtx should be set to current Ctx and output buffer before call this function. Otherwise, pCtx->aOutputBuf is + // the previous windowRes output buffer, not current unloaded block. In this case, the following filter is invalid + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); + if (pInfo->hasResult != DATA_SET_FLAG) { + return BLK_DATA_ALL_NEEDED; + } else { // data in current block is not earlier than current result + return (pInfo->ts <= start) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + } } static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end, int32_t colId) { @@ -730,16 +728,14 @@ static int32_t lastDistFuncRequired(SQLFunctionCtx *pCtx, TSKEY start, TSKEY end return BLK_DATA_ALL_NEEDED; } - return BLK_DATA_ALL_NEEDED; - // TODO pCtx->aOutputBuf is the previous windowRes output buffer, not current unloaded block. so the following filter - // is invalid - -// SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); -// if (pInfo->hasResult != DATA_SET_FLAG) { -// return BLK_DATA_ALL_NEEDED; -// } else { -// return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; -// } + // the pCtx should be set to current Ctx and output buffer before call this function. Otherwise, pCtx->aOutputBuf is + // the previous windowRes output buffer, not current unloaded block. 
In this case, the following filter is invalid + SFirstLastInfo *pInfo = (SFirstLastInfo*) (pCtx->aOutputBuf + pCtx->inputBytes); + if (pInfo->hasResult != DATA_SET_FLAG) { + return BLK_DATA_ALL_NEEDED; + } else { + return (pInfo->ts > end) ? BLK_DATA_NO_NEEDED : BLK_DATA_ALL_NEEDED; + } } ////////////////////////////////////////////////////////////////////////////////////////////// @@ -1909,7 +1905,7 @@ static void valuePairAssign(tValuePair *dst, int16_t type, const char *val, int6 (dst)->timestamp = (src)->timestamp; \ (dst)->v = (src)->v; \ memcpy((dst)->pTags, (src)->pTags, (size_t)(__l)); \ - } while (0); + } while (0) static void do_top_function_add(STopBotInfo *pInfo, int32_t maxLen, void *pData, int64_t ts, uint16_t type, SExtTagsInfo *pTagInfo, char *pTags, int16_t stage) { @@ -2222,7 +2218,8 @@ static void buildTopBotStruct(STopBotInfo *pTopBotInfo, SQLFunctionCtx *pCtx) { tmp += POINTER_BYTES * pCtx->param[0].i64Key; size_t size = sizeof(tValuePair) + pCtx->tagInfo.tagsLen; - +// assert(pCtx->param[0].i64Key > 0); + for (int32_t i = 0; i < pCtx->param[0].i64Key; ++i) { pTopBotInfo->res[i] = (tValuePair*) tmp; pTopBotInfo->res[i]->pTags = tmp + sizeof(tValuePair); @@ -2589,10 +2586,11 @@ static void percentile_next_step(SQLFunctionCtx *pCtx) { // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + } else { + pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, GET_DOUBLE_VAL(&pInfo->minval), GET_DOUBLE_VAL(&pInfo->maxval)); } pInfo->stage += 1; - pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, GET_DOUBLE_VAL(&pInfo->minval), GET_DOUBLE_VAL(&pInfo->maxval)); } else { pResInfo->complete = true; } @@ -2883,7 +2881,7 @@ static void leastsquares_function_f(SQLFunctionCtx *pCtx, int32_t index) { int32_t *p = pData; LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); break; - }; + } case TSDB_DATA_TYPE_TINYINT: { int8_t *p = pData; LEASTSQR_CAL(param, pInfo->startVal, p, 0, pCtx->param[1].dKey); @@ -3647,11 +3645,21 @@ static bool twa_function_setup(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - pInfo->lastKey = INT64_MIN; + pInfo->p.key = INT64_MIN; pInfo->win = TSWINDOW_INITIALIZER; return true; } +static double twa_get_area(SPoint1 s, SPoint1 e) { + if ((s.val >= 0 && e.val >= 0)|| (s.val <=0 && e.val <= 0)) { + return (s.val + e.val) * (e.key - s.key) / 2; + } + + double x = (s.key * e.val - e.key * s.val)/(e.val - s.val); + double val = (s.val * (x - s.key) + e.val * (e.key - x)) / 2; + return val; +} + static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t index, int32_t size) { int32_t notNullElems = 0; TSKEY *primaryKey = pCtx->ptsList; @@ -3662,28 +3670,29 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t int32_t i = index; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + SPoint1* last = &pInfo->p; if (pCtx->start.key != INT64_MIN) { assert((pCtx->start.key < primaryKey[tsIndex + i] && pCtx->order == TSDB_ORDER_ASC) || (pCtx->start.key > primaryKey[tsIndex + i] && pCtx->order == TSDB_ORDER_DESC)); - assert(pInfo->lastKey == INT64_MIN); + assert(last->key == INT64_MIN); - pInfo->lastKey = primaryKey[tsIndex + i]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + last->key = primaryKey[tsIndex + i]; + GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); - 
pInfo->dOutput += ((pInfo->lastValue + pCtx->start.val) / 2) * (pInfo->lastKey - pCtx->start.key); + pInfo->dOutput += twa_get_area(pCtx->start, *last); pInfo->hasResult = DATA_SET_FLAG; pInfo->win.skey = pCtx->start.key; notNullElems++; i += step; - } else if (pInfo->lastKey == INT64_MIN) { - pInfo->lastKey = primaryKey[tsIndex + i]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + } else if (pInfo->p.key == INT64_MIN) { + last->key = primaryKey[tsIndex + i]; + GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); pInfo->hasResult = DATA_SET_FLAG; - pInfo->win.skey = pInfo->lastKey; + pInfo->win.skey = last->key; notNullElems++; i += step; } @@ -3697,9 +3706,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3710,9 +3719,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3723,9 +3732,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3736,9 +3745,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = (double) val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = (double) val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3749,9 +3758,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3762,9 +3771,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3773,20 +3782,19 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t // the last interpolated time window value if (pCtx->end.key != INT64_MIN) { - pInfo->dOutput += ((pInfo->lastValue + pCtx->end.val) / 2) 
* (pCtx->end.key - pInfo->lastKey); - pInfo->lastValue = pCtx->end.val; - pInfo->lastKey = pCtx->end.key; + pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end); + pInfo->p = pCtx->end; } - pInfo->win.ekey = pInfo->lastKey; + pInfo->win.ekey = pInfo->p.key; return notNullElems; } static void twa_function(SQLFunctionCtx *pCtx) { - void * data = GET_INPUT_CHAR(pCtx); + void *data = GET_INPUT_CHAR(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - STwaInfo * pInfo = GET_ROWCELL_INTERBUF(pResInfo); + STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); // skip null value int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); @@ -3807,6 +3815,7 @@ static void twa_function(SQLFunctionCtx *pCtx) { } } +//TODO refactor static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { @@ -3823,23 +3832,23 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { int32_t size = pCtx->size; if (pCtx->start.key != INT64_MIN) { - assert(pInfo->lastKey == INT64_MIN); + assert(pInfo->p.key == INT64_MIN); - pInfo->lastKey = primaryKey[index]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + pInfo->p.key = primaryKey[index]; + GET_TYPED_DATA(pInfo->p.val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); - pInfo->dOutput += ((pInfo->lastValue + pCtx->start.val) / 2) * (pInfo->lastKey - pCtx->start.key); + pInfo->dOutput += twa_get_area(pCtx->start, pInfo->p); pInfo->hasResult = DATA_SET_FLAG; pInfo->win.skey = pCtx->start.key; notNullElems++; i += 1; - } else if (pInfo->lastKey == INT64_MIN) { - pInfo->lastKey = primaryKey[index]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + } else if (pInfo->p.key == INT64_MIN) { + pInfo->p.key = primaryKey[index]; + GET_TYPED_DATA(pInfo->p.val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); pInfo->hasResult = DATA_SET_FLAG; - pInfo->win.skey = pInfo->lastKey; + pInfo->win.skey = pInfo->p.key; notNullElems++; i += 1; } @@ -3853,9 +3862,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3866,9 +3875,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3879,9 +3888,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3892,9 +3901,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - 
pInfo->lastValue = (double) val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = (double) val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3905,9 +3914,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st);//((val[i] + pInfo->p.val) / 2) * (primaryKey[i + index] - pInfo->p.key); + pInfo->p = st; } break; } @@ -3918,9 +3927,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st);//((val[i] + pInfo->p.val) / 2) * (primaryKey[i + index] - pInfo->p.key); + pInfo->p = st; } break; } @@ -3929,12 +3938,11 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { // the last interpolated time window value if (pCtx->end.key != INT64_MIN) { - pInfo->dOutput += ((pInfo->lastValue + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->lastKey); - pInfo->lastValue = pCtx->end.val; - pInfo->lastKey = pCtx->end.key; + pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end);//((pInfo->p.val + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->p.key); + pInfo->p = pCtx->end; } - pInfo->win.ekey = pInfo->lastKey; + pInfo->win.ekey = pInfo->p.key; SET_VAL(pCtx, notNullElems, 1); @@ -3965,7 +3973,7 @@ static void twa_func_merge(SQLFunctionCtx *pCtx) { pBuf->dOutput += pInput->dOutput; pBuf->win = pInput->win; - pBuf->lastKey = pInput->lastKey; + pBuf->p = pInput->p; } SET_VAL(pCtx, numOfNotNull, 1); @@ -3992,15 +4000,14 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo); - assert(pInfo->win.ekey == pInfo->lastKey && pInfo->hasResult == pResInfo->hasResult); - if (pInfo->hasResult != DATA_SET_FLAG) { setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); return; } - + + assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult); if (pInfo->win.ekey == pInfo->win.skey) { - *(double *)pCtx->aOutputBuf = pInfo->lastValue; + *(double *)pCtx->aOutputBuf = pInfo->p.val; } else { *(double *)pCtx->aOutputBuf = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); } diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 4c28adc261..192af4dbdf 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -46,7 +46,8 @@ typedef struct SCreateBuilder { SSqlObj *pInterSql; int32_t (*fp)(void *para, char* result); Stage callStage; -} SCreateBuilder; +} SCreateBuilder; + static void tscSetLocalQueryResult(SSqlObj *pSql, const char *val, const char *columnName, int16_t type, size_t valueLength); static int32_t tscSetValueToResObj(SSqlObj *pSql, int32_t rowLen) { @@ -207,10 +208,7 @@ static int32_t tscProcessDescribeTable(SSqlObj *pSql) { const int32_t TYPE_COLUMN_LENGTH = 16; const int32_t NOTE_COLUMN_MIN_LENGTH = 8; - int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH;//tscMaxLengthOfTagsFields(pSql); -// if (noteFieldLen == 0) { -// noteFieldLen = 
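/* -------------------------------------------------------------------------
 * Editor's note: hedged standalone sketch, not part of this patch. It mirrors
 * the twa_get_area() helper introduced above: for the segment between two
 * samples the accumulated value is the plain trapezoid, except that when the
 * value crosses zero the code instead splits the segment at the crossing
 * point x and sums the two signed triangles. `Point` is a local stand-in for
 * the engine's SPoint1.
 * ------------------------------------------------------------------------- */
#include <stdio.h>

typedef struct { long long key; double val; } Point;   /* key: timestamp */

static double segment_area(Point s, Point e) {
  if ((s.val >= 0 && e.val >= 0) || (s.val <= 0 && e.val <= 0)) {
    return (s.val + e.val) * (e.key - s.key) / 2;       /* trapezoid rule */
  }
  double x = (s.key * e.val - e.key * s.val) / (e.val - s.val);   /* zero crossing */
  return (s.val * (x - s.key) + e.val * (e.key - x)) / 2;         /* two triangles */
}

int main(void) {
  Point a = {0, -1.0}, b = {10, 1.0};
  /* crossing at t = 5: the negative and positive triangles cancel -> 0 */
  printf("%f\n", segment_area(a, b));
  return 0;
}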
NOTE_COLUMN_MIN_LENGTH; -// } + int32_t noteFieldLen = NOTE_COLUMN_MIN_LENGTH; int32_t rowLen = tscBuildTableSchemaResultFields(pSql, NUM_OF_DESC_TABLE_COLUMNS, TYPE_COLUMN_LENGTH, noteFieldLen); tscFieldInfoUpdateOffset(pQueryInfo); @@ -571,7 +569,7 @@ static int32_t tscRebuildDDLForSubTable(SSqlObj *pSql, const char *tableName, ch return TSDB_CODE_TSC_OUT_OF_MEMORY; } - char fullName[TSDB_TABLE_FNAME_LEN] = {0}; + char fullName[TSDB_TABLE_FNAME_LEN * 2] = {0}; extractDBName(pTableMetaInfo->name, fullName); extractTableName(pMeta->sTableId, param->sTableName); snprintf(fullName + strlen(fullName), TSDB_TABLE_FNAME_LEN - strlen(fullName), ".%s", param->sTableName); @@ -822,26 +820,39 @@ static int32_t tscProcessClientVer(SSqlObj *pSql) { } +// TODO add test cases. +static int32_t checkForOnlineNode(SSqlObj* pSql) { + int32_t* data = pSql->res.length; + if (data == NULL) { + return TSDB_CODE_SUCCESS; + } + + int32_t total = data[0]; + int32_t online = data[1]; + return (online < total)? TSDB_CODE_RPC_NETWORK_UNAVAIL:TSDB_CODE_SUCCESS; +} + static int32_t tscProcessServStatus(SSqlObj *pSql) { STscObj* pObj = pSql->pTscObj; SSqlObj* pHb = (SSqlObj*)taosAcquireRef(tscObjRef, pObj->hbrid); if (pHb != NULL) { - int32_t code = pHb->res.code; + pSql->res.code = pHb->res.code; taosReleaseRef(tscObjRef, pObj->hbrid); - if (code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { - pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL; - return pSql->res.code; - } - } else { - if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { - return pSql->res.code; - } + } + + if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + return pSql->res.code; + } + + pSql->res.code = checkForOnlineNode(pHb); + if (pSql->res.code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { + return pSql->res.code; } SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0); - SSqlExpr* pExpr = taosArrayGetP(pQueryInfo->exprList, 0); + int32_t val = 1; tscSetLocalQueryResult(pSql, (char*) &val, pExpr->aliasName, TSDB_DATA_TYPE_INT, sizeof(int32_t)); return TSDB_CODE_SUCCESS; diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 7fc5b8debb..a99918975e 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -726,10 +726,14 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); SSchema p1 = {0}; - if (pExpr->colInfo.colIndex != TSDB_TBNAME_COLUMN_INDEX) { - p1 = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex); - } else { + if (pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { p1 = tGetTableNameColumnSchema(); + } else if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) { + p1.bytes = pExpr->resBytes; + p1.type = (uint8_t) pExpr->resType; + tstrncpy(p1.name, pExpr->aliasName, tListLen(p1.name)); + } else { + p1 = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex); } int32_t inter = 0; diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 18e5b6f074..ded12f64ea 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -686,17 +686,14 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) { } } -static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **str, SParsedDataColInfo *spd, - int32_t *totalNum) { - SSqlCmd * pCmd = &pSql->cmd; +static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColInfo *spd, int32_t *totalNum) { STableMetaInfo *pTableMetaInfo = 
tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); - STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; + STableMeta *pTableMeta = pTableMetaInfo->pTableMeta; STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableDataBlocks *dataBuf = NULL; - int32_t ret = tscGetDataBlockFromList(pTableList, pCmd->pDataBlocks, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE, - sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, - pTableMeta, &dataBuf); + int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE, + sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &dataBuf, NULL); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -799,8 +796,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { sToken = tStrGetToken(sql, &index, false, 0, NULL); sql += index; - tscAllocPayload(pCmd, sizeof(STagData)); - //the source super table is moved to the secondary position of the pTableMetaInfo list if (pQueryInfo->numOfTables < 2) { tscAddEmptyMetaInfo(pQueryInfo); @@ -812,13 +807,8 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { return code; } - STagData *pTag = realloc(pCmd->pTagData, offsetof(STagData, data)); - if (pTag == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - memset(pTag, 0, offsetof(STagData, data)); - tstrncpy(pTag->name, pSTableMeterMetaInfo->name, sizeof(pTag->name)); - pCmd->pTagData = pTag; + tstrncpy(pCmd->tagData.name, pSTableMeterMetaInfo->name, sizeof(pCmd->tagData.name)); + pCmd->tagData.dataLen = 0; code = tscGetTableMeta(pSql, pSTableMeterMetaInfo); if (code != TSDB_CODE_SUCCESS) { @@ -949,14 +939,15 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { } tdSortKVRowByColIdx(row); - pTag = (STagData*)realloc(pCmd->pTagData, offsetof(STagData, data) + kvRowLen(row)); + pCmd->tagData.dataLen = kvRowLen(row); + char* pTag = realloc(pCmd->tagData.data, pCmd->tagData.dataLen); if (pTag == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - pCmd->pTagData = pTag; - pTag->dataLen = htonl(kvRowLen(row)); - kvRowCpy(pTag->data, row); + + kvRowCpy(pTag, row); free(row); + pCmd->tagData.data = pTag; index = 0; sToken = tStrGetToken(sql, &index, false, 0, NULL); @@ -975,7 +966,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { } createTable = true; - code = tscGetMeterMetaEx(pSql, pTableMetaInfo, true); + code = tscGetTableMetaEx(pSql, pTableMetaInfo, true); if (TSDB_CODE_TSC_ACTION_IN_PROGRESS == code) { return code; } @@ -986,7 +977,7 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) { } else { sql = sToken.z; } - code = tscGetMeterMetaEx(pSql, pTableMetaInfo, false); + code = tscGetTableMetaEx(pSql, pTableMetaInfo, false); if (pCmd->curSql == NULL) { assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS); @@ -1058,18 +1049,17 @@ int tsParseInsertSql(SSqlObj *pSql) { return code; } - if (NULL == pCmd->pTableList) { - pCmd->pTableList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); - pCmd->pDataBlocks = taosArrayInit(4, POINTER_BYTES); - if (NULL == pCmd->pTableList || NULL == pSql->cmd.pDataBlocks) { + if (NULL == pCmd->pTableBlockHashList) { + pCmd->pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + if (NULL == pCmd->pTableBlockHashList) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; - goto _error; + goto _clean; } } else { str = pCmd->curSql; } - tscDebug("%p create data block list for submit data:%p, pTableList:%p", pSql, 
pCmd->pDataBlocks, pCmd->pTableList); + tscDebug("%p create data block list hashList:%p", pSql, pCmd->pTableBlockHashList); while (1) { int32_t index = 0; @@ -1091,7 +1081,7 @@ int tsParseInsertSql(SSqlObj *pSql) { */ if (totalNum == 0) { code = TSDB_CODE_TSC_INVALID_SQL; - goto _error; + goto _clean; } else { break; } @@ -1104,11 +1094,11 @@ int tsParseInsertSql(SSqlObj *pSql) { // Check if the table name available or not if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) { code = tscInvalidSQLErrMsg(pCmd->payload, "table name invalid", sToken.z); - goto _error; + goto _clean; } if ((code = tscSetTableFullName(pTableMetaInfo, &sTblToken, pSql)) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) { @@ -1122,12 +1112,12 @@ int tsParseInsertSql(SSqlObj *pSql) { tscError("%p async insert parse error, code:%s", pSql, tstrerror(code)); pCmd->curSql = NULL; - goto _error; + goto _clean; } if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL); - goto _error; + goto _clean; } index = 0; @@ -1136,7 +1126,7 @@ int tsParseInsertSql(SSqlObj *pSql) { if (sToken.n == 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z); - goto _error; + goto _clean; } STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -1148,32 +1138,32 @@ int tsParseInsertSql(SSqlObj *pSql) { tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns); if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } /* * app here insert data in different vnodes, so we need to set the following * data in another submit procedure using async insert routines */ - code = doParseInsertStatement(pSql, pCmd->pTableList, &str, &spd, &totalNum); + code = doParseInsertStatement(pCmd, &str, &spd, &totalNum); if (code != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } } else if (sToken.type == TK_FILE) { if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } index = 0; sToken = tStrGetToken(str, &index, false, 0, NULL); if (sToken.type != TK_STRING && sToken.type != TK_ID) { code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); - goto _error; + goto _clean; } str += index; if (sToken.n == 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); - goto _error; + goto _clean; } strncpy(pCmd->payload, sToken.z, sToken.n); @@ -1183,7 +1173,7 @@ int tsParseInsertSql(SSqlObj *pSql) { wordexp_t full_path; if (wordexp(pCmd->payload, &full_path, 0) != 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "invalid filename", sToken.z); - goto _error; + goto _clean; } tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize); @@ -1195,7 +1185,7 @@ int tsParseInsertSql(SSqlObj *pSql) { SSchema * pSchema = tscGetTableSchema(pTableMeta); if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } SParsedDataColInfo spd = {0}; @@ -1230,7 +1220,7 @@ int tsParseInsertSql(SSqlObj *pSql) { if (spd.hasVal[t] == true) { code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z); - goto _error; + goto _clean; } spd.hasVal[t] = true; @@ -1241,13 +1231,13 @@ int tsParseInsertSql(SSqlObj *pSql) { if (!findColumnIndex) { code = 
tscInvalidSQLErrMsg(pCmd->payload, "invalid column name", sToken.z); - goto _error; + goto _clean; } } if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > tinfo.numOfColumns) { code = tscInvalidSQLErrMsg(pCmd->payload, "column name expected", sToken.z); - goto _error; + goto _clean; } index = 0; @@ -1256,16 +1246,16 @@ int tsParseInsertSql(SSqlObj *pSql) { if (sToken.type != TK_VALUES) { code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z); - goto _error; + goto _clean; } - code = doParseInsertStatement(pSql, pCmd->pTableList, &str, &spd, &totalNum); + code = doParseInsertStatement(pCmd, &str, &spd, &totalNum); if (code != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } } else { code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z); - goto _error; + goto _clean; } } @@ -1274,25 +1264,18 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - if (taosArrayGetSize(pCmd->pDataBlocks) > 0) { // merge according to vgId - if ((code = tscMergeTableDataBlocks(pSql, pCmd->pDataBlocks)) != TSDB_CODE_SUCCESS) { - goto _error; + if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId + if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) { + goto _clean; } } code = TSDB_CODE_SUCCESS; goto _clean; -_error: - pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); - _clean: - taosHashCleanup(pCmd->pTableList); - pCmd->pTableList = NULL; - - pCmd->curSql = NULL; + pCmd->curSql = NULL; pCmd->parseFinished = 1; - return code; } @@ -1309,7 +1292,6 @@ int tsInsertInitialCheck(SSqlObj *pSql) { pCmd->count = 0; pCmd->command = TSDB_SQL_INSERT; - pSql->res.numOfRows = 0; SQueryInfo *pQueryInfo = tscGetQueryInfoDetailSafely(pCmd, pCmd->clauseIndex); @@ -1373,7 +1355,8 @@ int tsParseSql(SSqlObj *pSql, bool initial) { pSql->parseRetry++; ret = tscToSQLCmd(pSql, &SQLInfo); } - SQLInfoDestroy(&SQLInfo); + + SqlInfoDestroy(&SQLInfo); } /* @@ -1399,7 +1382,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL); } - if ((code = tscMergeTableDataBlocks(pSql, pCmd->pDataBlocks)) != TSDB_CODE_SUCCESS) { + if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) { return code; } @@ -1456,18 +1439,21 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { int32_t count = 0; int32_t maxRows = 0; - tscDestroyBlockArrayList(pSql->cmd.pDataBlocks); - pCmd->pDataBlocks = taosArrayInit(1, POINTER_BYTES); + tfree(pCmd->pTableMetaList); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + + if (pCmd->pTableBlockHashList == NULL) { + pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + } STableDataBlocks *pTableDataBlock = NULL; - int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, tinfo.rowSize, sizeof(SSubmitBlk), pTableMetaInfo->name, pTableMeta, &pTableDataBlock); + int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, + sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL); if (ret != TSDB_CODE_SUCCESS) { // return ret; } - taosArrayPush(pCmd->pDataBlocks, &pTableDataBlock); tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows); - char *tokenBuf = calloc(1, 4096); while ((readLen = tgetline(&line, &n, fp)) != -1) { @@ -1529,8 +1515,6 @@ void 
tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) { SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport)); SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL); - - pNew->cmd.pDataBlocks = taosArrayInit(4, POINTER_BYTES); pCmd->count = 1; FILE *fp = fopen(pCmd->payload, "r"); @@ -1538,7 +1522,7 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) { pSql->res.code = TAOS_SYSTEM_ERROR(errno); tscError("%p failed to open file %s to load data from file, code:%s", pSql, pCmd->payload, tstrerror(pSql->res.code)); - tfree(pSupporter) + tfree(pSupporter); tscQueueAsyncRes(pSql); return; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 30e0729427..8134a35811 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -800,9 +800,9 @@ static int insertStmtExecute(STscStmt* stmt) { STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); assert(pCmd->numOfClause == 1); - if (taosArrayGetSize(pCmd->pDataBlocks) > 0) { + if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgid - int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks); + int code = tscMergeTableDataBlocks(stmt->pSql); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 18fc79c474..f813ff85d9 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -262,6 +262,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { SSqlStream *pStream = pObj->streamList; while (pStream) { tstrncpy(pSdesc->sql, pStream->pSql->sqlstr, sizeof(pSdesc->sql)); + if (pStream->dstTable == NULL) { + pSdesc->dstTable[0] = 0; + } else { + tstrncpy(pSdesc->dstTable, pStream->dstTable, sizeof(pSdesc->dstTable)); + } pSdesc->streamId = htonl(pStream->streamId); pSdesc->num = htobe64(pStream->num); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index dd336bcf05..71863cbe15 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -66,9 +66,9 @@ static bool validateTagParams(SArray* pTagsList, SArray* pFieldList, SSqlCmd* pC static int32_t setObjFullName(char* fullName, const char* account, SStrToken* pDB, SStrToken* tableName, int32_t* len); -static void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength); +static void getColumnName(tSqlExprItem* pItem, char* resultFieldName, int32_t nameLength); -static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult); +static int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult); static int32_t insertResultField(SQueryInfo* pQueryInfo, int32_t outputIndex, SColumnList* pIdList, int16_t bytes, int8_t type, char* fieldName, SSqlExpr* pSqlExpr); @@ -87,7 +87,7 @@ static int32_t parseIntervalClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuery static int32_t parseOffsetClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); static int32_t parseSlidingClause(SSqlObj* pSql, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySql); -static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem); +static int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem); static int32_t parseWhereClause(SQueryInfo* pQueryInfo, tSQLExpr** pExpr, SSqlObj* pSql); 
static int32_t parseFillClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQuerySQL); @@ -1279,7 +1279,7 @@ static void tscInsertPrimaryTSSourceColumn(SQueryInfo* pQueryInfo, SColumnIndex* tscColumnListInsert(pQueryInfo->colList, &tsCol); } -static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSQLExprItem* pItem) { +static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t exprIndex, tSqlExprItem* pItem) { const char* msg1 = "invalid column name, illegal column type, or columns in arithmetic expression from two tables"; const char* msg2 = "invalid arithmetic expression in select clause"; const char* msg3 = "tag columns can not be used in arithmetic expression"; @@ -1310,7 +1310,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t SColumnIndex index = {.tableIndex = tableIndex}; SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), - -1000, sizeof(double), false); + getNewResColId(pQueryInfo), sizeof(double), false); char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->token.z; size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1); @@ -1420,7 +1420,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t return TSDB_CODE_SUCCESS; } -static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSQLExprItem* pItem) { +static void addProjectQueryCol(SQueryInfo* pQueryInfo, int32_t startPos, SColumnIndex* pIndex, tSqlExprItem* pItem) { SSqlExpr* pExpr = doAddProjectCol(pQueryInfo, pIndex->columnIndex, pIndex->tableIndex); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, pIndex->tableIndex); @@ -1484,7 +1484,7 @@ int32_t parseSelectClause(SSqlCmd* pCmd, int32_t clauseIndex, tSQLExprList* pSel for (int32_t i = 0; i < pSelection->nExpr; ++i) { int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - tSQLExprItem* pItem = &pSelection->a[i]; + tSqlExprItem* pItem = &pSelection->a[i]; // project on all fields int32_t optr = pItem->pNode->nSQLOptr; @@ -1643,7 +1643,7 @@ static int32_t doAddProjectionExprAndResultFields(SQueryInfo* pQueryInfo, SColum return numOfTotalColumns; } -int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQLExprItem* pItem) { +int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExprItem* pItem) { const char* msg0 = "invalid column name"; const char* msg1 = "tag for normal table query is not allowed"; @@ -1767,7 +1767,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS return TSDB_CODE_SUCCESS; } -void setResultColName(char* name, tSQLExprItem* pItem, int32_t functionId, SStrToken* pToken, bool multiCols) { +void setResultColName(char* name, tSqlExprItem* pItem, int32_t functionId, SStrToken* pToken, bool multiCols) { if (pItem->aliasName != NULL) { tstrncpy(name, pItem->aliasName, TSDB_COL_NAME_LEN); } else if (multiCols) { @@ -1775,19 +1775,22 @@ void setResultColName(char* name, tSQLExprItem* pItem, int32_t functionId, SStrT int32_t len = MIN(pToken->n + 1, TSDB_COL_NAME_LEN); tstrncpy(uname, pToken->z, len); - int32_t size = TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1; - char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1] = {0}; + if (tsKeepOriginalColumnName) { // keep the original column name + tstrncpy(name, uname, TSDB_COL_NAME_LEN); + } else { + int32_t size = 
TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1; + char tmp[TSDB_COL_NAME_LEN + tListLen(aAggs[functionId].aName) + 2 + 1] = {0}; + snprintf(tmp, size, "%s(%s)", aAggs[functionId].aName, uname); - snprintf(tmp, size, "%s(%s)", aAggs[functionId].aName, uname); - - tstrncpy(name, tmp, TSDB_COL_NAME_LEN); + tstrncpy(name, tmp, TSDB_COL_NAME_LEN); + } } else { // use the user-input result column name int32_t len = MIN(pItem->pNode->token.n + 1, TSDB_COL_NAME_LEN); tstrncpy(name, pItem->pNode->token.z, len); } } -int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSQLExprItem* pItem, bool finalResult) { +int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t colIndex, tSqlExprItem* pItem, bool finalResult) { STableMetaInfo* pTableMetaInfo = NULL; int32_t optr = pItem->pNode->nSQLOptr; @@ -1817,7 +1820,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (pItem->pNode->pParam != NULL) { - tSQLExprItem* pParamElem = &pItem->pNode->pParam->a[0]; + tSqlExprItem* pParamElem = &pItem->pNode->pParam->a[0]; SStrToken* pToken = &pParamElem->pNode->colInfo; int16_t sqlOptr = pParamElem->pNode->nSQLOptr; if ((pToken->z == NULL || pToken->n == 0) @@ -1918,7 +1921,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); + tSqlExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2037,7 +2040,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col /* in first/last function, multiple columns can be add to resultset */ for (int32_t i = 0; i < pItem->pNode->pParam->nExpr; ++i) { - tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[i]); + tSqlExprItem* pParamElem = &(pItem->pNode->pParam->a[i]); if (pParamElem->pNode->nSQLOptr != TK_ALL && pParamElem->pNode->nSQLOptr != TK_ID) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } @@ -2150,7 +2153,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - tSQLExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); + tSqlExprItem* pParamElem = &(pItem->pNode->pParam->a[0]); if (pParamElem->pNode->nSQLOptr != TK_ID) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -2350,7 +2353,7 @@ static SColumnList getColumnList(int32_t num, int16_t tableIndex, int32_t column return columnList; } -void getColumnName(tSQLExprItem* pItem, char* resultFieldName, int32_t nameLength) { +void getColumnName(tSqlExprItem* pItem, char* resultFieldName, int32_t nameLength) { if (pItem->aliasName != NULL) { strncpy(resultFieldName, pItem->aliasName, nameLength); } else { @@ -3517,7 +3520,7 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSQLExpr* pExpr, SQueryInfo* pQuer int32_t outputIndex = (int32_t)tscSqlExprNumOfExprs(pQueryInfo); - tSQLExprItem item = {.pNode = pExpr, .aliasName = NULL}; + tSqlExprItem item = {.pNode = pExpr, .aliasName = NULL}; // sql function list in selection clause. 
// Append the sqlExpr into exprList of pQueryInfo structure sequentially @@ -3734,7 +3737,7 @@ static int32_t setExprToCond(tSQLExpr** parent, tSQLExpr* pExpr, const char* msg return invalidSqlErrMsg(msgBuf, msg); } - *parent = tSQLExprCreate((*parent), pExpr, parentOptr); + *parent = tSqlExprCreate((*parent), pExpr, parentOptr); } else { *parent = pExpr; } @@ -3782,7 +3785,7 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSQL * to release expression, e.g., m1.ts = m2.ts, * since this expression is used to set the join query type */ - tSQLExprDestroy(*pExpr); + tSqlExprDestroy(*pExpr); } else { ret = setExprToCond(&pCondExpr->pTimewindow, *pExpr, msg3, parentOptr, pQueryInfo->msg); } @@ -3928,17 +3931,17 @@ static void doCompactQueryExpr(tSQLExpr** pExpr) { if ((*pExpr)->pLeft == NULL && (*pExpr)->pRight == NULL && ((*pExpr)->nSQLOptr == TK_OR || (*pExpr)->nSQLOptr == TK_AND)) { - tSQLExprNodeDestroy(*pExpr); + tSqlExprNodeDestroy(*pExpr); *pExpr = NULL; } else if ((*pExpr)->pLeft == NULL && (*pExpr)->pRight != NULL) { tSQLExpr* tmpPtr = (*pExpr)->pRight; - tSQLExprNodeDestroy(*pExpr); + tSqlExprNodeDestroy(*pExpr); (*pExpr) = tmpPtr; } else if ((*pExpr)->pRight == NULL && (*pExpr)->pLeft != NULL) { tSQLExpr* tmpPtr = (*pExpr)->pLeft; - tSQLExprNodeDestroy(*pExpr); + tSqlExprNodeDestroy(*pExpr); (*pExpr) = tmpPtr; } @@ -3961,7 +3964,7 @@ static void doExtractExprForSTable(SSqlCmd* pCmd, tSQLExpr** pExpr, SQueryInfo* (*pExpr) = NULL; } else { - *pOut = tSQLExprCreate(NULL, NULL, (*pExpr)->nSQLOptr); + *pOut = tSqlExprCreate(NULL, NULL, (*pExpr)->nSQLOptr); doExtractExprForSTable(pCmd, &(*pExpr)->pLeft, pQueryInfo, &((*pOut)->pLeft), tableIndex); doExtractExprForSTable(pCmd, &(*pExpr)->pRight, pQueryInfo, &((*pOut)->pRight), tableIndex); @@ -4168,23 +4171,23 @@ static int32_t validateJoinExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr static void cleanQueryExpr(SCondExpr* pCondExpr) { if (pCondExpr->pTableCond) { - tSQLExprDestroy(pCondExpr->pTableCond); + tSqlExprDestroy(pCondExpr->pTableCond); } if (pCondExpr->pTagCond) { - tSQLExprDestroy(pCondExpr->pTagCond); + tSqlExprDestroy(pCondExpr->pTagCond); } if (pCondExpr->pColumnCond) { - tSQLExprDestroy(pCondExpr->pColumnCond); + tSqlExprDestroy(pCondExpr->pColumnCond); } if (pCondExpr->pTimewindow) { - tSQLExprDestroy(pCondExpr->pTimewindow); + tSqlExprDestroy(pCondExpr->pTimewindow); } if (pCondExpr->pJoinExpr) { - tSQLExprDestroy(pCondExpr->pJoinExpr); + tSqlExprDestroy(pCondExpr->pJoinExpr); } } @@ -4252,8 +4255,8 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE tsSetSTableQueryCond(&pQueryInfo->tagCond, uid, &bw); doCompactQueryExpr(pExpr); - - tSQLExprDestroy(p1); + + tSqlExprDestroy(p1); tExprTreeDestroy(&p, NULL); taosArrayDestroy(colList); @@ -4910,6 +4913,8 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { tVariantListItem* pItem = taosArrayGet(pVarList, 1); SSchema* pTagsSchema = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, columnIndex.columnIndex); + pAlterSQL->tagData.data = calloc(1, pTagsSchema->bytes * TSDB_NCHAR_SIZE + VARSTR_HEADER_SIZE); + if (tVariantDump(&pItem->pVar, pAlterSQL->tagData.data, pTagsSchema->type, true) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg13); } @@ -5317,7 +5322,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn // keep original limitation value in globalLimit pQueryInfo->clauseLimit = pQueryInfo->limit.limit; pQueryInfo->prjOffset = 
pQueryInfo->limit.offset; - pQueryInfo->tableLimit = -1; + pQueryInfo->vgroupLimit = -1; if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { /* @@ -5327,7 +5332,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn * than or equal to the value of limit. */ if (pQueryInfo->limit.limit > 0) { - pQueryInfo->tableLimit = pQueryInfo->limit.limit + pQueryInfo->limit.offset; + pQueryInfo->vgroupLimit = pQueryInfo->limit.limit + pQueryInfo->limit.offset; pQueryInfo->limit.limit = -1; } @@ -5913,25 +5918,33 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ if (pExprList->nExpr != 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - + bool server_status = false; tSQLExpr* pExpr = pExprList->a[0].pNode; if (pExpr->operand.z == NULL) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - + //handle 'select 1' + if (pExpr->token.n == 1 && 0 == strncasecmp(pExpr->token.z, "1", 1)) { + server_status = true; + } else { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + } // TODO redefine the function - SDNodeDynConfOption functionsInfo[5] = {{"database()", 10}, - {"server_version()", 16}, - {"server_status()", 15}, - {"client_version()", 16}, - {"current_user()", 14}}; + SDNodeDynConfOption functionsInfo[5] = {{"database()", 10}, + {"server_version()", 16}, + {"server_status()", 15}, + {"client_version()", 16}, + {"current_user()", 14}}; int32_t index = -1; - for (int32_t i = 0; i < tListLen(functionsInfo); ++i) { - if (strncasecmp(functionsInfo[i].name, pExpr->operand.z, functionsInfo[i].len) == 0 && - functionsInfo[i].len == pExpr->operand.n) { - index = i; - break; + if (server_status == true) { + index = 2; + } else { + for (int32_t i = 0; i < tListLen(functionsInfo); ++i) { + if (strncasecmp(functionsInfo[i].name, pExpr->operand.z, functionsInfo[i].len) == 0 && + functionsInfo[i].len == pExpr->operand.n) { + index = i; + break; + } } } @@ -6141,96 +6154,105 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) { const int32_t TABLE_INDEX = 0; const int32_t STABLE_INDEX = 1; - STableMetaInfo* pStableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX); + STableMetaInfo* pStableMetaInfo = tscGetMetaInfo(pQueryInfo, STABLE_INDEX); // super table name, create table by using dst - SStrToken* pToken = &(pCreateTable->usingInfo.stableName); + int32_t numOfTables = (int32_t) taosArrayGetSize(pCreateTable->childTableInfo); + for(int32_t j = 0; j < numOfTables; ++j) { + SCreatedTableInfo* pCreateTableInfo = taosArrayGet(pCreateTable->childTableInfo, j); - if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - int32_t code = tscSetTableFullName(pStableMeterMetaInfo, pToken, pSql); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - // get meter meta from mnode - tstrncpy(pCreateTable->usingInfo.tagdata.name, pStableMeterMetaInfo->name, sizeof(pCreateTable->usingInfo.tagdata.name)); - SArray* pList = pInfo->pCreateTableInfo->usingInfo.pTagVals; - - code = tscGetTableMeta(pSql, pStableMeterMetaInfo); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - size_t size = taosArrayGetSize(pList); - if (tscGetNumOfTags(pStableMeterMetaInfo->pTableMeta) != size) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); - } - - // too long tag values will return invalid sql, not be truncated automatically - SSchema* pTagSchema = tscGetTableTagSchema(pStableMeterMetaInfo->pTableMeta); - - STagData* pTag = 
&pCreateTable->usingInfo.tagdata; - SKVRowBuilder kvRowBuilder = {0}; - if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - - int32_t ret = TSDB_CODE_SUCCESS; - for (int32_t i = 0; i < size; ++i) { - SSchema* pSchema = &pTagSchema[i]; - tVariantListItem* pItem = taosArrayGet(pList, i); - - char tagVal[TSDB_MAX_TAGS_LEN]; - if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { - if (pItem->pVar.nLen > pSchema->bytes) { - tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } + SStrToken* pToken = &pCreateTableInfo->stableName; + if (tscValidateName(pToken) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); - - // check again after the convert since it may be converted from binary to nchar. - if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { - int16_t len = varDataTLen(tagVal); - if (len > pSchema->bytes) { - tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); - } + int32_t code = tscSetTableFullName(pStableMetaInfo, pToken, pSql); + if (code != TSDB_CODE_SUCCESS) { + return code; } + // get table meta from mnode + tstrncpy(pCreateTableInfo->tagdata.name, pStableMetaInfo->name, tListLen(pCreateTableInfo->tagdata.name)); + SArray* pList = pCreateTableInfo->pTagVals; + + code = tscGetTableMeta(pSql, pStableMetaInfo); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + size_t size = taosArrayGetSize(pList); + if (tscGetNumOfTags(pStableMetaInfo->pTableMeta) != size) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + + // too long tag values will return invalid sql, not be truncated automatically + SSchema *pTagSchema = tscGetTableTagSchema(pStableMetaInfo->pTableMeta); + STagData *pTag = &pCreateTableInfo->tagdata; + + SKVRowBuilder kvRowBuilder = {0}; + if (tdInitKVRowBuilder(&kvRowBuilder) < 0) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + int32_t ret = TSDB_CODE_SUCCESS; + for (int32_t i = 0; i < size; ++i) { + SSchema* pSchema = &pTagSchema[i]; + tVariantListItem* pItem = taosArrayGet(pList, i); + + char tagVal[TSDB_MAX_TAGS_LEN]; + if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + if (pItem->pVar.nLen > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + ret = tVariantDump(&(pItem->pVar), tagVal, pSchema->type, true); + + // check again after the convert since it may be converted from binary to nchar. 
+ if (pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) { + int16_t len = varDataTLen(tagVal); + if (len > pSchema->bytes) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + } + + if (ret != TSDB_CODE_SUCCESS) { + tdDestroyKVRowBuilder(&kvRowBuilder); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); + } + + SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); + tdDestroyKVRowBuilder(&kvRowBuilder); + if (row == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + tdSortKVRowByColIdx(row); + pTag->dataLen = kvRowLen(row); + + if (pTag->data == NULL) { + pTag->data = malloc(pTag->dataLen); + } + + kvRowCpy(pTag->data, row); + free(row); + + // table name + if (tscValidateName(&(pCreateTableInfo->name)) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX); + ret = tscSetTableFullName(pTableMetaInfo, &pCreateTableInfo->name, pSql); if (ret != TSDB_CODE_SUCCESS) { - tdDestroyKVRowBuilder(&kvRowBuilder); - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + return ret; } - - - tdAddColToKVRow(&kvRowBuilder, pSchema->colId, pSchema->type, tagVal); - } - - SKVRow row = tdGetKVRowFromBuilder(&kvRowBuilder); - tdDestroyKVRowBuilder(&kvRowBuilder); - if (row == NULL) { - return TSDB_CODE_TSC_OUT_OF_MEMORY; - } - tdSortKVRowByColIdx(row); - pTag->dataLen = kvRowLen(row); - kvRowCpy(pTag->data, row); - free(row); - - // table name - if (tscValidateName(&pInfo->pCreateTableInfo->name) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } - - STableMetaInfo* pTableMeterMetaInfo = tscGetMetaInfo(pQueryInfo, TABLE_INDEX); - ret = tscSetTableFullName(pTableMeterMetaInfo, &pInfo->pCreateTableInfo->name, pSql); - if (ret != TSDB_CODE_SUCCESS) { - return ret; + pCreateTableInfo->fullname = strndup(pTableMetaInfo->name, TSDB_TABLE_FNAME_LEN); } return TSDB_CODE_SUCCESS; @@ -6347,14 +6369,13 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { assert(pQuerySql != NULL && (pQuerySql->from == NULL || taosArrayGetSize(pQuerySql->from) > 0)); const char* msg0 = "invalid table name"; - const char* msg2 = "point interpolation query needs timestamp"; - const char* msg5 = "fill only available for interval query"; - const char* msg6 = "start(end) time of query range required or time range too large"; - const char* msg7 = "illegal number of tables in from clause"; - const char* msg8 = "too many columns in selection clause"; - const char* msg9 = "TWA query requires both the start and end time"; - const char* msg10 = "too many tables in from clause"; - const char* msg11 = "invalid table alias name"; + const char* msg1 = "point interpolation query needs timestamp"; + const char* msg2 = "fill only available for interval query"; + const char* msg3 = "start(end) time of query range required or time range too large"; + const char* msg4 = "illegal number of tables in from clause"; + const char* msg5 = "too many columns in selection clause"; + const char* msg6 = "too many tables in from clause"; + const char* msg7 = "invalid table alias name"; int32_t code = TSDB_CODE_SUCCESS; @@ -6370,7 +6391,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { // too many result columns not support order by in query if (pQuerySql->pSelection->nExpr > 
TSDB_MAX_COLUMNS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); } /* @@ -6388,13 +6409,13 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { size_t fromSize = taosArrayGetSize(pQuerySql->from); if (fromSize > TSDB_MAX_JOIN_TABLE_NUM * 2) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); } pQueryInfo->command = TSDB_SQL_SELECT; if (fromSize > 4) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg10); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); } // set all query tables, which are maybe more than one. @@ -6427,12 +6448,12 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { tVariantListItem* p1 = taosArrayGet(pQuerySql->from, i + 1); if (p1->pVar.nType != TSDB_DATA_TYPE_BINARY) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } SStrToken aliasName = {.z = p1->pVar.pz, .n = p1->pVar.nLen, .type = TK_STRING}; if (tscValidateName(&aliasName) != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg11); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg7); } // has no table alias name @@ -6510,12 +6531,6 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { } } - // user does not specified the query time window, twa is not allowed in such case. - if ((pQueryInfo->window.skey == INT64_MIN || pQueryInfo->window.ekey == INT64_MAX || - (pQueryInfo->window.ekey == INT64_MAX / 1000 && tinfo.precision == TSDB_TIME_PRECISION_MILLI)) && tscIsTWAQuery(pQueryInfo)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg9); - } - // no result due to invalid query time range if (pQueryInfo->window.skey > pQueryInfo->window.ekey) { pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; @@ -6523,7 +6538,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { } if (!hasTimestampForPointInterpQuery(pQueryInfo)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } // in case of join query, time range is required. 
@@ -6531,7 +6546,7 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey); if (timeRange == 0 && pQueryInfo->window.skey == 0) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } @@ -6551,19 +6566,19 @@ int32_t doCheckForQuery(SSqlObj* pSql, SQuerySQL* pQuerySql, int32_t index) { */ if (pQuerySql->fillType != NULL) { if (pQueryInfo->interval.interval == 0 && (!tscIsPointInterpQuery(pQueryInfo))) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } - if (pQueryInfo->interval.interval > 0 && pQueryInfo->interval.intervalUnit != 'n' && pQueryInfo->interval.intervalUnit != 'y') { + if (pQueryInfo->interval.interval > 0) { bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER); if (initialWindows) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey); // number of result is not greater than 10,000,000 if ((timeRange == 0) || (timeRange / pQueryInfo->interval.interval) > MAX_INTERVAL_TIME_WINDOW) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg6); + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 59bcdd691d..ded04388f4 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -147,12 +147,18 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { SSqlObj *pSql = tres; SSqlRes *pRes = &pSql->res; - if (code == 0) { + if (code == TSDB_CODE_SUCCESS) { SHeartBeatRsp *pRsp = (SHeartBeatRsp *)pRes->pRsp; - SRpcEpSet * epSet = &pRsp->epSet; + SRpcEpSet *epSet = &pRsp->epSet; if (epSet->numOfEps > 0) { tscEpSetHtons(epSet); - tscUpdateMgmtEpSet(pSql, epSet); + if (!tscEpSetIsEqual(&pSql->pTscObj->tscCorMgmtEpSet->epSet, epSet)) { + tscTrace("%p updating epset: numOfEps: %d, inUse: %d", pSql, epSet->numOfEps, epSet->inUse); + for (int8_t i = 0; i < epSet->numOfEps; i++) { + tscTrace("endpoint %d: fqdn=%s, port=%d", i, epSet->fqdn[i], epSet->port[i]); + } + tscUpdateMgmtEpSet(pSql, epSet); + } } pSql->pTscObj->connId = htonl(pRsp->connId); @@ -161,11 +167,40 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { tscKillConnection(pObj); return; } else { - if (pRsp->queryId) tscKillQuery(pObj, htonl(pRsp->queryId)); - if (pRsp->streamId) tscKillStream(pObj, htonl(pRsp->streamId)); + if (pRsp->queryId) { + tscKillQuery(pObj, htonl(pRsp->queryId)); + } + + if (pRsp->streamId) { + tscKillStream(pObj, htonl(pRsp->streamId)); + } } + + int32_t total = htonl(pRsp->totalDnodes); + int32_t online = htonl(pRsp->onlineDnodes); + assert(online <= total); + + if (online < total) { + tscError("HB:%p, total dnode:%d, online dnode:%d", pSql, total, online); + pSql->res.code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + } + + if (pRes->length == NULL) { + pRes->length = calloc(2, sizeof(int32_t)); + } + + pRes->length[0] = total; + pRes->length[1] = online; } else { tscDebug("%" PRId64 " heartbeat failed, code:%s", pObj->hbrid, tstrerror(code)); + if (pRes->length == NULL) { + pRes->length = calloc(2, sizeof(int32_t)); + } + + pRes->length[1] = 0; + if (pRes->length[0] == 0) { + pRes->length[0] = 1; // make sure that the value of the total node is greater than 
the online node + } } if (pObj->hbrid != 0) { @@ -280,18 +315,20 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { } int32_t cmd = pCmd->command; - if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_INSERT || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && + + // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema + if (cmd == TSDB_SQL_INSERT && rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) { + pSql->cmd.submitSchema = 1; + } + + if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || - rpcMsg->code == TSDB_CODE_APP_NOT_READY || - rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE)) { - tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry); - - // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema - if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) { - pSql->cmd.submitSchema = 1; - } + rpcMsg->code == TSDB_CODE_APP_NOT_READY)) { + + pSql->retry++; + tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), pSql->retry); pSql->res.code = rpcMsg->code; // keep the previous error code if (pSql->retry > pSql->maxRetry) { @@ -451,10 +488,10 @@ int tscProcessSql(SSqlObj *pSql) { int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg *) pSql->cmd.payload; - pRetrieveMsg->qhandle = htobe64(pSql->res.qhandle); SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); - pRetrieveMsg->free = htons(pQueryInfo->type); + pRetrieveMsg->free = htons(pQueryInfo->type); + pRetrieveMsg->qhandle = htobe64(pSql->res.qhandle); // todo valid the vgroupId at the client side STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -681,7 +718,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->queryType = htonl(pQueryInfo->type); - pQueryMsg->tableLimit = htobe64(pQueryInfo->tableLimit); + pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit); size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo); pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number @@ -1234,12 +1271,12 @@ int32_t tscBuildKillMsg(SSqlObj *pSql, SSqlInfo *pInfo) { int tscEstimateCreateTableMsgLength(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &(pSql->cmd); - - int32_t size = minMsgSize() + sizeof(SCMCreateTableMsg); + int32_t size = minMsgSize() + sizeof(SCMCreateTableMsg) + sizeof(SCreateTableMsg); SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; if (pCreateTableInfo->type == TSQL_CREATE_TABLE_FROM_STABLE) { - size += sizeof(STagData); + int32_t numOfTables = (int32_t)taosArrayGetSize(pInfo->pCreateTableInfo->childTableInfo); + size += numOfTables * (sizeof(SCreateTableMsg) + TSDB_MAX_TAGS_LEN); } else { size += sizeof(SSchema) * (pCmd->numOfCols + pCmd->count); } @@ -1267,33 +1304,55 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - SCMCreateTableMsg *pCreateTableMsg = (SCMCreateTableMsg *)pCmd->payload; - strcpy(pCreateTableMsg->tableId, pTableMetaInfo->name); - // use dbinfo from table id without modifying current db 
info - tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateTableMsg->db); - - SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo; - - pCreateTableMsg->igExists = pCreateTable->existCheck ? 1 : 0; - pCreateTableMsg->numOfColumns = htons(pCmd->numOfCols); - pCreateTableMsg->numOfTags = htons(pCmd->count); - - pCreateTableMsg->sqlLen = 0; - char *pMsg = (char *)pCreateTableMsg->schema; + SCreateTableMsg* pCreateMsg = (SCreateTableMsg*)((char*) pCreateTableMsg + sizeof(SCMCreateTableMsg)); + char* pMsg = NULL; int8_t type = pInfo->pCreateTableInfo->type; if (type == TSQL_CREATE_TABLE_FROM_STABLE) { // create by using super table, tags value - STagData* pTag = &pInfo->pCreateTableInfo->usingInfo.tagdata; - *(int32_t*)pMsg = htonl(pTag->dataLen); - pMsg += sizeof(int32_t); - memcpy(pMsg, pTag->name, sizeof(pTag->name)); - pMsg += sizeof(pTag->name); - memcpy(pMsg, pTag->data, pTag->dataLen); - pMsg += pTag->dataLen; + SArray* list = pInfo->pCreateTableInfo->childTableInfo; + + int32_t numOfTables = (int32_t) taosArrayGetSize(list); + pCreateTableMsg->numOfTables = htonl(numOfTables); + + pMsg = (char*) pCreateMsg; + for(int32_t i = 0; i < numOfTables; ++i) { + SCreateTableMsg* pCreate = (SCreateTableMsg*) pMsg; + + pCreate->numOfColumns = htons(pCmd->numOfCols); + pCreate->numOfTags = htons(pCmd->count); + pMsg += sizeof(SCreateTableMsg); + + SCreatedTableInfo* p = taosArrayGet(list, i); + strcpy(pCreate->tableId, p->fullname); + pCreate->igExists = (p->igExist)? 1 : 0; + + // use dbinfo from table id without modifying current db info + tscGetDBInfoFromTableFullName(p->fullname, pCreate->db); + pMsg = serializeTagData(&p->tagdata, pMsg); + + int32_t len = (int32_t)(pMsg - (char*) pCreate); + pCreate->len = htonl(len); + } } else { // create (super) table - pSchema = (SSchema *)pCreateTableMsg->schema; + pCreateTableMsg->numOfTables = htonl(1); // only one table will be created + + strcpy(pCreateMsg->tableId, pTableMetaInfo->name); + + // use dbinfo from table id without modifying current db info + tscGetDBInfoFromTableFullName(pTableMetaInfo->name, pCreateMsg->db); + + SCreateTableSQL *pCreateTable = pInfo->pCreateTableInfo; + + pCreateMsg->igExists = pCreateTable->existCheck ? 
1 : 0; + pCreateMsg->numOfColumns = htons(pCmd->numOfCols); + pCreateMsg->numOfTags = htons(pCmd->count); + + pCreateMsg->sqlLen = 0; + pMsg = (char *)pCreateMsg->schema; + + pSchema = (SSchema *)pCreateMsg->schema; for (int i = 0; i < pCmd->numOfCols + pCmd->count; ++i) { TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i); @@ -1310,7 +1369,7 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SQuerySQL *pQuerySql = pInfo->pCreateTableInfo->pSelect; strncpy(pMsg, pQuerySql->selectToken.z, pQuerySql->selectToken.n + 1); - pCreateTableMsg->sqlLen = htons(pQuerySql->selectToken.n + 1); + pCreateMsg->sqlLen = htons(pQuerySql->selectToken.n + 1); pMsg += pQuerySql->selectToken.n + 1; } } @@ -1394,6 +1453,43 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) { return TSDB_CODE_SUCCESS; } +//int tscBuildCancelQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { +// SCancelQueryMsg *pCancelMsg = (SCancelQueryMsg*) pSql->cmd.payload; +// pCancelMsg->qhandle = htobe64(pSql->res.qhandle); +// +// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); +// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); +// +// if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { +// int32_t vgIndex = pTableMetaInfo->vgroupIndex; +// if (pTableMetaInfo->pVgroupTables == NULL) { +// SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; +// assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); +// +// pCancelMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); +// tscDebug("%p build cancel query msg from vgId:%d, vgIndex:%d", pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex); +// } else { +// int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); +// assert(vgIndex >= 0 && vgIndex < numOfVgroups); +// +// SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); +// +// pCancelMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId); +// tscDebug("%p build cancel query msg from vgId:%d, vgIndex:%d", pSql, pTableIdList->vgInfo.vgId, vgIndex); +// } +// } else { +// STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; +// pCancelMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId); +// tscDebug("%p build cancel query msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId); +// } +// +// pSql->cmd.payloadLen = sizeof(SCancelQueryMsg); +// pSql->cmd.msgType = TSDB_MSG_TYPE_CANCEL_QUERY; +// +// pCancelMsg->header.contLen = htonl(sizeof(SCancelQueryMsg)); +// return TSDB_CODE_SUCCESS; +//} + int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; pCmd->payloadLen = sizeof(SAlterDbMsg); @@ -1550,13 +1646,8 @@ int tscBuildTableMetaMsg(SSqlObj *pSql, SSqlInfo *pInfo) { char *pMsg = (char *)pInfoMsg + sizeof(STableInfoMsg); - if (pCmd->autoCreated && pCmd->pTagData != NULL) { - int len = htonl(pCmd->pTagData->dataLen); - if (len > 0) { - len += sizeof(pCmd->pTagData->name) + sizeof(pCmd->pTagData->dataLen); - memcpy(pInfoMsg->tags, pCmd->pTagData, len); - pMsg += len; - } + if (pCmd->autoCreated && pCmd->tagData.dataLen != 0) { + pMsg = serializeTagData(&pCmd->tagData, pMsg); } pCmd->payloadLen = (int32_t)(pMsg - (char*)pInfoMsg); @@ -2056,6 +2147,10 @@ int tscProcessConnectRsp(SSqlObj *pSql) { if (pConnect->epSet.numOfEps > 0) { tscEpSetHtons(&pConnect->epSet); tscUpdateMgmtEpSet(pSql, &pConnect->epSet); + + for (int i = 0; i < pConnect->epSet.numOfEps; ++i) { + tscDebug("%p epSet.fqdn[%d]: %s, pObj:%p", pSql, i, 
pConnect->epSet.fqdn[i], pObj); + } } strcpy(pObj->sversion, pConnect->serverVersion); @@ -2102,10 +2197,7 @@ int tscProcessDropTableRsp(SSqlObj *pSql) { */ tscDebug("%p force release table meta after drop table:%s", pSql, pTableMetaInfo->name); taosCacheRelease(tscMetaCache, (void **)&pTableMeta, true); - - if (pTableMetaInfo->pTableMeta) { - taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true); - } + assert(pTableMetaInfo->pTableMeta == NULL); return 0; } @@ -2213,7 +2305,7 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { void tscTableMetaCallBack(void *param, TAOS_RES *res, int code); -static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { +static int32_t getTableMetaFromMnode(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { SSqlObj *pNew = calloc(1, sizeof(SSqlObj)); if (NULL == pNew) { tscError("%p malloc failed for new sqlobj to get table meta", pSql); @@ -2240,15 +2332,13 @@ static int32_t getTableMetaFromMgmt(SSqlObj *pSql, STableMetaInfo *pTableMetaInf tstrncpy(pNewMeterMetaInfo->name, pTableMetaInfo->name, sizeof(pNewMeterMetaInfo->name)); - if (pSql->cmd.pTagData != NULL) { - int size = offsetof(STagData, data) + htonl(pSql->cmd.pTagData->dataLen); - pNew->cmd.pTagData = calloc(1, size); - if (pNew->cmd.pTagData == NULL) { + if (pSql->cmd.autoCreated) { + int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData); + if (code != TSDB_CODE_SUCCESS) { tscError("%p malloc failed for new tag data to get table meta", pSql); tscFreeSqlObj(pNew); return TSDB_CODE_TSC_OUT_OF_MEMORY; } - memcpy(pNew->cmd.pTagData, pSql->cmd.pTagData, size); } tscDebug("%p new pSqlObj:%p to get tableMeta, auto create:%d", pSql, pNew, pNew->cmd.autoCreated); @@ -2283,10 +2373,10 @@ int32_t tscGetTableMeta(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo) { return TSDB_CODE_SUCCESS; } - return getTableMetaFromMgmt(pSql, pTableMetaInfo); + return getTableMetaFromMnode(pSql, pTableMetaInfo); } -int tscGetMeterMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists) { +int tscGetTableMetaEx(SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, bool createIfNotExists) { pSql->cmd.autoCreated = createIfNotExists; return tscGetTableMeta(pSql, pTableMetaInfo); } @@ -2310,7 +2400,7 @@ int tscRenewTableMeta(SSqlObj *pSql, int32_t tableIndex) { } taosCacheRelease(tscMetaCache, (void **)&(pTableMetaInfo->pTableMeta), true); - return getTableMetaFromMgmt(pSql, pTableMetaInfo); + return getTableMetaFromMnode(pSql, pTableMetaInfo); } static bool allVgroupInfoRetrieved(SSqlCmd* pCmd, int32_t clauseIndex) { diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 8e165241f0..d7dec2f356 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -900,9 +900,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) { strtolower(pSql->sqlstr, sql); pCmd->curSql = NULL; - if (NULL != pCmd->pTableList) { - taosHashCleanup(pCmd->pTableList); - pCmd->pTableList = NULL; + if (NULL != pCmd->pTableBlockHashList) { + taosHashCleanup(pCmd->pTableBlockHashList); + pCmd->pTableBlockHashList = NULL; } pSql->fp = asyncCallback; diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 68c3bcae16..74b8e4d958 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -535,6 +535,10 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr); } +void tscSetStreamDestTable(SSqlStream* pStream, 
const char* dstTable) { + pStream->dstTable = dstTable; +} + TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)) { STscObj *pObj = (STscObj *)taos; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 819a323db5..a328ae4d04 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -384,7 +384,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { } SQueryInfo *pSubQueryInfo = tscGetQueryInfoDetail(&pPrevSub->cmd, 0); - STSBuf *pTSBuf = pSubQueryInfo->tsBuf; + STSBuf *pTsBuf = pSubQueryInfo->tsBuf; pSubQueryInfo->tsBuf = NULL; // free result for async object will also free sqlObj @@ -402,7 +402,7 @@ static int32_t tscLaunchRealSubqueries(SSqlObj* pSql) { pSql->pSubs[i] = pNew; SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pNew->cmd, 0); - pQueryInfo->tsBuf = pTSBuf; // transfer the ownership of timestamp comp-z data to the new created object + pQueryInfo->tsBuf = pTsBuf; // transfer the ownership of timestamp comp-z data to the new created object // set the second stage sub query for join process TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_SEC_STAGE); @@ -1648,7 +1648,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { pRes->qhandle = 0x1; // hack the qhandle check - const uint32_t nBufferSize = (1u << 16); // 64KB + const uint32_t nBufferSize = (1u << 16u); // 64KB SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -2149,6 +2149,38 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { } } +static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) { + if (pParentObj->retry > pParentObj->maxRetry) { + tscError("%p max retry reached, abort the retry effort", pParentObj); + return false; + } + + for (int32_t i = 0; i < numOfSub; ++i) { + int32_t code = pParentObj->pSubs[i]->res.code; + if (code == TSDB_CODE_SUCCESS) { + continue; + } + + if (code != TSDB_CODE_TDB_TABLE_RECONFIGURE && code != TSDB_CODE_TDB_INVALID_TABLE_ID && + code != TSDB_CODE_VND_INVALID_VGROUP_ID && code != TSDB_CODE_RPC_NETWORK_UNAVAIL && + code != TSDB_CODE_APP_NOT_READY) { + pParentObj->res.code = code; + return false; + } + } + + return true; +} + +static void doFreeInsertSupporter(SSqlObj* pSqlObj) { + assert(pSqlObj != NULL && pSqlObj->subState.numOfSub > 0); + + for(int32_t i = 0; i < pSqlObj->subState.numOfSub; ++i) { + SSqlObj* pSql = pSqlObj->pSubs[i]; + tfree(pSql->param); + } +} + static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) { SInsertSupporter *pSupporter = (SInsertSupporter *)param; SSqlObj* pParentObj = pSupporter->pSql; @@ -2163,23 +2195,81 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) assert(pSql != NULL && pSql->res.code == numOfRows); pParentObj->res.code = pSql->res.code; - } - tfree(pSupporter); + // set the flag in the parent sqlObj + if (pSql->cmd.submitSchema) { + pParentObj->cmd.submitSchema = 1; + } + } if (atomic_sub_fetch_32(&pParentObj->subState.numOfRemain, 1) > 0) { return; } - - tscDebug("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows); // restore user defined fp pParentObj->fp = pParentObj->fetchFp; + int32_t numOfSub = pParentObj->subState.numOfSub; + doFreeInsertSupporter(pParentObj); - // todo remove this parameter in async callback function definition. 
- // all data has been sent to vnode, call user function - int32_t v = (pParentObj->res.code != TSDB_CODE_SUCCESS) ? pParentObj->res.code : (int32_t)pParentObj->res.numOfRows; - (*pParentObj->fp)(pParentObj->param, pParentObj, v); + if (pParentObj->res.code == TSDB_CODE_SUCCESS) { + tscDebug("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows); + + // todo remove this parameter in async callback function definition. + // all data has been sent to vnode, call user function + int32_t v = (pParentObj->res.code != TSDB_CODE_SUCCESS) ? pParentObj->res.code : (int32_t)pParentObj->res.numOfRows; + (*pParentObj->fp)(pParentObj->param, pParentObj, v); + } else { + if (!needRetryInsert(pParentObj, numOfSub)) { + tscQueueAsyncRes(pParentObj); + return; + } + + int32_t numOfFailed = 0; + for(int32_t i = 0; i < numOfSub; ++i) { + SSqlObj* pSql = pParentObj->pSubs[i]; + if (pSql->res.code != TSDB_CODE_SUCCESS) { + numOfFailed += 1; + + // clean up tableMeta in cache + tscFreeQueryInfo(&pSql->cmd, true); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0); + STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0); + tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL); + + tscDebug("%p, failed sub:%d, %p", pParentObj, i, pSql); + } + } + + tscError("%p Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj, + pParentObj->res.numOfRows, numOfFailed, numOfSub); + + tscDebug("%p cleanup %d tableMeta in cache", pParentObj, pParentObj->cmd.numOfTables); + for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) { + taosCacheRelease(tscMetaCache, (void**)&(pParentObj->cmd.pTableMetaList[i]), true); + } + + pParentObj->cmd.parseFinished = false; + pParentObj->subState.numOfRemain = numOfFailed; + + tscResetSqlCmdObj(&pParentObj->cmd, false); + + // in case of insert, redo parsing the sql string and build new submit data block for two reasons: + // 1. the table Id(tid & uid) may have been updated, the submit block needs to be updated accordingly. + // 2. vnode may need the schema information along with submit block to update its local table schema. 
+ tscDebug("%p re-parse sql to generate submit data, retry:%d", pParentObj, pParentObj->retry); + pParentObj->retry++; + + int32_t code = tsParseSql(pParentObj, true); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return; + + if (code != TSDB_CODE_SUCCESS) { + pParentObj->res.code = code; + tscQueueAsyncRes(pParentObj); + return; + } + + tscDoQuery(pParentObj); + } } /** @@ -2187,20 +2277,16 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) * @param pSql * @return */ -int32_t tscHandleInsertRetry(SSqlObj* pSql) { +int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) { assert(pSql != NULL && pSql->param != NULL); - SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param; assert(pSupporter->index < pSupporter->pSql->subState.numOfSub); - STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, pSupporter->index); + STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.pDataBlocks, pSupporter->index); int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock); - // free the data block created from insert sql string - pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); - if ((pRes->code = code)!= TSDB_CODE_SUCCESS) { tscQueueAsyncRes(pSql); return code; // here the pSql may have been released already. @@ -2213,6 +2299,24 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + // it is the failure retry insert + if (pSql->pSubs != NULL) { + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + SInsertSupporter* pSup = calloc(1, sizeof(SInsertSupporter)); + pSup->index = i; + pSup->pSql = pSql; + + pSub->param = pSup; + tscDebug("%p sub:%p launch sub insert, orderOfSub:%d", pSql, pSub, i); + if (pSub->res.code != TSDB_CODE_SUCCESS) { + tscHandleInsertRetry(pSql, pSub); + } + } + + return TSDB_CODE_SUCCESS; + } + pSql->subState.numOfSub = (uint16_t)taosArrayGetSize(pCmd->pDataBlocks); assert(pSql->subState.numOfSub > 0); @@ -2399,12 +2503,12 @@ void tscBuildResFromSubqueries(SSqlObj *pSql) { tscRestoreSQLFuncForSTableQuery(pQueryInfo); } - while (1) { - assert (pRes->row >= pRes->numOfRows); - - doBuildResFromSubqueries(pSql); - tsem_post(&pSql->rspSem); - return; + assert (pRes->row >= pRes->numOfRows); + doBuildResFromSubqueries(pSql); + if (pRes->code == TSDB_CODE_SUCCESS) { + (*pSql->fp)(pSql->param, pSql, pRes->numOfRows); + } else { + tscQueueAsyncRes(pSql); } } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a98132d319..99cddc17d3 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -333,13 +333,15 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) { if (isNull(p, TSDB_DATA_TYPE_NCHAR)) { memcpy(dst, p, varDataTLen(p)); - } else { + } else if (varDataLen(p) > 0) { int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst)); varDataSetLen(dst, length); if (length == 0) { tscError("charset:%s to %s. 
val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); } + } else { + varDataSetLen(dst, 0); } p += pInfo->field.bytes; @@ -377,7 +379,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } -static void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) { +void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) { if (pCmd == NULL || pCmd->numOfClause == 0) { return; } @@ -403,12 +405,18 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd, bool removeFromCache) { pCmd->msgType = 0; pCmd->parseFinished = 0; pCmd->autoCreated = 0; - - taosHashCleanup(pCmd->pTableList); - pCmd->pTableList = NULL; - - pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + for(int32_t i = 0; i < pCmd->numOfTables; ++i) { + if (pCmd->pTableMetaList && pCmd->pTableMetaList[i]) { + taosCacheRelease(tscMetaCache, (void**)&(pCmd->pTableMetaList[i]), false); + } + } + + pCmd->numOfTables = 0; + tfree(pCmd->pTableMetaList); + + pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); tscFreeQueryInfo(pCmd, removeFromCache); } @@ -510,7 +518,8 @@ void tscFreeSqlObj(SSqlObj* pSql) { tscFreeSqlResult(pSql); tscResetSqlCmdObj(pCmd, false); - tfree(pCmd->pTagData); + tfree(pCmd->tagData.data); + pCmd->tagData.dataLen = 0; memset(pCmd->payload, 0, (size_t)pCmd->allocSize); tfree(pCmd->payload); @@ -575,6 +584,21 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) { return NULL; } +void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable) { + if (pBlockHashTable == NULL) { + return NULL; + } + + STableDataBlocks** p = taosHashIterate(pBlockHashTable, NULL); + while(p) { + tscDestroyDataBlock(*p); + p = taosHashIterate(pBlockHashTable, p); + } + + taosHashCleanup(pBlockHashTable); + return NULL; +} + int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { SSqlCmd* pCmd = &pSql->cmd; assert(pDataBlock->pTableMeta != NULL); @@ -671,9 +695,8 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff return TSDB_CODE_SUCCESS; } -int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, - STableDataBlocks** dataBlocks) { +int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, + STableDataBlocks** dataBlocks, SArray* pBlockList) { *dataBlocks = NULL; STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id)); @@ -688,7 +711,9 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t } taosHashPut(pHashList, (const char*)&id, sizeof(int64_t), (char*)dataBlocks, POINTER_BYTES); - taosArrayPush(pDataBlockList, dataBlocks); + if (pBlockList) { + taosArrayPush(pBlockList, dataBlocks); + } } return TSDB_CODE_SUCCESS; @@ -769,22 +794,37 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) { return result; } -int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { +static void extractTableMeta(SSqlCmd* pCmd) { + pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList); + pCmd->pTableMetaList = calloc(pCmd->numOfTables, POINTER_BYTES); + + STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL); + int32_t i = 0; + while(p1) { + 
STableDataBlocks* pBlocks = *p1; + pCmd->pTableMetaList[i++] = taosCacheTransfer(tscMetaCache, (void**) &pBlocks->pTableMeta); + p1 = taosHashIterate(pCmd->pTableBlockHashList, p1); + } + + pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList); +} + +int32_t tscMergeTableDataBlocks(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); - size_t total = taosArrayGetSize(pTableDataBlockList); - for (int32_t i = 0; i < total; ++i) { + STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL); + + STableDataBlocks* pOneTableBlock = *p; + while(pOneTableBlock) { // the maximum expanded size in byte when a row-wise data is converted to SDataRow format - STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, i); int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); STableDataBlocks* dataBuf = NULL; - int32_t ret = - tscGetDataBlockFromList(pVnodeDataBlockHashList, pVnodeDataBlockList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, - tsInsertHeadSize, 0, pOneTableBlock->tableId, pOneTableBlock->pTableMeta, &dataBuf); + int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, + tsInsertHeadSize, 0, pOneTableBlock->tableId, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); if (ret != TSDB_CODE_SUCCESS) { tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret); taosHashCleanup(pVnodeDataBlockHashList); @@ -839,14 +879,19 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { // the length does not include the SSubmitBlk structure pBlocks->dataLen = htonl(finalLen); dataBuf->numOfTables += 1; + + p = taosHashIterate(pCmd->pTableBlockHashList, p); + if (p == NULL) { + break; + } + + pOneTableBlock = *p; } - tscDestroyBlockArrayList(pTableDataBlockList); + extractTableMeta(pCmd); // free the table data blocks; pCmd->pDataBlocks = pVnodeDataBlockList; - -// tscFreeUnusedDataBlocks(pCmd->pDataBlocks); taosHashCleanup(pVnodeDataBlockHashList); return TSDB_CODE_SUCCESS; @@ -1893,15 +1938,11 @@ SSqlObj* createSimpleSubObj(SSqlObj* pSql, void (*fp)(), void* param, int32_t cm pCmd->parseFinished = 1; pCmd->autoCreated = pSql->cmd.autoCreated; - if (pSql->cmd.pTagData != NULL) { - int size = offsetof(STagData, data) + htonl(pSql->cmd.pTagData->dataLen); - pNew->cmd.pTagData = calloc(1, size); - if (pNew->cmd.pTagData == NULL) { - tscError("%p new subquery failed, unable to malloc tag data, tableIndex:%d", pSql, 0); - free(pNew); - return NULL; - } - memcpy(pNew->cmd.pTagData, pSql->cmd.pTagData, size); + int32_t code = copyTagData(&pNew->cmd.tagData, &pSql->cmd.tagData); + if (code != TSDB_CODE_SUCCESS) { + tscError("%p new subquery failed, unable to malloc tag data, tableIndex:%d", pSql, 0); + free(pNew); + return NULL; } if (tscAddSubqueryInfo(pCmd) != TSDB_CODE_SUCCESS) { @@ -2006,7 +2047,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void pnCmd->numOfClause = 0; pnCmd->clauseIndex = 0; pnCmd->pDataBlocks = NULL; + + pnCmd->numOfTables = 0; pnCmd->parseFinished = 1; + pnCmd->pTableMetaList = NULL; + pnCmd->pTableBlockHashList = NULL; if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2023,6 +2068,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void 
pNewQueryInfo->limit = pQueryInfo->limit; pNewQueryInfo->slimit = pQueryInfo->slimit; pNewQueryInfo->order = pQueryInfo->order; + pNewQueryInfo->vgroupLimit = pQueryInfo->vgroupLimit; pNewQueryInfo->tsBuf = NULL; pNewQueryInfo->fillType = pQueryInfo->fillType; pNewQueryInfo->fillVal = NULL; @@ -2531,6 +2577,9 @@ void* tscVgroupInfoClear(SVgroupsInfo *vgroupList) { for(int32_t j = 0; j < pVgroupInfo->numOfEps; ++j) { tfree(pVgroupInfo->epAddr[j].fqdn); } + for(int32_t j = pVgroupInfo->numOfEps; j < TSDB_MAX_REPLICA; j++) { + assert( pVgroupInfo->epAddr[j].fqdn == NULL ); + } } tfree(vgroupList); @@ -2543,6 +2592,41 @@ void tscSVgroupInfoCopy(SVgroupInfo* dst, const SVgroupInfo* src) { for(int32_t i = 0; i < dst->numOfEps; ++i) { tfree(dst->epAddr[i].fqdn); dst->epAddr[i].port = src->epAddr[i].port; + assert(dst->epAddr[i].fqdn == NULL); + dst->epAddr[i].fqdn = strdup(src->epAddr[i].fqdn); } } + +char* serializeTagData(STagData* pTagData, char* pMsg) { + int32_t n = (int32_t) strlen(pTagData->name); + *(int32_t*) pMsg = htonl(n); + pMsg += sizeof(n); + + memcpy(pMsg, pTagData->name, n); + pMsg += n; + + *(int32_t*)pMsg = htonl(pTagData->dataLen); + pMsg += sizeof(int32_t); + + memcpy(pMsg, pTagData->data, pTagData->dataLen); + pMsg += pTagData->dataLen; + + return pMsg; +} + +int32_t copyTagData(STagData* dst, const STagData* src) { + dst->dataLen = src->dataLen; + tstrncpy(dst->name, src->name, tListLen(dst->name)); + + if (dst->dataLen > 0) { + dst->data = malloc(dst->dataLen); + if (dst->data == NULL) { + return -1; + } + + memcpy(dst->data, src->data, dst->dataLen); + } + + return 0; +} \ No newline at end of file diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h index 69bbccd67e..bec8590536 100644 --- a/src/common/inc/tcmdtype.h +++ b/src/common/inc/tcmdtype.h @@ -36,7 +36,7 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_FETCH, "fetch" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_INSERT, "insert" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_UPDATE_TAGS_VAL, "update-tag-val" ) - + // the SQL below is for mgmt node TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DB, "create-db" ) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 2cd8e67b4f..a34044dc0a 100644 --- a/src/common/inc/tglobal.h +++ b/src/common/inc/tglobal.h @@ -46,7 +46,7 @@ extern int32_t tsShellActivityTimer; extern uint32_t tsMaxTmrCtrl; extern float tsNumOfThreadsPerCore; extern int32_t tsNumOfCommitThreads; -extern float tsRatioOfQueryThreads; // todo remove it +extern float tsRatioOfQueryCores; extern int8_t tsDaylight; extern char tsTimezone[]; extern char tsLocale[]; @@ -57,7 +57,9 @@ extern char tsTempDir[]; //query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing -extern int32_t tsHalfCoresForQuery; // only 50% will be used in query processing +extern int32_t tsRetrieveBlockingModel;// retrieve threads will be blocked + +extern int32_t tsKeepOriginalColumnName; // client extern int32_t tsTableMetaKeepTimer; @@ -132,7 +134,7 @@ extern int32_t tsEnableStream; // internal extern int32_t tsPrintAuth; -extern int32_t tscEmbedded; +extern uint32_t tscEmbedded; extern char configDir[]; extern char tsVnodeDir[]; extern char tsDnodeDir[]; @@ -174,7 +176,7 @@ extern int32_t tsLogKeepDays; extern int32_t dDebugFlag; extern int32_t vDebugFlag; extern int32_t mDebugFlag; -extern int32_t cDebugFlag; +extern uint32_t cDebugFlag; extern int32_t jniDebugFlag; extern int32_t tmrDebugFlag; extern int32_t 
sdbDebugFlag; @@ -184,7 +186,7 @@ extern int32_t monDebugFlag; extern int32_t uDebugFlag; extern int32_t rpcDebugFlag; extern int32_t odbcDebugFlag; -extern int32_t qDebugFlag; +extern uint32_t qDebugFlag; extern int32_t wDebugFlag; extern int32_t cqDebugFlag; extern int32_t debugFlag; diff --git a/src/common/inc/tulog.h b/src/common/inc/tulog.h index 2dc2895e63..74f572619b 100644 --- a/src/common/inc/tulog.h +++ b/src/common/inc/tulog.h @@ -23,7 +23,7 @@ extern "C" { #include "tlog.h" extern int32_t uDebugFlag; -extern int32_t tscEmbedded; +extern uint32_t tscEmbedded; #define uFatal(...) { if (uDebugFlag & DEBUG_FATAL) { taosPrintLog("UTL FATAL", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }} #define uError(...) { if (uDebugFlag & DEBUG_ERROR) { taosPrintLog("UTL ERROR ", tscEmbedded ? 255 : uDebugFlag, __VA_ARGS__); }} diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 415cd9926c..673fc16a26 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -25,7 +25,6 @@ #include "tutil.h" #include "tlocale.h" #include "ttimezone.h" -#include "tsync.h" // cluster char tsFirst[TSDB_EP_LEN] = {0}; @@ -52,7 +51,7 @@ int32_t tsMaxConnections = 5000; int32_t tsShellActivityTimer = 3; // second float tsNumOfThreadsPerCore = 1.0f; int32_t tsNumOfCommitThreads = 1; -float tsRatioOfQueryThreads = 0.5f; +float tsRatioOfQueryCores = 1.0f; int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; @@ -107,8 +106,11 @@ int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance // positive value (in MB) int32_t tsQueryBufferSize = -1; -// only 50% cpu will be used in query processing in dnode -int32_t tsHalfCoresForQuery = 0; +// in retrieve blocking model, the retrieve threads will wait for the completion of the query processing. 
+int32_t tsRetrieveBlockingModel = 0; + +// last_row(*), first(*), last_row(ts, col1, col2) query, the result fields will be the original column name +int32_t tsKeepOriginalColumnName = 0; // db parameters int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE; @@ -209,24 +211,24 @@ int32_t tsVersion = 0; // log int32_t tsNumOfLogLines = 10000000; -int32_t mDebugFlag = 135; -int32_t sdbDebugFlag = 135; +int32_t mDebugFlag = 131; +int32_t sdbDebugFlag = 131; int32_t dDebugFlag = 135; int32_t vDebugFlag = 135; -int32_t cDebugFlag = 131; +uint32_t cDebugFlag = 131; int32_t jniDebugFlag = 131; int32_t odbcDebugFlag = 131; int32_t httpDebugFlag = 131; int32_t mqttDebugFlag = 131; int32_t monDebugFlag = 131; -int32_t qDebugFlag = 131; +uint32_t qDebugFlag = 131; int32_t rpcDebugFlag = 131; int32_t uDebugFlag = 131; int32_t debugFlag = 0; int32_t sDebugFlag = 135; int32_t wDebugFlag = 135; int32_t tsdbDebugFlag = 131; -int32_t cqDebugFlag = 135; +int32_t cqDebugFlag = 131; int32_t fsDebugFlag = 135; int32_t (*monStartSystemFp)() = NULL; @@ -456,7 +458,7 @@ static void doInitGlobalConfig(void) { cfg.option = "arbitrator"; cfg.ptr = tsArbitrator; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = TSDB_EP_LEN; @@ -484,12 +486,12 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - cfg.option = "ratioOfQueryThreads"; - cfg.ptr = &tsRatioOfQueryThreads; + cfg.option = "ratioOfQueryCores"; + cfg.ptr = &tsRatioOfQueryCores; cfg.valType = TAOS_CFG_VTYPE_FLOAT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; - cfg.minValue = 0.1f; - cfg.maxValue = 0.9f; + cfg.minValue = 0.0f; + cfg.maxValue = 2.0f; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); @@ -927,8 +929,8 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); - cfg.option = "halfCoresForQuery"; - cfg.ptr = &tsHalfCoresForQuery; + cfg.option = "retrieveBlockingModel"; + cfg.ptr = &tsRetrieveBlockingModel; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = 0; @@ -937,11 +939,21 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); + cfg.option = "keepColumnName"; + cfg.ptr = &tsKeepOriginalColumnName; + cfg.valType = TAOS_CFG_VTYPE_INT32; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; + cfg.minValue = 0; + cfg.maxValue = 1; + cfg.ptrLength = 1; + cfg.unitType = TAOS_CFG_UTYPE_NONE; + taosInitConfigOption(cfg); + // locale & charset cfg.option = "timezone"; cfg.ptr = tsTimezone; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = tListLen(tsTimezone); @@ -951,7 +963,7 @@ static void doInitGlobalConfig(void) { cfg.option = "locale"; cfg.ptr = tsLocale; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = tListLen(tsLocale); @@ -961,7 +973,7 @@ static void doInitGlobalConfig(void) { cfg.option = 
"charset"; cfg.ptr = tsCharset; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = tListLen(tsCharset); diff --git a/src/common/src/tvariant.c b/src/common/src/tvariant.c index fc00f50a7a..571ec2e0dd 100644 --- a/src/common/src/tvariant.c +++ b/src/common/src/tvariant.c @@ -705,7 +705,7 @@ int32_t tVariantDump(tVariant *pVariant, char *payload, int16_t type, bool inclu *((int32_t *)payload) = TSDB_DATA_FLOAT_NULL; return 0; } else { - double value; + double value = -1; int32_t ret; ret = convertToDouble(pVariant->pz, pVariant->nLen, &value); if ((errno == ERANGE && (float)value == -1) || (ret != 0)) { diff --git a/src/connector/grafanaplugin b/src/connector/grafanaplugin index ec77d9049a..32e2c97a4c 160000 --- a/src/connector/grafanaplugin +++ b/src/connector/grafanaplugin @@ -1 +1 @@ -Subproject commit ec77d9049a719dabfd1a7c1122a209e201861944 +Subproject commit 32e2c97a4cf7bedaa99f5d6dd8cb036e7f4470df diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index efb8795962..de76c30e8e 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -57,6 +57,7 @@ typedef struct SCqObj { uint64_t uid; int32_t tid; // table ID int32_t rowSize; // bytes of a row + char * dstTable; char * sqlStr; // SQL string STSchema * pSchema; // pointer to schema array void * pStream; @@ -96,7 +97,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) { pthread_mutex_init(&pContext->mutex, NULL); - cInfo("vgId:%d, CQ is opened", pContext->vgId); + cDebug("vgId:%d, CQ is opened", pContext->vgId); return pContext; } @@ -130,7 +131,7 @@ void cqClose(void *handle) { taosTmrCleanUp(pContext->tmrCtrl); pContext->tmrCtrl = NULL; - cInfo("vgId:%d, CQ is closed", pContext->vgId); + cDebug("vgId:%d, CQ is closed", pContext->vgId); free(pContext); } @@ -141,7 +142,7 @@ void cqStart(void *handle) { SCqContext *pContext = handle; if (pContext->dbConn || pContext->master) return; - cInfo("vgId:%d, start all CQs", pContext->vgId); + cDebug("vgId:%d, start all CQs", pContext->vgId); pthread_mutex_lock(&pContext->mutex); pContext->master = 1; @@ -160,7 +161,7 @@ void cqStop(void *handle) { return; } SCqContext *pContext = handle; - cInfo("vgId:%d, stop all CQs", pContext->vgId); + cDebug("vgId:%d, stop all CQs", pContext->vgId); if (pContext->dbConn == NULL || pContext->master == 0) return; pthread_mutex_lock(&pContext->mutex); @@ -185,7 +186,7 @@ void cqStop(void *handle) { pthread_mutex_unlock(&pContext->mutex); } -void *cqCreate(void *handle, uint64_t uid, int32_t tid, char *sqlStr, STSchema *pSchema) { +void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema) { if (tsEnableStream == 0) { return NULL; } @@ -195,9 +196,11 @@ void *cqCreate(void *handle, uint64_t uid, int32_t tid, char *sqlStr, STSchema * if (pObj == NULL) return NULL; pObj->uid = uid; - pObj->tid = tid; - pObj->sqlStr = malloc(strlen(sqlStr)+1); - strcpy(pObj->sqlStr, sqlStr); + pObj->tid = sid; + if (dstTable != NULL) { + pObj->dstTable = strdup(dstTable); + } + pObj->sqlStr = strdup(sqlStr); pObj->pSchema = tdDupSchema(pSchema); pObj->rowSize = schemaTLen(pSchema); @@ -247,6 +250,7 @@ void cqDrop(void *handle) { cInfo("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr); tdFreeSchema(pObj->pSchema); + free(pObj->dstTable); free(pObj->sqlStr); free(pObj); @@ -292,8 +296,9 @@ static void 
cqCreateStream(SCqContext *pContext, SCqObj *pObj) { if (pObj->pStream == NULL) { pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, pObj, NULL); if (pObj->pStream) { + tscSetStreamDestTable(pObj->pStream, pObj->dstTable); pContext->num++; - cInfo("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr); + cDebug("vgId:%d, id:%d CQ:%s is opened", pContext->vgId, pObj->tid, pObj->sqlStr); } else { cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr); } diff --git a/src/cq/test/cqtest.c b/src/cq/test/cqtest.c index 41380f0d86..f378835f0a 100644 --- a/src/cq/test/cqtest.c +++ b/src/cq/test/cqtest.c @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) { tdDestroyTSchemaBuilder(&schemaBuilder); for (int sid =1; sid<10; ++sid) { - cqCreate(pCq, sid, sid, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema); + cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema); } tdFreeSchema(pSchema); diff --git a/src/dnode/inc/dnodeInt.h b/src/dnode/inc/dnodeInt.h index 7595f5fd02..1327cd4433 100644 --- a/src/dnode/inc/dnodeInt.h +++ b/src/dnode/inc/dnodeInt.h @@ -36,6 +36,14 @@ extern int32_t dDebugFlag; #define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND ", dDebugFlag, __VA_ARGS__); }} #define dTrace(...) { if (dDebugFlag & DEBUG_TRACE) { taosPrintLog("DND ", dDebugFlag, __VA_ARGS__); }} +typedef enum { + TSDB_RUN_STATUS_INITIALIZE, + TSDB_RUN_STATUS_RUNING, + TSDB_RUN_STATUS_STOPPED +} SRunStatus; + +SRunStatus dnodeGetRunStatus(); + #ifdef __cplusplus } #endif diff --git a/src/dnode/inc/dnodeModule.h b/src/dnode/inc/dnodeModule.h index edcefbdd0c..e645784c8f 100644 --- a/src/dnode/inc/dnodeModule.h +++ b/src/dnode/inc/dnodeModule.h @@ -22,8 +22,8 @@ extern "C" { #include "dnodeInt.h" int32_t dnodeInitModules(); -void dnodeStartModules(); void dnodeCleanupModules(); +bool dnodeStartMnode(SMInfos *pMinfos); void dnodeProcessModuleStatus(uint32_t moduleStatus); #ifdef __cplusplus diff --git a/src/dnode/inc/dnodeVRead.h b/src/dnode/inc/dnodeVRead.h index 30dfb1b3a4..9c88886f88 100644 --- a/src/dnode/inc/dnodeVRead.h +++ b/src/dnode/inc/dnodeVRead.h @@ -24,8 +24,10 @@ extern "C" { int32_t dnodeInitVRead(); void dnodeCleanupVRead(); void dnodeDispatchToVReadQueue(SRpcMsg *pMsg); -void * dnodeAllocVReadQueue(void *pVnode); -void dnodeFreeVReadQueue(void *pRqueue); +void * dnodeAllocVQueryQueue(void *pVnode); +void * dnodeAllocVFetchQueue(void *pVnode); +void dnodeFreeVQueryQueue(void *pQqueue); +void dnodeFreeVFetchQueue(void *pFqueue); #ifdef __cplusplus } diff --git a/src/dnode/inc/dnodeVnodes.h b/src/dnode/inc/dnodeVnodes.h index a942a00c78..9ecbd5f052 100644 --- a/src/dnode/inc/dnodeVnodes.h +++ b/src/dnode/inc/dnodeVnodes.h @@ -23,8 +23,8 @@ extern "C" { int32_t dnodeInitVnodes(); void dnodeCleanupVnodes(); -int32_t dnodeInitTimer(); -void dnodeCleanupTimer(); +int32_t dnodeInitStatusTimer(); +void dnodeCleanupStatusTimer(); void dnodeSendStatusMsgToMnode(); #ifdef __cplusplus diff --git a/src/dnode/src/dnodeEps.c b/src/dnode/src/dnodeEps.c index 103710bf6f..e7dc7efeb2 100644 --- a/src/dnode/src/dnodeEps.c +++ b/src/dnode/src/dnodeEps.c @@ -130,7 +130,7 @@ static void dnodePrintEps(SDnodeEps *eps) { dDebug("print dnodeEp, dnodeNum:%d", eps->dnodeNum); for (int32_t i = 0; i < eps->dnodeNum; i++) { SDnodeEp *ep = &eps->dnodeEps[i]; - dDebug("dnodeId:%d, dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort); + dDebug("dnode:%d, 
dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort); } } diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index bc387e2171..414b66653d 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -121,7 +121,7 @@ void dnodeDispatchToMWriteQueue(SRpcMsg *pMsg) { dnodeSendRedirectMsg(pMsg, true); } else { SMnodeMsg *pWrite = mnodeCreateMsg(pMsg); - dDebug("msg:%p, app:%p type:%s is put into mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, + dTrace("msg:%p, app:%p type:%s is put into mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue); taosWriteQitem(tsMWriteQueue, TAOS_QTYPE_RPC, pWrite); } @@ -130,7 +130,7 @@ void dnodeDispatchToMWriteQueue(SRpcMsg *pMsg) { } static void dnodeFreeMWriteMsg(SMnodeMsg *pWrite) { - dDebug("msg:%p, app:%p type:%s is freed from mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, + dTrace("msg:%p, app:%p type:%s is freed from mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue); mnodeCleanupMsg(pWrite); diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 381c9bc6ad..48aed0baa1 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -17,6 +17,7 @@ #include "os.h" #include "taos.h" #include "tnote.h" +#include "ttimer.h" #include "tconfig.h" #include "tfile.h" #include "twal.h" @@ -39,6 +40,7 @@ #include "dnodeShell.h" #include "dnodeTelemetry.h" +void *tsDnodeTmr = NULL; static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED; static int32_t dnodeInitStorage(); @@ -68,8 +70,8 @@ static SStep tsDnodeSteps[] = { {"dnode-server", dnodeInitServer, dnodeCleanupServer}, {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes}, {"dnode-modules", dnodeInitModules, dnodeCleanupModules}, - {"dnode-tmr", dnodeInitTimer, dnodeCleanupTimer}, {"dnode-shell", dnodeInitShell, dnodeCleanupShell}, + {"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer}, {"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, }; @@ -91,6 +93,23 @@ static int32_t dnodeInitComponents() { return dnodeStepInit(tsDnodeSteps, stepSize); } +static int32_t dnodeInitTmr() { + tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM"); + if (tsDnodeTmr == NULL) { + dError("failed to init dnode timer"); + return -1; + } + + return 0; +} + +static void dnodeCleanupTmr() { + if (tsDnodeTmr != NULL) { + taosTmrCleanUp(tsDnodeTmr); + tsDnodeTmr = NULL; + } +} + int32_t dnodeInitSystem() { dnodeSetRunStatus(TSDB_RUN_STATUS_INITIALIZE); tscEmbedded = 1; @@ -100,6 +119,7 @@ int32_t dnodeInitSystem() { taosReadGlobalLogCfg(); taosSetCoreDump(); taosInitNotes(); + dnodeInitTmr(); signal(SIGPIPE, SIG_IGN); if (dnodeCreateDir(tsLogDir) < 0) { @@ -125,9 +145,9 @@ int32_t dnodeInitSystem() { return -1; } - dnodeStartModules(); dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING); + dnodeReportStep("TDengine", "initialized successfully", 1); dInfo("TDengine is initialized successfully"); return 0; @@ -136,6 +156,7 @@ int32_t dnodeInitSystem() { void dnodeCleanUpSystem() { if (dnodeGetRunStatus() != TSDB_RUN_STATUS_STOPPED) { dnodeSetRunStatus(TSDB_RUN_STATUS_STOPPED); + dnodeCleanupTmr(); dnodeCleanupComponents(); taos_cleanup(); taosCloseLog(); diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index 9eb52cbf5a..62de85445c 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -97,6 +97,20 @@ void dnodeCleanupModules() { } } +static int32_t dnodeStartModules() { + for (EModuleType module = 1; 
module < TSDB_MOD_MAX; ++module) { + if (tsModule[module].enable && tsModule[module].startFp) { + int32_t code = (*tsModule[module].startFp)(); + if (code != 0) { + dError("failed to start module:%s, code:%d", tsModule[module].name, code); + return code; + } + } + } + + return 0; +} + int32_t dnodeInitModules() { dnodeAllocModules(); @@ -110,17 +124,7 @@ int32_t dnodeInitModules() { } dInfo("dnode modules is initialized"); - return 0; -} - -void dnodeStartModules() { - for (EModuleType module = 1; module < TSDB_MOD_MAX; ++module) { - if (tsModule[module].enable && tsModule[module].startFp) { - if ((*tsModule[module].startFp)() != 0) { - dError("failed to start module:%s", tsModule[module].name); - } - } - } + return dnodeStartModules(); } void dnodeProcessModuleStatus(uint32_t moduleStatus) { diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c index bf74e14963..de0c360c88 100644 --- a/src/dnode/src/dnodePeer.c +++ b/src/dnode/src/dnodePeer.c @@ -96,7 +96,7 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { rspMsg.code = TSDB_CODE_APP_NOT_READY; rpcSendResponse(&rspMsg); rpcFreeCont(pMsg->pCont); - dDebug("RPC %p, msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]); + dTrace("RPC %p, msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]); return; } @@ -151,7 +151,7 @@ void dnodeCleanupClient() { static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (dnodeGetRunStatus() == TSDB_RUN_STATUS_STOPPED) { if (pMsg == NULL || pMsg->pCont == NULL) return; - dDebug("msg:%p is ignored since dnode is stopping", pMsg); + dTrace("msg:%p is ignored since dnode is stopping", pMsg); rpcFreeCont(pMsg->pCont); return; } diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 6ec2c30c8c..9419068587 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -70,8 +70,7 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeSendStartupStep; - int32_t numOfThreads = tsNumOfCores * tsNumOfThreadsPerCore; - numOfThreads = (int32_t) ((1.0 - tsRatioOfQueryThreads) * numOfThreads / 2.0); + int32_t numOfThreads = (tsNumOfCores * tsNumOfThreadsPerCore) / 2.0; if (numOfThreads < 1) { numOfThreads = 1; } diff --git a/src/dnode/src/dnodeStep.c b/src/dnode/src/dnodeStep.c index 93b4f26c70..367f9223b5 100644 --- a/src/dnode/src/dnodeStep.c +++ b/src/dnode/src/dnodeStep.c @@ -57,12 +57,13 @@ int32_t dnodeStepInit(SStep *pSteps, int32_t stepSize) { int32_t code = (*pStep->initFp)(); if (code != 0) { - dDebug("step:%s will init", pStep->name); + dDebug("step:%s will cleanup", pStep->name); taosStepCleanupImp(pSteps, step); return code; } + dInfo("step:%s is initialized", pStep->name); - dnodeReportStep(pStep->name, "Initialization complete", step + 1 >= stepSize); + dnodeReportStep(pStep->name, "Initialization complete", 0); } return 0; diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index a135cda055..36232893b5 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -16,12 +16,15 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tgrant.h" +#include "tconfig.h" #include "dnodeMain.h" static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); static tsem_t exitSem; int32_t main(int32_t argc, char *argv[]) { + int dump_config = 0; + // Set global configuration file for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0) { @@ -35,6 +38,8 @@ int32_t main(int32_t 
argc, char *argv[]) { printf("'-c' requires a parameter, default:%s\n", configDir); exit(EXIT_FAILURE); } + } else if (strcmp(argv[i], "-C") == 0) { + dump_config = 1; } else if (strcmp(argv[i], "-V") == 0) { #ifdef _ACCT char *versionStr = "enterprise"; @@ -87,6 +92,20 @@ int32_t main(int32_t argc, char *argv[]) { #endif } + if (0 != dump_config) { + tscEmbedded = 1; + taosInitGlobalCfg(); + taosReadGlobalLogCfg(); + + if (!taosReadGlobalCfg()) { + printf("TDengine read global config failed"); + exit(EXIT_FAILURE); + } + + taosDumpGlobalCfg(); + exit(EXIT_SUCCESS); + } + if (tsem_init(&exitSem, 0, 0) != 0) { printf("failed to create exit semphore\n"); exit(EXIT_FAILURE); diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c index 0842aeb521..a9dfa96702 100644 --- a/src/dnode/src/dnodeVMgmt.c +++ b/src/dnode/src/dnodeVMgmt.c @@ -16,6 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tqueue.h" +#include "tworker.h" #include "dnodeVMgmt.h" typedef struct { @@ -23,9 +24,8 @@ typedef struct { char pCont[]; } SMgmtMsg; -static taos_qset tsMgmtQset = NULL; -static taos_queue tsMgmtQueue = NULL; -static pthread_t tsQthread; +static SWorkerPool tsVMgmtWP; +static taos_queue tsVMgmtQueue = NULL; static void * dnodeProcessMgmtQueue(void *param); static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg); @@ -47,45 +47,23 @@ int32_t dnodeInitVMgmt() { int32_t code = vnodeInitMgmt(); if (code != TSDB_CODE_SUCCESS) return -1; - tsMgmtQset = taosOpenQset(); - if (tsMgmtQset == NULL) { - dError("failed to create the vmgmt queue set"); - return -1; - } + tsVMgmtWP.name = "vmgmt"; + tsVMgmtWP.workerFp = dnodeProcessMgmtQueue; + tsVMgmtWP.min = 1; + tsVMgmtWP.max = 1; + if (tWorkerInit(&tsVMgmtWP) != 0) return -1; - tsMgmtQueue = taosOpenQueue(); - if (tsMgmtQueue == NULL) { - dError("failed to create the vmgmt queue"); - return -1; - } - - taosAddIntoQset(tsMgmtQset, tsMgmtQueue, NULL); - - pthread_attr_t thAttr; - pthread_attr_init(&thAttr); - pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - - code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL); - pthread_attr_destroy(&thAttr); - if (code != 0) { - dError("failed to create thread to process vmgmt queue, reason:%s", strerror(errno)); - return -1; - } + tsVMgmtQueue = tWorkerAllocQueue(&tsVMgmtWP, NULL); dInfo("dnode vmgmt is initialized"); return TSDB_CODE_SUCCESS; } void dnodeCleanupVMgmt() { - if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset); - if (tsQthread) pthread_join(tsQthread, NULL); - - if (tsMgmtQueue) taosCloseQueue(tsMgmtQueue); - if (tsMgmtQset) taosCloseQset(tsMgmtQset); - - tsMgmtQset = NULL; - tsMgmtQueue = NULL; + tWorkerFreeQueue(&tsVMgmtWP, tsVMgmtQueue); + tWorkerCleanup(&tsVMgmtWP); + tsVMgmtQueue = NULL; vnodeCleanupMgmt(); } @@ -97,7 +75,7 @@ static int32_t dnodeWriteToMgmtQueue(SRpcMsg *pMsg) { pMgmt->rpcMsg = *pMsg; pMgmt->rpcMsg.pCont = pMgmt->pCont; memcpy(pMgmt->pCont, pMsg->pCont, pMsg->contLen); - taosWriteQitem(tsMgmtQueue, TAOS_QTYPE_RPC, pMgmt); + taosWriteQitem(tsVMgmtQueue, TAOS_QTYPE_RPC, pMgmt); return TSDB_CODE_SUCCESS; } @@ -112,28 +90,30 @@ void dnodeDispatchToVMgmtQueue(SRpcMsg *pMsg) { rpcFreeCont(pMsg->pCont); } -static void *dnodeProcessMgmtQueue(void *param) { - SMgmtMsg *pMgmt; - SRpcMsg * pMsg; - SRpcMsg rsp = {0}; - int32_t qtype; - void * handle; +static void *dnodeProcessMgmtQueue(void *wparam) { + SWorker * pWorker = wparam; + SWorkerPool *pPool = pWorker->pPool; + SMgmtMsg * pMgmt; + SRpcMsg * pMsg; + SRpcMsg rsp = {0}; + int32_t qtype; + void * 
handle; while (1) { - if (taosReadQitemFromQset(tsMgmtQset, &qtype, (void **)&pMgmt, &handle) == 0) { - dDebug("qset:%p, dnode mgmt got no message from qset, exit", tsMgmtQset); + if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pMgmt, &handle) == 0) { + dDebug("dnode mgmt got no message from qset:%p, exit", pPool->qset); break; } pMsg = &pMgmt->rpcMsg; - dDebug("msg:%p, ahandle:%p type:%s will be processed", pMgmt, pMsg->ahandle, taosMsg[pMsg->msgType]); + dTrace("msg:%p, ahandle:%p type:%s will be processed", pMgmt, pMsg->ahandle, taosMsg[pMsg->msgType]); if (dnodeProcessMgmtMsgFp[pMsg->msgType]) { rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg); } else { rsp.code = TSDB_CODE_DND_MSG_NOT_PROCESSED; } - dDebug("msg:%p, is processed, code:0x%x", pMgmt, rsp.code); + dTrace("msg:%p, is processed, code:0x%x", pMgmt, rsp.code); if (rsp.code != TSDB_CODE_DND_ACTION_IN_PROGRESS) { rsp.handle = pMsg->handle; rsp.pCont = NULL; @@ -218,7 +198,7 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { SCreateMnodeMsg *pCfg = pMsg->pCont; pCfg->dnodeId = htonl(pCfg->dnodeId); if (pCfg->dnodeId != dnodeGetDnodeId()) { - dDebug("dnodeId:%d, in create mnode msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId()); + dDebug("dnode:%d, in create mnode msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId()); return TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED; } @@ -227,7 +207,7 @@ static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { return TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED; } - dDebug("dnodeId:%d, create mnode msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum); + dDebug("dnode:%d, create mnode msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum); for (int i = 0; i < pCfg->mnodes.mnodeNum; ++i) { pCfg->mnodes.mnodeInfos[i].mnodeId = htonl(pCfg->mnodes.mnodeInfos[i].mnodeId); dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp); diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index 07496b142a..3f31e49370 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -16,66 +16,39 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tqueue.h" +#include "tworker.h" #include "dnodeVRead.h" -typedef struct { - pthread_t thread; // thread - int32_t workerId; // worker ID -} SVReadWorker; - -typedef struct { - int32_t max; // max number of workers - int32_t min; // min number of workers - int32_t num; // current number of workers - SVReadWorker * worker; - pthread_mutex_t mutex; -} SVReadWorkerPool; - static void *dnodeProcessReadQueue(void *pWorker); // module global variable -static SVReadWorkerPool tsVReadWP; -static taos_qset tsVReadQset; +static SWorkerPool tsVQueryWP; +static SWorkerPool tsVFetchWP; int32_t dnodeInitVRead() { - tsVReadQset = taosOpenQset(); + const int32_t maxFetchThreads = 4; - tsVReadWP.min = tsNumOfCores; - tsVReadWP.max = tsNumOfCores * tsNumOfThreadsPerCore; - if (tsVReadWP.max <= tsVReadWP.min * 2) tsVReadWP.max = 2 * tsVReadWP.min; - tsVReadWP.worker = calloc(sizeof(SVReadWorker), tsVReadWP.max); - pthread_mutex_init(&tsVReadWP.mutex, NULL); + // calculate the available query thread + float threadsForQuery = MAX(tsNumOfCores * tsRatioOfQueryCores, 1); - if (tsVReadWP.worker == NULL) return -1; - for (int i = 0; i < tsVReadWP.max; ++i) { - SVReadWorker *pWorker = tsVReadWP.worker + i; - pWorker->workerId = i; - } + tsVQueryWP.name = "vquery"; + tsVQueryWP.workerFp = 
dnodeProcessReadQueue; + tsVQueryWP.min = (int32_t) threadsForQuery; + tsVQueryWP.max = tsVQueryWP.min; + if (tWorkerInit(&tsVQueryWP) != 0) return -1; + + tsVFetchWP.name = "vfetch"; + tsVFetchWP.workerFp = dnodeProcessReadQueue; + tsVFetchWP.min = MIN(maxFetchThreads, tsNumOfCores); + tsVFetchWP.max = tsVFetchWP.min; + if (tWorkerInit(&tsVFetchWP) != 0) return -1; - dInfo("dnode vread is initialized, min worker:%d max worker:%d", tsVReadWP.min, tsVReadWP.max); return 0; } void dnodeCleanupVRead() { - for (int i = 0; i < tsVReadWP.max; ++i) { - SVReadWorker *pWorker = tsVReadWP.worker + i; - if (pWorker->thread) { - taosQsetThreadResume(tsVReadQset); - } - } - - for (int i = 0; i < tsVReadWP.max; ++i) { - SVReadWorker *pWorker = tsVReadWP.worker + i; - if (pWorker->thread) { - pthread_join(pWorker->thread, NULL); - } - } - - free(tsVReadWP.worker); - taosCloseQset(tsVReadQset); - pthread_mutex_destroy(&tsVReadWP.mutex); - - dInfo("dnode vread is closed"); + tWorkerCleanup(&tsVFetchWP); + tWorkerCleanup(&tsVQueryWP); } void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { @@ -88,6 +61,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { pHead->vgId = htonl(pHead->vgId); pHead->contLen = htonl(pHead->contLen); + assert(pHead->contLen > 0); void *pVnode = vnodeAcquire(pHead->vgId); if (pVnode != NULL) { int32_t code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg); @@ -107,43 +81,20 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { rpcFreeCont(pMsg->pCont); } -void *dnodeAllocVReadQueue(void *pVnode) { - pthread_mutex_lock(&tsVReadWP.mutex); - taos_queue queue = taosOpenQueue(); - if (queue == NULL) { - pthread_mutex_unlock(&tsVReadWP.mutex); - return NULL; - } - - taosAddIntoQset(tsVReadQset, queue, pVnode); - - // spawn a thread to process queue - if (tsVReadWP.num < tsVReadWP.max) { - do { - SVReadWorker *pWorker = tsVReadWP.worker + tsVReadWP.num; - - pthread_attr_t thAttr; - pthread_attr_init(&thAttr); - pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - - if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessReadQueue, pWorker) != 0) { - dError("failed to create thread to process vread vqueue since %s", strerror(errno)); - } - - pthread_attr_destroy(&thAttr); - tsVReadWP.num++; - dDebug("dnode vread worker:%d is launched, total:%d", pWorker->workerId, tsVReadWP.num); - } while (tsVReadWP.num < tsVReadWP.min); - } - - pthread_mutex_unlock(&tsVReadWP.mutex); - dDebug("pVnode:%p, dnode vread queue:%p is allocated", pVnode, queue); - - return queue; +void *dnodeAllocVQueryQueue(void *pVnode) { + return tWorkerAllocQueue(&tsVQueryWP, pVnode); } -void dnodeFreeVReadQueue(void *pRqueue) { - taosCloseQueue(pRqueue); +void *dnodeAllocVFetchQueue(void *pVnode) { + return tWorkerAllocQueue(&tsVFetchWP, pVnode); +} + +void dnodeFreeVQueryQueue(void *pQqueue) { + tWorkerFreeQueue(&tsVQueryWP, pQqueue); +} + +void dnodeFreeVFetchQueue(void *pFqueue) { + tWorkerFreeQueue(&tsVFetchWP, pFqueue); } void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) { @@ -160,18 +111,20 @@ void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) { void dnodeDispatchNonRspMsg(void *pVnode, SVReadMsg *pRead, int32_t code) { } -static void *dnodeProcessReadQueue(void *pWorker) { - SVReadMsg *pRead; - int32_t qtype; - void * pVnode; +static void *dnodeProcessReadQueue(void *wparam) { + SWorker * pWorker = wparam; + SWorkerPool *pPool = pWorker->pPool; + SVReadMsg * pRead; + int32_t qtype; + void * pVnode; while (1) { - if 
(taosReadQitemFromQset(tsVReadQset, &qtype, (void **)&pRead, &pVnode) == 0) { - dDebug("qset:%p dnode vread got no message from qset, exiting", tsVReadQset); + if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pRead, &pVnode) == 0) { + dDebug("dnode vquery got no message from qset:%p, exiting", pPool->qset); break; } - dTrace("msg:%p, app:%p type:%s will be processed in vread queue, qtype:%d", pRead, pRead->rpcAhandle, + dTrace("msg:%p, app:%p type:%s will be processed in vquery queue, qtype:%d", pRead, pRead->rpcAhandle, taosMsg[pRead->msgType], qtype); int32_t code = vnodeProcessRead(pVnode, pRead); diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index 37566c0c31..44c8c486d4 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -30,39 +30,30 @@ typedef struct { int32_t * vnodeList; } SOpenVnodeThread; -void * tsDnodeTmr = NULL; +extern void * tsDnodeTmr; static void * tsStatusTimer = NULL; static uint32_t tsRebootTime = 0; +static int32_t tsOpenVnodes = 0; +static int32_t tsTotalVnodes = 0; static void dnodeSendStatusMsg(void *handle, void *tmrId); static void dnodeProcessStatusRsp(SRpcMsg *pMsg); -int32_t dnodeInitTimer() { - tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM"); - if (tsDnodeTmr == NULL) { - dError("failed to init dnode timer"); - return -1; - } - +int32_t dnodeInitStatusTimer() { dnodeAddClientRspHandle(TSDB_MSG_TYPE_DM_STATUS_RSP, dnodeProcessStatusRsp); tsRebootTime = taosGetTimestampSec(); taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsDnodeTmr, &tsStatusTimer); - dInfo("dnode timer is initialized"); + dInfo("dnode status timer is initialized"); return TSDB_CODE_SUCCESS; } -void dnodeCleanupTimer() { +void dnodeCleanupStatusTimer() { if (tsStatusTimer != NULL) { taosTmrStopA(&tsStatusTimer); tsStatusTimer = NULL; } - - if (tsDnodeTmr != NULL) { - taosTmrCleanUp(tsDnodeTmr); - tsDnodeTmr = NULL; - } } static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { @@ -95,21 +86,27 @@ static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { static void *dnodeOpenVnode(void *param) { SOpenVnodeThread *pThread = param; - + char stepDesc[TSDB_STEP_DESC_LEN] = {0}; + dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum); for (int32_t v = 0; v < pThread->vnodeNum; ++v) { int32_t vgId = pThread->vnodeList[v]; + snprintf(stepDesc, TSDB_STEP_DESC_LEN, "vgId:%d, start to restore, %d of %d have been opened", vgId, tsOpenVnodes, tsTotalVnodes); + dnodeReportStep("open-vnodes", stepDesc, 0); + if (vnodeOpen(vgId) < 0) { dError("vgId:%d, failed to open vnode by thread:%d", vgId, pThread->threadIndex); pThread->failed++; } else { - dDebug("vgId:%d, is openned by thread:%d", vgId, pThread->threadIndex); + dDebug("vgId:%d, is opened by thread:%d", vgId, pThread->threadIndex); pThread->opened++; } + + atomic_add_fetch_32(&tsOpenVnodes, 1); } - dDebug("thread:%d, total vnodes:%d, openned:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened, + dDebug("thread:%d, total vnodes:%d, opened:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened, pThread->failed); return NULL; } @@ -118,6 +115,7 @@ int32_t dnodeInitVnodes() { int32_t vnodeList[TSDB_MAX_VNODES] = {0}; int32_t numOfVnodes = 0; int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes); + tsTotalVnodes = numOfVnodes; if (status != TSDB_CODE_SUCCESS) { dInfo("get dnode list failed"); @@ -138,7 +136,7 @@ int32_t dnodeInitVnodes() { pThread->vnodeList[pThread->vnodeNum++] = 
vnodeList[v]; } - dDebug("start %d threads to open %d vnodes", threadNum, numOfVnodes); + dInfo("start %d threads to open %d vnodes", threadNum, numOfVnodes); for (int32_t t = 0; t < threadNum; ++t) { SOpenVnodeThread *pThread = &threads[t]; @@ -167,7 +165,7 @@ int32_t dnodeInitVnodes() { } free(threads); - dInfo("there are total vnodes:%d, openned:%d", numOfVnodes, openVnodes); + dInfo("there are total vnodes:%d, opened:%d", numOfVnodes, openVnodes); if (failedVnodes != 0) { dError("there are total vnodes:%d, failed:%d", numOfVnodes, failedVnodes); diff --git a/src/inc/dnode.h b/src/inc/dnode.h index b80c3dacb1..9aab5b9b50 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -29,13 +29,6 @@ typedef struct { int32_t httpReqNum; } SStatisInfo; -typedef enum { - TSDB_RUN_STATUS_INITIALIZE, - TSDB_RUN_STATUS_RUNING, - TSDB_RUN_STATUS_STOPPED -} SRunStatus; - -SRunStatus dnodeGetRunStatus(); SStatisInfo dnodeGetStatisInfo(); bool dnodeIsFirstDeploy(); @@ -56,8 +49,10 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid); void *dnodeAllocVWriteQueue(void *pVnode); void dnodeFreeVWriteQueue(void *pWqueue); void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code); -void *dnodeAllocVReadQueue(void *pVnode); -void dnodeFreeVReadQueue(void *pRqueue); +void *dnodeAllocVQueryQueue(void *pVnode); +void *dnodeAllocVFetchQueue(void *pVnode); +void dnodeFreeVQueryQueue(void *pQqueue); +void dnodeFreeVFetchQueue(void *pFqueue); int32_t dnodeAllocateMPeerQueue(); void dnodeFreeMPeerQueue(); diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index e0d7e01843..7c7e7ec31a 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -206,9 +206,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing da TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode") TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Vnode memory is full because commit failed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Database memory is full for commit failed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, 0, 0x050C, "Database memory is full for waiting commit") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Write operation denied") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Database write operation denied") TAOS_DEFINE_ERROR(TSDB_CODE_VND_SYNCING, 0, 0x0513, "Database is syncing") // tsdb @@ -265,6 +266,13 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CONFIG, 0, 0x0900, "Invalid Sy TAOS_DEFINE_ERROR(TSDB_CODE_SYN_NOT_ENABLED, 0, 0x0901, "Sync module not enabled") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_VERSION, 0, 0x0902, "Invalid Sync version") TAOS_DEFINE_ERROR(TSDB_CODE_SYN_CONFIRM_EXPIRED, 0, 0x0903, "Sync confirm expired") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_TOO_MANY_FWDINFO, 0, 0x0904, "Too many sync fwd infos") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_PROTOCOL, 0, 0x0905, "Mismatched protocol") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_CLUSTERID, 0, 0x0906, "Mismatched clusterId") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_MISMATCHED_SIGNATURE, 0, 0x0907, "Mismatched signature") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_CHECKSUM, 0, 0x0908, "Invalid msg checksum") +TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGLEN, 0, 0x0909, "Invalid msg length") 
+TAOS_DEFINE_ERROR(TSDB_CODE_SYN_INVALID_MSGTYPE, 0, 0x090A, "Invalid msg type") // wal TAOS_DEFINE_ERROR(TSDB_CODE_WAL_APP_ERROR, 0, 0x1000, "Unexpected generic error in wal") diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index ea9d608d92..b7f0de54fe 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -266,6 +266,7 @@ typedef struct { } SMDCreateTableMsg; typedef struct { + int32_t len; // one create table message char tableId[TSDB_TABLE_FNAME_LEN]; char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; int8_t igExists; @@ -273,9 +274,13 @@ typedef struct { int16_t numOfTags; int16_t numOfColumns; int16_t sqlLen; // the length of SQL, it starts after schema , sql is a null-terminated string - int32_t contLen; int8_t reserved[16]; char schema[]; +} SCreateTableMsg; + +typedef struct { + int32_t numOfTables; + int32_t contLen; } SCMCreateTableMsg; typedef struct { @@ -473,7 +478,7 @@ typedef struct { int16_t numOfGroupCols; // num of group by columns int16_t orderByIdx; int16_t orderType; // used in group by xx order by xxx - int64_t tableLimit; // limit the number of rows for each table, used in order by + limit in stable projection query. + int64_t vgroupLimit; // limit the number of rows for each table, used in order by + limit in stable projection query. int16_t prjOrder; // global order in super table projection query. int64_t limit; int64_t offset; @@ -730,8 +735,8 @@ typedef struct SMultiTableMeta { typedef struct { int32_t dataLen; - char name[TSDB_TABLE_FNAME_LEN]; - char data[TSDB_MAX_TAGS_LEN + TD_KV_ROW_HEAD_SIZE + sizeof(SColIdx) * TSDB_MAX_TAGS]; + char name[TSDB_TABLE_FNAME_LEN]; + char *data; } STagData; /* @@ -787,6 +792,7 @@ typedef struct { typedef struct { char sql[TSDB_SHOW_SQL_LEN]; + char dstTable[TSDB_TABLE_NAME_LEN]; uint32_t streamId; int64_t num; // number of computing/cycles int64_t useconds; diff --git a/src/inc/tcq.h b/src/inc/tcq.h index afa744a9c4..ad123d4080 100644 --- a/src/inc/tcq.h +++ b/src/inc/tcq.h @@ -42,7 +42,7 @@ void cqStart(void *handle); void cqStop(void *handle); // cqCreate is called by TSDB to start an instance of CQ -void *cqCreate(void *handle, uint64_t uid, int32_t sid, char *sqlStr, STSchema *pSchema); +void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema); // cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate void cqDrop(void *handle); diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 1769bd6566..04d6c78815 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -48,7 +48,7 @@ typedef struct { void *cqH; int (*notifyStatus)(void *, int status, int eno); int (*eventCallBack)(void *); - void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema); + void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema); void (*cqDropFunc)(void *handle); } STsdbAppH; diff --git a/src/inc/tsync.h b/src/inc/tsync.h index 1303195ef1..4dae86bbed 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -119,11 +119,6 @@ int32_t syncGetNodesRole(int64_t rid, SNodesRole *); extern char *syncRole[]; //global configurable parameters -extern int32_t tsMaxSyncNum; -extern int32_t tsSyncTcpThreads; -extern int32_t tsMaxWatchFiles; -extern int32_t tsSyncTimer; -extern int32_t tsMaxFwdInfo; extern int32_t sDebugFlag; extern char tsArbitrator[]; extern uint16_t tsSyncPort; diff --git a/src/inc/ttype.h b/src/inc/ttype.h index 7d5779c43f..3dd0c58ae2 100644 --- a/src/inc/ttype.h +++ 
b/src/inc/ttype.h @@ -8,25 +8,26 @@ extern "C" { #include "taosdef.h" #define GET_TYPED_DATA(_v, _finalType, _type, _data) \ - switch (_type) { \ - case TSDB_DATA_TYPE_TINYINT: \ + switch (_type) { \ + case TSDB_DATA_TYPE_BOOL: \ + case TSDB_DATA_TYPE_TINYINT: \ (_v) = (_finalType)GET_INT8_VAL(_data); \ - break; \ - case TSDB_DATA_TYPE_SMALLINT: \ + break; \ + case TSDB_DATA_TYPE_SMALLINT: \ (_v) = (_finalType)GET_INT16_VAL(_data); \ - break; \ - case TSDB_DATA_TYPE_BIGINT: \ + break; \ + case TSDB_DATA_TYPE_BIGINT: \ (_v) = (_finalType)(GET_INT64_VAL(_data)); \ - break; \ - case TSDB_DATA_TYPE_FLOAT: \ + break; \ + case TSDB_DATA_TYPE_FLOAT: \ (_v) = (_finalType)GET_FLOAT_VAL(_data); \ - break; \ - case TSDB_DATA_TYPE_DOUBLE: \ + break; \ + case TSDB_DATA_TYPE_DOUBLE: \ (_v) = (_finalType)GET_DOUBLE_VAL(_data); \ - break; \ - default: \ + break; \ + default: \ (_v) = (_finalType)GET_INT32_VAL(_data); \ - break; \ + break; \ }; #ifdef __cplusplus diff --git a/src/inc/vnode.h b/src/inc/vnode.h index ba64a3d826..a4f7c00b66 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -34,6 +34,7 @@ typedef struct { void * rpcHandle; void * rpcAhandle; void * qhandle; + void * pVnode; int8_t qtype; int8_t msgType; SRspRet rspRet; @@ -47,7 +48,7 @@ typedef struct { void * pVnode; SRpcMsg rpcMsg; SRspRet rspRet; - char reserveForSync[16]; + char reserveForSync[24]; SWalHead pHead[]; } SVWriteMsg; diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 2c6e4a308c..2415695617 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -45,6 +45,7 @@ typedef struct SShellArguments { char* timezone; bool is_raw_time; bool is_use_passwd; + bool dump_config; char file[TSDB_FILENAME_LEN]; char dir[TSDB_FILENAME_LEN]; int threadNum; @@ -85,6 +86,6 @@ extern void set_terminal_mode(); extern int get_old_terminal_mode(struct termios* tio); extern void reset_terminal_mode(); extern SShellArguments args; -extern TAOS_RES* result; +extern int64_t result; #endif diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index af8beb7987..627d06ac2e 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -46,7 +46,7 @@ char CONTINUE_PROMPT[] = " -> "; int prompt_size = 6; #endif -TAOS_RES *result = NULL; +int64_t result = 0; SShellHistory history; #define DEFAULT_MAX_BINARY_DISPLAY_WIDTH 30 @@ -260,6 +260,14 @@ int32_t shellRunCommand(TAOS* con, char* command) { } +void freeResultWithRid(int64_t rid) { + SSqlObj* pSql = taosAcquireRef(tscObjRef, rid); + if(pSql){ + taos_free_result(pSql); + taosReleaseRef(tscObjRef, rid); + } +} + void shellRunCommandOnServer(TAOS *con, char command[]) { int64_t st, et; wordexp_t full_path; @@ -294,18 +302,22 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { st = taosGetTimestampUs(); - TAOS_RES* pSql = taos_query_h(con, command, &result); + TAOS_RES* tmpSql = NULL; + TAOS_RES* pSql = taos_query_h(con, command, &tmpSql); if (taos_errno(pSql)) { taos_error(pSql, st); return; } + atomic_store_64(&result, ((SSqlObj*)tmpSql)->self); + int64_t oresult = atomic_load_64(&result); + if (regex_match(command, "^\\s*use\\s+[a-zA-Z0-9_]+\\s*;\\s*$", REG_EXTENDED | REG_ICASE)) { fprintf(stdout, "Database changed.\n\n"); fflush(stdout); - atomic_store_ptr(&result, 0); - taos_free_result(pSql); + atomic_store_64(&result, 0); + freeResultWithRid(oresult); return; } @@ -313,8 +325,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { int error_no = 0; int numOfRows = shellDumpResult(pSql, fname, 
&error_no, printMode); if (numOfRows < 0) { - atomic_store_ptr(&result, 0); - taos_free_result(pSql); + atomic_store_64(&result, 0); + freeResultWithRid(oresult); return; } @@ -336,8 +348,8 @@ void shellRunCommandOnServer(TAOS *con, char command[]) { wordfree(&full_path); } - atomic_store_ptr(&result, 0); - taos_free_result(pSql); + atomic_store_64(&result, 0); + freeResultWithRid(oresult); } /* Function to do regular expression check */ @@ -501,7 +513,7 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { row = taos_fetch_row(tres); } while( row != NULL); - result = NULL; + result = 0; fclose(fp); return numOfRows; @@ -509,7 +521,9 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { static void shellPrintNChar(const char *str, int length, int width) { - int pos = 0, cols = 0; + wchar_t tail[3]; + int pos = 0, cols = 0, totalCols = 0, tailLen = 0; + while (pos < length) { wchar_t wc; int bytes = mbtowc(&wc, str + pos, MB_CUR_MAX); @@ -526,15 +540,44 @@ static void shellPrintNChar(const char *str, int length, int width) { #else int w = wcwidth(wc); #endif - if (w > 0) { - if (width > 0 && cols + w > width) { - break; - } + if (w <= 0) { + continue; + } + + if (width <= 0) { + printf("%lc", wc); + continue; + } + + totalCols += w; + if (totalCols > width) { + break; + } + if (totalCols <= (width - 3)) { printf("%lc", wc); cols += w; + } else { + tail[tailLen] = wc; + tailLen++; } } + if (totalCols > width) { + // width could be 1 or 2, so printf("...") cannot be used + for (int i = 0; i < 3; i++) { + if (cols >= width) { + break; + } + putchar('.'); + ++cols; + } + } else { + for (int i = 0; i < tailLen; i++) { + printf("%lc", tail[i]); + } + cols = totalCols; + } + for (; cols < width; cols++) { putchar(' '); } @@ -656,13 +699,21 @@ static int calcColWidth(TAOS_FIELD* field, int precision) { return MAX(25, width); case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: if (field->bytes > tsMaxBinaryDisplayWidth) { return MAX(tsMaxBinaryDisplayWidth, width); } else { return MAX(field->bytes, width); } + case TSDB_DATA_TYPE_NCHAR: { + int16_t bytes = field->bytes * TSDB_NCHAR_SIZE; + if (bytes > tsMaxBinaryDisplayWidth) { + return MAX(tsMaxBinaryDisplayWidth, width); + } else { + return MAX(bytes, width); + } + } + case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { return MAX(14, width); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 6f4ee3fc50..9eb30ccdcc 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -33,12 +33,13 @@ const char *argp_program_bug_address = ""; static char doc[] = ""; static char args_doc[] = ""; static struct argp_option options[] = { - {"host", 'h', "HOST", 0, "TDengine server IP address to connect. The default host is localhost."}, + {"host", 'h', "HOST", 0, "TDengine server FQDN to connect. 
The default host is localhost."}, {"password", 'p', "PASSWORD", OPTION_ARG_OPTIONAL, "The password to use when connecting to the server."}, {"port", 'P', "PORT", 0, "The TCP/IP port number to use for the connection."}, {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, {"user", 'A', "Auth", 0, "The user auth to use when connecting to the server."}, {"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."}, + {"dump-config", 'C', 0, 0, "Dump configuration."}, {"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."}, {"raw-time", 'r', 0, 0, "Output time as uint64_t."}, {"file", 'f', "FILE", 0, "Script to run without enter the shell."}, @@ -96,6 +97,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); wordfree(&full_path); break; + case 'C': + arguments->dump_config = true; + break; case 's': arguments->commands = arg; break; diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index a2ce78d36f..041ad71ccb 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -15,18 +15,35 @@ #include "os.h" #include "shell.h" +#include "tconfig.h" #include "tnettest.h" pthread_t pid; +static tsem_t cancelSem; void shellQueryInterruptHandler(int signum) { + tsem_post(&cancelSem); +} + +void *cancelHandler(void *arg) { + while(1) { + if (tsem_wait(&cancelSem) != 0) { + taosMsleep(10); + continue; + } + #ifdef LINUX - void* pResHandle = atomic_val_compare_exchange_64(&result, result, 0); - taos_stop_query(pResHandle); + int64_t rid = atomic_val_compare_exchange_64(&result, result, 0); + SSqlObj* pSql = taosAcquireRef(tscObjRef, rid); + taos_stop_query(pSql); + taosReleaseRef(tscObjRef, rid); #else - printf("\nReceive ctrl+c or other signal, quit shell.\n"); - exit(0); + printf("\nReceive ctrl+c or other signal, quit shell.\n"); + exit(0); #endif + } + + return NULL; } int checkVersion() { @@ -58,6 +75,7 @@ SShellArguments args = { .timezone = NULL, .is_raw_time = false, .is_use_passwd = false, + .dump_config = false, .file = "\0", .dir = "\0", .threadNum = 5, @@ -78,6 +96,19 @@ int main(int argc, char* argv[]) { shellParseArgument(argc, argv, &args); + if (args.dump_config) { + taosInitGlobalCfg(); + taosReadGlobalLogCfg(); + + if (!taosReadGlobalCfg()) { + printf("TDengine read global config failed"); + exit(EXIT_FAILURE); + } + + taosDumpGlobalCfg(); + exit(0); + } + if (args.netTestRole && args.netTestRole[0] != 0) { taos_init(); taosNetTest(args.netTestRole, args.host, args.port, args.pktLen); @@ -90,6 +121,14 @@ int main(int argc, char* argv[]) { exit(EXIT_FAILURE); } + if (tsem_init(&cancelSem, 0, 0) != 0) { + printf("failed to create cancel semaphore\n"); + exit(EXIT_FAILURE); + } + + pthread_t spid; + pthread_create(&spid, NULL, cancelHandler, NULL); + /* Interrupt handler. */ struct sigaction act; memset(&act, 0, sizeof(struct sigaction)); diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index a92831de25..7cb6c75302 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -24,7 +24,7 @@ void printHelp() { printf("taos shell is used to test the TDengine database\n"); printf("%s%s\n", indent, "-h"); - printf("%s%s%s\n", indent, indent, "TDengine server IP address to connect. The default host is localhost."); + printf("%s%s%s\n", indent, indent, "TDengine server FQDN to connect. 
The default host is localhost."); printf("%s%s\n", indent, "-p"); printf("%s%s%s\n", indent, indent, "The password to use when connecting to the server."); printf("%s%s\n", indent, "-P"); @@ -35,6 +35,8 @@ void printHelp() { printf("%s%s%s\n", indent, indent, "The user auth to use when connecting to the server."); printf("%s%s\n", indent, "-c"); printf("%s%s%s\n", indent, indent, "Configuration directory."); + printf("%s%s\n", indent, "-C"); + printf("%s%s%s\n", indent, indent, "Dump configuration."); printf("%s%s\n", indent, "-s"); printf("%s%s%s\n", indent, indent, "Commands to run without enter the shell."); printf("%s%s\n", indent, "-r"); @@ -104,6 +106,8 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { fprintf(stderr, "Option -c requires an argument\n"); exit(EXIT_FAILURE); } + } else if (strcmp(argv[i], "-C") == 0) { + arguments->dump_config = true; } else if (strcmp(argv[i], "-s") == 0) { if (i < argc - 1) { arguments->commands = argv[++i]; diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index a7258c9724..588d21574b 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -14,6 +14,9 @@ */ #include +#include +#include + #include "os.h" #include "taos.h" #include "taosdef.h" @@ -64,7 +67,10 @@ enum _show_tables_index { TSDB_SHOW_TABLES_NAME_INDEX, TSDB_SHOW_TABLES_CREATED_TIME_INDEX, TSDB_SHOW_TABLES_COLUMNS_INDEX, - TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_SHOW_TABLES_UID_INDEX, + TSDB_SHOW_TABLES_TID_INDEX, + TSDB_SHOW_TABLES_VGID_INDEX, TSDB_MAX_SHOW_TABLES }; @@ -92,24 +98,27 @@ typedef struct { extern char version[]; typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; - int32_t tables; + char name[TSDB_DB_NAME_LEN + 1]; + char create_time[32]; + int32_t ntables; int32_t vgroups; - int16_t replications; + int16_t replica; int16_t quorum; - int16_t daysPerFile; - int16_t daysToKeep; - int16_t daysToKeep1; - int16_t daysToKeep2; - int32_t cacheBlockSize; //MB - int32_t totalBlocks; - int32_t minRowsPerFileBlock; - int32_t maxRowsPerFileBlock; - int8_t walLevel; - int32_t fsyncPeriod; - int8_t compression; - int8_t precision; // time resolution + int16_t days; + char keeplist[32]; + //int16_t daysToKeep; + //int16_t daysToKeep1; + //int16_t daysToKeep2; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + char precision[8]; // time resolution int8_t update; + char status[16]; } SDbInfo; typedef struct { @@ -128,8 +137,17 @@ typedef struct { int32_t totalThreads; char dbName[TSDB_TABLE_NAME_LEN + 1]; void *taosCon; + int64_t rowsOfDumpOut; + int64_t tablesOfDumpOut; } SThreadParaObj; +typedef struct { + int64_t totalRowsOfDumpOut; + int64_t totalChildTblsOfDumpOut; + int32_t totalSuperTblsOfDumpOut; + int32_t totalDatabasesOfDumpOut; +} resultStatistics; + static int64_t totalDumpOutRows = 0; SDbInfo **dbInfos = NULL; @@ -167,6 +185,7 @@ static struct argp_option options[] = { // input/output file {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, #ifdef _TD_POWER_ {"config", 'c', "CONFIG_DIR", 0, "Configure directory. 
Default is /etc/power/taos.cfg.", 1}, #else @@ -200,6 +219,8 @@ struct arguments { // output file char outpath[TSDB_FILENAME_LEN+1]; char inpath[TSDB_FILENAME_LEN+1]; + // result file + char *resultFile; char *encode; // dump unit option bool all_databases; @@ -274,6 +295,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { tstrncpy(arguments->inpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); wordfree(&full_path); break; + case 'r': + arguments->resultFile = arg; + break; case 'c': if (wordexp(arg, &full_path, 0) != 0) { fprintf(stderr, "Invalid path %s\n", arg); @@ -343,19 +367,22 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. */ static struct argp argp = {options, parse_opt, args_doc, doc}; +static resultStatistics g_resultStatistics = {0}; +static FILE *g_fpOfResult = NULL; +static int g_numOfCores = 1; int taosDumpOut(struct arguments *arguments); int taosDumpIn(struct arguments *arguments); void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); -int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon); -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp); -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp); -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon); -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon); +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName); +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName); +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName); +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName); int taosCheckParam(struct arguments *arguments); void taosFreeDbInfos(); -static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName); +static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName); struct arguments tsArguments = { // connection option @@ -371,7 +398,8 @@ struct arguments tsArguments = { 0, // outpath and inpath "", - "", + "", + "./dump_result.txt", NULL, // dump unit option false, @@ -392,18 +420,34 @@ struct arguments tsArguments = { 0, false }; - -int queryDB(TAOS *taos, char *command) { - TAOS_RES *pSql = NULL; + +static int queryDbImpl(TAOS *taos, char *command) { + int i; + TAOS_RES *res = NULL; int32_t code = -1; + + for (i = 0; i < 5; i++) { + if (NULL != res) { + taos_free_result(res); + res = NULL; + } - pSql = taos_query(taos, command); - code = taos_errno(pSql); - if (code) { - fprintf(stderr, "sql error: %s, reason:%s\n", command, taos_errstr(pSql)); - } - taos_free_result(pSql); - return code; + res = taos_query(taos, command); + code = taos_errno(res); + if (0 == code) { + break; + } + } + + if (code != 0) { + fprintf(stderr, "Failed to run <%s>, reason: %s\n", command, taos_errstr(res)); + taos_free_result(res); + //taos_close(taos); + return -1; + } + + taos_free_result(res); + return 0; } int main(int argc, char *argv[]) { @@ -430,6 +474,7 @@ int main(int argc, char *argv[]) { printf("mysqlFlag: %d\n", 
tsArguments.mysqlFlag); printf("outpath: %s\n", tsArguments.outpath); printf("inpath: %s\n", tsArguments.inpath); + printf("resultFile: %s\n", tsArguments.resultFile); printf("encode: %s\n", tsArguments.encode); printf("all_databases: %d\n", tsArguments.all_databases); printf("databases: %d\n", tsArguments.databases); @@ -459,13 +504,80 @@ int main(int argc, char *argv[]) { if (taosCheckParam(&tsArguments) < 0) { exit(EXIT_FAILURE); } + + g_fpOfResult = fopen(tsArguments.resultFile, "a"); + if (NULL == g_fpOfResult) { + fprintf(stderr, "Failed to open %s for save result\n", tsArguments.resultFile); + return 1; + }; - if (tsArguments.isDumpIn) { - if (taosDumpIn(&tsArguments) < 0) return -1; - } else { - if (taosDumpOut(&tsArguments) < 0) return -1; + fprintf(g_fpOfResult, "#############################################################################\n"); + fprintf(g_fpOfResult, "============================== arguments config =============================\n"); + { + fprintf(g_fpOfResult, "host: %s\n", tsArguments.host); + fprintf(g_fpOfResult, "user: %s\n", tsArguments.user); + fprintf(g_fpOfResult, "password: %s\n", tsArguments.password); + fprintf(g_fpOfResult, "port: %u\n", tsArguments.port); + fprintf(g_fpOfResult, "cversion: %s\n", tsArguments.cversion); + fprintf(g_fpOfResult, "mysqlFlag: %d\n", tsArguments.mysqlFlag); + fprintf(g_fpOfResult, "outpath: %s\n", tsArguments.outpath); + fprintf(g_fpOfResult, "inpath: %s\n", tsArguments.inpath); + fprintf(g_fpOfResult, "resultFile: %s\n", tsArguments.resultFile); + fprintf(g_fpOfResult, "encode: %s\n", tsArguments.encode); + fprintf(g_fpOfResult, "all_databases: %d\n", tsArguments.all_databases); + fprintf(g_fpOfResult, "databases: %d\n", tsArguments.databases); + fprintf(g_fpOfResult, "schemaonly: %d\n", tsArguments.schemaonly); + fprintf(g_fpOfResult, "with_property: %d\n", tsArguments.with_property); + fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", tsArguments.start_time); + fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", tsArguments.end_time); + fprintf(g_fpOfResult, "data_batch: %d\n", tsArguments.data_batch); + fprintf(g_fpOfResult, "max_sql_len: %d\n", tsArguments.max_sql_len); + fprintf(g_fpOfResult, "table_batch: %d\n", tsArguments.table_batch); + fprintf(g_fpOfResult, "thread_num: %d\n", tsArguments.thread_num); + fprintf(g_fpOfResult, "allow_sys: %d\n", tsArguments.allow_sys); + fprintf(g_fpOfResult, "abort: %d\n", tsArguments.abort); + fprintf(g_fpOfResult, "isDumpIn: %d\n", tsArguments.isDumpIn); + fprintf(g_fpOfResult, "arg_list_len: %d\n", tsArguments.arg_list_len); + + for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { + fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + } } + g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN); + + time_t tTime = time(NULL); + struct tm tm = *localtime(&tTime); + + if (tsArguments.isDumpIn) { + fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n"); + fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + if (taosDumpIn(&tsArguments) < 0) { + fprintf(g_fpOfResult, "\n"); + fclose(g_fpOfResult); + return -1; + } + } else { + fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n"); + fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + if 
(taosDumpOut(&tsArguments) < 0) { + fprintf(g_fpOfResult, "\n"); + fclose(g_fpOfResult); + return -1; + } + + fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n"); + fprintf(g_fpOfResult, "# total database count: %d\n", g_resultStatistics.totalDatabasesOfDumpOut); + fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut); + fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut); + fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut); + } + + fprintf(g_fpOfResult, "\n"); + fclose(g_fpOfResult); + return 0; } @@ -586,64 +698,97 @@ int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu sprintf(tmpCommand, "select tbname from %s", metric); - TAOS_RES *result = taos_query(taosCon, tmpCommand); - int32_t code = taos_errno(result); + TAOS_RES *res = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(res); if (code != 0) { fprintf(stderr, "failed to run command %s\n", tmpCommand); free(tmpCommand); - taos_free_result(result); + taos_free_result(res); + return -1; + } + free(tmpCommand); + + char tmpBuf[TSDB_FILENAME_LEN + 1]; + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, ".select-tbname.tmp"); + fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + taos_free_result(res); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(result); - - int32_t numOfTable = 0; - int32_t numOfThread = *totalNumOfThread; - char tmpFileName[TSDB_FILENAME_LEN + 1]; - while ((row = taos_fetch_row(result)) != NULL) { - if (0 == numOfTable) { - memset(tmpFileName, 0, TSDB_FILENAME_LEN); - sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); - fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); - taos_free_result(result); - for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); - (void)remove(tmpFileName); - } - free(tmpCommand); - return -1; - } - - numOfThread++; - } + TAOS_FIELD *fields = taos_fetch_fields(res); + int32_t numOfTable = 0; + while ((row = taos_fetch_row(res)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - - taosWrite(fd, &tableRecord, sizeof(STableRecord)); - + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); numOfTable++; - - if (numOfTable >= arguments->table_batch) { - numOfTable = 0; - close(fd); - fd = -1; - } } + taos_free_result(res); + lseek(fd, 0, SEEK_SET); + + int maxThreads = arguments->thread_num; + int tableOfPerFile ; + if (numOfTable <= arguments->thread_num) { + tableOfPerFile = 1; + maxThreads = numOfTable; + } else { + tableOfPerFile = numOfTable / arguments->thread_num; + if (0 != numOfTable % arguments->thread_num) { + tableOfPerFile += 1; + } + } + + char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); + if (NULL == tblBuf){ + fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); + close(fd); + return -1; + } + + int32_t numOfThread = *totalNumOfThread; + int subFd = -1; + for (; numOfThread < maxThreads; numOfThread++) { + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + 
sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); + subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (subFd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } + sprintf(tmpBuf, ".select-tbname.tmp"); + (void)remove(tmpBuf); + close(fd); + return -1; + } + + // read tableOfPerFile for fd, write to subFd + ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); + if (readLen <= 0) { + close(subFd); + break; + } + taosWrite(subFd, tblBuf, readLen); + close(subFd); + } + + sprintf(tmpBuf, ".select-tbname.tmp"); + (void)remove(tmpBuf); if (fd >= 0) { close(fd); fd = -1; - } - - taos_free_result(result); + } *totalNumOfThread = numOfThread; - - free(tmpCommand); return 0; } @@ -700,7 +845,7 @@ int taosDumpOut(struct arguments *arguments) { int32_t code = taos_errno(result); if (code != 0) { - fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos)); + fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result)); goto _exit_failure; } @@ -736,27 +881,29 @@ int taosDumpOut(struct arguments *arguments) { } strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); -#if 0 if (arguments->with_property) { - dbInfos[count]->tables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replications = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->daysPerFile = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); - dbInfos[count]->daysToKeep1; - dbInfos[count]->daysToKeep2; - dbInfos[count]->cacheBlockSize = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->totalBlocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->walLevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsyncPeriod = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->compression = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); + //dbInfos[count]->daysToKeep1; + //dbInfos[count]->daysToKeep2; + dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + dbInfos[count]->comp = 
(int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + + strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } -#endif count++; if (arguments->databases) { @@ -781,6 +928,8 @@ int taosDumpOut(struct arguments *arguments) { taosDumpDb(dbInfos[0], arguments, fp, taos); } else { // case: taosdump tablex tabley ... taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); + fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfos[0]->name); + g_resultStatistics.totalDatabasesOfDumpOut++; sprintf(command, "use %s", dbInfos[0]->name); @@ -796,6 +945,7 @@ int taosDumpOut(struct arguments *arguments) { int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 int normalTblFd = -1; int32_t retCode; + int superTblCnt = 0 ; for (int i = 1; arguments->arg_list[i]; i++) { if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) { fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]); @@ -803,11 +953,17 @@ int taosDumpOut(struct arguments *arguments) { } if (tableRecordInfo.isMetric) { // dump all table of this metric - (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name); + if (0 == ret) { + superTblCnt++; + } retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread); } else { if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric - (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name); + if (0 == ret) { + superTblCnt++; + } } retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); } @@ -819,13 +975,17 @@ int taosDumpOut(struct arguments *arguments) { goto _clean_tmp_file; } } + + // TODO: save dump super table into result_output.txt + fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); + g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; if (-1 != normalTblFd){ taosClose(normalTblFd); } // start multi threads to dumpout - taosStartDumpOutWorkThreads(arguments, totalNumOfThread, dbInfos[0]->name); + taosStartDumpOutWorkThreads(taos, arguments, totalNumOfThread, dbInfos[0]->name); char tmpFileName[TSDB_FILENAME_LEN + 1]; _clean_tmp_file: @@ -855,41 +1015,27 @@ _exit_failure: return -1; } -int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { +int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { TAOS_ROW row = NULL; - TAOS_RES *tmpResult = NULL; + TAOS_RES* res = NULL; int count = 0; - char* tempCommand = (char *)malloc(COMMAND_SIZE); - if (tempCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } - - char* tbuf = (char *)malloc(COMMAND_SIZE); - if (tbuf == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - free(tempCommand); - return -1; - } - - sprintf(tempCommand, "describe %s", table); + char sqlstr[COMMAND_SIZE]; + sprintf(sqlstr, "describe %s.%s;", dbName, table); - tmpResult = taos_query(taosCon, tempCommand); - int32_t code = taos_errno(tmpResult); + res = taos_query(taosCon, sqlstr); + int32_t 
code = taos_errno(res); if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tempCommand); - free(tempCommand); - free(tbuf); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + TAOS_FIELD *fields = taos_fetch_fields(res); tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); - while ((row = taos_fetch_row(tmpResult)) != NULL) { + while ((row = taos_fetch_row(res)) != NULL) { strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], @@ -901,12 +1047,10 @@ int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSupe count++; } - taos_free_result(tmpResult); - tmpResult = NULL; + taos_free_result(res); + res = NULL; if (isSuperTable) { - free(tempCommand); - free(tbuf); return count; } @@ -915,37 +1059,33 @@ int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSupe if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; - sprintf(tempCommand, "select %s from %s", tableDes->cols[i].field, table); + sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table); - tmpResult = taos_query(taosCon, tempCommand); - code = taos_errno(tmpResult); + res = taos_query(taosCon, sqlstr); + code = taos_errno(res); if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tempCommand); - free(tempCommand); - free(tbuf); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } - fields = taos_fetch_fields(tmpResult); + fields = taos_fetch_fields(res); - row = taos_fetch_row(tmpResult); + row = taos_fetch_row(res); if (NULL == row) { - fprintf(stderr, " fetch failed to run command %s\n", tempCommand); - free(tempCommand); - free(tbuf); - taos_free_result(tmpResult); + fprintf(stderr, " fetch failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } if (row[0] == NULL) { sprintf(tableDes->cols[i].note, "%s", "NULL"); - taos_free_result(tmpResult); - tmpResult = NULL; + taos_free_result(res); + res = NULL; continue; } - int32_t* length = taos_fetch_lengths(tmpResult); + int32_t* length = taos_fetch_lengths(res); //int32_t* length = taos_fetch_lengths(tmpResult); switch (fields[0].type) { @@ -970,18 +1110,22 @@ int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSupe case TSDB_DATA_TYPE_DOUBLE: sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0])); break; - case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_BINARY: { memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); tableDes->cols[i].note[0] = '\''; + char tbuf[COMMAND_SIZE]; converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf); *(pstr++) = '\''; break; - case TSDB_DATA_TYPE_NCHAR: + } + case TSDB_DATA_TYPE_NCHAR: { memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + char tbuf[COMMAND_SIZE]; convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); break; + } case TSDB_DATA_TYPE_TIMESTAMP: sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); #if 0 @@ -1001,17 +1145,14 @@ int taosGetTableDes(char *table, STableDef 
*tableDes, TAOS* taosCon, bool isSupe break; } - taos_free_result(tmpResult); - tmpResult = NULL; + taos_free_result(res); + res = NULL; } - free(tempCommand); - free(tbuf); - return count; } -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon) { +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); @@ -1030,7 +1171,7 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); */ - count = taosGetTableDes(table, tableDes, taosCon, false); + count = taosGetTableDes(dbName, table, tableDes, taosCon, false); if (count < 0) { free(tableDes); @@ -1038,10 +1179,10 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI } // create child-table using super-table - taosDumpCreateMTableClause(tableDes, metric, count, fp); + taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName); } else { // dump table definition - count = taosGetTableDes(table, tableDes, taosCon, false); + count = taosGetTableDes(dbName, table, tableDes, taosCon, false); if (count < 0) { free(tableDes); @@ -1049,39 +1190,28 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI } // create normal-table or super-table - taosDumpCreateTableClause(tableDes, count, fp); + taosDumpCreateTableClause(tableDes, count, fp, dbName); } free(tableDes); - return taosDumpTableData(fp, table, arguments, taosCon); + return taosDumpTableData(fp, table, arguments, taosCon, dbName); } void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - char* tmpCommand = (char *)malloc(COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return; - } - - char *pstr = tmpCommand; - - pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); + char *pstr = sqlstr; + pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); if (isDumpProperty) { - #if 0 pstr += sprintf(pstr, - "TABLES %d vgroups %d REPLICA %d quorum %d DAYS %d KEEP %d CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION %s UPDATE %d", - dbInfo->tables, dbInfo->vgroups, dbInfo->replications, dbInfo->quorum, dbInfo->daysPerFile, dbInfo->daysToKeep, dbInfo->cacheBlockSize, - dbInfo->totalBlocks, dbInfo->minRowsPerFileBlock, dbInfo->maxRowsPerFileBlock, dbInfo->walLevel, dbInfo->fsyncPeriod, dbInfo->compression, - dbInfo->precision, dbInfo->update); - #endif + "TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d", + dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache, + dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update); } pstr += sprintf(pstr, ";"); - - fprintf(fp, "%s\n\n", tmpCommand); - free(tmpCommand); + fprintf(fp, "%s\n\n", sqlstr); } void* taosDumpOutWorkThreadFp(void *arg) @@ -1090,34 +1220,34 @@ void* taosDumpOutWorkThreadFp(void *arg) STableRecord tableRecord; int fd; - char tmpFileName[TSDB_FILENAME_LEN*4] = {0}; - sprintf(tmpFileName, ".tables.tmp.%d", pThread->threadIndex); - fd = open(tmpFileName, 
O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + char tmpBuf[TSDB_FILENAME_LEN*4] = {0}; + sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex); + fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); if (fd == -1) { - fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpFileName); + fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpBuf); return NULL; } FILE *fp = NULL; - memset(tmpFileName, 0, TSDB_FILENAME_LEN + 128); + memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); if (tsArguments.outpath[0] != 0) { - sprintf(tmpFileName, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); + sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); } else { - sprintf(tmpFileName, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); + sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); } - fp = fopen(tmpFileName, "w"); + fp = fopen(tmpBuf, "w"); if (fp == NULL) { - fprintf(stderr, "failed to open file %s\n", tmpFileName); + fprintf(stderr, "failed to open file %s\n", tmpBuf); close(fd); return NULL; } - memset(tmpFileName, 0, TSDB_FILENAME_LEN); - sprintf(tmpFileName, "use %s", pThread->dbName); + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, "use %s", pThread->dbName); - TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpFileName); + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf); int32_t code = taos_errno(tmpResult); if (code != 0) { fprintf(stderr, "invalid database %s\n", pThread->dbName); @@ -1127,11 +1257,47 @@ void* taosDumpOutWorkThreadFp(void *arg) return NULL; } + int fileNameIndex = 1; + int tablesInOneFile = 0; + int64_t lastRowsPrint = 5000000; fprintf(fp, "USE %s;\n\n", pThread->dbName); while (1) { ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon); + + int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon, pThread->dbName); + if (ret >= 0) { + // TODO: sum table count and table rows by self + pThread->tablesOfDumpOut++; + pThread->rowsOfDumpOut += ret; + + if (pThread->rowsOfDumpOut >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName); + lastRowsPrint += 5000000; + } + + tablesInOneFile++; + if (tablesInOneFile >= tsArguments.table_batch) { + fclose(fp); + tablesInOneFile = 0; + + memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); + if (tsArguments.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex); + } else { + sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex); + } + fileNameIndex++; + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpBuf); + close(fd); + taos_free_result(tmpResult); + return NULL; + } + } + } } taos_free_result(tmpResult); @@ -1141,21 +1307,18 @@ void* taosDumpOutWorkThreadFp(void *arg) return NULL; } -static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName) +static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName) { pthread_attr_t thattr; SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); for (int t = 0; t < numOfThread; ++t) { 
SThreadParaObj *pThread = threadObj + t; + pThread->rowsOfDumpOut = 0; + pThread->tablesOfDumpOut = 0; pThread->threadIndex = t; pThread->totalThreads = numOfThread; tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN); - pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); - - if (pThread->taosCon == NULL) { - fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); - exit(0); - } + pThread->taosCon = taosCon; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); @@ -1170,15 +1333,24 @@ static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfTh pthread_join(threadObj[t].threadID, NULL); } + // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt + int64_t totalRowsOfDumpOut = 0; + int64_t totalChildTblsOfDumpOut = 0; for (int32_t t = 0; t < numOfThread; ++t) { - taos_close(threadObj[t].taosCon); + totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut; + totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut; } + + fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n", totalChildTblsOfDumpOut); + fprintf(g_fpOfResult, "# row counter: %"PRId64"\n", totalRowsOfDumpOut); + g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut; + g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut; free(threadObj); } -int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); @@ -1187,15 +1359,15 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { exit(-1); } - count = taosGetTableDes(table, tableDes, taosCon, true); + count = taosGetTableDes(dbName, table, tableDes, taosCon, true); if (count < 0) { free(tableDes); - fprintf(stderr, "failed to get stable schema\n"); + fprintf(stderr, "failed to get stable[%s] schema\n", table); exit(-1); } - taosDumpCreateTableClause(tableDes, count, fp); + taosDumpCreateTableClause(tableDes, count, fp, dbName); free(tableDes); return 0; @@ -1207,38 +1379,19 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) TAOS_ROW row; int fd = -1; STableRecord tableRecord; + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - char* tmpCommand = (char *)malloc(COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - exit(-1); - } - - sprintf(tmpCommand, "use %s", dbName); + sprintf(sqlstr, "show %s.stables", dbName); - TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); - int32_t code = taos_errno(tmpResult); + TAOS_RES* res = taos_query(taosCon, sqlstr); + int32_t code = taos_errno(res); if (code != 0) { - fprintf(stderr, "invalid database %s, error: %s\n", dbName, taos_errstr(taosCon)); - free(tmpCommand); - taos_free_result(tmpResult); - exit(-1); - } - - taos_free_result(tmpResult); - - sprintf(tmpCommand, "show stables"); - - tmpResult = taos_query(taosCon, tmpCommand); - code = taos_errno(tmpResult); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", tmpCommand, taos_errstr(taosCon)); - free(tmpCommand); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); exit(-1); } - TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + TAOS_FIELD *fields = taos_fetch_fields(res); char 
tmpFileName[TSDB_FILENAME_LEN + 1]; memset(tmpFileName, 0, TSDB_FILENAME_LEN); @@ -1246,32 +1399,38 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); if (fd == -1) { fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); - taos_free_result(tmpResult); - free(tmpCommand); + taos_free_result(res); (void)remove(".stables.tmp"); exit(-1); } - while ((row = taos_fetch_row(tmpResult)) != NULL) { + while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); taosWrite(fd, &tableRecord, sizeof(STableRecord)); } - taos_free_result(tmpResult); + taos_free_result(res); (void)lseek(fd, 0, SEEK_SET); + int superTblCnt = 0; while (1) { ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - (void)taosDumpStable(tableRecord.name, fp, taosCon); + int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName); + if (0 == ret) { + superTblCnt++; + } } + // TODO: save dump super table into result_output.txt + fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); + g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; + close(fd); (void)remove(".stables.tmp"); - free(tmpCommand); return 0; } @@ -1282,111 +1441,126 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao STableRecord tableRecord; taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); - - char* tmpCommand = (char *)malloc(COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } - - sprintf(tmpCommand, "use %s", dbInfo->name); - TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); - int32_t code = taos_errno(tmpResult); - if (code != 0) { - fprintf(stderr, "invalid database %s\n", dbInfo->name); - free(tmpCommand); - taos_free_result(tmpResult); - return -1; - } - taos_free_result(tmpResult); + fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name); + g_resultStatistics.totalDatabasesOfDumpOut++; + + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; fprintf(fp, "USE %s;\n\n", dbInfo->name); (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); - sprintf(tmpCommand, "show tables"); + sprintf(sqlstr, "show %s.tables", dbInfo->name); - tmpResult = taos_query(taosCon, tmpCommand); - code = taos_errno(tmpResult); + TAOS_RES* res = taos_query(taosCon, sqlstr); + int code = taos_errno(res); if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tmpCommand); - free(tmpCommand); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + char tmpBuf[TSDB_FILENAME_LEN + 1]; + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, ".show-tables.tmp"); + fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + taos_free_result(res); + return -1; + } - int32_t numOfTable = 0; - int32_t numOfThread = 0; - char tmpFileName[TSDB_FILENAME_LEN + 1]; - while ((row = taos_fetch_row(tmpResult)) != NULL) { - if (0 == numOfTable) { - memset(tmpFileName, 0, TSDB_FILENAME_LEN); - sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); - fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | 
S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); - taos_free_result(tmpResult); - for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); - (void)remove(tmpFileName); - } - free(tmpCommand); - return -1; - } - - numOfThread++; - } + TAOS_FIELD *fields = taos_fetch_fields(res); + int32_t numOfTable = 0; + while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); - + taosWrite(fd, &tableRecord, sizeof(STableRecord)); - + numOfTable++; - - if (numOfTable >= arguments->table_batch) { - numOfTable = 0; - close(fd); - fd = -1; - } } + taos_free_result(res); + lseek(fd, 0, SEEK_SET); + + int maxThreads = tsArguments.thread_num; + int tableOfPerFile ; + if (numOfTable <= tsArguments.thread_num) { + tableOfPerFile = 1; + maxThreads = numOfTable; + } else { + tableOfPerFile = numOfTable / tsArguments.thread_num; + if (0 != numOfTable % tsArguments.thread_num) { + tableOfPerFile += 1; + } + } + + char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); + if (NULL == tblBuf){ + fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); + close(fd); + return -1; + } + + int32_t numOfThread = 0; + int subFd = -1; + for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) { + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); + subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (subFd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } + sprintf(tmpBuf, ".show-tables.tmp"); + (void)remove(tmpBuf); + close(fd); + return -1; + } + + // read tableOfPerFile for fd, write to subFd + ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); + if (readLen <= 0) { + close(subFd); + break; + } + taosWrite(subFd, tblBuf, readLen); + close(subFd); + } + + sprintf(tmpBuf, ".show-tables.tmp"); + (void)remove(tmpBuf); if (fd >= 0) { close(fd); fd = -1; } - taos_free_result(tmpResult); + taos_free_result(res); // start multi threads to dumpout - taosStartDumpOutWorkThreads(arguments, numOfThread, dbInfo->name); + taosStartDumpOutWorkThreads(taosCon, arguments, numOfThread, dbInfo->name); for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); - (void)remove(tmpFileName); - } - - free(tmpCommand); + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } return 0; } -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName) { int counter = 0; int count_temp = 0; + char sqlstr[COMMAND_SIZE]; - char* tmpBuf = (char *)malloc(COMMAND_SIZE); - if (tmpBuf == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return; - } + char* pstr = sqlstr; - char* pstr = tmpBuf; - - pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); + pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name); for (; counter < numOfCols; counter++) { 
if (tableDes->cols[counter].note[0] != '\0') break; @@ -1420,12 +1594,10 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { pstr += sprintf(pstr, ");"); - fprintf(fp, "%s\n", tmpBuf); - - free(tmpBuf); + fprintf(fp, "%s\n\n", sqlstr); } -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp) { +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName) { int counter = 0; int count_temp = 0; @@ -1438,7 +1610,7 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols char *pstr = NULL; pstr = tmpBuf; - pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; @@ -1479,48 +1651,36 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols free(tmpBuf); } -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon) { - /* char temp[MAX_COMMAND_SIZE] = "\0"; */ - int64_t totalRows = 0; +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName) { + int64_t lastRowsPrint = 5000000; + int64_t totalRows = 0; int count = 0; char *pstr = NULL; TAOS_ROW row = NULL; int numFields = 0; - char *tbuf = NULL; - - char* tmpCommand = (char *)calloc(1, COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; + + if (arguments->schemaonly) { + return 0; } int32_t sql_buf_len = arguments->max_sql_len; char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); if (tmpBuffer == NULL) { fprintf(stderr, "failed to allocate memory\n"); - free(tmpCommand); return -1; } pstr = tmpBuffer; - if (arguments->schemaonly) { - free(tmpCommand); - free(tmpBuffer); - return 0; - } + char sqlstr[1024] = {0}; + sprintf(sqlstr, + "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", + dbName, tbname, arguments->start_time, arguments->end_time); - sprintf(tmpCommand, - "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", - tbname, - arguments->start_time, - arguments->end_time); - - TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + TAOS_RES* tmpResult = taos_query(taosCon, sqlstr); int32_t code = taos_errno(tmpResult); if (code != 0) { - fprintf(stderr, "failed to run command %s, reason: %s\n", tmpCommand, taos_errstr(taosCon)); - free(tmpCommand); + fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult)); free(tmpBuffer); taos_free_result(tmpResult); return -1; @@ -1529,14 +1689,6 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* numFields = taos_field_count(tmpResult); assert(numFields > 0); TAOS_FIELD *fields = taos_fetch_fields(tmpResult); - tbuf = (char *)malloc(COMMAND_SIZE); - if (tbuf == NULL) { - fprintf(stderr, "No enough memory\n"); - free(tmpCommand); - free(tmpBuffer); - taos_free_result(tmpResult); - return -1; - } int rowFlag = 0; int32_t curr_sqlstr_len = 0; @@ -1550,7 +1702,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* if (count == 0) { total_sqlstr_len = 0; - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s VALUES (", tbname); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, 
"INSERT INTO %s.%s VALUES (", dbName, tbname); } else { if (arguments->mysqlFlag) { if (0 == rowFlag) { @@ -1594,17 +1746,21 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* case TSDB_DATA_TYPE_DOUBLE: curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col])); break; - case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_BINARY: { + char tbuf[COMMAND_SIZE] = {0}; //*(pstr++) = '\''; converStringToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); //pstr = stpcpy(pstr, tbuf); //*(pstr++) = '\''; pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; - case TSDB_DATA_TYPE_NCHAR: + } + case TSDB_DATA_TYPE_NCHAR: { + char tbuf[COMMAND_SIZE] = {0}; convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; + } case TSDB_DATA_TYPE_TIMESTAMP: if (!arguments->mysqlFlag) { curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]); @@ -1624,9 +1780,14 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") "); - totalRows++; + totalRows++; count++; fprintf(fp, "%s", tmpBuffer); + + if (totalRows >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname); + lastRowsPrint += 5000000; + } total_sqlstr_len += curr_sqlstr_len; @@ -1638,19 +1799,12 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* //} } + fprintf(fp, "\n"); atomic_add_fetch_64(&totalDumpOutRows, totalRows); - - fprintf(fp, "\n"); - - if (tbuf) { - free(tbuf); - } taos_free_result(tmpResult); - tmpResult = NULL; - free(tmpCommand); free(tmpBuffer); - return 0; + return totalRows; } int taosCheckParam(struct arguments *arguments) { @@ -1986,159 +2140,6 @@ static FILE* taosOpenDumpInFile(char *fptr) { return f; } -int taosDumpInOneFile_old(TAOS * taos, FILE* fp, char* fcharset, char* encode) { - char *command = NULL; - char *lcommand = NULL; - int tsize = 0; - char *line = NULL; - _Bool isRun = true; - size_t line_size = 0; - char *pstr = NULL; - char *lstr = NULL; - size_t inbytesleft = 0; - size_t outbytesleft = COMMAND_SIZE; - char *tcommand = NULL; - char *charsetOfFile = NULL; - iconv_t cd = (iconv_t)(-1); - - command = (char *)malloc(COMMAND_SIZE); - lcommand = (char *)malloc(COMMAND_SIZE); - if (command == NULL || lcommand == NULL) { - fprintf(stderr, "failed to connect to allocate memory\n"); - goto _dumpin_exit_failure; - } - - // Resolve locale - if (*fcharset != '\0') { - charsetOfFile = fcharset; - } else { - charsetOfFile = encode; - } - - if (charsetOfFile != NULL && strcasecmp(tsCharset, charsetOfFile) != 0) { - cd = iconv_open(tsCharset, charsetOfFile); - if (cd == ((iconv_t)(-1))) { - fprintf(stderr, "Failed to open iconv handle\n"); - goto _dumpin_exit_failure; - } - } - - pstr = command; - int64_t linenu = 0; - while (1) { - ssize_t size = getline(&line, &line_size, fp); - linenu++; - if (size <= 0) break; - if (size == 1) { - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != ((iconv_t)(-1))) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - - taosReplaceCtrlChar(tcommand); - - if (queryDB(taos, tcommand) != 0) { - fprintf(stderr, "error sql: linenu: %" PRId64 " failed\n", linenu); - 
exit(0); - } - - pstr = command; - pstr[0] = '\0'; - tsize = 0; - isRun = true; - } - - continue; - } - - /* if (line[0] == '-' && line[1] == '-') continue; */ - - line[size - 1] = 0; - - if (tsize + size - 1 > COMMAND_SIZE) { - fprintf(stderr, "command is too long\n"); - goto _dumpin_exit_failure; - } - - if (line[size - 2] == '\\') { - line[size - 2] = ' '; - isRun = false; - } else { - isRun = true; - } - - memcpy(pstr, line, size - 1); - pstr += (size - 1); - *pstr = '\0'; - - if (!isRun) continue; - - if (command != pstr && !isEmptyCommand(command)) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != ((iconv_t)(-1))) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(tcommand); - if (queryDB(taos, tcommand) != 0) { - fprintf(stderr, "error sql: linenu:%" PRId64 " failed\n", linenu); - exit(0); - } - } - - pstr = command; - *pstr = '\0'; - tsize = 0; - } - - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != ((iconv_t)(-1))) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(lcommand); - if (queryDB(taos, tcommand) != 0) - fprintf(stderr, "error sql: linenu:%" PRId64 " failed \n", linenu); - } - - if (cd != ((iconv_t)(-1))) iconv_close(cd); - tfree(line); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return 0; - -_dumpin_exit_failure: - if (cd != ((iconv_t)(-1))) iconv_close(cd); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return -1; -} - int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) { int read_len = 0; char * cmd = NULL; @@ -2152,6 +2153,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c return -1; } + int lastRowsPrint = 5000000; int lineNo = 0; while ((read_len = getline(&line, &line_len, fp)) != -1) { ++lineNo; @@ -2172,12 +2174,18 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c memcpy(cmd + cmd_len, line, read_len); cmd[read_len + cmd_len]= '\0'; - if (queryDB(taos, cmd)) { + if (queryDbImpl(taos, cmd)) { fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName); } memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); - cmd_len = 0; + cmd_len = 0; + + if (lineNo >= lastRowsPrint) { + printf(" %d lines already be executed from file %s\n", lineNo, fileName); + lastRowsPrint += 5000000; + } } tfree(cmd); @@ -2204,7 +2212,7 @@ void* taosDumpInWorkThreadFp(void *arg) return NULL; } -static void taosStartDumpInWorkThreads(struct arguments *args) +static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args) { pthread_attr_t thattr; SThreadParaObj *pThread; @@ -2219,11 +2227,7 @@ static void taosStartDumpInWorkThreads(struct arguments *args) pThread = threadObj + t; pThread->threadIndex = t; pThread->totalThreads = totalThreads; - pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); - if (pThread->taosCon == NULL) { - fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); - exit(0); - } + pThread->taosCon = taosCon; 
pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); @@ -2272,7 +2276,7 @@ int taosDumpIn(struct arguments *arguments) { taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); } - taosStartDumpInWorkThreads(arguments); + taosStartDumpInWorkThreads(taos, arguments); taos_close(taos); taosFreeSQLFiles(); diff --git a/src/mnode/inc/mnodeMnode.h b/src/mnode/inc/mnodeMnode.h index 93f2fa11ea..ffdec02eb6 100644 --- a/src/mnode/inc/mnodeMnode.h +++ b/src/mnode/inc/mnodeMnode.h @@ -43,8 +43,8 @@ void mnodeIncMnodeRef(struct SMnodeObj *pMnode); void mnodeDecMnodeRef(struct SMnodeObj *pMnode); char * mnodeGetMnodeRoleStr(); -void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet); -void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet); +void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet, bool redirect); +void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet, bool redirect); char* mnodeGetMnodeMasterEp(); void mnodeGetMnodeInfos(void *mnodes); diff --git a/src/mnode/inc/mnodeSdb.h b/src/mnode/inc/mnodeSdb.h index 31ea2da640..e4df562d81 100644 --- a/src/mnode/inc/mnodeSdb.h +++ b/src/mnode/inc/mnodeSdb.h @@ -59,7 +59,7 @@ typedef struct SSdbRow { SMnodeMsg *pMsg; int32_t (*fpReq)(SMnodeMsg *pMsg); int32_t (*fpRsp)(SMnodeMsg *pMsg, int32_t code); - char reserveForSync[16]; + char reserveForSync[24]; SWalHead pHead[]; } SSdbRow; diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index f297dd51dd..745fbf2d98 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -104,7 +104,7 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) { dnodeUpdateEp(pDnode->dnodeId, pDnode->dnodeEp, pDnode->dnodeFqdn, &pDnode->dnodePort); mnodeUpdateDnodeEps(); - mInfo("dnode:%d, fqdn:%s ep:%s port:%d, do insert action", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); + mInfo("dnode:%d, fqdn:%s ep:%s port:%d is created", pDnode->dnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); return TSDB_CODE_SUCCESS; } @@ -303,7 +303,7 @@ void mnodeUpdateDnode(SDnodeObj *pDnode) { int32_t code = sdbUpdateRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { - mError("dnodeId:%d, failed update", pDnode->dnodeId); + mError("dnode:%d, failed update", pDnode->dnodeId); } } diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index 7b520c6022..6e001f4dfb 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -91,6 +91,7 @@ int32_t mnodeStartSystem() { return -1; } + dnodeReportStep("mnode-grant", "start to set grant infomation", 0); grantReset(TSDB_GRANT_ALL, 0); tsMgmtIsRunning = true; diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index 68acae7dec..8b3b2896ff 100644 --- a/src/mnode/src/mnodeMnode.c +++ b/src/mnode/src/mnodeMnode.c @@ -72,7 +72,7 @@ static int32_t mnodeMnodeActionInsert(SSdbRow *pRow) { pDnode->isMgmt = true; mnodeDecDnodeRef(pDnode); - mInfo("mnode:%d, fqdn:%s ep:%s port:%u, do insert action", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, + mInfo("mnode:%d, fqdn:%s ep:%s port:%u is created", pMnode->mnodeId, pDnode->dnodeFqdn, pDnode->dnodeEp, pDnode->dnodePort); return TSDB_CODE_SUCCESS; } @@ -202,13 +202,13 @@ void mnodeCancelGetNextMnode(void *pIter) { void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) { bool set = false; SMInfos mInfos = {0}; - mInfo("vgId:1, update mnodes epSet, numOfMnodes:%d pMinfos:%p", mnodeGetMnodesNum(), pMinfos); if (pMinfos != NULL) { + mInfo("vgId:1, update mnodes epSet, numOfMinfos:%d", 
pMinfos->mnodeNum); set = true; mInfos = *pMinfos; - } - else { + } else { + mInfo("vgId:1, update mnodes epSet, numOfMnodes:%d", mnodeGetMnodesNum()); int32_t index = 0; void * pIter = NULL; while (1) { @@ -273,14 +273,14 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) { mnodeMnodeUnLock(); } -void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet) { +void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet, bool redirect) { mnodeMnodeRdLock(); *epSet = tsMEpForPeer; mnodeMnodeUnLock(); mTrace("vgId:1, mnodes epSet for peer is returned, num:%d inUse:%d", tsMEpForPeer.numOfEps, tsMEpForPeer.inUse); for (int32_t i = 0; i < epSet->numOfEps; ++i) { - if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort + TSDB_PORT_DNODEDNODE) { + if (redirect && strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort + TSDB_PORT_DNODEDNODE) { epSet->inUse = (i + 1) % epSet->numOfEps; mTrace("vgId:1, mnode:%d, for peer ep:%s:%u, set inUse to %d", i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse); } else { @@ -289,14 +289,19 @@ void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet) { } } -void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet) { +void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet, bool redirect) { mnodeMnodeRdLock(); *epSet = tsMEpForShell; mnodeMnodeUnLock(); + if (mnodeGetDnodesNum() <= 1) { + epSet->numOfEps = 0; + return; + } + mTrace("vgId:1, mnodes epSet for shell is returned, num:%d inUse:%d", tsMEpForShell.numOfEps, tsMEpForShell.inUse); for (int32_t i = 0; i < epSet->numOfEps; ++i) { - if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) { + if (redirect && strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) { epSet->inUse = (i + 1) % epSet->numOfEps; mTrace("vgId:1, mnode:%d, for shell ep:%s:%u, set inUse to %d", i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse); } else { diff --git a/src/mnode/src/mnodePeer.c b/src/mnode/src/mnodePeer.c index 99fcf9b778..9bd8d7e4d7 100644 --- a/src/mnode/src/mnodePeer.c +++ b/src/mnode/src/mnodePeer.c @@ -53,7 +53,7 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) { if (!sdbIsMaster()) { SMnodeRsp *rpcRsp = &pMsg->rpcRsp; SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); - mnodeGetMnodeEpSetForPeer(epSet); + mnodeGetMnodeEpSetForPeer(epSet, true); rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 36b6ff7a59..7c35829f88 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -282,27 +282,34 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi // not thread safe, need optimized int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SHeartBeatMsg *pHBMsg) { - pConn->numOfQueries = htonl(pHBMsg->numOfQueries); - if (pConn->numOfQueries > 0) { + pConn->numOfQueries = 0; + pConn->numOfStreams = 0; + int32_t numOfQueries = htonl(pHBMsg->numOfQueries); + int32_t numOfStreams = htonl(pHBMsg->numOfStreams); + + if (numOfQueries > 0) { if (pConn->pQueries == NULL) { pConn->pQueries = calloc(sizeof(SQueryDesc), QUERY_STREAM_SAVE_SIZE); } - int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfQueries) * sizeof(SQueryDesc); + pConn->numOfQueries = MIN(QUERY_STREAM_SAVE_SIZE, numOfQueries); + + int32_t saveSize = pConn->numOfQueries * sizeof(SQueryDesc); if (saveSize > 0 && pConn->pQueries != NULL) { memcpy(pConn->pQueries, pHBMsg->pData, saveSize); } } - pConn->numOfStreams = htonl(pHBMsg->numOfStreams); - if 
(pConn->numOfStreams > 0) { + if (numOfStreams > 0) { if (pConn->pStreams == NULL) { pConn->pStreams = calloc(sizeof(SStreamDesc), QUERY_STREAM_SAVE_SIZE); } - int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfStreams) * sizeof(SStreamDesc); + pConn->numOfStreams = MIN(QUERY_STREAM_SAVE_SIZE, numOfStreams); + + int32_t saveSize = pConn->numOfStreams * sizeof(SStreamDesc); if (saveSize > 0 && pConn->pStreams != NULL) { - memcpy(pConn->pStreams, pHBMsg->pData + pConn->numOfQueries * sizeof(SQueryDesc), saveSize); + memcpy(pConn->pStreams, pHBMsg->pData + numOfQueries * sizeof(SQueryDesc), saveSize); } } @@ -450,6 +457,12 @@ static int32_t mnodeGetStreamMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; + pShow->bytes[cols] = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "dest table"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + pShow->bytes[cols] = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; strcpy(pSchema[cols].name, "ip:port"); @@ -524,6 +537,10 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, pShow->bytes[cols]); cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->dstTable, pShow->bytes[cols]); + cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port); STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, pShow->bytes[cols]); diff --git a/src/mnode/src/mnodeRead.c b/src/mnode/src/mnodeRead.c index c2a70bc01d..200f589b78 100644 --- a/src/mnode/src/mnodeRead.c +++ b/src/mnode/src/mnodeRead.c @@ -50,7 +50,7 @@ int32_t mnodeProcessRead(SMnodeMsg *pMsg) { if (!sdbIsMaster()) { SMnodeRsp *rpcRsp = &pMsg->rpcRsp; SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); - mnodeGetMnodeEpSetForShell(epSet); + mnodeGetMnodeEpSetForShell(epSet, true); rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); diff --git a/src/mnode/src/mnodeSdb.c b/src/mnode/src/mnodeSdb.c index 6cc4e09735..1ab6a363e7 100644 --- a/src/mnode/src/mnodeSdb.c +++ b/src/mnode/src/mnodeSdb.c @@ -183,18 +183,23 @@ static int32_t sdbInitWal() { return -1; } - sdbInfo("vgId:1, open wal for restore"); + sdbInfo("vgId:1, open sdb wal for restore"); int32_t code = walRestore(tsSdbMgmt.wal, NULL, sdbProcessWrite); if (code != TSDB_CODE_SUCCESS) { sdbError("vgId:1, failed to open wal for restore since %s", tstrerror(code)); return -1; } + + sdbInfo("vgId:1, sdb wal load success"); return 0; } static void sdbRestoreTables() { int32_t totalRows = 0; int32_t numOfTables = 0; + + sdbInfo("vgId:1, sdb start to check for integrity"); + for (int32_t tableId = 0; tableId < SDB_TABLE_MAX; ++tableId) { SSdbTable *pTable = sdbGetTableFromId(tableId); if (pTable == NULL) continue; @@ -204,7 +209,7 @@ static void sdbRestoreTables() { totalRows += pTable->numOfRows; numOfTables++; - sdbDebug("vgId:1, sdb:%s is restored, rows:%" PRId64, pTable->name, pTable->numOfRows); + sdbInfo("vgId:1, sdb:%s is checked, rows:%" PRId64, pTable->name, pTable->numOfRows); } sdbInfo("vgId:1, sdb is restored, mver:%" PRIu64 " rows:%d tables:%d", tsSdbMgmt.version, totalRows, numOfTables); @@ -628,6 +633,12 @@ static int32_t sdbProcessWrite(void *wparam, void *hparam, int32_t qtype, void * SSdbTable *pTable = 
sdbGetTableFromId(tableId); assert(pTable != NULL); + if (!mnodeIsRunning() && tsSdbMgmt.version % 100000 == 0) { + char stepDesc[TSDB_STEP_DESC_LEN] = {0}; + snprintf(stepDesc, TSDB_STEP_DESC_LEN, "%" PRIu64 " rows have been restored", tsSdbMgmt.version); + dnodeReportStep("mnode-sdb", stepDesc, 0); + } + if (qtype == TAOS_QTYPE_QUERY) return sdbPerformDeleteAction(pHead, pTable); pthread_mutex_lock(&tsSdbMgmt.mutex); diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 2da46d5b4b..3c1c92226a 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -282,7 +282,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { pRsp->onlineDnodes = htonl(mnodeGetOnlineDnodesNum()); pRsp->totalDnodes = htonl(mnodeGetDnodesNum()); - mnodeGetMnodeEpSetForShell(&pRsp->epSet); + mnodeGetMnodeEpSetForShell(&pRsp->epSet, false); pMsg->rpcRsp.rsp = pRsp; pMsg->rpcRsp.len = sizeof(SHeartBeatRsp); @@ -349,7 +349,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { pConnectRsp->writeAuth = pUser->writeAuth; pConnectRsp->superAuth = pUser->superAuth; - mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet); + mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet, false); connect_over: if (code != TSDB_CODE_SUCCESS) { diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 6297bb21d0..a5d7729ec4 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -49,12 +49,14 @@ #define CREATE_CTABLE_RETRY_TIMES 10 #define CREATE_CTABLE_RETRY_SEC 14 -int64_t tsCTableRid = -1; -static void * tsChildTableSdb; -int64_t tsSTableRid = -1; -static void * tsSuperTableSdb; -static int32_t tsChildTableUpdateSize; -static int32_t tsSuperTableUpdateSize; +int64_t tsCTableRid = -1; +static void * tsChildTableSdb; +int64_t tsSTableRid = -1; +static void * tsSuperTableSdb; +static SHashObj *tsSTableUidHash; +static int32_t tsChildTableUpdateSize; +static int32_t tsSuperTableUpdateSize; + static void * mnodeGetChildTable(char *tableId); static void * mnodeGetSuperTable(char *tableId); static void * mnodeGetSuperTableByUid(uint64_t uid); @@ -289,6 +291,7 @@ static int32_t mnodeChildTableActionDecode(SSdbRow *pRow) { } static int32_t mnodeChildTableActionRestored() { +#if 0 void *pIter = NULL; SCTableObj *pTable = NULL; @@ -345,6 +348,7 @@ static int32_t mnodeChildTableActionRestored() { } mnodeCancelGetNextChildTable(pIter); +#endif return 0; } @@ -447,6 +451,7 @@ static int32_t mnodeSuperTableActionInsert(SSdbRow *pRow) { } mnodeDecDbRef(pDb); + taosHashPut(tsSTableUidHash, &pStable->uid, sizeof(int64_t), &pStable, sizeof(int64_t)); return TSDB_CODE_SUCCESS; } @@ -459,6 +464,7 @@ static int32_t mnodeSuperTableActionDelete(SSdbRow *pRow) { } mnodeDecDbRef(pDb); + taosHashRemove(tsSTableUidHash, &pStable->uid, sizeof(int64_t)); return TSDB_CODE_SUCCESS; } @@ -570,6 +576,7 @@ static int32_t mnodeInitSuperTables() { .fpRestored = mnodeSuperTableActionRestored }; + tsSTableUidHash = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); tsSTableRid = sdbOpenTable(&desc); tsSuperTableSdb = sdbGetTableByRid(tsSTableRid); if (tsSuperTableSdb == NULL) { @@ -584,6 +591,9 @@ static int32_t mnodeInitSuperTables() { static void mnodeCleanupSuperTables() { sdbCloseTable(tsSTableRid); tsSuperTableSdb = NULL; + + taosHashCleanup(tsSTableUidHash); + tsSTableUidHash = NULL; } int32_t mnodeInitTables() { @@ -633,20 +643,12 @@ static void *mnodeGetSuperTable(char *tableId) { } static void *mnodeGetSuperTableByUid(uint64_t uid) { - SSTableObj 
*pStable = NULL; - void *pIter = NULL; + SSTableObj **ppStable = taosHashGet(tsSTableUidHash, &uid, sizeof(int64_t)); + if (ppStable == NULL || *ppStable == NULL) return NULL; - while (1) { - pIter = mnodeGetNextSuperTable(pIter, &pStable); - if (pStable == NULL) break; - if (pStable->uid == uid) { - mnodeCancelGetNextSuperTable(pIter); - return pStable; - } - mnodeDecTableRef(pStable); - } - - return NULL; + SSTableObj *pStable = *ppStable; + mnodeIncTableRef(pStable); + return pStable; } void *mnodeGetTable(char *tableId) { @@ -720,10 +722,17 @@ static void mnodeExtractTableName(char* tableId, char* name) { static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; - - if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pCreate->db); + + int32_t numOfTables = htonl(pCreate->numOfTables); + int32_t contentLen = htonl(pCreate->contLen); + if (numOfTables == 0 || contentLen == 0) { + // todo return error + } + + SCreateTableMsg *p = (SCreateTableMsg*)((char*) pCreate + sizeof(SCMCreateTableMsg)); + if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(p->db); if (pMsg->pDb == NULL) { - mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); + mError("msg:%p, app:%p table:%s, failed to create, db not selected", pMsg, pMsg->rpcMsg.ahandle, p->tableId); return TSDB_CODE_MND_DB_NOT_SELECTED; } @@ -732,28 +741,28 @@ static int32_t mnodeProcessCreateTableMsg(SMnodeMsg *pMsg) { return TSDB_CODE_MND_DB_IN_DROPPING; } - if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(pCreate->tableId); + if (pMsg->pTable == NULL) pMsg->pTable = mnodeGetTable(p->tableId); if (pMsg->pTable != NULL && pMsg->retry == 0) { - if (pCreate->getMeta) { - mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); + if (p->getMeta) { + mDebug("msg:%p, app:%p table:%s, continue to get meta", pMsg, pMsg->rpcMsg.ahandle, p->tableId); return mnodeGetChildTableMeta(pMsg); - } else if (pCreate->igExists) { - mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); + } else if (p->igExists) { + mDebug("msg:%p, app:%p table:%s, is already exist", pMsg, pMsg->rpcMsg.ahandle, p->tableId); return TSDB_CODE_SUCCESS; } else { mError("msg:%p, app:%p table:%s, failed to create, table already exist", pMsg, pMsg->rpcMsg.ahandle, - pCreate->tableId); + p->tableId); return TSDB_CODE_MND_TABLE_ALREADY_EXIST; } } - if (pCreate->numOfTags != 0) { + if (p->numOfTags != 0) { mDebug("msg:%p, app:%p table:%s, create stable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, - pCreate->tableId, pMsg->rpcMsg.handle); + p->tableId, pMsg->rpcMsg.handle); return mnodeProcessCreateSuperTableMsg(pMsg); } else { mDebug("msg:%p, app:%p table:%s, create ctable msg is received from thandle:%p", pMsg, pMsg->rpcMsg.ahandle, - pCreate->tableId, pMsg->rpcMsg.handle); + p->tableId, pMsg->rpcMsg.handle); return mnodeProcessCreateChildTableMsg(pMsg); } } @@ -859,7 +868,13 @@ static int32_t mnodeCreateSuperTableCb(SMnodeMsg *pMsg, int32_t code) { static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; - SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; + SCMCreateTableMsg *pCreate1 = pMsg->rpcMsg.pCont; + if (pCreate1->numOfTables == 0) { + // todo return to error message + } + + SCreateTableMsg* pCreate = (SCreateTableMsg*)((char*)pCreate1 + sizeof(SCMCreateTableMsg)); + SSTableObj * pStable = calloc(1, 
sizeof(SSTableObj)); if (pStable == NULL) { mError("msg:%p, app:%p table:%s, failed to create, no enough memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); @@ -1599,8 +1614,11 @@ static void mnodeProcessDropSuperTableRsp(SRpcMsg *rpcMsg) { mInfo("drop stable rsp received, result:%s", tstrerror(rpcMsg->code)); } -static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SCTableObj *pTable) { - STagData * pTagData = NULL; +static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pCreateMsg, SCTableObj *pTable) { + SCreateTableMsg* pMsg = (SCreateTableMsg*) ((char*)pCreateMsg + sizeof(SCMCreateTableMsg)); + + char* tagData = NULL; + int32_t tagDataLen = 0; int32_t totalCols = 0; int32_t contLen = 0; @@ -1608,9 +1626,13 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SCTableObj * totalCols = pTable->superTable->numOfColumns + pTable->superTable->numOfTags; contLen = sizeof(SMDCreateTableMsg) + totalCols * sizeof(SSchema) + pTable->sqlLen; if (pMsg != NULL) { - pTagData = (STagData *)pMsg->schema; - tagDataLen = htonl(pTagData->dataLen); + int32_t nameLen = htonl(*(int32_t*)pMsg->schema); + char* p = pMsg->schema + nameLen + sizeof(int32_t); + + tagDataLen = htonl(*(int32_t*) p); contLen += tagDataLen; + + tagData = p + sizeof(int32_t); } } else { totalCols = pTable->numOfColumns; @@ -1662,7 +1684,7 @@ static void *mnodeBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SCTableObj * } if (pTable->info.type == TSDB_CHILD_TABLE && pMsg != NULL) { - memcpy(pCreate->data + totalCols * sizeof(SSchema), pTagData->data, tagDataLen); + memcpy(pCreate->data + totalCols * sizeof(SSchema), tagData, tagDataLen); } if (pTable->info.type == TSDB_STREAM_TABLE) { @@ -1700,7 +1722,8 @@ static int32_t mnodeDoCreateChildTableFp(SMnodeMsg *pMsg) { static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { SCTableObj *pTable = (SCTableObj *)pMsg->pTable; - SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; + + SCreateTableMsg *pCreate = (SCreateTableMsg*) ((char*)pMsg->rpcMsg.pCont + sizeof(SCMCreateTableMsg)); assert(pTable); if (code == TSDB_CODE_SUCCESS) { @@ -1728,40 +1751,42 @@ static int32_t mnodeDoCreateChildTableCb(SMnodeMsg *pMsg, int32_t code) { static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { SVgObj *pVgroup = pMsg->pVgroup; - SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; + + SCMCreateTableMsg *p1 = pMsg->rpcMsg.pCont; + SCreateTableMsg *pCreate = (SCreateTableMsg*)((char*)p1 + sizeof(SCMCreateTableMsg)); + SCTableObj *pTable = calloc(1, sizeof(SCTableObj)); if (pTable == NULL) { mError("msg:%p, app:%p table:%s, failed to alloc memory", pMsg, pMsg->rpcMsg.ahandle, pCreate->tableId); return TSDB_CODE_MND_OUT_OF_MEMORY; } - if (pCreate->numOfColumns == 0) { - pTable->info.type = TSDB_CHILD_TABLE; - } else { - pTable->info.type = TSDB_NORMAL_TABLE; - } - - pTable->info.tableId = strdup(pCreate->tableId); + pTable->info.type = (pCreate->numOfColumns == 0)? 
TSDB_CHILD_TABLE:TSDB_NORMAL_TABLE; + pTable->info.tableId = strdup(pCreate->tableId); pTable->createdTime = taosGetTimestampMs(); pTable->tid = tid; pTable->vgId = pVgroup->vgId; if (pTable->info.type == TSDB_CHILD_TABLE) { - STagData *pTagData = (STagData *)pCreate->schema; // it is a tag key + int32_t nameLen = htonl(*(int32_t*) pCreate->schema); + char* name = (char*)pCreate->schema + sizeof(int32_t); + + char stableName[TSDB_TABLE_FNAME_LEN] = {0}; + memcpy(stableName, name, nameLen); char prefix[64] = {0}; size_t prefixLen = tableIdPrefix(pMsg->pDb->name, prefix, 64); - if (0 != strncasecmp(prefix, pTagData->name, prefixLen)) { + if (0 != strncasecmp(prefix, stableName, prefixLen)) { mError("msg:%p, app:%p table:%s, corresponding super table:%s not in this db", pMsg, pMsg->rpcMsg.ahandle, - pCreate->tableId, pTagData->name); + pCreate->tableId, stableName); mnodeDestroyChildTable(pTable); return TSDB_CODE_TDB_INVALID_CREATE_TB_MSG; } - if (pMsg->pSTable == NULL) pMsg->pSTable = mnodeGetSuperTable(pTagData->name); + if (pMsg->pSTable == NULL) pMsg->pSTable = mnodeGetSuperTable(stableName); if (pMsg->pSTable == NULL) { mError("msg:%p, app:%p table:%s, corresponding super table:%s does not exist", pMsg, pMsg->rpcMsg.ahandle, - pCreate->tableId, pTagData->name); + pCreate->tableId, stableName); mnodeDestroyChildTable(pTable); return TSDB_CODE_MND_INVALID_TABLE_NAME; } @@ -1839,7 +1864,9 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { } static int32_t mnodeProcessCreateChildTableMsg(SMnodeMsg *pMsg) { - SCMCreateTableMsg *pCreate = pMsg->rpcMsg.pCont; + //SCMCreateTableMsg* p1 = pMsg->rpcMsg.pCont; // there are several tables here. + SCreateTableMsg* pCreate = (SCreateTableMsg*)(pMsg->rpcMsg.pCont + sizeof(SCMCreateTableMsg)); + int32_t code = grantCheck(TSDB_GRANT_TIMESERIES); if (code != TSDB_CODE_SUCCESS) { mError("msg:%p, app:%p table:%s, failed to create, grant timeseries failed", pMsg, pMsg->rpcMsg.ahandle, @@ -2187,15 +2214,23 @@ static int32_t mnodeDoGetChildTableMeta(SMnodeMsg *pMsg, STableMetaMsg *pMeta) { static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) { STableInfoMsg *pInfo = pMsg->rpcMsg.pCont; - STagData *pTags = (STagData *)pInfo->tags; - int32_t tagLen = htonl(pTags->dataLen); - if (pTags->name[0] == 0) { - mError("msg:%p, app:%p table:%s, failed to create table on demand for stable is empty, tagLen:%d", pMsg, + + char* p = pInfo->tags; + int32_t nameLen = htonl(*(int32_t*) p); + p += sizeof(int32_t); + p += nameLen; + + int32_t tagLen = htonl(*(int32_t*) p); + p += sizeof(int32_t); + + int32_t totalLen = nameLen + tagLen + sizeof(int32_t)*2; + if (tagLen == 0 || nameLen == 0) { + mError("msg:%p, app:%p table:%s, failed to create table on demand for super table is empty, tagLen:%d", pMsg, pMsg->rpcMsg.ahandle, pInfo->tableId, tagLen); return TSDB_CODE_MND_INVALID_STABLE_NAME; } - int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + tagLen; + int32_t contLen = sizeof(SCMCreateTableMsg) + sizeof(SCreateTableMsg) + totalLen; SCMCreateTableMsg *pCreateMsg = calloc(1, contLen); if (pCreateMsg == NULL) { mError("msg:%p, app:%p table:%s, failed to create table while get meta info, no enough memory", pMsg, @@ -2203,16 +2238,24 @@ static int32_t mnodeAutoCreateChildTable(SMnodeMsg *pMsg) { return TSDB_CODE_MND_OUT_OF_MEMORY; } - size_t size = sizeof(pInfo->tableId); - tstrncpy(pCreateMsg->tableId, pInfo->tableId, size); - tstrncpy(pCreateMsg->db, pMsg->pDb->name, sizeof(pCreateMsg->db)); - pCreateMsg->igExists = 1; - 
pCreateMsg->getMeta = 1; + SCreateTableMsg* pCreate = (SCreateTableMsg*) ((char*) pCreateMsg + sizeof(SCMCreateTableMsg)); + + size_t size = tListLen(pInfo->tableId); + tstrncpy(pCreate->tableId, pInfo->tableId, size); + tstrncpy(pCreate->db, pMsg->pDb->name, sizeof(pCreate->db)); + pCreate->igExists = 1; + pCreate->getMeta = 1; + + pCreateMsg->numOfTables = htonl(1); pCreateMsg->contLen = htonl(contLen); - memcpy(pCreateMsg->schema, pTags, contLen - sizeof(SCMCreateTableMsg)); + memcpy(pCreate->schema, pInfo->tags, totalLen); + + char name[TSDB_TABLE_FNAME_LEN] = {0}; + memcpy(name, pInfo->tags + sizeof(int32_t), nameLen); + mDebug("msg:%p, app:%p table:%s, start to create on demand, tagLen:%d stable:%s", pMsg, pMsg->rpcMsg.ahandle, - pInfo->tableId, tagLen, pTags->name); + pInfo->tableId, tagLen, name); if (pMsg->rpcMsg.pCont != pMsg->pCont) { tfree(pMsg->rpcMsg.pCont); diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index d3020de6bd..eec559600f 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -315,7 +315,8 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; if (pVgid->pDnode == pDnode) { - mTrace("dnode:%d, receive status from dnode, vgId:%d status is %d:%s", pDnode->dnodeId, pVgroup->vgId, pVgid->role, syncRole[pVgid->role]); + mTrace("dnode:%d, receive status from dnode, vgId:%d status:%s last:%s", pDnode->dnodeId, pVgroup->vgId, + syncRole[pVload->role], syncRole[pVgid->role]); pVgid->role = pVload->role; if (pVload->role == TAOS_SYNC_ROLE_MASTER) { pVgroup->inUse = i; diff --git a/src/mnode/src/mnodeWrite.c b/src/mnode/src/mnodeWrite.c index 53981238a7..c0699b05b3 100644 --- a/src/mnode/src/mnodeWrite.c +++ b/src/mnode/src/mnodeWrite.c @@ -50,7 +50,7 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) { if (!sdbIsMaster()) { SMnodeRsp *rpcRsp = &pMsg->rpcRsp; SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); - mnodeGetMnodeEpSetForShell(epSet); + mnodeGetMnodeEpSetForShell(epSet, true); rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); diff --git a/src/os/inc/osMemory.h b/src/os/inc/osMemory.h index 0616006650..439e4cab72 100644 --- a/src/os/inc/osMemory.h +++ b/src/os/inc/osMemory.h @@ -52,7 +52,7 @@ void taosTMemset(void *ptr, int c); free((void *)(x)); \ x = 0; \ } \ - } while (0); + } while (0) #ifdef TAOS_MEM_CHECK #ifdef TAOS_MEM_CHECK_TEST diff --git a/src/os/inc/osString.h b/src/os/inc/osString.h index b2846a31bc..e07bea4f40 100644 --- a/src/os/inc/osString.h +++ b/src/os/inc/osString.h @@ -39,7 +39,7 @@ extern "C" { do { \ strncpy((dst), (src), (size)); \ (dst)[(size)-1] = 0; \ - } while (0); + } while (0) #ifndef TAOS_OS_FUNC_STRING_STR2INT64 int64_t tsosStr2int64(char *str); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index b73f7ce3f5..d1278e2eee 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -140,6 +140,11 @@ typedef struct SQueryCostInfo { uint64_t numOfTimeWindows; } SQueryCostInfo; +typedef struct { + int64_t vgroupLimit; + int64_t ts; +} SOrderedPrjQueryInfo; + typedef struct SQuery { int16_t numOfCols; int16_t numOfTags; @@ -167,6 +172,7 @@ typedef struct SQuery { tFilePage** sdata; STableQueryInfo* current; + SOrderedPrjQueryInfo prjInfo; // limit value for each vgroup, only available in global order projection query. 
SSingleColumnFilterInfo* pFilterInfo; } SQuery; @@ -179,13 +185,13 @@ typedef struct SQueryRuntimeEnv { uint16_t scanFlag; // denotes reversed scan of data or not SFillInfo* pFillInfo; SResultRowInfo windowResInfo; - STSBuf* pTSBuf; + STSBuf* pTsBuf; STSCursor cur; SQueryCostInfo summary; void* pQueryHandle; void* pSecQueryHandle; // another thread for bool stableQuery; // super table query or not - bool topBotQuery; // false + bool topBotQuery; // TODO used bitwise flag bool groupbyNormalCol; // denote if this is a groupby normal column query bool hasTagResults; // if there are tag values in final result or not bool timeWindowInterpo;// if the time window start/end required interpolation @@ -210,14 +216,13 @@ enum { typedef struct SQInfo { void* signature; int32_t code; // error code to returned to client - int64_t owner; // if it is in execution + int64_t owner; // if it is in execution void* tsdb; SMemRef memRef; int32_t vgId; STableGroupInfo tableGroupInfo; // table list SArray STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure SQueryRuntimeEnv runtimeEnv; -// SArray* arrTableIdInfo; SHashObj* arrTableIdInfo; int32_t groupIndex; @@ -233,6 +238,7 @@ typedef struct SQInfo { tsem_t ready; int32_t dataReady; // denote if query result is ready or not void* rspContext; // response context + int64_t startExecTs; // start to exec timestamp } SQInfo; #endif // TDENGINE_QUERYEXECUTOR_H diff --git a/src/query/inc/qSqlparser.h b/src/query/inc/qSqlparser.h index 513ab090f9..ee72500241 100644 --- a/src/query/inc/qSqlparser.h +++ b/src/query/inc/qSqlparser.h @@ -73,23 +73,27 @@ typedef struct SQuerySQL { SStrToken selectToken; // sql string } SQuerySQL; +typedef struct SCreatedTableInfo { + SStrToken name; // table name token + SStrToken stableName; // super table name token , for using clause + SArray *pTagVals; // create by using super table, tag value + char *fullname; // table full name + STagData tagdata; // true tag data, super table full name is in STagData + int8_t igExist; // ignore if exists +} SCreatedTableInfo; + typedef struct SCreateTableSQL { - struct SStrToken name; // meter name, create table [meterName] xxx - bool existCheck; - - int8_t type; // create normal table/from super table/ stream + SStrToken name; // table name, create table [name] xxx + int8_t type; // create normal table/from super table/ stream + bool existCheck; + struct { - SArray *pTagColumns; // SArray - SArray *pColumns; // SArray + SArray *pTagColumns; // SArray + SArray *pColumns; // SArray } colInfo; - - struct { - SStrToken stableName; // super table name, for using clause - SArray *pTagVals; // create by using metric, tag value - STagData tagdata; - } usingInfo; - - SQuerySQL *pSelect; + + SArray *childTableInfo; // SArray + SQuerySQL *pSelect; } SCreateTableSQL; typedef struct SAlterTableSQL { @@ -198,16 +202,16 @@ typedef struct tSQLExpr { } tSQLExpr; // used in select clause. 
select from xxx -typedef struct tSQLExprItem { +typedef struct tSqlExprItem { tSQLExpr *pNode; // The list of expressions char * aliasName; // alias name, null-terminated string -} tSQLExprItem; +} tSqlExprItem; // todo refactor by using SArray typedef struct tSQLExprList { int32_t nExpr; /* Number of expressions on the list */ int32_t nAlloc; /* Number of entries allocated below */ - tSQLExprItem *a; /* One entry for each expression */ + tSqlExprItem *a; /* One entry for each expression */ } tSQLExprList; /** @@ -229,62 +233,63 @@ SArray *tVariantListAppend(SArray *pList, tVariant *pVar, uint8_t sortOrder); SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int32_t index); SArray *tVariantListAppendToken(SArray *pList, SStrToken *pAliasToken, uint8_t sortOrder); -tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optType); +tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType); -void tSQLExprDestroy(tSQLExpr *); +void tSqlExprDestroy(tSQLExpr *pExpr); -tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken *pToken); +tSQLExprList *tSqlExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken *pToken); -void tSQLExprListDestroy(tSQLExprList *pList); +void tSqlExprListDestroy(tSQLExprList *pList); -SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, +SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit); -SCreateTableSQL *tSetCreateSQLElems(SArray *pCols, SArray *pTags, SStrToken *pMetricName, - SArray *pTagVals, SQuerySQL *pSelect, int32_t type); +SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSelect, int32_t type); -void tSQLExprNodeDestroy(tSQLExpr *pExpr); +void tSqlExprNodeDestroy(tSQLExpr *pExpr); -SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, SArray *pCols, SArray *pVals, int32_t type); +SAlterTableSQL * tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type); +SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists); void destroyAllSelectClause(SSubclauseInfo *pSql); void doDestroyQuerySql(SQuerySQL *pSql); +void freeCreateTableInfo(void* p); -SSqlInfo * setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pMeterName, int32_t type); +SSqlInfo * setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type); SSubclauseInfo *setSubclause(SSubclauseInfo *pClause, void *pSqlExprInfo); SSubclauseInfo *appendSelectClause(SSubclauseInfo *pInfo, void *pSubclause); -void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pMeterName, SStrToken *pIfNotExists); +void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken *pIfNotExists); -void SQLInfoDestroy(SSqlInfo *pInfo); +void SqlInfoDestroy(SSqlInfo *pInfo); void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...); -void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck); +void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck); void setShowOptions(SSqlInfo *pInfo, int32_t type, SStrToken* prefix, SStrToken* pPatterns); tDCLSQL *tTokenListAppend(tDCLSQL *pTokenList, SStrToken *pToken); void 
setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SStrToken *pToken, SCreateDBInfo *pDB, SStrToken *pIgExists); -void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctSQL *pAcctInfo); -void setCreateUserSQL(SSqlInfo *pInfo, SStrToken *pName, SStrToken *pPasswd); -void setKillSQL(SSqlInfo *pInfo, int32_t type, SStrToken *ip); -void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken* pPwd, SStrToken *pPrivilege); +void setCreateAcctSql(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctSQL *pAcctInfo); +void setCreateUserSql(SSqlInfo *pInfo, SStrToken *pName, SStrToken *pPasswd); +void setKillSql(SSqlInfo *pInfo, int32_t type, SStrToken *ip); +void setAlterUserSql(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken* pPwd, SStrToken *pPrivilege); void setDefaultCreateDbOption(SCreateDBInfo *pDBInfo); // prefix show db.tables; -void setDBName(SStrToken *pCpxName, SStrToken *pDB); +void setDbName(SStrToken *pCpxName, SStrToken *pDb); -tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optType); +tSQLExpr *tSqlExprIdValueCreate(SStrToken *pToken, int32_t optrType); -tSQLExpr *tSQLExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType); +tSQLExpr *tSqlExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType); -void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType); +void tSqlSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType); -void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *pToken); +void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type); void *ParseAlloc(void *(*mallocProc)(size_t)); diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index fb71c8a5fe..dde2e39845 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -24,35 +24,34 @@ #define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t)) -int32_t getOutputInterResultBufSize(SQuery* pQuery); - -void clearResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pRow, int16_t type); -void copyResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* dst, const SResultRow* src, int16_t type); -SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index); - -int32_t initWindowResInfo(SResultRowInfo* pWindowResInfo, int32_t size, int16_t type); - -void cleanupTimeWindowInfo(SResultRowInfo* pWindowResInfo); -void resetTimeWindowInfo(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pWindowResInfo); -void clearFirstNWindowRes(SQueryRuntimeEnv *pRuntimeEnv, int32_t num); - -void clearClosedTimeWindow(SQueryRuntimeEnv* pRuntimeEnv); -int32_t numOfClosedTimeWindow(SResultRowInfo* pWindowResInfo); -void closeTimeWindow(SResultRowInfo* pWindowResInfo, int32_t slot); -void closeAllTimeWindow(SResultRowInfo* pWindowResInfo); -void removeRedundantWindow(SResultRowInfo *pWindowResInfo, TSKEY lastKey, int32_t order); - -static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pWindowResInfo, int32_t slot) { - assert(pWindowResInfo != NULL && slot >= 0 && slot < pWindowResInfo->size); - return pWindowResInfo->pResult[slot]; -} - #define curTimeWindowIndex(_winres) ((_winres)->curIndex) #define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!sq))? 
(_q)->pExpr1[1].base.arg->argValue.i64:1) -bool isWindowResClosed(SResultRowInfo *pWindowResInfo, int32_t slot); +int32_t getOutputInterResultBufSize(SQuery* pQuery); + +size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv); +int32_t initResultRowInfo(SResultRowInfo* pResultRowInfo, int32_t size, int16_t type); +void cleanupResultRowInfo(SResultRowInfo* pResultRowInfo); + +void resetResultRowInfo(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo* pResultRowInfo); +void popFrontResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int32_t num); +void clearClosedResultRows(SQueryRuntimeEnv* pRuntimeEnv, SResultRowInfo *pResultRowInfo); +int32_t numOfClosedResultRows(SResultRowInfo* pResultRowInfo); +void closeAllResultRows(SResultRowInfo* pResultRowInfo); +void removeRedundantResultRows(SResultRowInfo *pResultRowInfo, TSKEY lastKey, int32_t order); int32_t initResultRow(SResultRow *pResultRow); +void closeResultRow(SResultRowInfo* pResultRowInfo, int32_t slot); +bool isResultRowClosed(SResultRowInfo *pResultRowInfo, int32_t slot); +void clearResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* pResultRow, int16_t type); +void copyResultRow(SQueryRuntimeEnv* pRuntimeEnv, SResultRow* dst, const SResultRow* src, int16_t type); + +SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRow* pRow, int32_t index); + +static FORCE_INLINE SResultRow *getResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) { + assert(pResultRowInfo != NULL && slot >= 0 && slot < pResultRowInfo->size); + return pResultRowInfo->pResult[slot]; +} static FORCE_INLINE char *getPosInResultPage(SQueryRuntimeEnv *pRuntimeEnv, int32_t columnIndex, SResultRow *pResult, tFilePage* page) { @@ -71,8 +70,6 @@ bool notNull_filter(SColumnFilterElem *pFilter, char* minval, char* maxval); __filter_func_t *getRangeFilterFuncArray(int32_t type); __filter_func_t *getValueFilterFuncArray(int32_t type); -size_t getWindowResultSize(SQueryRuntimeEnv* pRuntimeEnv); - SResultRowPool* initResultRowPool(size_t size); SResultRow* getNewResultRow(SResultRowPool* p); int64_t getResultRowPoolMemSize(SResultRowPool* p); diff --git a/src/query/inc/queryLog.h b/src/query/inc/queryLog.h index a1c447a6eb..825ff12538 100644 --- a/src/query/inc/queryLog.h +++ b/src/query/inc/queryLog.h @@ -22,15 +22,15 @@ extern "C" { #include "tlog.h" -extern int32_t qDebugFlag; -extern int32_t tscEmbedded; +extern uint32_t qDebugFlag; +extern uint32_t tscEmbedded; -#define qFatal(...) { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} -#define qError(...) { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} -#define qWarn(...) { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} -#define qInfo(...) { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} -#define qDebug(...) { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} -#define qTrace(...) { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} +#define qFatal(...) do { if (qDebugFlag & DEBUG_FATAL) { taosPrintLog("QRY FATAL ", 255, __VA_ARGS__); }} while(0) +#define qError(...) do { if (qDebugFlag & DEBUG_ERROR) { taosPrintLog("QRY ERROR ", 255, __VA_ARGS__); }} while(0) +#define qWarn(...) do { if (qDebugFlag & DEBUG_WARN) { taosPrintLog("QRY WARN ", 255, __VA_ARGS__); }} while(0) +#define qInfo(...) 
do { if (qDebugFlag & DEBUG_INFO) { taosPrintLog("QRY ", 255, __VA_ARGS__); }} while(0) +#define qDebug(...) do { if (qDebugFlag & DEBUG_DEBUG) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) +#define qTrace(...) do { if (qDebugFlag & DEBUG_TRACE) { taosPrintLog("QRY ", qDebugFlag, __VA_ARGS__); }} while(0) #ifdef __cplusplus } diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index e2b3bb6cbf..0de6027006 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -168,12 +168,12 @@ ids(A) ::= ID(X). {A = X; } ids(A) ::= STRING(X). {A = X; } %type ifexists {SStrToken} -ifexists(X) ::= IF EXISTS. {X.n = 1;} -ifexists(X) ::= . {X.n = 0;} +ifexists(X) ::= IF EXISTS. { X.n = 1;} +ifexists(X) ::= . { X.n = 0;} %type ifnotexists {SStrToken} -ifnotexists(X) ::= IF NOT EXISTS. {X.n = 1;} -ifnotexists(X) ::= . {X.n = 0;} +ifnotexists(X) ::= IF NOT EXISTS. { X.n = 1;} +ifnotexists(X) ::= . { X.n = 0;} /////////////////////////////////THE CREATE STATEMENT/////////////////////////////////////// //create option for dnode/db/user/account @@ -183,32 +183,32 @@ cmd ::= CREATE ACCOUNT ids(X) PASS ids(Y) acct_optr(Z). cmd ::= CREATE DATABASE ifnotexists(Z) ids(X) db_optr(Y). { setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &X, &Y, &Z);} cmd ::= CREATE USER ids(X) PASS ids(Y). { setCreateUserSQL(pInfo, &X, &Y);} -pps(Y) ::= . {Y.n = 0; } -pps(Y) ::= PPS INTEGER(X). {Y = X; } +pps(Y) ::= . { Y.n = 0; } +pps(Y) ::= PPS INTEGER(X). { Y = X; } -tseries(Y) ::= . {Y.n = 0; } -tseries(Y) ::= TSERIES INTEGER(X). {Y = X; } +tseries(Y) ::= . { Y.n = 0; } +tseries(Y) ::= TSERIES INTEGER(X). { Y = X; } -dbs(Y) ::= . {Y.n = 0; } -dbs(Y) ::= DBS INTEGER(X). {Y = X; } +dbs(Y) ::= . { Y.n = 0; } +dbs(Y) ::= DBS INTEGER(X). { Y = X; } -streams(Y) ::= . {Y.n = 0; } -streams(Y) ::= STREAMS INTEGER(X). {Y = X; } +streams(Y) ::= . { Y.n = 0; } +streams(Y) ::= STREAMS INTEGER(X). { Y = X; } -storage(Y) ::= . {Y.n = 0; } -storage(Y) ::= STORAGE INTEGER(X). {Y = X; } +storage(Y) ::= . { Y.n = 0; } +storage(Y) ::= STORAGE INTEGER(X). { Y = X; } -qtime(Y) ::= . {Y.n = 0; } -qtime(Y) ::= QTIME INTEGER(X). {Y = X; } +qtime(Y) ::= . { Y.n = 0; } +qtime(Y) ::= QTIME INTEGER(X). { Y = X; } -users(Y) ::= . {Y.n = 0; } -users(Y) ::= USERS INTEGER(X). {Y = X; } +users(Y) ::= . { Y.n = 0; } +users(Y) ::= USERS INTEGER(X). { Y = X; } -conns(Y) ::= . {Y.n = 0; } -conns(Y) ::= CONNS INTEGER(X). {Y = X; } +conns(Y) ::= . { Y.n = 0; } +conns(Y) ::= CONNS INTEGER(X). { Y = X; } -state(Y) ::= . {Y.n = 0; } -state(Y) ::= STATE ids(X). {Y = X; } +state(Y) ::= . { Y.n = 0; } +state(Y) ::= STATE ids(X). { Y = X; } %type acct_optr {SCreateAcctSQL} acct_optr(Y) ::= pps(C) tseries(D) storage(P) streams(F) qtime(Q) dbs(E) users(K) conns(L) state(M). { @@ -269,7 +269,7 @@ alter_db_optr(Y) ::= alter_db_optr(Z) blocks(X). { Y = Z; Y.numOfBlocks = s alter_db_optr(Y) ::= alter_db_optr(Z) comp(X). { Y = Z; Y.compressionLevel = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) wal(X). { Y = Z; Y.walLevel = strtol(X.z, NULL, 10); } alter_db_optr(Y) ::= alter_db_optr(Z) fsync(X). { Y = Z; Y.fsyncPeriod = strtol(X.z, NULL, 10); } -alter_db_optr(Y) ::= alter_db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); } +alter_db_optr(Y) ::= alter_db_optr(Z) update(X). { Y = Z; Y.update = strtol(X.z, NULL, 10); } %type typename {TAOS_FIELD} typename(A) ::= ids(X). { @@ -279,13 +279,13 @@ typename(A) ::= ids(X). { //define binary type, e.g., binary(10), nchar(10) typename(A) ::= ids(X) LP signed(Y) RP. 
{ - if (Y <= 0) { - X.type = 0; - tSQLSetColumnType(&A, &X); - } else { - X.type = -Y; // negative value of name length - tSQLSetColumnType(&A, &X); - } + if (Y <= 0) { + X.type = 0; + tSQLSetColumnType(&A, &X); + } else { + X.type = -Y; // negative value of name length + tSQLSetColumnType(&A, &X); + } } %type signed {int64_t} @@ -294,36 +294,60 @@ signed(A) ::= PLUS INTEGER(X). { A = strtol(X.z, NULL, 10); } signed(A) ::= MINUS INTEGER(X). { A = -strtol(X.z, NULL, 10);} ////////////////////////////////// The CREATE TABLE statement /////////////////////////////// -cmd ::= CREATE TABLE ifnotexists(Y) ids(X) cpxName(Z) create_table_args. { - X.n += Z.n; - setCreatedTableName(pInfo, &X, &Y); +cmd ::= CREATE TABLE create_table_args. {} +cmd ::= CREATE TABLE create_table_list(Z). { pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = Z;} + +%type create_table_list{SCreateTableSQL*} +%destructor create_table_list{destroyCreateTableSql($$);} +create_table_list(A) ::= create_from_stable(Z). { + SCreateTableSQL* pCreateTable = calloc(1, sizeof(SCreateTableSQL)); + pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); + + taosArrayPush(pCreateTable->childTableInfo, &Z); + pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; + A = pCreateTable; +} + +create_table_list(A) ::= create_table_list(X) create_from_stable(Z). { + taosArrayPush(X->childTableInfo, &Z); + A = X; } %type create_table_args{SCreateTableSQL*} -create_table_args(A) ::= LP columnlist(X) RP. { - A = tSetCreateSQLElems(X, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); - setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); +create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP. { + A = tSetCreateSQLElems(X, NULL, NULL, TSQL_CREATE_TABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); + + V.n += Z.n; + setCreatedTableName(pInfo, &V, &U); } // create super table -create_table_args(A) ::= LP columnlist(X) RP TAGS LP columnlist(Y) RP. { - A = tSetCreateSQLElems(X, Y, NULL, NULL, NULL, TSQL_CREATE_STABLE); - setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); +create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) LP columnlist(X) RP TAGS LP columnlist(Y) RP. { + A = tSetCreateSQLElems(X, Y, NULL, TSQL_CREATE_STABLE); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); + + V.n += Z.n; + setCreatedTableName(pInfo, &V, &U); } // create table by using super table // create table table_name using super_table_name tags(tag_values1, tag_values2) -create_table_args(A) ::= USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. { - X.n += F.n; - A = tSetCreateSQLElems(NULL, NULL, &X, Y, NULL, TSQL_CREATE_TABLE_FROM_STABLE); - setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); +%type create_from_stable{SCreatedTableInfo} +create_from_stable(A) ::= ifnotexists(U) ids(V) cpxName(Z) USING ids(X) cpxName(F) TAGS LP tagitemlist(Y) RP. { + X.n += F.n; + V.n += Z.n; + A = createNewChildTableInfo(&X, Y, &V, &U); } // create stream // create table table_name as select count(*) from super_table_name interval(time) -create_table_args(A) ::= AS select(S). { - A = tSetCreateSQLElems(NULL, NULL, NULL, NULL, S, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); +create_table_args(A) ::= ifnotexists(U) ids(V) cpxName(Z) AS select(S). { + A = tSetCreateSQLElems(NULL, NULL, S, TSQL_CREATE_STREAM); + setSQLInfo(pInfo, A, NULL, TSDB_SQL_CREATE_TABLE); + + V.n += Z.n; + setCreatedTableName(pInfo, &V, &U); } %type column{TAOS_FIELD} @@ -335,7 +359,7 @@ columnlist(A) ::= column(X). 
{A = taosArrayInit(4, sizeof(T // The information used for a column is the name and type of column: // tinyint smallint int bigint float double bool timestamp binary(x) nchar(x) column(A) ::= ids(X) typename(Y). { - tSQLSetColumnInfo(&A, &X, &Y); + tSQLSetColumnInfo(&A, &X, &Y); } %type tagitemlist {SArray*} @@ -345,10 +369,10 @@ column(A) ::= ids(X) typename(Y). { tagitemlist(A) ::= tagitemlist(X) COMMA tagitem(Y). { A = tVariantListAppend(X, &Y, -1); } tagitemlist(A) ::= tagitem(X). { A = tVariantListAppend(NULL, &X, -1); } -tagitem(A) ::= INTEGER(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } -tagitem(A) ::= FLOAT(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } -tagitem(A) ::= STRING(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } -tagitem(A) ::= BOOL(X). {toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= INTEGER(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= FLOAT(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= STRING(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } +tagitem(A) ::= BOOL(X). { toTSDBType(X.type); tVariantCreate(&A, &X); } tagitem(A) ::= NULL(X). { X.type = 0; tVariantCreate(&A, &X); } tagitem(A) ::= MINUS(X) INTEGER(Y).{ @@ -445,11 +469,11 @@ tablelist(A) ::= ids(X) cpxName(Y). { } tablelist(A) ::= ids(X) cpxName(Y) ids(Z). { - toTSDBType(X.type); - toTSDBType(Z.type); - X.n += Y.n; - A = tVariantListAppendToken(NULL, &X, -1); - A = tVariantListAppendToken(A, &Z, -1); + toTSDBType(X.type); + toTSDBType(Z.type); + X.n += Y.n; + A = tVariantListAppendToken(NULL, &X, -1); + A = tVariantListAppendToken(A, &Z, -1); } tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). { @@ -460,11 +484,11 @@ tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z). { } tablelist(A) ::= tablelist(Y) COMMA ids(X) cpxName(Z) ids(F). { - toTSDBType(X.type); - toTSDBType(F.type); - X.n += Z.n; - A = tVariantListAppendToken(Y, &X, -1); - A = tVariantListAppendToken(A, &F, -1); + toTSDBType(X.type); + toTSDBType(F.type); + X.n += Z.n; + A = tVariantListAppendToken(Y, &X, -1); + A = tVariantListAppendToken(A, &F, -1); } // The value of interval should be the form of "number+[a,s,m,h,d,n,y]" or "now" @@ -526,9 +550,9 @@ item(A) ::= ids(X) cpxName(Y). { } %type sortorder {int} -sortorder(A) ::= ASC. {A = TSDB_ORDER_ASC; } -sortorder(A) ::= DESC. {A = TSDB_ORDER_DESC;} -sortorder(A) ::= . {A = TSDB_ORDER_ASC;} //default is descend order +sortorder(A) ::= ASC. { A = TSDB_ORDER_ASC; } +sortorder(A) ::= DESC. { A = TSDB_ORDER_DESC;} +sortorder(A) ::= . { A = TSDB_ORDER_ASC; } // Ascending order by default //group by clause %type groupby_opt {SArray*} @@ -536,8 +560,8 @@ sortorder(A) ::= . {A = TSDB_ORDER_ASC;} //default is descend orde %type grouplist {SArray*} %destructor grouplist {taosArrayDestroy($$);} -groupby_opt(A) ::= . {A = 0;} -groupby_opt(A) ::= GROUP BY grouplist(X). {A = X;} +groupby_opt(A) ::= . { A = 0;} +groupby_opt(A) ::= GROUP BY grouplist(X). { A = X;} grouplist(A) ::= grouplist(X) COMMA item(Y). { A = tVariantListAppend(X, &Y, -1); @@ -583,20 +607,20 @@ where_opt(A) ::= WHERE expr(X). {A = X;} expr(A) ::= LP(X) expr(Y) RP(Z). {A = Y; A->token.z = X.z; A->token.n = (Z.z - X.z + 1);} -expr(A) ::= ID(X). {A = tSQLExprIdValueCreate(&X, TK_ID);} -expr(A) ::= ID(X) DOT ID(Y). {X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ID);} -expr(A) ::= ID(X) DOT STAR(Y). {X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ALL);} +expr(A) ::= ID(X). { A = tSQLExprIdValueCreate(&X, TK_ID);} +expr(A) ::= ID(X) DOT ID(Y). 
{ X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ID);} +expr(A) ::= ID(X) DOT STAR(Y). { X.n += (1+Y.n); A = tSQLExprIdValueCreate(&X, TK_ALL);} -expr(A) ::= INTEGER(X). {A = tSQLExprIdValueCreate(&X, TK_INTEGER);} -expr(A) ::= MINUS(X) INTEGER(Y). {X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);} -expr(A) ::= PLUS(X) INTEGER(Y). {X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);} -expr(A) ::= FLOAT(X). {A = tSQLExprIdValueCreate(&X, TK_FLOAT);} -expr(A) ::= MINUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);} -expr(A) ::= PLUS(X) FLOAT(Y). {X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);} -expr(A) ::= STRING(X). {A = tSQLExprIdValueCreate(&X, TK_STRING);} -expr(A) ::= NOW(X). {A = tSQLExprIdValueCreate(&X, TK_NOW); } -expr(A) ::= VARIABLE(X). {A = tSQLExprIdValueCreate(&X, TK_VARIABLE);} -expr(A) ::= BOOL(X). {A = tSQLExprIdValueCreate(&X, TK_BOOL);} +expr(A) ::= INTEGER(X). { A = tSQLExprIdValueCreate(&X, TK_INTEGER);} +expr(A) ::= MINUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);} +expr(A) ::= PLUS(X) INTEGER(Y). { X.n += Y.n; X.type = TK_INTEGER; A = tSQLExprIdValueCreate(&X, TK_INTEGER);} +expr(A) ::= FLOAT(X). { A = tSQLExprIdValueCreate(&X, TK_FLOAT);} +expr(A) ::= MINUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);} +expr(A) ::= PLUS(X) FLOAT(Y). { X.n += Y.n; X.type = TK_FLOAT; A = tSQLExprIdValueCreate(&X, TK_FLOAT);} +expr(A) ::= STRING(X). { A = tSQLExprIdValueCreate(&X, TK_STRING);} +expr(A) ::= NOW(X). { A = tSQLExprIdValueCreate(&X, TK_NOW); } +expr(A) ::= VARIABLE(X). { A = tSQLExprIdValueCreate(&X, TK_VARIABLE);} +expr(A) ::= BOOL(X). { A = tSQLExprIdValueCreate(&X, TK_BOOL);} // ordinary functions: min(x), max(x), top(k, 20) expr(A) ::= ID(X) LP exprlist(Y) RP(E). { A = tSQLExprCreateFunction(Y, &X, &E, X.type); } diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 5a923db52c..38bb0b8a71 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -250,10 +250,9 @@ enum { }; typedef struct STwaInfo { - TSKEY lastKey; int8_t hasResult; // flag to denote has value double dOutput; - double lastValue; + SPoint1 p; STimeWindow win; } STwaInfo; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index a45c0ac6ef..0d3b068a4a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -35,7 +35,7 @@ * check if the primary column is load by default, otherwise, the program will * forced to load primary column explicitly. 
*/ -#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0) +#define Q_STATUS_EQUAL(p, s) (((p) & (s)) != 0u) #define QUERY_IS_ASC_QUERY(q) (GET_FORWARD_DIRECTION_FACTOR((q)->order.order) == QUERY_ASC_FORWARD_STEP) #define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN) @@ -128,11 +128,14 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) +#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) static void setQueryStatus(SQuery *pQuery, int8_t status); static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv); -#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) +static int32_t getMaximumIdleDurationSec() { + return tsShellActivityTimer * 2; +} static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); @@ -461,13 +464,13 @@ static bool hasNullValue(SColIndex* pColIndex, SDataStatis *pStatis, SDataStatis return true; } -static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pWindowResInfo, char *pData, +static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, char *pData, int16_t bytes, bool masterscan, uint64_t uid) { SET_RES_WINDOW_KEY(pRuntimeEnv->keyBuf, pData, bytes, uid); int32_t *p1 = (int32_t *)taosHashGet(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes)); if (p1 != NULL) { - pWindowResInfo->curIndex = *p1; + pResultRowInfo->curIndex = *p1; } else { if (!masterscan) { // not master scan, do not add new timewindow return NULL; @@ -475,46 +478,46 @@ static SResultRow *doPrepareResultRowFromKey(SQueryRuntimeEnv *pRuntimeEnv, SRes // TODO refactor // more than the capacity, reallocate the resources - if (pWindowResInfo->size >= pWindowResInfo->capacity) { + if (pResultRowInfo->size >= pResultRowInfo->capacity) { int64_t newCapacity = 0; - if (pWindowResInfo->capacity > 10000) { - newCapacity = (int64_t)(pWindowResInfo->capacity * 1.25); + if (pResultRowInfo->capacity > 10000) { + newCapacity = (int64_t)(pResultRowInfo->capacity * 1.25); } else { - newCapacity = (int64_t)(pWindowResInfo->capacity * 1.5); + newCapacity = (int64_t)(pResultRowInfo->capacity * 1.5); } - char *t = realloc(pWindowResInfo->pResult, (size_t)(newCapacity * POINTER_BYTES)); + char *t = realloc(pResultRowInfo->pResult, (size_t)(newCapacity * POINTER_BYTES)); if (t == NULL) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } - pWindowResInfo->pResult = (SResultRow **)t; + pResultRowInfo->pResult = (SResultRow **)t; - int32_t inc = (int32_t)newCapacity - pWindowResInfo->capacity; - memset(&pWindowResInfo->pResult[pWindowResInfo->capacity], 0, POINTER_BYTES * inc); + int32_t inc = (int32_t)newCapacity - pResultRowInfo->capacity; + memset(&pResultRowInfo->pResult[pResultRowInfo->capacity], 0, POINTER_BYTES * inc); - pWindowResInfo->capacity = (int32_t)newCapacity; + pResultRowInfo->capacity = (int32_t)newCapacity; } SResultRow *pResult = getNewResultRow(pRuntimeEnv->pool); - pWindowResInfo->pResult[pWindowResInfo->size] = pResult; + pResultRowInfo->pResult[pResultRowInfo->size] = pResult; int32_t ret = initResultRow(pResult); if (ret != TSDB_CODE_SUCCESS) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_OUT_OF_MEMORY); } // add a 
new result set for a new group - pWindowResInfo->curIndex = pWindowResInfo->size++; + pResultRowInfo->curIndex = pResultRowInfo->size++; taosHashPut(pRuntimeEnv->pResultRowHashTable, pRuntimeEnv->keyBuf, GET_RES_WINDOW_KEY_LEN(bytes), - (char *)&pWindowResInfo->curIndex, sizeof(int32_t)); + (char *)&pResultRowInfo->curIndex, sizeof(int32_t)); } // too many time window in query - if (pWindowResInfo->size > MAX_INTERVAL_TIME_WINDOW) { + if (pResultRowInfo->size > MAX_INTERVAL_TIME_WINDOW) { longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_TOO_MANY_TIMEWINDOW); } - return getResultRow(pWindowResInfo, pWindowResInfo->curIndex); + return getResultRow(pResultRowInfo, pResultRowInfo->curIndex); } // get the correct time window according to the handled timestamp @@ -611,19 +614,17 @@ static int32_t addNewWindowResultBuf(SResultRow *pWindowRes, SDiskbasedResultBuf return 0; } -static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pWindowResInfo, SDataBlockInfo* pBockInfo, +static int32_t setWindowOutputBufByKey(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, SDataBlockInfo* pBockInfo, STimeWindow *win, bool masterscan, bool* newWind, SResultRow** pResult) { assert(win->skey <= win->ekey); SDiskbasedResultBuf *pResultBuf = pRuntimeEnv->pResultBuf; // todo refactor int64_t uid = getResultInfoUId(pRuntimeEnv); - SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pWindowResInfo, (char *)&win->skey, TSDB_KEYSIZE, masterscan, uid); + SResultRow *pResultRow = doPrepareResultRowFromKey(pRuntimeEnv, pResultRowInfo, (char *)&win->skey, TSDB_KEYSIZE, masterscan, uid); if (pResultRow == NULL) { *newWind = false; - - // no master scan, no result generated means error occurs - return masterscan? -1:0; + return masterscan? -1:0; // no master scan, no result generated means error occurs } *newWind = true; @@ -700,65 +701,60 @@ static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_se return forwardStep; } +static int32_t updateResultRowCurrentIndex(SResultRowInfo* pWindowResInfo, TSKEY lastKey, bool ascQuery) { + int32_t i = 0; + int64_t skey = TSKEY_INITIAL_VAL; + + int32_t numOfClosed = 0; + for (i = 0; i < pWindowResInfo->size; ++i) { + SResultRow *pResult = pWindowResInfo->pResult[i]; + if (pResult->closed) { + numOfClosed += 1; + continue; + } + + TSKEY ekey = pResult->win.ekey; + if ((ekey <= lastKey && ascQuery) || (pResult->win.skey >= lastKey && !ascQuery)) { + closeResultRow(pWindowResInfo, i); + } else { + skey = pResult->win.skey; + break; + } + } + + // all windows are closed, set the last one to be the skey + if (skey == TSKEY_INITIAL_VAL) { + assert(i == pWindowResInfo->size); + pWindowResInfo->curIndex = pWindowResInfo->size - 1; + } else { + pWindowResInfo->curIndex = i; + pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex]->win.skey; + } + + return numOfClosed; +} + /** * NOTE: the query status only set for the first scan of master scan. */ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SResultRowInfo *pWindowResInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (pRuntimeEnv->scanFlag != MASTER_SCAN) { - return pWindowResInfo->size; - } - - // for group by normal column query, close time window and return. 
- if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { - closeAllTimeWindow(pWindowResInfo); + if (pRuntimeEnv->scanFlag != MASTER_SCAN || pWindowResInfo->size == 0) { return pWindowResInfo->size; } // no qualified results exist, abort check int32_t numOfClosed = 0; - - if (pWindowResInfo->size == 0) { - return pWindowResInfo->size; - } + bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); // query completed - if ((lastKey >= pQuery->current->win.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (lastKey <= pQuery->current->win.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { - closeAllTimeWindow(pWindowResInfo); + if ((lastKey >= pQuery->current->win.ekey && ascQuery) || (lastKey <= pQuery->current->win.ekey && (!ascQuery))) { + closeAllResultRows(pWindowResInfo); pWindowResInfo->curIndex = pWindowResInfo->size - 1; setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL); } else { // set the current index to be the last unclosed window - int32_t i = 0; - int64_t skey = TSKEY_INITIAL_VAL; - - for (i = 0; i < pWindowResInfo->size; ++i) { - SResultRow *pResult = pWindowResInfo->pResult[i]; - if (pResult->closed) { - numOfClosed += 1; - continue; - } - - TSKEY ekey = pResult->win.ekey; - if ((ekey <= lastKey && QUERY_IS_ASC_QUERY(pQuery)) || - (pResult->win.skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) { - closeTimeWindow(pWindowResInfo, i); - } else { - skey = pResult->win.skey; - break; - } - } - - // all windows are closed, set the last one to be the skey - if (skey == TSKEY_INITIAL_VAL) { - assert(i == pWindowResInfo->size); - pWindowResInfo->curIndex = pWindowResInfo->size - 1; - } else { - pWindowResInfo->curIndex = i; - } - - pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex]->win.skey; + numOfClosed = updateResultRowCurrentIndex(pWindowResInfo, lastKey, ascQuery); // the number of completed slots are larger than the threshold, return current generated results to client. if (numOfClosed > pQuery->rec.threshold) { @@ -1047,24 +1043,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in } } -//static double getTSWindowInterpoVal(SColumnInfoData* pColInfo, int16_t srcColIndex, int16_t rowIndex, TSKEY key, char** prevRow, TSKEY* tsCols, int32_t step) { -// TSKEY start = tsCols[rowIndex]; -// TSKEY prevTs = (rowIndex == 0)? *(TSKEY *) prevRow[0] : tsCols[rowIndex - step]; -// -// double v1 = 0, v2 = 0, v = 0; -// char *prevVal = (rowIndex == 0)? 
prevRow[srcColIndex] : ((char*)pColInfo->pData) + (rowIndex - step) * pColInfo->info.bytes; -// -// GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)prevVal); -// GET_TYPED_DATA(v2, double, pColInfo->info.type, (char *)pColInfo->pData + rowIndex * pColInfo->info.bytes); -// -// SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; -// SPoint point2 = (SPoint){.key = start, .val = &v2}; -// SPoint point = (SPoint){.key = key, .val = &v}; -// taosGetLinearInterpolationVal(TSDB_DATA_TYPE_DOUBLE, &point1, &point2, &point); -// -// return v; -//} - // window start key interpolation static bool setTimeWindowInterpolationStartTs(SQueryRuntimeEnv* pRuntimeEnv, int32_t pos, int32_t numOfRows, SArray* pDataBlock, TSKEY* tsCols, STimeWindow* win) { SQuery* pQuery = pRuntimeEnv->pQuery; @@ -1235,6 +1213,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_START_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP); } done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP); @@ -1246,6 +1226,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_END_INTERP); } } @@ -1286,6 +1268,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_START_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP); } done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP); @@ -1296,6 +1280,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_END_INTERP); } } @@ -1363,14 +1349,7 @@ static int32_t setGroupResultOutputBuf(SQueryRuntimeEnv *pRuntimeEnv, char *pDat } int64_t v = -1; - switch(type) { - case TSDB_DATA_TYPE_BOOL: - case TSDB_DATA_TYPE_TINYINT: v = GET_INT8_VAL(pData); break; - case TSDB_DATA_TYPE_SMALLINT: v = GET_INT16_VAL(pData); break; - case TSDB_DATA_TYPE_INT: v = GET_INT32_VAL(pData); break; - case TSDB_DATA_TYPE_BIGINT: v = GET_INT64_VAL(pData); break; - } - + GET_TYPED_DATA(v, int64_t, type, pData); if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { if (pResultRow->key == NULL) { pResultRow->key = malloc(varDataTLen(pData)); @@ -1439,7 +1418,7 @@ static char *getGroupbyColumnData(SQuery *pQuery, int16_t *type, int16_t *bytes, static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { SQuery *pQuery = pRuntimeEnv->pQuery; - STSElem elem = tsBufGetElem(pRuntimeEnv->pTSBuf); + STSElem elem = tsBufGetElem(pRuntimeEnv->pTsBuf); SQLFunctionCtx *pCtx = pRuntimeEnv->pCtx; // compare tag first @@ -1451,8 +1430,8 @@ static int32_t doTSJoinFilter(SQueryRuntimeEnv *pRuntimeEnv, int32_t offset) { #if defined(_DEBUG_VIEW) printf("elem in comp ts file:%" PRId64 ", key:%" PRId64 ", tag:%"PRIu64", query order:%d, ts order:%d, traverse:%d, index:%d\n", - elem.ts, key, elem.tag.i64Key, pQuery->order.order, pRuntimeEnv->pTSBuf->tsOrder, - pRuntimeEnv->pTSBuf->cur.order, pRuntimeEnv->pTSBuf->cur.tsIndex); + elem.ts, key, elem.tag.i64Key, pQuery->order.order, pRuntimeEnv->pTsBuf->tsOrder, + pRuntimeEnv->pTsBuf->cur.order, 
pRuntimeEnv->pTsBuf->cur.tsIndex); #endif if (QUERY_IS_ASC_QUERY(pQuery)) { @@ -1622,9 +1601,9 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS // from top to bottom in desc // from bottom to top in asc order - if (pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->pTsBuf != NULL) { qDebug("QInfo:%p process data rows, numOfRows:%d, query order:%d, ts comp order:%d", pQInfo, pDataBlockInfo->rows, - pQuery->order.order, pRuntimeEnv->pTSBuf->cur.order); + pQuery->order.order, pRuntimeEnv->pTsBuf->cur.order); } int32_t offset = -1; @@ -1634,7 +1613,7 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS for (int32_t j = 0; j < pDataBlockInfo->rows; ++j) { offset = GET_COL_DATA_POS(pQuery, j, step); - if (pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->pTsBuf != NULL) { int32_t ret = doTSJoinFilter(pRuntimeEnv, offset); if (ret == TS_JOIN_TAG_NOT_EQUALS) { break; @@ -1748,9 +1727,9 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS prevTs = tsCols[offset]; prevRowIndex = offset; - if (pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->pTsBuf != NULL) { // if timestamp filter list is empty, quit current query - if (!tsBufNextPos(pRuntimeEnv->pTSBuf)) { + if (!tsBufNextPos(pRuntimeEnv->pTsBuf)) { setQueryStatus(pQuery, QUERY_COMPLETED); break; } @@ -1764,8 +1743,8 @@ static void rowwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis *pS item->lastKey = (QUERY_IS_ASC_QUERY(pQuery)? pDataBlockInfo->window.ekey:pDataBlockInfo->window.skey) + step; } - if (pRuntimeEnv->pTSBuf != NULL) { - item->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + if (pRuntimeEnv->pTsBuf != NULL) { + item->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); } // todo refactor: extract method @@ -1787,7 +1766,7 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl STableQueryInfo* pTableQInfo = pQuery->current; SResultRowInfo* pWindowResInfo = &pRuntimeEnv->windowResInfo; - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) { + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->groupbyNormalCol) { rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); } else { blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); @@ -1799,9 +1778,12 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl // interval query with limit applied int32_t numOfRes = 0; - if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo); - } else { + } else if (pRuntimeEnv->groupbyNormalCol) { + closeAllResultRows(pWindowResInfo); + numOfRes = pWindowResInfo->size; + } else { // projection query numOfRes = (int32_t)getNumOfResult(pRuntimeEnv); // update the number of output result @@ -2103,7 +2085,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { SQInfo* pQInfo = (SQInfo*) GET_QINFO_ADDR(pRuntimeEnv); qDebug("QInfo:%p teardown runtime env", pQInfo); - cleanupTimeWindowInfo(&pRuntimeEnv->windowResInfo); + cleanupResultRowInfo(&pRuntimeEnv->windowResInfo); if (pRuntimeEnv->pCtx != NULL) { for (int32_t i = 0; i < pQuery->numOfOutput; ++i) { @@ -2125,7 +2107,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { destroyResultBuf(pRuntimeEnv->pResultBuf); doFreeQueryHandle(pQInfo); - 
pRuntimeEnv->pTSBuf = tsBufDestroy(pRuntimeEnv->pTSBuf); + pRuntimeEnv->pTsBuf = tsBufDestroy(pRuntimeEnv->pTsBuf); tfree(pRuntimeEnv->offset); tfree(pRuntimeEnv->keyBuf); @@ -2138,8 +2120,31 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); } +static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) { + return pQInfo->rspContext != NULL; +} + #define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) +static bool isQueryKilled(SQInfo *pQInfo) { + if (IS_QUERY_KILLED(pQInfo)) { + return true; + } + + // query has been executed more than tsShellActivityTimer, and the retrieve has not arrived + // abort current query execution. + if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs) > getMaximumIdleDurationSec()) && + (!needBuildResAfterQueryComplete(pQInfo))) { + + assert(pQInfo->startExecTs != 0); + qDebug("QInfo:%p retrieve not arrive beyond %d sec, abort current query execution, start:%"PRId64", current:%d", pQInfo, 1, + pQInfo->startExecTs, taosGetTimestampSec()); + return true; + } + + return false; +} + static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;} static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { @@ -2610,11 +2615,12 @@ static bool overlapWithTimeWindow(SQuery* pQuery, SDataBlockInfo* pBlockInfo) { } int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo * pWindowResInfo, void* pQueryHandle, SDataBlockInfo* pBlockInfo, SDataStatis **pStatis, SArray** pDataBlock, uint32_t* status) { - SQuery *pQuery = pRuntimeEnv->pQuery; - *status = BLK_DATA_NO_NEEDED; - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf > 0) { + SQuery *pQuery = pRuntimeEnv->pQuery; + SQueryCostInfo* pCost = &pRuntimeEnv->summary; + + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf > 0) { *status = BLK_DATA_ALL_NEEDED; } else { // check if this data block is required to load @@ -2634,7 +2640,6 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo * pW bool masterScan = IS_MASTER_SCAN(pRuntimeEnv); TSKEY k = QUERY_IS_ASC_QUERY(pQuery)? pBlockInfo->window.skey:pBlockInfo->window.ekey; - STimeWindow win = getActiveTimeWindow(pWindowResInfo, k, pQuery); if (setWindowOutputBufByKey(pRuntimeEnv, pWindowResInfo, pBlockInfo, &win, masterScan, &hasTimeWindow, &pResult) != TSDB_CODE_SUCCESS) { @@ -2658,35 +2663,34 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo * pW if ((*status) == BLK_DATA_NO_NEEDED) { qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); - pRuntimeEnv->summary.discardBlocks += 1; + pCost->discardBlocks += 1; } else if ((*status) == BLK_DATA_STATIS_NEEDED) { // this function never returns error? 
tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis); - - pRuntimeEnv->summary.loadBlockStatis += 1; + pCost->loadBlockStatis += 1; if (*pStatis == NULL) { // data block statistics does not exist, load data block *pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); - pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows; + pCost->totalCheckedRows += pBlockInfo->rows; } } else { assert((*status) == BLK_DATA_ALL_NEEDED); // load the data block statistics to perform further filter - pRuntimeEnv->summary.loadBlockStatis += 1; + pCost->loadBlockStatis += 1; tsdbRetrieveDataBlockStatisInfo(pQueryHandle, pStatis); if (!needToLoadDataBlock(pRuntimeEnv, *pStatis, pRuntimeEnv->pCtx, pBlockInfo->rows)) { // current block has been discard due to filter applied - pRuntimeEnv->summary.discardBlocks += 1; + pCost->discardBlocks += 1; qDebug("QInfo:%p data block discard, brange:%"PRId64 "-%"PRId64", rows:%d", GET_QINFO_ADDR(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); (*status) = BLK_DATA_DISCARD; } - pRuntimeEnv->summary.totalCheckedRows += pBlockInfo->rows; - pRuntimeEnv->summary.loadBlocks += 1; + pCost->totalCheckedRows += pBlockInfo->rows; + pCost->loadBlocks += 1; *pDataBlock = tsdbRetrieveDataBlock(pQueryHandle, NULL); if (*pDataBlock == NULL) { return terrno; @@ -2744,7 +2748,7 @@ int32_t binarySearchForKey(char *pValue, int num, TSKEY key, int order) { } numOfRows = lastPos - firstPos + 1; - midPos = (numOfRows >> 1) + firstPos; + midPos = (numOfRows >> 1u) + firstPos; if (key < keyList[midPos]) { lastPos = midPos - 1; @@ -2864,7 +2868,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; - if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { + if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -2914,7 +2918,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { if (QUERY_IS_INTERVAL_QUERY(pQuery) && (IS_MASTER_SCAN(pRuntimeEnv)|| pRuntimeEnv->scanFlag == REPEAT_SCAN)) { if (Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) { - closeAllTimeWindow(&pRuntimeEnv->windowResInfo); + closeAllResultRows(&pRuntimeEnv->windowResInfo); pRuntimeEnv->windowResInfo.curIndex = pRuntimeEnv->windowResInfo.size - 1; // point to the last time window } else { assert(Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)); @@ -3002,7 +3006,7 @@ void setTagVal(SQueryRuntimeEnv *pRuntimeEnv, void *pTable, void *tsdb) { // set the join tag for first column SSqlFuncMsg *pFuncMsg = &pExprInfo->base; - if ((pFuncMsg->functionId == TSDB_FUNC_TS || pFuncMsg->functionId == TSDB_FUNC_PRJ) && pRuntimeEnv->pTSBuf != NULL && + if ((pFuncMsg->functionId == TSDB_FUNC_TS || pFuncMsg->functionId == TSDB_FUNC_PRJ) && pRuntimeEnv->pTsBuf != NULL && pFuncMsg->colInfo.colIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { assert(pFuncMsg->numOfParams == 1); @@ -3432,7 +3436,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { int64_t startt = taosGetTimestampMs(); while (1) { - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); tfree(pTableList); @@ -3693,8 +3697,8 @@ void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) { int32_t initResultRow(SResultRow *pResultRow) { pResultRow->pCellInfo = (SResultRowCellInfo*)((char*)pResultRow + sizeof(SResultRow)); - pResultRow->pageId = -1; - pResultRow->rowId = -1; + pResultRow->pageId = -1; + pResultRow->rowId = -1; 
return TSDB_CODE_SUCCESS; } @@ -3894,10 +3898,10 @@ static void setEnvBeforeReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatusI SQInfo *pQInfo = GET_QINFO_ADDR(pRuntimeEnv); SQuery *pQuery = pRuntimeEnv->pQuery; - pStatus->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); // save the cursor - if (pRuntimeEnv->pTSBuf) { - SWITCH_ORDER(pRuntimeEnv->pTSBuf->cur.order); - bool ret = tsBufNextPos(pRuntimeEnv->pTSBuf); + pStatus->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); // save the cursor + if (pRuntimeEnv->pTsBuf) { + SWITCH_ORDER(pRuntimeEnv->pTsBuf->cur.order); + bool ret = tsBufNextPos(pRuntimeEnv->pTsBuf); assert(ret); } @@ -3939,9 +3943,9 @@ static void clearEnvAfterReverseScan(SQueryRuntimeEnv *pRuntimeEnv, SQueryStatus SWITCH_ORDER(pQuery->order.order); switchCtxOrder(pRuntimeEnv); - tsBufSetCursor(pRuntimeEnv->pTSBuf, &pStatus->cur); - if (pRuntimeEnv->pTSBuf) { - pRuntimeEnv->pTSBuf->cur.order = pQuery->order.order; + tsBufSetCursor(pRuntimeEnv->pTsBuf, &pStatus->cur); + if (pRuntimeEnv->pTsBuf) { + pRuntimeEnv->pTsBuf->cur.order = pQuery->order.order; } SET_MASTER_SCAN_FLAG(pRuntimeEnv); @@ -4018,7 +4022,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { cond.twindow.skey, cond.twindow.ekey); // check if query is killed or not - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } } @@ -4043,12 +4047,12 @@ void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv) { // for each group result, call the finalize function for each column SResultRowInfo *pWindowResInfo = &pRuntimeEnv->windowResInfo; if (pRuntimeEnv->groupbyNormalCol) { - closeAllTimeWindow(pWindowResInfo); + closeAllResultRows(pWindowResInfo); } for (int32_t i = 0; i < pWindowResInfo->size; ++i) { SResultRow *buf = pWindowResInfo->pResult[i]; - if (!isWindowResClosed(pWindowResInfo, i)) { + if (!isResultRowClosed(pWindowResInfo, i)) { continue; } @@ -4098,7 +4102,7 @@ static STableQueryInfo *createTableQueryInfo(SQueryRuntimeEnv *pRuntimeEnv, void // set more initial size of interval/groupby query if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { int32_t initialSize = 128; - int32_t code = initWindowResInfo(&pTableQueryInfo->windowResInfo, initialSize, TSDB_DATA_TYPE_INT); + int32_t code = initResultRowInfo(&pTableQueryInfo->windowResInfo, initialSize, TSDB_DATA_TYPE_INT); if (code != TSDB_CODE_SUCCESS) { return NULL; } @@ -4114,7 +4118,7 @@ void destroyTableQueryInfoImpl(STableQueryInfo *pTableQueryInfo) { } tVariantDestroy(&pTableQueryInfo->tag); - cleanupTimeWindowInfo(&pTableQueryInfo->windowResInfo); + cleanupResultRowInfo(&pTableQueryInfo->windowResInfo); } /** @@ -4130,7 +4134,7 @@ void setExecutionContext(SQInfo *pQInfo, int32_t groupIndex, TSKEY nextKey) { // lastKey needs to be updated pTableQueryInfo->lastKey = nextKey; - if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) { setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo); } @@ -4219,13 +4223,13 @@ int32_t setAdditionalInfo(SQInfo *pQInfo, void* pTable, STableQueryInfo *pTableQ setTagVal(pRuntimeEnv, pTable, pQInfo->tsdb); // both the master and supplement scan needs to set the correct ts comp start position - if (pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->pTsBuf != NULL) { tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; if (pTableQueryInfo->cur.vgroupIndex == -1) { tVariantAssign(&pTableQueryInfo->tag, pTag); - STSElem elem = 
tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, &pTableQueryInfo->tag); + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTsBuf, pQInfo->vgId, &pTableQueryInfo->tag); // failed to find data with the specified tag value and vnodeId if (!tsBufIsValidElem(&elem)) { @@ -4239,7 +4243,7 @@ int32_t setAdditionalInfo(SQInfo *pQInfo, void* pTable, STableQueryInfo *pTableQ } // keep the cursor info of current meter - pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { @@ -4247,7 +4251,7 @@ int32_t setAdditionalInfo(SQInfo *pQInfo, void* pTable, STableQueryInfo *pTableQ } } else { - tsBufSetCursor(pRuntimeEnv->pTSBuf, &pTableQueryInfo->cur); + tsBufSetCursor(pRuntimeEnv->pTsBuf, &pTableQueryInfo->cur); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); @@ -4346,7 +4350,7 @@ static int32_t doCopyToSData(SQInfo *pQInfo, SResultRowInfo *pResultInfo, int32_ int32_t step = -1; qDebug("QInfo:%p start to copy data from windowResInfo to query buf", pQInfo); - int32_t totalSet = numOfClosedTimeWindow(pResultInfo); + int32_t totalSet = numOfClosedResultRows(pResultInfo); SResultRow** result = pResultInfo->pResult; if (orderType == TSDB_ORDER_ASC) { @@ -4456,11 +4460,23 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc SResultRowInfo * pWindowResInfo = &pTableQueryInfo->windowResInfo; pQuery->pos = QUERY_IS_ASC_QUERY(pQuery)? 
0 : pDataBlockInfo->rows - 1; - if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->groupbyNormalCol) { + if (pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->groupbyNormalCol) { rowwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, pDataBlock); } else { blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); } + + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { + bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); + + // TODO refactor + if ((pTableQueryInfo->lastKey >= pTableQueryInfo->win.ekey && ascQuery) || (pTableQueryInfo->lastKey <= pTableQueryInfo->win.ekey && (!ascQuery))) { + closeAllResultRows(pWindowResInfo); + pWindowResInfo->curIndex = pWindowResInfo->size - 1; + } else { + updateResultRowCurrentIndex(pWindowResInfo, pTableQueryInfo->lastKey, ascQuery); + } + } } bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { @@ -4675,7 +4691,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { - if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { + if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -4770,7 +4786,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) { assert(*start <= pQuery->current->lastKey); // if queried with value filter, do NOT forward query start position - if (pQuery->limit.offset <= 0 || pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL || pRuntimeEnv->pFillInfo != NULL) { + if (pQuery->limit.offset <= 0 || pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL || pRuntimeEnv->pFillInfo != NULL) { return true; } @@ -4975,15 +4991,15 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo pQInfo->vgId = vgId; pRuntimeEnv->pQuery = pQuery; - pRuntimeEnv->pTSBuf = pTsBuf; + pRuntimeEnv->pTsBuf = pTsBuf; pRuntimeEnv->cur.vgroupIndex = -1; pRuntimeEnv->stableQuery = isSTableQuery; pRuntimeEnv->prevGroupId = INT32_MIN; pRuntimeEnv->groupbyNormalCol = isGroupbyNormalCol(pQuery->pGroupbyExpr); if (pTsBuf != NULL) { - int16_t order = (pQuery->order.order == pRuntimeEnv->pTSBuf->tsOrder) ? TSDB_ORDER_ASC : TSDB_ORDER_DESC; - tsBufSetTraverseOrder(pRuntimeEnv->pTSBuf, order); + int16_t order = (pQuery->order.order == pRuntimeEnv->pTsBuf->tsOrder) ? 
TSDB_ORDER_ASC : TSDB_ORDER_DESC; + tsBufSetTraverseOrder(pRuntimeEnv->pTsBuf, order); } int32_t ps = DEFAULT_PAGE_SIZE; @@ -5005,7 +5021,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo type = TSDB_DATA_TYPE_INT; // group id } - code = initWindowResInfo(&pRuntimeEnv->windowResInfo, 8, type); + code = initResultRowInfo(&pRuntimeEnv->windowResInfo, 8, type); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -5025,7 +5041,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, void *tsdb, int32_t vgId, bo type = TSDB_DATA_TYPE_TIMESTAMP; } - code = initWindowResInfo(&pRuntimeEnv->windowResInfo, numOfResultRows, type); + code = initResultRowInfo(&pRuntimeEnv->windowResInfo, numOfResultRows, type); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -5077,7 +5093,7 @@ static FORCE_INLINE void setEnvForEachBlock(SQInfo* pQInfo, STableQueryInfo* pTa TSKEY nextKey = pBlockInfo->window.skey; setIntervalQueryRange(pQInfo, nextKey); - if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) { setAdditionalInfo(pQInfo, pTableQueryInfo->pTable, pTableQueryInfo); } } @@ -5112,7 +5128,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5170,7 +5186,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { SArray *group = GET_TABLEGROUP(pQInfo, 0); STableQueryInfo* pCheckInfo = taosArrayGetP(group, index); - if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->hasTagResults || pRuntimeEnv->pTsBuf != NULL) { setTagVal(pRuntimeEnv, pCheckInfo->pTable, pQInfo->tsdb); } @@ -5208,11 +5224,11 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { longjmp(pRuntimeEnv->env, terrno); } - if (pRuntimeEnv->pTSBuf != NULL) { + if (pRuntimeEnv->pTsBuf != NULL) { tVariant* pTag = &pRuntimeEnv->pCtx[0].tag; if (pRuntimeEnv->cur.vgroupIndex == -1) { - STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, pTag); + STSElem elem = tsBufGetElemStartPos(pRuntimeEnv->pTsBuf, pQInfo->vgId, pTag); // failed to find data with the specified tag value and vnodeId if (!tsBufIsValidElem(&elem)) { if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { @@ -5223,7 +5239,7 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { return false; } else { - STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, @@ -5234,10 +5250,10 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { } } } else { - STSElem elem = tsBufGetElem(pRuntimeEnv->pTSBuf); + STSElem elem = tsBufGetElem(pRuntimeEnv->pTsBuf); if (tVariantCompare(elem.tag, &pRuntimeEnv->pCtx[0].tag) != 0) { - STSElem elem1 = tsBufGetElemStartPos(pRuntimeEnv->pTSBuf, pQInfo->vgId, pTag); + STSElem elem1 = tsBufGetElemStartPos(pRuntimeEnv->pTsBuf, pQInfo->vgId, pTag); // failed to find data with the specified tag value and vnodeId if (!tsBufIsValidElem(&elem1)) { if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { @@ -5248,7 +5264,7 @@ static bool 
multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { return false; } else { - STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, cur.blockIndex, cur.tsIndex); } else { @@ -5257,8 +5273,8 @@ static bool multiTableMultioutputHelper(SQInfo *pQInfo, int32_t index) { } } else { - tsBufSetCursor(pRuntimeEnv->pTSBuf, &pRuntimeEnv->cur); - STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTSBuf); + tsBufSetCursor(pRuntimeEnv->pTsBuf, &pRuntimeEnv->cur); + STSCursor cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { qDebug("QInfo:%p continue scan ts_comp file, tag:%s blockIndex:%d, tsIndex:%d", pQInfo, pTag->pz, cur.blockIndex, cur.tsIndex); } else { @@ -5453,10 +5469,10 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pQInfo->groupIndex = currentGroupIndex; // restore the group index assert(pQuery->rec.rows == pWindowResInfo->size); - clearClosedTimeWindow(pRuntimeEnv); + clearClosedResultRows(pRuntimeEnv, &pRuntimeEnv->windowResInfo); break; } - } else if (pRuntimeEnv->queryWindowIdentical && pRuntimeEnv->pTSBuf == NULL && !isTSCompQuery(pQuery)) { + } else if (pRuntimeEnv->queryWindowIdentical && pRuntimeEnv->pTsBuf == NULL && !isTSCompQuery(pQuery)) { //super table projection query with identical query time range for all tables. SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; resetDefaultResInfoOutputBuf(pRuntimeEnv); @@ -5479,19 +5495,25 @@ static void sequentialTableProcess(SQInfo *pQInfo) { // return; // } + if (pQuery->prjInfo.vgroupLimit != -1) { + assert(pQuery->limit.limit == -1 && pQuery->limit.offset == 0); + } else if (pQuery->limit.limit != -1) { + assert(pQuery->prjInfo.vgroupLimit == -1); + } + bool hasMoreBlock = true; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); SQueryCostInfo *summary = &pRuntimeEnv->summary; while ((hasMoreBlock = tsdbNextDataBlock(pQueryHandle)) == true) { summary->totalBlocks += 1; - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); STableQueryInfo **pTableQueryInfo = - (STableQueryInfo **)taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid)); + (STableQueryInfo **) taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid)); if (pTableQueryInfo == NULL) { break; } @@ -5503,6 +5525,25 @@ static void sequentialTableProcess(SQInfo *pQInfo) { setTagVal(pRuntimeEnv, pQuery->current->pTable, pQInfo->tsdb); } + if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->current->windowResInfo.size > pQuery->prjInfo.vgroupLimit) { + pQuery->current->lastKey = + QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step; + continue; + } + + // it is a super table ordered projection query, check for the number of output for each vgroup + if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->rec.rows >= pQuery->prjInfo.vgroupLimit) { + if (QUERY_IS_ASC_QUERY(pQuery) && blockInfo.window.skey >= pQuery->prjInfo.ts) { + pQuery->current->lastKey = + QUERY_IS_ASC_QUERY(pQuery) ? 
blockInfo.window.ekey + step : blockInfo.window.skey + step; + continue; + } else if (!QUERY_IS_ASC_QUERY(pQuery) && blockInfo.window.ekey <= pQuery->prjInfo.ts) { + pQuery->current->lastKey = + QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step; + continue; + } + } + uint32_t status = 0; SDataStatis *pStatis = NULL; SArray *pDataBlock = NULL; @@ -5520,6 +5561,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } ensureOutputBuffer(pRuntimeEnv, &blockInfo); + int64_t prev = getNumOfResult(pRuntimeEnv); + pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : blockInfo.rows - 1; int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock); @@ -5530,17 +5573,30 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pQuery->rec.rows = getNumOfResult(pRuntimeEnv); + int64_t inc = pQuery->rec.rows - prev; + pQuery->current->windowResInfo.size += (int32_t) inc; + // the flag may be set by tableApplyFunctionsOnBlock, clear it here CLEAR_QUERY_STATUS(pQuery, QUERY_COMPLETED); updateTableIdInfo(pQuery, pQInfo->arrTableIdInfo); - skipResults(pRuntimeEnv); - // the limitation of output result is reached, set the query completed - if (limitResults(pRuntimeEnv)) { - setQueryStatus(pQuery, QUERY_COMPLETED); - SET_STABLE_QUERY_OVER(pQInfo); - break; + if (pQuery->prjInfo.vgroupLimit >= 0) { + if (((pQuery->rec.rows + pQuery->rec.total) < pQuery->prjInfo.vgroupLimit) || ((pQuery->rec.rows + pQuery->rec.total) > pQuery->prjInfo.vgroupLimit && prev < pQuery->prjInfo.vgroupLimit)) { + if (QUERY_IS_ASC_QUERY(pQuery) && pQuery->prjInfo.ts < blockInfo.window.ekey) { + pQuery->prjInfo.ts = blockInfo.window.ekey; + } else if (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->prjInfo.ts > blockInfo.window.skey) { + pQuery->prjInfo.ts = blockInfo.window.skey; + } + } + } else { + // the limitation of output result is reached, set the query completed + skipResults(pRuntimeEnv); + if (limitResults(pRuntimeEnv)) { + setQueryStatus(pQuery, QUERY_COMPLETED); + SET_STABLE_QUERY_OVER(pQInfo); + break; + } } // while the output buffer is full or limit/offset is applied, query may be paused here @@ -5575,14 +5631,14 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } resetDefaultResInfoOutputBuf(pRuntimeEnv); - resetTimeWindowInfo(pRuntimeEnv, &pRuntimeEnv->windowResInfo); + resetResultRowInfo(pRuntimeEnv, &pRuntimeEnv->windowResInfo); SArray *group = GET_TABLEGROUP(pQInfo, 0); assert(taosArrayGetSize(group) == pQInfo->tableqinfoGroupInfo.numOfTables && 1 == taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList)); while (pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) { - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5627,8 +5683,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { break; } - if (pRuntimeEnv->pTSBuf != NULL) { - pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur; + if (pRuntimeEnv->pTsBuf != NULL) { + pRuntimeEnv->cur = pRuntimeEnv->pTsBuf->cur; } } else { @@ -5663,8 +5719,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { finalizeQueryResult(pRuntimeEnv); } - if (pRuntimeEnv->pTSBuf != NULL) { - pRuntimeEnv->cur = pRuntimeEnv->pTSBuf->cur; + if (pRuntimeEnv->pTsBuf != NULL) { + pRuntimeEnv->cur = pRuntimeEnv->pTsBuf->cur; } qDebug("QInfo %p numOfTables:%" PRIu64 ", index:%d, numOfGroups:%" PRIzu ", %" PRId64 @@ -5682,8 +5738,8 @@ static void doSaveContext(SQInfo *pQInfo) { SWAP(pQuery->window.skey, pQuery->window.ekey, 
TSKEY); SWITCH_ORDER(pQuery->order.order); - if (pRuntimeEnv->pTSBuf != NULL) { - SWITCH_ORDER(pRuntimeEnv->pTSBuf->cur.order); + if (pRuntimeEnv->pTsBuf != NULL) { + SWITCH_ORDER(pRuntimeEnv->pTsBuf->cur.order); } STsdbQueryCond cond = createTsdbQueryCond(pQuery, &pQuery->window); @@ -5711,8 +5767,8 @@ static void doRestoreContext(SQInfo *pQInfo) { SWAP(pQuery->window.skey, pQuery->window.ekey, TSKEY); - if (pRuntimeEnv->pTSBuf != NULL) { - SWITCH_ORDER(pRuntimeEnv->pTSBuf->cur.order); + if (pRuntimeEnv->pTsBuf != NULL) { + SWITCH_ORDER(pRuntimeEnv->pTsBuf->cur.order); } switchCtxOrder(pRuntimeEnv); @@ -5730,11 +5786,11 @@ static void doCloseAllTimeWindowAfterScan(SQInfo *pQInfo) { size_t num = taosArrayGetSize(group); for (int32_t j = 0; j < num; ++j) { STableQueryInfo* item = taosArrayGetP(group, j); - closeAllTimeWindow(&item->windowResInfo); + closeAllResultRows(&item->windowResInfo); } } } else { // close results for group result - closeAllTimeWindow(&pQInfo->runtimeEnv.windowResInfo); + closeAllResultRows(&pQInfo->runtimeEnv.windowResInfo); } } @@ -5768,7 +5824,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { qDebug("QInfo:%p master scan completed, elapsed time: %" PRId64 "ms, reverse scan start", pQInfo, el); // query error occurred or query is killed, abort current execution - if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { + if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5789,7 +5845,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { setQueryStatus(pQuery, QUERY_COMPLETED); - if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { + if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); //TODO finalizeQueryResult may cause SEGSEV, since the memory may not allocated yet, add a cleanup function instead longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); @@ -5905,7 +5961,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) pQuery->rec.rows = getNumOfResult(pRuntimeEnv); doSecondaryArithmeticProcess(pQuery); - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5979,13 +6035,13 @@ static void tableIntervalProcessImpl(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) // here we can ignore the records in case of no interpolation // todo handle offset, in case of top/bottom interval query - if ((pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTSBuf != NULL) && pQuery->limit.offset > 0 && + if ((pQuery->numOfFilterCols > 0 || pRuntimeEnv->pTsBuf != NULL) && pQuery->limit.offset > 0 && pQuery->fillType == TSDB_FILL_NONE) { // maxOutput <= 0, means current query does not generate any results - int32_t numOfClosed = numOfClosedTimeWindow(&pRuntimeEnv->windowResInfo); + int32_t numOfClosed = numOfClosedResultRows(&pRuntimeEnv->windowResInfo); int32_t c = (int32_t)(MIN(numOfClosed, pQuery->limit.offset)); - clearFirstNWindowRes(pRuntimeEnv, c); + popFrontResultRow(pRuntimeEnv, &pRuntimeEnv->windowResInfo, c); pQuery->limit.offset -= c; } @@ -6022,7 +6078,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { pQuery->rec.rows = 0; copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - 
clearFirstNWindowRes(pRuntimeEnv, pQInfo->groupIndex); + popFrontResultRow(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pQInfo->groupIndex); } // no result generated, abort @@ -6055,16 +6111,16 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) { // all data scanned, the group by normal column can return if (pRuntimeEnv->groupbyNormalCol) { // todo refactor with merge interval time result // maxOutput <= 0, means current query does not generate any results - int32_t numOfClosed = numOfClosedTimeWindow(&pRuntimeEnv->windowResInfo); + int32_t numOfClosed = numOfClosedResultRows(&pRuntimeEnv->windowResInfo); if ((pQuery->limit.offset > 0 && pQuery->limit.offset < numOfClosed) || pQuery->limit.offset == 0) { // skip offset result rows - clearFirstNWindowRes(pRuntimeEnv, (int32_t) pQuery->limit.offset); + popFrontResultRow(pRuntimeEnv, &pRuntimeEnv->windowResInfo, (int32_t) pQuery->limit.offset); pQuery->rec.rows = 0; pQInfo->groupIndex = 0; copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - clearFirstNWindowRes(pRuntimeEnv, pQInfo->groupIndex); + popFrontResultRow(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pQInfo->groupIndex); doSecondaryArithmeticProcess(pQuery); limitResults(pRuntimeEnv); @@ -6098,7 +6154,7 @@ static void tableQueryImpl(SQInfo *pQInfo) { if (pRuntimeEnv->windowResInfo.size > 0) { copyFromWindowResToSData(pQInfo, &pRuntimeEnv->windowResInfo); - clearFirstNWindowRes(pRuntimeEnv, pQInfo->groupIndex); + popFrontResultRow(pRuntimeEnv, &pRuntimeEnv->windowResInfo, pQInfo->groupIndex); if (pQuery->rec.rows > 0) { qDebug("QInfo:%p %"PRId64" rows returned from group results, total:%"PRId64"", pQInfo, pQuery->rec.rows, pQuery->rec.total); @@ -6284,7 +6340,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pQueryMsg->interval.offset = htobe64(pQueryMsg->interval.offset); pQueryMsg->limit = htobe64(pQueryMsg->limit); pQueryMsg->offset = htobe64(pQueryMsg->offset); - pQueryMsg->tableLimit = htobe64(pQueryMsg->tableLimit); + pQueryMsg->vgroupLimit = htobe64(pQueryMsg->vgroupLimit); pQueryMsg->order = htons(pQueryMsg->order); pQueryMsg->orderColId = htons(pQueryMsg->orderColId); @@ -6885,6 +6941,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou pQuery->fillType = pQueryMsg->fillType; pQuery->numOfTags = pQueryMsg->numOfTags; pQuery->tagColList = pTagCols; + pQuery->prjInfo.vgroupLimit = pQueryMsg->vgroupLimit; + pQuery->prjInfo.ts = (pQueryMsg->order == TSDB_ORDER_ASC)? 
INT64_MIN:INT64_MAX; pQuery->colList = calloc(numOfCols, sizeof(SSingleColumnFilterInfo)); if (pQuery->colList == NULL) { @@ -6961,7 +7019,7 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou pQInfo->runtimeEnv.pResultRowHashTable = taosHashInit(pTableGroupInfo->numOfTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); pQInfo->runtimeEnv.keyBuf = malloc(TSDB_MAX_BYTES_PER_ROW); - pQInfo->runtimeEnv.pool = initResultRowPool(getWindowResultSize(&pQInfo->runtimeEnv)); + pQInfo->runtimeEnv.pool = initResultRowPool(getResultRowSize(&pQInfo->runtimeEnv)); pQInfo->runtimeEnv.prevRow = malloc(POINTER_BYTES * pQuery->numOfCols + srcSize); char* start = POINTER_BYTES * pQuery->numOfCols + (char*) pQInfo->runtimeEnv.prevRow; @@ -7479,7 +7537,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_lock(&pQInfo->lock); pQInfo->dataReady = QUERY_RESULT_READY; - buildRes = (pQInfo->rspContext != NULL); + buildRes = needBuildResAfterQueryComplete(pQInfo); // clear qhandle owner, it must be in the secure area. other thread may run ahead before current, after it is // put into task to be executed. @@ -7488,6 +7546,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_unlock(&pQInfo->lock); + // used in retrieve blocking model. tsem_post(&pQInfo->ready); return buildRes; } @@ -7504,7 +7563,9 @@ bool qTableQuery(qinfo_t qinfo) { return false; } - if (IS_QUERY_KILLED(pQInfo)) { + pQInfo->startExecTs = taosGetTimestampSec(); + + if (isQueryKilled(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); return doBuildResCheck(pQInfo); } @@ -7536,7 +7597,7 @@ bool qTableQuery(qinfo_t qinfo) { } SQuery* pQuery = pRuntimeEnv->pQuery; - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { qDebug("QInfo:%p query is killed", pQInfo); } else if (pQuery->rec.rows == 0) { qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); @@ -7564,30 +7625,31 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex int32_t code = TSDB_CODE_SUCCESS; -#if _NON_BLOCKING_RETRIEVE - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - - pthread_mutex_lock(&pQInfo->lock); - assert(pQInfo->rspContext == NULL); - - if (pQInfo->dataReady == QUERY_RESULT_READY) { - *buildRes = true; - qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%"PRId64", code:%d", pQInfo, pQuery->rowSize, pQuery->rec.rows, - pQInfo->code); - } else { - *buildRes = false; - qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); + if (tsRetrieveBlockingModel) { pQInfo->rspContext = pRspContext; - assert(pQInfo->rspContext != NULL); - } + tsem_wait(&pQInfo->ready); + *buildRes = true; + code = pQInfo->code; + } else { + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - code = pQInfo->code; - pthread_mutex_unlock(&pQInfo->lock); -#else - tsem_wait(&pQInfo->ready); - *buildRes = true; - code = pQInfo->code; -#endif + pthread_mutex_lock(&pQInfo->lock); + + assert(pQInfo->rspContext == NULL); + if (pQInfo->dataReady == QUERY_RESULT_READY) { + *buildRes = true; + qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%" PRId64 ", code:%s", pQInfo, pQuery->rowSize, + pQuery->rec.rows, tstrerror(pQInfo->code)); + } else { + *buildRes = false; + qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); + pQInfo->rspContext = pRspContext; + assert(pQInfo->rspContext != NULL); + } + + code = pQInfo->code; + 
pthread_mutex_unlock(&pQInfo->lock); + } return code; } @@ -7655,7 +7717,7 @@ int32_t qQueryCompleted(qinfo_t qinfo) { } SQuery* pQuery = pQInfo->runtimeEnv.pQuery; - return IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); + return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); } int32_t qKillQuery(qinfo_t qinfo) { @@ -7952,8 +8014,6 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } - const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2 * 1000; - SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo); @@ -7969,7 +8029,8 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } else { TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; - void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_QHANDLE_LIFE_SPAN); + void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), + (getMaximumIdleDurationSec()*1000)); // pthread_mutex_unlock(&pQueryMgmt->lock); return handle; diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c index b6050dddd8..2a40533e90 100644 --- a/src/query/src/qFilterfunc.c +++ b/src/query/src/qFilterfunc.c @@ -21,6 +21,12 @@ #include "tcompare.h" #include "tsqlfunction.h" +#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (4 * FLT_EPSILON)) +#define FLT_GREATER(_x, _y) (!FLT_EQUAL((_x), (_y)) && ((_x) > (_y))) +#define FLT_LESS(_x, _y) (!FLT_EQUAL((_x), (_y)) && ((_x) < (_y))) +#define FLT_GREATEREQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) > (_y))) +#define FLT_LESSEQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) < (_y))) + bool less_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int8_t *)minval < pFilter->filterInfo.upperBndi); } @@ -38,35 +44,35 @@ bool less_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { } bool less_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)minval < pFilter->filterInfo.upperBndd); + return FLT_LESS(*(float*)minval, pFilter->filterInfo.upperBndd); } bool less_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(double *)minval < pFilter->filterInfo.upperBndd); + return *(double *)minval < pFilter->filterInfo.upperBndd; } ////////////////////////////////////////////////////////////////// -bool large_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int8_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_i16(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i16(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int16_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_i32(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i32(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int32_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int64_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)maxval > pFilter->filterInfo.lowerBndd); +bool larger_ds(SColumnFilterElem *pFilter, char 
*minval, char *maxval) {
+  return FLT_GREATER(*(float*)maxval, pFilter->filterInfo.lowerBndd);
 }
 
-bool large_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
+bool larger_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
   return (*(double *)maxval > pFilter->filterInfo.lowerBndd);
 }
 /////////////////////////////////////////////////////////////////////
@@ -88,10 +94,14 @@ bool lessEqual_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
 }
 
 bool lessEqual_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
-  return (*(float *)minval <= pFilter->filterInfo.upperBndd);
+  return FLT_LESSEQUAL(*(float*)minval, pFilter->filterInfo.upperBndd);
 }
 
 bool lessEqual_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
+  if ((fabs(*(double*)minval) - pFilter->filterInfo.upperBndd) <= 2 * DBL_EPSILON) {
+    return true;
+  }
+
   return (*(double *)minval <= pFilter->filterInfo.upperBndd);
 }
 
@@ -113,11 +123,15 @@ bool largeEqual_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
 }
 
 bool largeEqual_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) {
-  return (*(float *)maxval >= pFilter->filterInfo.lowerBndd);
+  return FLT_GREATEREQUAL(*(float*)maxval, pFilter->filterInfo.lowerBndd);
 }
 
 bool largeEqual_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) {
-  return (*(double *)maxval >= pFilter->filterInfo.lowerBndd);
+  if (fabs(*(double *)maxval - pFilter->filterInfo.lowerBndd) <= 2 * DBL_EPSILON) {
+    return true;
+  }
+
+  return (*(double *)maxval - pFilter->filterInfo.lowerBndd > (2 * DBL_EPSILON));
 }
 
 ////////////////////////////////////////////////////////////////////////
@@ -162,10 +176,12 @@ bool equal_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) {
   }
 }
 
+// comparing the user-specified input filter value with the original saved float value
+// may need an increased tolerance to obtain the correct result.
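Editor's note, not part of the patch: the FLT_EQUAL / FLT_GREATER / FLT_LESSEQUAL macros introduced above replace exact float comparisons with an absolute tolerance of 4 * FLT_EPSILON, because a filter value parsed from the user's SQL rarely matches the stored single-precision value bit-for-bit. Below is a minimal, self-contained sketch of that idea; the DEMO_ names and the accumulation loop are hypothetical and only illustrate the style of the macros in qFilterfunc.c.

#include <float.h>
#include <math.h>
#include <stdio.h>

/* same shape as the macros added in qFilterfunc.c, under a DEMO_ prefix */
#define DEMO_FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (4 * FLT_EPSILON))
#define DEMO_FLT_LESS(_x, _y)  (!DEMO_FLT_EQUAL((_x), (_y)) && ((_x) < (_y)))

int main(void) {
  float sum = 0.0f;
  for (int i = 0; i < 10; ++i) {
    sum += 0.1f;  /* accumulates rounding error; sum ends up slightly above 1.0f */
  }

  printf("sum == 1.0f               -> %d\n", sum == 1.0f);               /* 0: exact compare fails */
  printf("DEMO_FLT_EQUAL(sum, 1.0f) -> %d\n", DEMO_FLT_EQUAL(sum, 1.0f)); /* 1: within tolerance */
  printf("DEMO_FLT_LESS(sum, 1.0f)  -> %d\n", DEMO_FLT_LESS(sum, 1.0f));  /* 0: equal, so not less */
  return 0;
}

Note that 4 * FLT_EPSILON is an absolute tolerance, so it is most meaningful for values near 1.0; that trade-off comes from the patch itself, not from this sketch.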
bool equal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { if (*(float *)minval == *(float *)maxval) { - return (fabs(*(float *)minval - pFilter->filterInfo.lowerBndd) <= FLT_EPSILON); - } else { /* range filter */ + return FLT_EQUAL(*(float*)minval, pFilter->filterInfo.lowerBndd); + } else { // range filter assert(*(float *)minval < *(float *)maxval); return *(float *)minval <= pFilter->filterInfo.lowerBndd && *(float *)maxval >= pFilter->filterInfo.lowerBndd; } @@ -173,10 +189,9 @@ bool equal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { bool equal_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) { if (*(double *)minval == *(double *)maxval) { - return (*(double *)minval == pFilter->filterInfo.lowerBndd); - } else { /* range filter */ + return (fabs(*(double *)minval - pFilter->filterInfo.lowerBndd) <= 2 * DBL_EPSILON); + } else { // range filter assert(*(double *)minval < *(double *)maxval); - return *(double *)minval <= pFilter->filterInfo.lowerBndi && *(double *)maxval >= pFilter->filterInfo.lowerBndi; } } @@ -255,7 +270,7 @@ bool nequal_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { bool nequal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { if (*(float *)minval == *(float *)maxval) { - return (*(float *)minval != pFilter->filterInfo.lowerBndd); + return !FLT_EQUAL(*(float *)minval, pFilter->filterInfo.lowerBndd); } return true; @@ -364,7 +379,8 @@ bool rangeFilter_i64_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) //////////////////////////////////////////////////////////////////////// bool rangeFilter_ds_ii(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)minval <= pFilter->filterInfo.upperBndd && *(float *)maxval >= pFilter->filterInfo.lowerBndd); + return FLT_LESSEQUAL(*(float *)minval, pFilter->filterInfo.upperBndd) && + FLT_GREATEREQUAL(*(float *)maxval, pFilter->filterInfo.lowerBndd); } bool rangeFilter_ds_ee(SColumnFilterElem *pFilter, char *minval, char *maxval) { @@ -376,7 +392,8 @@ bool rangeFilter_ds_ie(SColumnFilterElem *pFilter, char *minval, char *maxval) { } bool rangeFilter_ds_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)minval <= pFilter->filterInfo.upperBndd && *(float *)maxval > pFilter->filterInfo.lowerBndd); + return FLT_GREATER(*(float *)maxval, pFilter->filterInfo.lowerBndd) && + FLT_LESSEQUAL(*(float *)minval, pFilter->filterInfo.upperBndd); } ////////////////////////////////////////////////////////////////////////// @@ -400,7 +417,7 @@ bool rangeFilter_dd_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) { bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_i8, - large_i8, + larger_i8, equal_i8, lessEqual_i8, largeEqual_i8, @@ -413,7 +430,7 @@ bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_i16, - large_i16, + larger_i16, equal_i16, lessEqual_i16, largeEqual_i16, @@ -426,7 +443,7 @@ bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_i32, - large_i32, + larger_i32, equal_i32, lessEqual_i32, largeEqual_i32, @@ -439,7 +456,7 @@ bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_i64, - large_i64, + 
larger_i64, equal_i64, lessEqual_i64, largeEqual_i64, @@ -452,7 +469,7 @@ bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_ds, - large_ds, + larger_ds, equal_ds, lessEqual_ds, largeEqual_ds, @@ -465,7 +482,7 @@ bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_dd, - large_dd, + larger_dd, equal_dd, lessEqual_dd, largeEqual_dd, @@ -551,7 +568,7 @@ bool (*rangeFilterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *max __filter_func_t* getRangeFilterFuncArray(int32_t type) { switch(type) { - case TSDB_DATA_TYPE_BOOL: return rangeFilterFunc_i8; + case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: return rangeFilterFunc_i8; case TSDB_DATA_TYPE_SMALLINT: return rangeFilterFunc_i16; case TSDB_DATA_TYPE_INT: return rangeFilterFunc_i32; @@ -565,7 +582,7 @@ __filter_func_t* getRangeFilterFuncArray(int32_t type) { __filter_func_t* getValueFilterFuncArray(int32_t type) { switch(type) { - case TSDB_DATA_TYPE_BOOL: return filterFunc_i8; + case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: return filterFunc_i8; case TSDB_DATA_TYPE_SMALLINT: return filterFunc_i16; case TSDB_DATA_TYPE_INT: return filterFunc_i32; diff --git a/src/query/src/qParserImpl.c b/src/query/src/qParserImpl.c index 7d71d9f7f1..5e5bc63675 100644 --- a/src/query/src/qParserImpl.c +++ b/src/query/src/qParserImpl.c @@ -71,13 +71,13 @@ abort_parse: return sqlInfo; } -tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken *pToken) { +tSQLExprList *tSqlExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken *pToken) { if (pList == NULL) { pList = calloc(1, sizeof(tSQLExprList)); } if (pList->nAlloc <= pList->nExpr) { - pList->nAlloc = (pList->nAlloc << 1) + 4; + pList->nAlloc = (pList->nAlloc << 1u) + 4; pList->a = realloc(pList->a, pList->nAlloc * sizeof(pList->a[0])); if (pList->a == 0) { pList->nExpr = pList->nAlloc = 0; @@ -87,7 +87,7 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken assert(pList->a != 0); if (pNode || pToken) { - struct tSQLExprItem *pItem = &pList->a[pList->nExpr++]; + struct tSqlExprItem *pItem = &pList->a[pList->nExpr++]; memset(pItem, 0, sizeof(*pItem)); pItem->pNode = pNode; if (pToken) { // set the as clause @@ -101,62 +101,62 @@ tSQLExprList *tSQLExprListAppend(tSQLExprList *pList, tSQLExpr *pNode, SStrToken return pList; } -void tSQLExprListDestroy(tSQLExprList *pList) { +void tSqlExprListDestroy(tSQLExprList *pList) { if (pList == NULL) return; for (int32_t i = 0; i < pList->nExpr; ++i) { if (pList->a[i].aliasName != NULL) { free(pList->a[i].aliasName); } - tSQLExprDestroy(pList->a[i].pNode); + tSqlExprDestroy(pList->a[i].pNode); } free(pList->a); free(pList); } -tSQLExpr *tSQLExprIdValueCreate(SStrToken *pToken, int32_t optrType) { - tSQLExpr *pSQLExpr = calloc(1, sizeof(tSQLExpr)); +tSQLExpr *tSqlExprIdValueCreate(SStrToken *pToken, int32_t optrType) { + tSQLExpr *pSqlExpr = calloc(1, sizeof(tSQLExpr)); if (pToken != NULL) { - pSQLExpr->token = *pToken; + pSqlExpr->token = *pToken; } if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) { toTSDBType(pToken->type); - tVariantCreate(&pSQLExpr->val, pToken); - pSQLExpr->nSQLOptr = optrType; + tVariantCreate(&pSqlExpr->val, pToken); + pSqlExpr->nSQLOptr = optrType; } else if (optrType 
== TK_NOW) { // use microsecond by default - pSQLExpr->val.i64Key = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO); - pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT; - pSQLExpr->nSQLOptr = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond + pSqlExpr->val.i64Key = taosGetTimestamp(TSDB_TIME_PRECISION_MICRO); + pSqlExpr->val.nType = TSDB_DATA_TYPE_BIGINT; + pSqlExpr->nSQLOptr = TK_TIMESTAMP; // TK_TIMESTAMP used to denote the time value is in microsecond } else if (optrType == TK_VARIABLE) { - int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSQLExpr->val.i64Key); + int32_t ret = parseAbsoluteDuration(pToken->z, pToken->n, &pSqlExpr->val.i64Key); if (ret != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; } - pSQLExpr->val.nType = TSDB_DATA_TYPE_BIGINT; - pSQLExpr->nSQLOptr = TK_TIMESTAMP; + pSqlExpr->val.nType = TSDB_DATA_TYPE_BIGINT; + pSqlExpr->nSQLOptr = TK_TIMESTAMP; } else { // it must be the column name (tk_id) if it is not the number assert(optrType == TK_ID || optrType == TK_ALL); if (pToken != NULL) { - pSQLExpr->colInfo = *pToken; + pSqlExpr->colInfo = *pToken; } - pSQLExpr->nSQLOptr = optrType; + pSqlExpr->nSQLOptr = optrType; } - return pSQLExpr; + return pSqlExpr; } /* * pList is the parameters for function with id(optType) * function name is denoted by pFunctionToken */ -tSQLExpr *tSQLExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType) { +tSQLExpr *tSqlExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SStrToken *endToken, int32_t optType) { if (pFuncToken == NULL) return NULL; tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr)); @@ -177,7 +177,7 @@ tSQLExpr *tSQLExprCreateFunction(tSQLExprList *pList, SStrToken *pFuncToken, SSt * create binary expression in this procedure * if the expr is arithmetic, calculate the result and set it to tSQLExpr Object */ -tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { +tSQLExpr *tSqlExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { tSQLExpr *pExpr = calloc(1, sizeof(tSQLExpr)); if (pLeft != NULL && pRight != NULL && (optrType != TK_IN)) { @@ -223,8 +223,8 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { } } - tSQLExprDestroy(pLeft); - tSQLExprDestroy(pRight); + tSqlExprDestroy(pLeft); + tSqlExprDestroy(pRight); } else if ((pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_INTEGER) || (pLeft->nSQLOptr == TK_INTEGER && pRight->nSQLOptr == TK_FLOAT) || (pLeft->nSQLOptr == TK_FLOAT && pRight->nSQLOptr == TK_FLOAT)) { @@ -257,8 +257,8 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { } } - tSQLExprDestroy(pLeft); - tSQLExprDestroy(pRight); + tSqlExprDestroy(pLeft); + tSqlExprDestroy(pRight); } else { pExpr->nSQLOptr = optrType; @@ -288,7 +288,7 @@ tSQLExpr *tSQLExprCreate(tSQLExpr *pLeft, tSQLExpr *pRight, int32_t optrType) { return pExpr; } -void tSQLExprNodeDestroy(tSQLExpr *pExpr) { +void tSqlExprNodeDestroy(tSQLExpr *pExpr) { if (pExpr == NULL) { return; } @@ -297,20 +297,20 @@ void tSQLExprNodeDestroy(tSQLExpr *pExpr) { tVariantDestroy(&pExpr->val); } - tSQLExprListDestroy(pExpr->pParam); + tSqlExprListDestroy(pExpr->pParam); free(pExpr); } -void tSQLExprDestroy(tSQLExpr *pExpr) { +void tSqlExprDestroy(tSQLExpr *pExpr) { if (pExpr == NULL) { return; } - tSQLExprDestroy(pExpr->pLeft); - tSQLExprDestroy(pExpr->pRight); + tSqlExprDestroy(pExpr->pLeft); + tSqlExprDestroy(pExpr->pRight); - tSQLExprNodeDestroy(pExpr); + 
tSqlExprNodeDestroy(pExpr); } SArray *tVariantListAppendToken(SArray *pList, SStrToken *pToken, uint8_t order) { @@ -366,13 +366,13 @@ SArray *tVariantListInsert(SArray *pList, tVariant *pVar, uint8_t sortOrder, int return pList; } -void setDBName(SStrToken *pCpxName, SStrToken *pDB) { - pCpxName->type = pDB->type; - pCpxName->z = pDB->z; - pCpxName->n = pDB->n; +void setDbName(SStrToken *pCpxName, SStrToken *pDb) { + pCpxName->type = pDb->type; + pCpxName->z = pDb->z; + pCpxName->n = pDb->n; } -void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) { +void tSqlSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) { int32_t maxLen = sizeof(pField->name) / sizeof(pField->name[0]); // truncate the column name @@ -387,10 +387,10 @@ void tSQLSetColumnInfo(TAOS_FIELD *pField, SStrToken *pName, TAOS_FIELD *pType) pField->bytes = pType->bytes; } -void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { +void tSqlSetColumnType(TAOS_FIELD *pField, SStrToken *type) { pField->type = -1; - for (int8_t i = 0; i < tListLen(tDataTypeDesc); ++i) { + for (int32_t i = 0; i < tListLen(tDataTypeDesc); ++i) { if ((strncasecmp(type->z, tDataTypeDesc[i].aName, tDataTypeDesc[i].nameLen) == 0) && (type->n == tDataTypeDesc[i].nameLen)) { pField->type = i; @@ -438,7 +438,7 @@ void tSQLSetColumnType(TAOS_FIELD *pField, SStrToken *type) { /* * extract the select info out of sql string */ -SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, +SQuerySQL *tSetQuerySqlElems(SStrToken *pSelectToken, tSQLExprList *pSelection, SArray *pFrom, tSQLExpr *pWhere, SArray *pGroupby, SArray *pSortOrder, SIntervalVal *pInterval, SStrToken *pSliding, SArray *pFill, SLimitVal *pLimit, SLimitVal *pGLimit) { assert(pSelection != NULL); @@ -474,21 +474,28 @@ SQuerySQL *tSetQuerySQLElems(SStrToken *pSelectToken, tSQLExprList *pSelection, return pQuery; } -void freeVariant(void *pItem) { +static void freeVariant(void *pItem) { tVariantListItem* p = (tVariantListItem*) pItem; tVariantDestroy(&p->pVar); } +void freeCreateTableInfo(void* p) { + SCreatedTableInfo* pInfo = (SCreatedTableInfo*) p; + taosArrayDestroyEx(pInfo->pTagVals, freeVariant); + tfree(pInfo->fullname); + tfree(pInfo->tagdata.data); +} + void doDestroyQuerySql(SQuerySQL *pQuerySql) { if (pQuerySql == NULL) { return; } - - tSQLExprListDestroy(pQuerySql->pSelection); + + tSqlExprListDestroy(pQuerySql->pSelection); pQuerySql->pSelection = NULL; - - tSQLExprDestroy(pQuerySql->pWhere); + + tSqlExprDestroy(pQuerySql->pWhere); pQuerySql->pWhere = NULL; taosArrayDestroyEx(pQuerySql->pSortOrder, freeVariant); @@ -519,31 +526,30 @@ void destroyAllSelectClause(SSubclauseInfo *pClause) { tfree(pClause->pClause); } -SCreateTableSQL *tSetCreateSQLElems(SArray *pCols, SArray *pTags, SStrToken *pStableName, - SArray *pTagVals, SQuerySQL *pSelect, int32_t type) { +SCreateTableSQL *tSetCreateSqlElems(SArray *pCols, SArray *pTags, SQuerySQL *pSelect, int32_t type) { SCreateTableSQL *pCreate = calloc(1, sizeof(SCreateTableSQL)); switch (type) { case TSQL_CREATE_TABLE: { pCreate->colInfo.pColumns = pCols; - assert(pTagVals == NULL && pTags == NULL); + assert(pTags == NULL); break; } case TSQL_CREATE_STABLE: { pCreate->colInfo.pColumns = pCols; pCreate->colInfo.pTagColumns = pTags; - assert(pTagVals == NULL && pTags != NULL && pCols != NULL); - break; - } - case TSQL_CREATE_TABLE_FROM_STABLE: { - pCreate->usingInfo.pTagVals = pTagVals; - pCreate->usingInfo.stableName = *pStableName; + 
assert(pTags != NULL && pCols != NULL); break; } case TSQL_CREATE_STREAM: { pCreate->pSelect = pSelect; break; } + + case TSQL_CREATE_TABLE_FROM_STABLE: { + assert(0); + } + default: assert(false); } @@ -552,10 +558,22 @@ SCreateTableSQL *tSetCreateSQLElems(SArray *pCols, SArray *pTags, SStrToken *pSt return pCreate; } -SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, SArray *pCols, SArray *pVals, int32_t type) { +SCreatedTableInfo createNewChildTableInfo(SStrToken *pTableName, SArray *pTagVals, SStrToken *pToken, SStrToken* igExists) { + SCreatedTableInfo info; + memset(&info, 0, sizeof(SCreatedTableInfo)); + + info.name = *pToken; + info.pTagVals = pTagVals; + info.stableName = *pTableName; + info.igExist = (igExists->n > 0)? 1:0; + + return info; +} + +SAlterTableSQL *tAlterTableSqlElems(SStrToken *pTableName, SArray *pCols, SArray *pVals, int32_t type) { SAlterTableSQL *pAlterTable = calloc(1, sizeof(SAlterTableSQL)); - pAlterTable->name = *pMeterName; + pAlterTable->name = *pTableName; pAlterTable->type = type; if (type == TSDB_ALTER_TABLE_ADD_COLUMN || type == TSDB_ALTER_TABLE_ADD_TAG_COLUMN) { @@ -573,24 +591,29 @@ SAlterTableSQL *tAlterTableSQLElems(SStrToken *pMeterName, SArray *pCols, SArray return pAlterTable; } -void SQLInfoDestroy(SSqlInfo *pInfo) { +void* destroyCreateTableSql(SCreateTableSQL* pCreate) { + doDestroyQuerySql(pCreate->pSelect); + + taosArrayDestroy(pCreate->colInfo.pColumns); + taosArrayDestroy(pCreate->colInfo.pTagColumns); + + taosArrayDestroyEx(pCreate->childTableInfo, freeCreateTableInfo); + tfree(pCreate); + + return NULL; +} + +void SqlInfoDestroy(SSqlInfo *pInfo) { if (pInfo == NULL) return; if (pInfo->type == TSDB_SQL_SELECT) { destroyAllSelectClause(&pInfo->subclauseInfo); } else if (pInfo->type == TSDB_SQL_CREATE_TABLE) { - SCreateTableSQL *pCreateTableInfo = pInfo->pCreateTableInfo; - doDestroyQuerySql(pCreateTableInfo->pSelect); - - taosArrayDestroy(pCreateTableInfo->colInfo.pColumns); - taosArrayDestroy(pCreateTableInfo->colInfo.pTagColumns); - - taosArrayDestroyEx(pCreateTableInfo->usingInfo.pTagVals, freeVariant); - tfree(pInfo->pCreateTableInfo); + pInfo->pCreateTableInfo = destroyCreateTableSql(pInfo->pCreateTableInfo); } else if (pInfo->type == TSDB_SQL_ALTER_TABLE) { taosArrayDestroyEx(pInfo->pAlterInfo->varList, freeVariant); taosArrayDestroy(pInfo->pAlterInfo->pAddColumns); - + tfree(pInfo->pAlterInfo->tagData.data); tfree(pInfo->pAlterInfo); } else { if (pInfo->pDCLInfo != NULL && pInfo->pDCLInfo->nAlloc > 0) { @@ -624,7 +647,7 @@ SSubclauseInfo* setSubclause(SSubclauseInfo* pSubclause, void *pSqlExprInfo) { return pSubclause; } -SSqlInfo* setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pMeterName, int32_t type) { +SSqlInfo*setSqlInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pTableName, int32_t type) { pInfo->type = type; if (type == TSDB_SQL_SELECT) { @@ -634,8 +657,8 @@ SSqlInfo* setSQLInfo(SSqlInfo *pInfo, void *pSqlExprInfo, SStrToken *pMeterName, pInfo->pCreateTableInfo = pSqlExprInfo; } - if (pMeterName != NULL) { - pInfo->pCreateTableInfo->name = *pMeterName; + if (pTableName != NULL) { + pInfo->pCreateTableInfo->name = *pTableName; } return pInfo; @@ -653,14 +676,14 @@ SSubclauseInfo* appendSelectClause(SSubclauseInfo *pQueryInfo, void *pSubclause) return pQueryInfo; } -void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pMeterName, SStrToken *pIfNotExists) { - pInfo->pCreateTableInfo->name = *pMeterName; +void setCreatedTableName(SSqlInfo *pInfo, SStrToken *pTableNameToken, SStrToken *pIfNotExists) { + 
pInfo->pCreateTableInfo->name = *pTableNameToken; pInfo->pCreateTableInfo->existCheck = (pIfNotExists->n != 0); } void tTokenListBuyMoreSpace(tDCLSQL *pTokenList) { if (pTokenList->nAlloc <= pTokenList->nTokens) { // - pTokenList->nAlloc = (pTokenList->nAlloc << 1) + 4; + pTokenList->nAlloc = (pTokenList->nAlloc << 1u) + 4; pTokenList->a = realloc(pTokenList->a, pTokenList->nAlloc * sizeof(pTokenList->a[0])); if (pTokenList->a == 0) { pTokenList->nTokens = pTokenList->nAlloc = 0; @@ -695,7 +718,7 @@ void setDCLSQLElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) { va_end(va); } -void setDropDBTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck) { +void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SStrToken* pToken, SStrToken* existsCheck) { pInfo->type = type; pInfo->pDCLInfo = tTokenListAppend(pInfo->pDCLInfo, pToken); pInfo->pDCLInfo->existsCheck = (existsCheck->n == 1); @@ -735,7 +758,7 @@ void setCreateDBSQL(SSqlInfo *pInfo, int32_t type, SStrToken *pToken, SCreateDBI pInfo->pDCLInfo->dbOpt.ignoreExists = pIgExists->n; // sql.y has: ifnotexists(X) ::= IF NOT EXISTS. {X.n = 1;} } -void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctSQL *pAcctInfo) { +void setCreateAcctSql(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken *pPwd, SCreateAcctSQL *pAcctInfo) { pInfo->type = type; if (pInfo->pDCLInfo == NULL) { pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); @@ -751,7 +774,7 @@ void setCreateAcctSQL(SSqlInfo *pInfo, int32_t type, SStrToken *pName, SStrToken } } -void setCreateUserSQL(SSqlInfo *pInfo, SStrToken *pName, SStrToken *pPasswd) { +void setCreateUserSql(SSqlInfo *pInfo, SStrToken *pName, SStrToken *pPasswd) { pInfo->type = TSDB_SQL_CREATE_USER; if (pInfo->pDCLInfo == NULL) { pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); @@ -763,7 +786,7 @@ void setCreateUserSQL(SSqlInfo *pInfo, SStrToken *pName, SStrToken *pPasswd) { pInfo->pDCLInfo->user.passwd = *pPasswd; } -void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken* pPwd, SStrToken *pPrivilege) { +void setAlterUserSql(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken* pPwd, SStrToken *pPrivilege) { pInfo->type = TSDB_SQL_ALTER_USER; if (pInfo->pDCLInfo == NULL) { pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); @@ -788,7 +811,7 @@ void setAlterUserSQL(SSqlInfo *pInfo, int16_t type, SStrToken *pName, SStrToken* } } -void setKillSQL(SSqlInfo *pInfo, int32_t type, SStrToken *ip) { +void setKillSql(SSqlInfo *pInfo, int32_t type, SStrToken *ip) { pInfo->type = type; if (pInfo->pDCLInfo == NULL) { pInfo->pDCLInfo = calloc(1, sizeof(tDCLSQL)); diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index 3bdc0d477f..51125d62b9 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -234,7 +234,13 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { } int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { - double v = GET_DOUBLE_VAL(value); + double v = 0; + if (pBucket->type == TSDB_DATA_TYPE_FLOAT) { + v = GET_FLOAT_VAL(value); + } else { + v = GET_DOUBLE_VAL(value); + } + int32_t index = -1; if (pBucket->range.dMinVal == DBL_MAX) { diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 6c845b012f..65ac60e91f 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -43,7 +43,7 @@ int32_t getOutputInterResultBufSize(SQuery* pQuery) { return size; } -int32_t initWindowResInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t 
type) { +int32_t initResultRowInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t type) { pResultRowInfo->capacity = size; pResultRowInfo->type = type; @@ -59,10 +59,11 @@ int32_t initWindowResInfo(SResultRowInfo *pResultRowInfo, int32_t size, int16_t return TSDB_CODE_SUCCESS; } -void cleanupTimeWindowInfo(SResultRowInfo *pResultRowInfo) { +void cleanupResultRowInfo(SResultRowInfo *pResultRowInfo) { if (pResultRowInfo == NULL) { return; } + if (pResultRowInfo->capacity == 0) { assert(pResultRowInfo->pResult == NULL); return; @@ -77,7 +78,7 @@ void cleanupTimeWindowInfo(SResultRowInfo *pResultRowInfo) { tfree(pResultRowInfo->pResult); } -void resetTimeWindowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo) { +void resetResultRowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo) { if (pResultRowInfo == NULL || pResultRowInfo->capacity == 0) { return; } @@ -100,13 +101,12 @@ void resetTimeWindowInfo(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultR pResultRowInfo->prevSKey = TSKEY_INITIAL_VAL; } -void clearFirstNWindowRes(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) { - SResultRowInfo *pResultRowInfo = &pRuntimeEnv->windowResInfo; +void popFrontResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo, int32_t num) { if (pResultRowInfo == NULL || pResultRowInfo->capacity == 0 || pResultRowInfo->size == 0 || num == 0) { return; } - int32_t numOfClosed = numOfClosedTimeWindow(pResultRowInfo); + int32_t numOfClosed = numOfClosedResultRows(pResultRowInfo); assert(num >= 0 && num <= numOfClosed); int16_t type = pResultRowInfo->type; @@ -159,17 +159,16 @@ void clearFirstNWindowRes(SQueryRuntimeEnv *pRuntimeEnv, int32_t num) { pResultRowInfo->curIndex = -1; } -void clearClosedTimeWindow(SQueryRuntimeEnv *pRuntimeEnv) { - SResultRowInfo *pResultRowInfo = &pRuntimeEnv->windowResInfo; +void clearClosedResultRows(SQueryRuntimeEnv *pRuntimeEnv, SResultRowInfo *pResultRowInfo) { if (pResultRowInfo == NULL || pResultRowInfo->capacity == 0 || pResultRowInfo->size == 0) { return; } - int32_t numOfClosed = numOfClosedTimeWindow(pResultRowInfo); - clearFirstNWindowRes(pRuntimeEnv, numOfClosed); + int32_t numOfClosed = numOfClosedResultRows(pResultRowInfo); + popFrontResultRow(pRuntimeEnv, &pRuntimeEnv->windowResInfo, numOfClosed); } -int32_t numOfClosedTimeWindow(SResultRowInfo *pResultRowInfo) { +int32_t numOfClosedResultRows(SResultRowInfo *pResultRowInfo) { int32_t i = 0; while (i < pResultRowInfo->size && pResultRowInfo->pResult[i]->closed) { ++i; @@ -178,7 +177,7 @@ int32_t numOfClosedTimeWindow(SResultRowInfo *pResultRowInfo) { return i; } -void closeAllTimeWindow(SResultRowInfo *pResultRowInfo) { +void closeAllResultRows(SResultRowInfo *pResultRowInfo) { assert(pResultRowInfo->size >= 0 && pResultRowInfo->capacity >= pResultRowInfo->size); for (int32_t i = 0; i < pResultRowInfo->size; ++i) { @@ -195,7 +194,7 @@ void closeAllTimeWindow(SResultRowInfo *pResultRowInfo) { * the last qualified time stamp in case of sliding query, which the sliding time is not equalled to the interval time. 
* NOTE: remove redundant, only when the result set order equals to traverse order */ -void removeRedundantWindow(SResultRowInfo *pResultRowInfo, TSKEY lastKey, int32_t order) { +void removeRedundantResultRows(SResultRowInfo *pResultRowInfo, TSKEY lastKey, int32_t order) { assert(pResultRowInfo->size >= 0 && pResultRowInfo->capacity >= pResultRowInfo->size); if (pResultRowInfo->size <= 1) { return; @@ -224,27 +223,27 @@ void removeRedundantWindow(SResultRowInfo *pResultRowInfo, TSKEY lastKey, int32_ } } -bool isWindowResClosed(SResultRowInfo *pResultRowInfo, int32_t slot) { +bool isResultRowClosed(SResultRowInfo *pResultRowInfo, int32_t slot) { return (getResultRow(pResultRowInfo, slot)->closed == true); } -void closeTimeWindow(SResultRowInfo *pResultRowInfo, int32_t slot) { +void closeResultRow(SResultRowInfo *pResultRowInfo, int32_t slot) { getResultRow(pResultRowInfo, slot)->closed = true; } -void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pWindowRes, int16_t type) { - if (pWindowRes == NULL) { +void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pResultRow, int16_t type) { + if (pResultRow == NULL) { return; } // the result does not put into the SDiskbasedResultBuf, ignore it. - if (pWindowRes->pageId >= 0) { - tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pWindowRes->pageId); + if (pResultRow->pageId >= 0) { + tFilePage *page = getResBufPage(pRuntimeEnv->pResultBuf, pResultRow->pageId); for (int32_t i = 0; i < pRuntimeEnv->pQuery->numOfOutput; ++i) { - SResultRowCellInfo *pResultInfo = &pWindowRes->pCellInfo[i]; + SResultRowCellInfo *pResultInfo = &pResultRow->pCellInfo[i]; - char * s = getPosInResultPage(pRuntimeEnv, i, pWindowRes, page); + char * s = getPosInResultPage(pRuntimeEnv, i, pResultRow, page); size_t size = pRuntimeEnv->pQuery->pExpr1[i].bytes; memset(s, 0, size); @@ -252,15 +251,15 @@ void clearResultRow(SQueryRuntimeEnv *pRuntimeEnv, SResultRow *pWindowRes, int16 } } - pWindowRes->numOfRows = 0; - pWindowRes->pageId = -1; - pWindowRes->rowId = -1; - pWindowRes->closed = false; + pResultRow->numOfRows = 0; + pResultRow->pageId = -1; + pResultRow->rowId = -1; + pResultRow->closed = false; if (type == TSDB_DATA_TYPE_BINARY || type == TSDB_DATA_TYPE_NCHAR) { - tfree(pWindowRes->key); + tfree(pResultRow->key); } else { - pWindowRes->win = TSWINDOW_INITIALIZER; + pResultRow->win = TSWINDOW_INITIALIZER; } } @@ -310,7 +309,7 @@ SResultRowCellInfo* getResultCell(SQueryRuntimeEnv* pRuntimeEnv, const SResultRo return (SResultRowCellInfo*)((char*) pRow->pCellInfo + pRuntimeEnv->rowCellInfoOffset[index]); } -size_t getWindowResultSize(SQueryRuntimeEnv* pRuntimeEnv) { +size_t getResultRowSize(SQueryRuntimeEnv* pRuntimeEnv) { return (pRuntimeEnv->pQuery->numOfOutput * sizeof(SResultRowCellInfo)) + pRuntimeEnv->interBufSize + sizeof(SResultRow); } diff --git a/src/query/src/sql.c b/src/query/src/sql.c index d4e2b7f5f4..fe82db72a6 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -97,26 +97,27 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 274 +#define YYNOCODE 276 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - int yy46; - tSQLExpr* yy64; - tVariant yy134; - SCreateAcctSQL yy149; - SArray* yy165; - int64_t yy207; - SLimitVal yy216; - TAOS_FIELD yy223; - SSubclauseInfo* yy231; - SCreateDBInfo yy268; - tSQLExprList* yy290; - SQuerySQL* yy414; - SCreateTableSQL* yy470; - 
SIntervalVal yy532; + int yy42; + SQuerySQL* yy84; + SCreatedTableInfo yy96; + SArray* yy131; + SCreateDBInfo yy148; + TAOS_FIELD yy163; + SLimitVal yy284; + SCreateAcctSQL yy309; + tSQLExpr* yy420; + int64_t yy459; + tSQLExprList* yy478; + SSubclauseInfo* yy513; + tVariant yy516; + SIntervalVal yy530; + SCreateTableSQL* yy538; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -126,17 +127,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 253 -#define YYNRULE 233 +#define YYNSTATE 257 +#define YYNRULE 236 #define YYNTOKEN 207 -#define YY_MAX_SHIFT 252 -#define YY_MIN_SHIFTREDUCE 420 -#define YY_MAX_SHIFTREDUCE 652 -#define YY_ERROR_ACTION 653 -#define YY_ACCEPT_ACTION 654 -#define YY_NO_ACTION 655 -#define YY_MIN_REDUCE 656 -#define YY_MAX_REDUCE 888 +#define YY_MAX_SHIFT 256 +#define YY_MIN_SHIFTREDUCE 426 +#define YY_MAX_SHIFTREDUCE 661 +#define YY_ERROR_ACTION 662 +#define YY_ACCEPT_ACTION 663 +#define YY_NO_ACTION 664 +#define YY_MIN_REDUCE 665 +#define YY_MAX_REDUCE 900 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -202,224 +203,226 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (571) +#define YY_ACTTAB_COUNT (579) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 108, 463, 141, 11, 654, 252, 802, 463, 140, 464, - /* 10 */ 162, 165, 876, 35, 36, 464, 37, 38, 159, 250, - /* 20 */ 170, 29, 141, 463, 206, 41, 39, 43, 40, 173, - /* 30 */ 780, 464, 875, 34, 33, 145, 141, 32, 31, 30, - /* 40 */ 35, 36, 791, 37, 38, 164, 876, 170, 29, 780, - /* 50 */ 21, 206, 41, 39, 43, 40, 191, 829, 799, 201, - /* 60 */ 34, 33, 21, 21, 32, 31, 30, 421, 422, 423, - /* 70 */ 424, 425, 426, 427, 428, 429, 430, 431, 432, 251, - /* 80 */ 35, 36, 181, 37, 38, 532, 776, 170, 29, 238, - /* 90 */ 246, 206, 41, 39, 43, 40, 174, 175, 777, 777, - /* 100 */ 34, 33, 872, 56, 32, 31, 30, 176, 871, 36, - /* 110 */ 780, 37, 38, 227, 226, 170, 29, 791, 17, 206, - /* 120 */ 41, 39, 43, 40, 108, 26, 870, 606, 34, 33, - /* 130 */ 78, 160, 32, 31, 30, 238, 157, 16, 218, 245, - /* 140 */ 244, 217, 216, 215, 243, 214, 242, 241, 240, 213, - /* 150 */ 239, 755, 103, 743, 744, 745, 746, 747, 748, 749, - /* 160 */ 750, 751, 752, 753, 754, 756, 37, 38, 229, 177, - /* 170 */ 170, 29, 224, 223, 206, 41, 39, 43, 40, 203, - /* 180 */ 62, 60, 8, 34, 33, 63, 118, 32, 31, 30, - /* 190 */ 169, 619, 27, 12, 610, 184, 613, 158, 616, 778, - /* 200 */ 169, 619, 188, 187, 610, 194, 613, 108, 616, 153, - /* 210 */ 169, 619, 561, 108, 610, 154, 613, 18, 616, 90, - /* 220 */ 89, 148, 166, 167, 34, 33, 205, 143, 32, 31, - /* 230 */ 30, 697, 166, 167, 131, 144, 564, 41, 39, 43, - /* 240 */ 40, 706, 166, 167, 131, 34, 33, 146, 17, 32, - /* 250 */ 31, 30, 32, 31, 30, 26, 16, 207, 245, 244, - /* 260 */ 21, 587, 588, 243, 828, 242, 241, 240, 698, 239, - /* 270 */ 61, 131, 76, 80, 147, 190, 102, 151, 85, 88, - /* 280 */ 79, 760, 156, 26, 758, 759, 82, 21, 42, 761, - /* 290 */ 556, 763, 764, 762, 225, 765, 777, 193, 42, 618, - /* 300 */ 249, 248, 96, 574, 121, 122, 608, 105, 42, 618, - /* 310 */ 70, 66, 69, 578, 617, 168, 579, 46, 152, 618, - /* 320 */ 14, 230, 548, 777, 617, 545, 638, 546, 150, 547, - /* 330 */ 13, 135, 133, 612, 617, 615, 139, 93, 92, 91, - /* 340 */ 620, 611, 609, 614, 13, 
47, 538, 622, 50, 552, - /* 350 */ 46, 553, 537, 178, 179, 3, 22, 211, 75, 74, - /* 360 */ 149, 22, 10, 9, 48, 51, 142, 550, 885, 551, - /* 370 */ 87, 86, 101, 99, 779, 839, 838, 171, 835, 834, - /* 380 */ 172, 801, 771, 228, 806, 793, 808, 104, 821, 119, - /* 390 */ 820, 117, 120, 708, 212, 137, 24, 221, 705, 222, - /* 400 */ 26, 192, 100, 884, 72, 883, 881, 123, 726, 25, - /* 410 */ 573, 23, 138, 695, 49, 81, 693, 83, 84, 691, - /* 420 */ 790, 690, 195, 161, 199, 549, 57, 52, 180, 132, - /* 430 */ 688, 687, 686, 685, 684, 134, 682, 109, 680, 678, - /* 440 */ 44, 676, 674, 136, 204, 202, 58, 822, 200, 198, - /* 450 */ 196, 220, 77, 28, 231, 232, 233, 235, 652, 234, - /* 460 */ 236, 237, 247, 209, 183, 53, 651, 182, 185, 186, - /* 470 */ 64, 67, 155, 650, 643, 189, 193, 689, 558, 94, - /* 480 */ 683, 675, 126, 125, 727, 129, 124, 127, 128, 95, - /* 490 */ 130, 1, 114, 110, 111, 775, 2, 55, 59, 116, - /* 500 */ 112, 113, 115, 575, 106, 163, 197, 5, 580, 107, - /* 510 */ 6, 65, 621, 19, 4, 20, 15, 208, 623, 7, - /* 520 */ 210, 504, 500, 498, 497, 496, 493, 467, 219, 68, - /* 530 */ 45, 71, 73, 22, 534, 533, 531, 488, 54, 486, - /* 540 */ 478, 484, 480, 482, 476, 474, 505, 503, 502, 501, - /* 550 */ 499, 495, 494, 46, 465, 436, 434, 656, 655, 655, - /* 560 */ 655, 655, 655, 655, 655, 655, 655, 655, 655, 97, - /* 570 */ 98, + /* 0 */ 143, 469, 663, 256, 469, 162, 254, 12, 814, 470, + /* 10 */ 887, 142, 470, 37, 38, 147, 39, 40, 803, 233, + /* 20 */ 173, 31, 884, 469, 209, 43, 41, 45, 42, 64, + /* 30 */ 883, 470, 163, 36, 35, 105, 143, 34, 33, 32, + /* 40 */ 37, 38, 803, 39, 40, 168, 888, 173, 31, 110, + /* 50 */ 790, 209, 43, 41, 45, 42, 194, 780, 22, 782, + /* 60 */ 36, 35, 811, 882, 34, 33, 32, 427, 428, 429, + /* 70 */ 430, 431, 432, 433, 434, 435, 436, 437, 438, 255, + /* 80 */ 37, 38, 184, 39, 40, 538, 224, 173, 31, 143, + /* 90 */ 197, 209, 43, 41, 45, 42, 165, 23, 167, 888, + /* 100 */ 36, 35, 242, 57, 34, 33, 32, 179, 841, 38, + /* 110 */ 204, 39, 40, 231, 230, 173, 31, 49, 792, 209, + /* 120 */ 43, 41, 45, 42, 253, 252, 98, 615, 36, 35, + /* 130 */ 178, 781, 34, 33, 32, 788, 50, 17, 222, 249, + /* 140 */ 248, 221, 220, 219, 247, 218, 246, 245, 244, 217, + /* 150 */ 243, 764, 792, 752, 753, 754, 755, 756, 757, 758, + /* 160 */ 759, 760, 761, 762, 763, 765, 39, 40, 110, 180, + /* 170 */ 173, 31, 228, 227, 209, 43, 41, 45, 42, 34, + /* 180 */ 33, 32, 9, 36, 35, 65, 120, 34, 33, 32, + /* 190 */ 172, 628, 18, 13, 619, 110, 622, 210, 625, 28, + /* 200 */ 172, 628, 110, 159, 619, 187, 622, 224, 625, 155, + /* 210 */ 172, 628, 191, 190, 619, 156, 622, 66, 625, 92, + /* 220 */ 91, 150, 169, 170, 36, 35, 208, 840, 34, 33, + /* 230 */ 32, 160, 169, 170, 617, 250, 573, 43, 41, 45, + /* 240 */ 42, 706, 169, 170, 133, 36, 35, 783, 18, 34, + /* 250 */ 33, 32, 206, 104, 61, 28, 17, 792, 249, 248, + /* 260 */ 28, 62, 145, 247, 23, 246, 245, 244, 146, 243, + /* 270 */ 618, 715, 78, 82, 133, 193, 565, 23, 87, 90, + /* 280 */ 81, 769, 158, 196, 767, 768, 84, 631, 44, 770, + /* 290 */ 23, 772, 773, 771, 148, 774, 80, 149, 44, 627, + /* 300 */ 176, 242, 789, 707, 3, 124, 133, 23, 44, 627, + /* 310 */ 72, 68, 71, 177, 626, 789, 596, 597, 570, 627, + /* 320 */ 805, 63, 557, 19, 626, 554, 229, 555, 789, 556, + /* 330 */ 171, 137, 135, 29, 626, 153, 583, 95, 94, 93, + /* 340 */ 107, 154, 587, 234, 588, 789, 48, 647, 15, 52, + /* 350 */ 152, 14, 629, 181, 182, 141, 14, 621, 620, 624, + /* 360 */ 623, 546, 89, 88, 212, 24, 53, 547, 24, 151, + /* 370 */ 4, 48, 561, 144, 562, 77, 76, 11, 10, 897, + /* 380 */ 559, 
851, 560, 103, 101, 791, 850, 174, 847, 846, + /* 390 */ 175, 232, 813, 833, 818, 820, 106, 832, 121, 122, + /* 400 */ 28, 123, 102, 119, 717, 216, 139, 26, 225, 714, + /* 410 */ 195, 582, 226, 896, 74, 895, 893, 125, 735, 27, + /* 420 */ 198, 25, 164, 202, 140, 558, 704, 83, 702, 85, + /* 430 */ 86, 700, 699, 183, 54, 134, 697, 696, 695, 694, + /* 440 */ 693, 136, 691, 689, 46, 687, 685, 802, 683, 138, + /* 450 */ 51, 58, 59, 834, 207, 205, 199, 203, 201, 30, + /* 460 */ 79, 235, 236, 237, 238, 239, 240, 241, 251, 161, + /* 470 */ 661, 214, 215, 186, 185, 660, 189, 157, 188, 69, + /* 480 */ 659, 652, 192, 60, 196, 166, 567, 698, 56, 128, + /* 490 */ 96, 692, 736, 126, 130, 97, 127, 129, 131, 132, + /* 500 */ 684, 1, 584, 787, 2, 108, 200, 117, 113, 111, + /* 510 */ 112, 114, 115, 116, 589, 118, 109, 5, 6, 20, + /* 520 */ 21, 630, 8, 7, 632, 211, 16, 213, 67, 510, + /* 530 */ 65, 506, 504, 503, 502, 499, 473, 223, 24, 70, + /* 540 */ 47, 73, 540, 539, 537, 55, 494, 492, 484, 490, + /* 550 */ 75, 486, 488, 482, 480, 511, 509, 508, 507, 505, + /* 560 */ 501, 500, 48, 471, 442, 440, 99, 665, 664, 664, + /* 570 */ 664, 664, 664, 664, 664, 664, 664, 664, 100, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 211, 1, 262, 262, 208, 209, 211, 1, 262, 9, - /* 10 */ 228, 271, 272, 13, 14, 9, 16, 17, 210, 211, - /* 20 */ 20, 21, 262, 1, 24, 25, 26, 27, 28, 228, - /* 30 */ 248, 9, 272, 33, 34, 262, 262, 37, 38, 39, - /* 40 */ 13, 14, 246, 16, 17, 271, 272, 20, 21, 248, - /* 50 */ 211, 24, 25, 26, 27, 28, 260, 268, 263, 270, - /* 60 */ 33, 34, 211, 211, 37, 38, 39, 45, 46, 47, + /* 0 */ 264, 1, 208, 209, 1, 210, 211, 264, 211, 9, + /* 10 */ 274, 264, 9, 13, 14, 264, 16, 17, 248, 211, + /* 20 */ 20, 21, 264, 1, 24, 25, 26, 27, 28, 216, + /* 30 */ 264, 9, 262, 33, 34, 211, 264, 37, 38, 39, + /* 40 */ 13, 14, 248, 16, 17, 273, 274, 20, 21, 211, + /* 50 */ 242, 24, 25, 26, 27, 28, 262, 244, 245, 246, + /* 60 */ 33, 34, 265, 264, 37, 38, 39, 45, 46, 47, /* 70 */ 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, - /* 80 */ 13, 14, 60, 16, 17, 5, 247, 20, 21, 78, - /* 90 */ 228, 24, 25, 26, 27, 28, 245, 245, 247, 247, - /* 100 */ 33, 34, 262, 103, 37, 38, 39, 66, 262, 14, - /* 110 */ 248, 16, 17, 33, 34, 20, 21, 246, 99, 24, - /* 120 */ 25, 26, 27, 28, 211, 106, 262, 100, 33, 34, - /* 130 */ 73, 260, 37, 38, 39, 78, 262, 85, 86, 87, + /* 80 */ 13, 14, 60, 16, 17, 5, 76, 20, 21, 264, + /* 90 */ 266, 24, 25, 26, 27, 28, 228, 211, 273, 274, + /* 100 */ 33, 34, 78, 103, 37, 38, 39, 66, 270, 14, + /* 110 */ 272, 16, 17, 33, 34, 20, 21, 104, 250, 24, + /* 120 */ 25, 26, 27, 28, 63, 64, 65, 100, 33, 34, + /* 130 */ 228, 0, 37, 38, 39, 249, 123, 85, 86, 87, /* 140 */ 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - /* 150 */ 98, 227, 211, 229, 230, 231, 232, 233, 234, 235, + /* 150 */ 98, 227, 250, 229, 230, 231, 232, 233, 234, 235, /* 160 */ 236, 237, 238, 239, 240, 241, 16, 17, 211, 128, - /* 170 */ 20, 21, 131, 132, 24, 25, 26, 27, 28, 266, - /* 180 */ 249, 268, 99, 33, 34, 102, 103, 37, 38, 39, - /* 190 */ 1, 2, 261, 44, 5, 127, 7, 262, 9, 242, - /* 200 */ 1, 2, 134, 135, 5, 264, 7, 211, 9, 60, - /* 210 */ 1, 2, 104, 211, 5, 66, 7, 109, 9, 70, - /* 220 */ 71, 72, 33, 34, 33, 34, 37, 262, 37, 38, - /* 230 */ 39, 215, 33, 34, 218, 262, 37, 25, 26, 27, - /* 240 */ 28, 215, 33, 34, 218, 33, 34, 262, 99, 37, - /* 250 */ 38, 39, 37, 38, 39, 106, 85, 15, 87, 88, - /* 260 */ 211, 116, 117, 92, 268, 94, 95, 96, 215, 98, - /* 270 */ 268, 218, 61, 62, 262, 126, 99, 262, 67, 68, - /* 280 */ 69, 227, 133, 106, 230, 231, 75, 211, 99, 
235, - /* 290 */ 100, 237, 238, 239, 245, 241, 247, 107, 99, 110, - /* 300 */ 63, 64, 65, 100, 61, 62, 1, 104, 99, 110, - /* 310 */ 67, 68, 69, 100, 125, 59, 100, 104, 262, 110, - /* 320 */ 104, 245, 2, 247, 125, 5, 100, 7, 262, 9, - /* 330 */ 104, 61, 62, 5, 125, 7, 262, 67, 68, 69, - /* 340 */ 100, 5, 37, 7, 104, 104, 100, 105, 104, 5, - /* 350 */ 104, 7, 100, 33, 34, 99, 104, 100, 129, 130, - /* 360 */ 262, 104, 129, 130, 123, 121, 262, 5, 248, 7, - /* 370 */ 73, 74, 61, 62, 248, 243, 243, 243, 243, 243, - /* 380 */ 243, 211, 244, 243, 211, 246, 211, 211, 269, 211, - /* 390 */ 269, 250, 211, 211, 211, 211, 211, 211, 211, 211, - /* 400 */ 106, 246, 59, 211, 211, 211, 211, 211, 211, 211, - /* 410 */ 110, 211, 211, 211, 122, 211, 211, 211, 211, 211, - /* 420 */ 259, 211, 265, 265, 265, 105, 212, 120, 211, 211, - /* 430 */ 211, 211, 211, 211, 211, 211, 211, 258, 211, 211, - /* 440 */ 119, 211, 211, 211, 114, 118, 212, 212, 113, 112, - /* 450 */ 111, 76, 84, 124, 83, 49, 80, 53, 5, 82, - /* 460 */ 81, 79, 76, 212, 5, 212, 5, 136, 136, 5, - /* 470 */ 216, 216, 212, 5, 86, 127, 107, 212, 100, 213, - /* 480 */ 212, 212, 220, 224, 226, 222, 225, 223, 221, 213, - /* 490 */ 219, 217, 253, 257, 256, 246, 214, 108, 104, 251, - /* 500 */ 255, 254, 252, 100, 99, 1, 99, 115, 100, 99, - /* 510 */ 115, 73, 100, 104, 99, 104, 99, 101, 105, 99, - /* 520 */ 101, 9, 5, 5, 5, 5, 5, 77, 15, 73, - /* 530 */ 16, 130, 130, 104, 5, 5, 100, 5, 99, 5, - /* 540 */ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 550 */ 5, 5, 5, 104, 77, 59, 58, 0, 273, 273, - /* 560 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 21, - /* 570 */ 21, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 580 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 590 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 600 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 610 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 620 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 630 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 640 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 650 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 660 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 670 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 680 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 690 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 700 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 710 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 720 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 730 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 740 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 750 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 760 */ 273, 273, 273, 273, 273, 273, 273, 273, 273, 273, - /* 770 */ 273, 273, 273, 273, 273, 273, 273, 273, + /* 170 */ 20, 21, 131, 132, 24, 25, 26, 27, 28, 37, + /* 180 */ 38, 39, 99, 33, 34, 102, 103, 37, 38, 39, + /* 190 */ 1, 2, 99, 44, 5, 211, 7, 15, 9, 106, + /* 200 */ 1, 2, 211, 264, 5, 127, 7, 76, 9, 60, + /* 210 */ 1, 2, 134, 135, 5, 66, 7, 216, 9, 70, + /* 220 */ 71, 72, 33, 34, 33, 34, 37, 270, 37, 38, + /* 230 */ 39, 264, 33, 34, 1, 228, 37, 25, 26, 27, + /* 240 */ 28, 215, 33, 34, 218, 33, 34, 246, 99, 37, + /* 250 */ 38, 39, 268, 99, 270, 106, 85, 250, 87, 88, + /* 260 */ 106, 270, 264, 92, 211, 94, 95, 96, 264, 98, + /* 270 */ 37, 215, 61, 62, 218, 126, 100, 211, 67, 68, + /* 280 */ 69, 227, 133, 107, 230, 231, 75, 105, 99, 235, + /* 290 */ 
211, 237, 238, 239, 264, 241, 73, 264, 99, 110, + /* 300 */ 247, 78, 249, 215, 61, 62, 218, 211, 99, 110, + /* 310 */ 67, 68, 69, 247, 125, 249, 116, 117, 104, 110, + /* 320 */ 248, 251, 2, 109, 125, 5, 247, 7, 249, 9, + /* 330 */ 59, 61, 62, 263, 125, 264, 100, 67, 68, 69, + /* 340 */ 104, 264, 100, 247, 100, 249, 104, 100, 104, 104, + /* 350 */ 264, 104, 100, 33, 34, 264, 104, 5, 5, 7, + /* 360 */ 7, 100, 73, 74, 100, 104, 121, 100, 104, 264, + /* 370 */ 99, 104, 5, 264, 7, 129, 130, 129, 130, 250, + /* 380 */ 5, 243, 7, 61, 62, 250, 243, 243, 243, 243, + /* 390 */ 243, 243, 211, 271, 211, 211, 211, 271, 211, 211, + /* 400 */ 106, 211, 59, 252, 211, 211, 211, 211, 211, 211, + /* 410 */ 248, 110, 211, 211, 211, 211, 211, 211, 211, 211, + /* 420 */ 267, 211, 267, 267, 211, 105, 211, 211, 211, 211, + /* 430 */ 211, 211, 211, 211, 120, 211, 211, 211, 211, 211, + /* 440 */ 211, 211, 211, 211, 119, 211, 211, 261, 211, 211, + /* 450 */ 122, 212, 212, 212, 114, 118, 111, 113, 112, 124, + /* 460 */ 84, 83, 49, 80, 82, 53, 81, 79, 76, 212, + /* 470 */ 5, 212, 212, 5, 136, 5, 5, 212, 136, 216, + /* 480 */ 5, 86, 127, 104, 107, 1, 100, 212, 108, 220, + /* 490 */ 213, 212, 226, 225, 221, 213, 224, 223, 222, 219, + /* 500 */ 212, 217, 100, 248, 214, 99, 99, 254, 258, 260, + /* 510 */ 259, 257, 256, 255, 100, 253, 99, 99, 115, 104, + /* 520 */ 104, 100, 99, 115, 105, 101, 99, 101, 73, 9, + /* 530 */ 102, 5, 5, 5, 5, 5, 77, 15, 104, 73, + /* 540 */ 16, 130, 5, 5, 100, 99, 5, 5, 5, 5, + /* 550 */ 130, 5, 5, 5, 5, 5, 5, 5, 5, 5, + /* 560 */ 5, 5, 104, 77, 59, 58, 21, 0, 275, 275, + /* 570 */ 275, 275, 275, 275, 275, 275, 275, 275, 21, 275, + /* 580 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 590 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 600 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 610 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 620 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 630 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 640 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 650 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 660 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 670 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 680 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 690 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 700 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 710 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 720 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 730 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 740 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 750 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 760 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 770 */ 275, 275, 275, 275, 275, 275, 275, 275, 275, 275, + /* 780 */ 275, 275, 275, 275, 275, 275, }; -#define YY_SHIFT_COUNT (252) +#define YY_SHIFT_COUNT (256) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (557) +#define YY_SHIFT_MAX (567) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 149, 52, 171, 189, 209, 6, 6, 6, 6, 6, - /* 10 */ 6, 0, 22, 209, 320, 320, 320, 19, 6, 6, - /* 20 */ 6, 6, 6, 57, 11, 11, 571, 199, 209, 209, + /* 0 */ 149, 52, 171, 10, 189, 209, 3, 3, 3, 3, + /* 10 */ 3, 3, 0, 22, 209, 320, 320, 320, 93, 3, + /* 20 */ 3, 3, 131, 3, 3, 223, 24, 24, 579, 199, /* 30 */ 209, 209, 209, 209, 209, 209, 209, 209, 209, 209, - /* 40 */ 209, 209, 209, 209, 209, 320, 320, 80, 80, 80, - 
/* 50 */ 80, 80, 80, 83, 80, 177, 6, 6, 6, 6, - /* 60 */ 145, 145, 108, 6, 6, 6, 6, 6, 6, 6, - /* 70 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - /* 80 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - /* 90 */ 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - /* 100 */ 6, 6, 294, 343, 343, 300, 300, 300, 343, 307, - /* 110 */ 292, 321, 330, 327, 335, 337, 339, 329, 294, 343, - /* 120 */ 343, 375, 375, 343, 368, 371, 406, 376, 377, 404, - /* 130 */ 379, 382, 343, 386, 343, 386, 343, 571, 571, 27, - /* 140 */ 67, 67, 67, 95, 150, 212, 212, 212, 211, 191, - /* 150 */ 191, 191, 191, 243, 270, 41, 68, 215, 215, 237, - /* 160 */ 190, 203, 213, 216, 226, 240, 328, 336, 305, 256, - /* 170 */ 242, 241, 244, 246, 252, 257, 229, 233, 344, 362, - /* 180 */ 297, 311, 453, 331, 459, 461, 332, 464, 468, 388, - /* 190 */ 348, 369, 378, 389, 394, 403, 405, 504, 407, 408, - /* 200 */ 410, 409, 392, 411, 395, 412, 415, 413, 417, 416, - /* 210 */ 420, 419, 438, 512, 517, 518, 519, 520, 521, 450, - /* 220 */ 513, 456, 514, 401, 402, 429, 529, 530, 436, 439, - /* 230 */ 429, 532, 534, 535, 536, 537, 538, 539, 540, 541, - /* 240 */ 542, 543, 544, 545, 546, 547, 449, 477, 548, 549, - /* 250 */ 496, 498, 557, + /* 40 */ 209, 209, 209, 209, 209, 209, 209, 320, 320, 80, + /* 50 */ 80, 80, 80, 80, 80, 80, 154, 3, 3, 3, + /* 60 */ 3, 200, 200, 214, 3, 3, 3, 3, 3, 3, + /* 70 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 80 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 90 */ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, + /* 100 */ 3, 3, 3, 3, 294, 343, 343, 301, 301, 301, + /* 110 */ 343, 314, 328, 325, 340, 337, 344, 346, 345, 335, + /* 120 */ 294, 343, 343, 343, 10, 343, 376, 378, 413, 383, + /* 130 */ 382, 412, 385, 388, 343, 392, 343, 392, 343, 579, + /* 140 */ 579, 27, 67, 67, 67, 95, 150, 212, 212, 212, + /* 150 */ 211, 191, 191, 191, 191, 243, 270, 41, 78, 142, + /* 160 */ 142, 83, 61, 176, 236, 242, 244, 247, 252, 352, + /* 170 */ 353, 233, 271, 182, 13, 245, 261, 264, 267, 246, + /* 180 */ 248, 367, 375, 289, 322, 465, 338, 468, 470, 342, + /* 190 */ 471, 475, 395, 355, 377, 386, 380, 379, 402, 406, + /* 200 */ 484, 407, 414, 417, 415, 403, 416, 408, 421, 418, + /* 210 */ 419, 423, 424, 427, 426, 428, 455, 520, 526, 527, + /* 220 */ 528, 529, 530, 459, 522, 466, 524, 411, 420, 434, + /* 230 */ 537, 538, 444, 446, 434, 541, 542, 543, 544, 546, + /* 240 */ 547, 548, 549, 550, 551, 552, 553, 554, 555, 556, + /* 250 */ 458, 486, 545, 557, 505, 507, 567, }; -#define YY_REDUCE_COUNT (138) -#define YY_REDUCE_MIN (-260) -#define YY_REDUCE_MAX (282) +#define YY_REDUCE_COUNT (140) +#define YY_REDUCE_MIN (-264) +#define YY_REDUCE_MAX (290) static const short yy_reduce_ofst[] = { - /* 0 */ -204, -76, 54, -260, -226, -211, -87, -149, -148, 49, - /* 10 */ 76, -205, -192, -240, -218, -199, -138, -129, -59, -4, - /* 20 */ 2, -43, -161, 16, 26, 53, -69, -259, -254, -227, - /* 30 */ -160, -154, -136, -126, -65, -35, -27, -15, 12, 15, - /* 40 */ 56, 66, 74, 98, 104, 120, 126, 132, 133, 134, - /* 50 */ 135, 136, 137, 138, 140, 139, 170, 173, 175, 176, - /* 60 */ 119, 121, 141, 178, 181, 182, 183, 184, 185, 186, - /* 70 */ 187, 188, 192, 193, 194, 195, 196, 197, 198, 200, - /* 80 */ 201, 202, 204, 205, 206, 207, 208, 210, 217, 218, - /* 90 */ 219, 220, 221, 222, 223, 224, 225, 227, 228, 230, - /* 100 */ 231, 232, 155, 214, 234, 157, 158, 159, 235, 161, - /* 110 */ 179, 236, 238, 245, 247, 239, 250, 248, 249, 251, - /* 120 */ 253, 254, 255, 260, 258, 261, 259, 262, 264, 267, - /* 130 */ 263, 271, 265, 266, 268, 276, 269, 274, 282, + /* 0 */ -206, -76, 54, -187, -228, -175, -162, -16, 53, 66, + /* 10 */ 
79, 96, -203, -205, -264, -132, -98, 7, -230, -176, + /* 20 */ -43, -9, 1, -192, -114, 26, 56, 88, 70, -257, + /* 30 */ -253, -249, -242, -234, -201, -61, -33, -2, 4, 30, + /* 40 */ 33, 71, 77, 86, 91, 105, 109, 129, 135, 138, + /* 50 */ 143, 144, 145, 146, 147, 148, 72, 181, 183, 184, + /* 60 */ 185, 122, 126, 151, 187, 188, 190, 193, 194, 195, + /* 70 */ 196, 197, 198, 201, 202, 203, 204, 205, 206, 207, + /* 80 */ 208, 210, 213, 215, 216, 217, 218, 219, 220, 221, + /* 90 */ 222, 224, 225, 226, 227, 228, 229, 230, 231, 232, + /* 100 */ 234, 235, 237, 238, 162, 239, 240, 153, 155, 156, + /* 110 */ 241, 186, 249, 251, 250, 254, 256, 258, 253, 262, + /* 120 */ 255, 257, 259, 260, 263, 265, 266, 268, 272, 269, + /* 130 */ 274, 273, 276, 280, 275, 277, 279, 282, 288, 284, + /* 140 */ 290, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 653, 707, 696, 878, 878, 653, 653, 653, 653, 653, - /* 10 */ 653, 803, 671, 878, 653, 653, 653, 653, 653, 653, - /* 20 */ 653, 653, 653, 709, 709, 709, 798, 653, 653, 653, - /* 30 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, - /* 40 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, - /* 50 */ 653, 653, 653, 653, 653, 653, 653, 805, 807, 653, - /* 60 */ 825, 825, 796, 653, 653, 653, 653, 653, 653, 653, - /* 70 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, - /* 80 */ 653, 694, 653, 692, 653, 653, 653, 653, 653, 653, - /* 90 */ 653, 653, 653, 653, 653, 653, 681, 653, 653, 653, - /* 100 */ 653, 653, 653, 673, 673, 653, 653, 653, 673, 832, - /* 110 */ 836, 830, 818, 826, 817, 813, 812, 840, 653, 673, - /* 120 */ 673, 704, 704, 673, 725, 723, 721, 713, 719, 715, - /* 130 */ 717, 711, 673, 702, 673, 702, 673, 742, 757, 653, - /* 140 */ 841, 877, 831, 867, 866, 873, 865, 864, 653, 860, - /* 150 */ 861, 863, 862, 653, 653, 653, 653, 869, 868, 653, - /* 160 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 843, - /* 170 */ 653, 837, 833, 653, 653, 653, 653, 653, 653, 653, - /* 180 */ 653, 653, 653, 653, 653, 653, 653, 653, 653, 653, - /* 190 */ 653, 795, 653, 653, 804, 653, 653, 653, 653, 653, - /* 200 */ 653, 827, 653, 819, 653, 653, 653, 653, 653, 653, - /* 210 */ 653, 772, 653, 653, 653, 653, 653, 653, 653, 653, - /* 220 */ 653, 653, 653, 653, 653, 882, 653, 653, 653, 766, - /* 230 */ 880, 653, 653, 653, 653, 653, 653, 653, 653, 653, - /* 240 */ 653, 653, 653, 653, 653, 653, 728, 653, 679, 677, - /* 250 */ 653, 669, 653, + /* 0 */ 662, 716, 705, 713, 890, 890, 662, 662, 662, 662, + /* 10 */ 662, 662, 815, 680, 890, 662, 662, 662, 662, 662, + /* 20 */ 662, 662, 713, 662, 662, 718, 718, 718, 810, 662, + /* 30 */ 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, + /* 40 */ 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, + /* 50 */ 662, 662, 662, 662, 662, 662, 662, 662, 817, 819, + /* 60 */ 662, 837, 837, 808, 662, 662, 662, 662, 662, 662, + /* 70 */ 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, + /* 80 */ 662, 662, 662, 703, 662, 701, 662, 662, 662, 662, + /* 90 */ 662, 662, 662, 662, 662, 662, 662, 662, 690, 662, + /* 100 */ 662, 662, 662, 662, 662, 682, 682, 662, 662, 662, + /* 110 */ 682, 844, 848, 842, 830, 838, 829, 825, 824, 852, + /* 120 */ 662, 682, 682, 682, 713, 682, 734, 732, 730, 722, + /* 130 */ 728, 724, 726, 720, 682, 711, 682, 711, 682, 751, + /* 140 */ 766, 662, 853, 889, 843, 879, 878, 885, 877, 876, + /* 150 */ 662, 872, 873, 875, 874, 662, 662, 662, 662, 881, + /* 160 */ 880, 662, 662, 662, 662, 662, 662, 662, 662, 662, + /* 170 */ 662, 662, 855, 662, 849, 845, 662, 662, 662, 662, + /* 180 */ 662, 662, 662, 662, 662, 662, 
662, 662, 662, 662, + /* 190 */ 662, 662, 662, 662, 807, 662, 662, 816, 662, 662, + /* 200 */ 662, 662, 662, 662, 839, 662, 831, 662, 662, 662, + /* 210 */ 662, 662, 784, 662, 662, 662, 662, 662, 662, 662, + /* 220 */ 662, 662, 662, 662, 662, 662, 662, 662, 662, 894, + /* 230 */ 662, 662, 662, 775, 892, 662, 662, 662, 662, 662, + /* 240 */ 662, 662, 662, 662, 662, 662, 662, 662, 662, 662, + /* 250 */ 737, 662, 688, 686, 662, 678, 662, }; /********** End of lemon-generated parsing tables *****************************/ @@ -977,34 +980,36 @@ static const char *const yyTokenName[] = { /* 242 */ "typename", /* 243 */ "signed", /* 244 */ "create_table_args", - /* 245 */ "columnlist", - /* 246 */ "select", - /* 247 */ "column", - /* 248 */ "tagitem", - /* 249 */ "selcollist", - /* 250 */ "from", - /* 251 */ "where_opt", - /* 252 */ "interval_opt", - /* 253 */ "fill_opt", - /* 254 */ "sliding_opt", - /* 255 */ "groupby_opt", - /* 256 */ "orderby_opt", - /* 257 */ "having_opt", - /* 258 */ "slimit_opt", - /* 259 */ "limit_opt", - /* 260 */ "union", - /* 261 */ "sclp", - /* 262 */ "expr", - /* 263 */ "as", - /* 264 */ "tablelist", - /* 265 */ "tmvar", - /* 266 */ "sortlist", - /* 267 */ "sortitem", - /* 268 */ "item", - /* 269 */ "sortorder", - /* 270 */ "grouplist", - /* 271 */ "exprlist", - /* 272 */ "expritem", + /* 245 */ "create_table_list", + /* 246 */ "create_from_stable", + /* 247 */ "columnlist", + /* 248 */ "select", + /* 249 */ "column", + /* 250 */ "tagitem", + /* 251 */ "selcollist", + /* 252 */ "from", + /* 253 */ "where_opt", + /* 254 */ "interval_opt", + /* 255 */ "fill_opt", + /* 256 */ "sliding_opt", + /* 257 */ "groupby_opt", + /* 258 */ "orderby_opt", + /* 259 */ "having_opt", + /* 260 */ "slimit_opt", + /* 261 */ "limit_opt", + /* 262 */ "union", + /* 263 */ "sclp", + /* 264 */ "expr", + /* 265 */ "as", + /* 266 */ "tablelist", + /* 267 */ "tmvar", + /* 268 */ "sortlist", + /* 269 */ "sortitem", + /* 270 */ "item", + /* 271 */ "sortorder", + /* 272 */ "grouplist", + /* 273 */ "exprlist", + /* 274 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1127,124 +1132,127 @@ static const char *const yyRuleName[] = { /* 112 */ "signed ::= INTEGER", /* 113 */ "signed ::= PLUS INTEGER", /* 114 */ "signed ::= MINUS INTEGER", - /* 115 */ "cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args", - /* 116 */ "create_table_args ::= LP columnlist RP", - /* 117 */ "create_table_args ::= LP columnlist RP TAGS LP columnlist RP", - /* 118 */ "create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP", - /* 119 */ "create_table_args ::= AS select", - /* 120 */ "columnlist ::= columnlist COMMA column", - /* 121 */ "columnlist ::= column", - /* 122 */ "column ::= ids typename", - /* 123 */ "tagitemlist ::= tagitemlist COMMA tagitem", - /* 124 */ "tagitemlist ::= tagitem", - /* 125 */ "tagitem ::= INTEGER", - /* 126 */ "tagitem ::= FLOAT", - /* 127 */ "tagitem ::= STRING", - /* 128 */ "tagitem ::= BOOL", - /* 129 */ "tagitem ::= NULL", - /* 130 */ "tagitem ::= MINUS INTEGER", - /* 131 */ "tagitem ::= MINUS FLOAT", - /* 132 */ "tagitem ::= PLUS INTEGER", - /* 133 */ "tagitem ::= PLUS FLOAT", - /* 134 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", - /* 135 */ "union ::= select", - /* 136 */ "union ::= LP union RP", - /* 137 */ "union ::= union UNION ALL select", - /* 138 */ "union ::= union UNION ALL LP select RP", - /* 139 */ "cmd ::= union", - /* 140 */ "select ::= SELECT 
selcollist", - /* 141 */ "sclp ::= selcollist COMMA", - /* 142 */ "sclp ::=", - /* 143 */ "selcollist ::= sclp expr as", - /* 144 */ "selcollist ::= sclp STAR", - /* 145 */ "as ::= AS ids", - /* 146 */ "as ::= ids", - /* 147 */ "as ::=", - /* 148 */ "from ::= FROM tablelist", - /* 149 */ "tablelist ::= ids cpxName", - /* 150 */ "tablelist ::= ids cpxName ids", - /* 151 */ "tablelist ::= tablelist COMMA ids cpxName", - /* 152 */ "tablelist ::= tablelist COMMA ids cpxName ids", - /* 153 */ "tmvar ::= VARIABLE", - /* 154 */ "interval_opt ::= INTERVAL LP tmvar RP", - /* 155 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", - /* 156 */ "interval_opt ::=", - /* 157 */ "fill_opt ::=", - /* 158 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", - /* 159 */ "fill_opt ::= FILL LP ID RP", - /* 160 */ "sliding_opt ::= SLIDING LP tmvar RP", - /* 161 */ "sliding_opt ::=", - /* 162 */ "orderby_opt ::=", - /* 163 */ "orderby_opt ::= ORDER BY sortlist", - /* 164 */ "sortlist ::= sortlist COMMA item sortorder", - /* 165 */ "sortlist ::= item sortorder", - /* 166 */ "item ::= ids cpxName", - /* 167 */ "sortorder ::= ASC", - /* 168 */ "sortorder ::= DESC", - /* 169 */ "sortorder ::=", - /* 170 */ "groupby_opt ::=", - /* 171 */ "groupby_opt ::= GROUP BY grouplist", - /* 172 */ "grouplist ::= grouplist COMMA item", - /* 173 */ "grouplist ::= item", - /* 174 */ "having_opt ::=", - /* 175 */ "having_opt ::= HAVING expr", - /* 176 */ "limit_opt ::=", - /* 177 */ "limit_opt ::= LIMIT signed", - /* 178 */ "limit_opt ::= LIMIT signed OFFSET signed", - /* 179 */ "limit_opt ::= LIMIT signed COMMA signed", - /* 180 */ "slimit_opt ::=", - /* 181 */ "slimit_opt ::= SLIMIT signed", - /* 182 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", - /* 183 */ "slimit_opt ::= SLIMIT signed COMMA signed", - /* 184 */ "where_opt ::=", - /* 185 */ "where_opt ::= WHERE expr", - /* 186 */ "expr ::= LP expr RP", - /* 187 */ "expr ::= ID", - /* 188 */ "expr ::= ID DOT ID", - /* 189 */ "expr ::= ID DOT STAR", - /* 190 */ "expr ::= INTEGER", - /* 191 */ "expr ::= MINUS INTEGER", - /* 192 */ "expr ::= PLUS INTEGER", - /* 193 */ "expr ::= FLOAT", - /* 194 */ "expr ::= MINUS FLOAT", - /* 195 */ "expr ::= PLUS FLOAT", - /* 196 */ "expr ::= STRING", - /* 197 */ "expr ::= NOW", - /* 198 */ "expr ::= VARIABLE", - /* 199 */ "expr ::= BOOL", - /* 200 */ "expr ::= ID LP exprlist RP", - /* 201 */ "expr ::= ID LP STAR RP", - /* 202 */ "expr ::= expr IS NULL", - /* 203 */ "expr ::= expr IS NOT NULL", - /* 204 */ "expr ::= expr LT expr", - /* 205 */ "expr ::= expr GT expr", - /* 206 */ "expr ::= expr LE expr", - /* 207 */ "expr ::= expr GE expr", - /* 208 */ "expr ::= expr NE expr", - /* 209 */ "expr ::= expr EQ expr", - /* 210 */ "expr ::= expr AND expr", - /* 211 */ "expr ::= expr OR expr", - /* 212 */ "expr ::= expr PLUS expr", - /* 213 */ "expr ::= expr MINUS expr", - /* 214 */ "expr ::= expr STAR expr", - /* 215 */ "expr ::= expr SLASH expr", - /* 216 */ "expr ::= expr REM expr", - /* 217 */ "expr ::= expr LIKE expr", - /* 218 */ "expr ::= expr IN LP exprlist RP", - /* 219 */ "exprlist ::= exprlist COMMA expritem", - /* 220 */ "exprlist ::= expritem", - /* 221 */ "expritem ::= expr", - /* 222 */ "expritem ::=", - /* 223 */ "cmd ::= RESET QUERY CACHE", - /* 224 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 225 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 226 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 227 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 228 */ "cmd ::= ALTER TABLE ids 
cpxName CHANGE TAG ids ids", - /* 229 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 230 */ "cmd ::= KILL CONNECTION INTEGER", - /* 231 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 232 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 115 */ "cmd ::= CREATE TABLE create_table_args", + /* 116 */ "cmd ::= CREATE TABLE create_table_list", + /* 117 */ "create_table_list ::= create_from_stable", + /* 118 */ "create_table_list ::= create_table_list create_from_stable", + /* 119 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP", + /* 120 */ "create_table_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP", + /* 121 */ "create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP", + /* 122 */ "create_table_args ::= ifnotexists ids cpxName AS select", + /* 123 */ "columnlist ::= columnlist COMMA column", + /* 124 */ "columnlist ::= column", + /* 125 */ "column ::= ids typename", + /* 126 */ "tagitemlist ::= tagitemlist COMMA tagitem", + /* 127 */ "tagitemlist ::= tagitem", + /* 128 */ "tagitem ::= INTEGER", + /* 129 */ "tagitem ::= FLOAT", + /* 130 */ "tagitem ::= STRING", + /* 131 */ "tagitem ::= BOOL", + /* 132 */ "tagitem ::= NULL", + /* 133 */ "tagitem ::= MINUS INTEGER", + /* 134 */ "tagitem ::= MINUS FLOAT", + /* 135 */ "tagitem ::= PLUS INTEGER", + /* 136 */ "tagitem ::= PLUS FLOAT", + /* 137 */ "select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt", + /* 138 */ "union ::= select", + /* 139 */ "union ::= LP union RP", + /* 140 */ "union ::= union UNION ALL select", + /* 141 */ "union ::= union UNION ALL LP select RP", + /* 142 */ "cmd ::= union", + /* 143 */ "select ::= SELECT selcollist", + /* 144 */ "sclp ::= selcollist COMMA", + /* 145 */ "sclp ::=", + /* 146 */ "selcollist ::= sclp expr as", + /* 147 */ "selcollist ::= sclp STAR", + /* 148 */ "as ::= AS ids", + /* 149 */ "as ::= ids", + /* 150 */ "as ::=", + /* 151 */ "from ::= FROM tablelist", + /* 152 */ "tablelist ::= ids cpxName", + /* 153 */ "tablelist ::= ids cpxName ids", + /* 154 */ "tablelist ::= tablelist COMMA ids cpxName", + /* 155 */ "tablelist ::= tablelist COMMA ids cpxName ids", + /* 156 */ "tmvar ::= VARIABLE", + /* 157 */ "interval_opt ::= INTERVAL LP tmvar RP", + /* 158 */ "interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP", + /* 159 */ "interval_opt ::=", + /* 160 */ "fill_opt ::=", + /* 161 */ "fill_opt ::= FILL LP ID COMMA tagitemlist RP", + /* 162 */ "fill_opt ::= FILL LP ID RP", + /* 163 */ "sliding_opt ::= SLIDING LP tmvar RP", + /* 164 */ "sliding_opt ::=", + /* 165 */ "orderby_opt ::=", + /* 166 */ "orderby_opt ::= ORDER BY sortlist", + /* 167 */ "sortlist ::= sortlist COMMA item sortorder", + /* 168 */ "sortlist ::= item sortorder", + /* 169 */ "item ::= ids cpxName", + /* 170 */ "sortorder ::= ASC", + /* 171 */ "sortorder ::= DESC", + /* 172 */ "sortorder ::=", + /* 173 */ "groupby_opt ::=", + /* 174 */ "groupby_opt ::= GROUP BY grouplist", + /* 175 */ "grouplist ::= grouplist COMMA item", + /* 176 */ "grouplist ::= item", + /* 177 */ "having_opt ::=", + /* 178 */ "having_opt ::= HAVING expr", + /* 179 */ "limit_opt ::=", + /* 180 */ "limit_opt ::= LIMIT signed", + /* 181 */ "limit_opt ::= LIMIT signed OFFSET signed", + /* 182 */ "limit_opt ::= LIMIT signed COMMA signed", + /* 183 */ "slimit_opt ::=", + /* 184 */ "slimit_opt ::= SLIMIT signed", + /* 185 */ "slimit_opt ::= SLIMIT signed SOFFSET signed", + /* 186 */ "slimit_opt ::= 
SLIMIT signed COMMA signed", + /* 187 */ "where_opt ::=", + /* 188 */ "where_opt ::= WHERE expr", + /* 189 */ "expr ::= LP expr RP", + /* 190 */ "expr ::= ID", + /* 191 */ "expr ::= ID DOT ID", + /* 192 */ "expr ::= ID DOT STAR", + /* 193 */ "expr ::= INTEGER", + /* 194 */ "expr ::= MINUS INTEGER", + /* 195 */ "expr ::= PLUS INTEGER", + /* 196 */ "expr ::= FLOAT", + /* 197 */ "expr ::= MINUS FLOAT", + /* 198 */ "expr ::= PLUS FLOAT", + /* 199 */ "expr ::= STRING", + /* 200 */ "expr ::= NOW", + /* 201 */ "expr ::= VARIABLE", + /* 202 */ "expr ::= BOOL", + /* 203 */ "expr ::= ID LP exprlist RP", + /* 204 */ "expr ::= ID LP STAR RP", + /* 205 */ "expr ::= expr IS NULL", + /* 206 */ "expr ::= expr IS NOT NULL", + /* 207 */ "expr ::= expr LT expr", + /* 208 */ "expr ::= expr GT expr", + /* 209 */ "expr ::= expr LE expr", + /* 210 */ "expr ::= expr GE expr", + /* 211 */ "expr ::= expr NE expr", + /* 212 */ "expr ::= expr EQ expr", + /* 213 */ "expr ::= expr AND expr", + /* 214 */ "expr ::= expr OR expr", + /* 215 */ "expr ::= expr PLUS expr", + /* 216 */ "expr ::= expr MINUS expr", + /* 217 */ "expr ::= expr STAR expr", + /* 218 */ "expr ::= expr SLASH expr", + /* 219 */ "expr ::= expr REM expr", + /* 220 */ "expr ::= expr LIKE expr", + /* 221 */ "expr ::= expr IN LP exprlist RP", + /* 222 */ "exprlist ::= exprlist COMMA expritem", + /* 223 */ "exprlist ::= expritem", + /* 224 */ "expritem ::= expr", + /* 225 */ "expritem ::=", + /* 226 */ "cmd ::= RESET QUERY CACHE", + /* 227 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 228 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 229 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 230 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 231 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 232 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 233 */ "cmd ::= KILL CONNECTION INTEGER", + /* 234 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 235 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1367,44 +1375,49 @@ static void yy_destructor( /********* Begin destructor definitions ***************************************/ case 227: /* keep */ case 228: /* tagitemlist */ - case 245: /* columnlist */ - case 253: /* fill_opt */ - case 255: /* groupby_opt */ - case 256: /* orderby_opt */ - case 266: /* sortlist */ - case 270: /* grouplist */ + case 247: /* columnlist */ + case 255: /* fill_opt */ + case 257: /* groupby_opt */ + case 258: /* orderby_opt */ + case 268: /* sortlist */ + case 272: /* grouplist */ { -taosArrayDestroy((yypminor->yy165)); +taosArrayDestroy((yypminor->yy131)); } break; - case 246: /* select */ + case 245: /* create_table_list */ { -doDestroyQuerySql((yypminor->yy414)); +destroyCreateTableSql((yypminor->yy538)); } break; - case 249: /* selcollist */ - case 261: /* sclp */ - case 271: /* exprlist */ + case 248: /* select */ { -tSQLExprListDestroy((yypminor->yy290)); +doDestroyQuerySql((yypminor->yy84)); } break; - case 251: /* where_opt */ - case 257: /* having_opt */ - case 262: /* expr */ - case 272: /* expritem */ + case 251: /* selcollist */ + case 263: /* sclp */ + case 273: /* exprlist */ { -tSQLExprDestroy((yypminor->yy64)); + tSqlExprListDestroy((yypminor->yy478)); } break; - case 260: /* union */ + case 253: /* where_opt */ + case 259: /* having_opt */ + case 264: /* expr */ + case 274: /* expritem */ { -destroyAllSelectClause((yypminor->yy231)); + tSqlExprDestroy((yypminor->yy420)); } break; - case 267: /* sortitem */ + case 
262: /* union */ { -tVariantDestroy(&(yypminor->yy134)); +destroyAllSelectClause((yypminor->yy513)); +} + break; + case 269: /* sortitem */ +{ +tVariantDestroy(&(yypminor->yy516)); } break; /********* End destructor definitions *****************************************/ @@ -1813,124 +1826,127 @@ static const struct { { 243, -1 }, /* (112) signed ::= INTEGER */ { 243, -2 }, /* (113) signed ::= PLUS INTEGER */ { 243, -2 }, /* (114) signed ::= MINUS INTEGER */ - { 209, -6 }, /* (115) cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ - { 244, -3 }, /* (116) create_table_args ::= LP columnlist RP */ - { 244, -7 }, /* (117) create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ - { 244, -7 }, /* (118) create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ - { 244, -2 }, /* (119) create_table_args ::= AS select */ - { 245, -3 }, /* (120) columnlist ::= columnlist COMMA column */ - { 245, -1 }, /* (121) columnlist ::= column */ - { 247, -2 }, /* (122) column ::= ids typename */ - { 228, -3 }, /* (123) tagitemlist ::= tagitemlist COMMA tagitem */ - { 228, -1 }, /* (124) tagitemlist ::= tagitem */ - { 248, -1 }, /* (125) tagitem ::= INTEGER */ - { 248, -1 }, /* (126) tagitem ::= FLOAT */ - { 248, -1 }, /* (127) tagitem ::= STRING */ - { 248, -1 }, /* (128) tagitem ::= BOOL */ - { 248, -1 }, /* (129) tagitem ::= NULL */ - { 248, -2 }, /* (130) tagitem ::= MINUS INTEGER */ - { 248, -2 }, /* (131) tagitem ::= MINUS FLOAT */ - { 248, -2 }, /* (132) tagitem ::= PLUS INTEGER */ - { 248, -2 }, /* (133) tagitem ::= PLUS FLOAT */ - { 246, -12 }, /* (134) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 260, -1 }, /* (135) union ::= select */ - { 260, -3 }, /* (136) union ::= LP union RP */ - { 260, -4 }, /* (137) union ::= union UNION ALL select */ - { 260, -6 }, /* (138) union ::= union UNION ALL LP select RP */ - { 209, -1 }, /* (139) cmd ::= union */ - { 246, -2 }, /* (140) select ::= SELECT selcollist */ - { 261, -2 }, /* (141) sclp ::= selcollist COMMA */ - { 261, 0 }, /* (142) sclp ::= */ - { 249, -3 }, /* (143) selcollist ::= sclp expr as */ - { 249, -2 }, /* (144) selcollist ::= sclp STAR */ - { 263, -2 }, /* (145) as ::= AS ids */ - { 263, -1 }, /* (146) as ::= ids */ - { 263, 0 }, /* (147) as ::= */ - { 250, -2 }, /* (148) from ::= FROM tablelist */ - { 264, -2 }, /* (149) tablelist ::= ids cpxName */ - { 264, -3 }, /* (150) tablelist ::= ids cpxName ids */ - { 264, -4 }, /* (151) tablelist ::= tablelist COMMA ids cpxName */ - { 264, -5 }, /* (152) tablelist ::= tablelist COMMA ids cpxName ids */ - { 265, -1 }, /* (153) tmvar ::= VARIABLE */ - { 252, -4 }, /* (154) interval_opt ::= INTERVAL LP tmvar RP */ - { 252, -6 }, /* (155) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 252, 0 }, /* (156) interval_opt ::= */ - { 253, 0 }, /* (157) fill_opt ::= */ - { 253, -6 }, /* (158) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 253, -4 }, /* (159) fill_opt ::= FILL LP ID RP */ - { 254, -4 }, /* (160) sliding_opt ::= SLIDING LP tmvar RP */ - { 254, 0 }, /* (161) sliding_opt ::= */ - { 256, 0 }, /* (162) orderby_opt ::= */ - { 256, -3 }, /* (163) orderby_opt ::= ORDER BY sortlist */ - { 266, -4 }, /* (164) sortlist ::= sortlist COMMA item sortorder */ - { 266, -2 }, /* (165) sortlist ::= item sortorder */ - { 268, -2 }, /* (166) item ::= ids cpxName */ - { 269, -1 }, /* (167) sortorder ::= ASC */ - { 269, -1 }, /* (168) sortorder ::= DESC */ - { 269, 0 }, /* (169) 
sortorder ::= */ - { 255, 0 }, /* (170) groupby_opt ::= */ - { 255, -3 }, /* (171) groupby_opt ::= GROUP BY grouplist */ - { 270, -3 }, /* (172) grouplist ::= grouplist COMMA item */ - { 270, -1 }, /* (173) grouplist ::= item */ - { 257, 0 }, /* (174) having_opt ::= */ - { 257, -2 }, /* (175) having_opt ::= HAVING expr */ - { 259, 0 }, /* (176) limit_opt ::= */ - { 259, -2 }, /* (177) limit_opt ::= LIMIT signed */ - { 259, -4 }, /* (178) limit_opt ::= LIMIT signed OFFSET signed */ - { 259, -4 }, /* (179) limit_opt ::= LIMIT signed COMMA signed */ - { 258, 0 }, /* (180) slimit_opt ::= */ - { 258, -2 }, /* (181) slimit_opt ::= SLIMIT signed */ - { 258, -4 }, /* (182) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 258, -4 }, /* (183) slimit_opt ::= SLIMIT signed COMMA signed */ - { 251, 0 }, /* (184) where_opt ::= */ - { 251, -2 }, /* (185) where_opt ::= WHERE expr */ - { 262, -3 }, /* (186) expr ::= LP expr RP */ - { 262, -1 }, /* (187) expr ::= ID */ - { 262, -3 }, /* (188) expr ::= ID DOT ID */ - { 262, -3 }, /* (189) expr ::= ID DOT STAR */ - { 262, -1 }, /* (190) expr ::= INTEGER */ - { 262, -2 }, /* (191) expr ::= MINUS INTEGER */ - { 262, -2 }, /* (192) expr ::= PLUS INTEGER */ - { 262, -1 }, /* (193) expr ::= FLOAT */ - { 262, -2 }, /* (194) expr ::= MINUS FLOAT */ - { 262, -2 }, /* (195) expr ::= PLUS FLOAT */ - { 262, -1 }, /* (196) expr ::= STRING */ - { 262, -1 }, /* (197) expr ::= NOW */ - { 262, -1 }, /* (198) expr ::= VARIABLE */ - { 262, -1 }, /* (199) expr ::= BOOL */ - { 262, -4 }, /* (200) expr ::= ID LP exprlist RP */ - { 262, -4 }, /* (201) expr ::= ID LP STAR RP */ - { 262, -3 }, /* (202) expr ::= expr IS NULL */ - { 262, -4 }, /* (203) expr ::= expr IS NOT NULL */ - { 262, -3 }, /* (204) expr ::= expr LT expr */ - { 262, -3 }, /* (205) expr ::= expr GT expr */ - { 262, -3 }, /* (206) expr ::= expr LE expr */ - { 262, -3 }, /* (207) expr ::= expr GE expr */ - { 262, -3 }, /* (208) expr ::= expr NE expr */ - { 262, -3 }, /* (209) expr ::= expr EQ expr */ - { 262, -3 }, /* (210) expr ::= expr AND expr */ - { 262, -3 }, /* (211) expr ::= expr OR expr */ - { 262, -3 }, /* (212) expr ::= expr PLUS expr */ - { 262, -3 }, /* (213) expr ::= expr MINUS expr */ - { 262, -3 }, /* (214) expr ::= expr STAR expr */ - { 262, -3 }, /* (215) expr ::= expr SLASH expr */ - { 262, -3 }, /* (216) expr ::= expr REM expr */ - { 262, -3 }, /* (217) expr ::= expr LIKE expr */ - { 262, -5 }, /* (218) expr ::= expr IN LP exprlist RP */ - { 271, -3 }, /* (219) exprlist ::= exprlist COMMA expritem */ - { 271, -1 }, /* (220) exprlist ::= expritem */ - { 272, -1 }, /* (221) expritem ::= expr */ - { 272, 0 }, /* (222) expritem ::= */ - { 209, -3 }, /* (223) cmd ::= RESET QUERY CACHE */ - { 209, -7 }, /* (224) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 209, -7 }, /* (225) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 209, -7 }, /* (226) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 209, -7 }, /* (227) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 209, -8 }, /* (228) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 209, -9 }, /* (229) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 209, -3 }, /* (230) cmd ::= KILL CONNECTION INTEGER */ - { 209, -5 }, /* (231) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 209, -5 }, /* (232) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 209, -3 }, /* (115) cmd ::= CREATE TABLE create_table_args */ + { 209, -3 }, /* (116) cmd ::= CREATE TABLE create_table_list */ + { 245, -1 }, /* (117) 
create_table_list ::= create_from_stable */
+ { 245, -2 }, /* (118) create_table_list ::= create_table_list create_from_stable */
+ { 244, -6 }, /* (119) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */
+ { 244, -10 }, /* (120) create_table_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */
+ { 246, -10 }, /* (121) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */
+ { 244, -5 }, /* (122) create_table_args ::= ifnotexists ids cpxName AS select */
+ { 247, -3 }, /* (123) columnlist ::= columnlist COMMA column */
+ { 247, -1 }, /* (124) columnlist ::= column */
+ { 249, -2 }, /* (125) column ::= ids typename */
+ { 228, -3 }, /* (126) tagitemlist ::= tagitemlist COMMA tagitem */
+ { 228, -1 }, /* (127) tagitemlist ::= tagitem */
+ { 250, -1 }, /* (128) tagitem ::= INTEGER */
+ { 250, -1 }, /* (129) tagitem ::= FLOAT */
+ { 250, -1 }, /* (130) tagitem ::= STRING */
+ { 250, -1 }, /* (131) tagitem ::= BOOL */
+ { 250, -1 }, /* (132) tagitem ::= NULL */
+ { 250, -2 }, /* (133) tagitem ::= MINUS INTEGER */
+ { 250, -2 }, /* (134) tagitem ::= MINUS FLOAT */
+ { 250, -2 }, /* (135) tagitem ::= PLUS INTEGER */
+ { 250, -2 }, /* (136) tagitem ::= PLUS FLOAT */
+ { 248, -12 }, /* (137) select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */
+ { 262, -1 }, /* (138) union ::= select */
+ { 262, -3 }, /* (139) union ::= LP union RP */
+ { 262, -4 }, /* (140) union ::= union UNION ALL select */
+ { 262, -6 }, /* (141) union ::= union UNION ALL LP select RP */
+ { 209, -1 }, /* (142) cmd ::= union */
+ { 248, -2 }, /* (143) select ::= SELECT selcollist */
+ { 263, -2 }, /* (144) sclp ::= selcollist COMMA */
+ { 263, 0 }, /* (145) sclp ::= */
+ { 251, -3 }, /* (146) selcollist ::= sclp expr as */
+ { 251, -2 }, /* (147) selcollist ::= sclp STAR */
+ { 265, -2 }, /* (148) as ::= AS ids */
+ { 265, -1 }, /* (149) as ::= ids */
+ { 265, 0 }, /* (150) as ::= */
+ { 252, -2 }, /* (151) from ::= FROM tablelist */
+ { 266, -2 }, /* (152) tablelist ::= ids cpxName */
+ { 266, -3 }, /* (153) tablelist ::= ids cpxName ids */
+ { 266, -4 }, /* (154) tablelist ::= tablelist COMMA ids cpxName */
+ { 266, -5 }, /* (155) tablelist ::= tablelist COMMA ids cpxName ids */
+ { 267, -1 }, /* (156) tmvar ::= VARIABLE */
+ { 254, -4 }, /* (157) interval_opt ::= INTERVAL LP tmvar RP */
+ { 254, -6 }, /* (158) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */
+ { 254, 0 }, /* (159) interval_opt ::= */
+ { 255, 0 }, /* (160) fill_opt ::= */
+ { 255, -6 }, /* (161) fill_opt ::= FILL LP ID COMMA tagitemlist RP */
+ { 255, -4 }, /* (162) fill_opt ::= FILL LP ID RP */
+ { 256, -4 }, /* (163) sliding_opt ::= SLIDING LP tmvar RP */
+ { 256, 0 }, /* (164) sliding_opt ::= */
+ { 258, 0 }, /* (165) orderby_opt ::= */
+ { 258, -3 }, /* (166) orderby_opt ::= ORDER BY sortlist */
+ { 268, -4 }, /* (167) sortlist ::= sortlist COMMA item sortorder */
+ { 268, -2 }, /* (168) sortlist ::= item sortorder */
+ { 270, -2 }, /* (169) item ::= ids cpxName */
+ { 271, -1 }, /* (170) sortorder ::= ASC */
+ { 271, -1 }, /* (171) sortorder ::= DESC */
+ { 271, 0 }, /* (172) sortorder ::= */
+ { 257, 0 }, /* (173) groupby_opt ::= */
+ { 257, -3 }, /* (174) groupby_opt ::= GROUP BY grouplist */
+ { 272, -3 }, /* (175) grouplist ::= grouplist COMMA item */
+ { 272, -1 }, /* (176) grouplist ::= item */
+ { 259, 0 }, /* (177) having_opt ::= */
+ { 259, -2 }, /* (178) having_opt ::= HAVING expr */
+ { 261, 0 }, /* (179) limit_opt ::= */
+ { 261, -2 }, /* (180) limit_opt ::= LIMIT signed */
+ { 261, -4 }, /* (181) limit_opt ::= LIMIT signed OFFSET signed */
+ { 261, -4 }, /* (182) limit_opt ::= LIMIT signed COMMA signed */
+ { 260, 0 }, /* (183) slimit_opt ::= */
+ { 260, -2 }, /* (184) slimit_opt ::= SLIMIT signed */
+ { 260, -4 }, /* (185) slimit_opt ::= SLIMIT signed SOFFSET signed */
+ { 260, -4 }, /* (186) slimit_opt ::= SLIMIT signed COMMA signed */
+ { 253, 0 }, /* (187) where_opt ::= */
+ { 253, -2 }, /* (188) where_opt ::= WHERE expr */
+ { 264, -3 }, /* (189) expr ::= LP expr RP */
+ { 264, -1 }, /* (190) expr ::= ID */
+ { 264, -3 }, /* (191) expr ::= ID DOT ID */
+ { 264, -3 }, /* (192) expr ::= ID DOT STAR */
+ { 264, -1 }, /* (193) expr ::= INTEGER */
+ { 264, -2 }, /* (194) expr ::= MINUS INTEGER */
+ { 264, -2 }, /* (195) expr ::= PLUS INTEGER */
+ { 264, -1 }, /* (196) expr ::= FLOAT */
+ { 264, -2 }, /* (197) expr ::= MINUS FLOAT */
+ { 264, -2 }, /* (198) expr ::= PLUS FLOAT */
+ { 264, -1 }, /* (199) expr ::= STRING */
+ { 264, -1 }, /* (200) expr ::= NOW */
+ { 264, -1 }, /* (201) expr ::= VARIABLE */
+ { 264, -1 }, /* (202) expr ::= BOOL */
+ { 264, -4 }, /* (203) expr ::= ID LP exprlist RP */
+ { 264, -4 }, /* (204) expr ::= ID LP STAR RP */
+ { 264, -3 }, /* (205) expr ::= expr IS NULL */
+ { 264, -4 }, /* (206) expr ::= expr IS NOT NULL */
+ { 264, -3 }, /* (207) expr ::= expr LT expr */
+ { 264, -3 }, /* (208) expr ::= expr GT expr */
+ { 264, -3 }, /* (209) expr ::= expr LE expr */
+ { 264, -3 }, /* (210) expr ::= expr GE expr */
+ { 264, -3 }, /* (211) expr ::= expr NE expr */
+ { 264, -3 }, /* (212) expr ::= expr EQ expr */
+ { 264, -3 }, /* (213) expr ::= expr AND expr */
+ { 264, -3 }, /* (214) expr ::= expr OR expr */
+ { 264, -3 }, /* (215) expr ::= expr PLUS expr */
+ { 264, -3 }, /* (216) expr ::= expr MINUS expr */
+ { 264, -3 }, /* (217) expr ::= expr STAR expr */
+ { 264, -3 }, /* (218) expr ::= expr SLASH expr */
+ { 264, -3 }, /* (219) expr ::= expr REM expr */
+ { 264, -3 }, /* (220) expr ::= expr LIKE expr */
+ { 264, -5 }, /* (221) expr ::= expr IN LP exprlist RP */
+ { 273, -3 }, /* (222) exprlist ::= exprlist COMMA expritem */
+ { 273, -1 }, /* (223) exprlist ::= expritem */
+ { 274, -1 }, /* (224) expritem ::= expr */
+ { 274, 0 }, /* (225) expritem ::= */
+ { 209, -3 }, /* (226) cmd ::= RESET QUERY CACHE */
+ { 209, -7 }, /* (227) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */
+ { 209, -7 }, /* (228) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */
+ { 209, -7 }, /* (229) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */
+ { 209, -7 }, /* (230) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */
+ { 209, -8 }, /* (231) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */
+ { 209, -9 }, /* (232) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */
+ { 209, -3 }, /* (233) cmd ::= KILL CONNECTION INTEGER */
+ { 209, -5 }, /* (234) cmd ::= KILL STREAM INTEGER COLON INTEGER */
+ { 209, -5 }, /* (235) cmd ::= KILL QUERY INTEGER COLON INTEGER */
};
static void yy_accept(yyParser*); /* Forward Declaration */
@@ -2011,6 +2027,7 @@ static void yy_reduce(
/********** Begin reduce actions **********************************************/
YYMINORTYPE yylhsminor;
case 0: /* program ::= cmd */
+ case 115: /* cmd ::= CREATE TABLE create_table_args */ yytestcase(yyruleno==115);
{}
break;
case 1: /* cmd ::= SHOW DATABASES */
@@ -2097,32 +2114,33 @@ static void yy_reduce(
case 24: /* cmd ::= SHOW dbPrefix STABLES LIKE ids */
{ SStrToken
token; - setDBName(&token, &yymsp[-3].minor.yy0); + setDbName(&token, &yymsp[-3].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_METRIC, &token, &yymsp[0].minor.yy0); } break; case 25: /* cmd ::= SHOW dbPrefix VGROUPS */ { SStrToken token; - setDBName(&token, &yymsp[-1].minor.yy0); + setDbName(&token, &yymsp[-1].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, 0); } break; case 26: /* cmd ::= SHOW dbPrefix VGROUPS ids */ { SStrToken token; - setDBName(&token, &yymsp[-2].minor.yy0); + setDbName(&token, &yymsp[-2].minor.yy0); setShowOptions(pInfo, TSDB_MGMT_TABLE_VGROUP, &token, &yymsp[0].minor.yy0); } break; case 27: /* cmd ::= DROP TABLE ifexists ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - setDropDBTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); + setDropDbTableInfo(pInfo, TSDB_SQL_DROP_TABLE, &yymsp[-1].minor.yy0, &yymsp[-2].minor.yy0); } break; case 28: /* cmd ::= DROP DATABASE ifexists ids */ -{ setDropDBTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } +{ + setDropDbTableInfo(pInfo, TSDB_SQL_DROP_DB, &yymsp[0].minor.yy0, &yymsp[-1].minor.yy0); } break; case 29: /* cmd ::= DROP DNODE ids */ { setDCLSQLElems(pInfo, TSDB_SQL_DROP_DNODE, 1, &yymsp[0].minor.yy0); } @@ -2143,10 +2161,12 @@ static void yy_reduce( } break; case 34: /* cmd ::= ALTER USER ids PASS ids */ -{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } +{ + setAlterUserSql(pInfo, TSDB_ALTER_USER_PASSWD, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, NULL); } break; case 35: /* cmd ::= ALTER USER ids PRIVILEGE ids */ -{ setAlterUserSQL(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} +{ + setAlterUserSql(pInfo, TSDB_ALTER_USER_PRIVILEGES, &yymsp[-2].minor.yy0, NULL, &yymsp[0].minor.yy0);} break; case 36: /* cmd ::= ALTER DNODE ids ids */ { setDCLSQLElems(pInfo, TSDB_SQL_CFG_DNODE, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } @@ -2161,13 +2181,15 @@ static void yy_reduce( { setDCLSQLElems(pInfo, TSDB_SQL_CFG_LOCAL, 2, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } break; case 40: /* cmd ::= ALTER DATABASE ids alter_db_optr */ -{ SStrToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy268, &t);} +{ SStrToken t = {0}; setCreateDBSQL(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy148, &t);} break; case 41: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy149);} +{ + setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy309);} break; case 42: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy149);} +{ + setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy309);} break; case 43: /* ids ::= ID */ case 44: /* ids ::= STRING */ yytestcase(yyruleno==44); @@ -2175,26 +2197,29 @@ static void yy_reduce( yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 45: /* ifexists ::= IF EXISTS */ -{yymsp[-1].minor.yy0.n = 1;} +{ yymsp[-1].minor.yy0.n = 1;} break; case 46: /* ifexists ::= */ case 48: /* ifnotexists ::= */ yytestcase(yyruleno==48); -{yymsp[1].minor.yy0.n = 0;} +{ yymsp[1].minor.yy0.n = 0;} break; case 47: /* ifnotexists ::= IF NOT EXISTS */ -{yymsp[-2].minor.yy0.n = 1;} +{ yymsp[-2].minor.yy0.n = 1;} 
break; case 49: /* cmd ::= CREATE DNODE ids */ { setDCLSQLElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; case 50: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSQL(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy149);} +{ + setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, + &yymsp[0].minor.yy309);} break; case 51: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ -{ setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy268, &yymsp[-2].minor.yy0);} +{ setCreateDBSQL(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy148, &yymsp[-2].minor.yy0);} break; case 52: /* cmd ::= CREATE USER ids PASS ids */ -{ setCreateUserSQL(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} +{ + setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} break; case 53: /* pps ::= */ case 55: /* tseries ::= */ yytestcase(yyruleno==55); @@ -2205,7 +2230,7 @@ static void yy_reduce( case 65: /* users ::= */ yytestcase(yyruleno==65); case 67: /* conns ::= */ yytestcase(yyruleno==67); case 69: /* state ::= */ yytestcase(yyruleno==69); -{yymsp[1].minor.yy0.n = 0; } +{ yymsp[1].minor.yy0.n = 0; } break; case 54: /* pps ::= PPS INTEGER */ case 56: /* tseries ::= TSERIES INTEGER */ yytestcase(yyruleno==56); @@ -2216,24 +2241,24 @@ static void yy_reduce( case 66: /* users ::= USERS INTEGER */ yytestcase(yyruleno==66); case 68: /* conns ::= CONNS INTEGER */ yytestcase(yyruleno==68); case 70: /* state ::= STATE ids */ yytestcase(yyruleno==70); -{yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } +{ yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 71: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yylhsminor.yy149.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy149.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy149.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy149.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy149.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yylhsminor.yy149.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy149.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy149.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy149.stat = yymsp[0].minor.yy0; + yylhsminor.yy309.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy309.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy309.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy309.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy309.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy309.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy309.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy309.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + yylhsminor.yy309.stat = yymsp[0].minor.yy0; } - yymsp[-8].minor.yy149 = yylhsminor.yy149; + yymsp[-8].minor.yy309 = yylhsminor.yy309; break; case 72: /* keep ::= KEEP tagitemlist */ -{ 
yymsp[-1].minor.yy165 = yymsp[0].minor.yy165; } +{ yymsp[-1].minor.yy131 = yymsp[0].minor.yy131; } break; case 73: /* cache ::= CACHE INTEGER */ case 74: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==74); @@ -2251,576 +2276,618 @@ static void yy_reduce( { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 86: /* db_optr ::= */ -{setDefaultCreateDbOption(&yymsp[1].minor.yy268);} +{setDefaultCreateDbOption(&yymsp[1].minor.yy148);} break; case 87: /* db_optr ::= db_optr cache */ -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 88: /* db_optr ::= db_optr replica */ case 102: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==102); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 89: /* db_optr ::= db_optr quorum */ case 103: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==103); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 90: /* db_optr ::= db_optr days */ -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 91: /* db_optr ::= db_optr minrows */ -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 92: /* db_optr ::= db_optr maxrows */ -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 93: /* db_optr ::= db_optr blocks */ case 105: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==105); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 94: /* db_optr ::= db_optr ctime */ -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; 
yylhsminor.yy148.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 95: /* db_optr ::= db_optr wal */ case 107: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==107); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 96: /* db_optr ::= db_optr fsync */ case 108: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==108); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 97: /* db_optr ::= db_optr comp */ case 106: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==106); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 98: /* db_optr ::= db_optr prec */ -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.precision = yymsp[0].minor.yy0; } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 99: /* db_optr ::= db_optr keep */ case 104: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==104); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.keep = yymsp[0].minor.yy165; } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.keep = yymsp[0].minor.yy131; } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 100: /* db_optr ::= db_optr update */ case 109: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==109); -{ yylhsminor.yy268 = yymsp[-1].minor.yy268; yylhsminor.yy268.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy268 = yylhsminor.yy268; +{ yylhsminor.yy148 = yymsp[-1].minor.yy148; yylhsminor.yy148.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy148 = yylhsminor.yy148; break; case 101: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yymsp[1].minor.yy268);} +{ setDefaultCreateDbOption(&yymsp[1].minor.yy148);} break; case 110: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; - tSQLSetColumnType (&yylhsminor.yy223, &yymsp[0].minor.yy0); + tSqlSetColumnType(&yylhsminor.yy163, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy223 = yylhsminor.yy223; + yymsp[0].minor.yy163 = yylhsminor.yy163; break; case 111: /* typename ::= ids LP signed RP */ { - if (yymsp[-1].minor.yy207 <= 0) { - yymsp[-3].minor.yy0.type = 0; - tSQLSetColumnType(&yylhsminor.yy223, &yymsp[-3].minor.yy0); - } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy207; // negative value of name length - tSQLSetColumnType(&yylhsminor.yy223, &yymsp[-3].minor.yy0); - } + if (yymsp[-1].minor.yy459 <= 0) { + yymsp[-3].minor.yy0.type = 0; + tSqlSetColumnType(&yylhsminor.yy163, &yymsp[-3].minor.yy0); 
+ } else { + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy459; // negative value of name length + tSqlSetColumnType(&yylhsminor.yy163, &yymsp[-3].minor.yy0); + } } - yymsp[-3].minor.yy223 = yylhsminor.yy223; + yymsp[-3].minor.yy163 = yylhsminor.yy163; break; case 112: /* signed ::= INTEGER */ -{ yylhsminor.yy207 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[0].minor.yy207 = yylhsminor.yy207; +{ yylhsminor.yy459 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy459 = yylhsminor.yy459; break; case 113: /* signed ::= PLUS INTEGER */ -{ yymsp[-1].minor.yy207 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yymsp[-1].minor.yy459 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; case 114: /* signed ::= MINUS INTEGER */ -{ yymsp[-1].minor.yy207 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +{ yymsp[-1].minor.yy459 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; - case 115: /* cmd ::= CREATE TABLE ifnotexists ids cpxName create_table_args */ + case 116: /* cmd ::= CREATE TABLE create_table_list */ +{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy538;} + break; + case 117: /* create_table_list ::= create_from_stable */ { - yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - setCreatedTableName(pInfo, &yymsp[-2].minor.yy0, &yymsp[-3].minor.yy0); + SCreateTableSQL* pCreateTable = calloc(1, sizeof(SCreateTableSQL)); + pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); + + taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy96); + pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; + yylhsminor.yy538 = pCreateTable; } + yymsp[0].minor.yy538 = yylhsminor.yy538; break; - case 116: /* create_table_args ::= LP columnlist RP */ + case 118: /* create_table_list ::= create_table_list create_from_stable */ { - yymsp[-2].minor.yy470 = tSetCreateSQLElems(yymsp[-1].minor.yy165, NULL, NULL, NULL, NULL, TSQL_CREATE_TABLE); - setSQLInfo(pInfo, yymsp[-2].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); + taosArrayPush(yymsp[-1].minor.yy538->childTableInfo, &yymsp[0].minor.yy96); + yylhsminor.yy538 = yymsp[-1].minor.yy538; } + yymsp[-1].minor.yy538 = yylhsminor.yy538; break; - case 117: /* create_table_args ::= LP columnlist RP TAGS LP columnlist RP */ + case 119: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { - yymsp[-6].minor.yy470 = tSetCreateSQLElems(yymsp[-5].minor.yy165, yymsp[-1].minor.yy165, NULL, NULL, NULL, TSQL_CREATE_STABLE); - setSQLInfo(pInfo, yymsp[-6].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy538 = tSetCreateSqlElems(yymsp[-1].minor.yy131, NULL, NULL, TSQL_CREATE_TABLE); + setSqlInfo(pInfo, yylhsminor.yy538, NULL, TSDB_SQL_CREATE_TABLE); + + yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; + setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } + yymsp[-5].minor.yy538 = yylhsminor.yy538; break; - case 118: /* create_table_args ::= USING ids cpxName TAGS LP tagitemlist RP */ + case 120: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { - yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; - yymsp[-6].minor.yy470 = tSetCreateSQLElems(NULL, NULL, &yymsp[-5].minor.yy0, yymsp[-1].minor.yy165, NULL, TSQL_CREATE_TABLE_FROM_STABLE); - setSQLInfo(pInfo, yymsp[-6].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy538 = tSetCreateSqlElems(yymsp[-5].minor.yy131, yymsp[-1].minor.yy131, NULL, TSQL_CREATE_STABLE); + setSqlInfo(pInfo, yylhsminor.yy538, NULL, TSDB_SQL_CREATE_TABLE); + + yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; + 
setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } + yymsp[-9].minor.yy538 = yylhsminor.yy538; break; - case 119: /* create_table_args ::= AS select */ + case 121: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { - yymsp[-1].minor.yy470 = tSetCreateSQLElems(NULL, NULL, NULL, NULL, yymsp[0].minor.yy414, TSQL_CREATE_STREAM); - setSQLInfo(pInfo, yymsp[-1].minor.yy470, NULL, TSDB_SQL_CREATE_TABLE); + yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; + yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; + yylhsminor.yy96 = createNewChildTableInfo(&yymsp[-5].minor.yy0, yymsp[-1].minor.yy131, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } + yymsp[-9].minor.yy96 = yylhsminor.yy96; break; - case 120: /* columnlist ::= columnlist COMMA column */ -{taosArrayPush(yymsp[-2].minor.yy165, &yymsp[0].minor.yy223); yylhsminor.yy165 = yymsp[-2].minor.yy165; } - yymsp[-2].minor.yy165 = yylhsminor.yy165; - break; - case 121: /* columnlist ::= column */ -{yylhsminor.yy165 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy165, &yymsp[0].minor.yy223);} - yymsp[0].minor.yy165 = yylhsminor.yy165; - break; - case 122: /* column ::= ids typename */ + case 122: /* create_table_args ::= ifnotexists ids cpxName AS select */ { - tSQLSetColumnInfo(&yylhsminor.yy223, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy223); + yylhsminor.yy538 = tSetCreateSqlElems(NULL, NULL, yymsp[0].minor.yy84, TSQL_CREATE_STREAM); + setSqlInfo(pInfo, yylhsminor.yy538, NULL, TSDB_SQL_CREATE_TABLE); + + yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; + setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } - yymsp[-1].minor.yy223 = yylhsminor.yy223; + yymsp[-4].minor.yy538 = yylhsminor.yy538; break; - case 123: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yylhsminor.yy165 = tVariantListAppend(yymsp[-2].minor.yy165, &yymsp[0].minor.yy134, -1); } - yymsp[-2].minor.yy165 = yylhsminor.yy165; + case 123: /* columnlist ::= columnlist COMMA column */ +{taosArrayPush(yymsp[-2].minor.yy131, &yymsp[0].minor.yy163); yylhsminor.yy131 = yymsp[-2].minor.yy131; } + yymsp[-2].minor.yy131 = yylhsminor.yy131; break; - case 124: /* tagitemlist ::= tagitem */ -{ yylhsminor.yy165 = tVariantListAppend(NULL, &yymsp[0].minor.yy134, -1); } - yymsp[0].minor.yy165 = yylhsminor.yy165; + case 124: /* columnlist ::= column */ +{yylhsminor.yy131 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy131, &yymsp[0].minor.yy163);} + yymsp[0].minor.yy131 = yylhsminor.yy131; break; - case 125: /* tagitem ::= INTEGER */ - case 126: /* tagitem ::= FLOAT */ yytestcase(yyruleno==126); - case 127: /* tagitem ::= STRING */ yytestcase(yyruleno==127); - case 128: /* tagitem ::= BOOL */ yytestcase(yyruleno==128); -{toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy134, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy134 = yylhsminor.yy134; + case 125: /* column ::= ids typename */ +{ + tSqlSetColumnInfo(&yylhsminor.yy163, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy163); +} + yymsp[-1].minor.yy163 = yylhsminor.yy163; break; - case 129: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy134, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy134 = yylhsminor.yy134; + case 126: /* tagitemlist ::= tagitemlist COMMA tagitem */ +{ yylhsminor.yy131 = tVariantListAppend(yymsp[-2].minor.yy131, &yymsp[0].minor.yy516, -1); } + yymsp[-2].minor.yy131 = yylhsminor.yy131; break; - case 130: /* tagitem ::= MINUS INTEGER */ - case 131: /* tagitem ::= MINUS FLOAT 
*/ yytestcase(yyruleno==131); - case 132: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==132); - case 133: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==133); + case 127: /* tagitemlist ::= tagitem */ +{ yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[0].minor.yy516, -1); } + yymsp[0].minor.yy131 = yylhsminor.yy131; + break; + case 128: /* tagitem ::= INTEGER */ + case 129: /* tagitem ::= FLOAT */ yytestcase(yyruleno==129); + case 130: /* tagitem ::= STRING */ yytestcase(yyruleno==130); + case 131: /* tagitem ::= BOOL */ yytestcase(yyruleno==131); +{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy516 = yylhsminor.yy516; + break; + case 132: /* tagitem ::= NULL */ +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy516, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy516 = yylhsminor.yy516; + break; + case 133: /* tagitem ::= MINUS INTEGER */ + case 134: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==134); + case 135: /* tagitem ::= PLUS INTEGER */ yytestcase(yyruleno==135); + case 136: /* tagitem ::= PLUS FLOAT */ yytestcase(yyruleno==136); { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy134, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy516, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy134 = yylhsminor.yy134; + yymsp[-1].minor.yy516 = yylhsminor.yy516; break; - case 134: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + case 137: /* select ::= SELECT selcollist from where_opt interval_opt fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy414 = tSetQuerySQLElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy290, yymsp[-9].minor.yy165, yymsp[-8].minor.yy64, yymsp[-4].minor.yy165, yymsp[-3].minor.yy165, &yymsp[-7].minor.yy532, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy165, &yymsp[0].minor.yy216, &yymsp[-1].minor.yy216); + yylhsminor.yy84 = tSetQuerySqlElems(&yymsp[-11].minor.yy0, yymsp[-10].minor.yy478, yymsp[-9].minor.yy131, + yymsp[-8].minor.yy420, yymsp[-4].minor.yy131, yymsp[-3].minor.yy131, + &yymsp[-7].minor.yy530, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy131, + &yymsp[0].minor.yy284, &yymsp[-1].minor.yy284); } - yymsp[-11].minor.yy414 = yylhsminor.yy414; + yymsp[-11].minor.yy84 = yylhsminor.yy84; break; - case 135: /* union ::= select */ -{ yylhsminor.yy231 = setSubclause(NULL, yymsp[0].minor.yy414); } - yymsp[0].minor.yy231 = yylhsminor.yy231; + case 138: /* union ::= select */ +{ yylhsminor.yy513 = setSubclause(NULL, yymsp[0].minor.yy84); } + yymsp[0].minor.yy513 = yylhsminor.yy513; break; - case 136: /* union ::= LP union RP */ -{ yymsp[-2].minor.yy231 = yymsp[-1].minor.yy231; } + case 139: /* union ::= LP union RP */ +{ yymsp[-2].minor.yy513 = yymsp[-1].minor.yy513; } break; - case 137: /* union ::= union UNION ALL select */ -{ yylhsminor.yy231 = appendSelectClause(yymsp[-3].minor.yy231, yymsp[0].minor.yy414); } - yymsp[-3].minor.yy231 = yylhsminor.yy231; + case 140: /* union ::= union UNION ALL select */ +{ yylhsminor.yy513 = appendSelectClause(yymsp[-3].minor.yy513, yymsp[0].minor.yy84); } + yymsp[-3].minor.yy513 = yylhsminor.yy513; break; - case 138: /* union ::= union UNION ALL LP select RP */ -{ yylhsminor.yy231 = appendSelectClause(yymsp[-5].minor.yy231, yymsp[-1].minor.yy414); } - yymsp[-5].minor.yy231 = yylhsminor.yy231; + case 
141: /* union ::= union UNION ALL LP select RP */ +{ yylhsminor.yy513 = appendSelectClause(yymsp[-5].minor.yy513, yymsp[-1].minor.yy84); } + yymsp[-5].minor.yy513 = yylhsminor.yy513; break; - case 139: /* cmd ::= union */ -{ setSQLInfo(pInfo, yymsp[0].minor.yy231, NULL, TSDB_SQL_SELECT); } - break; - case 140: /* select ::= SELECT selcollist */ + case 142: /* cmd ::= union */ { - yylhsminor.yy414 = tSetQuerySQLElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy290, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); -} - yymsp[-1].minor.yy414 = yylhsminor.yy414; + setSqlInfo(pInfo, yymsp[0].minor.yy513, NULL, TSDB_SQL_SELECT); } break; - case 141: /* sclp ::= selcollist COMMA */ -{yylhsminor.yy290 = yymsp[-1].minor.yy290;} - yymsp[-1].minor.yy290 = yylhsminor.yy290; - break; - case 142: /* sclp ::= */ -{yymsp[1].minor.yy290 = 0;} - break; - case 143: /* selcollist ::= sclp expr as */ + case 143: /* select ::= SELECT selcollist */ { - yylhsminor.yy290 = tSQLExprListAppend(yymsp[-2].minor.yy290, yymsp[-1].minor.yy64, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy84 = tSetQuerySqlElems(&yymsp[-1].minor.yy0, yymsp[0].minor.yy478, NULL, NULL, NULL, NULL, NULL, + NULL, NULL, NULL, NULL); } - yymsp[-2].minor.yy290 = yylhsminor.yy290; + yymsp[-1].minor.yy84 = yylhsminor.yy84; break; - case 144: /* selcollist ::= sclp STAR */ + case 144: /* sclp ::= selcollist COMMA */ +{yylhsminor.yy478 = yymsp[-1].minor.yy478;} + yymsp[-1].minor.yy478 = yylhsminor.yy478; + break; + case 145: /* sclp ::= */ +{yymsp[1].minor.yy478 = 0;} + break; + case 146: /* selcollist ::= sclp expr as */ { - tSQLExpr *pNode = tSQLExprIdValueCreate(NULL, TK_ALL); - yylhsminor.yy290 = tSQLExprListAppend(yymsp[-1].minor.yy290, pNode, 0); + yylhsminor.yy478 = tSqlExprListAppend(yymsp[-2].minor.yy478, yymsp[-1].minor.yy420, + yymsp[0].minor.yy0.n ? 
&yymsp[0].minor.yy0 : 0); } - yymsp[-1].minor.yy290 = yylhsminor.yy290; + yymsp[-2].minor.yy478 = yylhsminor.yy478; break; - case 145: /* as ::= AS ids */ + case 147: /* selcollist ::= sclp STAR */ +{ + tSQLExpr *pNode = tSqlExprIdValueCreate(NULL, TK_ALL); + yylhsminor.yy478 = tSqlExprListAppend(yymsp[-1].minor.yy478, pNode, 0); +} + yymsp[-1].minor.yy478 = yylhsminor.yy478; + break; + case 148: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; - case 146: /* as ::= ids */ + case 149: /* as ::= ids */ { yylhsminor.yy0 = yymsp[0].minor.yy0; } yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 147: /* as ::= */ + case 150: /* as ::= */ { yymsp[1].minor.yy0.n = 0; } break; - case 148: /* from ::= FROM tablelist */ -{yymsp[-1].minor.yy165 = yymsp[0].minor.yy165;} + case 151: /* from ::= FROM tablelist */ +{yymsp[-1].minor.yy131 = yymsp[0].minor.yy131;} break; - case 149: /* tablelist ::= ids cpxName */ + case 152: /* tablelist ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy165 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[-1].minor.yy0, -1); // table alias name + yylhsminor.yy131 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy131 = tVariantListAppendToken(yylhsminor.yy131, &yymsp[-1].minor.yy0, -1); // table alias name } - yymsp[-1].minor.yy165 = yylhsminor.yy165; + yymsp[-1].minor.yy131 = yylhsminor.yy131; break; - case 150: /* tablelist ::= ids cpxName ids */ + case 153: /* tablelist ::= ids cpxName ids */ { - toTSDBType(yymsp[-2].minor.yy0.type); - toTSDBType(yymsp[0].minor.yy0.type); - yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy165 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[0].minor.yy0, -1); + toTSDBType(yymsp[-2].minor.yy0.type); + toTSDBType(yymsp[0].minor.yy0.type); + yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; + yylhsminor.yy131 = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy131 = tVariantListAppendToken(yylhsminor.yy131, &yymsp[0].minor.yy0, -1); } - yymsp[-2].minor.yy165 = yylhsminor.yy165; + yymsp[-2].minor.yy131 = yylhsminor.yy131; break; - case 151: /* tablelist ::= tablelist COMMA ids cpxName */ + case 154: /* tablelist ::= tablelist COMMA ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy165 = tVariantListAppendToken(yymsp[-3].minor.yy165, &yymsp[-1].minor.yy0, -1); - yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy131 = tVariantListAppendToken(yymsp[-3].minor.yy131, &yymsp[-1].minor.yy0, -1); + yylhsminor.yy131 = tVariantListAppendToken(yylhsminor.yy131, &yymsp[-1].minor.yy0, -1); } - yymsp[-3].minor.yy165 = yylhsminor.yy165; + yymsp[-3].minor.yy131 = yylhsminor.yy131; break; - case 152: /* tablelist ::= tablelist COMMA ids cpxName ids */ + case 155: /* tablelist ::= tablelist COMMA ids cpxName ids */ { - toTSDBType(yymsp[-2].minor.yy0.type); - toTSDBType(yymsp[0].minor.yy0.type); - yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy165 = tVariantListAppendToken(yymsp[-4].minor.yy165, &yymsp[-2].minor.yy0, -1); - yylhsminor.yy165 = tVariantListAppendToken(yylhsminor.yy165, &yymsp[0].minor.yy0, -1); + toTSDBType(yymsp[-2].minor.yy0.type); + toTSDBType(yymsp[0].minor.yy0.type); + yymsp[-2].minor.yy0.n += 
yymsp[-1].minor.yy0.n; + yylhsminor.yy131 = tVariantListAppendToken(yymsp[-4].minor.yy131, &yymsp[-2].minor.yy0, -1); + yylhsminor.yy131 = tVariantListAppendToken(yylhsminor.yy131, &yymsp[0].minor.yy0, -1); } - yymsp[-4].minor.yy165 = yylhsminor.yy165; + yymsp[-4].minor.yy131 = yylhsminor.yy131; break; - case 153: /* tmvar ::= VARIABLE */ + case 156: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; - case 154: /* interval_opt ::= INTERVAL LP tmvar RP */ -{yymsp[-3].minor.yy532.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy532.offset.n = 0; yymsp[-3].minor.yy532.offset.z = NULL; yymsp[-3].minor.yy532.offset.type = 0;} + case 157: /* interval_opt ::= INTERVAL LP tmvar RP */ +{yymsp[-3].minor.yy530.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy530.offset.n = 0; yymsp[-3].minor.yy530.offset.z = NULL; yymsp[-3].minor.yy530.offset.type = 0;} break; - case 155: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ -{yymsp[-5].minor.yy532.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy532.offset = yymsp[-1].minor.yy0;} + case 158: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ +{yymsp[-5].minor.yy530.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy530.offset = yymsp[-1].minor.yy0;} break; - case 156: /* interval_opt ::= */ -{memset(&yymsp[1].minor.yy532, 0, sizeof(yymsp[1].minor.yy532));} + case 159: /* interval_opt ::= */ +{memset(&yymsp[1].minor.yy530, 0, sizeof(yymsp[1].minor.yy530));} break; - case 157: /* fill_opt ::= */ -{yymsp[1].minor.yy165 = 0; } + case 160: /* fill_opt ::= */ +{yymsp[1].minor.yy131 = 0; } break; - case 158: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + case 161: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { tVariant A = {0}; toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy165, &A, -1, 0); - yymsp[-5].minor.yy165 = yymsp[-1].minor.yy165; + tVariantListInsert(yymsp[-1].minor.yy131, &A, -1, 0); + yymsp[-5].minor.yy131 = yymsp[-1].minor.yy131; } break; - case 159: /* fill_opt ::= FILL LP ID RP */ + case 162: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy165 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy131 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; - case 160: /* sliding_opt ::= SLIDING LP tmvar RP */ + case 163: /* sliding_opt ::= SLIDING LP tmvar RP */ {yymsp[-3].minor.yy0 = yymsp[-1].minor.yy0; } break; - case 161: /* sliding_opt ::= */ + case 164: /* sliding_opt ::= */ {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; - case 162: /* orderby_opt ::= */ - case 170: /* groupby_opt ::= */ yytestcase(yyruleno==170); -{yymsp[1].minor.yy165 = 0;} + case 165: /* orderby_opt ::= */ +{yymsp[1].minor.yy131 = 0;} break; - case 163: /* orderby_opt ::= ORDER BY sortlist */ - case 171: /* groupby_opt ::= GROUP BY grouplist */ yytestcase(yyruleno==171); -{yymsp[-2].minor.yy165 = yymsp[0].minor.yy165;} + case 166: /* orderby_opt ::= ORDER BY sortlist */ +{yymsp[-2].minor.yy131 = yymsp[0].minor.yy131;} break; - case 164: /* sortlist ::= sortlist COMMA item sortorder */ + case 167: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy165 = tVariantListAppend(yymsp[-3].minor.yy165, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); + yylhsminor.yy131 = tVariantListAppend(yymsp[-3].minor.yy131, &yymsp[-1].minor.yy516, yymsp[0].minor.yy42); } - yymsp[-3].minor.yy165 = 
yylhsminor.yy165; + yymsp[-3].minor.yy131 = yylhsminor.yy131; break; - case 165: /* sortlist ::= item sortorder */ + case 168: /* sortlist ::= item sortorder */ { - yylhsminor.yy165 = tVariantListAppend(NULL, &yymsp[-1].minor.yy134, yymsp[0].minor.yy46); + yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[-1].minor.yy516, yymsp[0].minor.yy42); } - yymsp[-1].minor.yy165 = yylhsminor.yy165; + yymsp[-1].minor.yy131 = yylhsminor.yy131; break; - case 166: /* item ::= ids cpxName */ + case 169: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yylhsminor.yy134, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy516, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy134 = yylhsminor.yy134; + yymsp[-1].minor.yy516 = yylhsminor.yy516; break; - case 167: /* sortorder ::= ASC */ -{yymsp[0].minor.yy46 = TSDB_ORDER_ASC; } + case 170: /* sortorder ::= ASC */ +{ yymsp[0].minor.yy42 = TSDB_ORDER_ASC; } break; - case 168: /* sortorder ::= DESC */ -{yymsp[0].minor.yy46 = TSDB_ORDER_DESC;} + case 171: /* sortorder ::= DESC */ +{ yymsp[0].minor.yy42 = TSDB_ORDER_DESC;} break; - case 169: /* sortorder ::= */ -{yymsp[1].minor.yy46 = TSDB_ORDER_ASC;} + case 172: /* sortorder ::= */ +{ yymsp[1].minor.yy42 = TSDB_ORDER_ASC; } break; - case 172: /* grouplist ::= grouplist COMMA item */ + case 173: /* groupby_opt ::= */ +{ yymsp[1].minor.yy131 = 0;} + break; + case 174: /* groupby_opt ::= GROUP BY grouplist */ +{ yymsp[-2].minor.yy131 = yymsp[0].minor.yy131;} + break; + case 175: /* grouplist ::= grouplist COMMA item */ { - yylhsminor.yy165 = tVariantListAppend(yymsp[-2].minor.yy165, &yymsp[0].minor.yy134, -1); + yylhsminor.yy131 = tVariantListAppend(yymsp[-2].minor.yy131, &yymsp[0].minor.yy516, -1); } - yymsp[-2].minor.yy165 = yylhsminor.yy165; + yymsp[-2].minor.yy131 = yylhsminor.yy131; break; - case 173: /* grouplist ::= item */ + case 176: /* grouplist ::= item */ { - yylhsminor.yy165 = tVariantListAppend(NULL, &yymsp[0].minor.yy134, -1); + yylhsminor.yy131 = tVariantListAppend(NULL, &yymsp[0].minor.yy516, -1); } - yymsp[0].minor.yy165 = yylhsminor.yy165; + yymsp[0].minor.yy131 = yylhsminor.yy131; break; - case 174: /* having_opt ::= */ - case 184: /* where_opt ::= */ yytestcase(yyruleno==184); - case 222: /* expritem ::= */ yytestcase(yyruleno==222); -{yymsp[1].minor.yy64 = 0;} + case 177: /* having_opt ::= */ + case 187: /* where_opt ::= */ yytestcase(yyruleno==187); + case 225: /* expritem ::= */ yytestcase(yyruleno==225); +{yymsp[1].minor.yy420 = 0;} break; - case 175: /* having_opt ::= HAVING expr */ - case 185: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==185); -{yymsp[-1].minor.yy64 = yymsp[0].minor.yy64;} + case 178: /* having_opt ::= HAVING expr */ + case 188: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==188); +{yymsp[-1].minor.yy420 = yymsp[0].minor.yy420;} break; - case 176: /* limit_opt ::= */ - case 180: /* slimit_opt ::= */ yytestcase(yyruleno==180); -{yymsp[1].minor.yy216.limit = -1; yymsp[1].minor.yy216.offset = 0;} + case 179: /* limit_opt ::= */ + case 183: /* slimit_opt ::= */ yytestcase(yyruleno==183); +{yymsp[1].minor.yy284.limit = -1; yymsp[1].minor.yy284.offset = 0;} break; - case 177: /* limit_opt ::= LIMIT signed */ - case 181: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==181); -{yymsp[-1].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-1].minor.yy216.offset = 0;} + case 180: /* limit_opt ::= LIMIT signed */ + case 184: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==184); 
+{yymsp[-1].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-1].minor.yy284.offset = 0;} break; - case 178: /* limit_opt ::= LIMIT signed OFFSET signed */ -{ yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} + case 181: /* limit_opt ::= LIMIT signed OFFSET signed */ +{ yymsp[-3].minor.yy284.limit = yymsp[-2].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[0].minor.yy459;} break; - case 179: /* limit_opt ::= LIMIT signed COMMA signed */ -{ yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} + case 182: /* limit_opt ::= LIMIT signed COMMA signed */ +{ yymsp[-3].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[-2].minor.yy459;} break; - case 182: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ -{yymsp[-3].minor.yy216.limit = yymsp[-2].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[0].minor.yy207;} + case 185: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ +{yymsp[-3].minor.yy284.limit = yymsp[-2].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[0].minor.yy459;} break; - case 183: /* slimit_opt ::= SLIMIT signed COMMA signed */ -{yymsp[-3].minor.yy216.limit = yymsp[0].minor.yy207; yymsp[-3].minor.yy216.offset = yymsp[-2].minor.yy207;} + case 186: /* slimit_opt ::= SLIMIT signed COMMA signed */ +{yymsp[-3].minor.yy284.limit = yymsp[0].minor.yy459; yymsp[-3].minor.yy284.offset = yymsp[-2].minor.yy459;} break; - case 186: /* expr ::= LP expr RP */ -{yylhsminor.yy64 = yymsp[-1].minor.yy64; yylhsminor.yy64->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy64->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 189: /* expr ::= LP expr RP */ +{yylhsminor.yy420 = yymsp[-1].minor.yy420; yylhsminor.yy420->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy420->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 187: /* expr ::= ID */ -{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 190: /* expr ::= ID */ +{ yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 188: /* expr ::= ID DOT ID */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 191: /* expr ::= ID DOT ID */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 189: /* expr ::= ID DOT STAR */ -{yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 192: /* expr ::= ID DOT STAR */ +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 190: /* expr ::= INTEGER */ -{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 193: /* expr ::= INTEGER */ +{ yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 191: /* expr ::= MINUS INTEGER */ - case 192: /* expr ::= PLUS INTEGER */ 
yytestcase(yyruleno==192); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy64 = yylhsminor.yy64; + case 194: /* expr ::= MINUS INTEGER */ + case 195: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==195); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy420 = yylhsminor.yy420; break; - case 193: /* expr ::= FLOAT */ -{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 196: /* expr ::= FLOAT */ +{ yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 194: /* expr ::= MINUS FLOAT */ - case 195: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==195); -{yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} - yymsp[-1].minor.yy64 = yylhsminor.yy64; + case 197: /* expr ::= MINUS FLOAT */ + case 198: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==198); +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy420 = yylhsminor.yy420; break; - case 196: /* expr ::= STRING */ -{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 199: /* expr ::= STRING */ +{ yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 197: /* expr ::= NOW */ -{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 200: /* expr ::= NOW */ +{ yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 198: /* expr ::= VARIABLE */ -{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 201: /* expr ::= VARIABLE */ +{ yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 199: /* expr ::= BOOL */ -{yylhsminor.yy64 = tSQLExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 202: /* expr ::= BOOL */ +{ yylhsminor.yy420 = tSqlExprIdValueCreate(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 200: /* expr ::= ID LP exprlist RP */ -{ yylhsminor.yy64 = tSQLExprCreateFunction(yymsp[-1].minor.yy290, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy64 = yylhsminor.yy64; + case 203: /* expr ::= ID LP exprlist RP */ +{ yylhsminor.yy420 = tSqlExprCreateFunction(yymsp[-1].minor.yy478, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, + yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy420 = yylhsminor.yy420; break; - case 201: /* expr ::= ID LP STAR RP */ -{ yylhsminor.yy64 = tSQLExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy64 = yylhsminor.yy64; + case 204: /* expr ::= ID LP STAR RP */ +{ yylhsminor.yy420 = + tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, 
yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy420 = yylhsminor.yy420; break; - case 202: /* expr ::= expr IS NULL */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, NULL, TK_ISNULL);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 205: /* expr ::= expr IS NULL */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, NULL, TK_ISNULL);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 203: /* expr ::= expr IS NOT NULL */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-3].minor.yy64, NULL, TK_NOTNULL);} - yymsp[-3].minor.yy64 = yylhsminor.yy64; + case 206: /* expr ::= expr IS NOT NULL */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-3].minor.yy420, NULL, TK_NOTNULL);} + yymsp[-3].minor.yy420 = yylhsminor.yy420; break; - case 204: /* expr ::= expr LT expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LT);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 207: /* expr ::= expr LT expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_LT);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 205: /* expr ::= expr GT expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GT);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 208: /* expr ::= expr GT expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_GT);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 206: /* expr ::= expr LE expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LE);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 209: /* expr ::= expr LE expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_LE);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 207: /* expr ::= expr GE expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_GE);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 210: /* expr ::= expr GE expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_GE);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 208: /* expr ::= expr NE expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_NE);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 211: /* expr ::= expr NE expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_NE);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 209: /* expr ::= expr EQ expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_EQ);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 212: /* expr ::= expr EQ expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_EQ);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 210: /* expr ::= expr AND expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_AND);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 213: /* expr ::= expr AND expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_AND);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 211: /* expr ::= expr OR expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_OR); } - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 214: /* expr ::= expr OR expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_OR); } + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - 
case 212: /* expr ::= expr PLUS expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_PLUS); } - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 215: /* expr ::= expr PLUS expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_PLUS); } + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 213: /* expr ::= expr MINUS expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_MINUS); } - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 216: /* expr ::= expr MINUS expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_MINUS); } + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 214: /* expr ::= expr STAR expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_STAR); } - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 217: /* expr ::= expr STAR expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_STAR); } + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 215: /* expr ::= expr SLASH expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_DIVIDE);} - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 218: /* expr ::= expr SLASH expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_DIVIDE);} + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 216: /* expr ::= expr REM expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_REM); } - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 219: /* expr ::= expr REM expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_REM); } + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 217: /* expr ::= expr LIKE expr */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-2].minor.yy64, yymsp[0].minor.yy64, TK_LIKE); } - yymsp[-2].minor.yy64 = yylhsminor.yy64; + case 220: /* expr ::= expr LIKE expr */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-2].minor.yy420, yymsp[0].minor.yy420, TK_LIKE); } + yymsp[-2].minor.yy420 = yylhsminor.yy420; break; - case 218: /* expr ::= expr IN LP exprlist RP */ -{yylhsminor.yy64 = tSQLExprCreate(yymsp[-4].minor.yy64, (tSQLExpr*)yymsp[-1].minor.yy290, TK_IN); } - yymsp[-4].minor.yy64 = yylhsminor.yy64; + case 221: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy420 = tSqlExprCreate(yymsp[-4].minor.yy420, (tSQLExpr *)yymsp[-1].minor.yy478, TK_IN); } + yymsp[-4].minor.yy420 = yylhsminor.yy420; break; - case 219: /* exprlist ::= exprlist COMMA expritem */ -{yylhsminor.yy290 = tSQLExprListAppend(yymsp[-2].minor.yy290,yymsp[0].minor.yy64,0);} - yymsp[-2].minor.yy290 = yylhsminor.yy290; + case 222: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy478 = tSqlExprListAppend(yymsp[-2].minor.yy478, yymsp[0].minor.yy420, 0);} + yymsp[-2].minor.yy478 = yylhsminor.yy478; break; - case 220: /* exprlist ::= expritem */ -{yylhsminor.yy290 = tSQLExprListAppend(0,yymsp[0].minor.yy64,0);} - yymsp[0].minor.yy290 = yylhsminor.yy290; + case 223: /* exprlist ::= expritem */ +{yylhsminor.yy478 = tSqlExprListAppend(0, yymsp[0].minor.yy420, 0);} + yymsp[0].minor.yy478 = yylhsminor.yy478; break; - case 221: /* expritem ::= expr */ -{yylhsminor.yy64 = yymsp[0].minor.yy64;} - yymsp[0].minor.yy64 = yylhsminor.yy64; + case 224: /* expritem ::= expr */ +{yylhsminor.yy420 = yymsp[0].minor.yy420;} + yymsp[0].minor.yy420 = yylhsminor.yy420; break; - case 223: /* cmd ::= RESET QUERY CACHE */ + case 
226: /* cmd ::= RESET QUERY CACHE */ { setDCLSQLElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 224: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 227: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy165, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); + SAlterTableSQL* pAlterTable = + tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_COLUMN); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 225: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 228: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); SArray* K = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); + SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, NULL, K, TSDB_ALTER_TABLE_DROP_COLUMN); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 226: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 229: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy165, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); + SAlterTableSQL* pAlterTable = + tAlterTableSqlElems(&yymsp[-4].minor.yy0, yymsp[0].minor.yy131, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 227: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 230: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; toTSDBType(yymsp[0].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); + SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&yymsp[-4].minor.yy0, NULL, A, TSDB_ALTER_TABLE_DROP_TAG_COLUMN); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 228: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 231: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -2830,30 +2897,34 @@ static void yy_reduce( toTSDBType(yymsp[0].minor.yy0.type); A = tVariantListAppendToken(A, &yymsp[0].minor.yy0, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN); - setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); + SAlterTableSQL* pAlterTable = + tAlterTableSqlElems(&yymsp[-5].minor.yy0, NULL, A, TSDB_ALTER_TABLE_CHANGE_TAG_COLUMN); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 229: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 232: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = 
tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy134, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy516, -1); - SAlterTableSQL* pAlterTable = tAlterTableSQLElems(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); - setSQLInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); + SAlterTableSQL* pAlterTable = tAlterTableSqlElems(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL); + setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 230: /* cmd ::= KILL CONNECTION INTEGER */ -{setKillSQL(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} + case 233: /* cmd ::= KILL CONNECTION INTEGER */ +{ + setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 231: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ -{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} + case 234: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ +{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); + setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 232: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ -{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSQL(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} + case 235: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ +{yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); + setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: break; diff --git a/src/rpc/inc/rpcLog.h b/src/rpc/inc/rpcLog.h index 10e8476691..3c1614cda2 100644 --- a/src/rpc/inc/rpcLog.h +++ b/src/rpc/inc/rpcLog.h @@ -23,7 +23,7 @@ extern "C" { #include "tlog.h" extern int32_t rpcDebugFlag; -extern int32_t tscEmbedded; +extern uint32_t tscEmbedded; #define tFatal(...) { if (rpcDebugFlag & DEBUG_FATAL) { taosPrintLog("RPC FATAL ", tscEmbedded ? 255 : rpcDebugFlag, __VA_ARGS__); }} #define tError(...) { if (rpcDebugFlag & DEBUG_ERROR) { taosPrintLog("RPC ERROR ", tscEmbedded ? 
255 : rpcDebugFlag, __VA_ARGS__); }} diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 587c079fe6..a0c1649556 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -313,7 +313,7 @@ void *rpcOpen(const SRpcInit *pInit) { return NULL; } - tDebug("%s rpc is openned, threads:%d sessions:%d", pRpc->label, pRpc->numOfThreads, pInit->sessions); + tDebug("%s rpc is opened, threads:%d sessions:%d", pRpc->label, pRpc->numOfThreads, pInit->sessions); return pRpc; } @@ -631,15 +631,19 @@ static void rpcReleaseConn(SRpcConn *pConn) { // if there is an outgoing message, free it if (pConn->outType && pConn->pReqMsg) { SRpcReqContext *pContext = pConn->pContext; - if (pContext->pRsp) { + if (pContext) { + if (pContext->pRsp) { // for synchronous API, post semaphore to unblock app - pContext->pRsp->code = TSDB_CODE_RPC_APP_ERROR; - pContext->pRsp->pCont = NULL; - pContext->pRsp->contLen = 0; - tsem_post(pContext->pSem); + pContext->pRsp->code = TSDB_CODE_RPC_APP_ERROR; + pContext->pRsp->pCont = NULL; + pContext->pRsp->contLen = 0; + tsem_post(pContext->pSem); + } + pContext->pConn = NULL; + taosRemoveRef(tsRpcRefId, pContext->rid); + } else { + assert(0); } - pContext->pConn = NULL; - taosRemoveRef(tsRpcRefId, pContext->rid); } } @@ -1083,7 +1087,11 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { if (code == TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE) { rpcCloseConn(pConn); } - tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code); + if (pHead->msgType + 1 > 1 && pHead->msgType+1 < TSDB_MSG_TYPE_MAX) { + tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code); + } else { + tError("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType], code); + } } } else { // msg is passed to app only parsing is ok rpcProcessIncomingMsg(pConn, pHead, pContext); diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 2850046d05..178b96c423 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -242,7 +242,14 @@ static void *taosAcceptTcpConnection(void *arg) { taosKeepTcpAlive(connFd); struct timeval to={1, 0}; - taosSetSockOpt(connFd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to)); + int32_t ret = taosSetSockOpt(connFd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to)); + if (ret != 0) { + taosCloseSocket(connFd); + tError("%s failed to set recv timeout fd(%s)for connection from:%s:%hu", pServerObj->label, strerror(errno), + taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); + continue; + } + // pick up the thread to handle this connection pThreadObj = pServerObj->pThreadObj[threadId]; diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 5721525ade..faa6d40da3 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -188,7 +188,8 @@ int main(int argc, char *argv[]) { tInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs*appThreads); tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0*numOfReqs*appThreads/usedTime, msgSize); - getchar(); + int ch = getchar(); + UNUSED(ch); taosCloseLog(); diff --git a/src/sync/CMakeLists.txt b/src/sync/CMakeLists.txt index 60271c771c..aa38a56f38 100644 --- a/src/sync/CMakeLists.txt +++ b/src/sync/CMakeLists.txt @@ -5,12 +5,12 @@ INCLUDE_DIRECTORIES(inc) AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) - LIST(REMOVE_ITEM SRC src/tarbitrator.c) + 
LIST(REMOVE_ITEM SRC src/syncArbitrator.c) ADD_LIBRARY(sync ${SRC}) TARGET_LINK_LIBRARIES(sync tutil pthread common) - LIST(APPEND BIN_SRC src/tarbitrator.c) - LIST(APPEND BIN_SRC src/taosTcpPool.c) + LIST(APPEND BIN_SRC src/syncArbitrator.c) + LIST(APPEND BIN_SRC src/syncTcp.c) ADD_EXECUTABLE(tarbitrator ${BIN_SRC}) TARGET_LINK_LIBRARIES(tarbitrator sync common osdetail tutil) diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index 6d0c52284f..d855c651f9 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -13,12 +13,14 @@ * along with this program. If not, see . */ -#ifndef TDENGINE_SYNCINT_H -#define TDENGINE_SYNCINT_H +#ifndef TDENGINE_SYNC_INT_H +#define TDENGINE_SYNC_INT_H #ifdef __cplusplus extern "C" { #endif +#include "syncMsg.h" +#include "twal.h" #define sFatal(...) { if (sDebugFlag & DEBUG_FATAL) { taosPrintLog("SYN FATAL ", sDebugFlag, __VA_ARGS__); }} #define sError(...) { if (sDebugFlag & DEBUG_ERROR) { taosPrintLog("SYN ERROR ", sDebugFlag, __VA_ARGS__); }} @@ -27,83 +29,22 @@ extern "C" { #define sDebug(...) { if (sDebugFlag & DEBUG_DEBUG) { taosPrintLog("SYN ", sDebugFlag, __VA_ARGS__); }} #define sTrace(...) { if (sDebugFlag & DEBUG_TRACE) { taosPrintLog("SYN ", sDebugFlag, __VA_ARGS__); }} -typedef enum { - TAOS_SMSG_SYNC_DATA = 1, - TAOS_SMSG_FORWARD = 2, - TAOS_SMSG_FORWARD_RSP = 3, - TAOS_SMSG_SYNC_REQ = 4, - TAOS_SMSG_SYNC_RSP = 5, - TAOS_SMSG_SYNC_MUST = 6, - TAOS_SMSG_STATUS = 7, - TAOS_SMSG_SYNC_DATA_RSP = 8, -} ESyncMsgType; +#define SYNC_TCP_THREADS 2 +#define SYNC_MAX_NUM 2 #define SYNC_MAX_SIZE (TSDB_MAX_WAL_SIZE + sizeof(SWalHead) + sizeof(SSyncHead) + 16) #define SYNC_RECV_BUFFER_SIZE (5*1024*1024) -#define SYNC_FWD_TIMER 300 -#define SYNC_ROLE_TIMER 10000 -#define SYNC_WAIT_AFTER_CHOOSE_MASTER 3 + +#define SYNC_MAX_FWDS 512 +#define SYNC_FWD_TIMER 300 +#define SYNC_ROLE_TIMER 15000 // ms +#define SYNC_CHECK_INTERVAL 1 // ms +#define SYNC_WAIT_AFTER_CHOOSE_MASTER 10 // ms #define nodeRole pNode->peerInfo[pNode->selfIndex]->role #define nodeVersion pNode->peerInfo[pNode->selfIndex]->version #define nodeSStatus pNode->peerInfo[pNode->selfIndex]->sstatus -#pragma pack(push, 1) - -typedef struct { - char type; // msg type - char pversion; // protocol version - char reserved[6]; // not used - int32_t vgId; // vg ID - int32_t len; // content length, does not include head - // char cont[]; // message content starts from here -} SSyncHead; - -typedef struct { - SSyncHead syncHead; - uint16_t port; - char fqdn[TSDB_FQDN_LEN]; - int32_t sourceId; // only for arbitrator -} SFirstPkt; - -typedef struct { - int8_t sync; -} SFirstPktRsp; - -typedef struct { - int8_t role; - uint64_t version; -} SPeerStatus; - -typedef struct { - int8_t role; - int8_t ack; - int8_t type; - int8_t reserved[3]; - uint16_t tranId; - uint64_t version; - SPeerStatus peersStatus[]; -} SPeersStatus; - -typedef struct { - char name[TSDB_FILENAME_LEN]; - uint32_t magic; - uint32_t index; - uint64_t fversion; - int64_t size; -} SFileInfo; - -typedef struct { - int8_t sync; -} SFileAck; - -typedef struct { - uint64_t version; - int32_t code; -} SFwdRsp; - -#pragma pack(pop) - typedef struct { char * buffer; int32_t bufferSize; diff --git a/src/sync/inc/syncMsg.h b/src/sync/inc/syncMsg.h new file mode 100644 index 0000000000..73f4223c88 --- /dev/null +++ b/src/sync/inc/syncMsg.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_SYNC_MSG_H +#define TDENGINE_SYNC_MSG_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "tsync.h" + +typedef enum { + TAOS_SMSG_START = 0, + TAOS_SMSG_SYNC_DATA = 1, + TAOS_SMSG_SYNC_DATA_RSP = 2, + TAOS_SMSG_SYNC_FWD = 3, + TAOS_SMSG_SYNC_FWD_RSP = 4, + TAOS_SMSG_SYNC_REQ = 5, + TAOS_SMSG_SYNC_REQ_RSP = 6, + TAOS_SMSG_SYNC_MUST = 7, + TAOS_SMSG_SYNC_MUST_RSP = 8, + TAOS_SMSG_STATUS = 9, + TAOS_SMSG_STATUS_RSP = 10, + TAOS_SMSG_SETUP = 11, + TAOS_SMSG_SETUP_RSP = 12, + TAOS_SMSG_SYNC_FILE = 13, + TAOS_SMSG_SYNC_FILE_RSP = 14, + TAOS_SMSG_END = 15, +} ESyncMsgType; + +typedef enum { + SYNC_STATUS_BROADCAST, + SYNC_STATUS_BROADCAST_RSP, + SYNC_STATUS_SETUP_CONN, + SYNC_STATUS_SETUP_CONN_RSP, + SYNC_STATUS_EXCHANGE_DATA, + SYNC_STATUS_EXCHANGE_DATA_RSP, + SYNC_STATUS_CHECK_ROLE, + SYNC_STATUS_CHECK_ROLE_RSP +} ESyncStatusType; + +#pragma pack(push, 1) + +typedef struct { + int8_t type; // msg type + int8_t protocol; // protocol version + uint16_t signature; // fixed value + int32_t code; // + int32_t cId; // cluster Id + int32_t vgId; // vg ID + int32_t len; // content length, does not include head + uint32_t cksum; +} SSyncHead; + +typedef struct { + SSyncHead head; + uint16_t port; + uint16_t tranId; + int32_t sourceId; // only for arbitrator + char fqdn[TSDB_FQDN_LEN]; +} SSyncMsg; + +typedef struct { + SSyncHead head; + int8_t sync; + int8_t reserved; + uint16_t tranId; + int8_t reserverd[4]; +} SSyncRsp; + +typedef struct { + int8_t role; + uint64_t version; +} SPeerStatus; + +typedef struct { + SSyncHead head; + int8_t role; + int8_t ack; + int8_t type; + int8_t reserved[3]; + uint16_t tranId; + uint64_t version; + SPeerStatus peersStatus[TAOS_SYNC_MAX_REPLICA]; +} SPeersStatus; + +typedef struct { + SSyncHead head; + char name[TSDB_FILENAME_LEN]; + uint32_t magic; + uint32_t index; + uint64_t fversion; + int64_t size; +} SFileInfo; + +typedef struct { + SSyncHead head; + int8_t sync; +} SFileAck; + +typedef struct { + SSyncHead head; + uint64_t version; + int32_t code; +} SFwdRsp; + +#pragma pack(pop) + +#define SYNC_PROTOCOL_VERSION 1 +#define SYNC_SIGNATURE ((uint16_t)(0xCDEF)) + +extern char *statusType[]; + +uint16_t syncGenTranId(); +int32_t syncCheckHead(SSyncHead *pHead); + +void syncBuildSyncFwdMsg(SSyncHead *pHead, int32_t vgId, int32_t len); +void syncBuildSyncFwdRsp(SFwdRsp *pMsg, int32_t vgId, uint64_t version, int32_t code); +void syncBuildSyncReqMsg(SSyncMsg *pMsg, int32_t vgId); +void syncBuildSyncDataMsg(SSyncMsg *pMsg, int32_t vgId); +void syncBuildSyncSetupMsg(SSyncMsg *pMsg, int32_t vgId); +void syncBuildPeersStatus(SPeersStatus *pMsg, int32_t vgId); + +void syncBuildFileAck(SFileAck *pMsg, int32_t vgId); +void syncBuildFileInfo(SFileInfo *pMsg, int32_t vgId); + +#ifdef __cplusplus +} +#endif + +#endif // TDENGINE_VNODEPEER_H diff --git a/src/sync/inc/taosTcpPool.h b/src/sync/inc/syncTcp.h similarity index 77% rename from src/sync/inc/taosTcpPool.h rename to src/sync/inc/syncTcp.h index 
41043b0cd4..7db51f2a71 100644 --- a/src/sync/inc/taosTcpPool.h +++ b/src/sync/inc/syncTcp.h @@ -13,16 +13,13 @@ * along with this program. If not, see . */ -#ifndef TDENGINE_TCP_POOL_H -#define TDENGINE_TCP_POOL_H +#ifndef TDENGINE_SYNC_TCP_POOL_H +#define TDENGINE_SYNC_TCP_POOL_H #ifdef __cplusplus extern "C" { #endif -typedef void *ttpool_h; -typedef void *tthread_h; - typedef struct { int32_t numOfThreads; uint32_t serverIp; @@ -33,10 +30,10 @@ typedef struct { void (*processIncomingConn)(int32_t fd, uint32_t ip); } SPoolInfo; -ttpool_h taosOpenTcpThreadPool(SPoolInfo *pInfo); -void taosCloseTcpThreadPool(ttpool_h); -void * taosAllocateTcpConn(void *, void *ahandle, int32_t connFd); -void taosFreeTcpConn(void *); +void *syncOpenTcpThreadPool(SPoolInfo *pInfo); +void syncCloseTcpThreadPool(void *); +void *syncAllocateTcpConn(void *, void *ahandle, int32_t connFd); +void syncFreeTcpConn(void *); #ifdef __cplusplus } diff --git a/src/sync/src/tarbitrator.c b/src/sync/src/syncArbitrator.c similarity index 82% rename from src/sync/src/tarbitrator.c rename to src/sync/src/syncArbitrator.c index 4016042de2..1cb2b8f302 100644 --- a/src/sync/src/tarbitrator.c +++ b/src/sync/src/syncArbitrator.c @@ -22,17 +22,17 @@ #include "tsocket.h" #include "tglobal.h" #include "taoserror.h" -#include "taosTcpPool.h" #include "twal.h" #include "tsync.h" #include "syncInt.h" +#include "syncTcp.h" -static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context); -static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp); -static void arbProcessBrokenLink(void *param); -static int32_t arbProcessPeerMsg(void *param, void *buffer); -static tsem_t tsArbSem; -static ttpool_h tsArbTcpPool; +static void arbSignalHandler(int32_t signum, siginfo_t *sigInfo, void *context); +static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp); +static void arbProcessBrokenLink(void *param); +static int32_t arbProcessPeerMsg(void *param, void *buffer); +static tsem_t tsArbSem; +static void * tsArbTcpPool; typedef struct { char id[TSDB_EP_LEN + 24]; @@ -90,7 +90,7 @@ int32_t main(int32_t argc, char *argv[]) { info.processBrokenLink = arbProcessBrokenLink; info.processIncomingMsg = arbProcessPeerMsg; info.processIncomingConn = arbProcessIncommingConnection; - tsArbTcpPool = taosOpenTcpThreadPool(&info); + tsArbTcpPool = syncOpenTcpThreadPool(&info); if (tsArbTcpPool == NULL) { sDebug("failed to open TCP thread pool, exit..."); @@ -101,8 +101,8 @@ int32_t main(int32_t argc, char *argv[]) { tsem_wait(&tsArbSem); - taosCloseTcpThreadPool(tsArbTcpPool); - sInfo("TAOS arbitrator is shut down\n"); + syncCloseTcpThreadPool(tsArbTcpPool); + sInfo("TAOS arbitrator is shut down"); closelog(); return 0; @@ -113,9 +113,9 @@ static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { tinet_ntoa(ipstr, sourceIp); sDebug("peer TCP connection from ip:%s", ipstr); - SFirstPkt firstPkt; - if (taosReadMsg(connFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { - sError("failed to read peer first pkt from ip:%s since %s", ipstr, strerror(errno)); + SSyncMsg msg; + if (taosReadMsg(connFd, &msg, sizeof(SSyncMsg)) != sizeof(SSyncMsg)) { + sError("failed to read peer sync msg from ip:%s since %s", ipstr, strerror(errno)); taosCloseSocket(connFd); return; } @@ -127,9 +127,9 @@ static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { return; } - firstPkt.fqdn[sizeof(firstPkt.fqdn) - 1] = 0; - snprintf(pNode->id, sizeof(pNode->id), "vgId:%d, peer:%s:%d", 
firstPkt.sourceId, firstPkt.fqdn, firstPkt.port); - if (firstPkt.syncHead.vgId) { + msg.fqdn[TSDB_FQDN_LEN - 1] = 0; + snprintf(pNode->id, sizeof(pNode->id), "vgId:%d, peer:%s:%d", msg.sourceId, msg.fqdn, msg.port); + if (msg.head.vgId) { sDebug("%s, vgId in head is not zero, close the connection", pNode->id); tfree(pNode); taosCloseSocket(connFd); @@ -138,7 +138,7 @@ static void arbProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { sDebug("%s, arbitrator request is accepted", pNode->id); pNode->nodeFd = connFd; - pNode->pConn = taosAllocateTcpConn(tsArbTcpPool, pNode, connFd); + pNode->pConn = syncAllocateTcpConn(tsArbTcpPool, pNode, connFd); return; } @@ -156,8 +156,8 @@ static int32_t arbProcessPeerMsg(void *param, void *buffer) { int32_t bytes = 0; char * cont = (char *)buffer; - int32_t hlen = taosReadMsg(pNode->nodeFd, &head, sizeof(head)); - if (hlen != sizeof(head)) { + int32_t hlen = taosReadMsg(pNode->nodeFd, &head, sizeof(SSyncHead)); + if (hlen != sizeof(SSyncHead)) { sDebug("%s, failed to read msg, hlen:%d", pNode->id, hlen); return -1; } diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index c86265d556..ac79b48606 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -23,27 +23,19 @@ #include "tsocket.h" #include "tglobal.h" #include "taoserror.h" -#include "taosTcpPool.h" #include "tqueue.h" #include "twal.h" #include "tsync.h" +#include "syncTcp.h" #include "syncInt.h" -// global configurable -int32_t tsMaxSyncNum = 2; -int32_t tsSyncTcpThreads = 2; -int32_t tsMaxWatchFiles = 500; -int32_t tsMaxFwdInfo = 200; -int32_t tsSyncTimer = 1; +int32_t tsSyncNum = 0; // number of sync in process in whole system +char tsNodeFqdn[TSDB_FQDN_LEN] = {0}; -// module global, not configurable -int32_t tsSyncNum; // number of sync in process in whole system -char tsNodeFqdn[TSDB_FQDN_LEN]; - -static ttpool_h tsTcpPool; -static void * tsSyncTmrCtrl = NULL; -static void * tsVgIdHash; -static int32_t tsSyncRefId = -1; +static void * tsTcpPool = NULL; +static void * tsSyncTmrCtrl = NULL; +static void * tsVgIdHash = NULL; +static int32_t tsSyncRefId = -1; // local functions static void syncProcessSyncRequest(char *pMsg, SSyncPeer *pPeer); @@ -60,7 +52,7 @@ static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode); static void syncMonitorFwdInfos(void *param, void *tmrId); static void syncMonitorNodeRole(void *param, void *tmrId); static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code); -static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle); +static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle); static void syncRestartPeer(SSyncPeer *pPeer); static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle, int32_t qtyp); static SSyncPeer *syncAddPeer(SSyncNode *pNode, const SNodeInfo *pInfo); @@ -81,36 +73,10 @@ char *syncStatus[] = { "invalid" }; -typedef enum { - SYNC_STATUS_BROADCAST, - SYNC_STATUS_BROADCAST_RSP, - SYNC_STATUS_SETUP_CONN, - SYNC_STATUS_SETUP_CONN_RSP, - SYNC_STATUS_EXCHANGE_DATA, - SYNC_STATUS_EXCHANGE_DATA_RSP, - SYNC_STATUS_CHECK_ROLE, - SYNC_STATUS_CHECK_ROLE_RSP -} ESyncStatusType; - -char *statusType[] = { - "broadcast", - "broadcast-rsp", - "setup-conn", - "setup-conn-rsp", - "exchange-data", - "exchange-data-rsp", - "check-role", - "check-role-rsp" -}; - -uint16_t syncGenTranId() { - return taosRand() & 0XFFFF; -} - int32_t syncInit() { SPoolInfo info = {0}; - info.numOfThreads = tsSyncTcpThreads; + info.numOfThreads = 
SYNC_TCP_THREADS; info.serverIp = 0; info.port = tsSyncPort; info.bufferSize = SYNC_MAX_SIZE; @@ -118,7 +84,7 @@ int32_t syncInit() { info.processIncomingMsg = syncProcessPeerMsg; info.processIncomingConn = syncProcessIncommingConnection; - tsTcpPool = taosOpenTcpThreadPool(&info); + tsTcpPool = syncOpenTcpThreadPool(&info); if (tsTcpPool == NULL) { sError("failed to init tcpPool"); return -1; @@ -127,16 +93,16 @@ int32_t syncInit() { tsSyncTmrCtrl = taosTmrInit(1000, 50, 10000, "SYNC"); if (tsSyncTmrCtrl == NULL) { sError("failed to init tmrCtrl"); - taosCloseTcpThreadPool(tsTcpPool); + syncCloseTcpThreadPool(tsTcpPool); tsTcpPool = NULL; return -1; } tsVgIdHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (tsVgIdHash == NULL) { - sError("failed to init tsVgIdHash"); + sError("failed to init vgIdHash"); taosTmrCleanUp(tsSyncTmrCtrl); - taosCloseTcpThreadPool(tsTcpPool); + syncCloseTcpThreadPool(tsTcpPool); tsTcpPool = NULL; tsSyncTmrCtrl = NULL; return -1; @@ -156,7 +122,7 @@ int32_t syncInit() { void syncCleanUp() { if (tsTcpPool) { - taosCloseTcpThreadPool(tsTcpPool); + syncCloseTcpThreadPool(tsTcpPool); tsTcpPool = NULL; } @@ -237,7 +203,7 @@ int64_t syncStart(const SSyncInfo *pInfo) { sInfo("vgId:%d, %d replicas are configured, quorum:%d role:%s", pNode->vgId, pNode->replica, pNode->quorum, syncRole[nodeRole]); - pNode->pSyncFwds = calloc(sizeof(SSyncFwds) + tsMaxFwdInfo * sizeof(SFwdInfo), 1); + pNode->pSyncFwds = calloc(sizeof(SSyncFwds) + SYNC_MAX_FWDS * sizeof(SFwdInfo), 1); if (pNode->pSyncFwds == NULL) { sError("vgId:%d, no memory to allocate syncFwds", pNode->vgId); terrno = TAOS_SYSTEM_ERROR(errno); @@ -337,6 +303,11 @@ int32_t syncReconfig(int64_t rid, const SSyncCfg *pNewCfg) { newPeers[i] = pNode->peerInfo[j]; } + if (newPeers[i] == NULL) { + sError("vgId:%d, failed to reconfig", pNode->vgId); + return TSDB_CODE_SYN_INVALID_CONFIG; + } + if ((strcmp(pNewNode->nodeFqdn, tsNodeFqdn) == 0) && (pNewNode->nodePort == tsSyncPort)) { pNode->selfIndex = i; } @@ -385,23 +356,13 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) { SSyncPeer *pPeer = pNode->pMaster; if (pPeer && pNode->quorum > 1) { - char msg[sizeof(SSyncHead) + sizeof(SFwdRsp)] = {0}; + SFwdRsp rsp; + syncBuildSyncFwdRsp(&rsp, pNode->vgId, version, code); - SSyncHead *pHead = (SSyncHead *)msg; - pHead->type = TAOS_SMSG_FORWARD_RSP; - pHead->len = sizeof(SFwdRsp); - - SFwdRsp *pFwdRsp = (SFwdRsp *)(msg + sizeof(SSyncHead)); - pFwdRsp->version = version; - pFwdRsp->code = code; - - int32_t msgLen = sizeof(SSyncHead) + sizeof(SFwdRsp); - int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, msgLen); - - if (retLen == msgLen) { - sTrace("%s, forward-rsp is sent, code:%x hver:%" PRIu64, pPeer->id, code, version); + if (taosWriteMsg(pPeer->peerFd, &rsp, sizeof(SFwdRsp)) == sizeof(SFwdRsp)) { + sTrace("%s, forward-rsp is sent, code:0x%x hver:%" PRIu64, pPeer->id, code, version); } else { - sDebug("%s, failed to send forward ack, restart", pPeer->id); + sDebug("%s, failed to send forward-rsp, restart", pPeer->id); syncRestartConnection(pPeer); } } @@ -512,7 +473,7 @@ static void syncClosePeerConn(SSyncPeer *pPeer) { taosClose(pPeer->syncFd); if (pPeer->peerFd >= 0) { pPeer->peerFd = -1; - taosFreeTcpConn(pPeer->pConn); + syncFreeTcpConn(pPeer->pConn); } } @@ -782,7 +743,7 @@ static void syncRestartPeer(SSyncPeer *pPeer) { int32_t ret = strcmp(pPeer->fqdn, tsNodeFqdn); if (ret > 0 || (ret == 0 && pPeer->port > tsSyncPort)) { sDebug("%s, check peer 
connection in 1000 ms", pPeer->id); - taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); + taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer); } } @@ -795,7 +756,7 @@ void syncRestartConnection(SSyncPeer *pPeer) { static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - sDebug("%s, sync-req is received", pPeer->id); + sInfo("%s, sync-req is received", pPeer->id); if (pPeer->ip == 0) return; @@ -860,7 +821,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { taosTmrStopA(&pPeer->timer); // Ensure the sync of mnode not interrupted - if (pNode->vgId != 1 && tsSyncNum >= tsMaxSyncNum) { + if (pNode->vgId != 1 && tsSyncNum >= SYNC_MAX_NUM) { sInfo("%s, %d syncs are in process, try later", pPeer->id, tsSyncNum); taosTmrReset(syncTryRecoverFromMaster, 500 + (pNode->vgId * 10) % 200, pPeer, tsSyncTmrCtrl, &pPeer->timer); return; @@ -868,26 +829,20 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { sDebug("%s, try to sync", pPeer->id); - SFirstPkt firstPkt; - memset(&firstPkt, 0, sizeof(firstPkt)); - firstPkt.syncHead.type = TAOS_SMSG_SYNC_REQ; - firstPkt.syncHead.vgId = pNode->vgId; - firstPkt.syncHead.len = sizeof(firstPkt) - sizeof(SSyncHead); - tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); - firstPkt.port = tsSyncPort; - taosTmrReset(syncNotStarted, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); + SSyncMsg msg; + syncBuildSyncReqMsg(&msg, pNode->vgId); - if (taosWriteMsg(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { + taosTmrReset(syncNotStarted, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer); + + if (taosWriteMsg(pPeer->peerFd, &msg, sizeof(SSyncMsg)) != sizeof(SSyncMsg)) { sError("%s, failed to send sync-req to peer", pPeer->id); } else { - nodeSStatus = TAOS_SYNC_STATUS_START; - sInfo("%s, sync-req is sent to peer, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); + sInfo("%s, sync-req is sent to peer, tranId:%u, sstatus:%s", pPeer->id, msg.tranId, syncStatus[nodeSStatus]); } } -static void syncProcessFwdResponse(char *cont, SSyncPeer *pPeer) { +static void syncProcessFwdResponse(SFwdRsp *pFwdRsp, SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - SFwdRsp * pFwdRsp = (SFwdRsp *)cont; SSyncFwds *pSyncFwds = pNode->pSyncFwds; SFwdInfo * pFwdInfo; @@ -897,18 +852,19 @@ static void syncProcessFwdResponse(char *cont, SSyncPeer *pPeer) { if (pFirst->version <= pFwdRsp->version && pSyncFwds->fwds > 0) { // find the forwardInfo from first for (int32_t i = 0; i < pSyncFwds->fwds; ++i) { - pFwdInfo = pSyncFwds->fwdInfo + (i + pSyncFwds->first) % tsMaxFwdInfo; - if (pFwdRsp->version == pFwdInfo->version) break; + pFwdInfo = pSyncFwds->fwdInfo + (i + pSyncFwds->first) % SYNC_MAX_FWDS; + if (pFwdRsp->version == pFwdInfo->version) { + syncProcessFwdAck(pNode, pFwdInfo, pFwdRsp->code); + syncRemoveConfirmedFwdInfo(pNode); + return; + } } - - syncProcessFwdAck(pNode, pFwdInfo, pFwdRsp->code); - syncRemoveConfirmedFwdInfo(pNode); } } static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - SWalHead * pHead = (SWalHead *)cont; + SWalHead * pHead = (SWalHead *)(cont + sizeof(SSyncHead)); sTrace("%s, forward is received, hver:%" PRIu64 ", len:%d", pPeer->id, pHead->version, pHead->len); @@ -925,9 +881,8 @@ static void syncProcessForwardFromPeer(char *cont, SSyncPeer *pPeer) { } } -static void syncProcessPeersStatusMsg(char *cont, SSyncPeer 
*pPeer) { - SSyncNode * pNode = pPeer->pSyncNode; - SPeersStatus *pPeersStatus = (SPeersStatus *)cont; +static void syncProcessPeersStatusMsg(SPeersStatus *pPeersStatus, SSyncPeer *pPeer) { + SSyncNode *pNode = pPeer->pSyncNode; sDebug("%s, status is received, self:%s:%s:%" PRIu64 ", peer:%s:%" PRIu64 ", ack:%d tranId:%u type:%s pfd:%d", pPeer->id, syncRole[nodeRole], syncStatus[nodeSStatus], nodeVersion, syncRole[pPeersStatus->role], @@ -941,23 +896,22 @@ static void syncProcessPeersStatusMsg(char *cont, SSyncPeer *pPeer) { } } -static int32_t syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { +static int32_t syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead) { if (pPeer->peerFd < 0) return -1; int32_t hlen = taosReadMsg(pPeer->peerFd, pHead, sizeof(SSyncHead)); if (hlen != sizeof(SSyncHead)) { - sDebug("%s, failed to read msg, hlen:%d", pPeer->id, hlen); + sDebug("%s, failed to read msg since %s, hlen:%d", pPeer->id, tstrerror(errno), hlen); return -1; } - // head.len = htonl(head.len); - if (pHead->len < 0) { - sError("%s, invalid pkt length, hlen:%d", pPeer->id, pHead->len); + int32_t code = syncCheckHead(pHead); + if (code != 0) { + sError("%s, failed to check msg head since %s, type:%d", pPeer->id, tstrerror(code), pHead->type); return -1; } - assert(pHead->len <= TSDB_MAX_WAL_SIZE); - int32_t bytes = taosReadMsg(pPeer->peerFd, cont, pHead->len); + int32_t bytes = taosReadMsg(pPeer->peerFd, (char *)pHead + sizeof(SSyncHead), pHead->len); if (bytes != pHead->len) { sError("%s, failed to read, bytes:%d len:%d", pPeer->id, bytes, pHead->len); return -1; @@ -968,23 +922,22 @@ static int32_t syncReadPeerMsg(SSyncPeer *pPeer, SSyncHead *pHead, char *cont) { static int32_t syncProcessPeerMsg(void *param, void *buffer) { SSyncPeer *pPeer = param; - SSyncHead head; - char * cont = buffer; - + SSyncHead *pHead = buffer; SSyncNode *pNode = pPeer->pSyncNode; + pthread_mutex_lock(&pNode->mutex); - int32_t code = syncReadPeerMsg(pPeer, &head, cont); + int32_t code = syncReadPeerMsg(pPeer, pHead); if (code == 0) { - if (head.type == TAOS_SMSG_FORWARD) { - syncProcessForwardFromPeer(cont, pPeer); - } else if (head.type == TAOS_SMSG_FORWARD_RSP) { - syncProcessFwdResponse(cont, pPeer); - } else if (head.type == TAOS_SMSG_SYNC_REQ) { - syncProcessSyncRequest(cont, pPeer); - } else if (head.type == TAOS_SMSG_STATUS) { - syncProcessPeersStatusMsg(cont, pPeer); + if (pHead->type == TAOS_SMSG_SYNC_FWD) { + syncProcessForwardFromPeer(buffer, pPeer); + } else if (pHead->type == TAOS_SMSG_SYNC_FWD_RSP) { + syncProcessFwdResponse(buffer, pPeer); + } else if (pHead->type == TAOS_SMSG_SYNC_REQ) { + syncProcessSyncRequest(buffer, pPeer); + } else if (pHead->type == TAOS_SMSG_STATUS) { + syncProcessPeersStatusMsg(buffer, pPeer); } } @@ -993,37 +946,30 @@ static int32_t syncProcessPeerMsg(void *param, void *buffer) { return code; } -#define statusMsgLen sizeof(SSyncHead) + sizeof(SPeersStatus) + sizeof(SPeerStatus) * TAOS_SYNC_MAX_REPLICA - static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type, uint16_t tranId) { - SSyncNode *pNode = pPeer->pSyncNode; - char msg[statusMsgLen] = {0}; - if (pPeer->peerFd < 0 || pPeer->ip == 0) return; - SSyncHead * pHead = (SSyncHead *)msg; - SPeersStatus *pPeersStatus = (SPeersStatus *)(msg + sizeof(SSyncHead)); + SSyncNode * pNode = pPeer->pSyncNode; + SPeersStatus msg; - pHead->type = TAOS_SMSG_STATUS; - pHead->len = statusMsgLen - sizeof(SSyncHead); + memset(&msg, 0, sizeof(SPeersStatus)); + syncBuildPeersStatus(&msg, pNode->vgId); - 
pPeersStatus->version = nodeVersion; - pPeersStatus->role = nodeRole; - pPeersStatus->ack = ack; - pPeersStatus->type = type; - pPeersStatus->tranId = tranId; + msg.role = nodeRole; + msg.ack = ack; + msg.type = type; + msg.tranId = tranId; + msg.version = nodeVersion; for (int32_t i = 0; i < pNode->replica; ++i) { - pPeersStatus->peersStatus[i].role = pNode->peerInfo[i]->role; - pPeersStatus->peersStatus[i].version = pNode->peerInfo[i]->version; + msg.peersStatus[i].role = pNode->peerInfo[i]->role; + msg.peersStatus[i].version = pNode->peerInfo[i]->version; } - int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, statusMsgLen); - if (retLen == statusMsgLen) { + if (taosWriteMsg(pPeer->peerFd, &msg, sizeof(SPeersStatus)) == sizeof(SPeersStatus)) { sDebug("%s, status is sent, self:%s:%s:%" PRIu64 ", peer:%s:%s:%" PRIu64 ", ack:%d tranId:%u type:%s pfd:%d", pPeer->id, syncRole[nodeRole], syncStatus[nodeSStatus], nodeVersion, syncRole[pPeer->role], - syncStatus[pPeer->sstatus], pPeer->version, pPeersStatus->ack, pPeersStatus->tranId, - statusType[pPeersStatus->type], pPeer->peerFd); + syncStatus[pPeer->sstatus], pPeer->version, ack, tranId, statusType[type], pPeer->peerFd); } else { sDebug("%s, failed to send status msg, restart", pPeer->id); syncRestartConnection(pPeer); @@ -1043,28 +989,23 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { int32_t connFd = taosOpenTcpClientSocket(pPeer->ip, pPeer->port, 0); if (connFd < 0) { sDebug("%s, failed to open tcp socket since %s", pPeer->id, strerror(errno)); - taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); + taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer); return; } - SFirstPkt firstPkt; - memset(&firstPkt, 0, sizeof(firstPkt)); - firstPkt.syncHead.vgId = pPeer->nodeId ? pNode->vgId : 0; - firstPkt.syncHead.type = TAOS_SMSG_STATUS; - tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); - firstPkt.port = tsSyncPort; - firstPkt.sourceId = pNode->vgId; // tell arbitrator its vgId + SSyncMsg msg; + syncBuildSyncSetupMsg(&msg, pPeer->nodeId ? 
pNode->vgId : 0); - if (taosWriteMsg(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) { - sDebug("%s, connection to peer server is setup, pfd:%d sfd:%d", pPeer->id, connFd, pPeer->syncFd); + if (taosWriteMsg(connFd, &msg, sizeof(SSyncMsg)) == sizeof(SSyncMsg)) { + sDebug("%s, connection to peer server is setup, pfd:%d sfd:%d tranId:%u", pPeer->id, connFd, pPeer->syncFd, msg.tranId); pPeer->peerFd = connFd; pPeer->role = TAOS_SYNC_ROLE_UNSYNCED; - pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd); + pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer, connFd); syncAddPeerRef(pPeer); } else { sDebug("%s, failed to setup peer connection to server since %s, try later", pPeer->id, strerror(errno)); taosClose(connFd); - taosTmrReset(syncCheckPeerConnection, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); + taosTmrReset(syncCheckPeerConnection, SYNC_CHECK_INTERVAL, pPeer, tsSyncTmrCtrl, &pPeer->timer); } } @@ -1093,7 +1034,9 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) { pthread_attr_destroy(&thattr); if (ret < 0) { - sError("%s, failed to create sync thread", pPeer->id); + SSyncNode *pNode = pPeer->pSyncNode; + nodeSStatus = TAOS_SYNC_STATUS_INIT; + sError("%s, failed to create sync thread, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); taosClose(pPeer->syncFd); syncDecPeerRef(pPeer); } else { @@ -1108,14 +1051,21 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { tinet_ntoa(ipstr, sourceIp); sDebug("peer TCP connection from ip:%s", ipstr); - SFirstPkt firstPkt; - if (taosReadMsg(connFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { - sError("failed to read peer first pkt from ip:%s since %s", ipstr, strerror(errno)); + SSyncMsg msg; + if (taosReadMsg(connFd, &msg, sizeof(SSyncMsg)) != sizeof(SSyncMsg)) { + sError("failed to read peer sync msg from ip:%s since %s", ipstr, strerror(errno)); taosCloseSocket(connFd); return; } - int32_t vgId = firstPkt.syncHead.vgId; + int32_t code = syncCheckHead((SSyncHead *)(&msg)); + if (code != 0) { + sError("failed to check peer sync msg from ip:%s since %s", ipstr, strerror(code)); + taosCloseSocket(connFd); + return; + } + + int32_t vgId = msg.head.vgId; SSyncNode **ppNode = taosHashGet(tsVgIdHash, &vgId, sizeof(int32_t)); if (ppNode == NULL || *ppNode == NULL) { sError("vgId:%d, vgId could not be found", vgId); @@ -1123,30 +1073,35 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { return; } + sDebug("vgId:%d, sync msg is received, tranId:%u", vgId, msg.tranId); + SSyncNode *pNode = *ppNode; pthread_mutex_lock(&pNode->mutex); SSyncPeer *pPeer; for (i = 0; i < pNode->replica; ++i) { pPeer = pNode->peerInfo[i]; - if (pPeer && (strcmp(pPeer->fqdn, firstPkt.fqdn) == 0) && (pPeer->port == firstPkt.port)) break; + if (pPeer && (strcmp(pPeer->fqdn, msg.fqdn) == 0) && (pPeer->port == msg.port)) break; } pPeer = (i < pNode->replica) ? 
pNode->peerInfo[i] : NULL; if (pPeer == NULL) { - sError("vgId:%d, peer:%s:%u not configured", pNode->vgId, firstPkt.fqdn, firstPkt.port); + sError("vgId:%d, peer:%s:%u not configured", pNode->vgId, msg.fqdn, msg.port); taosCloseSocket(connFd); // syncSendVpeerCfgMsg(sync); } else { // first packet tells what kind of link - if (firstPkt.syncHead.type == TAOS_SMSG_SYNC_DATA) { + if (msg.head.type == TAOS_SMSG_SYNC_DATA) { pPeer->syncFd = connFd; + nodeSStatus = TAOS_SYNC_STATUS_START; + sInfo("%s, sync-data msg from master is received, tranId:%u, set sstatus:%s", pPeer->id, msg.tranId, + syncStatus[nodeSStatus]); syncCreateRestoreDataThread(pPeer); } else { sDebug("%s, TCP connection is up, pfd:%d sfd:%d, old pfd:%d", pPeer->id, connFd, pPeer->syncFd, pPeer->peerFd); syncClosePeerConn(pPeer); pPeer->peerFd = connFd; - pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd); + pPeer->pConn = syncAllocateTcpConn(tsTcpPool, pPeer, connFd); syncAddPeerRef(pPeer); sDebug("%s, ready to exchange data", pPeer->id); syncSendPeersStatusMsgToPeer(pPeer, 1, SYNC_STATUS_EXCHANGE_DATA, syncGenTranId()); @@ -1175,17 +1130,19 @@ static void syncProcessBrokenLink(void *param) { taosReleaseRef(tsSyncRefId, pNode->rid); } -static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) { +static int32_t syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) { SSyncFwds *pSyncFwds = pNode->pSyncFwds; int64_t time = taosGetTimestampMs(); - if (pSyncFwds->fwds >= tsMaxFwdInfo) { - pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo; - pSyncFwds->fwds--; + if (pSyncFwds->fwds >= SYNC_MAX_FWDS) { + // pSyncFwds->first = (pSyncFwds->first + 1) % SYNC_MAX_FWDS; + // pSyncFwds->fwds--; + sError("vgId:%d, failed to save fwd info, hver:%" PRIu64 " fwds:%d", pNode->vgId, version, pSyncFwds->fwds); + return TSDB_CODE_SYN_TOO_MANY_FWDINFO; } if (pSyncFwds->fwds > 0) { - pSyncFwds->last = (pSyncFwds->last + 1) % tsMaxFwdInfo; + pSyncFwds->last = (pSyncFwds->last + 1) % SYNC_MAX_FWDS; } SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->last; @@ -1196,6 +1153,8 @@ static void syncSaveFwdInfo(SSyncNode *pNode, uint64_t version, void *mhandle) { pSyncFwds->fwds++; sTrace("vgId:%d, fwd info is saved, hver:%" PRIu64 " fwds:%d ", pNode->vgId, version, pSyncFwds->fwds); + + return 0; } static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode) { @@ -1206,11 +1165,10 @@ static void syncRemoveConfirmedFwdInfo(SSyncNode *pNode) { SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + pSyncFwds->first; if (pFwdInfo->confirmed == 0) break; - pSyncFwds->first = (pSyncFwds->first + 1) % tsMaxFwdInfo; + pSyncFwds->first = (pSyncFwds->first + 1) % SYNC_MAX_FWDS; pSyncFwds->fwds--; if (pSyncFwds->fwds == 0) pSyncFwds->first = pSyncFwds->last; - // sDebug("vgId:%d, fwd info is removed, hver:%d, fwds:%d", - // pNode->vgId, pFwdInfo->version, pSyncFwds->fwds); + sTrace("vgId:%d, fwd info is removed, hver:%" PRIu64 " fwds:%d", pNode->vgId, pFwdInfo->version, pSyncFwds->fwds); memset(pFwdInfo, 0, sizeof(SFwdInfo)); } } @@ -1232,7 +1190,7 @@ static void syncProcessFwdAck(SSyncNode *pNode, SFwdInfo *pFwdInfo, int32_t code } if (confirm && pFwdInfo->confirmed == 0) { - sTrace("vgId:%d, forward is confirmed, hver:%" PRIu64 " code:%x", pNode->vgId, pFwdInfo->version, pFwdInfo->code); + sTrace("vgId:%d, forward is confirmed, hver:%" PRIu64 " code:0x%x", pNode->vgId, pFwdInfo->version, pFwdInfo->code); (*pNode->confirmForward)(pNode->vgId, pFwdInfo->mhandle, pFwdInfo->code); pFwdInfo->confirmed = 1; } @@ -1273,7 +1231,7 @@ 
static void syncMonitorFwdInfos(void *param, void *tmrId) { if (pSyncFwds->fwds > 0) { pthread_mutex_lock(&pNode->mutex); for (int32_t i = 0; i < pSyncFwds->fwds; ++i) { - SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % tsMaxFwdInfo; + SFwdInfo *pFwdInfo = pSyncFwds->fwdInfo + (pSyncFwds->first + i) % SYNC_MAX_FWDS; if (ABS(time - pFwdInfo->time) < 2000) break; sDebug("vgId:%d, forward info expired, hver:%" PRIu64 " curtime:%" PRIu64 " savetime:%" PRIu64, pNode->vgId, @@ -1312,20 +1270,18 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle } // always update version - sTrace("vgId:%d, forward to peer, replica:%d role:%s qtype:%s hver:%" PRIu64, pNode->vgId, pNode->replica, + sTrace("vgId:%d, update version, replica:%d role:%s qtype:%s hver:%" PRIu64, pNode->vgId, pNode->replica, syncRole[nodeRole], qtypeStr[qtype], pWalHead->version); nodeVersion = pWalHead->version; if (pNode->replica == 1 || nodeRole != TAOS_SYNC_ROLE_MASTER) return 0; - // only pkt from RPC or CQ can be forwarded + // only msg from RPC or CQ can be forwarded if (qtype != TAOS_QTYPE_RPC && qtype != TAOS_QTYPE_CQ) return 0; // a hacker way to improve the performance pSyncHead = (SSyncHead *)(((char *)pWalHead) - sizeof(SSyncHead)); - pSyncHead->type = TAOS_SMSG_FORWARD; - pSyncHead->pversion = 0; - pSyncHead->len = sizeof(SWalHead) + pWalHead->len; + syncBuildSyncFwdMsg(pSyncHead, pNode->vgId, sizeof(SWalHead) + pWalHead->len); fwdLen = pSyncHead->len + sizeof(SSyncHead); // include the WAL and SYNC head pthread_mutex_lock(&pNode->mutex); @@ -1336,8 +1292,8 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle if (pPeer->role != TAOS_SYNC_ROLE_SLAVE && pPeer->sstatus != TAOS_SYNC_STATUS_CACHE) continue; if (pNode->quorum > 1 && code == 0) { - syncSaveFwdInfo(pNode, pWalHead->version, mhandle); - code = 1; + code = syncSaveFwdInfo(pNode, pWalHead->version, mhandle); + if (code >= 0) code = 1; } int32_t retLen = taosWriteMsg(pPeer->peerFd, pSyncHead, fwdLen); @@ -1355,4 +1311,3 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle return code; } - diff --git a/src/sync/src/syncMsg.c b/src/sync/src/syncMsg.c new file mode 100644 index 0000000000..034f9a98a7 --- /dev/null +++ b/src/sync/src/syncMsg.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "tglobal.h" +#include "tchecksum.h" +#include "syncInt.h" + +char *statusType[] = { + "broadcast", + "broadcast-rsp", + "setup-conn", + "setup-conn-rsp", + "exchange-data", + "exchange-data-rsp", + "check-role", + "check-role-rsp" +}; + +uint16_t syncGenTranId() { + return taosRand() & 0XFFFF; +} + +static void syncBuildHead(SSyncHead *pHead) { + pHead->protocol = SYNC_PROTOCOL_VERSION; + pHead->signature = SYNC_SIGNATURE; + pHead->code = 0; + pHead->cId = 0; + taosCalcChecksumAppend(0, (uint8_t *)pHead, sizeof(SSyncHead)); +} + +int32_t syncCheckHead(SSyncHead *pHead) { + if (pHead->protocol != SYNC_PROTOCOL_VERSION) return TSDB_CODE_SYN_MISMATCHED_PROTOCOL; + if (pHead->signature != SYNC_SIGNATURE) return TSDB_CODE_SYN_MISMATCHED_SIGNATURE; + if (pHead->cId != 0) return TSDB_CODE_SYN_MISMATCHED_CLUSTERID; + if (pHead->len <= 0 || pHead->len > TSDB_MAX_WAL_SIZE) return TSDB_CODE_SYN_INVALID_MSGLEN; + if (pHead->type <= TAOS_SMSG_START || pHead->type >= TAOS_SMSG_END) return TSDB_CODE_SYN_INVALID_MSGTYPE; + if (!taosCheckChecksumWhole((uint8_t *)pHead, sizeof(SSyncHead))) return TSDB_CODE_SYN_INVALID_CHECKSUM; + + return TSDB_CODE_SUCCESS; +} + +void syncBuildSyncFwdMsg(SSyncHead *pHead, int32_t vgId, int32_t len) { + pHead->type = TAOS_SMSG_SYNC_FWD; + pHead->vgId = vgId; + pHead->len = len; + syncBuildHead(pHead); +} + +void syncBuildSyncFwdRsp(SFwdRsp *pMsg, int32_t vgId, uint64_t version, int32_t code) { + pMsg->head.type = TAOS_SMSG_SYNC_FWD_RSP; + pMsg->head.vgId = vgId; + pMsg->head.len = sizeof(SFwdRsp) - sizeof(SSyncHead); + syncBuildHead(&pMsg->head); + + pMsg->version = version; + pMsg->code = code; +} + +static void syncBuildMsg(SSyncMsg *pMsg, int32_t vgId, ESyncMsgType type) { + pMsg->head.type = type; + pMsg->head.vgId = vgId; + pMsg->head.len = sizeof(SSyncMsg) - sizeof(SSyncHead); + syncBuildHead(&pMsg->head); + + pMsg->port = tsSyncPort; + pMsg->tranId = syncGenTranId(); + pMsg->sourceId = vgId; + tstrncpy(pMsg->fqdn, tsNodeFqdn, TSDB_FQDN_LEN); +} + +void syncBuildSyncReqMsg(SSyncMsg *pMsg, int32_t vgId) { syncBuildMsg(pMsg, vgId, TAOS_SMSG_SYNC_REQ); } +void syncBuildSyncDataMsg(SSyncMsg *pMsg, int32_t vgId) { syncBuildMsg(pMsg, vgId, TAOS_SMSG_SYNC_DATA); } +void syncBuildSyncSetupMsg(SSyncMsg *pMsg, int32_t vgId) { syncBuildMsg(pMsg, vgId, TAOS_SMSG_SETUP); } + +void syncBuildPeersStatus(SPeersStatus *pMsg, int32_t vgId) { + pMsg->head.type = TAOS_SMSG_STATUS; + pMsg->head.vgId = vgId; + pMsg->head.len = sizeof(SPeersStatus) - sizeof(SSyncHead); + syncBuildHead(&pMsg->head); +} + +void syncBuildFileAck(SFileAck *pMsg, int32_t vgId) { + pMsg->head.type = TAOS_SMSG_SYNC_FILE_RSP; + pMsg->head.vgId = vgId; + pMsg->head.len = sizeof(SFileAck) - sizeof(SSyncHead); + syncBuildHead(&pMsg->head); +} + +void syncBuildFileInfo(SFileInfo *pMsg, int32_t vgId) { + pMsg->head.type = TAOS_SMSG_SYNC_FILE; + pMsg->head.vgId = vgId; + pMsg->head.len = sizeof(SFileInfo) - sizeof(SSyncHead); + syncBuildHead(&pMsg->head); +} \ No newline at end of file diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index d156c93865..8651879eb6 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -36,6 +36,8 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex if (sindex < 0 || eindex < sindex) return; + sDebug("%s, extra files will be removed between sindex:%d and eindex:%d", pPeer->id, sindex, eindex); + while (1) { name[0] = 0; magic = 
(*pNode->getFileInfo)(pNode->vgId, name, &index, eindex, &size, &fversion); @@ -43,7 +45,7 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex snprintf(fname, sizeof(fname), "%s/%s", pNode->path, name); (void)remove(fname); - sDebug("%s, %s is removed", pPeer->id, fname); + sInfo("%s, %s is removed for its extra", pPeer->id, fname); index++; if (index > eindex) break; @@ -54,19 +56,27 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { SSyncNode *pNode = pPeer->pSyncNode; SFileInfo minfo; memset(&minfo, 0, sizeof(SFileInfo)); /* = {0}; */ SFileInfo sinfo; memset(&sinfo, 0, sizeof(SFileInfo)); /* = {0}; */ - SFileAck fileAck = {0}; + SFileAck fileAck; memset(&fileAck, 0, sizeof(SFileAck)); int32_t code = -1; char name[TSDB_FILENAME_LEN * 2] = {0}; uint32_t pindex = 0; // index in last restore bool fileChanged = false; *fversion = 0; - sinfo.index = 0; + sinfo.index = -1; while (1) { // read file info - int32_t ret = taosReadMsg(pPeer->syncFd, &(minfo), sizeof(minfo)); - if (ret < 0) { - sError("%s, failed to read file info while restore file since %s", pPeer->id, strerror(errno)); + minfo.index = -1; + int32_t ret = taosReadMsg(pPeer->syncFd, &minfo, sizeof(SFileInfo)); + if (ret != sizeof(SFileInfo) || minfo.index == -1) { + sError("%s, failed to read fileinfo while restore file since %s", pPeer->id, strerror(errno)); + break; + } + + assert(ret == sizeof(SFileInfo)); + ret = syncCheckHead((SSyncHead *)(&minfo)); + if (ret != 0) { + sError("%s, failed to check fileinfo while restore file since %s", pPeer->id, strerror(ret)); break; } @@ -75,7 +85,7 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { sDebug("%s, no more files to restore", pPeer->id); // remove extra files after the current index - syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX); + if (sinfo.index != -1) syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX); code = 0; break; } @@ -91,12 +101,13 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { &sinfo.fversion); // if file not there or magic is not the same, file shall be synced - memset(&fileAck, 0, sizeof(fileAck)); + memset(&fileAck, 0, sizeof(SFileAck)); + syncBuildFileAck(&fileAck, pNode->vgId); fileAck.sync = (sinfo.magic != minfo.magic || sinfo.name[0] == 0) ? 
1 : 0; // send file ack - ret = taosWriteMsg(pPeer->syncFd, &fileAck, sizeof(fileAck)); - if (ret < 0) { + ret = taosWriteMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck)); + if (ret != sizeof(SFileAck)) { sError("%s, failed to write file:%s ack while restore file since %s", pPeer->id, minfo.name, strerror(errno)); break; } @@ -154,7 +165,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) { while (1) { ret = taosReadMsg(pPeer->syncFd, pHead, sizeof(SWalHead)); - if (ret < 0) { + if (ret != sizeof(SWalHead)) { sError("%s, failed to read walhead while restore wal since %s", pPeer->id, strerror(errno)); break; } @@ -166,7 +177,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) { } // wal sync over ret = taosReadMsg(pPeer->syncFd, pHead->cont, pHead->len); - if (ret < 0) { + if (ret != pHead->len) { sError("%s, failed to read walcont, len:%d while restore wal since %s", pPeer->id, pHead->len, strerror(errno)); break; } @@ -286,11 +297,12 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { uint64_t fversion = 0; sInfo("%s, start to restore, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); - SFirstPktRsp firstPktRsp = {.sync = 1}; - if (taosWriteMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) < 0) { - sError("%s, failed to send sync firstPkt rsp since %s", pPeer->id, strerror(errno)); + SSyncRsp rsp = {.sync = 1, .tranId = syncGenTranId()}; + if (taosWriteMsg(pPeer->syncFd, &rsp, sizeof(SSyncRsp)) != sizeof(SSyncRsp)) { + sError("%s, failed to send sync rsp since %s", pPeer->id, strerror(errno)); return -1; } + sDebug("%s, send sync rsp to peer, tranId:%u", pPeer->id, rsp.tranId); sInfo("%s, start to restore file, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); int32_t code = syncRestoreFile(pPeer, &fversion); diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 36b197dd46..02d990313e 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -58,7 +58,7 @@ static int32_t syncGetFileVersion(SSyncNode *pNode, SSyncPeer *pPeer) { uint64_t fver, wver; int32_t code = (*pNode->getVersion)(pNode->vgId, &fver, &wver); if (code != 0) { - sDebug("%s, vnode is commiting while retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer); + sDebug("%s, vnode is commiting while get fver for retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer); return -1; } @@ -88,11 +88,14 @@ static bool syncAreFilesModified(SSyncNode *pNode, SSyncPeer *pPeer) { static int32_t syncRetrieveFile(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; SFileInfo fileInfo; memset(&fileInfo, 0, sizeof(SFileInfo)); - SFileAck fileAck = {0}; + SFileAck fileAck; memset(&fileAck, 0, sizeof(SFileAck)); int32_t code = -1; char name[TSDB_FILENAME_LEN * 2] = {0}; - if (syncGetFileVersion(pNode, pPeer) < 0) return -1; + if (syncGetFileVersion(pNode, pPeer) < 0) { + pPeer->fileChanged = 1; + return -1; + } while (1) { // retrieve file info @@ -100,12 +103,12 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) { fileInfo.size = 0; fileInfo.magic = (*pNode->getFileInfo)(pNode->vgId, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX, &fileInfo.size, &fileInfo.fversion); - // fileInfo.size = htonl(size); + syncBuildFileInfo(&fileInfo, pNode->vgId); sDebug("%s, file:%s info is sent, size:%" PRId64, pPeer->id, fileInfo.name, fileInfo.size); // send the file info - int32_t ret = taosWriteMsg(pPeer->syncFd, &(fileInfo), sizeof(fileInfo)); - if (ret < 0) { + int32_t ret = taosWriteMsg(pPeer->syncFd, &(fileInfo), sizeof(SFileInfo)); + if (ret != sizeof(SFileInfo)) 
{ code = -1; sError("%s, failed to write file:%s info while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno)); break; @@ -119,13 +122,20 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) { } // wait for the ack from peer - ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(fileAck)); - if (ret < 0) { + ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck)); + if (ret != sizeof(SFileAck)) { code = -1; sError("%s, failed to read file:%s ack while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno)); break; } + ret = syncCheckHead((SSyncHead*)(&fileAck)); + if (ret != 0) { + code = -1; + sError("%s, failed to check file:%s ack while retrieve file since %s", pPeer->id, fileInfo.name, strerror(ret)); + break; + } + // set the peer sync version pPeer->sversion = fileInfo.fversion; @@ -384,12 +394,15 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { } if (code == 0) { - pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; - sInfo("%s, wal retrieve is finished, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); - SWalHead walHead; memset(&walHead, 0, sizeof(walHead)); - taosWriteMsg(pPeer->syncFd, &walHead, sizeof(walHead)); + if (taosWriteMsg(pPeer->syncFd, &walHead, sizeof(walHead)) == sizeof(walHead)) { + pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; + sInfo("%s, wal retrieve is finished, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); + } else { + sError("%s, failed to send last wal record since %s", pPeer->id, strerror(errno)); + code = -1; + } } else { sError("%s, failed to send wal since %s, code:0x%x", pPeer->id, strerror(errno), code); } @@ -400,24 +413,22 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { static int32_t syncRetrieveFirstPkt(SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - SFirstPkt firstPkt; - memset(&firstPkt, 0, sizeof(firstPkt)); - firstPkt.syncHead.type = TAOS_SMSG_SYNC_DATA; - firstPkt.syncHead.vgId = pNode->vgId; - tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); - firstPkt.port = tsSyncPort; + SSyncMsg msg; + syncBuildSyncDataMsg(&msg, pNode->vgId); - if (taosWriteMsg(pPeer->syncFd, &firstPkt, sizeof(firstPkt)) < 0) { - sError("%s, failed to send sync firstPkt since %s", pPeer->id, strerror(errno)); - return -1; - } - - SFirstPktRsp firstPktRsp; - if (taosReadMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) < 0) { - sError("%s, failed to read sync firstPkt rsp since %s", pPeer->id, strerror(errno)); + if (taosWriteMsg(pPeer->syncFd, &msg, sizeof(SSyncMsg)) != sizeof(SSyncMsg)) { + sError("%s, failed to send sync-data msg since %s, tranId:%u", pPeer->id, strerror(errno), msg.tranId); + return -1; + } + sDebug("%s, send sync-data msg to peer, tranId:%u", pPeer->id, msg.tranId); + + SSyncRsp rsp; + if (taosReadMsg(pPeer->syncFd, &rsp, sizeof(SSyncRsp)) != sizeof(SSyncRsp)) { + sError("%s, failed to read sync-data rsp since %s, tranId:%u", pPeer->id, strerror(errno), msg.tranId); return -1; } + sDebug("%s, recv sync-data rsp from peer, tranId:%u rsp-tranId:%u", pPeer->id, msg.tranId, rsp.tranId); return 0; } diff --git a/src/sync/src/taosTcpPool.c b/src/sync/src/syncTcp.c similarity index 89% rename from src/sync/src/taosTcpPool.c rename to src/sync/src/syncTcp.c index eb05cf7c6f..7bfdc4e440 100644 --- a/src/sync/src/taosTcpPool.c +++ b/src/sync/src/syncTcp.c @@ -19,10 +19,10 @@ #include "tutil.h" #include "tsocket.h" #include "taoserror.h" -#include "taosTcpPool.h" #include "twal.h" #include "tsync.h" #include "syncInt.h" +#include "syncTcp.h" typedef struct SThreadObj { pthread_t thread; @@ 
-47,12 +47,12 @@ typedef struct { int32_t closedByApp; } SConnObj; -static void *taosAcceptPeerTcpConnection(void *argv); -static void *taosProcessTcpData(void *param); -static void taosStopPoolThread(SThreadObj *pThread); -static SThreadObj *taosGetTcpThread(SPoolObj *pPool); +static void *syncAcceptPeerTcpConnection(void *argv); +static void *syncProcessTcpData(void *param); +static void syncStopPoolThread(SThreadObj *pThread); +static SThreadObj *syncGetTcpThread(SPoolObj *pPool); -void *taosOpenTcpThreadPool(SPoolInfo *pInfo) { +void *syncOpenTcpThreadPool(SPoolInfo *pInfo) { pthread_attr_t thattr; SPoolObj *pPool = calloc(sizeof(SPoolObj), 1); @@ -80,7 +80,7 @@ void *taosOpenTcpThreadPool(SPoolInfo *pInfo) { pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - if (pthread_create(&(pPool->thread), &thattr, (void *)taosAcceptPeerTcpConnection, pPool) != 0) { + if (pthread_create(&(pPool->thread), &thattr, (void *)syncAcceptPeerTcpConnection, pPool) != 0) { sError("failed to create accept thread for TCP server since %s", strerror(errno)); close(pPool->acceptFd); tfree(pPool->pThread); @@ -94,7 +94,7 @@ void *taosOpenTcpThreadPool(SPoolInfo *pInfo) { return pPool; } -void taosCloseTcpThreadPool(void *param) { +void syncCloseTcpThreadPool(void *param) { SPoolObj * pPool = param; SThreadObj *pThread; @@ -103,7 +103,7 @@ void taosCloseTcpThreadPool(void *param) { for (int32_t i = 0; i < pPool->info.numOfThreads; ++i) { pThread = pPool->pThread[i]; - if (pThread) taosStopPoolThread(pThread); + if (pThread) syncStopPoolThread(pThread); } sDebug("%p TCP pool is closed", pPool); @@ -112,7 +112,7 @@ void taosCloseTcpThreadPool(void *param) { tfree(pPool); } -void *taosAllocateTcpConn(void *param, void *pPeer, int32_t connFd) { +void *syncAllocateTcpConn(void *param, void *pPeer, int32_t connFd) { struct epoll_event event; SPoolObj *pPool = param; @@ -122,7 +122,7 @@ void *taosAllocateTcpConn(void *param, void *pPeer, int32_t connFd) { return NULL; } - SThreadObj *pThread = taosGetTcpThread(pPool); + SThreadObj *pThread = syncGetTcpThread(pPool); if (pThread == NULL) { tfree(pConn); return NULL; @@ -149,7 +149,7 @@ void *taosAllocateTcpConn(void *param, void *pPeer, int32_t connFd) { return pConn; } -void taosFreeTcpConn(void *param) { +void syncFreeTcpConn(void *param) { SConnObj * pConn = param; SThreadObj *pThread = pConn->pThread; @@ -175,7 +175,7 @@ static void taosProcessBrokenLink(SConnObj *pConn) { #define maxEvents 10 -static void *taosProcessTcpData(void *param) { +static void *syncProcessTcpData(void *param) { SThreadObj *pThread = (SThreadObj *)param; SPoolObj * pPool = pThread->pPool; SPoolInfo * pInfo = &pPool->info; @@ -222,7 +222,7 @@ static void *taosProcessTcpData(void *param) { if (pConn->closedByApp == 0) { if ((*pInfo->processIncomingMsg)(pConn->ahandle, buffer) < 0) { - taosFreeTcpConn(pConn); + syncFreeTcpConn(pConn); continue; } } @@ -239,7 +239,7 @@ static void *taosProcessTcpData(void *param) { return NULL; } -static void *taosAcceptPeerTcpConnection(void *argv) { +static void *syncAcceptPeerTcpConnection(void *argv) { SPoolObj * pPool = (SPoolObj *)argv; SPoolInfo *pInfo = &pPool->info; @@ -268,7 +268,7 @@ static void *taosAcceptPeerTcpConnection(void *argv) { return NULL; } -static SThreadObj *taosGetTcpThread(SPoolObj *pPool) { +static SThreadObj *syncGetTcpThread(SPoolObj *pPool) { SThreadObj *pThread = pPool->pThread[pPool->nextId]; if (pThread) return pThread; @@ -286,7 +286,7 @@ static SThreadObj *taosGetTcpThread(SPoolObj 
*pPool) { pthread_attr_t thattr; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); - int32_t ret = pthread_create(&(pThread->thread), &thattr, (void *)taosProcessTcpData, pThread); + int32_t ret = pthread_create(&(pThread->thread), &thattr, (void *)syncProcessTcpData, pThread); pthread_attr_destroy(&thattr); if (ret != 0) { @@ -303,7 +303,7 @@ static SThreadObj *taosGetTcpThread(SPoolObj *pPool) { return pThread; } -static void taosStopPoolThread(SThreadObj *pThread) { +static void syncStopPoolThread(SThreadObj *pThread) { pthread_t thread = pThread->thread; if (!taosCheckPthreadValid(thread)) { return; diff --git a/src/sync/test/syncServer.c b/src/sync/test/syncServer.c index 5a64a0a36a..161105d86c 100644 --- a/src/sync/test/syncServer.c +++ b/src/sync/test/syncServer.c @@ -100,7 +100,7 @@ int processRpcMsg(void *item) { pHead->msgType = pMsg->msgType; pHead->len = pMsg->contLen; - uDebug("ver:%" PRIu64 ", pkt from client processed", pHead->version); + uDebug("ver:%" PRIu64 ", rsp from client processed", pHead->version); writeIntoWal(pHead); syncForwardToPeer(syncHandle, pHead, item, TAOS_QTYPE_RPC); @@ -275,7 +275,7 @@ int getWalInfo(int32_t vgId, char *name, int64_t *index) { int writeToCache(int32_t vgId, void *data, int type) { SWalHead *pHead = data; - uDebug("pkt from peer is received, ver:%" PRIu64 " len:%d type:%d", pHead->version, pHead->len, type); + uDebug("rsp from peer is received, ver:%" PRIu64 " len:%d type:%d", pHead->version, pHead->len, type); int msgSize = pHead->len + sizeof(SWalHead); void *pMsg = taosAllocateQitem(msgSize); diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 43ceada8cf..1fbe9c6566 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -876,7 +876,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) { for (int i = 0; i < pMeta->maxTables; i++) { STable *pTable = pMeta->tables[i]; if (pTable && pTable->type == TSDB_STREAM_TABLE) { - pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, + pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, tsdbGetTableSchemaImpl(pTable, false, false, -1)); } } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 25c815b74e..9dfa147c8f 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -828,7 +828,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1; if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) { - pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, + pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, tsdbGetTableSchemaImpl(pTable, false, false, -1)); } diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 405c5ebdce..269cf4b94b 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -79,6 +79,7 @@ extern char * tsCfgStatusStr[]; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); void taosPrintGlobalCfg(); +void taosDumpGlobalCfg(); void taosInitConfigOption(SGlobalCfg cfg); SGlobalCfg * taosGetConfigOption(const char *option); diff --git a/src/util/inc/tstoken.h b/src/util/inc/tstoken.h index 7aeb2da6b6..e2654f21f4 100644 --- a/src/util/inc/tstoken.h +++ b/src/util/inc/tstoken.h @@ -31,7 +31,7 @@ 
extern "C" { typedef struct SStrToken { uint32_t n; uint32_t type; - char * z; + char *z; } SStrToken; /** diff --git a/src/util/inc/tworker.h b/src/util/inc/tworker.h new file mode 100644 index 0000000000..7bc1eba2fd --- /dev/null +++ b/src/util/inc/tworker.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TWORKER_H +#define TDENGINE_TWORKER_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *(*FWorkerThread)(void *pWorker); +struct SWorkerPool; + +typedef struct { + pthread_t thread; // thread + int32_t id; // worker ID + struct SWorkerPool *pPool; +} SWorker; + +typedef struct SWorkerPool { + int32_t max; // max number of workers + int32_t min; // min number of workers + int32_t num; // current number of workers + void * qset; + char * name; + SWorker *worker; + FWorkerThread workerFp; + pthread_mutex_t mutex; +} SWorkerPool; + +int32_t tWorkerInit(SWorkerPool *pPool); +void tWorkerCleanup(SWorkerPool *pPool); +void * tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle); +void tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 204a7d10e9..9aac91f2e0 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -249,9 +249,6 @@ void taosReadGlobalLogCfg() { int olen, vlen; char fileName[PATH_MAX] = {0}; - mDebugFlag = 135; - sdbDebugFlag = 135; - wordexp_t full_path; if ( 0 != wordexp(configDir, &full_path, 0)) { printf("\nconfig file: %s wordexp fail! reason:%s\n", configDir, strerror(errno)); @@ -414,3 +411,57 @@ void taosPrintGlobalCfg() { taosPrintDataDirCfg(); uInfo("=================================="); } + +static void taosDumpCfg(SGlobalCfg *cfg) { + int optionLen = (int)strlen(cfg->option); + int blankLen = TSDB_CFG_PRINT_LEN - optionLen; + blankLen = blankLen < 0 ? 
0 : blankLen; + + char blank[TSDB_CFG_PRINT_LEN]; + memset(blank, ' ', TSDB_CFG_PRINT_LEN); + blank[blankLen] = 0; + + switch (cfg->valType) { + case TAOS_CFG_VTYPE_INT16: + printf(" %s:%s%d%s\n", cfg->option, blank, *((int16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TAOS_CFG_VTYPE_INT32: + printf(" %s:%s%d%s\n", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TAOS_CFG_VTYPE_FLOAT: + printf(" %s:%s%f%s\n", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TAOS_CFG_VTYPE_STRING: + case TAOS_CFG_VTYPE_IPSTR: + case TAOS_CFG_VTYPE_DIRECTORY: + printf(" %s:%s%s%s\n", cfg->option, blank, (char *)cfg->ptr, tsGlobalUnit[cfg->unitType]); + break; + default: + break; + } +} + +void taosDumpGlobalCfg() { + printf("taos global config:\n"); + printf("==================================\n"); + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalCfg *cfg = tsGlobalConfig + i; + if (tscEmbedded == 0 && !(cfg->cfgType & TSDB_CFG_CTYPE_B_CLIENT)) continue; + if (cfg->cfgType & TSDB_CFG_CTYPE_B_NOT_PRINT) continue; + if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_SHOW)) continue; + + taosDumpCfg(cfg); + } + + printf("\ntaos local config:\n"); + printf("==================================\n"); + + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalCfg *cfg = tsGlobalConfig + i; + if (tscEmbedded == 0 && !(cfg->cfgType & TSDB_CFG_CTYPE_B_CLIENT)) continue; + if (cfg->cfgType & TSDB_CFG_CTYPE_B_NOT_PRINT) continue; + if (cfg->cfgType & TSDB_CFG_CTYPE_B_SHOW) continue; + + taosDumpCfg(cfg); + } +} diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index d72bc5f412..7caa1a6c37 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -61,7 +61,7 @@ taos_queue taosOpenQueue() { pthread_mutex_init(&queue->mutex, NULL); - uTrace("queue:%p is openned", queue); + uTrace("queue:%p is opened", queue); return queue; } @@ -230,7 +230,7 @@ taos_qset taosOpenQset() { pthread_mutex_init(&qset->mutex, NULL); tsem_init(&qset->sem, 0, 0); - uTrace("qset:%p is openned", qset); + uTrace("qset:%p is opened", qset); return qset; } diff --git a/src/util/src/ttimer.c b/src/util/src/ttimer.c index 0222a6d80a..114f0764e8 100644 --- a/src/util/src/ttimer.c +++ b/src/util/src/ttimer.c @@ -19,7 +19,7 @@ #include "ttimer.h" #include "tutil.h" -extern int32_t tscEmbedded; +extern uint32_t tscEmbedded; #define tmrFatal(...) { if (tmrDebugFlag & DEBUG_FATAL) { taosPrintLog("TMR FATAL ", tscEmbedded ? 255 : tmrDebugFlag, __VA_ARGS__); }} #define tmrError(...) { if (tmrDebugFlag & DEBUG_ERROR) { taosPrintLog("TMR ERROR ", tscEmbedded ? 255 : tmrDebugFlag, __VA_ARGS__); }} @@ -225,10 +225,11 @@ static void addToWheel(tmr_obj_t* timer, uint32_t delay) { } static bool removeFromWheel(tmr_obj_t* timer) { - if (timer->wheel >= tListLen(wheels)) { + uint8_t wheelIdx = timer->wheel; + if (wheelIdx >= tListLen(wheels)) { return false; } - time_wheel_t* wheel = wheels + timer->wheel; + time_wheel_t* wheel = wheels + wheelIdx; bool removed = false; pthread_mutex_lock(&wheel->mutex); diff --git a/src/util/src/tworker.c b/src/util/src/tworker.c new file mode 100644 index 0000000000..8b4053bccd --- /dev/null +++ b/src/util/src/tworker.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "tulog.h" +#include "tqueue.h" +#include "tworker.h" + +int32_t tWorkerInit(SWorkerPool *pPool) { + pPool->qset = taosOpenQset(); + pPool->worker = calloc(sizeof(SWorker), pPool->max); + pthread_mutex_init(&pPool->mutex, NULL); + for (int i = 0; i < pPool->max; ++i) { + SWorker *pWorker = pPool->worker + i; + pWorker->id = i; + pWorker->pPool = pPool; + } + + uInfo("worker:%s is initialized, min:%d max:%d", pPool->name, pPool->min, pPool->max); + return 0; +} + +void tWorkerCleanup(SWorkerPool *pPool) { + for (int i = 0; i < pPool->max; ++i) { + SWorker *pWorker = pPool->worker + i; + if(taosCheckPthreadValid(pWorker->thread)) { + taosQsetThreadResume(pPool->qset); + } + } + + for (int i = 0; i < pPool->max; ++i) { + SWorker *pWorker = pPool->worker + i; + if (taosCheckPthreadValid(pWorker->thread)) { + pthread_join(pWorker->thread, NULL); + } + } + + free(pPool->worker); + taosCloseQset(pPool->qset); + pthread_mutex_destroy(&pPool->mutex); + + uInfo("worker:%s is closed", pPool->name); +} + +void *tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle) { + pthread_mutex_lock(&pPool->mutex); + taos_queue pQueue = taosOpenQueue(); + if (pQueue == NULL) { + pthread_mutex_unlock(&pPool->mutex); + return NULL; + } + + taosAddIntoQset(pPool->qset, pQueue, ahandle); + + // spawn a thread to process queue + if (pPool->num < pPool->max) { + do { + SWorker *pWorker = pPool->worker + pPool->num; + + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&pWorker->thread, &thAttr, pPool->workerFp, pWorker) != 0) { + uError("worker:%s:%d failed to create thread to process since %s", pPool->name, pWorker->id, strerror(errno)); + } + + pthread_attr_destroy(&thAttr); + pPool->num++; + uDebug("worker:%s:%d is launched, total:%d", pPool->name, pWorker->id, pPool->num); + } while (pPool->num < pPool->min); + } + + pthread_mutex_unlock(&pPool->mutex); + uDebug("worker:%s, queue:%p is allocated, ahandle:%p", pPool->name, pQueue, ahandle); + + return pQueue; +} + +void tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue) { + taosCloseQueue(pQueue); + uDebug("worker:%s, queue:%p is freed", pPool->name, pQueue); +} diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h index 401c217b9a..b28eb690fe 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -46,9 +46,11 @@ typedef struct { int8_t isFull; int8_t isCommiting; uint64_t version; // current version + uint64_t cversion; // version while commit start uint64_t fversion; // version on saved data file - void * wqueue; - void * rqueue; + void * wqueue; // write queue + void * qqueue; // read query queue + void * fqueue; // read fetch/cancel queue void * wal; void * tsdb; int64_t sync; diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index e0c2101ff0..4c4df3962c 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -192,8 +192,8 @@ int32_t vnodeOpen(int32_t vgId) { code = vnodeReadVersion(pVnode); if (code != TSDB_CODE_SUCCESS) { - vError("vgId:%d, failed to read version, generate it from data file", pVnode->vgId); - // Allow vnode start even when 
read version fails, set version as walVersion or zero + vError("vgId:%d, failed to read file version, generate it from data file", pVnode->vgId); + // Allow vnode start even when read file version fails, set file version as wal version or zero // vnodeCleanUp(pVnode); // return code; } @@ -201,8 +201,9 @@ int32_t vnodeOpen(int32_t vgId) { pVnode->fversion = pVnode->version; pVnode->wqueue = dnodeAllocVWriteQueue(pVnode); - pVnode->rqueue = dnodeAllocVReadQueue(pVnode); - if (pVnode->wqueue == NULL || pVnode->rqueue == NULL) { + pVnode->qqueue = dnodeAllocVQueryQueue(pVnode); + pVnode->fqueue = dnodeAllocVFetchQueue(pVnode); + if (pVnode->wqueue == NULL || pVnode->qqueue == NULL || pVnode->fqueue == NULL) { vnodeCleanUp(pVnode); return terrno; } @@ -362,9 +363,14 @@ void vnodeDestroy(SVnodeObj *pVnode) { pVnode->wqueue = NULL; } - if (pVnode->rqueue) { - dnodeFreeVReadQueue(pVnode->rqueue); - pVnode->rqueue = NULL; + if (pVnode->qqueue) { + dnodeFreeVQueryQueue(pVnode->qqueue); + pVnode->qqueue = NULL; + } + + if (pVnode->fqueue) { + dnodeFreeVFetchQueue(pVnode->fqueue); + pVnode->fqueue = NULL; } tfree(pVnode->rootDir); @@ -430,6 +436,7 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { if (status == TSDB_STATUS_COMMIT_START) { pVnode->isCommiting = 1; + pVnode->cversion = pVnode->version; vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); if (!vnodeInInitStatus(pVnode)) { return walRenew(pVnode->wal); @@ -440,7 +447,7 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { if (status == TSDB_STATUS_COMMIT_OVER) { pVnode->isCommiting = 0; pVnode->isFull = 0; - pVnode->fversion = pVnode->version; + pVnode->fversion = pVnode->cversion; vDebug("vgId:%d, commit over, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); if (!vnodeInInitStatus(pVnode)) { walRemoveOneOldFile(pVnode->wal); diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 34921a93b3..637d470f8a 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -14,7 +14,6 @@ */ #define _DEFAULT_SOURCE -#define _NON_BLOCKING_RETRIEVE 0 #include "os.h" #include "taosmsg.h" #include "tqueue.h" @@ -25,12 +24,12 @@ static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SVReadMsg *pRead); static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead); static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead); + static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId); int32_t vnodeInitRead(void) { vnodeProcessReadMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg; vnodeProcessReadMsgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessFetchMsg; - return 0; } @@ -115,13 +114,16 @@ int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qt } pRead->qtype = qtype; - atomic_add_fetch_32(&pVnode->refCount, 1); atomic_add_fetch_32(&pVnode->queuedRMsg, 1); - vTrace("vgId:%d, write into vrqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); - taosWriteQitem(pVnode->rqueue, qtype, pRead); - return TSDB_CODE_SUCCESS; + if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pRead->msgType == TSDB_MSG_TYPE_FETCH) { + vTrace("vgId:%d, write into vfetch queue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + return taosWriteQitem(pVnode->fqueue, qtype, pRead); + } else { + vTrace("vgId:%d, write into vquery queue, refCount:%d 
queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + return taosWriteQitem(pVnode->qqueue, qtype, pRead); + } } static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void *ahandle) { @@ -131,7 +133,7 @@ static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void int32_t code = vnodeWriteToRQueue(pVnode, qhandle, 0, TAOS_QTYPE_QUERY, &rpcMsg); if (code == TSDB_CODE_SUCCESS) { - vDebug("QInfo:%p add to vread queue for exec query", *qhandle); + vTrace("QInfo:%p add to vread queue for exec query", *qhandle); } return code; @@ -162,7 +164,7 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle, } } else { *freeHandle = true; - vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); + vTrace("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); } } else { SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -197,26 +199,29 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { // qHandle needs to be freed correctly if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { - SRetrieveTableMsg *killQueryMsg = (SRetrieveTableMsg *)pRead->pCont; - killQueryMsg->free = htons(killQueryMsg->free); - killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle); - - vWarn("QInfo:%p connection %p broken, kill query", (void *)killQueryMsg->qhandle, pRead->rpcHandle); - assert(pRead->contLen > 0 && killQueryMsg->free == 1); - - void **qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t)killQueryMsg->qhandle); - if (qhandle == NULL || *qhandle == NULL) { - vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void *)killQueryMsg->qhandle, - pRead->rpcHandle); - } else { - assert(*qhandle == (void *)killQueryMsg->qhandle); - - qKillQuery(*qhandle); - qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, true); - } - - return TSDB_CODE_TSC_QUERY_CANCELLED; + vError("error rpc msg in query, %s", tstrerror(pRead->code)); } +// assert(pRead->code != TSDB_CODE_RPC_NETWORK_UNAVAIL); +// if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { +// SCancelQueryMsg *pMsg = (SCancelQueryMsg *)pRead->pCont; +//// pMsg->free = htons(killQueryMsg->free); +// pMsg->qhandle = htobe64(pMsg->qhandle); +// +// vWarn("QInfo:%p connection %p broken, kill query", (void *)pMsg->qhandle, pRead->rpcHandle); +//// assert(pRead->contLen > 0 && pMsg->free == 1); +// +// void **qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t)pMsg->qhandle); +// if (qhandle == NULL || *qhandle == NULL) { +// vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void *)pMsg->qhandle, pRead->rpcHandle); +// } else { +// assert(*qhandle == (void *)pMsg->qhandle); +// +// qKillQuery(*qhandle); +// qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, true); +// } +// +// return TSDB_CODE_TSC_QUERY_CANCELLED; +// } int32_t code = TSDB_CODE_SUCCESS; void ** handle = NULL; @@ -261,7 +266,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } if (handle != NULL) { - vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); + vTrace("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); code = vnodePutItemIntoReadQueue(pVnode, handle, pRead->rpcHandle); if (code != TSDB_CODE_SUCCESS) { pRsp->code = code; @@ -273,10 +278,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { assert(pCont != NULL); void **qhandle = (void **)pRead->qhandle; - 
vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); + vTrace("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); // In the retrieve blocking model, only 50% CPU will be used in query processing - if (tsHalfCoresForQuery) { + if (tsRetrieveBlockingModel) { qTableQuery(*qhandle); // do execute query qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false); } else { @@ -289,7 +294,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { pRead->rpcHandle = qGetResultRetrieveMsg(*qhandle); assert(pRead->rpcHandle != NULL); - vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, + vTrace("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, pRead->rpcHandle); // set the real rsp error code @@ -322,7 +327,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { pRetrieve->free = htons(pRetrieve->free); pRetrieve->qhandle = htobe64(pRetrieve->qhandle); - vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void *)pRetrieve->qhandle, + vTrace("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void *)pRetrieve->qhandle, pRetrieve->free, pRead->rpcHandle); memset(pRet, 0, sizeof(SRspRet)); @@ -338,11 +343,12 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } if (code != TSDB_CODE_SUCCESS) { - vError("vgId:%d, invalid handle in retrieving result, code:0x%08x, QInfo:%p", pVnode->vgId, code, (void *)pRetrieve->qhandle); + vError("vgId:%d, invalid handle in retrieving result, code:%s, QInfo:%p", pVnode->vgId, tstrerror(code), (void *)pRetrieve->qhandle); vnodeBuildNoResultQueryRsp(pRet); return code; } - + + // kill current query and free corresponding resources. if (pRetrieve->free == 1) { vWarn("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); qKillQuery(*handle); @@ -373,10 +379,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); freeHandle = true; } else { // result is not ready, return immediately - assert(buildRes == true); - // Only effects in the non-blocking model - if (!tsHalfCoresForQuery) { + if (!tsRetrieveBlockingModel) { if (!buildRes) { assert(pRead->rpcHandle != NULL); @@ -401,12 +405,11 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { // notify connection(handle) that current qhandle is created, if current connection from // client is broken, the query needs to be killed immediately. 
int32_t vnodeNotifyCurrentQhandle(void *handle, void *qhandle, int32_t vgId) { - SRetrieveTableMsg *killQueryMsg = rpcMallocCont(sizeof(SRetrieveTableMsg)); - killQueryMsg->qhandle = htobe64((uint64_t)qhandle); - killQueryMsg->free = htons(1); - killQueryMsg->header.vgId = htonl(vgId); - killQueryMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg)); + SRetrieveTableMsg *pMsg = rpcMallocCont(sizeof(SRetrieveTableMsg)); + pMsg->qhandle = htobe64((uint64_t)qhandle); + pMsg->header.vgId = htonl(vgId); + pMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg)); - vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle); - return rpcReportProgress(handle, (char *)killQueryMsg, sizeof(SRetrieveTableMsg)); + vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle); + return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg)); } diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index a826a4903f..80e2dc422e 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -243,8 +243,10 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); if (queued > MAX_QUEUED_MSG_NUM) { - vDebug("vgId:%d, too many msg:%d in vwqueue, flow control", pVnode->vgId, queued); - taosMsleep(1); + int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3; + if (ms > 100) ms = 100; + vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms); + taosMsleep(ms); } code = vnodePerformFlowCtrl(pWrite); @@ -271,6 +273,8 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { SVnodeObj * pVnode = pWrite->pVnode; int32_t code = TSDB_CODE_VND_SYNCING; + if (pVnode->flowctrlLevel <= 0) code = TSDB_CODE_VND_IS_FLOWCTRL; + pWrite->processedCount++; if (pWrite->processedCount > 100) { vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code), @@ -290,8 +294,8 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) { SVnodeObj *pVnode = pWrite->pVnode; - if (pVnode->flowctrlLevel <= 0) return 0; if (pWrite->qtype != TAOS_QTYPE_RPC) return 0; + if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0; if (tsFlowCtrl == 0) { int32_t ms = pow(2, pVnode->flowctrlLevel + 2); diff --git a/src/wal/inc/walInt.h b/src/wal/inc/walInt.h index 06748d885f..890b404ce9 100644 --- a/src/wal/inc/walInt.h +++ b/src/wal/inc/walInt.h @@ -38,7 +38,7 @@ extern int32_t wDebugFlag; #define WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE)) #define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12) #define WAL_FILE_LEN (WAL_PATH_LEN + 32) -#define WAL_FILE_NUM 3 +#define WAL_FILE_NUM 1 // 3 typedef struct { uint64_t version; diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 10e1b4dd61..e67127d6e4 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -166,14 +166,14 @@ int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) { char walName[WAL_FILE_LEN]; snprintf(walName, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId); - wDebug("vgId:%d, file:%s, will be restored", pWal->vgId, walName); + wInfo("vgId:%d, file:%s, will be restored", pWal->vgId, walName); int32_t code = walRestoreWalFile(pWal, pVnode, writeFp, walName, fileId); if (code != TSDB_CODE_SUCCESS) { wError("vgId:%d, file:%s, failed to restore since %s", pWal->vgId, walName, tstrerror(code)); continue; } - wDebug("vgId:%d, file:%s, 
restore success", pWal->vgId, walName); + wInfo("vgId:%d, file:%s, restore success, wver:%" PRIu64, pWal->vgId, walName, pWal->version); count++; } @@ -267,8 +267,6 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch return TAOS_SYSTEM_ERROR(errno); } - wDebug("vgId:%d, file:%s, start to restore", pWal->vgId, name); - int32_t code = TSDB_CODE_SUCCESS; int64_t offset = 0; SWalHead *pHead = buffer; @@ -299,16 +297,14 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch } } - if (pHead->len > size - sizeof(SWalHead)) { - size = sizeof(SWalHead) + pHead->len; - buffer = realloc(buffer, size); - if (buffer == NULL) { - wError("vgId:%d, file:%s, failed to open for restore since %s", pWal->vgId, name, strerror(errno)); - code = TAOS_SYSTEM_ERROR(errno); + if (pHead->len < 0 || pHead->len > size - sizeof(SWalHead)) { + wError("vgId:%d, file:%s, wal head len out of range, hver:%" PRIu64 " len:%d offset:%" PRId64, pWal->vgId, name, + pHead->version, pHead->len, offset); + code = walSkipCorruptedRecord(pWal, pHead, tfd, &offset); + if (code != TSDB_CODE_SUCCESS) { + walFtruncate(pWal, tfd, offset); break; } - - pHead = buffer; } ret = tfRead(tfd, pHead->cont, pHead->len); @@ -326,7 +322,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch offset = offset + sizeof(SWalHead) + pHead->len; - wDebug("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId, + wTrace("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId, fileId, pHead->version, pWal->version, pHead->len); pWal->version = pHead->version; diff --git a/src/wal/test/waltest.c b/src/wal/test/waltest.c index 7a473ed18c..9a52a2ca83 100644 --- a/src/wal/test/waltest.c +++ b/src/wal/test/waltest.c @@ -76,7 +76,7 @@ int main(int argc, char *argv[]) { taosInitLog("wal.log", 100000, 10); - SWalCfg walCfg; + SWalCfg walCfg = {0}; walCfg.walLevel = level; walCfg.keep = keep; diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index 49e25a2f5e..5c6e60d30f 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -1,8 +1,3 @@ -properties([pipelineTriggers([githubPush()])]) -node { - git url: 'https://github.com/liuyq-617/TDengine' -} - def pre_test(){ catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' @@ -11,15 +6,14 @@ def pre_test(){ } sh ''' cd ${WKC} - rm -rf * + git reset --hard + git checkout ${BRANCH} + git pull + git submodule update cd ${WK} git reset --hard - git checkout develop + git checkout ${BRANCH} git pull - cd ${WKC} - rm -rf * - mv ${WORKSPACE}/* . - cd ${WK} export TZ=Asia/Harbin date rm -rf ${WK}/debug @@ -28,13 +22,13 @@ def pre_test(){ cmake .. 
> /dev/null make > /dev/null make install > /dev/null - cd ${WKC}/tests ''' return 1 } pipeline { agent none environment{ + BRANCH = $branch_name WK = '/var/lib/jenkins/workspace/TDinternal' WKC= '/var/lib/jenkins/workspace/TDinternal/community' } @@ -42,13 +36,13 @@ pipeline { stages { stage('Parallel test stage') { parallel { - stage('python p1') { - agent{label 'p1'} + stage('pytest') { + agent{label '184'} steps { pre_test() sh ''' cd ${WKC}/tests - ./test-all.sh p1 + ./test-all.sh pytest date''' } } @@ -56,6 +50,12 @@ pipeline { agent{label 'master'} steps { pre_test() + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + python3 concurrent_inquiry.py -c 1 + ''' + } sh ''' cd ${WKC}/tests ./test-all.sh b1 @@ -64,9 +64,12 @@ pipeline { } stage('test_crash_gen') { - agent{label "b2"} + agent{label "185"} steps { pre_test() + sh ''' + cd ${WKC}/tests/pytest + ''' catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${WKC}/tests/pytest @@ -80,7 +83,6 @@ pipeline { ''' } sh ''' - date cd ${WKC}/tests ./test-all.sh b2 date @@ -89,42 +91,177 @@ pipeline { } stage('test_valgrind') { - agent{label "b3"} + agent{label "186"} steps { pre_test() - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WKC}/tests/pytest - ./valgrind-test.sh 2>&1 > mem-error-out.log - ./handle_val_log.sh - ''' - } sh ''' + cd ${WKC}/tests/pytest + ./valgrind-test.sh 2>&1 > mem-error-out.log + ./handle_val_log.sh + date cd ${WKC}/tests ./test-all.sh b3 date''' } } - stage('python p2'){ - agent{label "p2"} + stage('connector'){ + agent{label "release"} steps{ - pre_test() - sh ''' - date - cd ${WKC}/tests - ./test-all.sh p2 - date + sh''' + cd ${WORKSPACE} + git checkout develop ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/gotest + bash batchtest.sh + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker + python3 PythonChecker.py + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/ + mvn clean package assembly:single >/dev/null + java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# + dotnet run + ''' + } } } - - + stage('arm64_build'){ + agent{label 'arm64'} + steps{ + sh ''' + cd ${WK} + git fetch + git checkout develop + git pull + cd ${WKC} + git fetch + git checkout develop + git pull + git submodule update + cd ${WKC}/packaging + ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0 + + ''' + } + } + stage('arm32_build'){ + agent{label 'arm32'} + steps{ + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WK} + git fetch + git checkout develop + git pull + cd ${WKC} + git fetch + git checkout develop + git pull + git submodule update + cd ${WKC}/packaging + ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0 + + ''' + } + + } + } } } } - -} + post { + success { + emailext ( + subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+ Build Information +
+
    +
    +
  • Build name >> branch: ${PROJECT_NAME}
  • +
  • Build result: Successful
  • +
  • Build number: ${BUILD_NUMBER}
  • +
  • Triggered by: ${CAUSE}
  • +
  • Change summary: ${CHANGES}
  • +
  • Build URL: ${BUILD_URL}
  • +
  • Build log: ${BUILD_URL}console
  • +
  • Change set: ${JELLY_SCRIPT}
  • +
    +
+
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + failure { + emailext ( + subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+ Build Information +
+
    +
    +
  • Build name >> branch: ${PROJECT_NAME}
  • +
  • Build result: Failure
  • +
  • Build number: ${BUILD_NUMBER}
  • +
  • Triggered by: ${CAUSE}
  • +
  • Change summary: ${CHANGES}
  • +
  • Build URL: ${BUILD_URL}
  • +
  • Build log: ${BUILD_URL}console
  • +
  • Change set: ${JELLY_SCRIPT}
  • +
    +
+
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/.gitignore b/tests/examples/JDBC/taosdemo/.gitignore new file mode 100644 index 0000000000..549e00a2a9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/.gitignore @@ -0,0 +1,33 @@ +HELP.md +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java b/tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000..a45eb6ba26 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,118 @@ +/* + * Copyright 2007-present the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.6"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. + */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. 
+ File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if (mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if (mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000..2cc7d4a55c Binary files /dev/null and b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar differ diff --git a/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000..642d572ce9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip +wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar diff --git a/tests/examples/JDBC/taosdemo/mvnw b/tests/examples/JDBC/taosdemo/mvnw new file mode 100755 index 0000000000..3c8a553731 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/mvnw @@ -0,0 +1,322 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ]; then + + if [ -f /etc/mavenrc ]; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ]; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false +darwin=false +mingw=false +case "$(uname)" in +CYGWIN*) cygwin=true ;; +MINGW*) mingw=true ;; +Darwin*) + darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="$(/usr/libexec/java_home)" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ]; then + if [ -r /etc/gentoo-release ]; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +if [ -z "$M2_HOME" ]; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ]; do + ls=$(ls -ld "$PRG") + link=$(expr "$ls" : '.*-> \(.*\)$') + if expr "$link" : '/.*' >/dev/null; then + PRG="$link" + else + PRG="$(dirname "$PRG")/$link" + fi + done + + saveddir=$(pwd) + + M2_HOME=$(dirname "$PRG")/.. + + # make it fully qualified + M2_HOME=$(cd "$M2_HOME" && pwd) + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --unix "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw; then + [ -n "$M2_HOME" ] && + M2_HOME="$( ( + cd "$M2_HOME" + pwd + ))" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="$( ( + cd "$JAVA_HOME" + pwd + ))" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr \"$javaExecutable\" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! 
$(expr "$readLink" : '\([^ ]*\)') = "no" ]; then + if $darwin; then + javaHome="$(dirname \"$javaExecutable\")" + javaExecutable="$(cd \"$javaHome\" && pwd -P)/javac" + else + javaExecutable="$(readlink -f \"$javaExecutable\")" + fi + javaHome="$(dirname \"$javaExecutable\")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ]; then + if [ -n "$JAVA_HOME" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$(which java)" + fi +fi + +if [ ! -x "$JAVACMD" ]; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ]; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ]; then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ]; do + if [ -d "$wdir"/.mvn ]; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$( + cd "$wdir/.." + pwd + ) + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' <"$1")" + fi +} + +BASE_DIR=$(find_maven_basedir "$(pwd)") +if [ -z "$BASE_DIR" ]; then + exit 1 +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." + fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + fi + while IFS="=" read key value; do + case "$key" in wrapperUrl) + jarUrl="$value" + break + ;; + esac + done <"$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... 
using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --path --windows "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. +MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/tests/examples/JDBC/taosdemo/mvnw.cmd b/tests/examples/JDBC/taosdemo/mvnw.cmd new file mode 100644 index 0000000000..c8d43372c9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. 
See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. +echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! 
%%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml new file mode 100644 index 0000000000..5cbf6cb700 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/pom.xml @@ -0,0 +1,117 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 2.4.0 + + + com.taosdata + taosdemo + 2.0 + taosdemo + Demo project for TDengine + + + 1.8 + + + + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.14 + + + + mysql + mysql-connector-java + 5.1.47 + + + + com.baomidou + mybatis-plus-boot-starter + 3.1.2 + + + + log4j + log4j + 1.2.17 + + + + + org.springframework.boot + spring-boot-starter-jdbc + + + org.springframework.boot + spring-boot-starter-thymeleaf + + + org.springframework.boot + spring-boot-starter-web + + + org.mybatis.spring.boot + mybatis-spring-boot-starter + 2.1.4 + + + junit + junit + 4.12 + test + + + org.springframework.boot + spring-boot-devtools + runtime + true + + + org.projectlombok + lombok + true + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + src/main/resources + + **/*.properties + **/*.xml + + true + + + src/main/java + + **/*.properties + **/*.xml + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java new file mode 100644 index 0000000000..db1b20527d --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo; + +import org.mybatis.spring.annotation.MapperScan; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@MapperScan(basePackages = {"com.taosdata.taosdemo.mapper"}) +@SpringBootApplication +public class TaosdemoApplication { + + public static void main(String[] args) { + SpringApplication.run(TaosdemoApplication.class, args); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java new file mode 100644 index 0000000000..e58c68f7a5 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java @@ -0,0 +1,174 @@ +package com.taosdata.taosdemo.components; + +import com.taosdata.taosdemo.domain.*; +import com.taosdata.taosdemo.service.DatabaseService; +import com.taosdata.taosdemo.service.SubTableService; +import com.taosdata.taosdemo.service.SuperTableService; +import 
com.taosdata.taosdemo.service.data.SubTableMetaGenerator; +import com.taosdata.taosdemo.service.data.SubTableValueGenerator; +import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator; +import com.taosdata.taosdemo.utils.JdbcTaosdemoConfig; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.CommandLineRunner; +import org.springframework.stereotype.Component; + +import java.util.*; +import java.util.concurrent.TimeUnit; + + +@Component +public class TaosDemoCommandLineRunner implements CommandLineRunner { + + private static Logger logger = Logger.getLogger(TaosDemoCommandLineRunner.class); + @Autowired + private DatabaseService databaseService; + @Autowired + private SuperTableService superTableService; + @Autowired + private SubTableService subTableService; + + private SuperTableMeta superTableMeta; + private List<SubTableMeta> subTableMetaList; + private List<SubTableValue> subTableValueList; + private List<List<SubTableValue>> dataList; + + + @Override + public void run(String... args) throws Exception { + // read the command-line arguments into a config + JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); + boolean isHelp = Arrays.asList(args).contains("--help"); + if (isHelp) { + JdbcTaosdemoConfig.printHelp(); + System.exit(0); + } + // prepare the data + prepareData(config); + // create the database + createDatabaseTask(config); + // create the tables + createTableTask(config); + // insert the data + insertTask(config); + // query: 1. generate the query statements, 2. execute the queries + // drop the tables + if (config.dropTable) { + superTableService.drop(config.database, config.superTable); + } + + System.exit(0); + } + + private void createDatabaseTask(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + + Map<String, String> databaseParam = new HashMap<>(); + databaseParam.put("database", config.database); + databaseParam.put("keep", Integer.toString(config.keep)); + databaseParam.put("days", Integer.toString(config.days)); + databaseParam.put("replica", Integer.toString(config.replica)); + //TODO: other database parameters + databaseService.dropDatabase(config.database); + databaseService.createDatabase(databaseParam); + databaseService.useDatabase(config.database); + + long end = System.currentTimeMillis(); + logger.info(">>> create database time cost : " + (end - start) + " ms."); + } + + // create the super table, in one of three ways: 1. from a user-specified SQL statement, 2. with a specified number of fields and tags, 3. with the default schema + private void createTableTask(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + if (config.doCreateTable) { + superTableService.create(superTableMeta); + // create the sub tables in batch + subTableService.createSubTable(subTableMetaList, config.numOfThreadsForCreate); + } + long end = System.currentTimeMillis(); + logger.info(">>> create table time cost : " + (end - start) + " ms."); + } + + private void insertTask(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + + int numOfThreadsForInsert = config.numOfThreadsForInsert; + int sleep = config.sleep; + if (config.autoCreateTable) { + // batch insert with automatic table creation + dataList.stream().forEach(subTableValues -> { + subTableService.insertAutoCreateTable(subTableValues, numOfThreadsForInsert); + sleep(sleep); + }); + } else { + dataList.stream().forEach(subTableValues -> { + subTableService.insert(subTableValues, numOfThreadsForInsert); + sleep(sleep); + }); + } + long end = System.currentTimeMillis(); + logger.info(">>> insert time cost : " + (end - start) + " ms."); + } + + private void prepareData(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + // meta of the super table + superTableMeta = createSupertable(config); + // meta of the sub tables + subTableMetaList = SubTableMetaGenerator.generate(superTableMeta, config.numOfTables, config.tablePrefix); + // data of the sub tables + subTableValueList = SubTableValueGenerator.generate(subTableMetaList, config.numOfRowsPerTable, config.startTime, config.timeGap); + // disrupt the data if out-of-order insertion is requested + if (config.order != 0) { + SubTableValueGenerator.disrupt(subTableValueList, config.rate, config.range); + } + // split the data into per-SQL blocks + int numOfTables = config.numOfTables; + int numOfTablesPerSQL = config.numOfTablesPerSQL; + int numOfRowsPerTable = config.numOfRowsPerTable; + int numOfValuesPerSQL = config.numOfValuesPerSQL; + dataList = SubTableValueGenerator.split(subTableValueList, numOfTables, numOfTablesPerSQL, numOfRowsPerTable, numOfValuesPerSQL); + long end = System.currentTimeMillis(); + logger.info(">>> prepare data time cost : " + (end - start) + " ms."); + } + + private SuperTableMeta createSupertable(JdbcTaosdemoConfig config) { + SuperTableMeta tableMeta; + // create super table + logger.info(">>> create super table <<<"); + if (config.superTableSQL != null) { + // use a sql to create super table + tableMeta = SuperTableMetaGenerator.generate(config.superTableSQL); + } else if (config.numOfFields == 0) { + // default sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase(config.database); + superTableMeta.setName(config.superTable); + List<FieldMeta> fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + fields.add(new FieldMeta("temperature", "float")); + fields.add(new FieldMeta("humidity", "int")); + superTableMeta.setFields(fields); + List<TagMeta> tags = new ArrayList<>(); + tags.add(new TagMeta("location", "nchar(64)")); + tags.add(new TagMeta("groupId", "int")); + superTableMeta.setTags(tags); + return superTableMeta; + } else { + // create super table with specified field size and tag size + tableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags); + } + return tableMeta; + } + + private static void sleep(int sleep) { + if (sleep <= 0) + return; + try { + TimeUnit.MILLISECONDS.sleep(sleep); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + +} 
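To make the runner's flow easier to follow, here is a minimal, illustrative sketch (not part of this patch) of how the generator classes introduced later in this change compose outside of Spring; the schema string, table counts, and time window below are arbitrary example values.

// Sketch only: compose the data generators the way prepareData() does, without Spring.
import com.taosdata.taosdemo.domain.SubTableMeta;
import com.taosdata.taosdemo.domain.SubTableValue;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.service.data.SubTableMetaGenerator;
import com.taosdata.taosdemo.service.data.SubTableValueGenerator;
import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator;

import java.util.List;

public class GeneratorPipelineSketch {
    public static void main(String[] args) {
        // 1. super table meta parsed from a SQL string (same path as config.superTableSQL)
        SuperTableMeta superTable = SuperTableMetaGenerator.generate(
                "create table test.weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)");
        // 2. meta for 10 sub tables named t1..t10, each with randomly generated tag values
        List<SubTableMeta> subTables = SubTableMetaGenerator.generate(superTable, 10, "t");
        // 3. 100 rows per sub table, 1000 ms apart; start == 0 lets the generator anchor the window to "now"
        List<SubTableValue> values = SubTableValueGenerator.generate(subTables, 100, 0, 1000);
        // 4. split into blocks of at most 5 tables and 20 rows per generated INSERT statement
        List<List<SubTableValue>> blocks = SubTableValueGenerator.split(values, 10, 5, 100, 20);
        System.out.println("prepared " + blocks.size() + " insert blocks");
    }
}

Each resulting block then maps to one multi-table INSERT issued through SubTableService and SubTableMapper, which is what insertTask() iterates over.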
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java new file mode 100644 index 0000000000..1cf1463f0a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java @@ -0,0 +1,40 @@ +package com.taosdata.taosdemo.controller; + +import com.taosdata.taosdemo.service.DatabaseService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.*; + +import java.util.Map; + +@RestController +@RequestMapping +public class DatabaseController { + + @Autowired + private DatabaseService databaseService; + + /** + * create database + ***/ + @PostMapping + public int create(@RequestBody Map map) { + return databaseService.createDatabase(map); + } + + + /** + * drop database + **/ + @DeleteMapping("/{dbname}") + public int delete(@PathVariable("dbname") String dbname) { + return databaseService.dropDatabase(dbname); + } + + /** + * use database + **/ + @GetMapping("/{dbname}") + public int use(@PathVariable("dbname") String dbname) { + return databaseService.useDatabase(dbname); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java new file mode 100644 index 0000000000..788f68a30a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.controller; + +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class InsertController { + + //TODO:多线程写一张表, thread = 10, table = 1 + //TODO:一个批次写多张表, insert into t1 using weather values() t2 using weather values() + //TODO:插入的频率, + //TODO:指定一张表内的records数量 + //TODO:是否乱序, + //TODO:乱序的比例,乱序的范围 + //TODO:先建表,自动建表 + //TODO:一个批次写多张表 + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java new file mode 100644 index 0000000000..797c3708d3 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java @@ -0,0 +1,45 @@ +package com.taosdata.taosdemo.controller; + +import com.taosdata.taosdemo.domain.TableValue; +import com.taosdata.taosdemo.service.SuperTableService; +import com.taosdata.taosdemo.service.TableService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class SubTableController { + + @Autowired + private TableService tableService; + @Autowired + private SuperTableService superTableService; + + //TODO: 使用supertable创建一个子表 + + //TODO:使用supertable创建多个子表 + + //TODO:使用supertable多线程创建子表 + + //TODO:使用supertable多线程创建子表,指定子表的name_prefix,子表的数量,使用线程的个数 + + /** + * 创建表,超级表或者普通表 + **/ + + + /** + * 创建超级表的子表 + **/ + @PostMapping("/{database}/{superTable}") + public int createTable(@PathVariable("database") String database, + @PathVariable("superTable") String superTable, + @RequestBody TableValue 
tableMetadta) { + tableMetadta.setDatabase(database); + return 0; + } + + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java new file mode 100644 index 0000000000..cf53c1440f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java @@ -0,0 +1,26 @@ +package com.taosdata.taosdemo.controller; + +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.service.SuperTableService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; + +public class SuperTableController { + @Autowired + private SuperTableService superTableService; + + + @PostMapping("/{database}") + public int createTable(@PathVariable("database") String database, @RequestBody SuperTableMeta tableMetadta) { + tableMetadta.setDatabase(database); + return superTableService.create(tableMetadta); + } + + //TODO: 删除超级表 + + //TODO:查询超级表 + + //TODO:统计查询表 +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java new file mode 100644 index 0000000000..dbdd978e74 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java @@ -0,0 +1,11 @@ +package com.taosdata.taosdemo.controller; + +public class TableController { + + //TODO:创建普通表,create table(ts timestamp, temperature float) + + //TODO:创建普通表,指定表的列数,包括第一列timestamp + + //TODO:创建普通表,指定表每列的name和type + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java new file mode 100644 index 0000000000..8a45e99989 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class FieldMeta { + private String name; + private String type; + + public FieldMeta() { + } + + public FieldMeta(String name, String type) { + this.name = name; + this.type = type; + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java new file mode 100644 index 0000000000..44805c0d7c --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class FieldValue { + private String name; + private T value; + + public FieldValue() { + } + + public FieldValue(String name, T value) { + this.name = name; + this.value = value; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java new file mode 100644 index 0000000000..a9f216f679 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java @@ -0,0 +1,15 @@ +package 
com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class RowValue { + private List fields; + + + public RowValue(List fields) { + this.fields = fields; + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java new file mode 100644 index 0000000000..81de882448 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class SubTableMeta { + + private String database; + private String supertable; + private String name; + private List tags; + private List fields; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java new file mode 100644 index 0000000000..74fb9598bc --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class SubTableValue { + + private String database; + private String supertable; + private String name; + private List tags; + private List values; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java new file mode 100644 index 0000000000..c5c65a4599 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java @@ -0,0 +1,14 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class SuperTableMeta { + + private String database; + private String name; + private List fields; + private List tags; +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java new file mode 100644 index 0000000000..3ab0a75c0b --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java @@ -0,0 +1,13 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class TableMeta { + + private String database; + private String name; + private List fields; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java new file mode 100644 index 0000000000..d5502aa46f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class TableValue { + + private String database; + private String name; + private List columns; + private List values; + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java new file mode 100644 index 0000000000..a385bb4e12 --- /dev/null +++ 
b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java @@ -0,0 +1,18 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class TagMeta { + private String name; + private String type; + + public TagMeta() { + + } + + public TagMeta(String name, String type) { + this.name = name; + this.type = type; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java new file mode 100644 index 0000000000..98ea8c0dc9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class TagValue { + private String name; + private T value; + + public TagValue() { + } + + public TagValue(String name, T value) { + this.name = name; + this.value = value; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java new file mode 100644 index 0000000000..e535ed1f98 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java @@ -0,0 +1,27 @@ +package com.taosdata.taosdemo.mapper; + +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.Map; + +@Repository +public interface DatabaseMapper { + + // create database if not exists XXX + int createDatabase(@Param("database") String dbname); + + // drop database if exists XXX + int dropDatabase(@Param("database") String dbname); + + // create database if not exists XXX keep XX days XX replica XX + int createDatabaseWithParameters(Map map); + + // use XXX + int useDatabase(@Param("database") String dbname); + + //TODO: alter database + + //TODO: show database + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml new file mode 100644 index 0000000000..1a1de34842 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml @@ -0,0 +1,48 @@ + + + + + + + + create database if not exists ${database} + + + + DROP database if exists ${database} + + + + CREATE database if not exists ${database} + + KEEP ${keep} + + + DAYS ${days} + + + REPLICA ${replica} + + + cache ${cache} + + + blocks ${blocks} + + + minrows ${minrows} + + + maxrows ${maxrows} + + + + + use ${database} + + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java new file mode 100644 index 0000000000..d23473ba31 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java @@ -0,0 +1,30 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SubTableValue; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface SubTableMapper { + + // 创建:子表 + int createUsingSuperTable(SubTableMeta subTableMeta); + + // 插入:一张子表多个values + int 
insertOneTableMultiValues(SubTableValue subTableValue); + + // 插入:一张子表多个values, 自动建表 + int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue); + + // 插入:多张表多个values + int insertMultiTableMultiValues(@Param("tables") List tables); + + // 插入:多张表多个values,自动建表 + int insertMultiTableMultiValuesUsingSuperTable(@Param("tables") List tables); + + // + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml new file mode 100644 index 0000000000..2fb94e99b7 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml @@ -0,0 +1,81 @@ + + + + + + + + CREATE table IF NOT EXISTS ${database}.${name} USING ${supertable} TAGS + + #{tag.value} + + + + + + INSERT INTO ${database}.${name} + VALUES + + + #{field.value} + + + + + + + INSERT INTO ${database}.${name} USING ${supertable} TAGS + + #{tag.value} + + VALUES + + + #{field.value} + + + + + + + + + + + INSERT INTO + + ${table.database}.${table.name} + VALUES + + + #{field.value} + + + + + + + + INSERT INTO + + ${table.database}.${table.name} USING ${table.supertable} TAGS + + #{tag.value} + + VALUES + + + #{field.value} + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java new file mode 100644 index 0000000000..c8610fac90 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java @@ -0,0 +1,33 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.SuperTableMeta; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +@Repository +public interface SuperTableMapper { + + // 创建超级表,使用自己定义的SQL语句 + int createSuperTableUsingSQL(@Param("createSuperTableSQL") String sql); + + // 创建超级表 create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...) 
+ int createSuperTable(SuperTableMeta tableMetadata); + + // 删除超级表 drop table if exists xxx; + int dropSuperTable(@Param("database") String database, @Param("name") String name); + + // + + // + + // + + // + + // + + // + + // + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml new file mode 100644 index 0000000000..8b83d57a4b --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml @@ -0,0 +1,41 @@ + + + + + + + ${createSuperTableSQL} + + + + + create table if not exists ${database}.${name} + + ${field.name} ${field.type} + + tags + + ${tag.name} ${tag.type} + + + + + + drop table if exists ${database}.${name} + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java new file mode 100644 index 0000000000..f00f6c9694 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java @@ -0,0 +1,28 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.TableMeta; +import com.taosdata.taosdemo.domain.TableValue; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface TableMapper { + + // 创建:普通表 + int create(TableMeta tableMeta); + + // 插入:一张表多个value + int insertOneTableMultiValues(TableValue values); + + // 插入: 一张表多个value,指定的列 + int insertOneTableMultiValuesWithColumns(TableValue values); + + // 插入:多个表多个value + int insertMultiTableMultiValues(@Param("tables") List tables); + + // 插入:多个表多个value, 指定的列 + int insertMultiTableMultiValuesWithColumns(@Param("tables") List tables); + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml new file mode 100644 index 0000000000..e2e7cbb30d --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml @@ -0,0 +1,68 @@ + + + + + + + + create table if not exists ${database}.${name} + + ${field.name} ${field.type} + + + + + + insert into ${database}.${name} values + + + ${field.value} + + + + + + + insert into ${database}.${name} + + ${column.name} + + values + + + ${field.value} + + + + + + + insert into + + ${table.database}.${table.name} values + + + ${field.value} + + + + + + + + insert into + + ${table.database}.${table.name} + + ${column.name} + + values + + + ${field.value} + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java new file mode 100644 index 0000000000..4afbe9dae8 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java @@ -0,0 +1,35 @@ +package com.taosdata.taosdemo.service; + +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + +public class AbstractService { + + protected int getAffectRows(List> futureList) { + int count = 0; + for (Future future : futureList) { + 
try { + count += future.get(); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } + } + return count; + } + + protected int getAffectRows(Future future) { + int count = 0; + try { + count += future.get(); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } + return count; + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java new file mode 100644 index 0000000000..e9aa2727a0 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java @@ -0,0 +1,38 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.mapper.DatabaseMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.Map; + +@Service +public class DatabaseService { + + @Autowired + private DatabaseMapper databaseMapper; + + // 建库,指定 name + public int createDatabase(String database) { + return databaseMapper.createDatabase(database); + } + + // 建库,指定参数 keep,days,replica等 + public int createDatabase(Map map) { + if (map.isEmpty()) + return 0; + if (map.containsKey("database") && map.size() == 1) + return databaseMapper.createDatabase(map.get("database")); + return databaseMapper.createDatabaseWithParameters(map); + } + + // drop database + public int dropDatabase(String dbname) { + return databaseMapper.dropDatabase(dbname); + } + + // use database + public int useDatabase(String dbname) { + return databaseMapper.useDatabase(dbname); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java new file mode 100644 index 0000000000..07c315b65a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java @@ -0,0 +1,118 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SubTableValue; +import com.taosdata.taosdemo.mapper.SubTableMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +@Service +public class SubTableService extends AbstractService { + + @Autowired + private SubTableMapper mapper; + + /** + * 1. 选择database,找到所有supertable + * 2. 选择supertable,可以拿到表结构,包括field和tag + * 3. 指定子表的前缀和个数 + * 4. 
指定创建子表的线程数 + */ + //TODO:指定database、supertable、子表前缀、子表个数、线程数 + + // 多线程创建表,指定线程个数 + public int createSubTable(List subTables, int threadSize) { + ExecutorService executor = Executors.newFixedThreadPool(threadSize); + List> futureList = new ArrayList<>(); + for (SubTableMeta subTableMeta : subTables) { + Future future = executor.submit(() -> createSubTable(subTableMeta)); + futureList.add(future); + } + executor.shutdown(); + return getAffectRows(futureList); + } + + + // 创建一张子表,可以指定database,supertable,tablename,tag值 + public int createSubTable(SubTableMeta subTableMeta) { + return mapper.createUsingSuperTable(subTableMeta); + } + + // 单线程创建多张子表,每张子表分别可以指定自己的database,supertable,tablename,tag值 + public int createSubTable(List subTables) { + return createSubTable(subTables, 1); + } + + /*************************************************************************************************************************/ + // 插入:多线程,多表 + public int insert(List subTableValues, int threadSize) { + ExecutorService executor = Executors.newFixedThreadPool(threadSize); + Future future = executor.submit(() -> insert(subTableValues)); + executor.shutdown(); + return getAffectRows(future); + } + + // 插入:多线程,多表, 自动建表 + public int insertAutoCreateTable(List subTableValues, int threadSize) { + ExecutorService executor = Executors.newFixedThreadPool(threadSize); + Future future = executor.submit(() -> insertAutoCreateTable(subTableValues)); + executor.shutdown(); + return getAffectRows(future); + } + + // 插入:单表,insert into xxx values(),()... + public int insert(SubTableValue subTableValue) { + return mapper.insertOneTableMultiValues(subTableValue); + } + + // 插入: 多表,insert into xxx values(),()... xxx values(),()... + public int insert(List subTableValues) { + return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues); + } + + // 插入:单表,自动建表, insert into xxx using xxx tags(...) values(),()... + public int insertAutoCreateTable(SubTableValue subTableValue) { + return mapper.insertOneTableMultiValuesUsingSuperTable(subTableValue); + } + + // 插入:多表,自动建表, insert into xxx using XXX tags(...) values(),()... xxx using XXX tags(...) values(),()... 
+ public int insertAutoCreateTable(List subTableValues) { + return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues); + } + + +// ExecutorService executors = Executors.newFixedThreadPool(threadSize); +// int count = 0; +// +// // +// List subTableValues = new ArrayList<>(); +// for (int tableIndex = 1; tableIndex <= numOfTablesPerSQL; tableIndex++) { +// // each table +// SubTableValue subTableValue = new SubTableValue(); +// subTableValue.setDatabase(); +// subTableValue.setName(); +// subTableValue.setSupertable(); +// +// List values = new ArrayList<>(); +// for (int valueCnt = 0; valueCnt < numOfValuesPerSQL; valueCnt++) { +// List fields = new ArrayList<>(); +// for (int fieldInd = 0; fieldInd <; fieldInd++) { +// FieldValue field = new FieldValue<>("", ""); +// fields.add(field); +// } +// RowValue row = new RowValue(); +// row.setFields(fields); +// values.add(row); +// } +// subTableValue.setValues(values); +// subTableValues.add(subTableValue); +// } + + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java new file mode 100644 index 0000000000..7f6836c999 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java @@ -0,0 +1,22 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.mapper.SuperTableMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +@Service +public class SuperTableService { + + @Autowired + private SuperTableMapper superTableMapper; + + // 创建超级表,指定每个field的名称和类型,每个tag的名称和类型 + public int create(SuperTableMeta superTableMeta) { + return superTableMapper.createSuperTable(superTableMeta); + } + + public void drop(String database, String name) { + superTableMapper.dropSuperTable(database, name); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java new file mode 100644 index 0000000000..bada6de708 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java @@ -0,0 +1,42 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.TableMeta; +import com.taosdata.taosdemo.mapper.TableMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +@Service +public class TableService extends AbstractService { + + @Autowired + private TableMapper tableMapper; + + //创建一张表 + public int create(TableMeta tableMeta) { + return tableMapper.create(tableMeta); + } + + //创建多张表 + public int create(List tables) { + return create(tables, 1); + } + + //多线程创建多张表 + public int create(List tables, int threadSize) { + ExecutorService executors = Executors.newFixedThreadPool(threadSize); + List> futures = new ArrayList<>(); + for (TableMeta table : tables) { + Future future = executors.submit(() -> create(table)); + futures.add(future); + } + return getAffectRows(futures); + } + + +} diff --git 
a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java new file mode 100644 index 0000000000..73cd981a46 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java @@ -0,0 +1,48 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.FieldValue; +import com.taosdata.taosdemo.domain.RowValue; +import com.taosdata.taosdemo.utils.DataGenerator; + +import java.util.*; + +public class FieldValueGenerator { + + public static Random random = new Random(System.currentTimeMillis()); + + // generate a time series from start to end with a fixed timeGap: timestamps are in ascending order (no out-of-order rows), field values are randomly generated + public static List<RowValue> generate(long start, long end, long timeGap, List<FieldMeta> fieldMetaList) { + List<RowValue> values = new ArrayList<>(); + + for (long ts = start; ts < end; ts += timeGap) { + List<FieldValue> fieldValues = new ArrayList<>(); + // timestamp + fieldValues.add(new FieldValue(fieldMetaList.get(0).getName(), ts)); + // other values + for (int fieldInd = 1; fieldInd < fieldMetaList.size(); fieldInd++) { + FieldMeta fieldMeta = fieldMetaList.get(fieldInd); + fieldValues.add(new FieldValue(fieldMeta.getName(), DataGenerator.randomValue(fieldMeta.getType()))); + } + values.add(new RowValue(fieldValues)); + } + return values; + } + + // make a generated time series out of order: rate is the percentage of rows to disorder, range bounds the extra backward jump (ms) of a disordered timestamp, field values remain randomly generated + public static List<RowValue> disrupt(List<RowValue> values, int rate, long range) { + long timeGap = (long) (values.get(1).getFields().get(0).getValue()) - (long) (values.get(0).getFields().get(0).getValue()); + int bugSize = values.size() * rate / 100; + Set<Integer> bugIndSet = new HashSet<>(); + while (bugIndSet.size() < bugSize) { + bugIndSet.add(random.nextInt(values.size())); + } + for (Integer bugInd : bugIndSet) { + Long timestamp = (Long) values.get(bugInd).getFields().get(0).getValue(); + Long newTimestamp = timestamp - timeGap - random.nextInt((int) range); + values.get(bugInd).getFields().get(0).setValue(newTimestamp); + } + + return values; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java new file mode 100644 index 0000000000..d15ad0d8bd --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java @@ -0,0 +1,30 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagValue; + +import java.util.ArrayList; +import java.util.List; + +public class SubTableMetaGenerator { + + // create tableSize sub tables, using tablePrefix as the prefix of each sub table name and superTableMeta as the schema + // create table xxx using XXX tags(XXX) + public static List<SubTableMeta> generate(SuperTableMeta superTableMeta, int tableSize, String tablePrefix) { + List<SubTableMeta> subTableMetaList = new ArrayList<>(); + for (int i = 1; i <= tableSize; i++) { + SubTableMeta subTableMeta = new SubTableMeta(); + // create table xxx.xxx using xxx tags(...) 
+ subTableMeta.setDatabase(superTableMeta.getDatabase()); + subTableMeta.setName(tablePrefix + i); + subTableMeta.setSupertable(superTableMeta.getName()); + subTableMeta.setFields(superTableMeta.getFields()); + List tagValues = TagValueGenerator.generate(superTableMeta.getTags()); + subTableMeta.setTags(tagValues); + subTableMetaList.add(subTableMeta); + } + return subTableMetaList; + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java new file mode 100644 index 0000000000..a36f718f83 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java @@ -0,0 +1,84 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.RowValue; +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SubTableValue; +import com.taosdata.taosdemo.utils.TimeStampUtil; +import org.springframework.beans.BeanUtils; + +import java.util.ArrayList; +import java.util.List; + +public class SubTableValueGenerator { + + public static List generate(List subTableMetaList, int numOfRowsPerTable, long start, long timeGap) { + List subTableValueList = new ArrayList<>(); + + subTableMetaList.stream().forEach((subTableMeta) -> { + // insert into xxx.xxx using xxxx tags(...) values(),()... + SubTableValue subTableValue = new SubTableValue(); + subTableValue.setDatabase(subTableMeta.getDatabase()); + subTableValue.setName(subTableMeta.getName()); + subTableValue.setSupertable(subTableMeta.getSupertable()); + subTableValue.setTags(subTableMeta.getTags()); + TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable); + List values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, subTableMeta.getFields()); + subTableValue.setValues(values); + subTableValueList.add(subTableValue); + }); + return subTableValueList; + } + + public static void disrupt(List subTableValueList, int rate, long range) { + subTableValueList.stream().forEach((tableValue) -> { + List values = tableValue.getValues(); + FieldValueGenerator.disrupt(values, rate, range); + }); + } + + public static List> split(List subTableValueList, int numOfTables, int numOfTablesPerSQL, int numOfRowsPerTable, int numOfValuesPerSQL) { + List> dataList = new ArrayList<>(); + + if (numOfRowsPerTable < numOfValuesPerSQL) + numOfValuesPerSQL = numOfRowsPerTable; + if (numOfTables < numOfTablesPerSQL) + numOfTablesPerSQL = numOfTables; + + //table + for (int tableCnt = 0; tableCnt < numOfTables; ) { + int tableSize = numOfTablesPerSQL; + if (tableCnt + tableSize > numOfTables) { + tableSize = numOfTables - tableCnt; + } + // row + for (int rowCnt = 0; rowCnt < numOfRowsPerTable; ) { + int rowSize = numOfValuesPerSQL; + if (rowCnt + rowSize > numOfRowsPerTable) { + rowSize = numOfRowsPerTable - rowCnt; + } + // System.out.println("rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", tableCnt: " + tableCnt + ", tableSize: " + tableSize); + // split + List blocks = subTableValueList.subList(tableCnt, tableCnt + tableSize); + List newBlocks = new ArrayList<>(); + for (int i = 0; i < blocks.size(); i++) { + SubTableValue subTableValue = blocks.get(i); + SubTableValue newSubTableValue = new SubTableValue(); + BeanUtils.copyProperties(subTableValue, newSubTableValue); + List values = subTableValue.getValues().subList(rowCnt, rowCnt + rowSize); + 
newSubTableValue.setValues(values); + newBlocks.add(newSubTableValue); + } + dataList.add(newBlocks); + + rowCnt += rowSize; + } + tableCnt += tableSize; + } + return dataList; + } + + public static void main(String[] args) { + split(null, 99, 10, 99, 10); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java new file mode 100644 index 0000000000..05aefd01ac --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java @@ -0,0 +1,80 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import com.taosdata.taosdemo.utils.TaosConstants; + +import java.util.ArrayList; +import java.util.List; + +public class SuperTableMetaGenerator { + + // 创建超级表,使用指定SQL语句 + public static SuperTableMeta generate(String superTableSQL) { + SuperTableMeta tableMeta = new SuperTableMeta(); + // for example : create table superTable (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int) + superTableSQL = superTableSQL.trim().toLowerCase(); + if (!superTableSQL.startsWith("create")) + throw new RuntimeException("invalid create super table SQL"); + + if (superTableSQL.contains("tags")) { + String tagSQL = superTableSQL.substring(superTableSQL.indexOf("tags") + 4).trim(); + tagSQL = tagSQL.substring(tagSQL.indexOf("(") + 1, tagSQL.lastIndexOf(")")); + String[] tagPairs = tagSQL.split(","); + List tagMetaList = new ArrayList<>(); + for (String tagPair : tagPairs) { + String name = tagPair.trim().split("\\s+")[0]; + String type = tagPair.trim().split("\\s+")[1]; + tagMetaList.add(new TagMeta(name, type)); + } + tableMeta.setTags(tagMetaList); + superTableSQL = superTableSQL.substring(0, superTableSQL.indexOf("tags")); + } + if (superTableSQL.contains("(")) { + String fieldSQL = superTableSQL.substring(superTableSQL.indexOf("(") + 1, superTableSQL.indexOf(")")); + String[] fieldPairs = fieldSQL.split(","); + List fieldList = new ArrayList<>(); + for (String fieldPair : fieldPairs) { + String name = fieldPair.trim().split("\\s+")[0]; + String type = fieldPair.trim().split("\\s+")[1]; + fieldList.add(new FieldMeta(name, type)); + } + tableMeta.setFields(fieldList); + superTableSQL = superTableSQL.substring(0, superTableSQL.indexOf("(")); + } + superTableSQL = superTableSQL.substring(superTableSQL.indexOf("table") + 5).trim(); + if (superTableSQL.contains(".")) { + String database = superTableSQL.split("\\.")[0]; + tableMeta.setDatabase(database); + superTableSQL = superTableSQL.substring(superTableSQL.indexOf(".") + 1); + } + tableMeta.setName(superTableSQL.trim()); + + return tableMeta; + } + + // 创建超级表,指定field和tag的个数 + public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize, String tagPrefix) { + if (fieldSize < 2 || tagSize < 1) { + throw new RuntimeException("create super table but fieldSize less than 2 or tagSize less than 1"); + } + SuperTableMeta tableMetadata = new SuperTableMeta(); + tableMetadata.setDatabase(database); + tableMetadata.setName(name); + // fields + List fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + for (int i = 1; i <= fieldSize; i++) { + fields.add(new FieldMeta(fieldPrefix + "" + i, 
TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length])); + } + tableMetadata.setFields(fields); + // tags + List tags = new ArrayList<>(); + for (int i = 1; i <= tagSize; i++) { + tags.add(new TagMeta(tagPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length])); + } + tableMetadata.setTags(tags); + return tableMetadata; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java new file mode 100644 index 0000000000..b8024fea45 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java @@ -0,0 +1,24 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.TagMeta; +import com.taosdata.taosdemo.domain.TagValue; +import com.taosdata.taosdemo.utils.DataGenerator; + +import java.util.ArrayList; +import java.util.List; + +public class TagValueGenerator { + + // 创建标签值:使用tagMetas + public static List generate(List tagMetas) { + List tagValues = new ArrayList<>(); + for (int i = 0; i < tagMetas.size(); i++) { + TagMeta tagMeta = tagMetas.get(i); + TagValue tagValue = new TagValue(); + tagValue.setName(tagMeta.getName()); + tagValue.setValue(DataGenerator.randomValue(tagMeta.getType())); + tagValues.add(tagValue); + } + return tagValues; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java new file mode 100644 index 0000000000..a200d17ef6 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java @@ -0,0 +1,120 @@ +package com.taosdata.taosdemo.utils; + +import java.util.Random; + +public class DataGenerator { + private static Random random = new Random(System.currentTimeMillis()); + private static final String alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; + + // "timestamp", "int", "bigint", "float", "double", "binary(64)", "smallint", "tinyint", "bool", "nchar(64)", + + public static Object randomValue(String type) { + int length = 64; + if (type.contains("(")) { + length = Integer.parseInt(type.substring(type.indexOf("(") + 1, type.indexOf(")"))); + type = type.substring(0, type.indexOf("(")); + } + switch (type.trim().toLowerCase()) { + case "timestamp": + return randomTimestamp(); + case "int": + return randomInt(); + case "bigint": + return randomBigint(); + case "float": + return randomFloat(); + case "double": + return randomDouble(); + case "binary": + return randomBinary(length); + case "smallint": + return randomSmallint(); + case "tinyint": + return randomTinyint(); + case "bool": + return randomBoolean(); + case "nchar": + return randomNchar(length); + default: + throw new IllegalArgumentException("Unexpected value: " + type); + } + } + + public static Long randomTimestamp() { + long start = System.currentTimeMillis(); + return randomTimestamp(start, start + 60l * 60l * 1000l); + } + + public static Long randomTimestamp(Long start, Long end) { + return start + (long) random.nextInt((int) (end - start)); + } + + public static String randomNchar(int length) { + return randomChinese(length); + } + + public static Boolean randomBoolean() { + return random.nextBoolean(); + } + + public static Integer randomTinyint() { + return randomInt(-127, 127); + } + + public static Integer 
randomSmallint() { + return randomInt(-32767, 32767); + } + + public static String randomBinary(int length) { + return randomString(length); + } + + public static String randomString(int length) { + String zh_en = ""; + for (int i = 0; i < length; i++) { + zh_en += alphabet.charAt(random.nextInt(alphabet.length())); + } + return zh_en; + } + + public static String randomChinese(int length) { + String zh_cn = ""; + int bottom = Integer.parseInt("4e00", 16); + int top = Integer.parseInt("9fa5", 16); + + for (int i = 0; i < length; i++) { + char c = (char) (random.nextInt(top - bottom + 1) + bottom); + zh_cn += new String(new char[]{c}); + } + return zh_cn; + } + + public static Double randomDouble() { + return randomDouble(0, 100); + } + + public static Double randomDouble(double bottom, double top) { + return bottom + (top - bottom) * random.nextDouble(); + } + + public static Float randomFloat() { + return randomFloat(0, 100); + } + + public static Float randomFloat(float bottom, float top) { + return bottom + (top - bottom) * random.nextFloat(); + } + + public static Long randomBigint() { + return random.nextLong(); + } + + public static Integer randomInt(int bottom, int top) { + return bottom + random.nextInt((top - bottom)); + } + + public static Integer randomInt() { + return randomInt(0, 100); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java new file mode 100644 index 0000000000..4e6f640330 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java @@ -0,0 +1,204 @@ +package com.taosdata.taosdemo.utils; + +public final class JdbcTaosdemoConfig { + // instance + public String host; //host + public int port = 6030; //port + public String user = "root"; //user + public String password = "taosdata"; //password + // database + public String database = "test"; //database + public int keep = 3650; //keep + public int days = 30; //days + public int replica = 1; //replica + //super table + public boolean doCreateTable = true; + public String superTable = "weather"; //super table name + public String prefixOfFields = "col"; + public int numOfFields; + public String prefixOfTags = "tag"; + public int numOfTags; + public String superTableSQL; + //sub table + public String tablePrefix = "t"; + public int numOfTables = 100; + public int numOfThreadsForCreate = 1; + // insert task + public boolean autoCreateTable; + public int numOfRowsPerTable = 100; + public int numOfThreadsForInsert = 1; + public int numOfTablesPerSQL = 10; + public int numOfValuesPerSQL = 10; + public long startTime; + public long timeGap; + public int sleep = 0; + public int order = 0; + public int rate = 10; + public long range = 1000l; + // select task + + // drop task + public boolean dropTable = false; + + public static void printHelp() { + System.out.println("Usage: java -jar jdbc-taosdemo-2.0.jar [OPTION...]"); + // instance + System.out.println("-host The host to connect to TDengine which you must specify"); + System.out.println("-port The TCP/IP port number to use for the connection. Default is 6030"); + System.out.println("-user The TDengine user name to use when connecting to the server. Default is 'root'"); + System.out.println("-password The password to use when connecting to the server.Default is 'taosdata'"); + // database + System.out.println("-database Destination database. 
Default is 'test'"); + System.out.println("-keep database keep parameter. Default is 3650"); + System.out.println("-days database days parameter. Default is 30"); + System.out.println("-replica database replica parameter. Default 1, min: 1, max: 3"); + // super table + System.out.println("-doCreateTable do create super table and sub table, true or false, Default true"); + System.out.println("-superTable super table name. Default 'weather'"); + System.out.println("-prefixOfFields The prefix of field in super table. Default is 'col'"); + System.out.println("-numOfFields The number of field in super table. Default is (ts timestamp, temperature float, humidity int)."); + System.out.println("-prefixOfTags The prefix of tag in super table. Default is 'tag'"); + System.out.println("-numOfTags The number of tag in super table. Default is (location nchar(64), groupId int)."); + System.out.println("-superTableSQL specify a sql statement for the super table.\n" + + " Default is 'create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int). \n" + + " if you use this parameter, the numOfFields and numOfTags will be invalid'"); + // sub table + System.out.println("-tablePrefix The prefix of sub tables. Default is 't'"); + System.out.println("-numOfTables The number of tables. Default is 1"); + System.out.println("-numOfThreadsForCreate The number of thread during create sub table. Default is 1"); + // insert task + System.out.println("-autoCreateTable Use auto Create sub tables SQL. Default is false"); + System.out.println("-numOfRowsPerTable The number of records per table. Default is 1"); + System.out.println("-numOfThreadsForInsert The number of threads during insert row. Default is 1"); + System.out.println("-numOfTablesPerSQL The number of table per SQL. Default is 1"); + System.out.println("-numOfValuesPerSQL The number of value per SQL. Default is 1"); + System.out.println("-startTime start time for insert task, The format is \"yyyy-MM-dd HH:mm:ss.SSS\"."); + System.out.println("-timeGap the number of time gap. Default is 1000 ms"); + System.out.println("-sleep The number of milliseconds for sleep after each insert. default is 0"); + System.out.println("-order Insert mode--0: In order, 1: Out of order. Default is in order"); + System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10"); + System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms"); + + // query task +// System.out.println("-sqlFile The select sql file"); + // drop task + System.out.println("-dropTable Drop data before quit. 
Default is false"); + System.out.println("--help Give this help list"); + } + + /** + * parse args from command line + * + * @param args command line args + * @return JdbcTaosdemoConfig + */ + public JdbcTaosdemoConfig(String[] args) { + for (int i = 0; i < args.length; i++) { + // instance + if ("-host".equals(args[i]) && i < args.length - 1) { + host = args[++i]; + } + if ("-port".equals(args[i]) && i < args.length - 1) { + port = Integer.parseInt(args[++i]); + } + if ("-user".equals(args[i]) && i < args.length - 1) { + user = args[++i]; + } + if ("-password".equals(args[i]) && i < args.length - 1) { + password = args[++i]; + } + // database + if ("-database".equals(args[i]) && i < args.length - 1) { + database = args[++i]; + } + if ("-keep".equals(args[i]) && i < args.length - 1) { + keep = Integer.parseInt(args[++i]); + } + if ("-days".equals(args[i]) && i < args.length - 1) { + days = Integer.parseInt(args[++i]); + } + if ("-replica".equals(args[i]) && i < args.length - 1) { + replica = Integer.parseInt(args[++i]); + } + // super table + if ("-doCreateTable".equals(args[i]) && i < args.length - 1) { + doCreateTable = Boolean.parseBoolean(args[++i]); + } + if ("-superTable".equals(args[i]) && i < args.length - 1) { + superTable = args[++i]; + } + if ("-prefixOfFields".equals(args[i]) && i < args.length - 1) { + prefixOfFields = args[++i]; + } + if ("-numOfFields".equals(args[i]) && i < args.length - 1) { + numOfFields = Integer.parseInt(args[++i]); + } + if ("-prefixOfTags".equals(args[i]) && i < args.length - 1) { + prefixOfTags = args[++i]; + } + if ("-numOfTags".equals(args[i]) && i < args.length - 1) { + numOfTags = Integer.parseInt(args[++i]); + } + if ("-superTableSQL".equals(args[i]) && i < args.length - 1) { + superTableSQL = args[++i]; + } + // sub table + if ("-tablePrefix".equals(args[i]) && i < args.length - 1) { + tablePrefix = args[++i]; + } + if ("-numOfTables".equals(args[i]) && i < args.length - 1) { + numOfTables = Integer.parseInt(args[++i]); + } + if ("-autoCreateTable".equals(args[i]) && i < args.length - 1) { + autoCreateTable = Boolean.parseBoolean(args[++i]); + } + if ("-numOfThreadsForCreate".equals(args[i]) && i < args.length - 1) { + numOfThreadsForCreate = Integer.parseInt(args[++i]); + } + // insert task + if ("-numOfRowsPerTable".equals(args[i]) && i < args.length - 1) { + numOfRowsPerTable = Integer.parseInt(args[++i]); + } + if ("-numOfThreadsForInsert".equals(args[i]) && i < args.length - 1) { + numOfThreadsForInsert = Integer.parseInt(args[++i]); + } + if ("-numOfTablesPerSQL".equals(args[i]) && i < args.length - 1) { + numOfTablesPerSQL = Integer.parseInt(args[++i]); + } + if ("-numOfValuesPerSQL".equals(args[i]) && i < args.length - 1) { + numOfValuesPerSQL = Integer.parseInt(args[++i]); + } + if ("-startTime".equals(args[i]) && i < args.length - 1) { + startTime = TimeStampUtil.datetimeToLong(args[++i]); + } + if ("-timeGap".equals(args[i]) && i < args.length - 1) { + timeGap = Long.parseLong(args[++i]); + } + if ("-sleep".equals(args[i]) && i < args.length - 1) { + sleep = Integer.parseInt(args[++i]); + } + if ("-order".equals(args[i]) && i < args.length - 1) { + order = Integer.parseInt(args[++i]); + } + if ("-rate".equals(args[i]) && i < args.length - 1) { + rate = Integer.parseInt(args[++i]); + if (rate < 0 || rate > 100) + throw new IllegalArgumentException("rate must between 0 and 100"); + } + if ("-range".equals(args[i]) && i < args.length - 1) { + range = Integer.parseInt(args[++i]); + } + // select task + + // drop task + if 
("-dropTable".equals(args[i]) && i < args.length - 1) { + dropTable = Boolean.parseBoolean(args[++i]); + } + } + } + + public static void main(String[] args) { + JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java new file mode 100644 index 0000000000..23c3c5279a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java @@ -0,0 +1,8 @@ +package com.taosdata.taosdemo.utils; + +public class TaosConstants { + public static final String[] DATA_TYPES = { + "timestamp", "int", "bigint", "float", "double", + "binary(64)", "smallint", "tinyint", "bool", "nchar(64)", + }; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java new file mode 100644 index 0000000000..9cfce16d82 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java @@ -0,0 +1,67 @@ +package com.taosdata.taosdemo.utils; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Date; + +public class TimeStampUtil { + + private static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss.SSS"; + + public static long datetimeToLong(String dateTime) { + SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat); + try { + return sdf.parse(dateTime).getTime(); + } catch (ParseException e) { + throw new IllegalArgumentException("invalid datetime string >>> " + dateTime); + } + } + + public static String longToDatetime(long time) { + SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat); + return sdf.format(new Date(time)); + } + + public static class TimeTuple { + public Long start; + public Long end; + public Long timeGap; + + TimeTuple(long start, long end, long timeGap) { + this.start = start; + this.end = end; + this.timeGap = timeGap; + } + } + + public static TimeTuple range(long start, long timeGap, long size) { + long now = System.currentTimeMillis(); + if (timeGap < 1) + timeGap = 1; + if (start == 0) + start = now - size * timeGap; + + // 如果size小于1异常 + if (size < 1) + throw new IllegalArgumentException("size less than 1."); + // 如果timeGap为1,已经超长,需要前移start + if (start + size > now) { + start = now - size; + return new TimeTuple(start, now, 1); + } + long end = start + (long) (timeGap * size); + if (end > now) { + //压缩timeGap + end = now; + double gap = (end - start) / (size * 1.0f); + if (gap < 1.0f) { + timeGap = 1; + start = end - size; + } else { + timeGap = (long) gap; + end = start + (long) (timeGap * size); + } + } + return new TimeTuple(start, end, timeGap); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/application.properties b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties new file mode 100644 index 0000000000..1e7a7de89f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties @@ -0,0 +1,14 @@ +#spring.datasource.url=jdbc:mysql://master:3306/?useSSL=false&useUnicode=true&characterEncoding=UTF-8 +#spring.datasource.driver-class-name=com.mysql.jdbc.Driver +#spring.datasource.username=root +#spring.datasource.password=123456 + +spring.datasource.url=jdbc:TAOS://master:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8 
+spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver +spring.datasource.username=root +spring.datasource.password=taosdata + +spring.datasource.hikari.maximum-pool-size=10 +spring.datasource.hikari.minimum-idle=10 +spring.datasource.hikari.max-lifetime=600000 +logging.level.com.taosdata.taosdemo.mapper=debug \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties b/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties new file mode 100644 index 0000000000..1299357be3 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties @@ -0,0 +1,21 @@ +### Settings ### +log4j.rootLogger=debug,stdout,DebugLog,ErrorLog +### Log output to the console ### +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n +### Log DEBUG level and above to logs/debug.log +log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DebugLog.File=logs/debug.log +log4j.appender.DebugLog.Append=true +log4j.appender.DebugLog.Threshold=DEBUG +log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout +log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n +### Log ERROR level and above to logs/error.log +log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender +log4j.appender.ErrorLog.File=logs/error.log +log4j.appender.ErrorLog.Append=true +log4j.appender.ErrorLog.Threshold=ERROR +log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout +log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html b/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html new file mode 100644 index 0000000000..69f8851c9b --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html @@ -0,0 +1,10 @@ + + + + + Index + + +

Hello~~~

+ + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java new file mode 100644 index 0000000000..e872509187 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java @@ -0,0 +1,13 @@ +package com.taosdata.taosdemo; + +import org.junit.jupiter.api.Test; +import org.springframework.boot.test.context.SpringBootTest; + +@SpringBootTest +class TaosdemoApplicationTests { + + @Test + void contextLoads() { + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java new file mode 100644 index 0000000000..8364e16ed0 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java @@ -0,0 +1,42 @@ +package com.taosdata.taosdemo.mapper; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.HashMap; +import java.util.Map; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class DatabaseMapperTest { + @Autowired + private DatabaseMapper databaseMapper; + + @Test + public void createDatabase() { + databaseMapper.createDatabase("db_test"); + } + + @Test + public void dropDatabase() { + databaseMapper.dropDatabase("db_test"); + } + + @Test + public void creatDatabaseWithParameters() { + Map map = new HashMap<>(); + map.put("dbname", "weather"); + map.put("keep", "3650"); + map.put("days", "30"); + map.put("replica", "1"); + databaseMapper.createDatabaseWithParameters(map); + } + + @Test + public void useDatabase() { + databaseMapper.useDatabase("test"); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java new file mode 100644 index 0000000000..90faa20496 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java @@ -0,0 +1,88 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.*; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SubTableMapperTest { + @Autowired + private SubTableMapper subTableMapper; + private List tables; + + @Test + public void createUsingSuperTable() { + SubTableMeta subTableMeta = new SubTableMeta(); + subTableMeta.setDatabase("test"); + subTableMeta.setSupertable("weather"); + subTableMeta.setName("t1"); + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagValue("tag" + (i + 1), "nchar(64)")); + } + subTableMeta.setTags(tags); + subTableMapper.createUsingSuperTable(subTableMeta); + } + + @Test + public void insertOneTableMultiValues() { + subTableMapper.insertOneTableMultiValues(tables.get(0)); + } + 
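For readers following these mapper tests, the calls above boil down to plain TDengine SQL. The exact statements live in the MyBatis mapper XML, which is not part of this diff, so the strings in the sketch below are only assumptions about what DatabaseMapper.createDatabaseWithParameters and SubTableMapper.createUsingSuperTable emit, not the actual mapper contents:

```java
// Illustrative sketch only: the real SQL is defined in the MyBatis mapper XML (not shown in this diff).
public class MapperSqlSketch {
    public static void main(String[] args) {
        // assumed form of DatabaseMapper.createDatabaseWithParameters({dbname=weather, keep=3650, days=30, replica=1})
        System.out.println("create database if not exists weather keep 3650 days 30 replica 1");
        // assumed form of SubTableMapper.createUsingSuperTable for sub table t1 of super table test.weather
        System.out.println("create table if not exists test.t1 using test.weather tags ('beijing', 1)");
    }
}
```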
+ @Test + public void insertOneTableMultiValuesUsingSuperTable() { + subTableMapper.insertOneTableMultiValuesUsingSuperTable(tables.get(0)); + } + + + @Test + public void insertMultiTableMultiValues() { + subTableMapper.insertMultiTableMultiValues(tables); + } + + @Test + public void insertMultiTableMultiValuesUsingSuperTable() { + subTableMapper.insertMultiTableMultiValuesUsingSuperTable(tables); + } + + + @Before + public void before() { + tables = new ArrayList<>(); + for (int ind = 0; ind < 3; ind++) { + + SubTableValue table = new SubTableValue(); + table.setDatabase("test"); + // supertable + table.setSupertable("weather"); + table.setName("t" + (ind + 1)); + // tags + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagValue("tag" + (i + 1), "beijing")); + } + table.setTags(tags); + // values + List values = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + List fields = new ArrayList<>(); + for (int j = 0; j < 4; j++) { + fields.add(new FieldValue("f" + (j + 1), (j + 1) * 10)); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tables.add(table); + } + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java new file mode 100644 index 0000000000..6c97874cfc --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java @@ -0,0 +1,50 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SuperTableMapperTest { + @Autowired + private SuperTableMapper superTableMapper; + + @Test + public void testCreateSuperTableUsingSQL() { + String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + superTableMapper.createSuperTableUsingSQL(sql); + } + + @Test + public void createSuperTable() { + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase("test"); + superTableMeta.setName("weather"); + List fields = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + fields.add(new FieldMeta("f" + (i + 1), "int")); + } + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagMeta("t" + (i + 1), "nchar(64)")); + } + superTableMeta.setTags(tags); + + superTableMapper.createSuperTable(superTableMeta); + } + + @Test + public void dropSuperTable() { + superTableMapper.dropSuperTable("test", "weather"); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java new file mode 100644 index 0000000000..3a051b3112 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java @@ -0,0 +1,142 @@ +package com.taosdata.taosdemo.mapper; + +import 
com.taosdata.taosdemo.domain.*; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +@SpringBootTest +@RunWith(SpringRunner.class) +public class TableMapperTest { + @Autowired + private TableMapper tableMapper; + private static Random random = new Random(System.currentTimeMillis()); + + @Test + public void create() { + TableMeta table = new TableMeta(); + table.setDatabase("test"); + table.setName("t1"); + List fields = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (i + 1)); + field.setType("nchar(64)"); + fields.add(field); + } + table.setFields(fields); + tableMapper.create(table); + } + + @Test + public void insertOneTableMultiValues() { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t1"); + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < 2; k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 100); + fields.add(field); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tableMapper.insertOneTableMultiValues(table); + } + + @Test + public void insertOneTableMultiValuesWithCoulmns() { + TableValue tableValue = new TableValue(); + tableValue.setDatabase("test"); + tableValue.setName("weather"); + // columns + List columns = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (i + 1)); + columns.add(field); + } + tableValue.setColumns(columns); + // values + List values = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + List fields = new ArrayList<>(); + for (int j = 0; j < 3; j++) { + FieldValue field = new FieldValue(); + field.setValue(j); + fields.add(field); + } + values.add(new RowValue(fields)); + } + tableValue.setValues(values); + tableMapper.insertOneTableMultiValuesWithColumns(tableValue); + } + + @Test + public void insertMultiTableMultiValues() { + List tables = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t" + (i + 1)); + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < 2; k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 10); + fields.add(field); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tables.add(table); + } + tableMapper.insertMultiTableMultiValues(tables); + } + + @Test + public void insertMultiTableMultiValuesWithCoulumns() { + List tables = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t" + (i + 1)); + // columns + List columns = new ArrayList<>(); + for (int j = 0; j < 3; j++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (j + 1)); + columns.add(field); + } + table.setColumns(columns); + // values + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < columns.size(); k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 10); + fields.add(field); + } + values.add(new RowValue(fields)); + } + 
table.setValues(values); + + tables.add(table); + } + tableMapper.insertMultiTableMultiValuesWithColumns(tables); + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java new file mode 100644 index 0000000000..2c1cdf6e00 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java @@ -0,0 +1,29 @@ +package com.taosdata.taosdemo.service; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class DatabaseServiceTest { + @Autowired + private DatabaseService service; + + @Test + public void testCreateDatabase1() { + service.createDatabase("testXXXX"); + } + + @Test + public void dropDatabase() { + service.dropDatabase("testXXXX"); + } + + @Test + public void useDatabase() { + service.useDatabase("test"); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java new file mode 100644 index 0000000000..4e54de3f13 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java @@ -0,0 +1,50 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.TagValue; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SubTableServiceTest { + @Autowired + private SubTableService service; + + private List subTables; + + @Before + public void before() { + subTables = new ArrayList<>(); + for (int i = 1; i <= 1; i++) { + SubTableMeta subTableMeta = new SubTableMeta(); + subTableMeta.setDatabase("test"); + subTableMeta.setSupertable("weather"); + subTableMeta.setName("t" + i); + List tags = new ArrayList<>(); + tags.add(new TagValue("location", "beijing")); + tags.add(new TagValue("groupId", i)); + subTableMeta.setTags(tags); + subTables.add(subTableMeta); + } + } + + @Test + public void testCreateSubTable() { + int count = service.createSubTable(subTables); + System.out.println("count >>> " + count); + } + + @Test + public void testCreateSubTableList() { + int count = service.createSubTable(subTables, 10); + System.out.println("count >>> " + count); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java new file mode 100644 index 0000000000..b9291fceaf --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java @@ -0,0 +1,39 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.FieldMeta; +import 
com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SuperTableServiceTest { + + @Autowired + private SuperTableService service; + + @Test + public void testCreate() { + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase("test"); + superTableMeta.setName("weather"); + List fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + fields.add(new FieldMeta("temperature", "float")); + fields.add(new FieldMeta("humidity", "int")); + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + tags.add(new TagMeta("location", "nchar(64)")); + tags.add(new TagMeta("groupId", "int")); + superTableMeta.setTags(tags); + service.create(superTableMeta); + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java new file mode 100644 index 0000000000..fdbd554629 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java @@ -0,0 +1,43 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.TableMeta; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class TableServiceTest { + @Autowired + private TableService tableService; + + private List tables; + + @Before + public void before() { + tables = new ArrayList<>(); + for (int i = 0; i < 1; i++) { + TableMeta tableMeta = new TableMeta(); + tableMeta.setDatabase("test"); + tableMeta.setName("weather" + (i + 1)); + tables.add(tableMeta); + } + } + + @Test + public void testCreate() { + int count = tableService.create(tables); + System.out.println(count); + } + + @Test + public void testCreateMultiThreads() { + System.out.println(tableService.create(tables, 10)); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java new file mode 100644 index 0000000000..aea3cc76ca --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java @@ -0,0 +1,59 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.RowValue; +import com.taosdata.taosdemo.utils.TimeStampUtil; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class FieldValueGeneratorTest { + + private List rowValues; + + @Test + public void generate() { + List fieldMetas = new ArrayList<>(); + fieldMetas.add(new FieldMeta("ts", 
"timestamp")); + fieldMetas.add(new FieldMeta("temperature", "float")); + fieldMetas.add(new FieldMeta("humidity", "int")); + + long start = TimeStampUtil.datetimeToLong("2020-01-01 00:00:00.000"); + long end = TimeStampUtil.datetimeToLong("2020-01-01 10:00:00.000"); + + rowValues = FieldValueGenerator.generate(start, end, 1000l * 3600, fieldMetas); + Assert.assertEquals(10, rowValues.size()); + } + + @Test + public void disrupt() { + List fieldMetas = new ArrayList<>(); + fieldMetas.add(new FieldMeta("ts", "timestamp")); + fieldMetas.add(new FieldMeta("temperature", "float")); + fieldMetas.add(new FieldMeta("humidity", "int")); + + long start = TimeStampUtil.datetimeToLong("2020-01-01 00:00:00.000"); + long end = TimeStampUtil.datetimeToLong("2020-01-01 10:00:00.000"); + + rowValues = FieldValueGenerator.generate(start, end, 1000l * 3600l, fieldMetas); + + FieldValueGenerator.disrupt(rowValues, 20, 1000); + Assert.assertEquals(10, rowValues.size()); + } + + @After + public void after() { + for (RowValue row : rowValues) { + row.getFields().stream().forEach(field -> { + if (field.getName().equals("ts")) { + System.out.print(TimeStampUtil.longToDatetime((Long) field.getValue())); + } else + System.out.print(" ," + field.getValue()); + }); + System.out.println(); + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java new file mode 100644 index 0000000000..78c8e9283f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java @@ -0,0 +1,52 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class SubTableMetaGeneratorTest { + List subTableMetas; + + @Test + public void generate() { + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase("test"); + superTableMeta.setName("weather"); + List fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + fields.add(new FieldMeta("temperature", "float")); + fields.add(new FieldMeta("humidity", "int")); + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + tags.add(new TagMeta("location", "nchar(64)")); + tags.add(new TagMeta("groupId", "int")); + superTableMeta.setTags(tags); + + subTableMetas = SubTableMetaGenerator.generate(superTableMeta, 10, "t"); + Assert.assertEquals(10, subTableMetas.size()); + Assert.assertEquals("t1", subTableMetas.get(0).getName()); + Assert.assertEquals("t2", subTableMetas.get(1).getName()); + Assert.assertEquals("t3", subTableMetas.get(2).getName()); + Assert.assertEquals("t4", subTableMetas.get(3).getName()); + Assert.assertEquals("t5", subTableMetas.get(4).getName()); + Assert.assertEquals("t6", subTableMetas.get(5).getName()); + Assert.assertEquals("t7", subTableMetas.get(6).getName()); + Assert.assertEquals("t8", subTableMetas.get(7).getName()); + Assert.assertEquals("t9", subTableMetas.get(8).getName()); + Assert.assertEquals("t10", subTableMetas.get(9).getName()); + } + + @After + public void after() { + for (SubTableMeta subTableMeta : 
subTableMetas) { + System.out.println(subTableMeta); + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java new file mode 100644 index 0000000000..11c5312cf6 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java @@ -0,0 +1,60 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +public class SuperTableMetaGeneratorImplTest { + private SuperTableMeta meta; + + @Test + public void generate() { + String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + meta = SuperTableMetaGenerator.generate(sql); + Assert.assertEquals("test", meta.getDatabase()); + Assert.assertEquals("weather", meta.getName()); + Assert.assertEquals(3, meta.getFields().size()); + Assert.assertEquals("ts", meta.getFields().get(0).getName()); + Assert.assertEquals("timestamp", meta.getFields().get(0).getType()); + Assert.assertEquals("temperature", meta.getFields().get(1).getName()); + Assert.assertEquals("float", meta.getFields().get(1).getType()); + Assert.assertEquals("humidity", meta.getFields().get(2).getName()); + Assert.assertEquals("int", meta.getFields().get(2).getType()); + + Assert.assertEquals("location", meta.getTags().get(0).getName()); + Assert.assertEquals("nchar(64)", meta.getTags().get(0).getType()); + Assert.assertEquals("groupid", meta.getTags().get(1).getName()); + Assert.assertEquals("int", meta.getTags().get(1).getType()); + } + + @Test + public void generate2() { + meta = SuperTableMetaGenerator.generate("test", "weather", 10, "col", 10, "tag"); + Assert.assertEquals("test", meta.getDatabase()); + Assert.assertEquals("weather", meta.getName()); + Assert.assertEquals(11, meta.getFields().size()); + for (FieldMeta fieldMeta : meta.getFields()) { + Assert.assertNotNull(fieldMeta.getName()); + Assert.assertNotNull(fieldMeta.getType()); + } + for (TagMeta tagMeta : meta.getTags()) { + Assert.assertNotNull(tagMeta.getName()); + Assert.assertNotNull(tagMeta.getType()); + } + } + + @After + public void after() { + System.out.println(meta.getDatabase()); + System.out.println(meta.getName()); + for (FieldMeta fieldMeta : meta.getFields()) { + System.out.println(fieldMeta); + } + for (TagMeta tagMeta : meta.getTags()) { + System.out.println(tagMeta); + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java new file mode 100644 index 0000000000..37c9051c94 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java @@ -0,0 +1,37 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.TagMeta; +import com.taosdata.taosdemo.domain.TagValue; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class TagValueGeneratorTest { + List tagvalues; + 
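The generate() test that follows relies on a simple contract: TagValueGenerator returns one TagValue per TagMeta, preserving the tag name and filling in a value appropriate for the declared type. The generator and DataGenerator internals are not shown in this diff, so the following is only a hedged sketch of that contract, not the project's implementation:

```java
// Hedged sketch of the assumed TagValueGenerator contract; not the real implementation.
import java.util.ArrayList;
import java.util.List;
import java.util.Random;

public class TagValueContractSketch {
    private static final Random RANDOM = new Random();

    // one value per declared tag: the name is preserved, the value is chosen by type
    static List<String[]> generate(List<String[]> tagMetas) {
        List<String[]> values = new ArrayList<>();
        for (String[] meta : tagMetas) { // meta[0] = name, meta[1] = type
            boolean textual = meta[1].startsWith("nchar") || meta[1].startsWith("binary");
            String value = textual ? "'tag_" + RANDOM.nextInt(100) + "'" : String.valueOf(RANDOM.nextInt(100));
            values.add(new String[]{meta[0], value});
        }
        return values;
    }

    public static void main(String[] args) {
        List<String[]> metas = new ArrayList<>();
        metas.add(new String[]{"location", "nchar(10)"});
        metas.add(new String[]{"groupId", "int"});
        for (String[] v : generate(metas)) {
            System.out.println(v[0] + " -> " + v[1]);
        }
    }
}
```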
+ @Test + public void generate() { + List tagMetaList = new ArrayList<>(); + tagMetaList.add(new TagMeta("location", "nchar(10)")); + tagMetaList.add(new TagMeta("groupId", "int")); + tagMetaList.add(new TagMeta("ts", "timestamp")); + tagMetaList.add(new TagMeta("temperature", "float")); + tagMetaList.add(new TagMeta("humidity", "double")); + tagMetaList.add(new TagMeta("text", "binary(10)")); + tagvalues = TagValueGenerator.generate(tagMetaList); + Assert.assertEquals("location", tagvalues.get(0).getName()); + Assert.assertEquals("groupId", tagvalues.get(1).getName()); + Assert.assertEquals("ts", tagvalues.get(2).getName()); + Assert.assertEquals("temperature", tagvalues.get(3).getName()); + Assert.assertEquals("humidity", tagvalues.get(4).getName()); + Assert.assertEquals("text", tagvalues.get(5).getName()); + } + + @After + public void after() { + tagvalues.stream().forEach(System.out::println); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java new file mode 100644 index 0000000000..7d12782526 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java @@ -0,0 +1,20 @@ +package com.taosdata.taosdemo.utils; + +import org.junit.Assert; +import org.junit.Test; + +public class DataGeneratorTest { + + @Test + public void randomValue() { + for (int i = 0; i < TaosConstants.DATA_TYPES.length; i++) { + System.out.println(TaosConstants.DATA_TYPES[i] + " >>> " + DataGenerator.randomValue(TaosConstants.DATA_TYPES[i])); + } + } + + @Test + public void randomNchar() { + String s = DataGenerator.randomNchar(10); + Assert.assertEquals(10, s.length()); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java new file mode 100644 index 0000000000..628594c4b1 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java @@ -0,0 +1,38 @@ +package com.taosdata.taosdemo.utils; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TimeStampUtilTest { + + @Test + public void datetimeToLong() { + final String startTime = "2005-01-01 00:00:00.000"; + long start = TimeStampUtil.datetimeToLong(startTime); + assertEquals(1104508800000l, start); + String dateTimeStr = TimeStampUtil.longToDatetime(start); + assertEquals("2005-01-01 00:00:00.000", dateTimeStr); + } + + @Test + public void longToDatetime() { + String datetime = TimeStampUtil.longToDatetime(1510000000000L); + assertEquals("2017-11-07 04:26:40.000", datetime); + long timestamp = TimeStampUtil.datetimeToLong(datetime); + assertEquals(1510000000000L, timestamp); + } + + @Test + public void range() { + long start = TimeStampUtil.datetimeToLong("2020-10-01 00:00:00.000"); + long timeGap = 1000; + long numOfRowsPerTable = 1000l * 3600l * 24l * 90l; + TimeStampUtil.TimeTuple timeTuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable); + System.out.println(TimeStampUtil.longToDatetime(timeTuple.start)); + System.out.println(TimeStampUtil.longToDatetime(timeTuple.end)); + System.out.println(timeTuple.timeGap); + + } + +} \ No newline at end of file diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index 74a49288e9..54e81d33b9 100644 --- 
a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -50,7 +50,7 @@ static void queryDB(TAOS *taos, char *command) { taos_free_result(pSql); } -void Test(char *qstr, const char *input, int i); +void Test(TAOS *taos, char *qstr, int i); int main(int argc, char *argv[]) { char qstr[1024]; @@ -63,21 +63,22 @@ int main(int argc, char *argv[]) { // init TAOS taos_init(); - for (int i = 0; i < 4000000; i++) { - Test(qstr, argv[1], i); - } - taos_cleanup(); -} -void Test(char *qstr, const char *input, int index) { - TAOS *taos = taos_connect(input, "root", "taosdata", NULL, 0); - printf("==================test at %d\n================================", index); - queryDB(taos, "drop database if exists demo"); - queryDB(taos, "create database demo"); - TAOS_RES *result; + TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } + for (int i = 0; i < 4000000; i++) { + Test(taos, qstr, i); + } + taos_close(taos); + taos_cleanup(); +} +void Test(TAOS *taos, char *qstr, int index) { + printf("==================test at %d\n================================", index); + queryDB(taos, "drop database if exists demo"); + queryDB(taos, "create database demo"); + TAOS_RES *result; queryDB(taos, "use demo"); queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))"); @@ -131,6 +132,5 @@ void Test(char *qstr, const char *input, int index) { taos_free_result(result); printf("====demo end====\n\n"); - taos_close(taos); } diff --git a/tests/examples/lua/OpenResty/conf/nginx.conf b/tests/examples/lua/OpenResty/conf/nginx.conf new file mode 100644 index 0000000000..2f838c21fc --- /dev/null +++ b/tests/examples/lua/OpenResty/conf/nginx.conf @@ -0,0 +1,21 @@ +worker_processes 1; +user root; +error_log logs/error.log; +events { + worker_connections 1024; +} + +http { + lua_package_path '$prefix/lua/?.lua;$prefix/rest/?.lua;/blah/?.lua;;'; + lua_package_cpath "$prefix/so/?.so;;"; + lua_code_cache off; + server { + listen 7000; + server_name restapi; + charset utf-8; + lua_need_request_body on; + location ~ ^/api/([-_a-zA-Z0-9/]+) { + content_by_lua_file rest/$1.lua; + } + } +} diff --git a/tests/examples/lua/OpenResty/logs/.gitignore b/tests/examples/lua/OpenResty/logs/.gitignore new file mode 100644 index 0000000000..ad8530e1c3 --- /dev/null +++ b/tests/examples/lua/OpenResty/logs/.gitignore @@ -0,0 +1,4 @@ +# Ignore everything in this directory +* +# Except this file +!.gitignore diff --git a/tests/examples/lua/OpenResty/rest/test.lua b/tests/examples/lua/OpenResty/rest/test.lua new file mode 100644 index 0000000000..179950cbe7 --- /dev/null +++ b/tests/examples/lua/OpenResty/rest/test.lua @@ -0,0 +1,83 @@ +local driver = require "luaconnector51" +local cjson = require "cjson" +ngx.say("start time:"..os.time()) + + +local config = { + host = "127.0.0.1", + port = 6030, + database = "", + user = "root", + password = "taosdata", + max_packet_size = 1024 * 1024 +} + +local conn +local res = driver.connect(config) +if res.code ~=0 then + ngx.say("connect--- failed: "..res.error) + return +else + conn = res.conn + ngx.say("connect--- pass.") +end + +local res = driver.query(conn,"drop database if exists nginx") +if res.code ~=0 then + ngx.say("drop db--- failed: "..res.error) +else + ngx.say("drop db--- pass.") +end +res = driver.query(conn,"create database nginx") +if res.code ~=0 then + ngx.say("create db--- failed: "..res.error) + 
+else + ngx.say("create db--- pass.") +end + +res = driver.query(conn,"use nginx") +if res.code ~=0 then + ngx.say("select db--- failed: "..res.error) +else + ngx.say("select db--- pass.") +end + +res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))") +if res.code ~=0 then + ngx.say("create table---failed: "..res.error) + +else + ngx.say("create table--- pass.") +end + +res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')") +if res.code ~=0 then + ngx.say("insert records failed: "..res.error) + return +else + if(res.affected == 3) then + ngx.say("insert records--- pass") + else + ngx.say("insert records---failed: expect 3 affected records, actually affected "..res.affected) + end +end + +res = driver.query(conn,"select * from m1") + +if res.code ~=0 then + ngx.say("select failed: "..res.error) + return +else + ngx.say(cjson.encode(res)) + if (#(res.item) == 3) then + ngx.say("select--- pass") + else + ngx.say("select--- failed: expect 3 affected records, actually received "..#(res.item)) + end + +end +driver.close(conn) +ngx.say("end time:"..os.time()) +--ngx.log(ngx.ERR,"in test file.") + diff --git a/tests/examples/lua/OpenResty/so/luaconnector51.so b/tests/examples/lua/OpenResty/so/luaconnector51.so new file mode 100755 index 0000000000..442de6e39f Binary files /dev/null and b/tests/examples/lua/OpenResty/so/luaconnector51.so differ diff --git a/tests/examples/lua/README.md b/tests/examples/lua/README.md index 8efc52b35c..dd9c9d0787 100644 --- a/tests/examples/lua/README.md +++ b/tests/examples/lua/README.md @@ -2,13 +2,13 @@ It's a Lua implementation for [TDengine](https://github.com/taosdata/TDengine), an open-sourced big data platform designed and optimized for the Internet of Things (IoT), Connected Cars, Industrial IoT, and IT Infrastructure and Application Monitoring. You may need to install Lua5.3 . -## Dependencies +## Lua Dependencies - Lua: ``` https://www.lua.org/ ``` -## Run with Sample +## Run with Lua Sample Build driver lib: ``` @@ -18,3 +18,26 @@ Run lua sample: ``` lua test.lua ``` + +## OpenResty Dependencies +- OpenResty: +``` +http://openresty.org +``` +## Run with OpenResty Sample +**This section demonstrates how to get binary file for connector. To be convenient for trial, an connector has been put into OpenResty work directory. +Because of difference on C API between Lua5.3 and Lua5.1, the files needed by connector for OpenResty are stored in local source directory and configured in script build.sh.** + +Build driver lib: +``` +cd lua51 +./build.sh +``` +Run OpenResty sample: +``` +cd .. +cd OpenResty +sudo openresty -p . 
+curl http://127.0.0.1:7000/api/test +``` + diff --git a/tests/examples/lua/lua51/build.sh b/tests/examples/lua/lua51/build.sh new file mode 100755 index 0000000000..da2981bf7d --- /dev/null +++ b/tests/examples/lua/lua51/build.sh @@ -0,0 +1,2 @@ +gcc lua_connector51.c -fPIC -shared -o luaconnector51.so -Wall -ltaos + diff --git a/tests/examples/lua/lua51/lauxlib.h b/tests/examples/lua/lua51/lauxlib.h new file mode 100644 index 0000000000..a44f0272b3 --- /dev/null +++ b/tests/examples/lua/lua51/lauxlib.h @@ -0,0 +1,161 @@ +/* +** $Id: lauxlib.h,v 1.88.1.1 2007/12/27 13:02:25 roberto Exp $ +** Auxiliary functions for building Lua libraries +** See Copyright Notice in lua.h +*/ + + +#ifndef lauxlib_h +#define lauxlib_h + + +#include +#include + +#include "lua.h" + + +/* extra error code for `luaL_load' */ +#define LUA_ERRFILE (LUA_ERRERR+1) + +typedef struct luaL_Reg { + const char *name; + lua_CFunction func; +} luaL_Reg; + +LUALIB_API void (luaL_openlib) (lua_State *L, const char *libname, + const luaL_Reg *l, int nup); +LUALIB_API void (luaL_register) (lua_State *L, const char *libname, + const luaL_Reg *l); +LUALIB_API int (luaL_getmetafield) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_callmeta) (lua_State *L, int obj, const char *e); +LUALIB_API int (luaL_typerror) (lua_State *L, int narg, const char *tname); +LUALIB_API int (luaL_argerror) (lua_State *L, int numarg, const char *extramsg); +LUALIB_API const char *(luaL_checklstring) (lua_State *L, int numArg, + size_t *l); +LUALIB_API const char *(luaL_optlstring) (lua_State *L, int numArg, + const char *def, size_t *l); +LUALIB_API lua_Number (luaL_checknumber) (lua_State *L, int numArg); +LUALIB_API lua_Number (luaL_optnumber) (lua_State *L, int nArg, lua_Number def); + +LUALIB_API lua_Integer (luaL_checkinteger) (lua_State *L, int numArg); +LUALIB_API lua_Integer (luaL_optinteger) (lua_State *L, int nArg, + lua_Integer def); + +LUALIB_API void (luaL_checkstack) (lua_State *L, int sz, const char *msg); +LUALIB_API void (luaL_checktype) (lua_State *L, int narg, int t); +LUALIB_API void (luaL_checkany) (lua_State *L, int narg); + +LUALIB_API int (luaL_newmetatable) (lua_State *L, const char *tname); +LUALIB_API void *(luaL_checkudata) (lua_State *L, int ud, const char *tname); + +LUALIB_API void (luaL_where) (lua_State *L, int lvl); +LUALIB_API int (luaL_error) (lua_State *L, const char *fmt, ...); + +LUALIB_API int (luaL_checkoption) (lua_State *L, int narg, const char *def, + const char *const lst[]); + +/* pre-defined references */ +#define LUA_NOREF (-2) +#define LUA_REFNIL (-1) + +LUALIB_API int (luaL_ref) (lua_State *L, int t); +LUALIB_API void (luaL_unref) (lua_State *L, int t, int ref); + +LUALIB_API int (luaL_loadfile) (lua_State *L, const char *filename); +LUALIB_API int (luaL_loadbuffer) (lua_State *L, const char *buff, size_t sz, + const char *name); +LUALIB_API int (luaL_loadstring) (lua_State *L, const char *s); + +LUALIB_API lua_State *(luaL_newstate) (void); + + +LUALIB_API const char *(luaL_gsub) (lua_State *L, const char *s, const char *p, + const char *r); + +LUALIB_API const char *(luaL_findtable) (lua_State *L, int idx, + const char *fname, int szhint); + +/* From Lua 5.2. 
*/ +LUALIB_API int luaL_fileresult(lua_State *L, int stat, const char *fname); +LUALIB_API int luaL_execresult(lua_State *L, int stat); +LUALIB_API int (luaL_loadfilex) (lua_State *L, const char *filename, + const char *mode); +LUALIB_API int (luaL_loadbufferx) (lua_State *L, const char *buff, size_t sz, + const char *name, const char *mode); +LUALIB_API void luaL_traceback (lua_State *L, lua_State *L1, const char *msg, + int level); +LUALIB_API void (luaL_setfuncs) (lua_State *L, const luaL_Reg *l, int nup); +LUALIB_API void (luaL_pushmodule) (lua_State *L, const char *modname, + int sizehint); +LUALIB_API void *(luaL_testudata) (lua_State *L, int ud, const char *tname); +LUALIB_API void (luaL_setmetatable) (lua_State *L, const char *tname); + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define luaL_argcheck(L, cond,numarg,extramsg) \ + ((void)((cond) || luaL_argerror(L, (numarg), (extramsg)))) +#define luaL_checkstring(L,n) (luaL_checklstring(L, (n), NULL)) +#define luaL_optstring(L,n,d) (luaL_optlstring(L, (n), (d), NULL)) +#define luaL_checkint(L,n) ((int)luaL_checkinteger(L, (n))) +#define luaL_optint(L,n,d) ((int)luaL_optinteger(L, (n), (d))) +#define luaL_checklong(L,n) ((long)luaL_checkinteger(L, (n))) +#define luaL_optlong(L,n,d) ((long)luaL_optinteger(L, (n), (d))) + +#define luaL_typename(L,i) lua_typename(L, lua_type(L,(i))) + +#define luaL_dofile(L, fn) \ + (luaL_loadfile(L, fn) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_dostring(L, s) \ + (luaL_loadstring(L, s) || lua_pcall(L, 0, LUA_MULTRET, 0)) + +#define luaL_getmetatable(L,n) (lua_getfield(L, LUA_REGISTRYINDEX, (n))) + +#define luaL_opt(L,f,n,d) (lua_isnoneornil(L,(n)) ? (d) : f(L,(n))) + +/* From Lua 5.2. 
*/ +#define luaL_newlibtable(L, l) \ + lua_createtable(L, 0, sizeof(l)/sizeof((l)[0]) - 1) +#define luaL_newlib(L, l) (luaL_newlibtable(L, l), luaL_setfuncs(L, l, 0)) + +/* +** {====================================================== +** Generic Buffer manipulation +** ======================================================= +*/ + + + +typedef struct luaL_Buffer { + char *p; /* current position in buffer */ + int lvl; /* number of strings in the stack (level) */ + lua_State *L; + char buffer[LUAL_BUFFERSIZE]; +} luaL_Buffer; + +#define luaL_addchar(B,c) \ + ((void)((B)->p < ((B)->buffer+LUAL_BUFFERSIZE) || luaL_prepbuffer(B)), \ + (*(B)->p++ = (char)(c))) + +/* compatibility only */ +#define luaL_putchar(B,c) luaL_addchar(B,c) + +#define luaL_addsize(B,n) ((B)->p += (n)) + +LUALIB_API void (luaL_buffinit) (lua_State *L, luaL_Buffer *B); +LUALIB_API char *(luaL_prepbuffer) (luaL_Buffer *B); +LUALIB_API void (luaL_addlstring) (luaL_Buffer *B, const char *s, size_t l); +LUALIB_API void (luaL_addstring) (luaL_Buffer *B, const char *s); +LUALIB_API void (luaL_addvalue) (luaL_Buffer *B); +LUALIB_API void (luaL_pushresult) (luaL_Buffer *B); + + +/* }====================================================== */ + +#endif diff --git a/tests/examples/lua/lua51/lua.h b/tests/examples/lua/lua51/lua.h new file mode 100644 index 0000000000..9dcafd6906 --- /dev/null +++ b/tests/examples/lua/lua51/lua.h @@ -0,0 +1,404 @@ +/* +** $Id: lua.h,v 1.218.1.5 2008/08/06 13:30:12 roberto Exp $ +** Lua - An Extensible Extension Language +** Lua.org, PUC-Rio, Brazil (http://www.lua.org) +** See Copyright Notice at the end of this file +*/ + + +#ifndef lua_h +#define lua_h + +#include +#include + + +#include "luaconf.h" + + +#define LUA_VERSION "Lua 5.1" +#define LUA_RELEASE "Lua 5.1.4" +#define LUA_VERSION_NUM 501 +#define LUA_COPYRIGHT "Copyright (C) 1994-2008 Lua.org, PUC-Rio" +#define LUA_AUTHORS "R. Ierusalimschy, L. H. de Figueiredo & W. 
Celes" + + +/* mark for precompiled code (`Lua') */ +#define LUA_SIGNATURE "\033Lua" + +/* option for multiple returns in `lua_pcall' and `lua_call' */ +#define LUA_MULTRET (-1) + + +/* +** pseudo-indices +*/ +#define LUA_REGISTRYINDEX (-10000) +#define LUA_ENVIRONINDEX (-10001) +#define LUA_GLOBALSINDEX (-10002) +#define lua_upvalueindex(i) (LUA_GLOBALSINDEX-(i)) + + +/* thread status */ +#define LUA_OK 0 +#define LUA_YIELD 1 +#define LUA_ERRRUN 2 +#define LUA_ERRSYNTAX 3 +#define LUA_ERRMEM 4 +#define LUA_ERRERR 5 + + +typedef struct lua_State lua_State; + +typedef int (*lua_CFunction) (lua_State *L); + + +/* +** functions that read/write blocks when loading/dumping Lua chunks +*/ +typedef const char * (*lua_Reader) (lua_State *L, void *ud, size_t *sz); + +typedef int (*lua_Writer) (lua_State *L, const void* p, size_t sz, void* ud); + + +/* +** prototype for memory-allocation functions +*/ +typedef void * (*lua_Alloc) (void *ud, void *ptr, size_t osize, size_t nsize); + + +/* +** basic types +*/ +#define LUA_TNONE (-1) + +#define LUA_TNIL 0 +#define LUA_TBOOLEAN 1 +#define LUA_TLIGHTUSERDATA 2 +#define LUA_TNUMBER 3 +#define LUA_TSTRING 4 +#define LUA_TTABLE 5 +#define LUA_TFUNCTION 6 +#define LUA_TUSERDATA 7 +#define LUA_TTHREAD 8 + + + +/* minimum Lua stack available to a C function */ +#define LUA_MINSTACK 20 + + +/* +** generic extra include file +*/ +#if defined(LUA_USER_H) +#include LUA_USER_H +#endif + + +/* type of numbers in Lua */ +typedef LUA_NUMBER lua_Number; + + +/* type for integer functions */ +typedef LUA_INTEGER lua_Integer; + + + +/* +** state manipulation +*/ +LUA_API lua_State *(lua_newstate) (lua_Alloc f, void *ud); +LUA_API void (lua_close) (lua_State *L); +LUA_API lua_State *(lua_newthread) (lua_State *L); + +LUA_API lua_CFunction (lua_atpanic) (lua_State *L, lua_CFunction panicf); + + +/* +** basic stack manipulation +*/ +LUA_API int (lua_gettop) (lua_State *L); +LUA_API void (lua_settop) (lua_State *L, int idx); +LUA_API void (lua_pushvalue) (lua_State *L, int idx); +LUA_API void (lua_remove) (lua_State *L, int idx); +LUA_API void (lua_insert) (lua_State *L, int idx); +LUA_API void (lua_replace) (lua_State *L, int idx); +LUA_API int (lua_checkstack) (lua_State *L, int sz); + +LUA_API void (lua_xmove) (lua_State *from, lua_State *to, int n); + + +/* +** access functions (stack -> C) +*/ + +LUA_API int (lua_isnumber) (lua_State *L, int idx); +LUA_API int (lua_isstring) (lua_State *L, int idx); +LUA_API int (lua_iscfunction) (lua_State *L, int idx); +LUA_API int (lua_isuserdata) (lua_State *L, int idx); +LUA_API int (lua_type) (lua_State *L, int idx); +LUA_API const char *(lua_typename) (lua_State *L, int tp); + +LUA_API int (lua_equal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_rawequal) (lua_State *L, int idx1, int idx2); +LUA_API int (lua_lessthan) (lua_State *L, int idx1, int idx2); + +LUA_API lua_Number (lua_tonumber) (lua_State *L, int idx); +LUA_API lua_Integer (lua_tointeger) (lua_State *L, int idx); +LUA_API int (lua_toboolean) (lua_State *L, int idx); +LUA_API const char *(lua_tolstring) (lua_State *L, int idx, size_t *len); +LUA_API size_t (lua_objlen) (lua_State *L, int idx); +LUA_API lua_CFunction (lua_tocfunction) (lua_State *L, int idx); +LUA_API void *(lua_touserdata) (lua_State *L, int idx); +LUA_API lua_State *(lua_tothread) (lua_State *L, int idx); +LUA_API const void *(lua_topointer) (lua_State *L, int idx); + + +/* +** push functions (C -> stack) +*/ +LUA_API void (lua_pushnil) (lua_State *L); +LUA_API void (lua_pushnumber) 
(lua_State *L, lua_Number n); +LUA_API void (lua_pushinteger) (lua_State *L, lua_Integer n); +LUA_API void (lua_pushlstring) (lua_State *L, const char *s, size_t l); +LUA_API void (lua_pushstring) (lua_State *L, const char *s); +LUA_API const char *(lua_pushvfstring) (lua_State *L, const char *fmt, + va_list argp); +LUA_API const char *(lua_pushfstring) (lua_State *L, const char *fmt, ...); +LUA_API void (lua_pushcclosure) (lua_State *L, lua_CFunction fn, int n); +LUA_API void (lua_pushboolean) (lua_State *L, int b); +LUA_API void (lua_pushlightuserdata) (lua_State *L, void *p); +LUA_API int (lua_pushthread) (lua_State *L); + + +/* +** get functions (Lua -> stack) +*/ +LUA_API void (lua_gettable) (lua_State *L, int idx); +LUA_API void (lua_getfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawget) (lua_State *L, int idx); +LUA_API void (lua_rawgeti) (lua_State *L, int idx, int n); +LUA_API void (lua_createtable) (lua_State *L, int narr, int nrec); +LUA_API void *(lua_newuserdata) (lua_State *L, size_t sz); +LUA_API int (lua_getmetatable) (lua_State *L, int objindex); +LUA_API void (lua_getfenv) (lua_State *L, int idx); + + +/* +** set functions (stack -> Lua) +*/ +LUA_API void (lua_settable) (lua_State *L, int idx); +LUA_API void (lua_setfield) (lua_State *L, int idx, const char *k); +LUA_API void (lua_rawset) (lua_State *L, int idx); +LUA_API void (lua_rawseti) (lua_State *L, int idx, int n); +LUA_API int (lua_setmetatable) (lua_State *L, int objindex); +LUA_API int (lua_setfenv) (lua_State *L, int idx); + + +/* +** `load' and `call' functions (load and run Lua code) +*/ +LUA_API void (lua_call) (lua_State *L, int nargs, int nresults); +LUA_API int (lua_pcall) (lua_State *L, int nargs, int nresults, int errfunc); +LUA_API int (lua_cpcall) (lua_State *L, lua_CFunction func, void *ud); +LUA_API int (lua_load) (lua_State *L, lua_Reader reader, void *dt, + const char *chunkname); + +LUA_API int (lua_dump) (lua_State *L, lua_Writer writer, void *data); + + +/* +** coroutine functions +*/ +LUA_API int (lua_yield) (lua_State *L, int nresults); +LUA_API int (lua_resume) (lua_State *L, int narg); +LUA_API int (lua_status) (lua_State *L); + +/* +** garbage-collection function and options +*/ + +#define LUA_GCSTOP 0 +#define LUA_GCRESTART 1 +#define LUA_GCCOLLECT 2 +#define LUA_GCCOUNT 3 +#define LUA_GCCOUNTB 4 +#define LUA_GCSTEP 5 +#define LUA_GCSETPAUSE 6 +#define LUA_GCSETSTEPMUL 7 +#define LUA_GCISRUNNING 9 + +LUA_API int (lua_gc) (lua_State *L, int what, int data); + + +/* +** miscellaneous functions +*/ + +LUA_API int (lua_error) (lua_State *L); + +LUA_API int (lua_next) (lua_State *L, int idx); + +LUA_API void (lua_concat) (lua_State *L, int n); + +LUA_API lua_Alloc (lua_getallocf) (lua_State *L, void **ud); +LUA_API void lua_setallocf (lua_State *L, lua_Alloc f, void *ud); + +LUA_API void lua_setexdata(lua_State *L, void *exdata); +LUA_API void *lua_getexdata(lua_State *L); + + +/* +** =============================================================== +** some useful macros +** =============================================================== +*/ + +#define lua_pop(L,n) lua_settop(L, -(n)-1) + +#define lua_newtable(L) lua_createtable(L, 0, 0) + +#define lua_register(L,n,f) (lua_pushcfunction(L, (f)), lua_setglobal(L, (n))) + +#define lua_pushcfunction(L,f) lua_pushcclosure(L, (f), 0) + +#define lua_strlen(L,i) lua_objlen(L, (i)) + +#define lua_isfunction(L,n) (lua_type(L, (n)) == LUA_TFUNCTION) +#define lua_istable(L,n) (lua_type(L, (n)) == LUA_TTABLE) +#define 
lua_islightuserdata(L,n) (lua_type(L, (n)) == LUA_TLIGHTUSERDATA) +#define lua_isnil(L,n) (lua_type(L, (n)) == LUA_TNIL) +#define lua_isboolean(L,n) (lua_type(L, (n)) == LUA_TBOOLEAN) +#define lua_isthread(L,n) (lua_type(L, (n)) == LUA_TTHREAD) +#define lua_isnone(L,n) (lua_type(L, (n)) == LUA_TNONE) +#define lua_isnoneornil(L, n) (lua_type(L, (n)) <= 0) + +#define lua_pushliteral(L, s) \ + lua_pushlstring(L, "" s, (sizeof(s)/sizeof(char))-1) + +#define lua_setglobal(L,s) lua_setfield(L, LUA_GLOBALSINDEX, (s)) +#define lua_getglobal(L,s) lua_getfield(L, LUA_GLOBALSINDEX, (s)) + +#define lua_tostring(L,i) lua_tolstring(L, (i), NULL) + + + +/* +** compatibility macros and functions +*/ + +#define lua_open() luaL_newstate() + +#define lua_getregistry(L) lua_pushvalue(L, LUA_REGISTRYINDEX) + +#define lua_getgccount(L) lua_gc(L, LUA_GCCOUNT, 0) + +#define lua_Chunkreader lua_Reader +#define lua_Chunkwriter lua_Writer + + +/* hack */ +LUA_API void lua_setlevel (lua_State *from, lua_State *to); + + +/* +** {====================================================================== +** Debug API +** ======================================================================= +*/ + + +/* +** Event codes +*/ +#define LUA_HOOKCALL 0 +#define LUA_HOOKRET 1 +#define LUA_HOOKLINE 2 +#define LUA_HOOKCOUNT 3 +#define LUA_HOOKTAILRET 4 + + +/* +** Event masks +*/ +#define LUA_MASKCALL (1 << LUA_HOOKCALL) +#define LUA_MASKRET (1 << LUA_HOOKRET) +#define LUA_MASKLINE (1 << LUA_HOOKLINE) +#define LUA_MASKCOUNT (1 << LUA_HOOKCOUNT) + +typedef struct lua_Debug lua_Debug; /* activation record */ + + +/* Functions to be called by the debuger in specific events */ +typedef void (*lua_Hook) (lua_State *L, lua_Debug *ar); + + +LUA_API int lua_getstack (lua_State *L, int level, lua_Debug *ar); +LUA_API int lua_getinfo (lua_State *L, const char *what, lua_Debug *ar); +LUA_API const char *lua_getlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_setlocal (lua_State *L, const lua_Debug *ar, int n); +LUA_API const char *lua_getupvalue (lua_State *L, int funcindex, int n); +LUA_API const char *lua_setupvalue (lua_State *L, int funcindex, int n); +LUA_API int lua_sethook (lua_State *L, lua_Hook func, int mask, int count); +LUA_API lua_Hook lua_gethook (lua_State *L); +LUA_API int lua_gethookmask (lua_State *L); +LUA_API int lua_gethookcount (lua_State *L); + +/* From Lua 5.2. */ +LUA_API void *lua_upvalueid (lua_State *L, int idx, int n); +LUA_API void lua_upvaluejoin (lua_State *L, int idx1, int n1, int idx2, int n2); +LUA_API int lua_loadx (lua_State *L, lua_Reader reader, void *dt, + const char *chunkname, const char *mode); +LUA_API const lua_Number *lua_version (lua_State *L); +LUA_API void lua_copy (lua_State *L, int fromidx, int toidx); +LUA_API lua_Number lua_tonumberx (lua_State *L, int idx, int *isnum); +LUA_API lua_Integer lua_tointegerx (lua_State *L, int idx, int *isnum); + +/* From Lua 5.3. 
*/ +LUA_API int lua_isyieldable (lua_State *L); + + +struct lua_Debug { + int event; + const char *name; /* (n) */ + const char *namewhat; /* (n) `global', `local', `field', `method' */ + const char *what; /* (S) `Lua', `C', `main', `tail' */ + const char *source; /* (S) */ + int currentline; /* (l) */ + int nups; /* (u) number of upvalues */ + int linedefined; /* (S) */ + int lastlinedefined; /* (S) */ + char short_src[LUA_IDSIZE]; /* (S) */ + /* private part */ + int i_ci; /* active function */ +}; + +/* }====================================================================== */ + + +/****************************************************************************** +* Copyright (C) 1994-2008 Lua.org, PUC-Rio. All rights reserved. +* +* Permission is hereby granted, free of charge, to any person obtaining +* a copy of this software and associated documentation files (the +* "Software"), to deal in the Software without restriction, including +* without limitation the rights to use, copy, modify, merge, publish, +* distribute, sublicense, and/or sell copies of the Software, and to +* permit persons to whom the Software is furnished to do so, subject to +* the following conditions: +* +* The above copyright notice and this permission notice shall be +* included in all copies or substantial portions of the Software. +* +* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +* IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +* CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+******************************************************************************/ + + +#endif diff --git a/tests/examples/lua/lua51/lua_connector51.c b/tests/examples/lua/lua51/lua_connector51.c new file mode 100644 index 0000000000..9b932337fe --- /dev/null +++ b/tests/examples/lua/lua51/lua_connector51.c @@ -0,0 +1,323 @@ +#include +#include +#include +#include +#include "lua.h" +#include "lauxlib.h" +#include "lualib.h" +#include + +struct cb_param{ + lua_State* state; + int callback; + void * stream; +}; + +static int l_connect(lua_State *L){ + TAOS * taos=NULL; + const char* host; + const char* database; + const char* user; + const char* password; + int port; + + luaL_checktype(L, 1, LUA_TTABLE); + + lua_getfield(L,-1,"host"); + if (lua_isstring(L,-1)){ + host = lua_tostring(L, -1); + // printf("host = %s\n", host); + } + + lua_getfield(L, 1, "port"); + if (lua_isnumber(L,-1)){ + port = lua_tonumber(L, -1); + //printf("port = %d\n", port); + } + + lua_getfield(L, 1, "database"); + if (lua_isstring(L, -1)){ + database = lua_tostring(L, -1); + //printf("database = %s\n", database); + } + + lua_getfield(L, 1, "user"); + if (lua_isstring(L, -1)){ + user = lua_tostring(L, -1); + //printf("user = %s\n", user); + } + + lua_getfield(L, 1, "password"); + if (lua_isstring(L, -1)){ + password = lua_tostring(L, -1); + //printf("password = %s\n", password); + } + + lua_settop(L,0); + + taos_init(); + + lua_newtable(L); + int table_index = lua_gettop(L); + + taos = taos_connect(host, user,password,database, port); + if (taos == NULL) { + printf("failed to connect server, reason:%s\n", taos_errstr(taos)); + + lua_pushinteger(L, -1); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, taos_errstr(taos)); + lua_setfield(L, table_index, "error"); + lua_pushlightuserdata(L,NULL); + lua_setfield(L, table_index, "conn"); + }else{ + // printf("success to connect server\n"); + lua_pushinteger(L, 0); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, taos_errstr(taos)); + lua_setfield(L, table_index, "error"); + lua_pushlightuserdata(L,taos); + lua_setfield(L, table_index, "conn"); + } + + return 1; +} + +static int l_query(lua_State *L){ + TAOS *taos= (TAOS*)lua_topointer(L,1); + const char* s = lua_tostring(L, 2); + TAOS_RES *result; + lua_newtable(L); + int table_index = lua_gettop(L); + + // printf("receive command:%s\r\n",s); + result = taos_query(taos, s); + int32_t code = taos_errno(result); + if( code != 0){ + printf("failed, reason:%s\n", taos_errstr(result)); + lua_pushinteger(L, -1); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, taos_errstr(taos)); + lua_setfield(L, table_index, "error"); + + return 1; + + }else{ + //printf("success to query.\n"); + TAOS_ROW row; + int rows = 0; + int num_fields = taos_field_count(result); + const TAOS_FIELD *fields = taos_fetch_fields(result); + //char temp[256]; + + const int affectRows = taos_affected_rows(result); + // printf(" affect rows:%d\r\n", affectRows); + lua_pushinteger(L, 0); + lua_setfield(L, table_index, "code"); + lua_pushinteger(L, affectRows); + lua_setfield(L, table_index, "affected"); + lua_newtable(L); + + while ((row = taos_fetch_row(result))) { + //printf("row index:%d\n",rows); + rows++; + + lua_pushnumber(L,rows); + lua_newtable(L); + + for (int i = 0; i < num_fields; ++i) { + if (row[i] == NULL) { + continue; + } + + lua_pushstring(L,fields[i].name); + + switch (fields[i].type) { + case TSDB_DATA_TYPE_TINYINT: + lua_pushinteger(L,*((char *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + 
lua_pushinteger(L,*((short *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + lua_pushinteger(L,*((int *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + lua_pushinteger(L,*((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + lua_pushnumber(L,*((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + lua_pushnumber(L,*((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + lua_pushstring(L,(char *)row[i]); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + lua_pushinteger(L,*((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_BOOL: + lua_pushinteger(L,*((char *)row[i])); + break; + default: + lua_pushnil(L); + break; + } + + lua_settable(L,-3); + } + + lua_settable(L,-3); + } + taos_free_result(result); + } + + lua_setfield(L, table_index, "item"); + return 1; +} + +void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){ + struct cb_param* p = (struct cb_param*) param; + TAOS_FIELD *fields = taos_fetch_fields(result); + int numFields = taos_num_fields(result); + + // printf("\nnumfields:%d\n", numFields); + //printf("\n\r-----------------------------------------------------------------------------------\n"); + + lua_State *L = p->state; + lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback); + + lua_newtable(L); + + for (int i = 0; i < numFields; ++i) { + if (row[i] == NULL) { + continue; + } + + lua_pushstring(L,fields[i].name); + + switch (fields[i].type) { + case TSDB_DATA_TYPE_TINYINT: + lua_pushinteger(L,*((char *)row[i])); + break; + case TSDB_DATA_TYPE_SMALLINT: + lua_pushinteger(L,*((short *)row[i])); + break; + case TSDB_DATA_TYPE_INT: + lua_pushinteger(L,*((int *)row[i])); + break; + case TSDB_DATA_TYPE_BIGINT: + lua_pushinteger(L,*((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_FLOAT: + lua_pushnumber(L,*((float *)row[i])); + break; + case TSDB_DATA_TYPE_DOUBLE: + lua_pushnumber(L,*((double *)row[i])); + break; + case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_NCHAR: + lua_pushstring(L,(char *)row[i]); + break; + case TSDB_DATA_TYPE_TIMESTAMP: + lua_pushinteger(L,*((int64_t *)row[i])); + break; + case TSDB_DATA_TYPE_BOOL: + lua_pushinteger(L,*((char *)row[i])); + break; + default: + lua_pushnil(L); + break; + } + + lua_settable(L, -3); + } + + lua_call(L, 1, 0); + + // printf("-----------------------------------------------------------------------------------\n\r"); +} + +static int l_open_stream(lua_State *L){ + int r = luaL_ref(L, LUA_REGISTRYINDEX); + TAOS * taos = (TAOS*)lua_topointer(L,1); + const char * sqlstr = lua_tostring(L,2); + int stime = luaL_checknumber(L,3); + + lua_newtable(L); + int table_index = lua_gettop(L); + + struct cb_param *p = malloc(sizeof(struct cb_param)); + p->state = L; + p->callback=r; + // printf("r:%d, L:%d\n",r,L); + void * s = taos_open_stream(taos,sqlstr,stream_cb,stime,p,NULL); + if (s == NULL) { + printf("failed to open stream, reason:%s\n", taos_errstr(taos)); + free(p); + lua_pushnumber(L, -1); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, taos_errstr(taos)); + lua_setfield(L, table_index, "error"); + lua_pushlightuserdata(L,NULL); + lua_setfield(L, table_index, "stream"); + }else{ + // printf("success to open stream\n"); + lua_pushnumber(L, 0); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, taos_errstr(taos)); + lua_setfield(L, table_index, "error"); + p->stream = s; + lua_pushlightuserdata(L,p); + lua_setfield(L, table_index, "stream");//stream has different content in lua and c. 
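+      // the "stream" field handed back to Lua is this cb_param wrapper (p), not the raw TAOS stream handle;
+      // l_close_stream() expects exactly that wrapper: it calls taos_close_stream(p->stream) and then frees p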
+ } + + return 1; +} + +static int l_close_stream(lua_State *L){ + //TODO:get stream and free cb_param + struct cb_param *p = lua_touserdata(L,1); + taos_close_stream(p->stream); + free(p); + return 0; +} + +static int l_close(lua_State *L){ + TAOS *taos= (TAOS*)lua_topointer(L,1); + lua_newtable(L); + int table_index = lua_gettop(L); + + if(taos == NULL){ + lua_pushnumber(L, -1); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, "null pointer."); + lua_setfield(L, table_index, "error"); + }else{ + taos_close(taos); + lua_pushnumber(L, 0); + lua_setfield(L, table_index, "code"); + lua_pushstring(L, "done."); + lua_setfield(L, table_index, "error"); + } + return 1; +} + +static const struct luaL_Reg lib[] = { + {"connect", l_connect}, + {"query", l_query}, + {"close", l_close}, + {"open_stream", l_open_stream}, + {"close_stream", l_close_stream}, + {NULL, NULL} +}; + +extern int luaopen_luaconnector51(lua_State* L) +{ + // luaL_register(L, "luaconnector51", lib); + lua_newtable (L); + luaL_setfuncs(L,lib,0); + return 1; +} diff --git a/tests/examples/lua/lua51/luaconf.h b/tests/examples/lua/lua51/luaconf.h new file mode 100644 index 0000000000..c72893fd15 --- /dev/null +++ b/tests/examples/lua/lua51/luaconf.h @@ -0,0 +1,152 @@ +/* +** Configuration header. +** Copyright (C) 2005-2017 Mike Pall. See Copyright Notice in luajit.h +*/ + +#ifndef luaconf_h +#define luaconf_h + +#ifndef WINVER +#define WINVER 0x0501 +#endif +#include +#include + +/* Default path for loading Lua and C modules with require(). */ +#if defined(_WIN32) +/* +** In Windows, any exclamation mark ('!') in the path is replaced by the +** path of the directory of the executable file of the current process. +*/ +#define LUA_LDIR "!\\lua\\" +#define LUA_CDIR "!\\" +#define LUA_PATH_DEFAULT \ + ".\\?.lua;" "!\\lualib\\?.lua;" LUA_LDIR"?.lua;" LUA_LDIR"?\\init.lua;" +#define LUA_CPATH_DEFAULT \ + ".\\?.dll;" "!\\lualib\\?.so;" LUA_CDIR"?.dll;" LUA_CDIR"loadall.dll" +#else +/* +** Note to distribution maintainers: do NOT patch the following lines! +** Please read ../doc/install.html#distro and pass PREFIX=/usr instead. +*/ +#ifndef LUA_MULTILIB +#define LUA_MULTILIB "lib" +#endif +#ifndef LUA_LMULTILIB +#define LUA_LMULTILIB "lib" +#endif +#define LUA_LROOT "/usr/local" +#define LUA_LUADIR "/lua/5.1/" +#define LUA_LJDIR "/luajit-2.1.0-beta3/" + +#ifdef LUA_ROOT +#define LUA_JROOT LUA_ROOT +#define LUA_RLDIR LUA_ROOT "/share" LUA_LUADIR +#define LUA_RCDIR LUA_ROOT "/" LUA_MULTILIB LUA_LUADIR +#define LUA_RLPATH ";" LUA_RLDIR "?.lua;" LUA_RLDIR "?/init.lua" +#define LUA_RCPATH ";" LUA_RCDIR "?.so" +#else +#define LUA_JROOT LUA_LROOT +#define LUA_RLPATH +#define LUA_RCPATH +#endif + +#define LUA_JPATH ";" LUA_JROOT "/share" LUA_LJDIR "?.lua" +#define LUA_LLDIR LUA_LROOT "/share" LUA_LUADIR +#define LUA_LCDIR LUA_LROOT "/" LUA_LMULTILIB LUA_LUADIR +#define LUA_LLPATH ";" LUA_LLDIR "?.lua;" LUA_LLDIR "?/init.lua" +#define LUA_LCPATH1 ";" LUA_LCDIR "?.so" +#define LUA_LCPATH2 ";" LUA_LCDIR "loadall.so" + +#define LUA_PATH_DEFAULT "./?.lua" LUA_JPATH LUA_LLPATH LUA_RLPATH +#define LUA_CPATH_DEFAULT "./?.so" LUA_LCPATH1 LUA_RCPATH LUA_LCPATH2 +#endif + +/* Environment variable names for path overrides and initialization code. */ +#define LUA_PATH "LUA_PATH" +#define LUA_CPATH "LUA_CPATH" +#define LUA_INIT "LUA_INIT" + +/* Special file system characters. */ +#if defined(_WIN32) +#define LUA_DIRSEP "\\" +#else +#define LUA_DIRSEP "/" +#endif +#define LUA_PATHSEP ";" +#define LUA_PATH_MARK "?" +#define LUA_EXECDIR "!" 
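+/* LUA_EXECDIR: a '!' in the default paths is substituted with the directory of the running executable (see the Windows note above). */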
+#define LUA_IGMARK "-" +#define LUA_PATH_CONFIG \ + LUA_DIRSEP "\n" LUA_PATHSEP "\n" LUA_PATH_MARK "\n" \ + LUA_EXECDIR "\n" LUA_IGMARK "\n" + +/* Quoting in error messages. */ +#define LUA_QL(x) "'" x "'" +#define LUA_QS LUA_QL("%s") + +/* Various tunables. */ +#define LUAI_MAXSTACK 65500 /* Max. # of stack slots for a thread (<64K). */ +#define LUAI_MAXCSTACK 8000 /* Max. # of stack slots for a C func (<10K). */ +#define LUAI_GCPAUSE 200 /* Pause GC until memory is at 200%. */ +#define LUAI_GCMUL 200 /* Run GC at 200% of allocation speed. */ +#define LUA_MAXCAPTURES 32 /* Max. pattern captures. */ + +/* Configuration for the frontend (the luajit executable). */ +#if defined(luajit_c) +#define LUA_PROGNAME "luajit" /* Fallback frontend name. */ +#define LUA_PROMPT "> " /* Interactive prompt. */ +#define LUA_PROMPT2 ">> " /* Continuation prompt. */ +#define LUA_MAXINPUT 512 /* Max. input line length. */ +#endif + +/* Note: changing the following defines breaks the Lua 5.1 ABI. */ +#define LUA_INTEGER ptrdiff_t +#define LUA_IDSIZE 60 /* Size of lua_Debug.short_src. */ +/* +** Size of lauxlib and io.* on-stack buffers. Weird workaround to avoid using +** unreasonable amounts of stack space, but still retain ABI compatibility. +** Blame Lua for depending on BUFSIZ in the ABI, blame **** for wrecking it. +*/ +#define LUAL_BUFFERSIZE (BUFSIZ > 16384 ? 8192 : BUFSIZ) + +/* The following defines are here only for compatibility with luaconf.h +** from the standard Lua distribution. They must not be changed for LuaJIT. +*/ +#define LUA_NUMBER_DOUBLE +#define LUA_NUMBER double +#define LUAI_UACNUMBER double +#define LUA_NUMBER_SCAN "%lf" +#define LUA_NUMBER_FMT "%.14g" +#define lua_number2str(s, n) sprintf((s), LUA_NUMBER_FMT, (n)) +#define LUAI_MAXNUMBER2STR 32 +#define LUA_INTFRMLEN "l" +#define LUA_INTFRM_T long + +/* Linkage of public API functions. */ +#if defined(LUA_BUILD_AS_DLL) +#if defined(LUA_CORE) || defined(LUA_LIB) +#define LUA_API __declspec(dllexport) +#else +#define LUA_API __declspec(dllimport) +#endif +#else +#define LUA_API extern +#endif + +#define LUALIB_API LUA_API + +/* Support for internal assertions. */ +#if defined(LUA_USE_ASSERT) || defined(LUA_USE_APICHECK) +#include +#endif +#ifdef LUA_USE_ASSERT +#define lua_assert(x) assert(x) +#endif +#ifdef LUA_USE_APICHECK +#define luai_apicheck(L, o) { (void)L; assert(o); } +#else +#define luai_apicheck(L, o) { (void)L; } +#endif + +#endif diff --git a/tests/examples/lua/lua51/luajit.h b/tests/examples/lua/lua51/luajit.h new file mode 100644 index 0000000000..ae14c4ffeb --- /dev/null +++ b/tests/examples/lua/lua51/luajit.h @@ -0,0 +1,81 @@ +/* +** LuaJIT -- a Just-In-Time Compiler for Lua. http://luajit.org/ +** +** Copyright (C) 2005-2017 Mike Pall. All rights reserved. +** +** Permission is hereby granted, free of charge, to any person obtaining +** a copy of this software and associated documentation files (the +** "Software"), to deal in the Software without restriction, including +** without limitation the rights to use, copy, modify, merge, publish, +** distribute, sublicense, and/or sell copies of the Software, and to +** permit persons to whom the Software is furnished to do so, subject to +** the following conditions: +** +** The above copyright notice and this permission notice shall be +** included in all copies or substantial portions of the Software. 
+** +** THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +** SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +** +** [ MIT license: http://www.opensource.org/licenses/mit-license.php ] +*/ + +#ifndef _LUAJIT_H +#define _LUAJIT_H + +#include "lua.h" + +#define OPENRESTY_LUAJIT + +#define LUAJIT_VERSION "LuaJIT 2.1.0-beta3" +#define LUAJIT_VERSION_NUM 20100 /* Version 2.1.0 = 02.01.00. */ +#define LUAJIT_VERSION_SYM luaJIT_version_2_1_0_beta3 +#define LUAJIT_COPYRIGHT "Copyright (C) 2005-2017 Mike Pall" +#define LUAJIT_URL "http://luajit.org/" + +/* Modes for luaJIT_setmode. */ +#define LUAJIT_MODE_MASK 0x00ff + +enum { + LUAJIT_MODE_ENGINE, /* Set mode for whole JIT engine. */ + LUAJIT_MODE_DEBUG, /* Set debug mode (idx = level). */ + + LUAJIT_MODE_FUNC, /* Change mode for a function. */ + LUAJIT_MODE_ALLFUNC, /* Recurse into subroutine protos. */ + LUAJIT_MODE_ALLSUBFUNC, /* Change only the subroutines. */ + + LUAJIT_MODE_TRACE, /* Flush a compiled trace. */ + + LUAJIT_MODE_WRAPCFUNC = 0x10, /* Set wrapper mode for C function calls. */ + + LUAJIT_MODE_MAX +}; + +/* Flags or'ed in to the mode. */ +#define LUAJIT_MODE_OFF 0x0000 /* Turn feature off. */ +#define LUAJIT_MODE_ON 0x0100 /* Turn feature on. */ +#define LUAJIT_MODE_FLUSH 0x0200 /* Flush JIT-compiled code. */ + +/* LuaJIT public C API. */ + +/* Control the JIT engine. */ +LUA_API int luaJIT_setmode(lua_State *L, int idx, int mode); + +/* Low-overhead profiling API. */ +typedef void (*luaJIT_profile_callback)(void *data, lua_State *L, + int samples, int vmstate); +LUA_API void luaJIT_profile_start(lua_State *L, const char *mode, + luaJIT_profile_callback cb, void *data); +LUA_API void luaJIT_profile_stop(lua_State *L); +LUA_API const char *luaJIT_profile_dumpstack(lua_State *L, const char *fmt, + int depth, size_t *len); + +/* Enforce (dynamic) linker error for version mismatches. Call from main. */ +LUA_API void LUAJIT_VERSION_SYM(void); + +#endif diff --git a/tests/examples/lua/lua51/lualib.h b/tests/examples/lua/lua51/lualib.h new file mode 100644 index 0000000000..6aceabe592 --- /dev/null +++ b/tests/examples/lua/lua51/lualib.h @@ -0,0 +1,44 @@ +/* +** Standard library header. +** Copyright (C) 2005-2017 Mike Pall. 
See Copyright Notice in luajit.h +*/ + +#ifndef _LUALIB_H +#define _LUALIB_H + +#include "lua.h" + +#define LUA_FILEHANDLE "FILE*" + +#define LUA_COLIBNAME "coroutine" +#define LUA_MATHLIBNAME "math" +#define LUA_STRLIBNAME "string" +#define LUA_TABLIBNAME "table" +#define LUA_IOLIBNAME "io" +#define LUA_OSLIBNAME "os" +#define LUA_LOADLIBNAME "package" +#define LUA_DBLIBNAME "debug" +#define LUA_BITLIBNAME "bit" +#define LUA_JITLIBNAME "jit" +#define LUA_FFILIBNAME "ffi" +#define LUA_THRLIBNAME "thread" + +LUALIB_API int luaopen_base(lua_State *L); +LUALIB_API int luaopen_math(lua_State *L); +LUALIB_API int luaopen_string(lua_State *L); +LUALIB_API int luaopen_table(lua_State *L); +LUALIB_API int luaopen_io(lua_State *L); +LUALIB_API int luaopen_os(lua_State *L); +LUALIB_API int luaopen_package(lua_State *L); +LUALIB_API int luaopen_debug(lua_State *L); +LUALIB_API int luaopen_bit(lua_State *L); +LUALIB_API int luaopen_jit(lua_State *L); +LUALIB_API int luaopen_ffi(lua_State *L); + +LUALIB_API void luaL_openlibs(lua_State *L); + +#ifndef lua_assert +#define lua_assert(x) ((void)0) +#endif + +#endif diff --git a/tests/examples/lua/lua_connector.c b/tests/examples/lua/lua_connector.c index 143f16a799..920d2cdc35 100644 --- a/tests/examples/lua/lua_connector.c +++ b/tests/examples/lua/lua_connector.c @@ -13,17 +13,49 @@ struct cb_param{ void * stream; }; - - static int l_connect(lua_State *L){ - TAOS * taos; - char *host = lua_tostring(L, 1); - char *user = lua_tostring(L, 2); - char *password = lua_tostring(L, 3); - char *database = lua_tostring(L, 4); - int port =luaL_checknumber(L, 5); - taos_init(); + TAOS * taos=NULL; + const char* host; + const char* database; + const char* user; + const char* password; + int port; + + luaL_checktype(L, 1, LUA_TTABLE); + + lua_getfield(L,-1,"host"); + if (lua_isstring(L,-1)){ + host = lua_tostring(L, -1); + // printf("host = %s\n", host); + } + lua_getfield(L, 1, "port"); + if (lua_isinteger(L,-1)){ + port = lua_tointeger(L, -1); + //printf("port = %d\n", port); + } + + lua_getfield(L, 1, "database"); + if (lua_isstring(L, -1)){ + database = lua_tostring(L, -1); + //printf("database = %s\n", database); + } + + lua_getfield(L, 1, "user"); + if (lua_isstring(L, -1)){ + user = lua_tostring(L, -1); + //printf("user = %s\n", user); + } + + lua_getfield(L, 1, "password"); + if (lua_isstring(L, -1)){ + password = lua_tostring(L, -1); + //printf("password = %s\n", password); + } + + lua_settop(L,0); + + taos_init(); lua_newtable(L); int table_index = lua_gettop(L); @@ -31,38 +63,38 @@ static int l_connect(lua_State *L){ if (taos == NULL) { printf("failed to connect server, reason:%s\n", taos_errstr(taos)); - lua_pushnumber(L, -1); + lua_pushinteger(L, -1); lua_setfield(L, table_index, "code"); lua_pushstring(L, taos_errstr(taos)); lua_setfield(L, table_index, "error"); lua_pushlightuserdata(L,NULL); lua_setfield(L, table_index, "conn"); }else{ - printf("success to connect server\n"); - lua_pushnumber(L, 0); + // printf("success to connect server\n"); + lua_pushinteger(L, 0); lua_setfield(L, table_index, "code"); lua_pushstring(L, taos_errstr(taos)); lua_setfield(L, table_index, "error"); lua_pushlightuserdata(L,taos); lua_setfield(L, table_index, "conn"); } - + return 1; } static int l_query(lua_State *L){ - TAOS * taos= lua_topointer(L,1); - char *s = lua_tostring(L, 2); + TAOS *taos= (TAOS*)lua_topointer(L,1); + const char* s = lua_tostring(L, 2); TAOS_RES *result; lua_newtable(L); int table_index = lua_gettop(L); // printf("receive command:%s\r\n",s); - 
result = taos_query(taos,s); - int32_t code = taos_errno(result); + result = taos_query(taos, s); + int32_t code = taos_errno(result); if( code != 0){ printf("failed, reason:%s\n", taos_errstr(result)); - lua_pushnumber(L, -1); + lua_pushinteger(L, -1); lua_setfield(L, table_index, "code"); lua_pushstring(L, taos_errstr(taos)); lua_setfield(L, table_index, "error"); @@ -74,12 +106,12 @@ static int l_query(lua_State *L){ TAOS_ROW row; int rows = 0; int num_fields = taos_field_count(result); - TAOS_FIELD *fields = taos_fetch_fields(result); - char temp[256]; + const TAOS_FIELD *fields = taos_fetch_fields(result); + //char temp[256]; - int affectRows = taos_affected_rows(result); + const int affectRows = taos_affected_rows(result); // printf(" affect rows:%d\r\n", affectRows); - lua_pushnumber(L, 0); + lua_pushinteger(L, 0); lua_setfield(L, table_index, "code"); lua_pushinteger(L, affectRows); lua_setfield(L, table_index, "affected"); @@ -150,8 +182,8 @@ void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){ TAOS_FIELD *fields = taos_fetch_fields(result); int numFields = taos_num_fields(result); - printf("\nnumfields:%d\n", numFields); - printf("\n\r-----------------------------------------------------------------------------------\n"); + // printf("\nnumfields:%d\n", numFields); + //printf("\n\r-----------------------------------------------------------------------------------\n"); lua_State *L = p->state; lua_rawgeti(L, LUA_REGISTRYINDEX, p->callback); @@ -204,13 +236,13 @@ void stream_cb(void *param, TAOS_RES *result, TAOS_ROW row){ lua_call(L, 1, 0); - printf("-----------------------------------------------------------------------------------\n\r"); + // printf("-----------------------------------------------------------------------------------\n\r"); } static int l_open_stream(lua_State *L){ int r = luaL_ref(L, LUA_REGISTRYINDEX); - TAOS * taos = lua_topointer(L,1); - char * sqlstr = lua_tostring(L,2); + TAOS * taos = (TAOS*)lua_topointer(L,1); + const char * sqlstr = lua_tostring(L,2); int stime = luaL_checknumber(L,3); lua_newtable(L); @@ -253,7 +285,7 @@ static int l_close_stream(lua_State *L){ } static int l_close(lua_State *L){ - TAOS * taos= lua_topointer(L,1); + TAOS *taos= (TAOS*)lua_topointer(L,1); lua_newtable(L); int table_index = lua_gettop(L); @@ -263,7 +295,7 @@ static int l_close(lua_State *L){ lua_pushstring(L, "null pointer."); lua_setfield(L, table_index, "error"); }else{ - taos_close(taos); + taos_close(taos); lua_pushnumber(L, 0); lua_setfield(L, table_index, "code"); lua_pushstring(L, "done."); diff --git a/tests/examples/lua/test.lua b/tests/examples/lua/test.lua index 4d5f9fe7d3..9f9c6934aa 100644 --- a/tests/examples/lua/test.lua +++ b/tests/examples/lua/test.lua @@ -1,93 +1,117 @@ local driver = require "luaconnector" -local host="127.0.0.1" -local user="root" -local password="taosdata" -local db =nil -local port=6030 -local conn +local config = { + host = "127.0.0.1", + port = 6030, + database = "", + user = "root", + password = "taosdata", + max_packet_size = 1024 * 1024 +} -local res = driver.connect(host,user,password,db,port) +local conn +local res = driver.connect(config) if res.code ~=0 then - print(res.error) + print("connect--- failed: "..res.error) return else conn = res.conn + print("connect--- pass.") end local res = driver.query(conn,"drop database if exists demo") res = driver.query(conn,"create database demo") if res.code ~=0 then - print(res.error) + print("create db--- failed: "..res.error) return +else + print("create db--- pass.") end res 
= driver.query(conn,"use demo") if res.code ~=0 then - print(res.error) + print("select db--- failed: "..res.error) return +else + print("select db--- pass.") end res = driver.query(conn,"create table m1 (ts timestamp, speed int,owner binary(20))") if res.code ~=0 then - print(res.error) + print("create table---failed: "..res.error) return +else + print("create table--- pass.") end res = driver.query(conn,"insert into m1 values ('2019-09-01 00:00:00.001',0,'robotspace'), ('2019-09-01 00:00:00.002',1,'Hilink'),('2019-09-01 00:00:00.003',2,'Harmony')") if res.code ~=0 then - print(res.error) + print("insert records failed: "..res.error) return else - print("insert successfully, affected:"..res.affected) + if(res.affected == 3) then + print("insert records--- pass") + else + print("insert records---failed: expect 3 affected records, actually affected "..res.affected) + end end res = driver.query(conn,"select * from m1") if res.code ~=0 then - print("select error:"..res.error) + print("select failed: "..res.error) return else - print("in lua, result:") - for i = 1, #(res.item) do - print("timestamp:"..res.item[i].ts) - print("speed:"..res.item[i].speed) - print("owner:"..res.item[i].owner) - end + if (#(res.item) == 3) then + print("select--- pass") + else + print("select--- failed: expect 3 affected records, actually received "..#(res.item)) + end + end res = driver.query(conn,"CREATE TABLE thermometer (ts timestamp, degree double) TAGS(location binary(20), type int)") if res.code ~=0 then print(res.error) return +else + print("create super table--- pass") end res = driver.query(conn,"CREATE TABLE therm1 USING thermometer TAGS ('beijing', 1)") if res.code ~=0 then print(res.error) return +else + print("create table--- pass") end + res = driver.query(conn,"INSERT INTO therm1 VALUES ('2019-09-01 00:00:00.001', 20),('2019-09-01 00:00:00.002', 21)") if res.code ~=0 then print(res.error) return else - print("insert successfully, affected:"..res.affected) + if(res.affected == 2) then + print("insert records--- pass") + else + print("insert records---failed: expect 2 affected records, actually affected "..res.affected) + end end res = driver.query(conn,"SELECT COUNT(*) count, AVG(degree) AS av, MAX(degree), MIN(degree) FROM thermometer WHERE location='beijing' or location='tianjin' GROUP BY location, type") if res.code ~=0 then - print("select error:"..res.error) + print("select from super table--- failed:"..res.error) return else - print("in lua, result:") + print("select from super table--- pass") for i = 1, #(res.item) do print("res:"..res.item[i].count) end end function callback(t) + print("------------------------") print("continuous query result:") for key, value in pairs(t) do print("key:"..key..", value:"..value) @@ -97,25 +121,25 @@ end local stream res = driver.open_stream(conn,"SELECT COUNT(*) as count, AVG(degree) as avg, MAX(degree) as max, MIN(degree) as min FROM thermometer interval(2s) sliding(2s);)",0,callback) if res.code ~=0 then - print("open stream error:"..res.error) + print("open stream--- failed:"..res.error) return else - print("openstream ok") + print("open stream--- pass") stream = res.stream end ---From now on we begin continous query in an definite (infinite if you want) loop. 
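+-- each pass of the loop below inserts one fresh row into therm1 and sleeps 1s, so the
+-- continuous query opened above (interval(2s) sliding(2s)) keeps receiving data via callback()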
+print("From now on we start continous insert in an definite (infinite if you want) loop.") local loop_index = 0 -while loop_index < 10 do +while loop_index < 30 do local t = os.time()*1000 local v = loop_index res = driver.query(conn,string.format("INSERT INTO therm1 VALUES (%d, %d)",t,v)) if res.code ~=0 then - print(res.error) + print("continous insertion--- failed:" .. res.error) return else - print("insert successfully, affected:"..res.affected) + --print("insert successfully, affected:"..res.affected) end os.execute("sleep " .. 1) loop_index = loop_index + 1 diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 6f4258312d..03a7fdb86a 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -17,6 +17,7 @@ import json import time import random import requests +import argparse from requests.auth import HTTPBasicAuth func_list=['avg','count','twa','sum','stddev','leastsquares','min', 'max','first','last','top','bottom','percentile','apercentile', @@ -32,18 +33,33 @@ condition_list=[ ] where_list = ['_c0>now-10d',' <50'," like \'%a%\'"] class ConcurrentInquiry: - def __init__(self,n_Therads=25,r_Therads=25): + # def __init__(self,ts=1500000001000,host='127.0.0.1',user='root',password='taosdata',dbname='test', + # stb_prefix='st',subtb_prefix='t',n_Therads=10,r_Therads=10,probabilities=0.05,loop=5, + # stableNum = 2,subtableNum = 1000,insertRows = 100): + def __init__(self,ts,host,user,password,dbname, + stb_prefix,subtb_prefix,n_Therads,r_Therads,probabilities,loop, + stableNum ,subtableNum ,insertRows ): self.n_numOfTherads = n_Therads self.r_numOfTherads = r_Therads - self.ts=1500000001000 - self.dbname='test' + self.ts=ts + self.host = host + self.user = user + self.password = password + self.dbname=dbname + self.stb_prefix = stb_prefix + self.subtb_prefix = subtb_prefix self.stb_list=[] self.subtb_list=[] self.stb_stru_list=[] self.subtb_stru_list=[] self.stb_tag_list=[] self.subtb_tag_list=[] - + self.probabilities = [probabilities,1-probabilities] + self.ifjoin = [0,1] + self.loop = loop + self.stableNum = stableNum + self.subtableNum = subtableNum + self.insertRows = insertRows def SetThreadsNum(self,num): self.numOfTherads=num @@ -87,9 +103,9 @@ class ConcurrentInquiry: self.subtb_tag_list.append(tag) def get_full(self): #获取所有的表、表结构 - host = "127.0.0.1" - user = "root" - password = "taosdata" + host = self.host + user = self.user + password = self.password conn = taos.connect( host, user, @@ -105,7 +121,7 @@ class ConcurrentInquiry: conn.close() #query condition - def con_where(self,tlist): + def con_where(self,tlist,col_list,tag_list): l=[] for i in range(random.randint(0,len(tlist))): c = random.choice(where_list) @@ -115,19 +131,26 @@ class ConcurrentInquiry: l.append(random.choice(tlist)+c) return 'where '+random.choice([' and ',' or ']).join(l) - def con_interval(self,tlist): - return random.choice(['interval(10s)','interval(10d)','interval(1n)']) + def con_interval(self,tlist,col_list,tag_list): + interval = 'interval(' + str(random.randint(0,20)) + random.choice(['a','s','d','w','n','y']) + ')' + return interval - def con_limit(self,tlist): - return random.choice(['limit 10','limit 10 offset 10','slimit 10','slimit 10 offset 10','limit 10 slimit 10','limit 10 offset 5 slimit 5 soffset 10']) + def con_limit(self,tlist,col_list,tag_list): + rand1 = str(random.randint(0,1000)) + rand2 = str(random.randint(0,1000)) + return random.choice(['limit ' + rand1,'limit ' + rand1 + ' offset '+rand2, + ' slimit ' + 
rand1,' slimit ' + rand1 + ' offset ' + rand2,'limit '+rand1 + ' slimit '+ rand2, + 'limit '+ rand1 + ' offset' + rand2 + ' slimit '+ rand1 + ' soffset ' + rand2 ]) - def con_fill(self,tlist): + def con_fill(self,tlist,col_list,tag_list): return random.choice(['fill(null)','fill(prev)','fill(none)','fill(LINEAR)']) - def con_group(self,tlist): - return 'group by '+random.choice(tlist) + def con_group(self,tlist,col_list,tag_list): + rand_tag = random.randint(0,5) + rand_col = random.randint(0,1) + return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag)) - def con_order(self,tlist): + def con_order(self,tlist,col_list,tag_list): return 'order by '+random.choice(tlist) def gen_query_sql(self): #生成查询语句 @@ -157,32 +180,131 @@ class ConcurrentInquiry: random.shuffle(func_list) sel_col_list=[] col_rand=random.randint(0,len(col_list)) + loop = 0 for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数 + alias = ' as '+ 'taos%d ' % loop + loop += 1 + pick_func = '' if j == 'leastsquares': - sel_col_list.append(j+'('+i+',1,1)') + pick_func=j+'('+i+',1,1)' elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile': - sel_col_list.append(j+'('+i+',1)') + pick_func=j+'('+i+',1)' else: - sel_col_list.append(j+'('+i+')') + pick_func=j+'('+i+')' + if bool(random.getrandbits(1)): + pick_func+=alias + sel_col_list.append(pick_func) + sql=sql+','.join(sel_col_list)+' from '+random.choice(self.stb_list+self.subtb_list)+' ' #select col & func con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] sel_con=random.sample(con_func,random.randint(0,len(con_func))) sel_con_list=[] for i in sel_con: - sel_con_list.append(i(tlist)) #获取对应的条件函数 + sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数 sql+=' '.join(sel_con_list) # condition - print(sql) + #print(sql) return sql + def gen_query_join(self): #生成join查询语句 + tbname = [] + col_list = [] + tag_list = [] + col_intersection = [] + tag_intersection = [] + subtable = None + + if bool(random.getrandbits(1)): + subtable = True + tbname = random.sample(self.subtb_list,2) + for i in tbname: + col_list.append(self.subtb_stru_list[self.subtb_list.index(i)]) + tag_list.append(self.subtb_stru_list[self.subtb_list.index(i)]) + col_intersection = list(set(col_list[0]).intersection(set(col_list[1]))) + tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1]))) + else: + tbname = random.sample(self.stb_list,2) + for i in tbname: + col_list.append(self.stb_stru_list[self.stb_list.index(i)]) + tag_list.append(self.stb_stru_list[self.stb_list.index(i)]) + col_intersection = list(set(col_list[0]).intersection(set(col_list[1]))) + tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1]))) + + + con_rand=random.randint(0,len(condition_list)) + col_rand=random.randint(0,len(col_list)) + tag_rand=random.randint(0,len(tag_list)) + + sql='select ' #select + + sel_col_tag=[] + col_rand=random.randint(0,len(col_list)) + if bool(random.getrandbits(1)): + sql += '*' + else: + sel_col_tag.append('t1.' + str(random.choice(col_list[0] + tag_list[0]))) + sel_col_tag.append('t2.' + str(random.choice(col_list[1] + tag_list[1]))) + sql += ','.join(sel_col_tag) + + sql = sql + ' from '+ str(tbname[0]) +' t1,' + str(tbname[1]) + ' t2 ' #select col & func + join_section = None + if subtable: + join_section = ''.join(random.choices(col_intersection)) + sql += 'where t1._c0 = t2._c0 and ' + 't1.' + join_section + '=t2.' 
+ join_section + else: + join_section = ''.join(random.choices(col_intersection+tag_intersection)) + sql += 'where t1._c0 = t2._c0 and ' + 't1.' + join_section + '=t2.' + join_section + return sql + + def random_pick(self): + x = random.uniform(0,1) + cumulative_probability = 0.0 + for item, item_probability in zip(self.ifjoin, self.probabilities): + cumulative_probability += item_probability + if x < cumulative_probability:break + return item + + def gen_data(self): + stableNum = self.stableNum + subtableNum = self.subtableNum + insertRows = self.insertRows + t0 = self.ts + host = self.host + user = self.user + password = self.password + conn = taos.connect( + host, + user, + password, + ) + cl = conn.cursor() + cl.execute("drop database if exists %s;" %self.dbname) + cl.execute("create database if not exists %s;" %self.dbname) + cl.execute("use %s" % self.dbname) + for k in range(stableNum): + sql="create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20)) \ + tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool,t8 binary(20),t9 nchar(20))" % (self.stb_prefix+str(k)) + cl.execute(sql) + for j in range(subtableNum): + sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s')" % \ + (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k),j,j/2.0,j%41,j%51,j%53,j*1.0,j%2,'taos'+str(j),'涛思'+str(j)) + print(sql) + cl.execute(sql) + for i in range(insertRows): + ret = cl.execute( + "insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s')" % + (self.subtb_prefix+str(k)+'_'+str(j),t0+i,i%100,i/2.0,i%41,i%51,i%53,i*1.0,i%2,'taos'+str(i),'涛思'+str(i))) + cl.close() + conn.close() + def rest_query(self,sql): #rest 接口 - host = "127.0.0.1" - user = "root" - password = "taosdata" + host = self.host + user = self.user + password = self.password port =6041 url = "http://{}:{}/rest/sql".format(host, port ) try: r = requests.post(url, - data = 'use test', + data = 'use %s' % self.dbname, auth = HTTPBasicAuth('root', 'taosdata')) r = requests.post(url, data = sql, @@ -210,24 +332,28 @@ class ConcurrentInquiry: nRows = rj['rows'] if ('rows' in rj) else 0 return nRows + def query_thread_n(self,threadID): #使用原生python接口查询 - host = "127.0.0.1" - user = "root" - password = "taosdata" + host = self.host + user = self.user + password = self.password conn = taos.connect( host, user, password, ) cl = conn.cursor() - cl.execute("use test;") + cl.execute("use %s;" % self.dbname) print("Thread %d: starting" % threadID) - - while True: + loop = self.loop + while loop: try: - sql=self.gen_query_sql() + if self.random_pick(): + sql=self.gen_query_sql() + else: + sql=self.gen_query_join() print("sql is ",sql) start = time.time() cl.execute(sql) @@ -235,30 +361,40 @@ class ConcurrentInquiry: end = time.time() print("time cost :",end-start) except Exception as e: + print('-'*40) print( - "Failure thread%d, sql: %s,exception: %s" % + "Failure thread%d, sql: %s \nexception: %s" % (threadID, str(sql),str(e))) #exit(-1) + loop -= 1 + if loop == 0: break - + cl.close() + conn.close() print("Thread %d: finishing" % threadID) def query_thread_r(self,threadID): #使用rest接口查询 print("Thread %d: starting" % threadID) - while True: - try: + loop = self.loop + while loop: + try: + if self.random_pick(): sql=self.gen_query_sql() - print("sql is ",sql) - start = time.time() - self.rest_query(sql) - end = time.time() - print("time cost :",end-start) - except Exception as e: - print( - "Failure thread%d, sql: %s,exception: %s" % 
- (threadID, str(sql),str(e))) - #exit(-1) - + else: + sql=self.gen_query_join() + print("sql is ",sql) + start = time.time() + self.rest_query(sql) + end = time.time() + print("time cost :",end-start) + except Exception as e: + print('-'*40) + print( + "Failure thread%d, sql: %s \nexception: %s" % + (threadID, str(sql),str(e))) + #exit(-1) + loop -= 1 + if loop == 0: break print("Thread %d: finishing" % threadID) @@ -270,14 +406,127 @@ class ConcurrentInquiry: threads.append(thread) thread.start() for i in range(self.r_numOfTherads): - # for i in range(1): thread = threading.Thread(target=self.query_thread_r, args=(i,)) threads.append(thread) thread.start() -if len(sys.argv)>1: - q = ConcurrentInquiry(n_Therads=sys.argv[1],r_Therads=sys.argv[2]) -else: - q = ConcurrentInquiry() + +parser = argparse.ArgumentParser() +parser.add_argument( + '-H', + '--host-name', + action='store', + default='127.0.0.1', + type=str, + help='host name to be connected (default: 127.0.0.1)') +parser.add_argument( + '-S', + '--ts', + action='store', + default=1500000000000, + type=int, + help='insert data from timestamp (default: 1500000000000)') +parser.add_argument( + '-d', + '--db-name', + action='store', + default='test', + type=str, + help='Database name to be created (default: test)') +parser.add_argument( + '-t', + '--number-of-native-threads', + action='store', + default=10, + type=int, + help='Number of native threads (default: 10)') +parser.add_argument( + '-T', + '--number-of-rest-threads', + action='store', + default=10, + type=int, + help='Number of rest threads (default: 10)') +parser.add_argument( + '-r', + '--number-of-records', + action='store', + default=100, + type=int, + help='Number of record to be created for each table (default: 100)') +parser.add_argument( + '-c', + '--create-table', + action='store', + default='0', + type=int, + help='whether gen data (default: 0)') +parser.add_argument( + '-p', + '--subtb-name-prefix', + action='store', + default='t', + type=str, + help='subtable-name-prefix (default: t)') +parser.add_argument( + '-P', + '--stb-name-prefix', + action='store', + default='st', + type=str, + help='stable-name-prefix (default: st)') +parser.add_argument( + '-b', + '--probabilities', + action='store', + default='0.05', + type=float, + help='probabilities of join (default: 0.05)') +parser.add_argument( + '-l', + '--loop-per-thread', + action='store', + default='100', + type=int, + help='loop per thread (default: 100)') +parser.add_argument( + '-u', + '--user', + action='store', + default='root', + type=str, + help='user name') +parser.add_argument( + '-w', + '--password', + action='store', + default='root', + type=str, + help='user name') +parser.add_argument( + '-n', + '--number-of-tables', + action='store', + default=1000, + type=int, + help='Number of subtales per stable (default: 1000)') +parser.add_argument( + '-N', + '--number-of-stables', + action='store', + default=2, + type=int, + help='Number of stables (default: 2)') + +args = parser.parse_args() +q = ConcurrentInquiry( + args.ts,args.host_name,args.user,args.password,args.db_name, + args.stb_name_prefix,args.subtb_name_prefix,args.number_of_native_threads,args.number_of_rest_threads, + args.probabilities,args.loop_per_thread,args.number_of_stables,args.number_of_tables ,args.number_of_records ) + +if args.create_table: + q.gen_data() q.get_full() + #q.gen_query_sql() q.run() + diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index b58100ef0a..328c833d05 100755 --- a/tests/pytest/fulltest.sh +++ 
b/tests/pytest/fulltest.sh @@ -19,9 +19,10 @@ python3 ./test.py -f insert/randomNullCommit.py python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py -python3 ./test.py -f insert/before_1970.py +#python3 ./test.py -f insert/before_1970.py python3 bug2265.py +#table python3 ./test.py -f table/alter_wal0.py python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py @@ -29,6 +30,10 @@ python3 ./test.py -f table/db_table.py python3 ./test.py -f table/create_sensitive.py #python3 ./test.py -f table/tablename-boundary.py python3 ./test.py -f table/max_table_length.py +python3 ./test.py -f table/alter_column.py +python3 ./test.py -f table/boundary.py +python3 ./test.py -f table/create.py +python3 ./test.py -f table/del_stable.py # tag @@ -138,9 +143,6 @@ python3 ./test.py -f user/pass_len.py # stable python3 ./test.py -f stable/query_after_reset.py -# table -python3 ./test.py -f table/del_stable.py - #query python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py @@ -164,11 +166,16 @@ python3 ./test.py -f query/bug1875.py python3 ./test.py -f query/bug1876.py python3 ./test.py -f query/bug2218.py python3 ./test.py -f query/bug2117.py +python3 ./test.py -f query/bug2118.py python3 ./test.py -f query/bug2143.py python3 ./test.py -f query/sliding.py python3 ./test.py -f query/unionAllTest.py python3 ./test.py -f query/bug2281.py python3 ./test.py -f query/bug2119.py +python3 ./test.py -f query/isNullTest.py +python3 ./test.py -f query/queryWithTaosdKilled.py +python3 ./test.py -f query/floatCompare.py + #stream python3 ./test.py -f stream/metric_1.py python3 ./test.py -f stream/new.py @@ -209,6 +216,7 @@ python3 ./test.py -f functions/function_stddev.py -r 1 python3 ./test.py -f functions/function_sum.py -r 1 python3 ./test.py -f functions/function_top.py -r 1 #python3 ./test.py -f functions/function_twa.py -r 1 +python3 ./test.py -f functions/function_twa_test2.py python3 queryCount.py python3 ./test.py -f query/queryGroupbyWithInterval.py python3 client/twoClients.py diff --git a/tests/pytest/functions/function_twa_test2.py b/tests/pytest/functions/function_twa_test2.py new file mode 100644 index 0000000000..b5cd24ce71 --- /dev/null +++ b/tests/pytest/functions/function_twa_test2.py @@ -0,0 +1,124 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute("create table t1(ts timestamp, c int)") + for i in range(self.rowNum): + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i * 10000, i + 1)) + + # twa verifacation + tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 5.5) + + tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(10s)") + tdSql.checkRows(10) + tdSql.checkData(0, 1, 1.49995) + tdSql.checkData(1, 1, 2.49995) + tdSql.checkData(2, 1, 3.49995) + tdSql.checkData(3, 1, 4.49995) + tdSql.checkData(4, 1, 5.49995) + tdSql.checkData(5, 1, 6.49995) + tdSql.checkData(6, 1, 7.49995) + tdSql.checkData(7, 1, 8.49995) + tdSql.checkData(8, 1, 9.49995) + tdSql.checkData(9, 1, 10) + + tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(10s) sliding(5s)") + tdSql.checkRows(20) + tdSql.checkData(0, 1, 1.24995) + tdSql.checkData(1, 1, 1.49995) + tdSql.checkData(2, 1, 1.99995) + tdSql.checkData(3, 1, 2.49995) + tdSql.checkData(4, 1, 2.99995) + tdSql.checkData(5, 1, 3.49995) + tdSql.checkData(6, 1, 3.99995) + tdSql.checkData(7, 1, 4.49995) + tdSql.checkData(8, 1, 4.99995) + tdSql.checkData(9, 1, 5.49995) + tdSql.checkData(10, 1, 5.99995) + tdSql.checkData(11, 1, 6.49995) + tdSql.checkData(12, 1, 6.99995) + tdSql.checkData(13, 1, 7.49995) + tdSql.checkData(14, 1, 7.99995) + tdSql.checkData(15, 1, 8.49995) + tdSql.checkData(16, 1, 8.99995) + tdSql.checkData(17, 1, 9.49995) + tdSql.checkData(18, 1, 9.75000) + tdSql.checkData(19, 1, 10) + + tdSql.execute("create table t2(ts timestamp, c int)") + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + 3000)) + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) ") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) sliding(1s) ") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 1) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:04.000' and ts <= '2018-09-17 09:01:30.000' ") + tdSql.checkRows(0) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 08:00:00.000' and ts <= '2018-09-17 09:00:00.000' ") + tdSql.checkRows(0) + + tdSql.execute("create table t3(ts timestamp, c int)") + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts)) + tdSql.execute("insert into t3 values(%d, -2)" % (self.ts + 3000)) + + tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000'") + 
tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.5) + + tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000' interval(1s)") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 0.5005) + tdSql.checkData(1, 1, -2) + + tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) sliding(1s)") + tdSql.checkRows(4) + tdSql.checkData(0, 1, 0.5005) + tdSql.checkData(1, 1, 0.0005) + tdSql.checkData(2, 1, -1.5) + tdSql.checkData(3, 1, -2) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/handle_crash_gen_val_log.sh b/tests/pytest/handle_crash_gen_val_log.sh index ce3d1c0c67..f00fe40c69 100755 --- a/tests/pytest/handle_crash_gen_val_log.sh +++ b/tests/pytest/handle_crash_gen_val_log.sh @@ -6,7 +6,7 @@ GREEN_DARK='\033[0;32m' GREEN_UNDERLINE='\033[4;32m' NC='\033[0m' nohup /var/lib/jenkins/workspace/TDinternal/debug/build/bin/taosd -c /var/lib/jenkins/workspace/TDinternal/community/sim/dnode1/cfg >/dev/null & -./crash_gen.sh --valgrind -p -t 10 -s 100 -b 4 +./crash_gen.sh --valgrind -p -t 10 -s 250 -b 4 pidof taosd|xargs kill grep 'start to execute\|ERROR SUMMARY' valgrind.err|grep -v 'grep'|uniq|tee crash_gen_mem_err.log diff --git a/tests/pytest/pytest_1.sh b/tests/pytest/pytest_1.sh index 28afbfcdf0..05d6ec1cec 100755 --- a/tests/pytest/pytest_1.sh +++ b/tests/pytest/pytest_1.sh @@ -19,7 +19,9 @@ python3 ./test.py -f insert/randomNullCommit.py python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py +python3 ./test.py -f query/isNullTest.py +#table python3 ./test.py -f table/alter_wal0.py python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py @@ -27,6 +29,11 @@ python3 ./test.py -f table/db_table.py python3 ./test.py -f table/create_sensitive.py #python3 ./test.py -f table/tablename-boundary.py python3 ./test.py -f table/max_table_length.py +python3 ./test.py -f table/alter_column.py +python3 ./test.py -f table/boundary.py +python3 ./test.py -f table/create.py +python3 ./test.py -f table/del_stable.py +python3 ./test.py -f table/queryWithTaosdKilled.py # tag python3 ./test.py -f tag_lite/filter.py @@ -135,9 +142,6 @@ python3 ./test.py -f user/pass_len.py # stable python3 ./test.py -f stable/query_after_reset.py -# table -python3 ./test.py -f table/del_stable.py - #query python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py @@ -163,6 +167,8 @@ python3 ./test.py -f query/bug2218.py python3 ./test.py -f query/bug2281.py python3 ./test.py -f query/bug2119.py python3 bug2265.py +python3 ./test.py -f query/floatCompare.py + #stream python3 ./test.py -f stream/metric_1.py python3 ./test.py -f stream/new.py @@ -202,6 +208,7 @@ python3 ./test.py -f functions/function_stddev.py -r 1 python3 ./test.py -f functions/function_sum.py -r 1 python3 ./test.py -f functions/function_top.py -r 1 #python3 ./test.py -f functions/function_twa.py -r 1 +python3 ./test.py -f functions/function_twa_test2.py python3 queryCount.py python3 ./test.py -f query/queryGroupbyWithInterval.py python3 client/twoClients.py diff --git a/tests/pytest/query/bug2118.py b/tests/pytest/query/bug2118.py new file mode 100644 index 0000000000..803f442a1b --- /dev/null +++ b/tests/pytest/query/bug2118.py @@ -0,0 +1,52 @@ 
+################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdLog.info("==========step1") + tdLog.info("create table && insert data") + + tdSql.execute("create table mt0 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20))") + insertRows = 1000 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(insertRows): + ret = tdSql.execute( + "insert into mt0 values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s')" % + (t0+i,i%100,i/2.0,i%41,i%51,i%53,i*1.0,i%2,'taos'+str(i%43),'涛思'+str(i%41))) + tdLog.info("==========step2") + tdLog.info("test percentile with group by normal_col ") + tdSql.query('select percentile(c1,1),percentile(c2,1),percentile(c6,1) from mt0 group by c3 limit 3 offset 2') + + tdSql.checkData(0,0,2.48) + tdSql.checkData(0,0,2.48) + tdSql.checkData(0,2,11.84) + + + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/floatCompare.py b/tests/pytest/query/floatCompare.py new file mode 100644 index 0000000000..cdbddb893e --- /dev/null +++ b/tests/pytest/query/floatCompare.py @@ -0,0 +1,172 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create table st(ts timestamp, c1 float, c2 double) tags(t1 float, t2 double)") + tdSql.execute( + 'CREATE TABLE if not exists t1 using st tags(1.023, 3.00412)') + tdSql.execute( + 'CREATE TABLE if not exists t2 using st tags(2.005, 10.129001)') + + print("==============step2") + for i in range(10): + tdSql.execute("insert into t1 values(%d, %s, %s) t2 values(%d, %s, %s)" % + (self.ts + i, 1.0001 + i, 2.0001301 + i, self.ts + i, 1.0001 * i, 2.0001301 * i)) + + tdSql.query("select count(*) from st") + tdSql.checkData(0, 0, 20) + + tdSql.query("select count(*) from st where t1 > 1.023") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t1 < 1.023") + tdSql.checkRows(0) + + tdSql.query("select count(*) from st where t1 >= 1.023") + tdSql.checkData(0, 0, 20) + + tdSql.query("select count(*) from st where t1 <= 1.023") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t1 = 1.023") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t1 <> 1.023") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t1 != 1.023") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t1 >= 1.023 and t1 <= 1.023") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t1 > 1.023 and t1 <= 1.023") + tdSql.checkRows(0) + + tdSql.query("select count(*) from st where t1 >= 1.023 and t1 < 1.023") + tdSql.checkRows(0) + + tdSql.query("select count(*) from st where t2 > 3.00412") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t2 < 3.00412") + tdSql.checkRows(0) + + tdSql.query("select count(*) from st where t2 >= 3.00412") + tdSql.checkData(0, 0, 20) + + tdSql.query("select count(*) from st where t2 <= 3.00412") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t2 = 3.00412") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t2 <> 3.00412") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t2 != 3.00412") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t2 >= 3.00412 and t2 <= 3.00412") + tdSql.checkData(0, 0, 10) + + tdSql.query("select count(*) from st where t2 > 3.00412 and t2 <= 3.00412") + tdSql.checkRows(0) + + tdSql.query("select count(*) from st where t2 >= 3.00412 and t2 < 3.00412") + tdSql.checkRows(0) + + tdSql.query("select count(*) from t1 where c1 < 2.0001") + tdSql.checkData(0, 0, 1) + + tdSql.query("select count(*) from t1 where c1 <= 2.0001") + tdSql.checkData(0, 0, 2) + + tdSql.query("select count(*) from t1 where c1 = 2.0001") + tdSql.checkData(0, 0, 1) + + tdSql.query("select count(*) from t1 where c1 > 2.0001") + tdSql.checkData(0, 0, 8) + + tdSql.query("select count(*) from t1 where c1 >= 2.0001") + tdSql.checkData(0, 0, 9) + + tdSql.query("select count(*) from t1 where c1 <> 2.0001") + tdSql.checkData(0, 0, 9) + + 
tdSql.query("select count(*) from t1 where c1 != 2.0001") + tdSql.checkData(0, 0, 9) + + tdSql.query("select count(*) from t1 where c1 >= 2.0001 and c1 <= 2.0001") + tdSql.checkData(0, 0, 1) + + tdSql.query("select count(*) from t1 where c1 > 2.0001 and c1 <= 2.0001") + tdSql.checkRows(0) + + tdSql.query("select count(*) from t1 where c1 >= 2.0001 and c1 < 2.0001") + tdSql.checkRows(0) + + tdSql.query("select count(*) from t2 where c2 < 2.0001301") + tdSql.checkData(0, 0, 1) + + tdSql.query("select count(*) from t2 where c2 <= 2.0001301") + tdSql.checkData(0, 0, 2) + + tdSql.query("select count(*) from t2 where c2 = 2.0001301") + tdSql.checkData(0, 0, 1) + + tdSql.query("select count(*) from t2 where c2 > 2.0001301") + tdSql.checkData(0, 0, 8) + + tdSql.query("select count(*) from t2 where c2 >= 2.0001301") + tdSql.checkData(0, 0, 9) + + tdSql.query("select count(*) from t2 where c2 <> 2.0001301") + tdSql.checkData(0, 0, 9) + + tdSql.query("select count(*) from t2 where c2 != 2.0001301") + tdSql.checkData(0, 0, 9) + + tdSql.query("select count(*) from t2 where c2 >= 2.0001301 and c2 <= 2.0001301") + tdSql.checkData(0, 0, 1) + + tdSql.query("select count(*) from t2 where c2 > 2.0001301 and c2 <= 2.0001301") + tdSql.checkRows(0) + + tdSql.query("select count(*) from t2 where c2 >= 2.0001301 and c2 < 2.0001301") + tdSql.checkRows(0) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/isNullTest.py b/tests/pytest/query/isNullTest.py new file mode 100644 index 0000000000..7b79679c7d --- /dev/null +++ b/tests/pytest/query/isNullTest.py @@ -0,0 +1,128 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create table st(ts timestamp, c1 int, c2 binary(20), c3 nchar(20)) tags(t1 int, t2 binary(20), t3 nchar(20))") + tdSql.execute("create table t1 using st tags(1, 'binary1', 'nchar1')") + tdSql.execute("insert into t2(ts, c2) using st(t2) tags('') values(%d, '')" % (self.ts + 10)) + tdSql.execute("insert into t3(ts, c2) using st(t3) tags('') values(%d, '')" % (self.ts + 10)) + + for i in range(10): + tdSql.execute("insert into t1 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i)) + tdSql.execute("insert into t2 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i)) + tdSql.execute("insert into t3 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i)) + + tdSql.execute("insert into t1(ts, c2) values(%d, '')" % (self.ts + 10)) + tdSql.execute("insert into t1(ts, c3) values(%d, '')" % (self.ts + 11)) + tdSql.execute("insert into t2(ts, c3) values(%d, '')" % (self.ts + 11)) + tdSql.execute("insert into t3(ts, c3) values(%d, '')" % (self.ts + 11)) + + tdSql.query("select count(*) from st") + tdSql.checkData(0, 0, 36) + + tdSql.query("select count(*) from st where t1 is null") + tdSql.checkData(0, 0, 24) + + tdSql.query("select count(*) from st where t1 is not null") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t2 is null") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t2 is not null") + tdSql.checkData(0, 0, 24) + + tdSql.error("select count(*) from st where t2 <> null") + tdSql.error("select count(*) from st where t2 = null") + + tdSql.query("select count(*) from st where t2 = '' ") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t2 <> '' ") + tdSql.checkData(0, 0, 24) + + tdSql.query("select count(*) from st where t3 is null") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t3 is not null") + tdSql.checkData(0, 0, 24) + + tdSql.error("select count(*) from st where t3 <> null") + tdSql.error("select count(*) from st where t3 = null") + + tdSql.query("select count(*) from st where t3 = '' ") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t3 <> '' ") + tdSql.checkData(0, 0, 24) + + tdSql.query("select count(*) from st where c1 is not null") + tdSql.checkData(0, 0, 30) + + tdSql.query("select count(*) from st where c1 is null") + tdSql.checkData(0, 0, 6) + + tdSql.query("select count(*) from st where c2 is not null") + tdSql.checkData(0, 0, 33) + + tdSql.query("select count(*) from st where c2 is null") + tdSql.checkData(0, 0, 3) + + tdSql.error("select count(*) from st where c2 <> null") + tdSql.error("select count(*) from st where c2 = null") + + tdSql.query("select count(*) from st where c2 = '' ") + tdSql.checkData(0, 0, 3) + + tdSql.query("select count(*) from st where c2 <> '' ") + tdSql.checkData(0, 0, 30) + + tdSql.query("select count(*) from st where c3 is not null") + 
tdSql.checkData(0, 0, 33) + + tdSql.query("select count(*) from st where c3 is null") + tdSql.checkData(0, 0, 3) + + tdSql.error("select count(*) from st where c3 <> null") + tdSql.error("select count(*) from st where c3 = null") + + tdSql.query("select count(*) from st where c3 = '' ") + tdSql.checkData(0, 0, 3) + + tdSql.query("select count(*) from st where c3 <> '' ") + tdSql.checkData(0, 0, 30) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryError.py b/tests/pytest/query/queryError.py index f1fd9c0dec..539ce5141f 100644 --- a/tests/pytest/query/queryError.py +++ b/tests/pytest/query/queryError.py @@ -56,6 +56,15 @@ class TDTestCase: # query .. order by non-time field tdSql.error("select * from st order by name") + # TD-2133 + tdSql.error("select diff(tagtype),bottom(tagtype,1) from dev_001") + + # TD-2190 + tdSql.error("select min(tagtype),max(tagtype) from dev_002 interval(1n) fill(prev)") + + # TD-2208 + tdSql.error("select diff(tagtype),top(tagtype,1) from dev_001") + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/query/queryNullValueTest.py b/tests/pytest/query/queryNullValueTest.py index bc0b11827e..9920543b3a 100644 --- a/tests/pytest/query/queryNullValueTest.py +++ b/tests/pytest/query/queryNullValueTest.py @@ -50,7 +50,7 @@ class TDTestCase: tdSql.execute("insert into t0 values (%d, NULL)" % (self.ts)) tdDnodes.stop(1) - tdLog.sleep(10) + # tdLog.sleep(10) tdDnodes.start(1) tdSql.execute("use db") tdSql.query("select * from t0") @@ -62,7 +62,7 @@ class TDTestCase: tdSql.execute("create table t1 (ts timestamp, col %s)" % self.types[i]) tdSql.execute("insert into t1 values (%d, NULL)" % (self.ts)) tdDnodes.stop(1) - tdLog.sleep(10) + # tdLog.sleep(10) tdDnodes.start(1) tdSql.execute("use db") diff --git a/tests/pytest/query/queryWithTaosdKilled.py b/tests/pytest/query/queryWithTaosdKilled.py new file mode 100644 index 0000000000..28f9b87636 --- /dev/null +++ b/tests/pytest/query/queryWithTaosdKilled.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + self.conn = conn + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def createOldDir(self): + path = tdDnodes.dnodes[1].getDnodeRootDir(1) + print(path) + tdLog.info("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + + def run(self): + # os.system("rm -rf %s/ " % tdDnodes.getDnodesRootDir()) + tdSql.prepare() + + tdSql.execute("create table st(ts timestamp, speed int)") + tdSql.execute("insert into st values(now, 1)") + tdSql.query("select count(*) from st") + tdSql.checkRows(1) + + + self.createOldDir() + tdLog.sleep(10) + + print("force kill taosd") + os.system("sudo kill -9 $(pgrep -x taosd)") + os.system("") + tdDnodes.start(1) + + tdSql.init(self.conn.cursor()) + tdSql.execute("use db") + tdSql.query("select count(*) from st") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py index 44882f5972..d71742048a 100644 --- a/tests/pytest/stream/stream2.py +++ b/tests/pytest/stream/stream2.py @@ -87,6 +87,10 @@ class TDTestCase: tdSql.checkData(0, 3, rowNum) except Exception as e: tdLog.info(repr(e)) + + tdSql.query("show streams") + tdSql.checkRows(1) + tdSql.checkData(0, 2, 's0') tdLog.info("===== step8 =====") tdSql.query( @@ -142,6 +146,12 @@ class TDTestCase: except Exception as e: tdLog.info(repr(e)) + tdSql.query("show streams") + tdSql.checkRows(2) + tdSql.checkData(0, 2, 's1') + tdSql.checkData(1, 2, 's0') + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/table/createTableFromAnotherDb.py b/tests/pytest/table/createTableFromAnotherDb.py new file mode 100644 index 0000000000..b40e72404c --- /dev/null +++ b/tests/pytest/table/createTableFromAnotherDb.py @@ -0,0 +1,41 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create table db.cars(ts timestamp, c int) tags(id int);") + tdSql.execute("create database db2") + tdSql.error("create table db2.car1 using db.cars tags(1)") + tdSql.error("insert into db2.car1 using db1.cars tags(1) values(now, 1);") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/test.py b/tests/pytest/test.py index a2c35444b5..6be86fe3fd 100644 --- a/tests/pytest/test.py +++ b/tests/pytest/test.py @@ -111,13 +111,25 @@ if __name__ == "__main__": tdLog.info('stop All dnodes') sys.exit(0) - + tdDnodes.init(deployPath) tdDnodes.setTestCluster(testCluster) tdDnodes.setValgrind(valgrind) - tdDnodes.stopAll() - tdDnodes.deploy(1) + is_test_framework = 0 + key_word = 'tdCases.addLinux' + if key_word in open(fileName).read(): + is_test_framework = 1 + if is_test_framework: + moduleName = fileName.replace(".py", "").replace("/", ".") + uModule = importlib.import_module(moduleName) + try: + ucase = uModule.TDTestCase() + tdDnodes.deploy(1,ucase.updatecfgDict) + except : + tdDnodes.deploy(1,{}) + else: + tdDnodes.deploy(1,{}) tdDnodes.start(1) if masterIp == "": diff --git a/tests/pytest/update/append_commit_data.py b/tests/pytest/update/append_commit_data.py index 3169b748e0..867ee696a2 100644 --- a/tests/pytest/update/append_commit_data.py +++ b/tests/pytest/update/append_commit_data.py @@ -38,38 +38,62 @@ class TDTestCase: insertRows = 200 t0 = 1604298064000 + sql='insert into db.t1 values ' + temp='' tdLog.info("insert %d rows" % (insertRows)) for i in range(0, insertRows): - ret = tdSql.execute( - 'insert into t1 values (%d , 1)' % - (t0+i)) + # ret = tdSql.execute( + # 'insert into t1 values (%d , 1)' % + # (t0+i)) + temp += '(%d,1)' %(t0+i) + if i % 100 == 0 or i == (insertRows - 1 ): + print(sql+temp) + ret = tdSql.execute( + sql+temp + ) + temp = '' print("==========step2") print("restart to commit ") tdDnodes.stop(1) tdDnodes.start(1) tdSql.query("select * from db.t1") tdSql.checkRows(insertRows) + for k in range(0,100): tdLog.info("insert %d rows" % (insertRows)) + temp='' for i in range (0,insertRows): - ret = tdSql.execute( - 'insert into db.t1 values(%d,1)' % - (t0+k*200+i) - ) + temp += '(%d,1)' %(t0+k*200+i) + if i % 100 == 0 or i == (insertRows - 1 ): + print(sql+temp) + ret = tdSql.execute( + sql+temp + ) + temp = '' + tdDnodes.stop(1) tdDnodes.start(1) tdSql.query("select * from db.t1") tdSql.checkRows(insertRows+200*k) - print("==========step2") + print("==========step3") print("insert into another table ") s = 'use db' tdSql.execute(s) ret = tdSql.execute('create table t2 (ts timestamp, a int)') insertRows = 20000 + sql = 'insert into t2 values ' + temp = '' for i in range(0, insertRows): - ret = tdSql.execute( - 'insert into t2 values (%d, 1)' % - (t0+i)) + # ret = tdSql.execute( + # 'insert into t2 
values (%d, 1)' % + # (t0+i)) + temp += '(%d,1)' %(t0+i) + if i % 500 == 0 or i == (insertRows - 1 ): + print(sql+temp) + ret = tdSql.execute( + sql+temp + ) + temp = '' tdDnodes.stop(1) tdDnodes.start(1) tdSql.query("select * from t2") diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 757399b4a2..83cb4e8d99 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -108,6 +108,36 @@ class TDDnode: self.deployed = 0 self.testCluster = False self.valgrind = 0 + self.cfgDict = { + "numOfLogLines":"100000000", + "mnodeEqualVnodeNum":"0", + "walLevel":"2", + "fsync":"1000", + "statusInterval":"1", + "numOfMnodes":"3", + "numOfThreadsPerCore":"2.0", + "monitor":"0", + "maxVnodeConnections":"30000", + "maxMgmtConnections":"30000", + "maxMeterConnections":"30000", + "maxShellConns":"30000", + "locale":"en_US.UTF-8", + "charset":"UTF-8", + "asyncLog":"0", + "anyIp":"0", + "tsEnableTelemetryReporting":"0", + "dDebugFlag":"135", + "mDebugFlag":"135", + "sdbDebugFlag":"135", + "rpcDebugFlag":"135", + "tmrDebugFlag":"131", + "cDebugFlag":"135", + "httpDebugFlag":"135", + "monitorDebugFlag":"135", + "udebugFlag":"135", + "jnidebugFlag":"135", + "qdebugFlag":"135" + } def init(self, path): self.path = path @@ -131,7 +161,10 @@ class TDDnode: return totalSize - def deploy(self): + def addExtraCfg(self, option, value): + self.cfgDict.update({option: value}) + + def deploy(self, *updatecfgDict): self.logDir = "%s/sim/dnode%d/log" % (self.path, self.index) self.dataDir = "%s/sim/dnode%d/data" % (self.path, self.index) self.cfgDir = "%s/sim/dnode%d/cfg" % (self.path, self.index) @@ -175,36 +208,17 @@ class TDDnode: self.cfg("publicIp", "192.168.0.%d" % (self.index)) self.cfg("internalIp", "192.168.0.%d" % (self.index)) self.cfg("privateIp", "192.168.0.%d" % (self.index)) - self.cfg("dataDir", self.dataDir) - self.cfg("logDir", self.logDir) - self.cfg("numOfLogLines", "100000000") - self.cfg("mnodeEqualVnodeNum", "0") - self.cfg("walLevel", "2") - self.cfg("fsync", "1000") - self.cfg("statusInterval", "1") - self.cfg("numOfMnodes", "3") - self.cfg("numOfThreadsPerCore", "2.0") - self.cfg("monitor", "0") - self.cfg("maxVnodeConnections", "30000") - self.cfg("maxMgmtConnections", "30000") - self.cfg("maxMeterConnections", "30000") - self.cfg("maxShellConns", "30000") - self.cfg("locale", "en_US.UTF-8") - self.cfg("charset", "UTF-8") - self.cfg("asyncLog", "0") - self.cfg("anyIp", "0") - self.cfg("tsEnableTelemetryReporting", "0") - self.cfg("dDebugFlag", "135") - self.cfg("mDebugFlag", "135") - self.cfg("sdbDebugFlag", "135") - self.cfg("rpcDebugFlag", "135") - self.cfg("tmrDebugFlag", "131") - self.cfg("cDebugFlag", "135") - self.cfg("httpDebugFlag", "135") - self.cfg("monitorDebugFlag", "135") - self.cfg("udebugFlag", "135") - self.cfg("jnidebugFlag", "135") - self.cfg("qdebugFlag", "135") + + self.cfg("dataDir",self.dataDir) + self.cfg("logDir",self.logDir) + print(updatecfgDict) + if updatecfgDict[0] and updatecfgDict[0][0]: + print(updatecfgDict[0][0]) + for key,value in updatecfgDict[0][0].items(): + self.addExtraCfg(key,value) + for key, value in self.cfgDict.items(): + self.cfg(key, value) + self.deployed = 1 tdLog.debug( "dnode:%d is deployed and configured by %s" % @@ -255,9 +269,33 @@ class TDDnode: tdLog.exit(cmd) self.running = 1 tdLog.debug("dnode:%d is running with %s " % (self.index, cmd)) + if self.valgrind == 0: + time.sleep(0.1) + key = 'from offline to online' + bkey = bytes(key,encoding="utf8") + logFile = self.logDir + "/taosdlog.0" + i = 0 + 
while not os.path.exists(logFile): + sleep(0.1) + i += 1 + if i>50: + break + popen = subprocess.Popen('tail -f ' + logFile, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) + pid = popen.pid + print('Popen.pid:' + str(pid)) + while True: + line = popen.stdout.readline().strip() + if bkey in line: + print(line) + popen.kill() + break + tdLog.debug("the dnode:%d has been started." % (self.index)) + else: + tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index)) + time.sleep(5) - tdLog.debug("wait 5 seconds for the dnode:%d to start." % (self.index)) - time.sleep(5) + + # time.sleep(5) def startWithoutSleep(self): buildPath = self.getBuildPath() @@ -437,7 +475,7 @@ class TDDnodes: def setValgrind(self, value): self.valgrind = value - def deploy(self, index): + def deploy(self, index, *updatecfgDict): self.sim.setTestCluster(self.testCluster) if (self.simDeployed == False): @@ -447,7 +485,7 @@ class TDDnodes: self.check(index) self.dnodes[index - 1].setTestCluster(self.testCluster) self.dnodes[index - 1].setValgrind(self.valgrind) - self.dnodes[index - 1].deploy() + self.dnodes[index - 1].deploy(updatecfgDict) def cfg(self, index, option, value): self.check(index) diff --git a/tests/script/general/parser/fill.sim b/tests/script/general/parser/fill.sim index 9851a4e7fc..405a805312 100644 --- a/tests/script/general/parser/fill.sim +++ b/tests/script/general/parser/fill.sim @@ -848,10 +848,7 @@ if $rows != 12 then return -1 endi -print =====================>td-1442 -sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev); - -print =====================> aggregation + arithmetic + fill +print =====================> aggregation + arithmetic + fill, need to add cases TODO #sql select avg(cpu_taosd) - first(cpu_taosd) from dn1 where ts<'2020-11-13 11:00:00' and ts>'2020-11-13 10:50:00' interval(10s) fill(value, 99) #sql select count(*), first(k), avg(k), avg(k)-first(k) from tm0 where ts>'2020-1-1 1:1:1' and ts<'2020-1-1 1:02:59' interval(10s) fill(value, 99); #sql select count(*), first(k), avg(k), avg(k)-first(k) from tm0 where ts>'2020-1-1 1:1:1' and ts<'2020-1-1 1:02:59' interval(10s) fill(NULL); @@ -1044,6 +1041,17 @@ if $data12 != 1 then return -1 endi +print =====================>td-1442, td-2190 , no time range for fill option +sql_error select count(*) from m_fl_tb0 interval(1s) fill(prev); + +sql_error select min(c3) from m_fl_mt0 interval(10a) fill(value, 20) +sql_error select min(c3) from m_fl_mt0 interval(10s) fill(value, 20) +sql_error select min(c3) from m_fl_mt0 interval(10m) fill(value, 20) +sql_error select min(c3) from m_fl_mt0 interval(10h) fill(value, 20) +sql_error select min(c3) from m_fl_mt0 interval(10d) fill(value, 20) +sql_error select min(c3) from m_fl_mt0 interval(10w) fill(value, 20) +sql_error select max(c3) from m_fl_mt0 interval(1n) fill(prev) +sql_error select min(c3) from m_fl_mt0 interval(1y) fill(value, 20) print =============== clear #sql drop database $db diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 34e9844f71..3505ad1a28 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -22,7 +22,7 @@ $db = $dbPrefix . $i $mt = $mtPrefix . 
$i sql drop database if exists $db -sql create database $db +sql create database $db keep 36500 sql use $db print =====================================> test case for twa in single block @@ -111,7 +111,7 @@ if $rows != 2 then return -1 endi -if $data00 != @15-08-18 00:06:00.00@ then +if $data00 != @15-08-18 00:06:00.000@ then return -1 endi @@ -219,10 +219,28 @@ if $data02 != 6 then return -1 endi +sql select twa(k) from t1 where ts>'2015-8-18 00:00:00' and ts<'2015-8-18 00:00:1' +if $rows != 0 then + return -1 +endi + sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts asc sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts desc -#todo add test case while column filte exists. +#todo add test case while column filter exists. -select count(*),TWA(k) from tm0 where ts>='1970-1-1 13:43:00' and ts<='1970-1-1 13:44:10' interval(9s) +#sql select count(*),TWA(k) from tm0 where ts>='1970-1-1 13:43:00' and ts<='1970-1-1 13:44:10' interval(9s) + +sql create table tm0 (ts timestamp, k float); +sql insert into tm0 values(100000000, 5); +sql insert into tm0 values(100003000, -9); +sql select twa(k) from tm0 where tsnow-1h group by t1; sql_error select twa(c2) from group_stb0 where tsnow-1h group by tbname,t1; @@ -393,7 +392,7 @@ endi #error sql sql_error select * from 1; -sql_error select 1; +#sql_error select 1; // equals to select server_status(); sql_error select k+k; sql_error select k+1; sql_error select abc(); diff --git a/tests/script/general/parser/tags_filter.sim b/tests/script/general/parser/tags_filter.sim index c3d0fdfc61..e05776ff11 100644 --- a/tests/script/general/parser/tags_filter.sim +++ b/tests/script/general/parser/tags_filter.sim @@ -149,4 +149,57 @@ if $rows != 2 then return -1 endi +print ==================>td-2424 +sql create table t1(ts timestamp, k float) +sql insert into t1 values(now, 8.001) +sql select * from t1 where k=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k<8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k<=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k>8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k>=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k<>8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k>=8.001 and k<=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k>=8.0009999 and k<=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k>8.001 and k<=8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k>=8.001 and k<8.001 +if $rows != 0 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 8593400ce8..cea6d98679 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -103,6 +103,8 @@ sleep 500 run general/parser/timestamp.sim sleep 500 run general/parser/sliding.sim +sleep 500 +run general/parser/function.sim #sleep 500 #run general/parser/repeatStream.sim diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 64a6c871fc..34b057e71b 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -154,6 +154,7 @@ cd ../../../debug; make ./test.sh 
-f general/parser/repeatAlter.sim ./test.sh -f general/parser/union.sim ./test.sh -f general/parser/topbot.sim +./test.sh -f general/parser/function.sim ./test.sh -f general/stable/disk.sim ./test.sh -f general/stable/dnode3.sim diff --git a/tests/script/jenkins/basic_3.txt b/tests/script/jenkins/basic_3.txt index 83b10a371c..23d1e0a17d 100644 --- a/tests/script/jenkins/basic_3.txt +++ b/tests/script/jenkins/basic_3.txt @@ -70,7 +70,7 @@ ./test.sh -f unique/arbitrator/sync_replica2_dropDb.sim ./test.sh -f unique/arbitrator/sync_replica2_dropTable.sim -./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim +#./test.sh -f unique/arbitrator/sync_replica3_alterTable_add.sim ./test.sh -f unique/arbitrator/sync_replica3_alterTable_drop.sim ./test.sh -f unique/arbitrator/sync_replica3_dropDb.sim ./test.sh -f unique/arbitrator/sync_replica3_dropTable.sim diff --git a/tests/script/unique/arbitrator/insert_duplicationTs.sim b/tests/script/unique/arbitrator/insert_duplicationTs.sim index a873bf02ae..7c6c6e6e92 100644 --- a/tests/script/unique/arbitrator/insert_duplicationTs.sim +++ b/tests/script/unique/arbitrator/insert_duplicationTs.sim @@ -91,8 +91,11 @@ while $i < $tblNum $i = $i + 1 endw +sql show db.vgroups; +print d1: $data04 $data05 , d2: $data06 $data07 + sql select count(*) from $stb -print rows:$rows data00:$data00 +print rtest1==> rows:$rows data00:$data00 if $rows != 1 then return -1 endi @@ -103,6 +106,15 @@ endi $totalRows = $data00 +sql select count(*) from $stb +print test2==> rows:$rows data00:$data00 +sql select count(*) from $stb +print test3==> rows:$rows data00:$data00 +sql select count(*) from $stb +print test4==> rows:$rows data00:$data00 +sql select count(*) from $stb +print test5==> rows:$rows data00:$data00 + print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc sql insert into $tb values ( now - 20d , -20 ) sql insert into $tb values ( now - 40d , -40 ) @@ -153,12 +165,21 @@ if $data00 != $totalRows then return -1 endi +sql select count(*) from $stb +print data00 $data00 +sql select count(*) from $stb +print data00 $data00 +sql select count(*) from $stb +print data00 $data00 +sql select count(*) from $stb +print data00 $data00 + print ============== step5: insert two data rows: now-16d, now+16d, sql insert into $tb values ( now - 21d , -21 ) sql insert into $tb values ( now - 41d , -41 ) $totalRows = $totalRows + 2 -print ============== step5: restart dnode2, waiting sync end +print ============== step6: restart dnode2, waiting sync end system sh/exec.sh -n dnode2 -s start sleep 3000 $loopCnt = 0 @@ -192,9 +213,81 @@ endi sleep $sleepTimer # check using select +sleep 5000 sql select count(*) from $stb print data00 $data00 if $data00 != $totalRows then return -1 endi +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + 
+sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi diff --git a/tests/script/unique/big/maxvnodes.sim b/tests/script/unique/big/maxvnodes.sim index 662d391e47..eb17929ff4 100644 --- a/tests/script/unique/big/maxvnodes.sim +++ b/tests/script/unique/big/maxvnodes.sim @@ -9,10 +9,11 @@ $totalRows = $totalVnodes * $maxTables system sh/deploy.sh -n dnode1 -i 1 system sh/cfg.sh -n dnode1 -c walLevel -v 2 system sh/cfg.sh -n dnode1 -c maxVgroupsPerDb -v $totalVnodes +system sh/cfg.sh -n dnode1 -c balanceInterval -v 1 system sh/deploy.sh -n dnode2 -i 2 system sh/cfg.sh -n dnode2 -c walLevel -v 2 system sh/cfg.sh -n dnode2 -c maxVgroupsPerDb -v $totalVnodes - +system sh/cfg.sh -n dnode2 -c balanceInterval -v 1 print ========== prepare data system sh/exec.sh -n dnode1 -s start diff --git a/tests/script/unique/db/replica_add12.sim b/tests/script/unique/db/replica_add12.sim index e29cf1ba73..62d7e9daa3 100644 --- a/tests/script/unique/db/replica_add12.sim +++ b/tests/script/unique/db/replica_add12.sim @@ -115,6 +115,7 @@ sql insert into d1.t1 values(now, 2) sql insert into d2.t2 values(now, 2) sql insert into d3.t3 values(now, 2) sql insert into d4.t4 values(now, 2) +sleep 1000 sql select * from d1.t1 if $rows != 2 then @@ -154,6 +155,7 @@ sql_error insert into d1.t1 values(now, 3) sql_error insert into d2.t2 values(now, 3) sql_error insert into d3.t3 values(now, 3) sql_error insert into d4.t4 values(now, 3) +sleep 1000 print ========= step6 system sh/exec.sh -n dnode2 -s start @@ -193,6 +195,7 @@ sql_error insert into d1.t1 values(now, 3) sql_error insert into d2.t2 values(now, 3) sql_error insert into d3.t3 values(now, 3) sql_error insert into d4.t4 values(now, 3) +sleep 1000 sql_error select * from d1.t1 sql_error select * from d2.t2 @@ -207,6 +210,7 @@ sql insert into d1.t1 values(now, 5) sql insert into d2.t2 values(now, 5) sql insert into d3.t3 values(now, 5) sql insert into d4.t4 values(now, 5) +sleep 1000 sql select * from d1.t1 if $rows != 4 then diff --git a/tests/script/unique/db/replica_add13.sim b/tests/script/unique/db/replica_add13.sim index 1df49ba658..5d34440e3e 100644 --- a/tests/script/unique/db/replica_add13.sim +++ b/tests/script/unique/db/replica_add13.sim @@ -46,6 +46,7 @@ sql insert into d1.t1 values(1589529000011, 1) sql insert into d2.t2 values(1589529000021, 1) sql insert into d3.t3 values(1589529000031, 1) sql insert into d4.t4 values(1589529000041, 1) +sleep 1000 sql select * from d1.t1 if $rows != 1 then @@ -110,6 +111,7 @@ sql insert into d1.t1 values(1589529000012, 2) sql insert into d2.t2 values(1589529000022, 2) sql insert into d3.t3 values(1589529000032, 2) sql insert into d4.t4 values(1589529000042, 2) +sleep 1000 sql select * from d1.t1 if $rows != 2 then @@ -141,6 +143,7 @@ sql insert into d1.t1 values(1589529000013, 3) sql insert into d2.t2 values(1589529000023, 3) sql insert into d3.t3 values(1589529000033, 3) sql insert into d4.t4 values(1589529000043, 3) +sleep 1000 sql select * from d1.t1 if $rows != 3 then @@ -172,6 +175,7 @@ sql insert into d1.t1 values(1589529000014, 4) sql insert into d2.t2 
values(1589529000024, 4) sql insert into d3.t3 values(1589529000034, 4) sql insert into d4.t4 values(1589529000044, 4) +sleep 1000 sql select * from d1.t1 print select * from d1.t1 $rows @@ -207,6 +211,7 @@ sql insert into d1.t1 values(1589529000015, 5) sql insert into d2.t2 values(1589529000025, 5) sql insert into d3.t3 values(1589529000035, 5) sql insert into d4.t4 values(1589529000045, 5) +sleep 1000 sql select * from d1.t1 if $rows != 5 then @@ -238,6 +243,7 @@ sql insert into d1.t1 values(1589529000016, 6) sql insert into d2.t2 values(1589529000026, 6) sql insert into d3.t3 values(1589529000036, 6) sql insert into d4.t4 values(1589529000046, 6) +sleep 1000 sql select * from d1.t1 if $rows != 6 then diff --git a/tests/script/unique/db/replica_add23.sim b/tests/script/unique/db/replica_add23.sim index 5da73cd117..9285f68137 100644 --- a/tests/script/unique/db/replica_add23.sim +++ b/tests/script/unique/db/replica_add23.sim @@ -46,6 +46,7 @@ sql insert into d1.t1 values(1588262400001, 1) sql insert into d2.t2 values(1588262400001, 1) sql insert into d3.t3 values(1588262400001, 1) sql insert into d4.t4 values(1588262400001, 1) +sleep 1000 sql select * from d1.t1 if $rows != 1 then @@ -110,6 +111,7 @@ sql insert into d1.t1 values(1588262400002, 2) sql insert into d2.t2 values(1588262400002, 2) sql insert into d3.t3 values(1588262400002, 2) sql insert into d4.t4 values(1588262400002, 2) +sleep 1000 sql select * from d1.t1 if $rows != 2 then @@ -142,6 +144,7 @@ sql insert into d1.t1 values(1588262400003, 3) sql insert into d2.t2 values(1588262400003, 3) sql insert into d3.t3 values(1588262400003, 3) sql insert into d4.t4 values(1588262400003, 3) +sleep 1000 sql select * from d1.t1 if $rows != 3 then @@ -173,6 +176,7 @@ sql insert into d1.t1 values(1588262400004, 4) sql insert into d2.t2 values(1588262400004, 4) sql insert into d3.t3 values(1588262400004, 4) sql insert into d4.t4 values(1588262400004, 4) +sleep 1000 sql select * from d1.t1 if $rows != 4 then @@ -204,6 +208,7 @@ sql insert into d1.t1 values(1588262400005, 5) sql insert into d2.t2 values(1588262400005, 5) sql insert into d3.t3 values(1588262400005, 5) sql insert into d4.t4 values(1588262400005, 5) +sleep 1000 sql select * from d1.t1 if $rows != 5 then @@ -235,6 +240,7 @@ sql insert into d1.t1 values(1588262400006, 6) sql insert into d2.t2 values(1588262400006, 6) sql insert into d3.t3 values(1588262400006, 6) sql insert into d4.t4 values(1588262400006, 6) +sleep 1000 sql select * from d1.t1 if $rows != 6 then diff --git a/tests/script/unique/db/replica_part.sim b/tests/script/unique/db/replica_part.sim index de3f6203d2..5c5f98a108 100644 --- a/tests/script/unique/db/replica_part.sim +++ b/tests/script/unique/db/replica_part.sim @@ -38,6 +38,7 @@ sql insert into d2.t2 values(now, 1) sql insert into d1.t1 values(now, 1) sql insert into d3.t3 values(now, 1) sql insert into d4.t4 values(now, 1) +sleep 1000 sql select * from d1.t1 if $rows != 1 then @@ -77,6 +78,7 @@ sql insert into d1.t1 values(now, 2) sql insert into d2.t2 values(now, 2) sql insert into d3.t3 values(now, 2) sql insert into d4.t4 values(now, 2) +sleep 1000 sql select * from d1.t1 if $rows != 2 then @@ -137,6 +139,7 @@ sql insert into d1.t1 values(now, 5) sql insert into d2.t2 values(now, 5) sql insert into d3.t3 values(now, 5) sql insert into d4.t4 values(now, 5) +sleep 1000 sql select * from d1.t1 sql select * from d2.t2 diff --git a/tests/script/unique/db/replica_reduce21.sim b/tests/script/unique/db/replica_reduce21.sim index a64f4370c8..7fe7d9504a 100644 --- 
a/tests/script/unique/db/replica_reduce21.sim +++ b/tests/script/unique/db/replica_reduce21.sim @@ -36,6 +36,7 @@ sql insert into d2.t2 values(now, 1) sql insert into d1.t1 values(now, 1) sql insert into d3.t3 values(now, 1) sql insert into d4.t4 values(now, 1) +sleep 1000 sql select * from d1.t1 if $rows != 1 then @@ -67,6 +68,7 @@ sleep 12000 print ========= step4 query d1 sql insert into d1.t1 values(now, 2) sql select * from d1.t1 +sleep 1000 if $rows != 2 then return -1 endi @@ -75,6 +77,7 @@ print ========= step5 query d5 sql create table d5.t5 (ts timestamp, i int) sql insert into d5.t5 values(now, 1); sql select * from d5.t5 +sleep 1000 if $rows != 1 then return -1 endi @@ -89,6 +92,7 @@ sql insert into d5.t5 values(now, 2) sql insert into d2.t2 values(now, 2) sql insert into d3.t3 values(now, 2) sql insert into d4.t4 values(now, 2) +sleep 1000 sql select * from d5.t5 if $rows != 2 then @@ -118,6 +122,7 @@ sql insert into d5.t5 values(now, 3) sql insert into d2.t2 values(now, 3) sql insert into d3.t3 values(now, 3) sql insert into d4.t4 values(now, 3) +sleep 1000 sql select * from d5.t5 if $rows != 3 then diff --git a/tests/script/unique/db/replica_reduce31.sim b/tests/script/unique/db/replica_reduce31.sim index 3ed0a1e3b9..cbe0417dbd 100644 --- a/tests/script/unique/db/replica_reduce31.sim +++ b/tests/script/unique/db/replica_reduce31.sim @@ -38,6 +38,7 @@ sql insert into d1.t1 values(now, 1) sql insert into d2.t2 values(now, 1) sql insert into d3.t3 values(now, 1) sql insert into d4.t4 values(now, 1) +sleep 1000 sql select * from d1.t1 if $rows != 1 then @@ -83,6 +84,7 @@ sql insert into d1.t1 values(now, 2) sql insert into d2.t2 values(now, 2) sql insert into d3.t3 values(now, 2) sql insert into d4.t4 values(now, 2) +sleep 1000 sql select * from d1.t1 if $rows != 2 then @@ -114,6 +116,7 @@ sql insert into d1.t1 values(now, 3) sql insert into d2.t2 values(now, 3) sql insert into d3.t3 values(now, 3) sql insert into d4.t4 values(now, 3) +sleep 1000 sql select * from d1.t1 if $rows != 3 then @@ -174,6 +177,7 @@ sql insert into d1.t1 values(now, 6) sql insert into d2.t2 values(now, 6) sql insert into d3.t3 values(now, 6) sql insert into d4.t4 values(now, 6) +sleep 1000 sql select * from d1.t1 sql select * from d2.t2 diff --git a/tests/script/unique/db/replica_reduce32.sim b/tests/script/unique/db/replica_reduce32.sim index 664a286be4..c453f16ce6 100644 --- a/tests/script/unique/db/replica_reduce32.sim +++ b/tests/script/unique/db/replica_reduce32.sim @@ -38,6 +38,7 @@ sql insert into d2.t2 values(now, 1) sql insert into d1.t1 values(now, 1) sql insert into d3.t3 values(now, 1) sql insert into d4.t4 values(now, 1) +sleep 1000 sql select * from d1.t1 if $rows != 1 then @@ -71,6 +72,7 @@ sql insert into d1.t1 values(now, 2) sql insert into d2.t2 values(now, 2) sql insert into d3.t3 values(now, 2) sql insert into d4.t4 values(now, 2) +sleep 1000 sql select * from d1.t1 if $rows != 2 then @@ -132,6 +134,7 @@ sql insert into d1.t1 values(now, 5) sql insert into d2.t2 values(now, 5) sql insert into d3.t3 values(now, 5) sql insert into d4.t4 values(now, 5) +sleep 1000 sql select * from d1.t1 print d1.t1 $rows diff --git a/tests/test/c/CMakeLists.txt b/tests/test/c/CMakeLists.txt index 11480a8ba2..2eb8ee1614 100644 --- a/tests/test/c/CMakeLists.txt +++ b/tests/test/c/CMakeLists.txt @@ -31,8 +31,8 @@ IF (TD_LINUX) #add_executable(createTablePerformance createTablePerformance.c) #target_link_libraries(createTablePerformance taos_static tutil common pthread) - add_executable(createNormalTable 
createNormalTable.c) - target_link_libraries(createNormalTable taos_static tutil common pthread) + #add_executable(createNormalTable createNormalTable.c) + #target_link_libraries(createNormalTable taos_static tutil common pthread) #add_executable(queryPerformance queryPerformance.c) #target_link_libraries(queryPerformance taos_static tutil common pthread) @@ -46,7 +46,7 @@ IF (TD_LINUX) #add_executable(invalidTableId invalidTableId.c) #target_link_libraries(invalidTableId taos_static tutil common pthread) - add_executable(hashIterator hashIterator.c) - target_link_libraries(hashIterator taos_static tutil common pthread) + #add_executable(hashIterator hashIterator.c) + #target_link_libraries(hashIterator taos_static tutil common pthread) ENDIF() diff --git a/tests/test/c/createTablePerformance.c b/tests/test/c/createTablePerformance.c index eae104291a..b94c687f2c 100644 --- a/tests/test/c/createTablePerformance.c +++ b/tests/test/c/createTablePerformance.c @@ -32,6 +32,7 @@ int32_t numOfThreads = 30; int32_t numOfTables = 100000; int32_t replica = 1; int32_t numOfColumns = 2; +TAOS * con = NULL; typedef struct { int32_t tableBeginIndex; @@ -84,13 +85,14 @@ int main(int argc, char *argv[]) { pthread_attr_destroy(&thattr); free(pInfo); + taos_close(con); } void createDbAndSTable() { pPrint("start to create db and stable"); char qstr[64000]; - TAOS *con = taos_connect(NULL, "root", "taosdata", NULL, 0); + con = taos_connect(NULL, "root", "taosdata", NULL, 0); if (con == NULL) { pError("failed to connect to DB, reason:%s", taos_errstr(con)); exit(1); @@ -127,8 +129,6 @@ void createDbAndSTable() { exit(0); } taos_free_result(pSql); - - taos_close(con); } void *threadFunc(void *param) { @@ -136,12 +136,6 @@ void *threadFunc(void *param) { char qstr[65000]; int code; - TAOS *con = taos_connect(NULL, "root", "taosdata", NULL, 0); - if (con == NULL) { - pError("index:%d, failed to connect to DB, reason:%s", pInfo->threadIndex, taos_errstr(con)); - exit(1); - } - sprintf(qstr, "use %s", pInfo->dbName); TAOS_RES *pSql = taos_query(con, qstr); taos_free_result(pSql); @@ -170,7 +164,6 @@ void *threadFunc(void *param) { pInfo->createTableSpeed = speed; pPrint("thread:%d, time:%.2f sec, speed:%.1f tables/second, ", pInfo->threadIndex, seconds, speed); - taos_close(con); return 0; }