diff --git a/CMakeLists.txt b/CMakeLists.txt index 588526c286..eb2b1cceb4 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -13,7 +13,6 @@ ENDIF () SET(TD_ACCOUNT FALSE) SET(TD_ADMIN FALSE) SET(TD_GRANT FALSE) -SET(TD_SYNC TRUE) SET(TD_MQTT TRUE) SET(TD_TSDB_PLUGINS FALSE) diff --git a/Jenkinsfile b/Jenkinsfile index edbe11d428..6dc55be4bd 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -1,3 +1,47 @@ + +properties([pipelineTriggers([githubPush()])]) +node { + git url: 'https://github.com/taosdata/TDengine' +} + + +// execute this before anything else, including requesting any time on an agent +if (currentBuild.rawBuild.getCauses().toString().contains('BranchIndexingCause')) { + print "INFO: Build skipped due to trigger being Branch Indexing" + currentBuild.result = 'ABORTED' // optional, gives a better hint to the user that it's been skipped, rather than the default which shows it's successful + return +} + + +def pre_test(){ + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + sudo rmtaos + ''' + } + sh ''' + cd ${WKC} + rm -rf * + cd ${WK} + git reset --hard + git checkout develop + git pull + cd ${WKC} + rm -rf * + mv ${WORKSPACE}/* . + cd ${WK} + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake .. > /dev/null + make > /dev/null + make install > /dev/null + cd ${WKC}/tests + ''' + return 1 +} pipeline { agent none environment{ @@ -8,85 +52,31 @@ pipeline { stages { stage('Parallel test stage') { parallel { - stage('pytest') { - agent{label '184'} + stage('python p1') { + agent{label 'p1'} steps { + pre_test() sh ''' - date - cd ${WKC} - git reset --hard - git checkout develop - git pull - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - date - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. > /dev/null - make > /dev/null - make install > /dev/null cd ${WKC}/tests - #./test-all.sh smoke - ./test-all.sh pytest + ./test-all.sh p1 date''' } } stage('test_b1') { - agent{label 'master'} + agent{label 'b1'} steps { + pre_test() sh ''' - cd ${WKC} - git reset --hard - git checkout develop - git pull - - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - date - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. > /dev/null - make > /dev/null cd ${WKC}/tests - #./test-all.sh smoke ./test-all.sh b1 date''' } } stage('test_crash_gen') { - agent{label "185"} + agent{label "b2"} steps { - sh ''' - cd ${WKC} - git reset --hard - git checkout develop - git pull - - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. > /dev/null - make > /dev/null - cd ${WKC}/tests/pytest - ''' + pre_test() catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${WKC}/tests/pytest @@ -109,193 +99,42 @@ pipeline { } stage('test_valgrind') { - agent{label "186"} + agent{label "b3"} steps { + pre_test() + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./valgrind-test.sh 2>&1 > mem-error-out.log + ./handle_val_log.sh + ''' + } sh ''' - cd ${WKC} - git reset --hard - git checkout develop - git pull - - git submodule update - cd ${WK} - git reset --hard - git checkout develop - git pull - export TZ=Asia/Harbin - date - rm -rf ${WK}/debug - mkdir debug - cd debug - cmake .. 
> /dev/null - make > /dev/null - cd ${WKC}/tests/pytest - ./valgrind-test.sh 2>&1 > mem-error-out.log - ./handle_val_log.sh - date cd ${WKC}/tests ./test-all.sh b3 date''' } } - stage('connector'){ - agent{label "release"} + stage('python p2'){ + agent{label "p2"} steps{ - sh''' - cd ${WORKSPACE} - git checkout develop + pre_test() + sh ''' + date + cd ${WKC}/tests + ./test-all.sh p2 + date ''' - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WORKSPACE}/tests/gotest - bash batchtest.sh - ''' - } - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker - python3 PythonChecker.py - ''' - } - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/ - mvn clean package assembly:single >/dev/null - java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 - ''' - } - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# - dotnet run - ''' - } } } - stage('arm64_build'){ - agent{label 'arm64'} - steps{ - sh ''' - cd ${WK} - git fetch - git checkout develop - git pull - cd ${WKC} - git fetch - git checkout develop - git pull - git submodule update - cd ${WKC}/packaging - ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0 - - ''' - } - } - stage('arm32_build'){ - agent{label 'arm32'} - steps{ - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - cd ${WK} - git fetch - git checkout develop - git pull - cd ${WKC} - git fetch - git checkout develop - git pull - git submodule update - cd ${WKC}/packaging - ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0 - - ''' - } - - } - } + + } } } - post { - success { - emailext ( - subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' - - - - - - - - - - - - -
-                        构建信息
-                        构建名称>>分支:${PROJECT_NAME}
-                        构建结果: Successful
-                        构建编号:${BUILD_NUMBER}
-                        触发用户:${CAUSE}
-                        变更概要:${CHANGES}
-                        构建地址:${BUILD_URL}
-                        构建日志:${BUILD_URL}console
-                        变更集:${JELLY_SCRIPT}
- - ''', - to: "yqliu@taosdata.com,pxiao@taosdata.com", - from: "support@taosdata.com" - ) - } - failure { - emailext ( - subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", - body: ''' - - - - - - - - - - - - -

-                        构建信息
-                        构建名称>>分支:${PROJECT_NAME}
-                        构建结果: Successful
-                        构建编号:${BUILD_NUMBER}
-                        触发用户:${CAUSE}
-                        变更概要:${CHANGES}
-                        构建地址:${BUILD_URL}
-                        构建日志:${BUILD_URL}console
-                        变更集:${JELLY_SCRIPT}
- - ''', - to: "yqliu@taosdata.com,pxiao@taosdata.com", - from: "support@taosdata.com" - ) - } - } -} \ No newline at end of file + +} diff --git a/cmake/define.inc b/cmake/define.inc index 6e64c2709a..782dc625bf 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -13,10 +13,6 @@ IF (TD_GRANT) ADD_DEFINITIONS(-D_GRANT) ENDIF () -IF (TD_SYNC) - ADD_DEFINITIONS(-D_SYNC) -ENDIF () - IF (TD_MQTT) ADD_DEFINITIONS(-D_MQTT) ENDIF () diff --git a/cmake/input.inc b/cmake/input.inc index 1ef2045f57..e8324887a0 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -47,11 +47,6 @@ IF (${MQTT} MATCHES "false") MESSAGE(STATUS "build without mqtt module") ENDIF () -IF (${SYNC} MATCHES "false") - SET(TD_SYNC FALSE) - MESSAGE(STATUS "build without sync module") -ENDIF () - IF (${RANDOM_FILE_FAIL} MATCHES "true") SET(TD_RANDOM_FILE_FAIL TRUE) MESSAGE(STATUS "build with random-file-fail enabled") diff --git a/cmake/install.inc b/cmake/install.inc index 8418612d4c..55b3fa188f 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.14-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.15-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/cmake/version.inc b/cmake/version.inc index 948c7d2d0b..556bae575c 100644 --- a/cmake/version.inc +++ b/cmake/version.inc @@ -4,7 +4,7 @@ PROJECT(TDengine) IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "2.0.9.0") + SET(TD_VER_NUMBER "2.0.10.0") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/documentation20/webdocs/assets/connector.png b/documentation20/webdocs/assets/connector.png new file mode 100644 index 0000000000..c30a50a830 Binary files /dev/null and b/documentation20/webdocs/assets/connector.png differ diff --git a/documentation20/webdocs/markdowndocs/Documentation-ch.md b/documentation20/webdocs/markdowndocs/Documentation-ch.md index f1f2d58f05..766a428f1b 100644 --- a/documentation20/webdocs/markdowndocs/Documentation-ch.md +++ b/documentation20/webdocs/markdowndocs/Documentation-ch.md @@ -84,6 +84,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 - [数据导出](https://www.taosdata.com/cn/documentation20/administrator/#数据导出):从shell按表导出,也可用taosdump工具做各种导出 - [系统监控](https://www.taosdata.com/cn/documentation20/administrator/#系统监控):检查系统现有的连接、查询、流式计算,日志和事件等 - [文件目录结构](https://www.taosdata.com/cn/documentation20/administrator/#文件目录结构):TDengine数据文件、配置文件等所在目录 +- [参数限制和保留关键字](https://www.taosdata.com/cn/documentation20/administrator/#参数限制和保留关键字):TDengine的参数限制和保留关键字列表 ## [TAOS SQL](https://www.taosdata.com/cn/documentation20/taos-sql) @@ -128,4 +129,4 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 ## [培训和FAQ](https://www.taosdata.com/cn/faq) - [FAQ](https://www.taosdata.com/cn/documentation20/faq):常见问题与答案 -- [应用案列](https://www.taosdata.com/cn/blog/?categories=4):一些使用实例来解释如何使用TDengine \ No newline at end of file +- [应用案列](https://www.taosdata.com/cn/blog/?categories=4):一些使用实例来解释如何使用TDengine diff --git a/documentation20/webdocs/markdowndocs/Getting Started-ch.md b/documentation20/webdocs/markdowndocs/Getting Started-ch.md index beb0c639ae..b63bfff6c9 100644 --- a/documentation20/webdocs/markdowndocs/Getting Started-ch.md +++ b/documentation20/webdocs/markdowndocs/Getting Started-ch.md @@ -2,7 +2,35 @@ ## 快捷安装 
-TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版仅能在Linux系统上安装和运行,后续会支持Windows、MAC OS等系统。如果应用需要在Windows或Mac上运行,目前只能使用TDengine的RESTful接口连接服务器。硬件支持X64,后续会支持ARM、龙芯等CPU系统。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。 +TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版服务器仅能在Linux系统上安装和运行,后续会支持Windows、mac OS等系统。 + +**应用驱动** + +如果应用在Windows和Linux上运行,可使用C/C++/C#/JAVA/Python/Go/Node.js接口连接服务器。如果应用在Mac上运行,目前可以使用RESTful接口连接服务器。 + +**CPU** + +CPU支持X64/ARM64/MIPS64/Alpha64,后续会支持ARM32、RISC-V等CPU架构。用户可根据需求选择通过[源码](https://www.taosdata.com/cn/getting-started/#通过源码安装)或者[安装包](https://www.taosdata.com/cn/getting-started/#通过安装包安装)来安装。 + +**服务器** + +目前TDengine服务器可以运行在以下平台上: + +| | **CentOS** **6/7/8** | **Ubuntu** **16/18/20** | **Other Linux** | **Win64/32** | **macOS** | **统信****UOS** | **银河****/****中标麒麟** | **凝思** **V60/V80** | +| -------------- | --------------------- | ------------------------ | --------------- | ------------ | --------- | --------------- | ------------------------- | --------------------- | +| X64 | ● | ● | | ○/○ | ○ | ○ | ● | ● | +| 树莓派ARM32 | | ● | ● | | | | | | +| 龙芯MIPS64 | | | ● | | | | | | +| 鲲鹏 ARM64 | | ○ | ○ | | | | ● | | +| 申威 Alpha64 | | | ○ | | | ● | | | +| 飞腾ARM64 | | | ○优麒麟 | | | | | | +| 海光X64 | ● | ● | ● | | | ○ | ● | ● | +| 瑞芯微ARM64/32 | | | ○ | | | | | | +| 全志ARM64/32 | | | ○ | | | | | | +| 炬力ARM64/32 | | | ○ | | | | | | +| TI ARM32 | | | ○ | | | | | | + + ### 通过源码安装 @@ -16,28 +44,27 @@ TDengine软件分为服务器、客户端和报警模块三部分,目前2.0版 服务器部分,我们提供三种安装包,您可以根据需要选择。TDengine的安装非常简单,从下载到安装成功仅仅只要几秒钟。 - +- TDengine-server-2.0.9.0-Linux-x64.rpm (4.2M) +- TDengine-server-2.0.9.0-Linux-x64.deb (2.7M) +- TDengine-server-2.0.9.0-Linux-x64.tar.gz (4.5M) + 客户端部分,Linux安装包如下: -- TDengine-client-2.0.0.0-Linux-x64.tar.gz (3.4M) +- TDengine-client-2.0.9.0-Linux-x64.tar.gz(3.0M) +- TDengine-client-2.0.9.0-Windows-x64.exe(2.8M) +- TDengine-client-2.0.9.0-Windows-x86.exe(2.8M) 报警模块的Linux安装包如下(请参考[报警模块的使用方法](https://github.com/taosdata/TDengine/blob/master/alert/README_cn.md)): -- TDengine-alert-2.0.0-Linux-x64.tar.gz (8.1M) +- TDengine-alert-2.0.9.0-Linux-x64.tar.gz (8.1M) -目前,TDengine只支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装。其他linux系统的支持正在开发中。用`which systemctl`命令来检测系统中是否存在`systemd`包: +目前,TDengine 支持在使用[`systemd`](https://en.wikipedia.org/wiki/Systemd)做进程服务管理的linux系统上安装,用`which systemctl`命令来检测系统中是否存在`systemd`包: ```cmd which systemctl ``` -如果系统中不存在`systemd`包,请考虑[通过源码安装](#通过源码安装)TDengine。 - 具体的安装过程,请参见TDengine多种安装包的安装和卸载。 ## 轻松启动 @@ -54,12 +81,14 @@ systemctl status taosd ``` 如果TDengine服务正常工作,那么您可以通过TDengine的命令行程序`taos`来访问并体验TDengine。 - + **注意:** - + - systemctl命令需要 _root_ 权限来运行,如果您非 _root_ 用户,请在命令前添加 sudo - 为更好的获得产品反馈,改善产品,TDengine会采集基本的使用信息,但您可以修改系统配置文件taos.cfg里的配置参数telemetryReporting, 将其设为0,就可将其关闭。 +如果系统中不支持`systemd`,也可以用手动运行 /usr/local/taos/bin/taosd 方式启动 TDengine 服务。 + ## TDengine命令行程序 执行TDengine命令行程序,您只要在Linux终端执行`taos`即可。 diff --git a/documentation20/webdocs/markdowndocs/Model-ch.md b/documentation20/webdocs/markdowndocs/Model-ch.md index c0b64bde6e..5bd29fdb5d 100644 --- a/documentation20/webdocs/markdowndocs/Model-ch.md +++ b/documentation20/webdocs/markdowndocs/Model-ch.md @@ -6,12 +6,12 @@ TDengine采用关系型数据模型,需要建库、建表。因此对于一个 ## 创建库 -不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: 
+不同类型的数据采集点往往具有不同的数据特征,包括数据采集频率的高低,数据保留时间的长短,副本的数目,数据块的大小,是否允许更新数据等等。为让各种场景下TDengine都能最大效率的工作,TDengine建议将不同数据特征的表创建在不同的库里,因为每个库可以配置不同的存储策略。创建一个库时,除SQL标准的选项外,应用还可以指定保留时长、副本数、内存块个数、时间精度、文件块里最大最小记录条数、是否压缩、一个数据文件覆盖的天数等多种参数。比如: ```cmd -CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4; +CREATE DATABASE power KEEP 365 DAYS 10 BLOCKS 4 UPDATE 1; ``` -上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4。详细的语法及参数请见TAOS SQL +上述语句将创建一个名为power的库,这个库的数据将保留365天(超过365天将被自动删除),每10天一个数据文件,内存块数为4,允许更新数据。详细的语法及参数请见TAOS SQL 创建库之后,需要使用SQL命令USE将当前库切换过来,例如: diff --git a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md index 760ebae4fc..adba39ec1f 100644 --- a/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md +++ b/documentation20/webdocs/markdowndocs/TAOS SQL-ch.md @@ -2,7 +2,7 @@ 本文档说明TAOS SQL支持的语法规则、主要查询功能、支持的SQL查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的SQL语言的基础。 -TAOS SQL是用户对TDengine进行数据写入和查询的主要工具。TAOS SQL为了便于用户快速上手,在一定程度上提供类似于标准SQL类似的风格和模式。严格意义上,TAOS SQL并不是也不试图提供SQL标准的语法。此外,由于TDengine针对的时序性结构化数据不提供修改和更新功能,因此在TAO SQL中不提供数据更新和数据删除的相关功能。 +TAOS SQL是用户对TDengine进行数据写入和查询的主要工具。TAOS SQL为了便于用户快速上手,在一定程度上提供类似于标准SQL类似的风格和模式。严格意义上,TAOS SQL并不是也不试图提供SQL标准的语法。此外,由于TDengine针对的时序性结构化数据不提供删除功能,因此在TAO SQL中不提供数据删除的相关功能。 本章节SQL语法遵循如下约定: @@ -57,18 +57,29 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic ## 数据库管理 - **创建数据库** - ```mysql - CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep]; - ``` - 说明: - - 1) `KEEP`是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据; - 2) 数据库名最大长度为33; - 3) 一条SQL 语句的最大长度为65480个字符; - 4) 数据库还有更多与存储相关的配置参数,请参见[系统管理](../administrator/#服务端配置)。 + + ```mysql + CREATE DATABASE [IF NOT EXISTS] db_name [KEEP keep] [UPDATE 1]; + ``` + 说明: + + 1) KEEP是该数据库的数据保留多长天数,缺省是3650天(10年),数据库会自动删除超过时限的数据; + + 2) UPDATE 标志数据库支持更新相同时间戳数据; + + 3) 数据库名最大长度为33; + 4) 一条SQL 语句的最大长度为65480个字符; + 5) 数据库还有更多与存储相关的配置参数,请参见系统管理。 + +- **显示系统当前参数** + + ```mysql + SHOW VARIABLES; + ``` - **使用数据库** + ```mysql USE db_name; ``` @@ -143,6 +154,12 @@ TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMic 显示当前数据库下的所有数据表信息。说明:可在like中使用通配符进行名称的匹配。 通配符匹配:1)’%’ (百分号)匹配0到任意个字符;2)’_’下划线匹配一个字符。 +- **在线修改显示字符宽度** + + ```mysql + SET MAX_BINARY_DISPLAY_WIDTH ; + ``` + - **获取表的结构信息** ```mysql diff --git a/documentation20/webdocs/markdowndocs/administrator-ch.md b/documentation20/webdocs/markdowndocs/administrator-ch.md index f54c6b91a1..7f40009e3c 100644 --- a/documentation20/webdocs/markdowndocs/administrator-ch.md +++ b/documentation20/webdocs/markdowndocs/administrator-ch.md @@ -96,6 +96,7 @@ TDengine系统后台服务由taosd提供,可以在配置文件taos.cfg里修 - maxSQLLength:单条SQL语句允许最长限制。默认值:65380字节。 - telemetryReporting: 是否允许 TDengine 采集和上报基本使用信息,0表示不允许,1表示允许。 默认值:1。 - stream: 是否启用连续查询(流计算功能),0表示不允许,1表示允许。 默认值:1。 +- queryBufferSize: 为所有并发查询占用保留的内存大小。计算规则可以根据实际应用可能的最大并发数和表的数字相乘,再乘 170 。单位为字节。 **注意:**对于端口,TDengine会使用从serverPort起13个连续的TCP和UDP端口号,请务必在防火墙打开。因此如果是缺省配置,需要打开从6030都6042共13个端口,而且必须TCP和UDP都打开。 @@ -156,6 +157,9 @@ TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同 客户端配置参数 - firstEp: taos启动时,主动连接的集群中第一个taosd实例的end point, 缺省值为 localhost:6030。 + +- secondEp: taos 启动时,如果 firstEp 连不上,将尝试连接 secondEp。 + - locale 默认值:系统中动态获取,如果自动获取失败,需要用户在配置文件设置或通过API设置 @@ -229,7 +233,7 @@ TDengine系统的前台交互客户端应用程序为taos,它与taosd共享同 - maxBinaryDisplayWidth Shell中binary 和 nchar字段的显示宽度上限,超过此限制的部分将被隐藏。默认值:30。可在 shell 中通过命令 set max_binary_display_width nn 动态修改此选项。 - + ## 用户管理 @@ -264,7 +268,7 @@ SHOW USERS; ``` 显示所有用户 - + **注意:**SQL 语法中,< >表示需要用户输入的部分,但请不要输入< >本身 ## 数据导入 @@ -413,3 +417,65 @@ TDengine的所有可执行文件默认存放在 _/usr/local/taos/bin_ 目录下 您可以通过修改系统配置文件taos.cfg来配置不同的数据目录和日志目录。 + +## 
TDengine参数限制与保留关键字 + +- 数据库名:不能包含“.”以及特殊字符,不能超过32个字符 +- 表名:不能包含“.”以及特殊字符,与所属数据库名一起,不能超过192个字符 +- 表的列名:不能包含特殊字符,不能超过64个字符 +- 表的列数:不能超过1024列 +- 记录的最大长度:包括时间戳8 byte,不能超过16KB +- 单条SQL语句默认最大字符串长度:65480 byte +- 数据库副本数:不能超过3 +- 用户名:不能超过20个byte +- 用户密码:不能超过15个byte +- 标签(Tags)数量:不能超过128个 +- 标签的总长度:不能超过16Kbyte +- 记录条数:仅受存储空间限制 +- 表的个数:仅受节点个数限制 +- 库的个数:仅受节点个数限制 +- 单个库上虚拟节点个数:不能超过64个 + + + +目前TDengine有将近200个内部保留关键字,这些关键字无论大小写均不可以用作库名、表名、STable名、数据列名及标签列名等。这些关键字列表如下: + +| 关键字列表 | | | | | +| ---------- | ----------- | ------------ | ---------- | --------- | +| ABLOCKS | CONNECTION | GT | MINUS | SHOW | +| ABORT | CONNECTIONS | ID | MNODES | SLASH | +| ACCOUNT | COPY | IF | MODULES | SLIDING | +| ACCOUNTS | COUNT | IGNORE | NCHAR | SMALLINT | +| ADD | CREATE | IMMEDIATE | NE | SPREAD | +| AFTER | CTIME | IMPORT | NONE | STAR | +| ALL | DATABASE | IN | NOT | STATEMENT | +| ALTER | DATABASES | INITIALLY | NOTNULL | STDDEV | +| AND | DAYS | INSERT | NOW | STREAM | +| AS | DEFERRED | INSTEAD | OF | STREAMS | +| ASC | DELIMITERS | INTEGER | OFFSET | STRING | +| ATTACH | DESC | INTERVAL | OR | SUM | +| AVG | DESCRIBE | INTO | ORDER | TABLE | +| BEFORE | DETACH | IP | PASS | TABLES | +| BEGIN | DIFF | IS | PERCENTILE | TAG | +| BETWEEN | DIVIDE | ISNULL | PLUS | TAGS | +| BIGINT | DNODE | JOIN | PRAGMA | TBLOCKS | +| BINARY | DNODES | KEEP | PREV | TBNAME | +| BITAND | DOT | KEY | PRIVILEGE | TIMES | +| BITNOT | DOUBLE | KILL | QUERIES | TIMESTAMP | +| BITOR | DROP | LAST | QUERY | TINYINT | +| BOOL | EACH | LE | RAISE | TOP | +| BOTTOM | END | LEASTSQUARES | REM | TRIGGER | +| BY | EQ | LIKE | REPLACE | UMINUS | +| CACHE | EXISTS | LIMIT | REPLICA | UPLUS | +| CASCADE | EXPLAIN | LINEAR | RESET | USE | +| CHANGE | FAIL | LOCAL | RESTRICT | USER | +| CLOG | FILL | LP | ROW | USERS | +| CLUSTER | FIRST | LSHIFT | ROWS | USING | +| COLON | FLOAT | LT | RP | VALUES | +| COLUMN | FOR | MATCH | RSHIFT | VARIABLE | +| COMMA | FROM | MAX | SCORES | VGROUPS | +| COMP | GE | METRIC | SELECT | VIEW | +| CONCAT | GLOB | METRICS | SEMI | WAVG | +| CONFIGS | GRANTS | MIN | SET | WHERE | +| CONFLICT | GROUP | | | | + diff --git a/documentation20/webdocs/markdowndocs/connector-ch.md b/documentation20/webdocs/markdowndocs/connector-ch.md index 69c560bbc4..5b89761aa4 100644 --- a/documentation20/webdocs/markdowndocs/connector-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-ch.md @@ -1,6 +1,21 @@ # 连接器 -TDengine提供了丰富的应用程序开发接口,其中包括C/C++、JAVA、Python、RESTful、Go等,便于用户快速开发应用。 +TDengine提供了丰富的应用程序开发接口,其中包括C/C++、C# 、Java、Python、Go、Node.js、RESTful 等,便于用户快速开发应用。 + +![image-connecotr](../assets/connector.png) + +目前TDengine的连接器可支持的平台广泛,目前包括:X64/X86/ARM64/ARM32/MIPS/Alpha等硬件平台,以及Linux/Win64/Win32等开发环境。对照矩阵如下: + +| | **CPU** | **X64 64bit** | **X86 32bit** | **ARM64** | **ARM32** | **MIPS ** **龙芯** | **Alpha ** **申威** | **X64 ** **海光** | | | +| ---------------------------- | --------- | --------------- | --------------- | --------- | --------- | ------------------- | -------------------- | ------------------ | --------- | --------- | +| | **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | +| **连** **接** **器** | **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | +| **JDBC** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | | +| **Python** | ● | ● | ● | ○ | ● | ● | ● | -- | ● | | +| **Go** | ● | ● | ● | ○ | ● | ● | ○ | -- | -- | | +| **NodeJs** | ● | ● | ○ | ○ | ● | ● | ○ | -- | -- | | +| **C#** | ○ | ● | ● | ○ | ○ | ○ | ○ | -- | -- | | +| **RESTful** | ● | ● | ● | ● | ● | ● | ● | ● 
| ● | | 注意:所有执行 SQL 语句的 API,例如 C/C++ Connector 中的 `tao_query`、`taos_query_a`、`taos_subscribe` 等,以及其它语言中与它们对应的API,每次都只能执行一条 SQL 语句,如果实际参数中包含了多条语句,它们的行为是未定义的。 @@ -664,23 +679,45 @@ import ( _ "github.com/taosdata/driver-go/taosSql" ) ``` +**建议Go版本是1.13或以上,并开启模块支持:** + +``` +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.io,direct +``` + ### 常用API -* `sql.Open(DRIVER_NAME string, dataSourceName string) *DB` - - 该API用来打开DB,返回一个类型为*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine - - **注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。 另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中。所以,sql.Open本身特别轻量。 - -* `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` + +- sql.Open(DRIVER_NAME string, dataSourceName string) *DB` + + 该API用来打开DB,返回一个类型为*DB的对象,一般情况下,DRIVER_NAME设置为字符串`taosSql`, dataSourceName设置为字符串`user:password@/tcp(host:port)/dbname`,如果客户想要用多个goroutine并发访问TDengine, 那么需要在各个goroutine中分别创建一个sql.Open对象并用之访问TDengine + + **注意**: 该API成功创建的时候,并没有做权限等检查,只有在真正执行Query或者Exec的时候才能真正的去创建连接,并同时检查user/password/host/port是不是合法。 另外,由于整个驱动程序大部分实现都下沉到taosSql所依赖的libtaos中。所以,sql.Open本身特别轻量。 + +- `func (db *DB) Exec(query string, args ...interface{}) (Result, error)` sql.Open内置的方法,用来执行非查询相关SQL - - -* `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` - + +- `func (db *DB) Query(query string, args ...interface{}) (*Rows, error)` + sql.Open内置的方法,用来执行查询语句 - - + +- `func (db *DB) Prepare(query string) (*Stmt, error)` + + sql.Open内置的方法,Prepare creates a prepared statement for later queries or executions. + +- `func (s *Stmt) Exec(args ...interface{}) (Result, error)` + + sql.Open内置的方法,executes a prepared statement with the given arguments and returns a Result summarizing the effect of the statement. + +- `func (s *Stmt) Query(args ...interface{}) (*Rows, error)` + + sql.Open内置的方法,Query executes a prepared query statement with the given arguments and returns the query results as a *Rows. + +- `func (s *Stmt) Close() error` + + sql.Open内置的方法,Close closes the statement. 
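
Putting the APIs above together, here is a minimal usage sketch of the Go connector, not part of the patch itself. It assumes a reachable taosd at 127.0.0.1:6030 with the default root/taosdata account and an existing database named `test`; host, credentials, table and column names are illustrative only, and, as noted above, each goroutine should open its own handle if accessing TDengine concurrently.

```go
package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	_ "github.com/taosdata/driver-go/taosSql"
)

func main() {
	// DSN format as documented above: user:password@/tcp(host:port)/dbname.
	// sql.Open is lightweight and performs no permission checks; connection
	// problems only surface on the first Exec or Query.
	db, err := sql.Open("taosSql", "root:taosdata@/tcp(127.0.0.1:6030)/test")
	if err != nil {
		log.Fatalln("open:", err)
	}
	defer db.Close()

	// Exec for non-query statements.
	if _, err := db.Exec("CREATE TABLE IF NOT EXISTS t (ts TIMESTAMP, speed INT)"); err != nil {
		log.Fatalln("create:", err)
	}
	if _, err := db.Exec("INSERT INTO t VALUES (NOW, 12)"); err != nil {
		log.Fatalln("insert:", err)
	}

	// Query for result sets.
	rows, err := db.Query("SELECT ts, speed FROM t")
	if err != nil {
		log.Fatalln("query:", err)
	}
	defer rows.Close()

	for rows.Next() {
		var ts time.Time // Go type mapping for TIMESTAMP may vary with driver version
		var speed int
		if err := rows.Scan(&ts, &speed); err != nil {
			log.Fatalln("scan:", err)
		}
		fmt.Println(ts, speed)
	}
	if err := rows.Err(); err != nil {
		log.Fatalln(err)
	}
}
```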
+ ## Node.js Connector TDengine 同时也提供了node.js 的连接器。用户可以通过[npm](https://www.npmjs.com/)来进行安装,也可以通过源代码*src/connector/nodejs/* 来进行安装。[具体安装步骤如下](https://github.com/taosdata/tdengine/tree/master/src/connector/nodejs): @@ -701,32 +738,6 @@ npm install td2.0-connector - `make` - c语言编译器比如[GCC](https://gcc.gnu.org) -### macOS - -- `python` (建议`v2.7` , `v3.x.x` 目前还不支持) - -- Xcode - - - 然后通过Xcode安装 - - ``` - Command Line Tools - ``` - - 在 - ``` - Xcode -> Preferences -> Locations - ``` - - 目录下可以找到这个工具。或者在终端里执行 - - ``` - xcode-select --install - ``` - - - - 该步执行后 `gcc` 和 `make`就被安装上了 - ### Windows #### 安装方法1 diff --git a/documentation20/webdocs/markdowndocs/connector-java-ch.md b/documentation20/webdocs/markdowndocs/connector-java-ch.md index e3743f4f9d..f17c74bdf3 100644 --- a/documentation20/webdocs/markdowndocs/connector-java-ch.md +++ b/documentation20/webdocs/markdowndocs/connector-java-ch.md @@ -1,5 +1,7 @@ # Java Connector +Java连接器支持的系统有: Linux 64/Windows x64/Windows x86。 + TDengine 为了方便 Java 应用使用,提供了遵循 JDBC 标准(3.0)API 规范的 `taos-jdbcdriver` 实现。目前可以通过 [Sonatype Repository][1] 搜索并下载。 由于 TDengine 是使用 c 语言开发的,使用 taos-jdbcdriver 驱动包时需要依赖系统对应的本地函数库。 @@ -24,7 +26,8 @@ TDengine 的 JDBC 驱动实现尽可能的与关系型数据库驱动保持一 | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | --- | --- | --- | -| 2.0.4 | 2.0.0.x 及以上 | 1.8.x | +| 2.0.12 及以上 | 2.0.8.0 及以上 | 1.8.x | +| 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | | 1.0.3 | 1.6.1.x 及以上 | 1.8.x | | 1.0.2 | 1.6.1.x 及以上 | 1.8.x | | 1.0.1 | 1.6.1.x 及以上 | 1.8.x | diff --git a/documentation20/webdocs/markdowndocs/faq-ch.md b/documentation20/webdocs/markdowndocs/faq-ch.md index 61f70c5962..d89cdd4c92 100644 --- a/documentation20/webdocs/markdowndocs/faq-ch.md +++ b/documentation20/webdocs/markdowndocs/faq-ch.md @@ -117,7 +117,17 @@ Connection = DriverManager.getConnection(url, properties); -## 16. 怎么报告问题? +## 16. 如何进行数据迁移? + +TDengine是根据hostname唯一标志一台机器的,在数据文件从机器A移动机器B时,注意如下两件事: + +- 2.0.0.0 至 2.0.6.x 的版本,重新配置机器B的hostname为机器A的hostname +- 2.0.7.0 及以后的版本,到/var/lib/taos/dnode下,修复dnodeEps.json的dnodeId对应的FQDN,重启。确保机器内所有机器的此文件是完全相同的。 +- 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 + + + +## 17. 怎么报告问题? 如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包: 1. 
/var/log/taos diff --git a/documentation20/webdocs/markdowndocs/insert-ch.md b/documentation20/webdocs/markdowndocs/insert-ch.md index 77ba596d4e..b303d0d5fb 100644 --- a/documentation20/webdocs/markdowndocs/insert-ch.md +++ b/documentation20/webdocs/markdowndocs/insert-ch.md @@ -24,7 +24,7 @@ INSERT INTO d1001 VALUES (1538548685000, 10.3, 219, 0.31) (1538548695000, 12.6, - 要提高写入效率,需要批量写入。一批写入的记录条数越多,插入效率就越高。但一条记录不能超过16K,一条SQL语句总长度不能超过64K(可通过参数maxSQLLength配置,最大可配置为1M)。 - TDengine支持多线程同时写入,要进一步提高写入速度,一个客户端需要打开20个以上的线程同时写。但线程数达到一定数量后,无法再提高,甚至还会下降,因为线程切频繁切换,带来额外开销。 -- 对同一张表,如果新插入记录的时间戳已经存在,新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。 +- 对同一张表,如果新插入记录的时间戳已经存在,默认(没有使用 UPDATE 1 创建数据库)新记录将被直接抛弃,也就是说,在一张表里,时间戳必须是唯一的。如果应用自动生成记录,很有可能生成的时间戳是一样的,这样,成功插入的记录条数会小于应用插入的记录条数。如果在创建数据库时使用 UPDATE 1 选项,插入相同时间戳的新记录将覆盖原有记录。 - 写入的数据的时间戳必须大于当前时间减去配置参数keep的时间。如果keep配置为3650天,那么无法写入比3650天还老的数据。写入数据的时间戳也不能大于当前时间加配置参数days。如果days配置为2,那么无法写入比当前时间还晚2天的数据。 ## Prometheus直接写入 diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index 8a68d02dfd..8c2ef19382 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -29,8 +29,12 @@ # number of threads per CPU core # numOfThreadsPerCore 1.0 -# the proportion of total threads responsible for query -# ratioOfQueryThreads 0.5 +# the proportion of total CPU cores available for query processing +# 2.0: the query threads will be set to double of the CPU cores. +# 1.0: all CPU cores are available for query processing [default]. +# 0.5: only half of the CPU cores are available for query. +# 0.0: only one core available. +# tsRatioOfQueryCores 1.0 # number of management nodes in the system # numOfMnodes 3 @@ -265,5 +269,5 @@ # enable/disable stream (continuous query) # stream 1 -# only 50% CPU resources will be used in query processing -# halfCoresForQuery 0 +# in retrieve blocking model, only in 50% query threads will be used in query processing in dnode +# retrieveBlockingModel 0 diff --git a/snap/snapcraft.yaml b/snap/snapcraft.yaml index b5d06a4adb..fc946566f3 100644 --- a/snap/snapcraft.yaml +++ b/snap/snapcraft.yaml @@ -1,6 +1,6 @@ name: tdengine base: core18 -version: '2.0.9.0' +version: '2.0.10.0' icon: snap/gui/t-dengine.svg summary: an open-source big data platform designed and optimized for IoT. 
description: | @@ -72,7 +72,7 @@ parts: - usr/bin/taosd - usr/bin/taos - usr/bin/taosdemo - - usr/lib/libtaos.so.2.0.9.0 + - usr/lib/libtaos.so.2.0.10.0 - usr/lib/libtaos.so.1 - usr/lib/libtaos.so diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a2600785c3..931a0a132e 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -10,9 +10,7 @@ ADD_SUBDIRECTORY(client) ADD_SUBDIRECTORY(query) ADD_SUBDIRECTORY(kit) ADD_SUBDIRECTORY(plugins) -IF (TD_SYNC) - ADD_SUBDIRECTORY(sync) -ENDIF () +ADD_SUBDIRECTORY(sync) ADD_SUBDIRECTORY(balance) ADD_SUBDIRECTORY(mnode) ADD_SUBDIRECTORY(vnode) diff --git a/src/balance/src/bnMain.c b/src/balance/src/bnMain.c index 383f981913..d80488fe9f 100644 --- a/src/balance/src/bnMain.c +++ b/src/balance/src/bnMain.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "os.h" +#include "tref.h" #include "tsync.h" #include "tglobal.h" #include "dnode.h" @@ -28,7 +29,9 @@ #include "mnodeUser.h" #include "mnodeVgroup.h" -static SBnMgmt tsBnMgmt;; +extern int64_t tsDnodeRid; +extern int64_t tsSdbRid; +static SBnMgmt tsBnMgmt; static void bnMonitorDnodeModule(); static void bnLock() { @@ -529,6 +532,9 @@ void bnCheckStatus() { void * pIter = NULL; SDnodeObj *pDnode = NULL; + void *dnodeSdb = taosAcquireRef(tsSdbRid, tsDnodeRid); + if (dnodeSdb == NULL) return; + while (1) { pIter = mnodeGetNextDnode(pIter, &pDnode); if (pDnode == NULL) break; @@ -543,6 +549,8 @@ void bnCheckStatus() { } mnodeDecDnodeRef(pDnode); } + + taosReleaseRef(tsSdbRid, tsDnodeRid); } void bnCheckModules() { diff --git a/src/client/inc/tscSubquery.h b/src/client/inc/tscSubquery.h index f7832c9818..d3996ccf7f 100644 --- a/src/client/inc/tscSubquery.h +++ b/src/client/inc/tscSubquery.h @@ -36,7 +36,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql); int32_t tscHandleMultivnodeInsert(SSqlObj *pSql); -int32_t tscHandleInsertRetry(SSqlObj* pSql); +int32_t tscHandleInsertRetry(SSqlObj* parent, SSqlObj* child); void tscBuildResFromSubqueries(SSqlObj *pSql); TAOS_ROW doSetResultRowData(SSqlObj *pSql); diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 2c8641da76..eddfa62966 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -110,11 +110,12 @@ SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint uint32_t offset); void* tscDestroyBlockArrayList(SArray* pDataBlockList); +void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable); + int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); -int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pDataList); -int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, - STableDataBlocks** dataBlocks); +int32_t tscMergeTableDataBlocks(SSqlObj* pSql); +int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, + STableDataBlocks** dataBlocks, SArray* pBlockList); /** * for the projection query on metric or point interpolation query on metric, @@ -275,6 +276,8 @@ void tscPrintSelectClause(SSqlObj* pSql, int32_t subClauseIndex); bool hasMoreVnodesToTry(SSqlObj *pSql); bool hasMoreClauseToTry(SSqlObj* pSql); +void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache); + void tscTryQueryNextVnode(SSqlObj *pSql, __async_cb_func_t fp); void tscAsyncQuerySingleRowForNextVnode(void *param, TAOS_RES *tres, int numOfRows); void 
tscTryQueryNextClause(SSqlObj* pSql, __async_cb_func_t fp); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index a1b6174de0..bdb35cb072 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -37,40 +37,6 @@ extern "C" { #include "qTsbuf.h" #include "tcmdtype.h" -#if 0 -static UNUSED_FUNC void *u_malloc (size_t __size) { - uint32_t v = rand(); - - if (v % 5000 <= 0) { - return NULL; - } else { - return malloc(__size); - } -} - -static UNUSED_FUNC void* u_calloc(size_t num, size_t __size) { - uint32_t v = rand(); - if (v % 5000 <= 0) { - return NULL; - } else { - return calloc(num, __size); - } -} - -static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { - uint32_t v = rand(); - if (v % 5000 <= 0) { - return NULL; - } else { - return realloc(p, __size); - } -} - -#define calloc u_calloc -#define malloc u_malloc -#define realloc u_realloc -#endif - // forward declaration struct SSqlInfo; struct SLocalReducer; @@ -78,7 +44,7 @@ struct SLocalReducer; // data source from sql string or from file enum { DATA_FROM_SQL_STRING = 1, - DATA_FROM_DATA_FILE = 2, + DATA_FROM_DATA_FILE = 2, }; typedef void (*__async_cb_func_t)(void *param, TAOS_RES *tres, int32_t numOfRows); @@ -118,10 +84,10 @@ typedef struct STableMetaInfo { * 1. keep the vgroup index during the multi-vnode super table projection query * 2. keep the vgroup index for multi-vnode insertion */ - int32_t vgroupIndex; - char name[TSDB_TABLE_FNAME_LEN]; // (super) table name - char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql - SArray* tagColList; // SArray, involved tag columns + int32_t vgroupIndex; + char name[TSDB_TABLE_FNAME_LEN]; // (super) table name + char aliasName[TSDB_TABLE_NAME_LEN]; // alias name of table specified in query sql + SArray *tagColList; // SArray, involved tag columns } STableMetaInfo; /* the structure for sql function in select clause */ @@ -136,7 +102,7 @@ typedef struct SSqlExpr { int16_t numOfParams; // argument value of each function tVariant param[3]; // parameters are not more than 3 int32_t offset; // sub result column value of arithmetic expression. 
- int16_t resColId; // result column id + int16_t resColId; // result column id } SSqlExpr; typedef struct SColumnIndex { @@ -204,22 +170,17 @@ typedef struct SParamInfo { } SParamInfo; typedef struct STableDataBlocks { - char tableId[TSDB_TABLE_FNAME_LEN]; - int8_t tsSource; // where does the UNIX timestamp come from, server or client - bool ordered; // if current rows are ordered or not - int64_t vgId; // virtual group id - int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending - int32_t numOfTables; // number of tables in current submit block - int32_t rowSize; // row size for current table - uint32_t nAllocSize; - uint32_t headerSize; // header for table info (uid, tid, submit metadata) - uint32_t size; - - /* - * the table meta of table, the table meta will be used during submit, keep a ref - * to avoid it to be removed from cache - */ - STableMeta *pTableMeta; + char tableId[TSDB_TABLE_FNAME_LEN]; + int8_t tsSource; // where does the UNIX timestamp come from, server or client + bool ordered; // if current rows are ordered or not + int64_t vgId; // virtual group id + int64_t prevTS; // previous timestamp, recorded to decide if the records array is ts ascending + int32_t numOfTables; // number of tables in current submit block + int32_t rowSize; // row size for current table + uint32_t nAllocSize; + uint32_t headerSize; // header for table info (uid, tid, submit metadata) + uint32_t size; + STableMeta *pTableMeta; // the tableMeta of current table, the table meta will be used during submit, keep a ref to avoid to be removed from cache char *pData; // for parameter ('?') binding @@ -252,7 +213,7 @@ typedef struct SQueryInfo { int64_t clauseLimit; // limit for current sub clause int64_t prjOffset; // offset value in the original sql expression, only applied at client side - int64_t tableLimit; // table limit in case of super table projection query + global order + limit + int64_t vgroupLimit; // table limit in case of super table projection query + global order + limit int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX int16_t resColumnId; // result column id @@ -284,10 +245,14 @@ typedef struct { int32_t numOfParams; int8_t dataSourceType; // load data from file or not - int8_t submitSchema; // submit block is built with table schema - STagData *pTagData; // NOTE: pTagData->data is used as a variant length array - SHashObj *pTableList; // referred table involved in sql - SArray *pDataBlocks; // SArray submit data blocks after parsing sql + int8_t submitSchema; // submit block is built with table schema + STagData *pTagData; // NOTE: pTagData->data is used as a variant length array + + STableMeta **pTableMetaList; // all involved tableMeta list of current insert sql statement. + int32_t numOfTables; + + SHashObj *pTableBlockHashList; // data block for each table + SArray *pDataBlocks; // SArray. 
Merged submit block for each vgroup } SSqlCmd; typedef struct SResRec { @@ -382,6 +347,7 @@ typedef struct SSqlObj { typedef struct SSqlStream { SSqlObj *pSql; + const char* dstTable; uint32_t streamId; char listed; bool isProject; @@ -408,6 +374,8 @@ typedef struct SSqlStream { struct SSqlStream *prev, *next; } SSqlStream; +void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable); + int32_t tscInitRpc(const char *user, const char *secret, void** pDnodeConn); void tscInitMsgsFp(); diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 3ff8a68d8f..910a7b4112 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -96,7 +96,7 @@ void taos_query_a(TAOS *taos, const char *sqlstr, __async_cb_func_t fp, void *pa return; } - taosNotePrintTsc(sqlstr); + nPrintTsc(sqlstr); SSqlObj *pSql = (SSqlObj *)calloc(1, sizeof(SSqlObj)); if (pSql == NULL) { @@ -365,6 +365,7 @@ void tscProcessFetchRow(SSchedMsg *pMsg) { static void tscProcessAsyncError(SSchedMsg *pMsg) { void (*fp)() = pMsg->ahandle; terrno = *(int32_t*) pMsg->msg; + tfree(pMsg->msg); (*fp)(pMsg->thandle, NULL, *(int32_t*)pMsg->msg); } @@ -410,52 +411,26 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (code != TSDB_CODE_SUCCESS) { tscError("%p get %s failed, code:%s", pSql, msg, tstrerror(code)); goto _error; - } else { - tscDebug("%p get %s successfully", pSql, msg); } + tscDebug("%p get %s successfully", pSql, msg); if (pSql->pStream == NULL) { SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, pCmd->clauseIndex); // check if it is a sub-query of super table query first, if true, enter another routine - if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_STABLE_SUBQUERY)) { - tscDebug("%p update table meta in local cache, continue to process sql and send corresponding subquery", pSql); + if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { + tscDebug("%p update table meta in local cache, continue to process sql and send the corresponding query", pSql); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); + assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; - } else { - assert(code == TSDB_CODE_SUCCESS); - } - - // param already freed by other routine and pSql in tscCache when ctrl + c - if (atomic_load_ptr(&pSql->param) == NULL) { - return; - } - assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); - - SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; - SSqlObj * pParObj = trs->pParentSql; - - // NOTE: the vgroupInfo for the queried super table must be existed here. 
- assert(pParObj->signature == pParObj && trs->subqueryIndex == pTableMetaInfo->vgroupIndex && - pTableMetaInfo->vgroupIndex >= 0 && pTableMetaInfo->vgroupList != NULL); - - // tscProcessSql can add error into async res - tscProcessSql(pSql); - return; - } else if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_TAG_FILTER_QUERY)) { - tscDebug("%p update table meta in local cache, continue to process sql and send corresponding tid_tag query", pSql); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - code = tscGetTableMeta(pSql, pTableMetaInfo); - if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { - return; - } else { - assert(code == TSDB_CODE_SUCCESS); } assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); + // tscProcessSql can add error into async res tscProcessSql(pSql); return; @@ -465,16 +440,15 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); code = tscGetTableMeta(pSql, pTableMetaInfo); + + assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { return; - } else { - assert(code == TSDB_CODE_SUCCESS); } - // in case of insert, redo parsing the sql string and build new submit data block for two reasons: - // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly. - // 2. vnode may need the schema information along with submit block to update its local table schema. - if (pCmd->command == TSDB_SQL_INSERT || pCmd->command == TSDB_SQL_SELECT) { + assert(pCmd->command != TSDB_SQL_INSERT); + + if (pCmd->command == TSDB_SQL_SELECT) { tscDebug("%p redo parse sql string and proceed", pSql); pCmd->parseFinished = false; tscResetSqlCmdObj(pCmd, false); @@ -486,17 +460,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { goto _error; } - if (pCmd->command == TSDB_SQL_INSERT) { - /* - * Discard previous built submit blocks, and then parse the sql string again and build up all submit blocks, - * and send the required submit block according to index value in supporter to server. 
- */ - pSql->fp = pSql->fetchFp; // restore the fp - tscHandleInsertRetry(pSql); - } else if (pCmd->command == TSDB_SQL_SELECT) { // in case of other query type, continue - tscProcessSql(pSql); - } - }else { // in all other cases, simple retry + tscProcessSql(pSql); + } else { // in all other cases, simple retry tscProcessSql(pSql); } @@ -551,6 +516,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (!pSql->cmd.parseFinished) { tsParseSql(pSql, false); } + (*pSql->fp)(pSql->param, pSql, code); return; diff --git a/src/client/src/tscFunctionImpl.c b/src/client/src/tscFunctionImpl.c index 7921399330..62e55c033f 100644 --- a/src/client/src/tscFunctionImpl.c +++ b/src/client/src/tscFunctionImpl.c @@ -2589,10 +2589,11 @@ static void percentile_next_step(SQLFunctionCtx *pCtx) { // all data are null, set it completed if (pInfo->numOfElems == 0) { pResInfo->complete = true; + } else { + pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, GET_DOUBLE_VAL(&pInfo->minval), GET_DOUBLE_VAL(&pInfo->maxval)); } pInfo->stage += 1; - pInfo->pMemBucket = tMemBucketCreate(pCtx->inputBytes, pCtx->inputType, GET_DOUBLE_VAL(&pInfo->minval), GET_DOUBLE_VAL(&pInfo->maxval)); } else { pResInfo->complete = true; } @@ -3647,11 +3648,21 @@ static bool twa_function_setup(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); - pInfo->lastKey = INT64_MIN; + pInfo->p.key = INT64_MIN; pInfo->win = TSWINDOW_INITIALIZER; return true; } +static double twa_get_area(SPoint1 s, SPoint1 e) { + if ((s.val >= 0 && e.val >= 0)|| (s.val <=0 && e.val <= 0)) { + return (s.val + e.val) * (e.key - s.key) / 2; + } + + double x = (s.key * e.val - e.key * s.val)/(e.val - s.val); + double val = (s.val * (x - s.key) + e.val * (e.key - x)) / 2; + return val; +} + static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t index, int32_t size) { int32_t notNullElems = 0; TSKEY *primaryKey = pCtx->ptsList; @@ -3662,28 +3673,29 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t int32_t i = index; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); + SPoint1* last = &pInfo->p; if (pCtx->start.key != INT64_MIN) { assert((pCtx->start.key < primaryKey[tsIndex + i] && pCtx->order == TSDB_ORDER_ASC) || (pCtx->start.key > primaryKey[tsIndex + i] && pCtx->order == TSDB_ORDER_DESC)); - assert(pInfo->lastKey == INT64_MIN); + assert(last->key == INT64_MIN); - pInfo->lastKey = primaryKey[tsIndex + i]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + last->key = primaryKey[tsIndex + i]; + GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); - pInfo->dOutput += ((pInfo->lastValue + pCtx->start.val) / 2) * (pInfo->lastKey - pCtx->start.key); + pInfo->dOutput += twa_get_area(pCtx->start, *last); pInfo->hasResult = DATA_SET_FLAG; pInfo->win.skey = pCtx->start.key; notNullElems++; i += step; - } else if (pInfo->lastKey == INT64_MIN) { - pInfo->lastKey = primaryKey[tsIndex + i]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + } else if (pInfo->p.key == INT64_MIN) { + last->key = primaryKey[tsIndex + i]; + GET_TYPED_DATA(last->val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); pInfo->hasResult = DATA_SET_FLAG; - pInfo->win.skey = pInfo->lastKey; + pInfo->win.skey = last->key; notNullElems++; i += step; } @@ -3697,9 +3709,9 @@ static int32_t 
twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3710,9 +3722,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3723,9 +3735,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3736,9 +3748,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = (double) val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = (double) val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3749,9 +3761,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3762,9 +3774,9 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + tsIndex] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + tsIndex]; + SPoint1 st = {.key = primaryKey[i + tsIndex], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3773,20 +3785,19 @@ static int32_t twa_function_impl(SQLFunctionCtx* pCtx, int32_t tsIndex, int32_t // the last interpolated time window value if (pCtx->end.key != INT64_MIN) { - pInfo->dOutput += ((pInfo->lastValue + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->lastKey); - pInfo->lastValue = pCtx->end.val; - pInfo->lastKey = pCtx->end.key; + pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end); + pInfo->p = pCtx->end; } - pInfo->win.ekey = pInfo->lastKey; + pInfo->win.ekey = pInfo->p.key; return notNullElems; } static void twa_function(SQLFunctionCtx *pCtx) { - void * data = GET_INPUT_CHAR(pCtx); + void *data = GET_INPUT_CHAR(pCtx); SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); - STwaInfo * pInfo = GET_ROWCELL_INTERBUF(pResInfo); + STwaInfo *pInfo = GET_ROWCELL_INTERBUF(pResInfo); // skip null value int32_t step = GET_FORWARD_DIRECTION_FACTOR(pCtx->order); @@ -3807,6 +3818,7 @@ static void twa_function(SQLFunctionCtx *pCtx) { } } +//TODO refactor static void 
twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { void *pData = GET_INPUT_CHAR_INDEX(pCtx, index); if (pCtx->hasNull && isNull(pData, pCtx->inputType)) { @@ -3823,23 +3835,23 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { int32_t size = pCtx->size; if (pCtx->start.key != INT64_MIN) { - assert(pInfo->lastKey == INT64_MIN); + assert(pInfo->p.key == INT64_MIN); - pInfo->lastKey = primaryKey[index]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + pInfo->p.key = primaryKey[index]; + GET_TYPED_DATA(pInfo->p.val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); - pInfo->dOutput += ((pInfo->lastValue + pCtx->start.val) / 2) * (pInfo->lastKey - pCtx->start.key); + pInfo->dOutput += twa_get_area(pCtx->start, pInfo->p); pInfo->hasResult = DATA_SET_FLAG; pInfo->win.skey = pCtx->start.key; notNullElems++; i += 1; - } else if (pInfo->lastKey == INT64_MIN) { - pInfo->lastKey = primaryKey[index]; - GET_TYPED_DATA(pInfo->lastValue, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); + } else if (pInfo->p.key == INT64_MIN) { + pInfo->p.key = primaryKey[index]; + GET_TYPED_DATA(pInfo->p.val, double, pCtx->inputType, GET_INPUT_CHAR_INDEX(pCtx, index)); pInfo->hasResult = DATA_SET_FLAG; - pInfo->win.skey = pInfo->lastKey; + pInfo->win.skey = pInfo->p.key; notNullElems++; i += 1; } @@ -3853,9 +3865,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3866,9 +3878,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3879,9 +3891,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3892,9 +3904,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = (double) val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = (double) val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st); + pInfo->p = st; } break; } @@ -3905,9 +3917,9 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st);//((val[i] + pInfo->p.val) / 2) * (primaryKey[i + index] - pInfo->p.key); + pInfo->p = st; } break; } @@ -3918,9 +3930,9 @@ static void 
twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { continue; } - pInfo->dOutput += ((val[i] + pInfo->lastValue) / 2) * (primaryKey[i + index] - pInfo->lastKey); - pInfo->lastValue = val[i]; - pInfo->lastKey = primaryKey[i + index]; + SPoint1 st = {.key = primaryKey[i + index], .val = val[i]}; + pInfo->dOutput += twa_get_area(pInfo->p, st);//((val[i] + pInfo->p.val) / 2) * (primaryKey[i + index] - pInfo->p.key); + pInfo->p = st; } break; } @@ -3929,12 +3941,11 @@ static void twa_function_f(SQLFunctionCtx *pCtx, int32_t index) { // the last interpolated time window value if (pCtx->end.key != INT64_MIN) { - pInfo->dOutput += ((pInfo->lastValue + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->lastKey); - pInfo->lastValue = pCtx->end.val; - pInfo->lastKey = pCtx->end.key; + pInfo->dOutput += twa_get_area(pInfo->p, pCtx->end);//((pInfo->p.val + pCtx->end.val) / 2) * (pCtx->end.key - pInfo->p.key); + pInfo->p = pCtx->end; } - pInfo->win.ekey = pInfo->lastKey; + pInfo->win.ekey = pInfo->p.key; SET_VAL(pCtx, notNullElems, 1); @@ -3965,7 +3976,7 @@ static void twa_func_merge(SQLFunctionCtx *pCtx) { pBuf->dOutput += pInput->dOutput; pBuf->win = pInput->win; - pBuf->lastKey = pInput->lastKey; + pBuf->p = pInput->p; } SET_VAL(pCtx, numOfNotNull, 1); @@ -3992,15 +4003,14 @@ void twa_function_finalizer(SQLFunctionCtx *pCtx) { SResultRowCellInfo *pResInfo = GET_RES_INFO(pCtx); STwaInfo *pInfo = (STwaInfo *)GET_ROWCELL_INTERBUF(pResInfo); - assert(pInfo->win.ekey == pInfo->lastKey && pInfo->hasResult == pResInfo->hasResult); - if (pInfo->hasResult != DATA_SET_FLAG) { setNull(pCtx->aOutputBuf, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); return; } - + + assert(pInfo->win.ekey == pInfo->p.key && pInfo->hasResult == pResInfo->hasResult); if (pInfo->win.ekey == pInfo->win.skey) { - *(double *)pCtx->aOutputBuf = pInfo->lastValue; + *(double *)pCtx->aOutputBuf = pInfo->p.val; } else { *(double *)pCtx->aOutputBuf = pInfo->dOutput / (pInfo->win.ekey - pInfo->win.skey); } diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index 7fc5b8debb..a99918975e 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -726,10 +726,14 @@ int32_t tscLocalReducerEnvCreate(SSqlObj *pSql, tExtMemBuffer ***pMemBuffer, tOr SSqlExpr *pExpr = tscSqlExprGet(pQueryInfo, i); SSchema p1 = {0}; - if (pExpr->colInfo.colIndex != TSDB_TBNAME_COLUMN_INDEX) { - p1 = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex); - } else { + if (pExpr->colInfo.colIndex == TSDB_TBNAME_COLUMN_INDEX) { p1 = tGetTableNameColumnSchema(); + } else if (TSDB_COL_IS_UD_COL(pExpr->colInfo.flag)) { + p1.bytes = pExpr->resBytes; + p1.type = (uint8_t) pExpr->resType; + tstrncpy(p1.name, pExpr->aliasName, tListLen(p1.name)); + } else { + p1 = *tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, pExpr->colInfo.colIndex); } int32_t inter = 0; diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 18e5b6f074..683e3b1d78 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -686,17 +686,14 @@ void tscSortRemoveDataBlockDupRows(STableDataBlocks *dataBuf) { } } -static int32_t doParseInsertStatement(SSqlObj *pSql, void *pTableList, char **str, SParsedDataColInfo *spd, - int32_t *totalNum) { - SSqlCmd * pCmd = &pSql->cmd; +static int32_t doParseInsertStatement(SSqlCmd* pCmd, char **str, SParsedDataColInfo *spd, int32_t *totalNum) { STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); - STableMeta * 
pTableMeta = pTableMetaInfo->pTableMeta; + STableMeta *pTableMeta = pTableMetaInfo->pTableMeta; STableComInfo tinfo = tscGetTableInfo(pTableMeta); STableDataBlocks *dataBuf = NULL; - int32_t ret = tscGetDataBlockFromList(pTableList, pCmd->pDataBlocks, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE, - sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, - pTableMeta, &dataBuf); + int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_DEFAULT_PAYLOAD_SIZE, + sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &dataBuf, NULL); if (ret != TSDB_CODE_SUCCESS) { return ret; } @@ -1058,18 +1055,17 @@ int tsParseInsertSql(SSqlObj *pSql) { return code; } - if (NULL == pCmd->pTableList) { - pCmd->pTableList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); - pCmd->pDataBlocks = taosArrayInit(4, POINTER_BYTES); - if (NULL == pCmd->pTableList || NULL == pSql->cmd.pDataBlocks) { + if (NULL == pCmd->pTableBlockHashList) { + pCmd->pTableBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + if (NULL == pCmd->pTableBlockHashList) { code = TSDB_CODE_TSC_OUT_OF_MEMORY; - goto _error; + goto _clean; } } else { str = pCmd->curSql; } - tscDebug("%p create data block list for submit data:%p, pTableList:%p", pSql, pCmd->pDataBlocks, pCmd->pTableList); + tscDebug("%p create data block list hashList:%p", pSql, pCmd->pTableBlockHashList); while (1) { int32_t index = 0; @@ -1091,7 +1087,7 @@ int tsParseInsertSql(SSqlObj *pSql) { */ if (totalNum == 0) { code = TSDB_CODE_TSC_INVALID_SQL; - goto _error; + goto _clean; } else { break; } @@ -1104,11 +1100,11 @@ int tsParseInsertSql(SSqlObj *pSql) { // Check if the table name available or not if (validateTableName(sToken.z, sToken.n, &sTblToken) != TSDB_CODE_SUCCESS) { code = tscInvalidSQLErrMsg(pCmd->payload, "table name invalid", sToken.z); - goto _error; + goto _clean; } if ((code = tscSetTableFullName(pTableMetaInfo, &sTblToken, pSql)) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } if ((code = tscCheckIfCreateTable(&str, pSql)) != TSDB_CODE_SUCCESS) { @@ -1122,12 +1118,12 @@ int tsParseInsertSql(SSqlObj *pSql) { tscError("%p async insert parse error, code:%s", pSql, tstrerror(code)); pCmd->curSql = NULL; - goto _error; + goto _clean; } if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { code = tscInvalidSQLErrMsg(pCmd->payload, "insert data into super table is not supported", NULL); - goto _error; + goto _clean; } index = 0; @@ -1136,7 +1132,7 @@ int tsParseInsertSql(SSqlObj *pSql) { if (sToken.n == 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE required", sToken.z); - goto _error; + goto _clean; } STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); @@ -1148,32 +1144,32 @@ int tsParseInsertSql(SSqlObj *pSql) { tscSetAssignedColumnInfo(&spd, pSchema, tinfo.numOfColumns); if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } /* * app here insert data in different vnodes, so we need to set the following * data in another submit procedure using async insert routines */ - code = doParseInsertStatement(pSql, pCmd->pTableList, &str, &spd, &totalNum); + code = doParseInsertStatement(pCmd, &str, &spd, &totalNum); if (code != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } } else if (sToken.type == TK_FILE) { if (validateDataSource(pCmd, DATA_FROM_DATA_FILE, sToken.z) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } index = 0; 
sToken = tStrGetToken(str, &index, false, 0, NULL); if (sToken.type != TK_STRING && sToken.type != TK_ID) { code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); - goto _error; + goto _clean; } str += index; if (sToken.n == 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "file path is required following keyword FILE", sToken.z); - goto _error; + goto _clean; } strncpy(pCmd->payload, sToken.z, sToken.n); @@ -1183,7 +1179,7 @@ int tsParseInsertSql(SSqlObj *pSql) { wordexp_t full_path; if (wordexp(pCmd->payload, &full_path, 0) != 0) { code = tscInvalidSQLErrMsg(pCmd->payload, "invalid filename", sToken.z); - goto _error; + goto _clean; } tstrncpy(pCmd->payload, full_path.we_wordv[0], pCmd->allocSize); @@ -1195,7 +1191,7 @@ int tsParseInsertSql(SSqlObj *pSql) { SSchema * pSchema = tscGetTableSchema(pTableMeta); if (validateDataSource(pCmd, DATA_FROM_SQL_STRING, sToken.z) != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } SParsedDataColInfo spd = {0}; @@ -1230,7 +1226,7 @@ int tsParseInsertSql(SSqlObj *pSql) { if (spd.hasVal[t] == true) { code = tscInvalidSQLErrMsg(pCmd->payload, "duplicated column name", sToken.z); - goto _error; + goto _clean; } spd.hasVal[t] = true; @@ -1241,13 +1237,13 @@ int tsParseInsertSql(SSqlObj *pSql) { if (!findColumnIndex) { code = tscInvalidSQLErrMsg(pCmd->payload, "invalid column name", sToken.z); - goto _error; + goto _clean; } } if (spd.numOfAssignedCols == 0 || spd.numOfAssignedCols > tinfo.numOfColumns) { code = tscInvalidSQLErrMsg(pCmd->payload, "column name expected", sToken.z); - goto _error; + goto _clean; } index = 0; @@ -1256,16 +1252,16 @@ int tsParseInsertSql(SSqlObj *pSql) { if (sToken.type != TK_VALUES) { code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES is expected", sToken.z); - goto _error; + goto _clean; } - code = doParseInsertStatement(pSql, pCmd->pTableList, &str, &spd, &totalNum); + code = doParseInsertStatement(pCmd, &str, &spd, &totalNum); if (code != TSDB_CODE_SUCCESS) { - goto _error; + goto _clean; } } else { code = tscInvalidSQLErrMsg(pCmd->payload, "keyword VALUES or FILE are required", sToken.z); - goto _error; + goto _clean; } } @@ -1274,25 +1270,18 @@ int tsParseInsertSql(SSqlObj *pSql) { goto _clean; } - if (taosArrayGetSize(pCmd->pDataBlocks) > 0) { // merge according to vgId - if ((code = tscMergeTableDataBlocks(pSql, pCmd->pDataBlocks)) != TSDB_CODE_SUCCESS) { - goto _error; + if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgId + if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) { + goto _clean; } } code = TSDB_CODE_SUCCESS; goto _clean; -_error: - pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); - _clean: - taosHashCleanup(pCmd->pTableList); - pCmd->pTableList = NULL; - - pCmd->curSql = NULL; + pCmd->curSql = NULL; pCmd->parseFinished = 1; - return code; } @@ -1373,6 +1362,7 @@ int tsParseSql(SSqlObj *pSql, bool initial) { pSql->parseRetry++; ret = tscToSQLCmd(pSql, &SQLInfo); } + SQLInfoDestroy(&SQLInfo); } @@ -1399,7 +1389,7 @@ static int doPackSendDataBlock(SSqlObj *pSql, int32_t numOfRows, STableDataBlock return tscInvalidSQLErrMsg(pCmd->payload, "too many rows in sql, total number of rows should be less than 32767", NULL); } - if ((code = tscMergeTableDataBlocks(pSql, pCmd->pDataBlocks)) != TSDB_CODE_SUCCESS) { + if ((code = tscMergeTableDataBlocks(pSql)) != TSDB_CODE_SUCCESS) { return code; } @@ -1456,18 +1446,21 @@ static void parseFileSendDataBlock(void *param, TAOS_RES *tres, int code) { int32_t count 
= 0; int32_t maxRows = 0; - tscDestroyBlockArrayList(pSql->cmd.pDataBlocks); - pCmd->pDataBlocks = taosArrayInit(1, POINTER_BYTES); + tfree(pCmd->pTableMetaList); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + + if (pCmd->pTableBlockHashList == NULL) { + pCmd->pTableBlockHashList = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); + } STableDataBlocks *pTableDataBlock = NULL; - int32_t ret = tscCreateDataBlock(TSDB_PAYLOAD_SIZE, tinfo.rowSize, sizeof(SSubmitBlk), pTableMetaInfo->name, pTableMeta, &pTableDataBlock); + int32_t ret = tscGetDataBlockFromList(pCmd->pTableBlockHashList, pTableMeta->id.uid, TSDB_PAYLOAD_SIZE, + sizeof(SSubmitBlk), tinfo.rowSize, pTableMetaInfo->name, pTableMeta, &pTableDataBlock, NULL); if (ret != TSDB_CODE_SUCCESS) { // return ret; } - taosArrayPush(pCmd->pDataBlocks, &pTableDataBlock); tscAllocateMemIfNeed(pTableDataBlock, tinfo.rowSize, &maxRows); - char *tokenBuf = calloc(1, 4096); while ((readLen = tgetline(&line, &n, fp)) != -1) { @@ -1529,8 +1522,6 @@ void tscProcessMultiVnodesImportFromFile(SSqlObj *pSql) { SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport)); SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL); - - pNew->cmd.pDataBlocks = taosArrayInit(4, POINTER_BYTES); pCmd->count = 1; FILE *fp = fopen(pCmd->payload, "r"); diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 30e0729427..8134a35811 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -800,9 +800,9 @@ static int insertStmtExecute(STscStmt* stmt) { STableMetaInfo* pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); assert(pCmd->numOfClause == 1); - if (taosArrayGetSize(pCmd->pDataBlocks) > 0) { + if (taosHashGetSize(pCmd->pTableBlockHashList) > 0) { // merge according to vgid - int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks); + int code = tscMergeTableDataBlocks(stmt->pSql); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 18fc79c474..f813ff85d9 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -262,6 +262,11 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { SSqlStream *pStream = pObj->streamList; while (pStream) { tstrncpy(pSdesc->sql, pStream->pSql->sqlstr, sizeof(pSdesc->sql)); + if (pStream->dstTable == NULL) { + pSdesc->dstTable[0] = 0; + } else { + tstrncpy(pSdesc->dstTable, pStream->dstTable, sizeof(pSdesc->dstTable)); + } pSdesc->streamId = htonl(pStream->streamId); pSdesc->num = htobe64(pStream->num); diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 91b00e0109..8e4ef91e27 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1310,7 +1310,7 @@ static int32_t handleArithmeticExpr(SSqlCmd* pCmd, int32_t clauseIndex, int32_t SColumnIndex index = {.tableIndex = tableIndex}; SSqlExpr* pExpr = tscSqlExprAppend(pQueryInfo, TSDB_FUNC_ARITHM, &index, TSDB_DATA_TYPE_DOUBLE, sizeof(double), - -1000, sizeof(double), false); + getNewResColId(pQueryInfo), sizeof(double), false); char* name = (pItem->aliasName != NULL)? pItem->aliasName:pItem->pNode->token.z; size_t len = MIN(sizeof(pExpr->aliasName), pItem->pNode->token.n + 1); @@ -3282,7 +3282,12 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC ((pSchema->type == TSDB_DATA_TYPE_BINARY || pSchema->type == TSDB_DATA_TYPE_NCHAR) ? 
1 : 0); if (pColFilter->filterstr) { - if (pExpr->nSQLOptr != TK_EQ && pExpr->nSQLOptr != TK_NE && pExpr->nSQLOptr != TK_LIKE) { + if (pExpr->nSQLOptr != TK_EQ + && pExpr->nSQLOptr != TK_NE + && pExpr->nSQLOptr != TK_ISNULL + && pExpr->nSQLOptr != TK_NOTNULL + && pExpr->nSQLOptr != TK_LIKE + ) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } } else { @@ -5312,7 +5317,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn // keep original limitation value in globalLimit pQueryInfo->clauseLimit = pQueryInfo->limit.limit; pQueryInfo->prjOffset = pQueryInfo->limit.offset; - pQueryInfo->tableLimit = -1; + pQueryInfo->vgroupLimit = -1; if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { /* @@ -5322,7 +5327,7 @@ int32_t parseLimitClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t clauseIn * than or equal to the value of limit. */ if (pQueryInfo->limit.limit > 0) { - pQueryInfo->tableLimit = pQueryInfo->limit.limit + pQueryInfo->limit.offset; + pQueryInfo->vgroupLimit = pQueryInfo->limit.limit + pQueryInfo->limit.offset; pQueryInfo->limit.limit = -1; } @@ -5908,25 +5913,33 @@ int32_t doLocalQueryProcess(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SQuerySQL* pQ if (pExprList->nExpr != 1) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - + bool server_status = false; tSQLExpr* pExpr = pExprList->a[0].pNode; if (pExpr->operand.z == NULL) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - + //handle 'select 1' + if (pExpr->token.n == 1 && 0 == strncasecmp(pExpr->token.z, "1", 1)) { + server_status = true; + } else { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + } // TODO redefine the function - SDNodeDynConfOption functionsInfo[5] = {{"database()", 10}, - {"server_version()", 16}, - {"server_status()", 15}, - {"client_version()", 16}, - {"current_user()", 14}}; + SDNodeDynConfOption functionsInfo[5] = {{"database()", 10}, + {"server_version()", 16}, + {"server_status()", 15}, + {"client_version()", 16}, + {"current_user()", 14}}; int32_t index = -1; - for (int32_t i = 0; i < tListLen(functionsInfo); ++i) { - if (strncasecmp(functionsInfo[i].name, pExpr->operand.z, functionsInfo[i].len) == 0 && - functionsInfo[i].len == pExpr->operand.n) { - index = i; - break; + if (server_status == true) { + index = 2; + } else { + for (int32_t i = 0; i < tListLen(functionsInfo); ++i) { + if (strncasecmp(functionsInfo[i].name, pExpr->operand.z, functionsInfo[i].len) == 0 && + functionsInfo[i].len == pExpr->operand.n) { + index = i; + break; + } } } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 59bcdd691d..994dace1e3 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -152,7 +152,13 @@ void tscProcessHeartBeatRsp(void *param, TAOS_RES *tres, int code) { SRpcEpSet * epSet = &pRsp->epSet; if (epSet->numOfEps > 0) { tscEpSetHtons(epSet); - tscUpdateMgmtEpSet(pSql, epSet); + if (!tscEpSetIsEqual(&pSql->pTscObj->tscCorMgmtEpSet->epSet, epSet)) { + tscTrace("%p updating epset: numOfEps: %d, inUse: %d", pSql, epSet->numOfEps, epSet->inUse); + for (int8_t i = 0; i < epSet->numOfEps; i++) { + tscTrace("endpoint %d: fqdn = %s, port=%d", i, epSet->fqdn[i], epSet->port[i]); + } + tscUpdateMgmtEpSet(pSql, epSet); + } } pSql->pTscObj->connId = htonl(pRsp->connId); @@ -280,19 +286,19 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { } int32_t cmd = pCmd->command; - if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_INSERT || cmd == 
TSDB_SQL_UPDATE_TAGS_VAL) && + + // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema + if (cmd == TSDB_SQL_INSERT && rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) { + pSql->cmd.submitSchema = 1; + } + + if ((cmd == TSDB_SQL_SELECT || cmd == TSDB_SQL_FETCH || cmd == TSDB_SQL_UPDATE_TAGS_VAL) && (rpcMsg->code == TSDB_CODE_TDB_INVALID_TABLE_ID || rpcMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID || rpcMsg->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || - rpcMsg->code == TSDB_CODE_APP_NOT_READY || - rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE)) { + rpcMsg->code == TSDB_CODE_APP_NOT_READY)) { tscWarn("%p it shall renew table meta, code:%s, retry:%d", pSql, tstrerror(rpcMsg->code), ++pSql->retry); - // set the flag to denote that sql string needs to be re-parsed and build submit block with table schema - if (rpcMsg->code == TSDB_CODE_TDB_TABLE_RECONFIGURE) { - pSql->cmd.submitSchema = 1; - } - pSql->res.code = rpcMsg->code; // keep the previous error code if (pSql->retry > pSql->maxRetry) { tscError("%p max retry %d reached, give up", pSql, pSql->maxRetry); @@ -451,10 +457,10 @@ int tscProcessSql(SSqlObj *pSql) { int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg *) pSql->cmd.payload; - pRetrieveMsg->qhandle = htobe64(pSql->res.qhandle); SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); - pRetrieveMsg->free = htons(pQueryInfo->type); + pRetrieveMsg->free = htons(pQueryInfo->type); + pRetrieveMsg->qhandle = htobe64(pSql->res.qhandle); // todo valid the vgroupId at the client side STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -681,7 +687,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { pQueryMsg->tagNameRelType = htons(pQueryInfo->tagCond.relType); pQueryMsg->numOfTags = htonl(numOfTags); pQueryMsg->queryType = htonl(pQueryInfo->type); - pQueryMsg->tableLimit = htobe64(pQueryInfo->tableLimit); + pQueryMsg->vgroupLimit = htobe64(pQueryInfo->vgroupLimit); size_t numOfOutput = tscSqlExprNumOfExprs(pQueryInfo); pQueryMsg->numOfOutput = htons((int16_t)numOfOutput); // this is the stage one output column number @@ -1394,6 +1400,43 @@ int tscBuildUpdateTagMsg(SSqlObj* pSql, SSqlInfo *pInfo) { return TSDB_CODE_SUCCESS; } +//int tscBuildCancelQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { +// SCancelQueryMsg *pCancelMsg = (SCancelQueryMsg*) pSql->cmd.payload; +// pCancelMsg->qhandle = htobe64(pSql->res.qhandle); +// +// SQueryInfo *pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, pSql->cmd.clauseIndex); +// STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); +// +// if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { +// int32_t vgIndex = pTableMetaInfo->vgroupIndex; +// if (pTableMetaInfo->pVgroupTables == NULL) { +// SVgroupsInfo *pVgroupInfo = pTableMetaInfo->vgroupList; +// assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); +// +// pCancelMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); +// tscDebug("%p build cancel query msg from vgId:%d, vgIndex:%d", pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex); +// } else { +// int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); +// assert(vgIndex >= 0 && vgIndex < numOfVgroups); +// +// SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); +// +// pCancelMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId); +// tscDebug("%p build cancel query msg from vgId:%d, 
vgIndex:%d", pSql, pTableIdList->vgInfo.vgId, vgIndex); +// } +// } else { +// STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; +// pCancelMsg->header.vgId = htonl(pTableMeta->vgroupInfo.vgId); +// tscDebug("%p build cancel query msg from only one vgroup, vgId:%d", pSql, pTableMeta->vgroupInfo.vgId); +// } +// +// pSql->cmd.payloadLen = sizeof(SCancelQueryMsg); +// pSql->cmd.msgType = TSDB_MSG_TYPE_CANCEL_QUERY; +// +// pCancelMsg->header.contLen = htonl(sizeof(SCancelQueryMsg)); +// return TSDB_CODE_SUCCESS; +//} + int tscAlterDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SSqlCmd *pCmd = &pSql->cmd; pCmd->payloadLen = sizeof(SAlterDbMsg); diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index fae5b5856f..d7dec2f356 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -344,7 +344,7 @@ TAOS_RES* taos_query_c(TAOS *taos, const char *sqlstr, uint32_t sqlLen, TAOS_RES return NULL; } - taosNotePrintTsc(sqlstr); + nPrintTsc(sqlstr); SSqlObj* pSql = calloc(1, sizeof(SSqlObj)); if (pSql == NULL) { @@ -900,9 +900,9 @@ int taos_validate_sql(TAOS *taos, const char *sql) { strtolower(pSql->sqlstr, sql); pCmd->curSql = NULL; - if (NULL != pCmd->pTableList) { - taosHashCleanup(pCmd->pTableList); - pCmd->pTableList = NULL; + if (NULL != pCmd->pTableBlockHashList) { + taosHashCleanup(pCmd->pTableBlockHashList); + pCmd->pTableBlockHashList = NULL; } pSql->fp = asyncCallback; diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 68c3bcae16..74b8e4d958 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -535,6 +535,10 @@ static void tscCreateStream(void *param, TAOS_RES *res, int code) { pStream, pTableMetaInfo->name, pStream->interval.interval, pStream->interval.sliding, starttime, pSql->sqlstr); } +void tscSetStreamDestTable(SSqlStream* pStream, const char* dstTable) { + pStream->dstTable = dstTable; +} + TAOS_STREAM *taos_open_stream(TAOS *taos, const char *sqlstr, void (*fp)(void *param, TAOS_RES *, TAOS_ROW row), int64_t stime, void *param, void (*callback)(void *)) { STscObj *pObj = (STscObj *)taos; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 819a323db5..d2eb16795f 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -2149,6 +2149,38 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { } } +static bool needRetryInsert(SSqlObj* pParentObj, int32_t numOfSub) { + if (pParentObj->retry > pParentObj->maxRetry) { + tscError("%p max retry reached, abort the retry effort", pParentObj) + return false; + } + + for (int32_t i = 0; i < numOfSub; ++i) { + int32_t code = pParentObj->pSubs[i]->res.code; + if (code == TSDB_CODE_SUCCESS) { + continue; + } + + if (code != TSDB_CODE_TDB_TABLE_RECONFIGURE && code != TSDB_CODE_TDB_INVALID_TABLE_ID && + code != TSDB_CODE_VND_INVALID_VGROUP_ID && code != TSDB_CODE_RPC_NETWORK_UNAVAIL && + code != TSDB_CODE_APP_NOT_READY) { + pParentObj->res.code = code; + return false; + } + } + + return true; +} + +static void doFreeInsertSupporter(SSqlObj* pSqlObj) { + assert(pSqlObj != NULL && pSqlObj->subState.numOfSub > 0); + + for(int32_t i = 0; i < pSqlObj->subState.numOfSub; ++i) { + SSqlObj* pSql = pSqlObj->pSubs[i]; + tfree(pSql->param); + } +} + static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) { SInsertSupporter *pSupporter = (SInsertSupporter *)param; SSqlObj* pParentObj = pSupporter->pSql; @@ -2163,23 +2195,81 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) 
assert(pSql != NULL && pSql->res.code == numOfRows); pParentObj->res.code = pSql->res.code; - } - tfree(pSupporter); + // set the flag in the parent sqlObj + if (pSql->cmd.submitSchema) { + pParentObj->cmd.submitSchema = 1; + } + } if (atomic_sub_fetch_32(&pParentObj->subState.numOfRemain, 1) > 0) { return; } - - tscDebug("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows); // restore user defined fp pParentObj->fp = pParentObj->fetchFp; + int32_t numOfSub = pParentObj->subState.numOfSub; - // todo remove this parameter in async callback function definition. - // all data has been sent to vnode, call user function - int32_t v = (pParentObj->res.code != TSDB_CODE_SUCCESS) ? pParentObj->res.code : (int32_t)pParentObj->res.numOfRows; - (*pParentObj->fp)(pParentObj->param, pParentObj, v); + if (pParentObj->res.code == TSDB_CODE_SUCCESS) { + tscDebug("%p Async insertion completed, total inserted:%d", pParentObj, pParentObj->res.numOfRows); + doFreeInsertSupporter(pParentObj); + + // todo remove this parameter in async callback function definition. + // all data has been sent to vnode, call user function + int32_t v = (pParentObj->res.code != TSDB_CODE_SUCCESS) ? pParentObj->res.code : (int32_t)pParentObj->res.numOfRows; + (*pParentObj->fp)(pParentObj->param, pParentObj, v); + } else { + if (!needRetryInsert(pParentObj, numOfSub)) { + doFreeInsertSupporter(pParentObj); + tscQueueAsyncRes(pParentObj); + return; + } + + int32_t numOfFailed = 0; + for(int32_t i = 0; i < numOfSub; ++i) { + SSqlObj* pSql = pParentObj->pSubs[i]; + if (pSql->res.code != TSDB_CODE_SUCCESS) { + numOfFailed += 1; + + // clean up tableMeta in cache + tscFreeQueryInfo(&pSql->cmd, true); + SQueryInfo* pQueryInfo = tscGetQueryInfoDetailSafely(&pSql->cmd, 0); + STableMetaInfo* pMasterTableMetaInfo = tscGetTableMetaInfoFromCmd(&pParentObj->cmd, pSql->cmd.clauseIndex, 0); + tscAddTableMetaInfo(pQueryInfo, pMasterTableMetaInfo->name, NULL, NULL, NULL, NULL); + + tscDebug("%p, failed sub:%d, %p", pParentObj, i, pSql); + } + } + + tscError("%p Async insertion completed, total inserted:%d rows, numOfFailed:%d, numOfTotal:%d", pParentObj, + pParentObj->res.numOfRows, numOfFailed, numOfSub); + + tscDebug("%p cleanup %d tableMeta in cache", pParentObj, pParentObj->cmd.numOfTables); + for(int32_t i = 0; i < pParentObj->cmd.numOfTables; ++i) { + taosCacheRelease(tscMetaCache, (void**)&(pParentObj->cmd.pTableMetaList[i]), true); + } + + pParentObj->cmd.parseFinished = false; + pParentObj->subState.numOfRemain = numOfFailed; + + tscResetSqlCmdObj(&pParentObj->cmd, false); + + // in case of insert, redo parsing the sql string and build new submit data block for two reasons: + // 1. the table Id(tid & uid) may have been update, the submit block needs to be updated accordingly. + // 2. vnode may need the schema information along with submit block to update its local table schema. 
+ tscDebug("%p re-parse sql to generate submit data, retry:%d", pParentObj, pParentObj->retry++); + int32_t code = tsParseSql(pParentObj, true); + if (code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) return; + + if (code != TSDB_CODE_SUCCESS) { + pParentObj->res.code = code; + doFreeInsertSupporter(pParentObj); + tscQueueAsyncRes(pParentObj); + return; + } + + tscDoQuery(pParentObj); + } } /** @@ -2187,19 +2277,19 @@ static void multiVnodeInsertFinalize(void* param, TAOS_RES* tres, int numOfRows) * @param pSql * @return */ -int32_t tscHandleInsertRetry(SSqlObj* pSql) { +int32_t tscHandleInsertRetry(SSqlObj* pParent, SSqlObj* pSql) { assert(pSql != NULL && pSql->param != NULL); - SSqlCmd* pCmd = &pSql->cmd; +// SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; SInsertSupporter* pSupporter = (SInsertSupporter*) pSql->param; assert(pSupporter->index < pSupporter->pSql->subState.numOfSub); - STableDataBlocks* pTableDataBlock = taosArrayGetP(pCmd->pDataBlocks, pSupporter->index); + STableDataBlocks* pTableDataBlock = taosArrayGetP(pParent->cmd.pDataBlocks, pSupporter->index); int32_t code = tscCopyDataBlockToPayload(pSql, pTableDataBlock); // free the data block created from insert sql string - pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); +// pCmd->pDataBlocks = tscDestroyBlockArrayList(pParent->cmd.pDataBlocks); if ((pRes->code = code)!= TSDB_CODE_SUCCESS) { tscQueueAsyncRes(pSql); @@ -2213,6 +2303,20 @@ int32_t tscHandleMultivnodeInsert(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; + // it is the failure retry insert + if (pSql->pSubs != NULL) { + for(int32_t i = 0; i < pSql->subState.numOfSub; ++i) { + SSqlObj* pSub = pSql->pSubs[i]; + + tscDebug("%p sub:%p launch sub insert, orderOfSub:%d", pSql, pSub, i); + if (pSub->res.code != TSDB_CODE_SUCCESS) { + tscHandleInsertRetry(pSql, pSub); + } + } + + return TSDB_CODE_SUCCESS; + } + pSql->subState.numOfSub = (uint16_t)taosArrayGetSize(pCmd->pDataBlocks); assert(pSql->subState.numOfSub > 0); diff --git a/src/client/src/tscSystem.c b/src/client/src/tscSystem.c index 03b6ac8404..1eddeacc65 100644 --- a/src/client/src/tscSystem.c +++ b/src/client/src/tscSystem.c @@ -17,6 +17,7 @@ #include "taosmsg.h" #include "tref.h" #include "trpc.h" +#include "tnote.h" #include "tsystem.h" #include "ttimer.h" #include "tutil.h" @@ -41,7 +42,6 @@ int tscRefId = -1; int tscNumOfThreads; static pthread_once_t tscinit = PTHREAD_ONCE_INIT; -void taosInitNote(int numOfNoteLines, int maxNotes, char* lable); //void tscUpdateEpSet(void *ahandle, SRpcEpSet *pEpSet); void tscCheckDiskUsage(void *UNUSED_PARAM(para), void* UNUSED_PARAM(param)) { @@ -78,7 +78,6 @@ int32_t tscInitRpc(const char *user, const char *secretEncrypt, void **pDnodeCon return 0; } - void taos_init_imp(void) { char temp[128] = {0}; @@ -104,6 +103,7 @@ void taos_init_imp(void) { taosReadGlobalCfg(); taosCheckGlobalCfg(); + taosInitNotes(); rpcInit(); tscDebug("starting to initialize TAOS client ..."); @@ -111,11 +111,6 @@ void taos_init_imp(void) { } taosSetCoreDump(); - - if (tsTscEnableRecordSql != 0) { - taosInitNote(tsNumOfLogLines / 10, 1, (char*)"tsc_note"); - } - tscInitMsgsFp(); int queueSize = tsMaxConnections*2; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index a98132d319..10a860b1ff 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -333,13 +333,15 @@ void tscSetResRawPtr(SSqlRes* pRes, SQueryInfo* pQueryInfo) { if (isNull(p, TSDB_DATA_TYPE_NCHAR)) { memcpy(dst, p, varDataTLen(p)); - } else { + } else if 
(varDataLen(p) > 0) { int32_t length = taosUcs4ToMbs(varDataVal(p), varDataLen(p), varDataVal(dst)); varDataSetLen(dst, length); if (length == 0) { tscError("charset:%s to %s. val:%s convert failed.", DEFAULT_UNICODE_ENCODEC, tsCharset, (char*)p); } + } else { + varDataSetLen(dst, 0); } p += pInfo->field.bytes; @@ -377,7 +379,7 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { pRes->data = NULL; // pRes->data points to the buffer of pRsp, no need to free } -static void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) { +void tscFreeQueryInfo(SSqlCmd* pCmd, bool removeFromCache) { if (pCmd == NULL || pCmd->numOfClause == 0) { return; } @@ -403,12 +405,18 @@ void tscResetSqlCmdObj(SSqlCmd* pCmd, bool removeFromCache) { pCmd->msgType = 0; pCmd->parseFinished = 0; pCmd->autoCreated = 0; - - taosHashCleanup(pCmd->pTableList); - pCmd->pTableList = NULL; - - pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); + for(int32_t i = 0; i < pCmd->numOfTables; ++i) { + if (pCmd->pTableMetaList && pCmd->pTableMetaList[i]) { + taosCacheRelease(tscMetaCache, (void**)&(pCmd->pTableMetaList[i]), false); + } + } + + pCmd->numOfTables = 0; + tfree(pCmd->pTableMetaList); + + pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList); + pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); tscFreeQueryInfo(pCmd, removeFromCache); } @@ -575,6 +583,21 @@ void* tscDestroyBlockArrayList(SArray* pDataBlockList) { return NULL; } +void* tscDestroyBlockHashTable(SHashObj* pBlockHashTable) { + if (pBlockHashTable == NULL) { + return NULL; + } + + STableDataBlocks** p = taosHashIterate(pBlockHashTable, NULL); + while(p) { + tscDestroyDataBlock(*p); + p = taosHashIterate(pBlockHashTable, p); + } + + taosHashCleanup(pBlockHashTable); + return NULL; +} + int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock) { SSqlCmd* pCmd = &pSql->cmd; assert(pDataBlock->pTableMeta != NULL); @@ -671,9 +694,8 @@ int32_t tscCreateDataBlock(size_t initialSize, int32_t rowSize, int32_t startOff return TSDB_CODE_SUCCESS; } -int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t id, int32_t size, - int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, - STableDataBlocks** dataBlocks) { +int32_t tscGetDataBlockFromList(SHashObj* pHashList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, const char* tableId, STableMeta* pTableMeta, + STableDataBlocks** dataBlocks, SArray* pBlockList) { *dataBlocks = NULL; STableDataBlocks** t1 = (STableDataBlocks**)taosHashGet(pHashList, (const char*)&id, sizeof(id)); @@ -688,7 +710,9 @@ int32_t tscGetDataBlockFromList(void* pHashList, SArray* pDataBlockList, int64_t } taosHashPut(pHashList, (const char*)&id, sizeof(int64_t), (char*)dataBlocks, POINTER_BYTES); - taosArrayPush(pDataBlockList, dataBlocks); + if (pBlockList) { + taosArrayPush(pBlockList, dataBlocks); + } } return TSDB_CODE_SUCCESS; @@ -769,22 +793,37 @@ static int32_t getRowExpandSize(STableMeta* pTableMeta) { return result; } -int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { +static void extractTableMeta(SSqlCmd* pCmd) { + pCmd->numOfTables = (int32_t) taosHashGetSize(pCmd->pTableBlockHashList); + pCmd->pTableMetaList = calloc(pCmd->numOfTables, POINTER_BYTES); + + STableDataBlocks **p1 = taosHashIterate(pCmd->pTableBlockHashList, NULL); + int32_t i = 0; + while(p1) { + STableDataBlocks* pBlocks = *p1; + pCmd->pTableMetaList[i++] = taosCacheTransfer(tscMetaCache, (void**) 
&pBlocks->pTableMeta); + p1 = taosHashIterate(pCmd->pTableBlockHashList, p1); + } + + pCmd->pTableBlockHashList = tscDestroyBlockHashTable(pCmd->pTableBlockHashList); +} + +int32_t tscMergeTableDataBlocks(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; void* pVnodeDataBlockHashList = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, false); SArray* pVnodeDataBlockList = taosArrayInit(8, POINTER_BYTES); - size_t total = taosArrayGetSize(pTableDataBlockList); - for (int32_t i = 0; i < total; ++i) { + STableDataBlocks** p = taosHashIterate(pCmd->pTableBlockHashList, NULL); + + STableDataBlocks* pOneTableBlock = *p; + while(pOneTableBlock) { // the maximum expanded size in byte when a row-wise data is converted to SDataRow format - STableDataBlocks* pOneTableBlock = taosArrayGetP(pTableDataBlockList, i); int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); STableDataBlocks* dataBuf = NULL; - int32_t ret = - tscGetDataBlockFromList(pVnodeDataBlockHashList, pVnodeDataBlockList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, - tsInsertHeadSize, 0, pOneTableBlock->tableId, pOneTableBlock->pTableMeta, &dataBuf); + int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, + tsInsertHeadSize, 0, pOneTableBlock->tableId, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); if (ret != TSDB_CODE_SUCCESS) { tscError("%p failed to prepare the data block buffer for merging table data, code:%d", pSql, ret); taosHashCleanup(pVnodeDataBlockHashList); @@ -839,14 +878,19 @@ int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SArray* pTableDataBlockList) { // the length does not include the SSubmitBlk structure pBlocks->dataLen = htonl(finalLen); dataBuf->numOfTables += 1; + + p = taosHashIterate(pCmd->pTableBlockHashList, p); + if (p == NULL) { + break; + } + + pOneTableBlock = *p; } - tscDestroyBlockArrayList(pTableDataBlockList); + extractTableMeta(pCmd); // free the table data blocks; pCmd->pDataBlocks = pVnodeDataBlockList; - -// tscFreeUnusedDataBlocks(pCmd->pDataBlocks); taosHashCleanup(pVnodeDataBlockHashList); return TSDB_CODE_SUCCESS; @@ -2006,7 +2050,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void pnCmd->numOfClause = 0; pnCmd->clauseIndex = 0; pnCmd->pDataBlocks = NULL; + + pnCmd->numOfTables = 0; pnCmd->parseFinished = 1; + pnCmd->pTableMetaList = NULL; + pnCmd->pTableBlockHashList = NULL; if (tscAddSubqueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; @@ -2023,6 +2071,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void pNewQueryInfo->limit = pQueryInfo->limit; pNewQueryInfo->slimit = pQueryInfo->slimit; pNewQueryInfo->order = pQueryInfo->order; + pNewQueryInfo->vgroupLimit = pQueryInfo->vgroupLimit; pNewQueryInfo->tsBuf = NULL; pNewQueryInfo->fillType = pQueryInfo->fillType; pNewQueryInfo->fillVal = NULL; diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h index 69bbccd67e..bec8590536 100644 --- a/src/common/inc/tcmdtype.h +++ b/src/common/inc/tcmdtype.h @@ -36,7 +36,7 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_FETCH, "fetch" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_INSERT, "insert" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_UPDATE_TAGS_VAL, "update-tag-val" ) - + // the SQL below is for mgmt node TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MGMT, "mgmt" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DB, "create-db" ) diff --git a/src/common/inc/tglobal.h b/src/common/inc/tglobal.h index 851ca57ba9..b5bb8998b4 100644 --- a/src/common/inc/tglobal.h 
+++ b/src/common/inc/tglobal.h @@ -35,6 +35,7 @@ extern int32_t tsNumOfMnodes; extern int32_t tsEnableVnodeBak; extern int32_t tsEnableTelemetryReporting; extern char tsEmail[]; +extern char tsArbitrator[]; // common extern int tsRpcTimer; @@ -45,7 +46,7 @@ extern int32_t tsShellActivityTimer; extern uint32_t tsMaxTmrCtrl; extern float tsNumOfThreadsPerCore; extern int32_t tsNumOfCommitThreads; -extern float tsRatioOfQueryThreads; // todo remove it +extern float tsRatioOfQueryCores; extern int8_t tsDaylight; extern char tsTimezone[]; extern char tsLocale[]; @@ -56,7 +57,7 @@ extern char tsTempDir[]; //query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer for each data node during query processing -extern int32_t tsHalfCoresForQuery; // only 50% will be used in query processing +extern int32_t tsRetrieveBlockingModel; // only 50% will be used in query processing // client extern int32_t tsTableMetaKeepTimer; diff --git a/src/common/inc/tname.h b/src/common/inc/tname.h index 6c48ca72f3..9e0093ebfe 100644 --- a/src/common/inc/tname.h +++ b/src/common/inc/tname.h @@ -25,6 +25,8 @@ void extractTableName(const char *tableId, char *name); char* extractDBName(const char *tableId, char *name); +size_t tableIdPrefix(const char* name, char* prefix, int32_t len); + void extractTableNameFromToken(SStrToken *pToken, SStrToken* pTable); SSchema tGetTableNameColumnSchema(); diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index a912cdfd7f..662df49027 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -52,7 +52,7 @@ int32_t tsMaxConnections = 5000; int32_t tsShellActivityTimer = 3; // second float tsNumOfThreadsPerCore = 1.0f; int32_t tsNumOfCommitThreads = 1; -float tsRatioOfQueryThreads = 0.5f; +float tsRatioOfQueryCores = 1.0f; int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; char tsLocale[TSDB_LOCALE_LEN] = {0}; @@ -107,8 +107,8 @@ int64_t tsMaxRetentWindow = 24 * 3600L; // maximum time window tolerance // positive value (in MB) int32_t tsQueryBufferSize = -1; -// only 50% cpu will be used in query processing in dnode -int32_t tsHalfCoresForQuery = 0; +// in retrieve blocking model, the retrieve threads will wait for the completion of the query processing. 
+int32_t tsRetrieveBlockingModel = 0; // db parameters int32_t tsCacheBlockSize = TSDB_DEFAULT_CACHE_BLOCK_SIZE; @@ -203,8 +203,8 @@ int32_t tsVersion = 0; // log int32_t tsNumOfLogLines = 10000000; -int32_t mDebugFlag = 135; -int32_t sdbDebugFlag = 135; +int32_t mDebugFlag = 131; +int32_t sdbDebugFlag = 131; int32_t dDebugFlag = 135; int32_t vDebugFlag = 135; int32_t cDebugFlag = 131; @@ -220,7 +220,7 @@ int32_t debugFlag = 0; int32_t sDebugFlag = 135; int32_t wDebugFlag = 135; int32_t tsdbDebugFlag = 131; -int32_t cqDebugFlag = 135; +int32_t cqDebugFlag = 131; int32_t (*monStartSystemFp)() = NULL; void (*monStopSystemFp)() = NULL; @@ -416,7 +416,7 @@ static void doInitGlobalConfig(void) { cfg.option = "arbitrator"; cfg.ptr = tsArbitrator; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = TSDB_EP_LEN; @@ -444,12 +444,12 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); - cfg.option = "ratioOfQueryThreads"; - cfg.ptr = &tsRatioOfQueryThreads; + cfg.option = "ratioOfQueryCores"; + cfg.ptr = &tsRatioOfQueryCores; cfg.valType = TAOS_CFG_VTYPE_FLOAT; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG; - cfg.minValue = 0.1f; - cfg.maxValue = 0.9f; + cfg.minValue = 0.0f; + cfg.maxValue = 2.0f; cfg.ptrLength = 0; cfg.unitType = TAOS_CFG_UTYPE_NONE; taosInitConfigOption(cfg); @@ -887,8 +887,8 @@ static void doInitGlobalConfig(void) { cfg.unitType = TAOS_CFG_UTYPE_BYTE; taosInitConfigOption(cfg); - cfg.option = "halfCoresForQuery"; - cfg.ptr = &tsHalfCoresForQuery; + cfg.option = "retrieveBlockingModel"; + cfg.ptr = &tsRetrieveBlockingModel; cfg.valType = TAOS_CFG_VTYPE_INT32; cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW; cfg.minValue = 0; @@ -901,7 +901,7 @@ static void doInitGlobalConfig(void) { cfg.option = "timezone"; cfg.ptr = tsTimezone; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = tListLen(tsTimezone); @@ -911,7 +911,7 @@ static void doInitGlobalConfig(void) { cfg.option = "locale"; cfg.ptr = tsLocale; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = tListLen(tsLocale); @@ -921,7 +921,7 @@ static void doInitGlobalConfig(void) { cfg.option = "charset"; cfg.ptr = tsCharset; cfg.valType = TAOS_CFG_VTYPE_STRING; - cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_CLIENT; + cfg.cfgType = TSDB_CFG_CTYPE_B_CONFIG | TSDB_CFG_CTYPE_B_SHOW | TSDB_CFG_CTYPE_B_CLIENT; cfg.minValue = 0; cfg.maxValue = 0; cfg.ptrLength = tListLen(tsCharset); diff --git a/src/common/src/tname.c b/src/common/src/tname.c index bea8c52ef2..5c351edf48 100644 --- a/src/common/src/tname.c +++ b/src/common/src/tname.c @@ -39,6 +39,13 @@ char* extractDBName(const char* tableId, char* name) { return strncpy(name, &tableId[offset1 + 1], len); } +size_t tableIdPrefix(const char* name, char* prefix, int32_t len) { + tstrncpy(prefix, name, len); + strcat(prefix, TS_PATH_DELIMITER); + + return strlen(prefix); +} + SSchema tGetTableNameColumnSchema() { SSchema s = {0}; s.bytes = 
TSDB_TABLE_NAME_LEN - 1 + VARSTR_HEADER_SIZE; @@ -198,4 +205,4 @@ SSchema tscGetTbnameColumnSchema() { strcpy(s.name, TSQL_TBNAME_L); return s; -} \ No newline at end of file +} diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 0eb3eb21ce..e289f1ae1b 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.14-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.15-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index 4564bde81e..1a86bc57dc 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.14 + 2.0.15 jar JDBCDriver @@ -36,7 +36,6 @@ - commons-logging diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 7e087ebd9b..25a36e3a48 100755 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.14 + 2.0.15 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java index cdd88b825e..cd2a768a38 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBStatement.java @@ -81,7 +81,7 @@ public class TSDBStatement implements Statement { } if (!this.connector.isUpdateQuery(pSql)) { - TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer); + TSDBResultSet res = new TSDBResultSet(this.connector, resultSetPointer); res.setBatchFetch(this.connection.getBatchFetch()); return res; } else { @@ -125,7 +125,8 @@ public class TSDBStatement implements Statement { } public int getMaxFieldSize() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return 0; +// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } public void setMaxFieldSize(int max) throws SQLException { @@ -218,7 +219,8 @@ public class TSDBStatement implements Statement { } public int getFetchDirection() throws SQLException { - throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); + return ResultSet.FETCH_FORWARD; +// throw new SQLException(TSDBConstants.UNSUPPORT_METHOD_EXCEPTIONZ_MSG); } /* diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c index efb8795962..e278c3a7cc 100644 --- a/src/cq/src/cqMain.c +++ b/src/cq/src/cqMain.c @@ -57,6 +57,7 @@ typedef struct SCqObj { uint64_t uid; int32_t tid; // table ID int32_t rowSize; // bytes of a row + char * dstTable; char * sqlStr; // SQL string STSchema * pSchema; // pointer to schema array void * pStream; @@ -160,7 +161,7 @@ void cqStop(void *handle) { return; } SCqContext *pContext = handle; - cInfo("vgId:%d, stop all CQs", pContext->vgId); + cDebug("vgId:%d, stop all CQs", 
pContext->vgId); if (pContext->dbConn == NULL || pContext->master == 0) return; pthread_mutex_lock(&pContext->mutex); @@ -185,7 +186,7 @@ void cqStop(void *handle) { pthread_mutex_unlock(&pContext->mutex); } -void *cqCreate(void *handle, uint64_t uid, int32_t tid, char *sqlStr, STSchema *pSchema) { +void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema) { if (tsEnableStream == 0) { return NULL; } @@ -195,9 +196,11 @@ void *cqCreate(void *handle, uint64_t uid, int32_t tid, char *sqlStr, STSchema * if (pObj == NULL) return NULL; pObj->uid = uid; - pObj->tid = tid; - pObj->sqlStr = malloc(strlen(sqlStr)+1); - strcpy(pObj->sqlStr, sqlStr); + pObj->tid = sid; + if (dstTable != NULL) { + pObj->dstTable = strdup(dstTable); + } + pObj->sqlStr = strdup(sqlStr); pObj->pSchema = tdDupSchema(pSchema); pObj->rowSize = schemaTLen(pSchema); @@ -247,6 +250,7 @@ void cqDrop(void *handle) { cInfo("vgId:%d, id:%d CQ:%s is dropped", pContext->vgId, pObj->tid, pObj->sqlStr); tdFreeSchema(pObj->pSchema); + free(pObj->dstTable); free(pObj->sqlStr); free(pObj); @@ -292,6 +296,7 @@ static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) { if (pObj->pStream == NULL) { pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, 0, pObj, NULL); if (pObj->pStream) { + tscSetStreamDestTable(pObj->pStream, pObj->dstTable); pContext->num++; cInfo("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr); } else { diff --git a/src/cq/test/cqtest.c b/src/cq/test/cqtest.c index 41380f0d86..f378835f0a 100644 --- a/src/cq/test/cqtest.c +++ b/src/cq/test/cqtest.c @@ -70,7 +70,7 @@ int main(int argc, char *argv[]) { tdDestroyTSchemaBuilder(&schemaBuilder); for (int sid =1; sid<10; ++sid) { - cqCreate(pCq, sid, sid, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema); + cqCreate(pCq, sid, sid, NULL, "select avg(speed) from demo.t1 sliding(1s) interval(5s)", pSchema); } tdFreeSchema(pSchema); diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index 5608cfd6d1..ebb25bf858 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -12,7 +12,7 @@ AUX_SOURCE_DIRECTORY(src SRC) IF (TD_LINUX) ADD_EXECUTABLE(taosd ${SRC}) - TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4) + TARGET_LINK_LIBRARIES(taosd mnode monitor http tsdb twal vnode cJson lz4 balance sync) IF (TD_SOMODE_STATIC) TARGET_LINK_LIBRARIES(taosd taos_static) @@ -32,10 +32,6 @@ IF (TD_LINUX) TARGET_LINK_LIBRARIES(taosd mqtt) ENDIF () - IF (TD_SYNC) - TARGET_LINK_LIBRARIES(taosd balance sync) - ENDIF () - SET(PREPARE_ENV_CMD "prepare_env_cmd") SET(PREPARE_ENV_TARGET "prepare_env_target") ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD} diff --git a/src/dnode/inc/dnodeCfg.h b/src/dnode/inc/dnodeCfg.h index 35d8896460..d74303f325 100644 --- a/src/dnode/inc/dnodeCfg.h +++ b/src/dnode/inc/dnodeCfg.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitCfg(); void dnodeCleanupCfg(); diff --git a/src/dnode/inc/dnodeCheck.h b/src/dnode/inc/dnodeCheck.h index a4880b3c11..c94b9e9319 100644 --- a/src/dnode/inc/dnodeCheck.h +++ b/src/dnode/inc/dnodeCheck.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitCheck(); void dnodeCleanupCheck(); diff --git a/src/dnode/inc/dnodeEps.h b/src/dnode/inc/dnodeEps.h index 2a203498c1..a5840997b0 100644 --- a/src/dnode/inc/dnodeEps.h +++ b/src/dnode/inc/dnodeEps.h @@ -19,8 +19,7 @@ #ifdef 
__cplusplus extern "C" { #endif - -#include "taosmsg.h" +#include "dnodeInt.h" int32_t dnodeInitEps(); void dnodeCleanupEps(); diff --git a/src/dnode/inc/dnodeInt.h b/src/dnode/inc/dnodeInt.h index f4cbee1d13..1327cd4433 100644 --- a/src/dnode/inc/dnodeInt.h +++ b/src/dnode/inc/dnodeInt.h @@ -19,8 +19,13 @@ #ifdef __cplusplus extern "C" { #endif - +#include "taoserror.h" +#include "taosmsg.h" #include "tlog.h" +#include "trpc.h" +#include "tglobal.h" +#include "dnode.h" +#include "vnode.h" extern int32_t dDebugFlag; @@ -31,6 +36,14 @@ extern int32_t dDebugFlag; #define dDebug(...) { if (dDebugFlag & DEBUG_DEBUG) { taosPrintLog("DND ", dDebugFlag, __VA_ARGS__); }} #define dTrace(...) { if (dDebugFlag & DEBUG_TRACE) { taosPrintLog("DND ", dDebugFlag, __VA_ARGS__); }} +typedef enum { + TSDB_RUN_STATUS_INITIALIZE, + TSDB_RUN_STATUS_RUNING, + TSDB_RUN_STATUS_STOPPED +} SRunStatus; + +SRunStatus dnodeGetRunStatus(); + #ifdef __cplusplus } #endif diff --git a/src/dnode/inc/dnodeMInfos.h b/src/dnode/inc/dnodeMInfos.h index 2c3eef5d5d..f05e2b6f7b 100644 --- a/src/dnode/inc/dnodeMInfos.h +++ b/src/dnode/inc/dnodeMInfos.h @@ -19,8 +19,7 @@ #ifdef __cplusplus extern "C" { #endif - -#include "taosmsg.h" +#include "dnodeInt.h" int32_t dnodeInitMInfos(); void dnodeCleanupMInfos(); @@ -29,6 +28,10 @@ void dnodeUpdateEpSetForPeer(SRpcEpSet *pEpSet); void dnodeGetMInfos(SMInfos *pMinfos); bool dnodeIsMasterEp(char *ep); +void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell); +void dnodeGetEpSetForPeer(SRpcEpSet *epSet); +void dnodeGetEpSetForShell(SRpcEpSet *epSet); + #ifdef __cplusplus } #endif diff --git a/src/dnode/inc/dnodeMPeer.h b/src/dnode/inc/dnodeMPeer.h index 00221baa22..b7e566d7e4 100644 --- a/src/dnode/inc/dnodeMPeer.h +++ b/src/dnode/inc/dnodeMPeer.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitMPeer(); void dnodeCleanupMPeer(); diff --git a/src/dnode/inc/dnodeMRead.h b/src/dnode/inc/dnodeMRead.h index 8a8e71227d..279098d30e 100644 --- a/src/dnode/inc/dnodeMRead.h +++ b/src/dnode/inc/dnodeMRead.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitMRead(); void dnodeCleanupMRead(); diff --git a/src/dnode/inc/dnodeMWrite.h b/src/dnode/inc/dnodeMWrite.h index 6a3d41bc81..8d4fcce3be 100644 --- a/src/dnode/inc/dnodeMWrite.h +++ b/src/dnode/inc/dnodeMWrite.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitMWrite(); void dnodeCleanupMWrite(); diff --git a/src/dnode/inc/dnodeMain.h b/src/dnode/inc/dnodeMain.h index c1480407bd..ca79d53afd 100644 --- a/src/dnode/inc/dnodeMain.h +++ b/src/dnode/inc/dnodeMain.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitSystem(); void dnodeCleanUpSystem(); diff --git a/src/dnode/inc/dnodeModule.h b/src/dnode/inc/dnodeModule.h index 8618de3244..e645784c8f 100644 --- a/src/dnode/inc/dnodeModule.h +++ b/src/dnode/inc/dnodeModule.h @@ -19,10 +19,11 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitModules(); -void dnodeStartModules(); void dnodeCleanupModules(); +bool dnodeStartMnode(SMInfos *pMinfos); void dnodeProcessModuleStatus(uint32_t moduleStatus); #ifdef __cplusplus diff --git a/src/dnode/inc/dnodePeer.h b/src/dnode/inc/dnodePeer.h index 0dcf48f232..6d337ef6dc 100644 --- a/src/dnode/inc/dnodePeer.h +++ b/src/dnode/inc/dnodePeer.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitServer(); 
void dnodeCleanupServer(); diff --git a/src/dnode/inc/dnodeShell.h b/src/dnode/inc/dnodeShell.h index 300c86c599..3fa66d6a3b 100644 --- a/src/dnode/inc/dnodeShell.h +++ b/src/dnode/inc/dnodeShell.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitShell(); void dnodeCleanupShell(); diff --git a/src/dnode/inc/dnodeStep.h b/src/dnode/inc/dnodeStep.h new file mode 100644 index 0000000000..e181e19c46 --- /dev/null +++ b/src/dnode/inc/dnodeStep.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_DNODE_STEP_H +#define TDENGINE_DNODE_STEP_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "dnodeInt.h" + +int32_t dnodeStepInit(SStep *pSteps, int32_t stepSize); +void dnodeStepCleanup(SStep *pSteps, int32_t stepSize); +void dnodeReportStep(char *name, char *desc, int8_t finished); +void dnodeSendStartupStep(SRpcMsg *pMsg); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/dnode/inc/dnodeTelemetry.h b/src/dnode/inc/dnodeTelemetry.h index 6fb62556ae..e4fd5a0376 100644 --- a/src/dnode/inc/dnodeTelemetry.h +++ b/src/dnode/inc/dnodeTelemetry.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitTelemetry(); void dnodeCleanupTelemetry(); diff --git a/src/dnode/inc/dnodeVMgmt.h b/src/dnode/inc/dnodeVMgmt.h new file mode 100644 index 0000000000..821196defc --- /dev/null +++ b/src/dnode/inc/dnodeVMgmt.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_DNODE_VMGMT_H +#define TDENGINE_DNODE_VMGMT_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "dnodeInt.h" + +int32_t dnodeInitVMgmt(); +void dnodeCleanupVMgmt(); +void dnodeDispatchToVMgmtQueue(SRpcMsg *rpcMsg); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/dnode/inc/dnodeVRead.h b/src/dnode/inc/dnodeVRead.h index 5b17693146..9c88886f88 100644 --- a/src/dnode/inc/dnodeVRead.h +++ b/src/dnode/inc/dnodeVRead.h @@ -19,12 +19,15 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitVRead(); void dnodeCleanupVRead(); void dnodeDispatchToVReadQueue(SRpcMsg *pMsg); -void * dnodeAllocVReadQueue(void *pVnode); -void dnodeFreeVReadQueue(void *pRqueue); +void * dnodeAllocVQueryQueue(void *pVnode); +void * dnodeAllocVFetchQueue(void *pVnode); +void dnodeFreeVQueryQueue(void *pQqueue); +void dnodeFreeVFetchQueue(void *pFqueue); #ifdef __cplusplus } diff --git a/src/dnode/inc/dnodeVWrite.h b/src/dnode/inc/dnodeVWrite.h index 759e9ca8a5..2ddff210f8 100644 --- a/src/dnode/inc/dnodeVWrite.h +++ b/src/dnode/inc/dnodeVWrite.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "dnodeInt.h" int32_t dnodeInitVWrite(); void dnodeCleanupVWrite(); diff --git a/src/dnode/inc/dnodeVnodes.h b/src/dnode/inc/dnodeVnodes.h new file mode 100644 index 0000000000..e60dd290ce --- /dev/null +++ b/src/dnode/inc/dnodeVnodes.h @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_DNODE_VNODES_H +#define TDENGINE_DNODE_VNODES_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "dnodeInt.h" + +int32_t dnodeInitVnodes(); +void dnodeCleanupVnodes(); +int32_t dnodeInitStatusTimer(); +void dnodeCleanupStatusTimer(); +void dnodeSendStatusMsgToMnode(); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/dnode/src/dnodeCfg.c b/src/dnode/src/dnodeCfg.c index 16d109a13a..89249d773b 100644 --- a/src/dnode/src/dnodeCfg.c +++ b/src/dnode/src/dnodeCfg.c @@ -16,9 +16,6 @@ #define _DEFAULT_SOURCE #include "os.h" #include "cJSON.h" -#include "tglobal.h" -#include "dnode.h" -#include "dnodeInt.h" #include "dnodeCfg.h" static SDnodeCfg tsCfg = {0}; diff --git a/src/dnode/src/dnodeCheck.c b/src/dnode/src/dnodeCheck.c index a9ee4ac649..be26bb967b 100644 --- a/src/dnode/src/dnodeCheck.c +++ b/src/dnode/src/dnodeCheck.c @@ -15,8 +15,6 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "tglobal.h" -#include "dnodeInt.h" #include "dnodeCheck.h" typedef struct { diff --git a/src/dnode/src/dnodeEps.c b/src/dnode/src/dnodeEps.c index 83f294e05e..e7dc7efeb2 100644 --- a/src/dnode/src/dnodeEps.c +++ b/src/dnode/src/dnodeEps.c @@ -16,10 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "cJSON.h" -#include "tglobal.h" #include "hash.h" -#include "dnode.h" -#include "dnodeInt.h" #include "dnodeEps.h" static SDnodeEps *tsEps = NULL; @@ -133,7 +130,7 @@ static void dnodePrintEps(SDnodeEps *eps) { dDebug("print dnodeEp, dnodeNum:%d", eps->dnodeNum); for (int32_t i = 0; i < eps->dnodeNum; i++) { SDnodeEp *ep = &eps->dnodeEps[i]; - dDebug("dnodeId:%d, dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort); + dDebug("dnode:%d, dnodeFqdn:%s dnodePort:%u", ep->dnodeId, ep->dnodeFqdn, ep->dnodePort); } } @@ -236,7 +233,14 @@ PRASE_EPS_OVER: dnodeResetEps(eps); if (eps) free(eps); +#if 0 dnodeUpdateEp(dnodeGetDnodeId(), tsLocalEp, tsLocalFqdn, &tsServerPort); +#else + if (dnodeCheckEpChanged(dnodeGetDnodeId(), tsLocalEp)) { + dError("dnode:%d, localEp is different from %s in dnodeEps.json and need reconfigured", dnodeGetDnodeId(), tsLocalEp); + return -1; + } +#endif terrno = 0; return 0; diff --git a/src/dnode/src/dnodeMInfos.c b/src/dnode/src/dnodeMInfos.c index 162de2243e..7c385a889d 100644 --- a/src/dnode/src/dnodeMInfos.c +++ b/src/dnode/src/dnodeMInfos.c @@ -16,10 +16,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "cJSON.h" -#include "tglobal.h" #include "mnode.h" -#include "dnode.h" -#include "dnodeInt.h" #include "dnodeMInfos.h" static SMInfos tsMInfos; @@ -157,12 +154,13 @@ static void dnodeResetMInfos(SMInfos *pMinfos) { } static int32_t dnodeReadMInfos() { - int32_t len = 0; - int32_t maxLen = 2000; - char * content = calloc(1, maxLen + 1); - cJSON * root = NULL; - FILE * fp = NULL; - SMInfos minfos = {0}; + int32_t len = 0; + int32_t maxLen = 2000; + char * content = calloc(1, maxLen + 1); + cJSON * root = NULL; + FILE * fp = NULL; + SMInfos minfos = {0}; + bool nodeChanged = false; char file[TSDB_FILENAME_LEN + 20] = {0}; sprintf(file, "%s/mnodeEpSet.json", tsDnodeDir); @@ -221,14 +219,19 @@ static int32_t dnodeReadMInfos() { dError("failed to read mnodeEpSet.json, nodeId not found"); goto PARSE_MINFOS_OVER; } - minfos.mnodeInfos[i].mnodeId = nodeId->valueint; cJSON *nodeEp = cJSON_GetObjectItem(nodeInfo, "nodeEp"); if (!nodeEp || nodeEp->type != cJSON_String || nodeEp->valuestring == NULL) { dError("failed to read mnodeEpSet.json, nodeName not found"); goto PARSE_MINFOS_OVER; } - 
strncpy(minfos.mnodeInfos[i].mnodeEp, nodeEp->valuestring, TSDB_EP_LEN); + + SMInfo *pMinfo = &minfos.mnodeInfos[i]; + pMinfo->mnodeId = nodeId->valueint; + tstrncpy(pMinfo->mnodeEp, nodeEp->valuestring, TSDB_EP_LEN); + + bool changed = dnodeCheckEpChanged(pMinfo->mnodeId, pMinfo->mnodeEp); + if (changed) nodeChanged = changed; } dInfo("read file %s successed", file); @@ -245,6 +248,11 @@ PARSE_MINFOS_OVER: dnodeUpdateEp(mInfo->mnodeId, mInfo->mnodeEp, NULL, NULL); } dnodeResetMInfos(&minfos); + + if (nodeChanged) { + dnodeWriteMInfos(); + } + return 0; } @@ -286,3 +294,25 @@ static int32_t dnodeWriteMInfos() { dInfo("successed to write %s", file); return 0; } + +void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell) { + SRpcConnInfo connInfo = {0}; + rpcGetConnInfo(rpcMsg->handle, &connInfo); + + SRpcEpSet epSet = {0}; + if (forShell) { + dnodeGetEpSetForShell(&epSet); + } else { + dnodeGetEpSetForPeer(&epSet); + } + + dDebug("msg:%s will be redirected, dnodeIp:%s user:%s, numOfEps:%d inUse:%d", taosMsg[rpcMsg->msgType], + taosIpStr(connInfo.clientIp), connInfo.user, epSet.numOfEps, epSet.inUse); + + for (int32_t i = 0; i < epSet.numOfEps; ++i) { + dDebug("mnode index:%d %s:%d", i, epSet.fqdn[i], epSet.port[i]); + epSet.port[i] = htons(epSet.port[i]); + } + + rpcSendRedirectRsp(rpcMsg->handle, &epSet); +} \ No newline at end of file diff --git a/src/dnode/src/dnodeMPeer.c b/src/dnode/src/dnodeMPeer.c index ee6dc5212e..0863666f76 100644 --- a/src/dnode/src/dnodeMPeer.c +++ b/src/dnode/src/dnodeMPeer.c @@ -15,16 +15,11 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" -#include "taosmsg.h" -#include "tutil.h" #include "tqueue.h" #include "twal.h" -#include "tglobal.h" #include "mnode.h" -#include "dnode.h" -#include "dnodeInt.h" -#include "dnodeMgmt.h" +#include "dnodeVMgmt.h" +#include "dnodeMInfos.h" #include "dnodeMWrite.h" typedef struct { diff --git a/src/dnode/src/dnodeMRead.c b/src/dnode/src/dnodeMRead.c index 65f3af7b3b..0fc6400d99 100644 --- a/src/dnode/src/dnodeMRead.c +++ b/src/dnode/src/dnodeMRead.c @@ -15,16 +15,11 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" -#include "taosmsg.h" -#include "tutil.h" #include "tqueue.h" #include "twal.h" -#include "tglobal.h" #include "mnode.h" -#include "dnode.h" -#include "dnodeInt.h" -#include "dnodeMgmt.h" +#include "dnodeVMgmt.h" +#include "dnodeMInfos.h" #include "dnodeMRead.h" typedef struct { diff --git a/src/dnode/src/dnodeMWrite.c b/src/dnode/src/dnodeMWrite.c index ef2d49ef42..414b66653d 100644 --- a/src/dnode/src/dnodeMWrite.c +++ b/src/dnode/src/dnodeMWrite.c @@ -15,17 +15,11 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" -#include "taosmsg.h" -#include "tutil.h" #include "ttimer.h" #include "tqueue.h" -#include "twal.h" -#include "tglobal.h" #include "mnode.h" -#include "dnode.h" -#include "dnodeInt.h" -#include "dnodeMgmt.h" +#include "dnodeVMgmt.h" +#include "dnodeMInfos.h" #include "dnodeMWrite.h" typedef struct { @@ -127,7 +121,7 @@ void dnodeDispatchToMWriteQueue(SRpcMsg *pMsg) { dnodeSendRedirectMsg(pMsg, true); } else { SMnodeMsg *pWrite = mnodeCreateMsg(pMsg); - dDebug("msg:%p, app:%p type:%s is put into mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, + dTrace("msg:%p, app:%p type:%s is put into mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue); taosWriteQitem(tsMWriteQueue, TAOS_QTYPE_RPC, pWrite); } @@ -136,7 +130,7 @@ void dnodeDispatchToMWriteQueue(SRpcMsg *pMsg) { } static void dnodeFreeMWriteMsg(SMnodeMsg 
*pWrite) { - dDebug("msg:%p, app:%p type:%s is freed from mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, + dTrace("msg:%p, app:%p type:%s is freed from mwrite queue:%p", pWrite, pWrite->rpcMsg.ahandle, taosMsg[pWrite->rpcMsg.msgType], tsMWriteQueue); mnodeCleanupMsg(pWrite); diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index 9f52dbd331..730dcf3681 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -16,15 +16,14 @@ #define _DEFAULT_SOURCE #include "os.h" #include "taos.h" -#include "tutil.h" +#include "tnote.h" +#include "ttimer.h" #include "tconfig.h" -#include "tglobal.h" #include "tfile.h" #include "twal.h" -#include "trpc.h" -#include "dnode.h" -#include "dnodeInt.h" -#include "dnodeMgmt.h" +// #include "tfs.h" +#include "tsync.h" +#include "dnodeStep.h" #include "dnodePeer.h" #include "dnodeModule.h" #include "dnodeEps.h" @@ -33,50 +32,47 @@ #include "dnodeCheck.h" #include "dnodeVRead.h" #include "dnodeVWrite.h" +#include "dnodeVMgmt.h" +#include "dnodeVnodes.h" #include "dnodeMRead.h" #include "dnodeMWrite.h" #include "dnodeMPeer.h" #include "dnodeShell.h" #include "dnodeTelemetry.h" +void *tsDnodeTmr = NULL; static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED; static int32_t dnodeInitStorage(); static void dnodeCleanupStorage(); static void dnodeSetRunStatus(SRunStatus status); static void dnodeCheckDataDirOpenned(char *dir); -static int32_t dnodeInitComponents(); -static void dnodeCleanupComponents(int32_t stepId); static int dnodeCreateDir(const char *dir); -typedef struct { - const char *const name; - int32_t (*init)(); - void (*cleanup)(); -} SDnodeComponent; - -static const SDnodeComponent tsDnodeComponents[] = { - {"tfile", tfInit, tfCleanup}, - {"rpc", rpcInit, rpcCleanup}, - {"storage", dnodeInitStorage, dnodeCleanupStorage}, - {"dnodecfg", dnodeInitCfg, dnodeCleanupCfg}, - {"dnodeeps", dnodeInitEps, dnodeCleanupEps}, - {"globalcfg" ,taosCheckGlobalCfg, NULL}, - {"mnodeinfos",dnodeInitMInfos, dnodeCleanupMInfos}, - {"wal", walInit, walCleanUp}, - {"check", dnodeInitCheck, dnodeCleanupCheck}, // NOTES: dnodeInitCheck must be behind the dnodeinitStorage component !!! - {"vread", dnodeInitVRead, dnodeCleanupVRead}, - {"vwrite", dnodeInitVWrite, dnodeCleanupVWrite}, - {"mread", dnodeInitMRead, dnodeCleanupMRead}, - {"mwrite", dnodeInitMWrite, dnodeCleanupMWrite}, - {"mpeer", dnodeInitMPeer, dnodeCleanupMPeer}, - {"client", dnodeInitClient, dnodeCleanupClient}, - {"server", dnodeInitServer, dnodeCleanupServer}, - {"mgmt", dnodeInitMgmt, dnodeCleanupMgmt}, - {"modules", dnodeInitModules, dnodeCleanupModules}, - {"mgmt-tmr", dnodeInitMgmtTimer, dnodeCleanupMgmtTimer}, - {"shell", dnodeInitShell, dnodeCleanupShell}, - {"telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, +static SStep tsDnodeSteps[] = { + {"dnode-tfile", tfInit, tfCleanup}, + {"dnode-rpc", rpcInit, rpcCleanup}, + {"dnode-globalcfg", taosCheckGlobalCfg, NULL}, + {"dnode-storage", dnodeInitStorage, dnodeCleanupStorage}, + {"dnode-cfg", dnodeInitCfg, dnodeCleanupCfg}, + {"dnode-eps", dnodeInitEps, dnodeCleanupEps}, + {"dnode-minfos", dnodeInitMInfos, dnodeCleanupMInfos}, + {"dnode-wal", walInit, walCleanUp}, + {"dnode-sync", syncInit, syncCleanUp}, + {"dnode-check", dnodeInitCheck, dnodeCleanupCheck}, // NOTES: dnodeInitCheck must be behind the dnodeinitStorage component !!! 
+ {"dnode-vread", dnodeInitVRead, dnodeCleanupVRead}, + {"dnode-vwrite", dnodeInitVWrite, dnodeCleanupVWrite}, + {"dnode-vmgmt", dnodeInitVMgmt, dnodeCleanupVMgmt}, + {"dnode-mread", dnodeInitMRead, dnodeCleanupMRead}, + {"dnode-mwrite", dnodeInitMWrite, dnodeCleanupMWrite}, + {"dnode-mpeer", dnodeInitMPeer, dnodeCleanupMPeer}, + {"dnode-client", dnodeInitClient, dnodeCleanupClient}, + {"dnode-server", dnodeInitServer, dnodeCleanupServer}, + {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes}, + {"dnode-modules", dnodeInitModules, dnodeCleanupModules}, + {"dnode-shell", dnodeInitShell, dnodeCleanupShell}, + {"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer}, + {"dnode-telemetry", dnodeInitTelemetry, dnodeCleanupTelemetry}, }; static int dnodeCreateDir(const char *dir) { @@ -87,24 +83,31 @@ static int dnodeCreateDir(const char *dir) { return 0; } -static void dnodeCleanupComponents(int32_t stepId) { - for (int32_t i = stepId; i >= 0; i--) { - if (tsDnodeComponents[i].cleanup) { - (*tsDnodeComponents[i].cleanup)(); - } - } +static void dnodeCleanupComponents() { + int32_t stepSize = sizeof(tsDnodeSteps) / sizeof(SStep); + dnodeStepCleanup(tsDnodeSteps, stepSize); } static int32_t dnodeInitComponents() { - int32_t code = 0; - for (int32_t i = 0; i < sizeof(tsDnodeComponents) / sizeof(tsDnodeComponents[0]); i++) { - if (tsDnodeComponents[i].init() != 0) { - dnodeCleanupComponents(i); - code = -1; - break; - } + int32_t stepSize = sizeof(tsDnodeSteps) / sizeof(SStep); + return dnodeStepInit(tsDnodeSteps, stepSize); +} + +static int32_t dnodeInitTmr() { + tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM"); + if (tsDnodeTmr == NULL) { + dError("failed to init dnode timer"); + return -1; + } + + return 0; +} + +static void dnodeCleanupTmr() { + if (tsDnodeTmr != NULL) { + taosTmrCleanUp(tsDnodeTmr); + tsDnodeTmr = NULL; } - return code; } int32_t dnodeInitSystem() { @@ -115,6 +118,8 @@ int32_t dnodeInitSystem() { taosInitGlobalCfg(); taosReadGlobalLogCfg(); taosSetCoreDump(); + taosInitNotes(); + dnodeInitTmr(); signal(SIGPIPE, SIG_IGN); if (dnodeCreateDir(tsLogDir) < 0) { @@ -140,7 +145,6 @@ int32_t dnodeInitSystem() { return -1; } - dnodeStartModules(); dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING); dInfo("TDengine is initialized successfully"); @@ -151,7 +155,8 @@ int32_t dnodeInitSystem() { void dnodeCleanUpSystem() { if (dnodeGetRunStatus() != TSDB_RUN_STATUS_STOPPED) { dnodeSetRunStatus(TSDB_RUN_STATUS_STOPPED); - dnodeCleanupComponents(sizeof(tsDnodeComponents) / sizeof(tsDnodeComponents[0]) - 1); + dnodeCleanupTmr(); + dnodeCleanupComponents(); taos_cleanup(); taosCloseLog(); } diff --git a/src/dnode/src/dnodeMgmt.c b/src/dnode/src/dnodeMgmt.c deleted file mode 100644 index 15378c77c1..0000000000 --- a/src/dnode/src/dnodeMgmt.c +++ /dev/null @@ -1,572 +0,0 @@ -/* - * Copyright (c) 2019 TAOS Data, Inc. - * - * This program is free software: you can use, redistribute, and/or modify - * it under the terms of the GNU Affero General Public License, version 3 - * or later ("AGPL"), as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. - * - * You should have received a copy of the GNU Affero General Public License - * along with this program. If not, see . 
- */ - -#define _DEFAULT_SOURCE -#include "os.h" -#include "cJSON.h" -#include "taoserror.h" -#include "taosmsg.h" -#include "ttimer.h" -#include "tsdb.h" -#include "twal.h" -#include "tqueue.h" -#include "tsync.h" -#include "ttimer.h" -#include "tbn.h" -#include "tglobal.h" -#include "dnode.h" -#include "vnode.h" -#include "mnode.h" -#include "dnodeInt.h" -#include "dnodeMgmt.h" -#include "dnodeEps.h" -#include "dnodeCfg.h" -#include "dnodeMInfos.h" -#include "dnodeVRead.h" -#include "dnodeVWrite.h" -#include "dnodeModule.h" - -typedef struct { - pthread_t thread; - int32_t threadIndex; - int32_t failed; - int32_t opened; - int32_t vnodeNum; - int32_t * vnodeList; -} SOpenVnodeThread; - -typedef struct { - SRpcMsg rpcMsg; - char pCont[]; -} SMgmtMsg; - -void * tsDnodeTmr = NULL; -static void * tsStatusTimer = NULL; -static uint32_t tsRebootTime; -static taos_qset tsMgmtQset = NULL; -static taos_queue tsMgmtQueue = NULL; -static pthread_t tsQthread; - -static void dnodeProcessStatusRsp(SRpcMsg *pMsg); -static void dnodeSendStatusMsg(void *handle, void *tmrId); -static void *dnodeProcessMgmtQueue(void *param); - -static int32_t dnodeOpenVnodes(); -static void dnodeCloseVnodes(); -static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg); -static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg); -static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg); -static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg); -static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg); -static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg); -static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg); - -int32_t dnodeInitMgmt() { - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg; - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg; - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg; - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg; - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg; - dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeProcessCreateMnodeMsg; - - dnodeAddClientRspHandle(TSDB_MSG_TYPE_DM_STATUS_RSP, dnodeProcessStatusRsp); - tsRebootTime = taosGetTimestampSec(); - - int32_t code = vnodeInitResources(); - if (code != TSDB_CODE_SUCCESS) { - dnodeCleanupMgmt(); - return -1; - } - - // create the queue and thread to handle the message - tsMgmtQset = taosOpenQset(); - if (tsMgmtQset == NULL) { - dError("failed to create the mgmt queue set"); - dnodeCleanupMgmt(); - return -1; - } - - tsMgmtQueue = taosOpenQueue(); - if (tsMgmtQueue == NULL) { - dError("failed to create the mgmt queue"); - dnodeCleanupMgmt(); - return -1; - } - - taosAddIntoQset(tsMgmtQset, tsMgmtQueue, NULL); - - pthread_attr_t thAttr; - pthread_attr_init(&thAttr); - pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - - code = pthread_create(&tsQthread, &thAttr, dnodeProcessMgmtQueue, NULL); - pthread_attr_destroy(&thAttr); - if (code != 0) { - dError("failed to create thread to process mgmt queue, reason:%s", strerror(errno)); - dnodeCleanupMgmt(); - return -1; - } - - code = dnodeOpenVnodes(); - if (code != TSDB_CODE_SUCCESS) { - dnodeCleanupMgmt(); - return -1; - } - - dInfo("dnode mgmt is initialized"); - - return TSDB_CODE_SUCCESS; -} - -int32_t dnodeInitMgmtTimer() { - tsDnodeTmr = taosTmrInit(100, 200, 60000, "DND-DM"); - if (tsDnodeTmr == NULL) { - dError("failed to init dnode timer"); - dnodeCleanupMgmt(); - return -1; - } - - 
taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsDnodeTmr, &tsStatusTimer); - dInfo("dnode mgmt timer is initialized"); - return TSDB_CODE_SUCCESS; -} - -void dnodeSendStatusMsgToMnode() { - if (tsDnodeTmr != NULL && tsStatusTimer != NULL) { - dInfo("force send status msg to mnode"); - taosTmrReset(dnodeSendStatusMsg, 3, NULL, tsDnodeTmr, &tsStatusTimer); - } -} - -void dnodeCleanupMgmtTimer() { - if (tsStatusTimer != NULL) { - taosTmrStopA(&tsStatusTimer); - tsStatusTimer = NULL; - } - - if (tsDnodeTmr != NULL) { - taosTmrCleanUp(tsDnodeTmr); - tsDnodeTmr = NULL; - } -} - -void dnodeCleanupMgmt() { - dnodeCleanupMgmtTimer(); - dnodeCloseVnodes(); - - if (tsMgmtQset) taosQsetThreadResume(tsMgmtQset); - if (tsQthread) pthread_join(tsQthread, NULL); - - if (tsMgmtQueue) taosCloseQueue(tsMgmtQueue); - if (tsMgmtQset) taosCloseQset(tsMgmtQset); - tsMgmtQset = NULL; - tsMgmtQueue = NULL; - - vnodeCleanupResources(); -} - -static int32_t dnodeWriteToMgmtQueue(SRpcMsg *pMsg) { - int32_t size = sizeof(SMgmtMsg) + pMsg->contLen; - SMgmtMsg *pMgmt = taosAllocateQitem(size); - if (pMgmt == NULL) { - return TSDB_CODE_DND_OUT_OF_MEMORY; - } - - pMgmt->rpcMsg = *pMsg; - pMgmt->rpcMsg.pCont = pMgmt->pCont; - memcpy(pMgmt->pCont, pMsg->pCont, pMsg->contLen); - taosWriteQitem(tsMgmtQueue, TAOS_QTYPE_RPC, pMgmt); - - return TSDB_CODE_SUCCESS; -} - -void dnodeDispatchToMgmtQueue(SRpcMsg *pMsg) { - int32_t code = dnodeWriteToMgmtQueue(pMsg); - if (code != TSDB_CODE_SUCCESS) { - SRpcMsg rsp = {.handle = pMsg->handle, .code = code}; - rpcSendResponse(&rsp); - } - - rpcFreeCont(pMsg->pCont); -} - -static void *dnodeProcessMgmtQueue(void *param) { - SMgmtMsg *pMgmt; - SRpcMsg * pMsg; - SRpcMsg rsp = {0}; - int32_t qtype; - void * handle; - - while (1) { - if (taosReadQitemFromQset(tsMgmtQset, &qtype, (void **)&pMgmt, &handle) == 0) { - dDebug("qset:%p, dnode mgmt got no message from qset, exit", tsMgmtQset); - break; - } - - pMsg = &pMgmt->rpcMsg; - dDebug("msg:%p, ahandle:%p type:%s will be processed", pMgmt, pMsg->ahandle, taosMsg[pMsg->msgType]); - if (dnodeProcessMgmtMsgFp[pMsg->msgType]) { - rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg); - } else { - rsp.code = TSDB_CODE_DND_MSG_NOT_PROCESSED; - } - - rsp.handle = pMsg->handle; - rsp.pCont = NULL; - rpcSendResponse(&rsp); - - taosFreeQitem(pMsg); - } - - return NULL; -} - -static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { - DIR *dir = opendir(tsVnodeDir); - if (dir == NULL) { - return TSDB_CODE_DND_NO_WRITE_ACCESS; - } - - *numOfVnodes = 0; - struct dirent *de = NULL; - while ((de = readdir(dir)) != NULL) { - if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue; - if (de->d_type & DT_DIR) { - if (strncmp("vnode", de->d_name, 5) != 0) continue; - int32_t vnode = atoi(de->d_name + 5); - if (vnode == 0) continue; - - (*numOfVnodes)++; - - if (*numOfVnodes >= TSDB_MAX_VNODES) { - dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES); - continue; - } else { - vnodeList[*numOfVnodes - 1] = vnode; - } - } - } - closedir(dir); - - return TSDB_CODE_SUCCESS; -} - -static void *dnodeOpenVnode(void *param) { - SOpenVnodeThread *pThread = param; - char vnodeDir[TSDB_FILENAME_LEN * 3]; - - dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum); - - for (int32_t v = 0; v < pThread->vnodeNum; ++v) { - int32_t vgId = pThread->vnodeList[v]; - snprintf(vnodeDir, TSDB_FILENAME_LEN * 3, "%s/vnode%d", tsVnodeDir, vgId); - if 
(vnodeOpen(vgId, vnodeDir) < 0) { - dError("vgId:%d, failed to open vnode by thread:%d", vgId, pThread->threadIndex); - pThread->failed++; - } else { - dDebug("vgId:%d, is openned by thread:%d", vgId, pThread->threadIndex); - pThread->opened++; - } - } - - dDebug("thread:%d, total vnodes:%d, openned:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened, - pThread->failed); - return NULL; -} - -static int32_t dnodeOpenVnodes() { - int32_t vnodeList[TSDB_MAX_VNODES] = {0}; - int32_t numOfVnodes = 0; - int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes); - - if (status != TSDB_CODE_SUCCESS) { - dInfo("get dnode list failed"); - return status; - } - - int32_t threadNum = tsNumOfCores; - int32_t vnodesPerThread = numOfVnodes / threadNum + 1; - SOpenVnodeThread *threads = calloc(threadNum, sizeof(SOpenVnodeThread)); - for (int32_t t = 0; t < threadNum; ++t) { - threads[t].threadIndex = t; - threads[t].vnodeList = calloc(vnodesPerThread, sizeof(int32_t)); - } - - for (int32_t v = 0; v < numOfVnodes; ++v) { - int32_t t = v % threadNum; - SOpenVnodeThread *pThread = &threads[t]; - pThread->vnodeList[pThread->vnodeNum++] = vnodeList[v]; - } - - dDebug("start %d threads to open %d vnodes", threadNum, numOfVnodes); - - for (int32_t t = 0; t < threadNum; ++t) { - SOpenVnodeThread *pThread = &threads[t]; - if (pThread->vnodeNum == 0) continue; - - pthread_attr_t thAttr; - pthread_attr_init(&thAttr); - pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - if (pthread_create(&pThread->thread, &thAttr, dnodeOpenVnode, pThread) != 0) { - dError("thread:%d, failed to create thread to open vnode, reason:%s", pThread->threadIndex, strerror(errno)); - } - - pthread_attr_destroy(&thAttr); - } - - int32_t openVnodes = 0; - int32_t failedVnodes = 0; - for (int32_t t = 0; t < threadNum; ++t) { - SOpenVnodeThread *pThread = &threads[t]; - if (pThread->vnodeNum > 0 && pThread->thread) { - pthread_join(pThread->thread, NULL); - } - openVnodes += pThread->opened; - failedVnodes += pThread->failed; - free(pThread->vnodeList); - } - - free(threads); - dInfo("there are total vnodes:%d, openned:%d failed:%d", numOfVnodes, openVnodes, failedVnodes); - - return TSDB_CODE_SUCCESS; -} - -static void dnodeCloseVnodes() { - int32_t vnodeList[TSDB_MAX_VNODES]= {0}; - int32_t numOfVnodes = 0; - int32_t status; - - status = vnodeGetVnodeList(vnodeList, &numOfVnodes); - - if (status != TSDB_CODE_SUCCESS) { - dInfo("get dnode list failed"); - return; - } - - for (int32_t i = 0; i < numOfVnodes; ++i) { - vnodeClose(vnodeList[i]); - } - - dInfo("total vnodes:%d are all closed", numOfVnodes); -} - -static void* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) { - SCreateVnodeMsg *pCreate = rpcMsg->pCont; - pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); - pCreate->cfg.cfgVersion = htonl(pCreate->cfg.cfgVersion); - pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables); - pCreate->cfg.cacheBlockSize = htonl(pCreate->cfg.cacheBlockSize); - pCreate->cfg.totalBlocks = htonl(pCreate->cfg.totalBlocks); - pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); - pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1); - pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2); - pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep); - pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock); - pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock); - pCreate->cfg.fsyncPeriod = htonl(pCreate->cfg.fsyncPeriod); - pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime); - - 
for (int32_t j = 0; j < pCreate->cfg.replications; ++j) { - pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId); - } - - return pCreate; -} - -static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { - SCreateVnodeMsg *pCreate = dnodeParseVnodeMsg(rpcMsg); - - void *pVnode = vnodeAcquire(pCreate->cfg.vgId); - if (pVnode != NULL) { - dDebug("vgId:%d, already exist, return success", pCreate->cfg.vgId); - vnodeRelease(pVnode); - return TSDB_CODE_SUCCESS; - } else { - dDebug("vgId:%d, create vnode msg is received", pCreate->cfg.vgId); - return vnodeCreate(pCreate); - } -} - -static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { - SAlterVnodeMsg *pAlter = dnodeParseVnodeMsg(rpcMsg); - - void *pVnode = vnodeAcquire(pAlter->cfg.vgId); - if (pVnode != NULL) { - dDebug("vgId:%d, alter vnode msg is received", pAlter->cfg.vgId); - int32_t code = vnodeAlter(pVnode, pAlter); - vnodeRelease(pVnode); - return code; - } else { - dError("vgId:%d, vnode not exist, can't alter it", pAlter->cfg.vgId); - return TSDB_CODE_VND_INVALID_VGROUP_ID; - } -} - -static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { - SDropVnodeMsg *pDrop = rpcMsg->pCont; - pDrop->vgId = htonl(pDrop->vgId); - - return vnodeDrop(pDrop->vgId); -} - -static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) { -// SAlterStreamMsg *pStream = pCont; -// pStream->uid = htobe64(pStream->uid); -// pStream->stime = htobe64(pStream->stime); -// pStream->vnode = htonl(pStream->vnode); -// pStream->sid = htonl(pStream->sid); -// pStream->status = htonl(pStream->status); -// -// int32_t code = dnodeCreateStream(pStream); - - return 0; -} - -static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg) { - SCfgDnodeMsg *pCfg = pMsg->pCont; - return taosCfgDynamicOptions(pCfg->config); -} - -static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { - SCreateMnodeMsg *pCfg = pMsg->pCont; - pCfg->dnodeId = htonl(pCfg->dnodeId); - if (pCfg->dnodeId != dnodeGetDnodeId()) { - dDebug("dnodeId:%d, in create mnode msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId()); - return TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED; - } - - if (strcmp(pCfg->dnodeEp, tsLocalEp) != 0) { - dDebug("dnodeEp:%s, in create mnode msg is not equal with saved dnodeEp:%s", pCfg->dnodeEp, tsLocalEp); - return TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED; - } - - dDebug("dnodeId:%d, create mnode msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum); - for (int i = 0; i < pCfg->mnodes.mnodeNum; ++i) { - pCfg->mnodes.mnodeInfos[i].mnodeId = htonl(pCfg->mnodes.mnodeInfos[i].mnodeId); - dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp); - } - - dnodeStartMnode(&pCfg->mnodes); - - return TSDB_CODE_SUCCESS; -} - -static void dnodeProcessStatusRsp(SRpcMsg *pMsg) { - if (pMsg->code != TSDB_CODE_SUCCESS) { - dError("status rsp is received, error:%s", tstrerror(pMsg->code)); - taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); - return; - } - - SStatusRsp *pStatusRsp = pMsg->pCont; - SMInfos *pMinfos = &pStatusRsp->mnodes; - dnodeUpdateMInfos(pMinfos); - - SDnodeCfg *pCfg = &pStatusRsp->dnodeCfg; - pCfg->numOfVnodes = htonl(pCfg->numOfVnodes); - pCfg->moduleStatus = htonl(pCfg->moduleStatus); - pCfg->dnodeId = htonl(pCfg->dnodeId); - dnodeUpdateCfg(pCfg); - - vnodeSetAccess(pStatusRsp->vgAccess, pCfg->numOfVnodes); - - SDnodeEps *pEps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess)); - 
dnodeUpdateEps(pEps); - - taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); -} - -static void dnodeSendStatusMsg(void *handle, void *tmrId) { - if (tsDnodeTmr == NULL) { - dError("dnode timer is already released"); - return; - } - - if (tsStatusTimer == NULL) { - taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); - dError("failed to start status timer"); - return; - } - - int32_t contLen = sizeof(SStatusMsg) + TSDB_MAX_VNODES * sizeof(SVnodeLoad); - SStatusMsg *pStatus = rpcMallocCont(contLen); - if (pStatus == NULL) { - taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); - dError("failed to malloc status message"); - return; - } - - dnodeGetCfg(&pStatus->dnodeId, pStatus->clusterId); - pStatus->dnodeId = htonl(dnodeGetDnodeId()); - pStatus->version = htonl(tsVersion); - pStatus->lastReboot = htonl(tsRebootTime); - pStatus->numOfCores = htons((uint16_t) tsNumOfCores); - pStatus->diskAvailable = tsAvailDataDirGB; - pStatus->alternativeRole = (uint8_t) tsAlternativeRole; - tstrncpy(pStatus->dnodeEp, tsLocalEp, TSDB_EP_LEN); - - // fill cluster cfg parameters - pStatus->clusterCfg.numOfMnodes = htonl(tsNumOfMnodes); - pStatus->clusterCfg.enableBalance = htonl(tsEnableBalance); - pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum); - pStatus->clusterCfg.offlineThreshold = htonl(tsOfflineThreshold); - pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval); - pStatus->clusterCfg.maxtablesPerVnode = htonl(tsMaxTablePerVnode); - pStatus->clusterCfg.maxVgroupsPerDb = htonl(tsMaxVgroupsPerDb); - tstrncpy(pStatus->clusterCfg.arbitrator, tsArbitrator, TSDB_EP_LEN); - tstrncpy(pStatus->clusterCfg.timezone, tsTimezone, 64); - pStatus->clusterCfg.checkTime = 0; - char timestr[32] = "1970-01-01 00:00:00.00"; - (void)taosParseTime(timestr, &pStatus->clusterCfg.checkTime, strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0); - tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN); - tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN); - - vnodeBuildStatusMsg(pStatus); - contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad); - pStatus->openVnodes = htons(pStatus->openVnodes); - - SRpcMsg rpcMsg = { - .pCont = pStatus, - .contLen = contLen, - .msgType = TSDB_MSG_TYPE_DM_STATUS - }; - - SRpcEpSet epSet; - dnodeGetEpSetForPeer(&epSet); - dnodeSendMsgToDnode(&epSet, &rpcMsg); -} - -void dnodeSendRedirectMsg(SRpcMsg *rpcMsg, bool forShell) { - SRpcConnInfo connInfo = {0}; - rpcGetConnInfo(rpcMsg->handle, &connInfo); - - SRpcEpSet epSet = {0}; - if (forShell) { - dnodeGetEpSetForShell(&epSet); - } else { - dnodeGetEpSetForPeer(&epSet); - } - - dDebug("msg:%s will be redirected, dnodeIp:%s user:%s, numOfEps:%d inUse:%d", taosMsg[rpcMsg->msgType], - taosIpStr(connInfo.clientIp), connInfo.user, epSet.numOfEps, epSet.inUse); - - for (int i = 0; i < epSet.numOfEps; ++i) { - dDebug("mnode index:%d %s:%d", i, epSet.fqdn[i], epSet.port[i]); - epSet.port[i] = htons(epSet.port[i]); - } - - rpcSendRedirectRsp(rpcMsg->handle, &epSet); -} diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c index f664618f51..62de85445c 100644 --- a/src/dnode/src/dnodeModule.c +++ b/src/dnode/src/dnodeModule.c @@ -15,15 +15,10 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taosdef.h" -#include "taosmsg.h" -#include "tglobal.h" #include "mnode.h" #include "http.h" #include "tmqtt.h" #include "monitor.h" -#include "dnode.h" -#include 
"dnodeInt.h" #include "dnodeModule.h" typedef struct { @@ -102,6 +97,20 @@ void dnodeCleanupModules() { } } +static int32_t dnodeStartModules() { + for (EModuleType module = 1; module < TSDB_MOD_MAX; ++module) { + if (tsModule[module].enable && tsModule[module].startFp) { + int32_t code = (*tsModule[module].startFp)(); + if (code != 0) { + dError("failed to start module:%s, code:%d", tsModule[module].name, code); + return code; + } + } + } + + return 0; +} + int32_t dnodeInitModules() { dnodeAllocModules(); @@ -115,17 +124,7 @@ int32_t dnodeInitModules() { } dInfo("dnode modules is initialized"); - return 0; -} - -void dnodeStartModules() { - for (EModuleType module = 1; module < TSDB_MOD_MAX; ++module) { - if (tsModule[module].enable && tsModule[module].startFp) { - if ((*tsModule[module].startFp)() != 0) { - dError("failed to start module:%s", tsModule[module].name); - } - } - } + return dnodeStartModules(); } void dnodeProcessModuleStatus(uint32_t moduleStatus) { diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c index 6b5b28622b..de0c360c88 100644 --- a/src/dnode/src/dnodePeer.c +++ b/src/dnode/src/dnodePeer.c @@ -21,15 +21,12 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taosmsg.h" -#include "tglobal.h" #include "mnode.h" -#include "dnode.h" -#include "dnodeInt.h" -#include "dnodeMgmt.h" +#include "dnodeVMgmt.h" #include "dnodeVWrite.h" #include "dnodeMPeer.h" #include "dnodeMInfos.h" +#include "dnodeStep.h" static void (*dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *); static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *); @@ -44,19 +41,19 @@ int32_t dnodeInitServer() { dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = dnodeDispatchToVWriteQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = dnodeDispatchToVWriteQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToMgmtQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeDispatchToMgmtQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeDispatchToMgmtQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToMgmtQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToMgmtQueue; - dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeDispatchToMgmtQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToVMgmtQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeDispatchToVMgmtQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeDispatchToVMgmtQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToVMgmtQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToVMgmtQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeDispatchToVMgmtQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_TABLE] = dnodeDispatchToMPeerQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_CONFIG_VNODE] = dnodeDispatchToMPeerQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_AUTH] = dnodeDispatchToMPeerQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_GRANT] = dnodeDispatchToMPeerQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_DM_STATUS] = dnodeDispatchToMPeerQueue; - + SRpcInit rpcInit; memset(&rpcInit, 0, sizeof(rpcInit)); rpcInit.localPort = tsDnodeDnodePort; @@ -91,14 +88,15 @@ static void dnodeProcessReqMsgFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { .pCont = NULL, .contLen = 0 }; - + if (pMsg->pCont == NULL) return; + if (pMsg->msgType == TSDB_MSG_TYPE_NETWORK_TEST) return dnodeSendStartupStep(pMsg); if (dnodeGetRunStatus() != 
TSDB_RUN_STATUS_RUNING) { rspMsg.code = TSDB_CODE_APP_NOT_READY; rpcSendResponse(&rspMsg); rpcFreeCont(pMsg->pCont); - dDebug("RPC %p, msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]); + dTrace("RPC %p, msg:%s is ignored since dnode not running", pMsg->handle, taosMsg[pMsg->msgType]); return; } @@ -153,7 +151,7 @@ void dnodeCleanupClient() { static void dnodeProcessRspFromDnode(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { if (dnodeGetRunStatus() == TSDB_RUN_STATUS_STOPPED) { if (pMsg == NULL || pMsg->pCont == NULL) return; - dDebug("msg:%p is ignored since dnode is stopping", pMsg); + dTrace("msg:%p is ignored since dnode is stopping", pMsg); rpcFreeCont(pMsg->pCont); return; } diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 89f657f789..79cc70005b 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -15,20 +15,14 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" -#include "taosdef.h" -#include "taosmsg.h" -#include "tglobal.h" -#include "tutil.h" #include "http.h" #include "mnode.h" -#include "dnode.h" -#include "dnodeInt.h" #include "dnodeVRead.h" #include "dnodeVWrite.h" #include "dnodeMRead.h" #include "dnodeMWrite.h" #include "dnodeShell.h" +#include "dnodeStep.h" static void (*dnodeProcessShellMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *); static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *); @@ -74,8 +68,9 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SHOW] = dnodeDispatchToMReadQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_RETRIEVE] = dnodeDispatchToMReadQueue; - int32_t numOfThreads = tsNumOfCores * tsNumOfThreadsPerCore; - numOfThreads = (int32_t) ((1.0 - tsRatioOfQueryThreads) * numOfThreads / 2.0); + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_NETWORK_TEST] = dnodeSendStartupStep; + + int32_t numOfThreads = (tsNumOfCores * tsNumOfThreadsPerCore) / 2.0; if (numOfThreads < 1) { numOfThreads = 1; } @@ -142,7 +137,23 @@ static void dnodeProcessMsgFromShell(SRpcMsg *pMsg, SRpcEpSet *pEpSet) { } } +static int32_t dnodeAuthNettestUser(char *user, char *spi, char *encrypt, char *secret, char *ckey) { + if (strcmp(user, "nettestinternal") == 0) { + char pass[32] = {0}; + taosEncryptPass((uint8_t *)user, strlen(user), pass); + *spi = 0; + *encrypt = 0; + *ckey = 0; + memcpy(secret, pass, TSDB_KEY_LEN); + dTrace("nettest user is authorized"); + return 0; + } + + return -1; +} + static int dnodeRetrieveUserAuthInfo(char *user, char *spi, char *encrypt, char *secret, char *ckey) { + if (dnodeAuthNettestUser(user, spi, encrypt, secret, ckey) == 0) return 0; int code = mnodeRetriveAuth(user, spi, encrypt, secret, ckey); if (code != TSDB_CODE_APP_NOT_READY) return code; @@ -220,4 +231,4 @@ SStatisInfo dnodeGetStatisInfo() { } return info; -} +} \ No newline at end of file diff --git a/src/dnode/src/dnodeStep.c b/src/dnode/src/dnodeStep.c new file mode 100644 index 0000000000..0f535b9470 --- /dev/null +++ b/src/dnode/src/dnodeStep.c @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "dnodeStep.h" + +static SStartupStep tsStartupStep; + +void dnodeReportStep(char *name, char *desc, int8_t finished) { + tstrncpy(tsStartupStep.name, name, sizeof(tsStartupStep.name)); + tstrncpy(tsStartupStep.desc, desc, sizeof(tsStartupStep.desc)); + tsStartupStep.finished = finished; +} + +void dnodeSendStartupStep(SRpcMsg *pMsg) { + dInfo("nettest msg is received, cont:%s", (char *)pMsg->pCont); + + SStartupStep *pStep = rpcMallocCont(sizeof(SStartupStep)); + memcpy(pStep, &tsStartupStep, sizeof(SStartupStep)); + + dDebug("startup msg is sent, step:%s desc:%s finished:%d", pStep->name, pStep->desc, pStep->finished); + + SRpcMsg rpcRsp = {.handle = pMsg->handle, .pCont = pStep, .contLen = sizeof(SStartupStep)}; + rpcSendResponse(&rpcRsp); + rpcFreeCont(pMsg->pCont); +} + +void taosStepCleanupImp(SStep *pSteps, int32_t stepId) { + for (int32_t step = stepId; step >= 0; step--) { + SStep *pStep = pSteps + step; + dDebug("step:%s will cleanup", pStep->name); + if (pStep->cleanupFp != NULL) { + (*pStep->cleanupFp)(); + } + } +} + +int32_t dnodeStepInit(SStep *pSteps, int32_t stepSize) { + for (int32_t step = 0; step < stepSize; step++) { + SStep *pStep = pSteps + step; + if (pStep->initFp == NULL) continue; + + dnodeReportStep(pStep->name, "Start initialization", 0); + + int32_t code = (*pStep->initFp)(); + if (code != 0) { + dDebug("step:%s will init", pStep->name); + taosStepCleanupImp(pSteps, step); + return code; + } + + dnodeReportStep(pStep->name, "Initialization complete", step + 1 >= stepSize); + } + + return 0; +} + +void dnodeStepCleanup(SStep *pSteps, int32_t stepSize) { + return taosStepCleanupImp(pSteps, stepSize - 1); +} \ No newline at end of file diff --git a/src/dnode/src/dnodeSystem.c b/src/dnode/src/dnodeSystem.c index 56316e9619..36232893b5 100644 --- a/src/dnode/src/dnodeSystem.c +++ b/src/dnode/src/dnodeSystem.c @@ -16,15 +16,15 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tgrant.h" -#include "tutil.h" -#include "tglobal.h" -#include "dnodeInt.h" +#include "tconfig.h" #include "dnodeMain.h" static void signal_handler(int32_t signum, siginfo_t *sigInfo, void *context); static tsem_t exitSem; int32_t main(int32_t argc, char *argv[]) { + int dump_config = 0; + // Set global configuration file for (int32_t i = 1; i < argc; ++i) { if (strcmp(argv[i], "-c") == 0) { @@ -38,6 +38,8 @@ int32_t main(int32_t argc, char *argv[]) { printf("'-c' requires a parameter, default:%s\n", configDir); exit(EXIT_FAILURE); } + } else if (strcmp(argv[i], "-C") == 0) { + dump_config = 1; } else if (strcmp(argv[i], "-V") == 0) { #ifdef _ACCT char *versionStr = "enterprise"; @@ -90,6 +92,20 @@ int32_t main(int32_t argc, char *argv[]) { #endif } + if (0 != dump_config) { + tscEmbedded = 1; + taosInitGlobalCfg(); + taosReadGlobalLogCfg(); + + if (!taosReadGlobalCfg()) { + printf("TDengine read global config failed"); + exit(EXIT_FAILURE); + } + + taosDumpGlobalCfg(); + exit(EXIT_SUCCESS); + } + if (tsem_init(&exitSem, 0, 0) != 0) { printf("failed to create exit semphore\n"); exit(EXIT_FAILURE); diff --git a/src/dnode/src/dnodeTelemetry.c b/src/dnode/src/dnodeTelemetry.c index e973f9901f..85f0137d89 100644 --- a/src/dnode/src/dnodeTelemetry.c +++ b/src/dnode/src/dnodeTelemetry.c @@ -15,9 +15,6 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" -#include "tglobal.h" -#include "tutil.h" #include 
"osTime.h" #include "tsocket.h" #include "tbuffer.h" @@ -32,8 +29,6 @@ #include "mnodeTable.h" #include "mnodeSdb.h" #include "mnodeAcct.h" -#include "dnode.h" -#include "dnodeInt.h" #include "dnodeTelemetry.h" static tsem_t tsExitSem; @@ -313,4 +308,4 @@ void dnodeCleanupTelemetry() { pthread_join(tsTelemetryThread, NULL); tsem_destroy(&tsExitSem); } -} +} \ No newline at end of file diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c new file mode 100644 index 0000000000..e3cf0820ae --- /dev/null +++ b/src/dnode/src/dnodeVMgmt.c @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "tqueue.h" +#include "tworker.h" +#include "dnodeVMgmt.h" + +typedef struct { + SRpcMsg rpcMsg; + char pCont[]; +} SMgmtMsg; + +static SWorkerPool tsVMgmtWP; +static taos_queue tsVMgmtQueue = NULL; + +static void * dnodeProcessMgmtQueue(void *param); +static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg); +static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg); +static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg); +static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg); +static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg); +static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg); +static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg); + +int32_t dnodeInitVMgmt() { + dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg; + dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg; + dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg; + dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg; + dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg; + dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_MNODE] = dnodeProcessCreateMnodeMsg; + + int32_t code = vnodeInitMgmt(); + if (code != TSDB_CODE_SUCCESS) return -1; + + tsVMgmtWP.name = "vmgmt"; + tsVMgmtWP.workerFp = dnodeProcessMgmtQueue; + tsVMgmtWP.min = 1; + tsVMgmtWP.max = 1; + if (tWorkerInit(&tsVMgmtWP) != 0) return -1; + + tsVMgmtQueue = tWorkerAllocQueue(&tsVMgmtWP, NULL); + + dInfo("dnode vmgmt is initialized"); + return TSDB_CODE_SUCCESS; +} + +void dnodeCleanupVMgmt() { + tWorkerFreeQueue(&tsVMgmtWP, tsVMgmtQueue); + tWorkerCleanup(&tsVMgmtWP); + + tsVMgmtQueue = NULL; + vnodeCleanupMgmt(); +} + +static int32_t dnodeWriteToMgmtQueue(SRpcMsg *pMsg) { + int32_t size = sizeof(SMgmtMsg) + pMsg->contLen; + SMgmtMsg *pMgmt = taosAllocateQitem(size); + if (pMgmt == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; + + pMgmt->rpcMsg = *pMsg; + pMgmt->rpcMsg.pCont = pMgmt->pCont; + memcpy(pMgmt->pCont, pMsg->pCont, pMsg->contLen); + taosWriteQitem(tsVMgmtQueue, TAOS_QTYPE_RPC, pMgmt); + + return TSDB_CODE_SUCCESS; +} + +void dnodeDispatchToVMgmtQueue(SRpcMsg *pMsg) { + int32_t code = dnodeWriteToMgmtQueue(pMsg); + if (code != TSDB_CODE_SUCCESS) { + SRpcMsg rsp = {.handle = pMsg->handle, .code = 
code}; + rpcSendResponse(&rsp); + } + + rpcFreeCont(pMsg->pCont); +} + +static void *dnodeProcessMgmtQueue(void *wparam) { + SWorker * pWorker = wparam; + SWorkerPool *pPool = pWorker->pPool; + SMgmtMsg * pMgmt; + SRpcMsg * pMsg; + SRpcMsg rsp = {0}; + int32_t qtype; + void * handle; + + while (1) { + if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pMgmt, &handle) == 0) { + dDebug("qdnode mgmt got no message from qset:%p, , exit", pPool->qset); + break; + } + + pMsg = &pMgmt->rpcMsg; + dTrace("msg:%p, ahandle:%p type:%s will be processed", pMgmt, pMsg->ahandle, taosMsg[pMsg->msgType]); + if (dnodeProcessMgmtMsgFp[pMsg->msgType]) { + rsp.code = (*dnodeProcessMgmtMsgFp[pMsg->msgType])(pMsg); + } else { + rsp.code = TSDB_CODE_DND_MSG_NOT_PROCESSED; + } + + dTrace("msg:%p, is processed, code:0x%x", pMgmt, rsp.code); + if (rsp.code != TSDB_CODE_DND_ACTION_IN_PROGRESS) { + rsp.handle = pMsg->handle; + rsp.pCont = NULL; + rpcSendResponse(&rsp); + } + + taosFreeQitem(pMsg); + } + + return NULL; +} + +static SCreateVnodeMsg* dnodeParseVnodeMsg(SRpcMsg *rpcMsg) { + SCreateVnodeMsg *pCreate = rpcMsg->pCont; + pCreate->cfg.vgId = htonl(pCreate->cfg.vgId); + pCreate->cfg.cfgVersion = htonl(pCreate->cfg.cfgVersion); + pCreate->cfg.maxTables = htonl(pCreate->cfg.maxTables); + pCreate->cfg.cacheBlockSize = htonl(pCreate->cfg.cacheBlockSize); + pCreate->cfg.totalBlocks = htonl(pCreate->cfg.totalBlocks); + pCreate->cfg.daysPerFile = htonl(pCreate->cfg.daysPerFile); + pCreate->cfg.daysToKeep1 = htonl(pCreate->cfg.daysToKeep1); + pCreate->cfg.daysToKeep2 = htonl(pCreate->cfg.daysToKeep2); + pCreate->cfg.daysToKeep = htonl(pCreate->cfg.daysToKeep); + pCreate->cfg.minRowsPerFileBlock = htonl(pCreate->cfg.minRowsPerFileBlock); + pCreate->cfg.maxRowsPerFileBlock = htonl(pCreate->cfg.maxRowsPerFileBlock); + pCreate->cfg.fsyncPeriod = htonl(pCreate->cfg.fsyncPeriod); + pCreate->cfg.commitTime = htonl(pCreate->cfg.commitTime); + + for (int32_t j = 0; j < pCreate->cfg.replications; ++j) { + pCreate->nodes[j].nodeId = htonl(pCreate->nodes[j].nodeId); + } + + return pCreate; +} + +static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *rpcMsg) { + SCreateVnodeMsg *pCreate = dnodeParseVnodeMsg(rpcMsg); + + void *pVnode = vnodeAcquire(pCreate->cfg.vgId); + if (pVnode != NULL) { + dDebug("vgId:%d, already exist, return success", pCreate->cfg.vgId); + vnodeRelease(pVnode); + return TSDB_CODE_SUCCESS; + } else { + dDebug("vgId:%d, create vnode msg is received", pCreate->cfg.vgId); + return vnodeCreate(pCreate); + } +} + +static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { + SAlterVnodeMsg *pAlter = dnodeParseVnodeMsg(rpcMsg); + + void *pVnode = vnodeAcquire(pAlter->cfg.vgId); + if (pVnode != NULL) { + dDebug("vgId:%d, alter vnode msg is received", pAlter->cfg.vgId); + int32_t code = vnodeAlter(pVnode, pAlter); + vnodeRelease(pVnode); + return code; + } else { + dError("vgId:%d, vnode not exist, can't alter it", pAlter->cfg.vgId); + return TSDB_CODE_VND_INVALID_VGROUP_ID; + } +} + +static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { + SDropVnodeMsg *pDrop = rpcMsg->pCont; + pDrop->vgId = htonl(pDrop->vgId); + + return vnodeDrop(pDrop->vgId); +} + +static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg) { + return 0; +} + +static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg) { + SCfgDnodeMsg *pCfg = pMsg->pCont; + return taosCfgDynamicOptions(pCfg->config); +} + +static int32_t dnodeProcessCreateMnodeMsg(SRpcMsg *pMsg) { + SCreateMnodeMsg *pCfg = pMsg->pCont; + pCfg->dnodeId = htonl(pCfg->dnodeId); + 
if (pCfg->dnodeId != dnodeGetDnodeId()) { + dDebug("dnode:%d, in create mnode msg is not equal with saved dnodeId:%d", pCfg->dnodeId, dnodeGetDnodeId()); + return TSDB_CODE_MND_DNODE_ID_NOT_CONFIGURED; + } + + if (strcmp(pCfg->dnodeEp, tsLocalEp) != 0) { + dDebug("dnodeEp:%s, in create mnode msg is not equal with saved dnodeEp:%s", pCfg->dnodeEp, tsLocalEp); + return TSDB_CODE_MND_DNODE_EP_NOT_CONFIGURED; + } + + dDebug("dnode:%d, create mnode msg is received from mnodes, numOfMnodes:%d", pCfg->dnodeId, pCfg->mnodes.mnodeNum); + for (int i = 0; i < pCfg->mnodes.mnodeNum; ++i) { + pCfg->mnodes.mnodeInfos[i].mnodeId = htonl(pCfg->mnodes.mnodeInfos[i].mnodeId); + dDebug("mnode index:%d, mnode:%d:%s", i, pCfg->mnodes.mnodeInfos[i].mnodeId, pCfg->mnodes.mnodeInfos[i].mnodeEp); + } + + dnodeStartMnode(&pCfg->mnodes); + + return TSDB_CODE_SUCCESS; +} \ No newline at end of file diff --git a/src/dnode/src/dnodeVRead.c b/src/dnode/src/dnodeVRead.c index b42a627a3a..3f31e49370 100644 --- a/src/dnode/src/dnodeVRead.c +++ b/src/dnode/src/dnodeVRead.c @@ -15,71 +15,40 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" -#include "taosmsg.h" -#include "tglobal.h" #include "tqueue.h" -#include "vnode.h" -#include "dnodeInt.h" - -typedef struct { - pthread_t thread; // thread - int32_t workerId; // worker ID -} SVReadWorker; - -typedef struct { - int32_t max; // max number of workers - int32_t min; // min number of workers - int32_t num; // current number of workers - SVReadWorker * worker; - pthread_mutex_t mutex; -} SVReadWorkerPool; +#include "tworker.h" +#include "dnodeVRead.h" static void *dnodeProcessReadQueue(void *pWorker); // module global variable -static SVReadWorkerPool tsVReadWP; -static taos_qset tsVReadQset; +static SWorkerPool tsVQueryWP; +static SWorkerPool tsVFetchWP; int32_t dnodeInitVRead() { - tsVReadQset = taosOpenQset(); + const int32_t maxFetchThreads = 4; - tsVReadWP.min = tsNumOfCores; - tsVReadWP.max = tsNumOfCores * tsNumOfThreadsPerCore; - if (tsVReadWP.max <= tsVReadWP.min * 2) tsVReadWP.max = 2 * tsVReadWP.min; - tsVReadWP.worker = calloc(sizeof(SVReadWorker), tsVReadWP.max); - pthread_mutex_init(&tsVReadWP.mutex, NULL); + // calculate the available query thread + float threadsForQuery = MAX(tsNumOfCores * tsRatioOfQueryCores, 1); - if (tsVReadWP.worker == NULL) return -1; - for (int i = 0; i < tsVReadWP.max; ++i) { - SVReadWorker *pWorker = tsVReadWP.worker + i; - pWorker->workerId = i; - } + tsVQueryWP.name = "vquery"; + tsVQueryWP.workerFp = dnodeProcessReadQueue; + tsVQueryWP.min = (int32_t) threadsForQuery; + tsVQueryWP.max = tsVQueryWP.min; + if (tWorkerInit(&tsVQueryWP) != 0) return -1; + + tsVFetchWP.name = "vfetch"; + tsVFetchWP.workerFp = dnodeProcessReadQueue; + tsVFetchWP.min = MIN(maxFetchThreads, tsNumOfCores); + tsVFetchWP.max = tsVFetchWP.min; + if (tWorkerInit(&tsVFetchWP) != 0) return -1; - dInfo("dnode vread is initialized, min worker:%d max worker:%d", tsVReadWP.min, tsVReadWP.max); return 0; } void dnodeCleanupVRead() { - for (int i = 0; i < tsVReadWP.max; ++i) { - SVReadWorker *pWorker = tsVReadWP.worker + i; - if (pWorker->thread) { - taosQsetThreadResume(tsVReadQset); - } - } - - for (int i = 0; i < tsVReadWP.max; ++i) { - SVReadWorker *pWorker = tsVReadWP.worker + i; - if (pWorker->thread) { - pthread_join(pWorker->thread, NULL); - } - } - - free(tsVReadWP.worker); - taosCloseQset(tsVReadQset); - pthread_mutex_destroy(&tsVReadWP.mutex); - - dInfo("dnode vread is closed"); + tWorkerCleanup(&tsVFetchWP); + tWorkerCleanup(&tsVQueryWP); } 
void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { @@ -92,6 +61,7 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { pHead->vgId = htonl(pHead->vgId); pHead->contLen = htonl(pHead->contLen); + assert(pHead->contLen > 0); void *pVnode = vnodeAcquire(pHead->vgId); if (pVnode != NULL) { int32_t code = vnodeWriteToRQueue(pVnode, pCont, pHead->contLen, TAOS_QTYPE_RPC, pMsg); @@ -111,43 +81,20 @@ void dnodeDispatchToVReadQueue(SRpcMsg *pMsg) { rpcFreeCont(pMsg->pCont); } -void *dnodeAllocVReadQueue(void *pVnode) { - pthread_mutex_lock(&tsVReadWP.mutex); - taos_queue queue = taosOpenQueue(); - if (queue == NULL) { - pthread_mutex_unlock(&tsVReadWP.mutex); - return NULL; - } - - taosAddIntoQset(tsVReadQset, queue, pVnode); - - // spawn a thread to process queue - if (tsVReadWP.num < tsVReadWP.max) { - do { - SVReadWorker *pWorker = tsVReadWP.worker + tsVReadWP.num; - - pthread_attr_t thAttr; - pthread_attr_init(&thAttr); - pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); - - if (pthread_create(&pWorker->thread, &thAttr, dnodeProcessReadQueue, pWorker) != 0) { - dError("failed to create thread to process vread vqueue since %s", strerror(errno)); - } - - pthread_attr_destroy(&thAttr); - tsVReadWP.num++; - dDebug("dnode vread worker:%d is launched, total:%d", pWorker->workerId, tsVReadWP.num); - } while (tsVReadWP.num < tsVReadWP.min); - } - - pthread_mutex_unlock(&tsVReadWP.mutex); - dDebug("pVnode:%p, dnode vread queue:%p is allocated", pVnode, queue); - - return queue; +void *dnodeAllocVQueryQueue(void *pVnode) { + return tWorkerAllocQueue(&tsVQueryWP, pVnode); } -void dnodeFreeVReadQueue(void *pRqueue) { - taosCloseQueue(pRqueue); +void *dnodeAllocVFetchQueue(void *pVnode) { + return tWorkerAllocQueue(&tsVFetchWP, pVnode); +} + +void dnodeFreeVQueryQueue(void *pQqueue) { + tWorkerFreeQueue(&tsVQueryWP, pQqueue); +} + +void dnodeFreeVFetchQueue(void *pFqueue) { + tWorkerFreeQueue(&tsVFetchWP, pFqueue); } void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) { @@ -164,18 +111,20 @@ void dnodeSendRpcVReadRsp(void *pVnode, SVReadMsg *pRead, int32_t code) { void dnodeDispatchNonRspMsg(void *pVnode, SVReadMsg *pRead, int32_t code) { } -static void *dnodeProcessReadQueue(void *pWorker) { - SVReadMsg *pRead; - int32_t qtype; - void * pVnode; +static void *dnodeProcessReadQueue(void *wparam) { + SWorker * pWorker = wparam; + SWorkerPool *pPool = pWorker->pPool; + SVReadMsg * pRead; + int32_t qtype; + void * pVnode; while (1) { - if (taosReadQitemFromQset(tsVReadQset, &qtype, (void **)&pRead, &pVnode) == 0) { - dDebug("qset:%p dnode vread got no message from qset, exiting", tsVReadQset); + if (taosReadQitemFromQset(pPool->qset, &qtype, (void **)&pRead, &pVnode) == 0) { + dDebug("dnode vquery got no message from qset:%p, exiting", pPool->qset); break; } - dTrace("msg:%p, app:%p type:%s will be processed in vread queue, qtype:%d", pRead, pRead->rpcAhandle, + dTrace("msg:%p, app:%p type:%s will be processed in vquery queue, qtype:%d", pRead, pRead->rpcAhandle, taosMsg[pRead->msgType], qtype); int32_t code = vnodeProcessRead(pVnode, pRead); diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 6d4b50ee54..a5ae8ac830 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -15,13 +15,8 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" -#include "taosmsg.h" -#include "tglobal.h" #include "tqueue.h" -#include "twal.h" -#include "vnode.h" -#include "dnodeInt.h" +#include "dnodeVWrite.h" typedef struct { taos_qall qall; diff 
--git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c new file mode 100644 index 0000000000..85b997d94c --- /dev/null +++ b/src/dnode/src/dnodeVnodes.c @@ -0,0 +1,278 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "ttimer.h" +#include "dnodeEps.h" +#include "dnodeCfg.h" +#include "dnodeMInfos.h" +#include "dnodeVnodes.h" + +typedef struct { + pthread_t thread; + int32_t threadIndex; + int32_t failed; + int32_t opened; + int32_t vnodeNum; + int32_t * vnodeList; +} SOpenVnodeThread; + +extern void * tsDnodeTmr; +static void * tsStatusTimer = NULL; +static uint32_t tsRebootTime = 0; + +static void dnodeSendStatusMsg(void *handle, void *tmrId); +static void dnodeProcessStatusRsp(SRpcMsg *pMsg); + +int32_t dnodeInitStatusTimer() { + dnodeAddClientRspHandle(TSDB_MSG_TYPE_DM_STATUS_RSP, dnodeProcessStatusRsp); + + tsRebootTime = taosGetTimestampSec(); + taosTmrReset(dnodeSendStatusMsg, 500, NULL, tsDnodeTmr, &tsStatusTimer); + + dInfo("dnode status timer is initialized"); + return TSDB_CODE_SUCCESS; +} + +void dnodeCleanupStatusTimer() { + if (tsStatusTimer != NULL) { + taosTmrStopA(&tsStatusTimer); + tsStatusTimer = NULL; + } +} + +static int32_t dnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { + DIR *dir = opendir(tsVnodeDir); + if (dir == NULL) return TSDB_CODE_DND_NO_WRITE_ACCESS; + + *numOfVnodes = 0; + struct dirent *de = NULL; + while ((de = readdir(dir)) != NULL) { + if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0) continue; + if (de->d_type & DT_DIR) { + if (strncmp("vnode", de->d_name, 5) != 0) continue; + int32_t vnode = atoi(de->d_name + 5); + if (vnode == 0) continue; + + (*numOfVnodes)++; + + if (*numOfVnodes >= TSDB_MAX_VNODES) { + dError("vgId:%d, too many vnode directory in disk, exist:%d max:%d", vnode, *numOfVnodes, TSDB_MAX_VNODES); + continue; + } else { + vnodeList[*numOfVnodes - 1] = vnode; + } + } + } + closedir(dir); + + return TSDB_CODE_SUCCESS; +} + +static void *dnodeOpenVnode(void *param) { + SOpenVnodeThread *pThread = param; + + dDebug("thread:%d, start to open %d vnodes", pThread->threadIndex, pThread->vnodeNum); + + for (int32_t v = 0; v < pThread->vnodeNum; ++v) { + int32_t vgId = pThread->vnodeList[v]; + if (vnodeOpen(vgId) < 0) { + dError("vgId:%d, failed to open vnode by thread:%d", vgId, pThread->threadIndex); + pThread->failed++; + } else { + dDebug("vgId:%d, is openned by thread:%d", vgId, pThread->threadIndex); + pThread->opened++; + } + } + + dDebug("thread:%d, total vnodes:%d, openned:%d failed:%d", pThread->threadIndex, pThread->vnodeNum, pThread->opened, + pThread->failed); + return NULL; +} + +int32_t dnodeInitVnodes() { + int32_t vnodeList[TSDB_MAX_VNODES] = {0}; + int32_t numOfVnodes = 0; + int32_t status = dnodeGetVnodeList(vnodeList, &numOfVnodes); + + if (status != TSDB_CODE_SUCCESS) { + dInfo("get dnode list failed"); + return status; + } + + int32_t threadNum = tsNumOfCores; + int32_t vnodesPerThread = 
numOfVnodes / threadNum + 1; + SOpenVnodeThread *threads = calloc(threadNum, sizeof(SOpenVnodeThread)); + for (int32_t t = 0; t < threadNum; ++t) { + threads[t].threadIndex = t; + threads[t].vnodeList = calloc(vnodesPerThread, sizeof(int32_t)); + } + + for (int32_t v = 0; v < numOfVnodes; ++v) { + int32_t t = v % threadNum; + SOpenVnodeThread *pThread = &threads[t]; + pThread->vnodeList[pThread->vnodeNum++] = vnodeList[v]; + } + + dDebug("start %d threads to open %d vnodes", threadNum, numOfVnodes); + + for (int32_t t = 0; t < threadNum; ++t) { + SOpenVnodeThread *pThread = &threads[t]; + if (pThread->vnodeNum == 0) continue; + + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + if (pthread_create(&pThread->thread, &thAttr, dnodeOpenVnode, pThread) != 0) { + dError("thread:%d, failed to create thread to open vnode, reason:%s", pThread->threadIndex, strerror(errno)); + } + + pthread_attr_destroy(&thAttr); + } + + int32_t openVnodes = 0; + int32_t failedVnodes = 0; + for (int32_t t = 0; t < threadNum; ++t) { + SOpenVnodeThread *pThread = &threads[t]; + if (pThread->vnodeNum > 0 && pThread->thread) { + pthread_join(pThread->thread, NULL); + } + openVnodes += pThread->opened; + failedVnodes += pThread->failed; + free(pThread->vnodeList); + } + + free(threads); + dInfo("there are total vnodes:%d, openned:%d", numOfVnodes, openVnodes); + + if (failedVnodes != 0) { + dError("there are total vnodes:%d, failed:%d", numOfVnodes, failedVnodes); + return -1; + } + + return TSDB_CODE_SUCCESS; +} + +void dnodeCleanupVnodes() { + int32_t vnodeList[TSDB_MAX_VNODES]= {0}; + int32_t numOfVnodes = 0; + int32_t status; + + status = vnodeGetVnodeList(vnodeList, &numOfVnodes); + + if (status != TSDB_CODE_SUCCESS) { + dInfo("get dnode list failed"); + return; + } + + for (int32_t i = 0; i < numOfVnodes; ++i) { + vnodeClose(vnodeList[i]); + } + + dInfo("total vnodes:%d are all closed", numOfVnodes); +} + +static void dnodeProcessStatusRsp(SRpcMsg *pMsg) { + if (pMsg->code != TSDB_CODE_SUCCESS) { + dError("status rsp is received, error:%s", tstrerror(pMsg->code)); + taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); + return; + } + + SStatusRsp *pStatusRsp = pMsg->pCont; + SMInfos *minfos = &pStatusRsp->mnodes; + dnodeUpdateMInfos(minfos); + + SDnodeCfg *pCfg = &pStatusRsp->dnodeCfg; + pCfg->numOfVnodes = htonl(pCfg->numOfVnodes); + pCfg->moduleStatus = htonl(pCfg->moduleStatus); + pCfg->dnodeId = htonl(pCfg->dnodeId); + dnodeUpdateCfg(pCfg); + + vnodeSetAccess(pStatusRsp->vgAccess, pCfg->numOfVnodes); + + SDnodeEps *pEps = (SDnodeEps *)((char *)pStatusRsp->vgAccess + pCfg->numOfVnodes * sizeof(SVgroupAccess)); + dnodeUpdateEps(pEps); + + taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); +} + +static void dnodeSendStatusMsg(void *handle, void *tmrId) { + if (tsDnodeTmr == NULL) { + dError("dnode timer is already released"); + return; + } + + if (tsStatusTimer == NULL) { + taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); + dError("failed to start status timer"); + return; + } + + int32_t contLen = sizeof(SStatusMsg) + TSDB_MAX_VNODES * sizeof(SVnodeLoad); + SStatusMsg *pStatus = rpcMallocCont(contLen); + if (pStatus == NULL) { + taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); + dError("failed to malloc status message"); + return; + } + + dnodeGetCfg(&pStatus->dnodeId, 
pStatus->clusterId); + pStatus->dnodeId = htonl(dnodeGetDnodeId()); + pStatus->version = htonl(tsVersion); + pStatus->lastReboot = htonl(tsRebootTime); + pStatus->numOfCores = htons((uint16_t) tsNumOfCores); + pStatus->diskAvailable = tsAvailDataDirGB; + pStatus->alternativeRole = (uint8_t) tsAlternativeRole; + tstrncpy(pStatus->dnodeEp, tsLocalEp, TSDB_EP_LEN); + + // fill cluster cfg parameters + pStatus->clusterCfg.numOfMnodes = htonl(tsNumOfMnodes); + pStatus->clusterCfg.enableBalance = htonl(tsEnableBalance); + pStatus->clusterCfg.mnodeEqualVnodeNum = htonl(tsMnodeEqualVnodeNum); + pStatus->clusterCfg.offlineThreshold = htonl(tsOfflineThreshold); + pStatus->clusterCfg.statusInterval = htonl(tsStatusInterval); + pStatus->clusterCfg.maxtablesPerVnode = htonl(tsMaxTablePerVnode); + pStatus->clusterCfg.maxVgroupsPerDb = htonl(tsMaxVgroupsPerDb); + tstrncpy(pStatus->clusterCfg.arbitrator, tsArbitrator, TSDB_EP_LEN); + tstrncpy(pStatus->clusterCfg.timezone, tsTimezone, 64); + pStatus->clusterCfg.checkTime = 0; + char timestr[32] = "1970-01-01 00:00:00.00"; + (void)taosParseTime(timestr, &pStatus->clusterCfg.checkTime, strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0); + tstrncpy(pStatus->clusterCfg.locale, tsLocale, TSDB_LOCALE_LEN); + tstrncpy(pStatus->clusterCfg.charset, tsCharset, TSDB_LOCALE_LEN); + + vnodeBuildStatusMsg(pStatus); + contLen = sizeof(SStatusMsg) + pStatus->openVnodes * sizeof(SVnodeLoad); + pStatus->openVnodes = htons(pStatus->openVnodes); + + SRpcMsg rpcMsg = { + .pCont = pStatus, + .contLen = contLen, + .msgType = TSDB_MSG_TYPE_DM_STATUS + }; + + SRpcEpSet epSet; + dnodeGetEpSetForPeer(&epSet); + dnodeSendMsgToDnode(&epSet, &rpcMsg); +} + +void dnodeSendStatusMsgToMnode() { + if (tsDnodeTmr != NULL && tsStatusTimer != NULL) { + dInfo("force send status msg to mnode"); + taosTmrReset(dnodeSendStatusMsg, 3, NULL, tsDnodeTmr, &tsStatusTimer); + } +} \ No newline at end of file diff --git a/src/inc/dnode.h b/src/inc/dnode.h index 1efaa4a24b..dd41360e68 100644 --- a/src/inc/dnode.h +++ b/src/inc/dnode.h @@ -29,13 +29,6 @@ typedef struct { int32_t httpReqNum; } SStatisInfo; -typedef enum { - TSDB_RUN_STATUS_INITIALIZE, - TSDB_RUN_STATUS_RUNING, - TSDB_RUN_STATUS_STOPPED -} SRunStatus; - -SRunStatus dnodeGetRunStatus(); SStatisInfo dnodeGetStatisInfo(); bool dnodeIsFirstDeploy(); @@ -56,8 +49,10 @@ void *dnodeSendCfgTableToRecv(int32_t vgId, int32_t tid); void *dnodeAllocVWriteQueue(void *pVnode); void dnodeFreeVWriteQueue(void *pWqueue); void dnodeSendRpcVWriteRsp(void *pVnode, void *pWrite, int32_t code); -void *dnodeAllocVReadQueue(void *pVnode); -void dnodeFreeVReadQueue(void *pRqueue); +void *dnodeAllocVQueryQueue(void *pVnode); +void *dnodeAllocVFetchQueue(void *pVnode); +void dnodeFreeVQueryQueue(void *pQqueue); +void dnodeFreeVFetchQueue(void *pFqueue); int32_t dnodeAllocateMPeerQueue(); void dnodeFreeMPeerQueue(); @@ -71,8 +66,18 @@ void dnodeDelayReprocessMWriteMsg(void *pMsg); void dnodeSendStatusMsgToMnode(); +typedef struct { + char *name; + int32_t (*initFp)(); + void (*cleanupFp)(); +} SStep; + +int32_t dnodeStepInit(SStep *pSteps, int32_t stepSize); +void dnodeStepCleanup(SStep *pSteps, int32_t stepSize); +void dnodeReportStep(char *name, char *desc, int8_t finished); + #ifdef __cplusplus } #endif -#endif +#endif \ No newline at end of file diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index ca20293392..0cc06be1db 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -286,6 +286,9 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size, 
void* buf #define TSDB_SHOW_SQL_LEN 512 #define TSDB_SLOW_QUERY_SQL_LEN 512 +#define TSDB_STEP_NAME_LEN 32 +#define TSDB_STEP_DESC_LEN 128 + #define TSDB_MQTT_HOSTNAME_LEN 64 #define TSDB_MQTT_PORT_LEN 8 #define TSDB_MQTT_USER_LEN 24 @@ -439,6 +442,10 @@ typedef enum { TAOS_QTYPE_QUERY = 4 } EQType; +#define TSDB_MAX_TIERS 3 +#define TSDB_MAX_DISKS_PER_TIER 16 +#define TSDB_MAX_DISKS (TSDB_MAX_TIERS * TSDB_MAX_DISKS_PER_TIER) + typedef enum { TSDB_SUPER_TABLE = 0, // super table TSDB_CHILD_TABLE = 1, // table created from super table diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h index a720b68e59..be33262f7f 100644 --- a/src/inc/taoserror.h +++ b/src/inc/taoserror.h @@ -64,7 +64,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TRAN_ID, 0, 0x000F, "Invalid tr TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_SESSION_ID, 0, 0x0010, "Invalid session id") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_MSG_TYPE, 0, 0x0011, "Invalid message type") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_RESPONSE_TYPE, 0, 0x0012, "Invalid response type") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, 0, 0x0013, "Invalid timestamp") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_INVALID_TIME_STAMP, 0, 0x0013, "Client and server's time is not synchronized") TAOS_DEFINE_ERROR(TSDB_CODE_APP_NOT_READY, 0, 0x0014, "Database not ready") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, 0, 0x0015, "Unable to resolve FQDN") @@ -192,6 +192,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_DND_MSG_NOT_PROCESSED, 0, 0x0400, "Message no TAOS_DEFINE_ERROR(TSDB_CODE_DND_OUT_OF_MEMORY, 0, 0x0401, "Dnode out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_DND_NO_WRITE_ACCESS, 0, 0x0402, "No permission for disk files in dnode") TAOS_DEFINE_ERROR(TSDB_CODE_DND_INVALID_MSG_LEN, 0, 0x0403, "Invalid message length") +TAOS_DEFINE_ERROR(TSDB_CODE_DND_ACTION_IN_PROGRESS, 0, 0x0404, "Action in progress") // vnode TAOS_DEFINE_ERROR(TSDB_CODE_VND_ACTION_IN_PROGRESS, 0, 0x0500, "Action in progress") @@ -205,9 +206,10 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, 0, 0x0507, "Missing da TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, 0, 0x0508, "Out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, 0, 0x0509, "Unexpected generic error in vnode") TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, 0, 0x050A, "Invalid version file") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Vnode memory is full because commit failed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, 0, 0x050B, "Database memory is full for commit failed") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, 0, 0x050C, "Database memory is full for waiting commit") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, 0, 0x0511, "Database suspended") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Write operation denied") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, 0, 0x0512, "Database write operation denied") TAOS_DEFINE_ERROR(TSDB_CODE_VND_SYNCING, 0, 0x0513, "Database is syncing") // tsdb @@ -394,6 +396,18 @@ TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_BAD_SEQ, 0, 0x2113, "src bad se TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_INCOMPLETE, 0, 0x2114, "src incomplete") TAOS_DEFINE_ERROR(TSDB_CODE_ODBC_CONV_SRC_GENERAL, 0, 0x2115, "src general") +// tfs +TAOS_DEFINE_ERROR(TSDB_CODE_FS_OUT_OF_MEMORY, 0, 0x2200, "tfs out of memory") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_INVLD_CFG, 0, 0x2201, "tfs invalid mount config") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_TOO_MANY_MOUNT, 0, 0x2202, "tfs too many mount") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_DUP_PRIMARY, 0, 0x2203, "tfs duplicate primary mount") 
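The SStep table declared in dnode.h above (a step name plus initFp/cleanupFp callbacks, with the new TSDB_STEP_NAME_LEN/TSDB_STEP_DESC_LEN sizing the reported fields), together with dnodeStepInit/dnodeStepCleanup and dnodeReportStep, points at a sequential bring-up in which a failed step unwinds the steps that already succeeded. A minimal, self-contained sketch of how such a step table could be driven — illustrative only, not the dnode implementation; the sample step is hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    typedef struct {
      char    *name;
      int32_t (*initFp)();
      void    (*cleanupFp)();
    } SStep;

    /* Run each step in order; on failure, unwind the already-initialized steps. */
    static int32_t stepInit(SStep *pSteps, int32_t stepSize) {
      for (int32_t i = 0; i < stepSize; ++i) {
        if (pSteps[i].initFp == NULL) continue;
        printf("start to init step: %s\n", pSteps[i].name);
        int32_t code = (*pSteps[i].initFp)();
        if (code != 0) {
          for (int32_t j = i - 1; j >= 0; --j) {
            if (pSteps[j].cleanupFp != NULL) (*pSteps[j].cleanupFp)();
          }
          return code;
        }
      }
      return 0;
    }

    static void stepCleanup(SStep *pSteps, int32_t stepSize) {
      for (int32_t i = stepSize - 1; i >= 0; --i) {
        if (pSteps[i].cleanupFp != NULL) (*pSteps[i].cleanupFp)();
      }
    }

    static int32_t initRpc()    { printf("rpc ready\n");  return 0; }
    static void    cleanupRpc() { printf("rpc closed\n"); }

    int main() {
      SStep steps[] = {{"dnode-rpc", initRpc, cleanupRpc}};   /* hypothetical step */
      if (stepInit(steps, 1) == 0) stepCleanup(steps, 1);
      return 0;
    }

The SStartupStep message added to taosmsg.h further down (finished flag, name, desc) is presumably what dnodeReportStep fills in, so a connecting client can display which step the server is currently on.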
+TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_PRIMARY_DISK, 0, 0x2204, "tfs no primary mount") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_MOUNT_AT_TIER, 0, 0x2205, "tfs no mount at tier") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_FILE_ALREADY_EXISTS, 0, 0x2206, "tfs file already exists") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_INVLD_LEVEL, 0, 0x2207, "tfs invalid level") +TAOS_DEFINE_ERROR(TSDB_CODE_FS_NO_VALID_DISK, 0, 0x2208, "tfs no valid disk") + + #ifdef TAOS_ERROR_C }; #endif diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index e8e3029244..27d857ce14 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -105,10 +105,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DM_AUTH, "auth" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY12, "dummy12" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY13, "dummy13" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY14, "dummy14" ) - - -TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_NETWORK_TEST, "network-test" ) - +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_NETWORK_TEST, "nettest" ) #ifndef TAOS_MESSAGE_C TSDB_MSG_TYPE_MAX // 105 @@ -476,7 +473,7 @@ typedef struct { int16_t numOfGroupCols; // num of group by columns int16_t orderByIdx; int16_t orderType; // used in group by xx order by xxx - int64_t tableLimit; // limit the number of rows for each table, used in order by + limit in stable projection query. + int64_t vgroupLimit; // limit the number of rows for each table, used in order by + limit in stable projection query. int16_t prjOrder; // global order in super table projection query. int64_t limit; int64_t offset; @@ -790,6 +787,7 @@ typedef struct { typedef struct { char sql[TSDB_SHOW_SQL_LEN]; + char dstTable[TSDB_TABLE_NAME_LEN]; uint32_t streamId; int64_t num; // number of computing/cycles int64_t useconds; @@ -840,6 +838,14 @@ typedef struct { char ckey[TSDB_KEY_LEN]; } SAuthMsg, SAuthRsp; +typedef struct { + int8_t finished; + int8_t reserved1[7]; + char name[TSDB_STEP_NAME_LEN]; + char desc[TSDB_STEP_DESC_LEN]; + char reserved2[64]; +} SStartupStep; + #pragma pack(pop) #ifdef __cplusplus diff --git a/src/inc/tcq.h b/src/inc/tcq.h index afa744a9c4..ad123d4080 100644 --- a/src/inc/tcq.h +++ b/src/inc/tcq.h @@ -42,7 +42,7 @@ void cqStart(void *handle); void cqStop(void *handle); // cqCreate is called by TSDB to start an instance of CQ -void *cqCreate(void *handle, uint64_t uid, int32_t sid, char *sqlStr, STSchema *pSchema); +void *cqCreate(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema); // cqDrop is called by TSDB to stop an instance of CQ, handle is the return value of cqCreate void cqDrop(void *handle); diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 58859f42bc..04d6c78815 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -48,7 +48,7 @@ typedef struct { void *cqH; int (*notifyStatus)(void *, int status, int eno); int (*eventCallBack)(void *); - void *(*cqCreateFunc)(void *handle, uint64_t uid, int sid, char *sqlStr, STSchema *pSchema); + void *(*cqCreateFunc)(void *handle, uint64_t uid, int32_t sid, const char* dstTable, char *sqlStr, STSchema *pSchema); void (*cqDropFunc)(void *handle); } STsdbAppH; @@ -321,7 +321,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle); */ void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage); -int tsdbInitCommitQueue(int nthreads); +int tsdbInitCommitQueue(); void tsdbDestroyCommitQueue(); int tsdbSyncCommit(TSDB_REPO_T *repo); void tsdbIncCommitRef(int vgId); diff --git a/src/inc/vnode.h b/src/inc/vnode.h index 5f643295d6..95f1d27b59 100644 
--- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -19,18 +19,9 @@ #ifdef __cplusplus extern "C" { #endif - #include "trpc.h" #include "twal.h" -typedef enum _VN_STATUS { - TAOS_VN_STATUS_INIT = 0, - TAOS_VN_STATUS_READY = 1, - TAOS_VN_STATUS_CLOSING = 2, - TAOS_VN_STATUS_UPDATING = 3, - TAOS_VN_STATUS_RESET = 4, -} EVnodeStatus; - typedef struct { int32_t len; void * rsp; @@ -43,6 +34,7 @@ typedef struct { void * rpcHandle; void * rpcAhandle; void * qhandle; + void * pVnode; int8_t qtype; int8_t msgType; SRspRet rspRet; @@ -60,29 +52,35 @@ typedef struct { SWalHead pHead[]; } SVWriteMsg; +// vnodeStatus extern char *vnodeStatus[]; +// vnodeMain int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg); int32_t vnodeDrop(int32_t vgId); -int32_t vnodeOpen(int32_t vgId, char *rootDir); +int32_t vnodeOpen(int32_t vgId); int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg); int32_t vnodeClose(int32_t vgId); -void* vnodeAcquire(int32_t vgId); // add refcount -void vnodeRelease(void *pVnode); // dec refCount +// vnodeMgmt +int32_t vnodeInitMgmt(); +void vnodeCleanupMgmt(); +void* vnodeAcquire(int32_t vgId); +void vnodeRelease(void *pVnode); void* vnodeGetWal(void *pVnode); +int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes); +void vnodeBuildStatusMsg(void *pStatus); +void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes); +// vnodeWrite int32_t vnodeWriteToWQueue(void *pVnode, void *pHead, int32_t qtype, void *pRpcMsg); void vnodeFreeFromWQueue(void *pVnode, SVWriteMsg *pWrite); int32_t vnodeProcessWrite(void *pVnode, void *pHead, int32_t qtype, void *pRspRet); -int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes); -void vnodeBuildStatusMsg(void *pStatus); + +// vnodeSync void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code); -void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes); - -int32_t vnodeInitResources(); -void vnodeCleanupResources(); +// vnodeRead int32_t vnodeWriteToRQueue(void *pVnode, void *pCont, int32_t contLen, int8_t qtype, void *rparam); void vnodeFreeFromRQueue(void *pVnode, SVReadMsg *pRead); int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); @@ -91,4 +89,4 @@ int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); } #endif -#endif +#endif \ No newline at end of file diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index d65c943e28..7e5ebb0596 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -45,13 +45,13 @@ typedef struct SShellArguments { char* timezone; bool is_raw_time; bool is_use_passwd; + bool dump_config; char file[TSDB_FILENAME_LEN]; char dir[TSDB_FILENAME_LEN]; int threadNum; char* commands; int abort; int port; - int endPort; int pktLen; char* netTestRole; } SShellArguments; diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index 22f01ac142..7a9e242668 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -32,14 +32,14 @@ /**************** Global variables ****************/ #ifdef _TD_POWER_ char CLIENT_VERSION[] = "Welcome to the PowerDB shell from %s, Client Version:%s\n" - "Copyright (c) 2017 by PowerDB, Inc. All rights reserved.\n\n"; + "Copyright (c) 2020 by PowerDB, Inc. All rights reserved.\n\n"; char PROMPT_HEADER[] = "power> "; char CONTINUE_PROMPT[] = " -> "; int prompt_size = 7; #else char CLIENT_VERSION[] = "Welcome to the TDengine shell from %s, Client Version:%s\n" - "Copyright (c) 2017 by TAOS Data, Inc. All rights reserved.\n\n"; + "Copyright (c) 2020 by TAOS Data, Inc. 
All rights reserved.\n\n"; char PROMPT_HEADER[] = "taos> "; char CONTINUE_PROMPT[] = " -> "; @@ -509,7 +509,9 @@ static int dumpResultToFile(const char* fname, TAOS_RES* tres) { static void shellPrintNChar(const char *str, int length, int width) { - int pos = 0, cols = 0; + wchar_t tail[3]; + int pos = 0, cols = 0, totalCols = 0, tailLen = 0; + while (pos < length) { wchar_t wc; int bytes = mbtowc(&wc, str + pos, MB_CUR_MAX); @@ -526,15 +528,44 @@ static void shellPrintNChar(const char *str, int length, int width) { #else int w = wcwidth(wc); #endif - if (w > 0) { - if (width > 0 && cols + w > width) { - break; - } + if (w <= 0) { + continue; + } + + if (width <= 0) { + printf("%lc", wc); + continue; + } + + totalCols += w; + if (totalCols > width) { + break; + } + if (totalCols <= (width - 3)) { printf("%lc", wc); cols += w; + } else { + tail[tailLen] = wc; + tailLen++; } } + if (totalCols > width) { + // width could be 1 or 2, so printf("...") cannot be used + for (int i = 0; i < 3; i++) { + if (cols >= width) { + break; + } + putchar('.'); + ++cols; + } + } else { + for (int i = 0; i < tailLen; i++) { + printf("%lc", tail[i]); + } + cols = totalCols; + } + for (; cols < width; cols++) { putchar(' '); } @@ -656,13 +687,21 @@ static int calcColWidth(TAOS_FIELD* field, int precision) { return MAX(25, width); case TSDB_DATA_TYPE_BINARY: - case TSDB_DATA_TYPE_NCHAR: if (field->bytes > tsMaxBinaryDisplayWidth) { return MAX(tsMaxBinaryDisplayWidth, width); } else { return MAX(field->bytes, width); } + case TSDB_DATA_TYPE_NCHAR: { + int16_t bytes = field->bytes * TSDB_NCHAR_SIZE; + if (bytes > tsMaxBinaryDisplayWidth) { + return MAX(tsMaxBinaryDisplayWidth, width); + } else { + return MAX(bytes, width); + } + } + case TSDB_DATA_TYPE_TIMESTAMP: if (args.is_raw_time) { return MAX(14, width); diff --git a/src/kit/shell/src/shellLinux.c b/src/kit/shell/src/shellLinux.c index 3226ad830a..15b2b077c9 100644 --- a/src/kit/shell/src/shellLinux.c +++ b/src/kit/shell/src/shellLinux.c @@ -39,6 +39,7 @@ static struct argp_option options[] = { {"user", 'u', "USER", 0, "The user name to use when connecting to the server."}, {"user", 'A', "Auth", 0, "The user auth to use when connecting to the server."}, {"config-dir", 'c', "CONFIG_DIR", 0, "Configuration directory."}, + {"dump-config", 'C', 0, 0, "Dump configuration."}, {"commands", 's', "COMMANDS", 0, "Commands to run without enter the shell."}, {"raw-time", 'r', 0, 0, "Output time as uint64_t."}, {"file", 'f', "FILE", 0, "Script to run without enter the shell."}, @@ -46,8 +47,7 @@ static struct argp_option options[] = { {"thread", 'T', "THREADNUM", 0, "Number of threads when using multi-thread to import data."}, {"database", 'd', "DATABASE", 0, "Database to use when connecting to the server."}, {"timezone", 't', "TIMEZONE", 0, "Time zone of the shell, default is local."}, - {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is NULL, options: client|clients|server."}, - {"endport", 'e', "ENDPORT", 0, "Net test end port, default is 6042."}, + {"netrole", 'n', "NETROLE", 0, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup."}, {"pktlen", 'l', "PKTLEN", 0, "Packet length used for net test, default is 1000 bytes."}, {0}}; @@ -97,6 +97,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { tstrncpy(configDir, full_path.we_wordv[0], TSDB_FILENAME_LEN); wordfree(&full_path); break; + case 'C': + arguments->dump_config = true; + break; case 's': arguments->commands = 
arg; break; @@ -130,20 +133,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { case 'd': arguments->database = arg; break; - case 'n': arguments->netTestRole = arg; break; - - case 'e': - if (arg) { - arguments->endPort = atoi(arg); - } else { - fprintf(stderr, "Invalid end port\n"); - return -1; - } - break; - case 'l': if (arg) { arguments->pktLen = atoi(arg); @@ -152,7 +144,6 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { return -1; } break; - case OPT_ABORT: arguments->abort = 1; break; diff --git a/src/kit/shell/src/shellMain.c b/src/kit/shell/src/shellMain.c index 2083ad3e9b..4f0c5e3f99 100644 --- a/src/kit/shell/src/shellMain.c +++ b/src/kit/shell/src/shellMain.c @@ -15,6 +15,7 @@ #include "os.h" #include "shell.h" +#include "tconfig.h" #include "tnettest.h" pthread_t pid; @@ -58,11 +59,11 @@ SShellArguments args = { .timezone = NULL, .is_raw_time = false, .is_use_passwd = false, + .dump_config = false, .file = "\0", .dir = "\0", .threadNum = 5, - .commands = NULL, - .endPort = 6042, + .commands = NULL, .pktLen = 1000, .netTestRole = NULL }; @@ -79,11 +80,22 @@ int main(int argc, char* argv[]) { shellParseArgument(argc, argv, &args); + if (args.dump_config) { + taosInitGlobalCfg(); + taosReadGlobalLogCfg(); + + if (!taosReadGlobalCfg()) { + printf("TDengine read global config failed"); + exit(EXIT_FAILURE); + } + + taosDumpGlobalCfg(); + exit(0); + } + if (args.netTestRole && args.netTestRole[0] != 0) { taos_init(); - CmdArguments cmdArgs; - memcpy(&cmdArgs, &args, sizeof(SShellArguments)); - taosNetTest(&cmdArgs); + taosNetTest(args.netTestRole, args.host, args.port, args.pktLen); exit(0); } diff --git a/src/kit/shell/src/shellWindows.c b/src/kit/shell/src/shellWindows.c index ce98681391..64eed9eace 100644 --- a/src/kit/shell/src/shellWindows.c +++ b/src/kit/shell/src/shellWindows.c @@ -35,6 +35,8 @@ void printHelp() { printf("%s%s%s\n", indent, indent, "The user auth to use when connecting to the server."); printf("%s%s\n", indent, "-c"); printf("%s%s%s\n", indent, indent, "Configuration directory."); + printf("%s%s\n", indent, "-C"); + printf("%s%s%s\n", indent, indent, "Dump configuration."); printf("%s%s\n", indent, "-s"); printf("%s%s%s\n", indent, indent, "Commands to run without enter the shell."); printf("%s%s\n", indent, "-r"); @@ -45,6 +47,10 @@ void printHelp() { printf("%s%s%s\n", indent, indent, "Database to use when connecting to the server."); printf("%s%s\n", indent, "-t"); printf("%s%s%s\n", indent, indent, "Time zone of the shell, default is local."); + printf("%s%s\n", indent, "-n"); + printf("%s%s%s\n", indent, indent, "Net role when network connectivity test, default is startup, options: client|server|rpc|startup."); + printf("%s%s\n", indent, "-l"); + printf("%s%s%s\n", indent, indent, "Packet length used for net test, default is 1000 bytes."); exit(EXIT_SUCCESS); } @@ -100,6 +106,8 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { fprintf(stderr, "Option -c requires an argument\n"); exit(EXIT_FAILURE); } + } else if (strcmp(argv[i], "-C") == 0) { + arguments->dump_config = true; } else if (strcmp(argv[i], "-s") == 0) { if (i < argc - 1) { arguments->commands = argv[++i]; @@ -137,6 +145,24 @@ void shellParseArgument(int argc, char *argv[], SShellArguments *arguments) { exit(EXIT_FAILURE); } } + // For time zone + else if (strcmp(argv[i], "-n") == 0) { + if (i < argc - 1) { + arguments->netTestRole = argv[++i]; + } else { + fprintf(stderr, "option -n requires an argument\n"); 
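The reworked shellPrintNChar above holds back the characters that would land in the last three display columns; if the string turns out to be wider than the cell it prints up to three dots (one at a time, since the cell itself may be only one or two columns wide), otherwise it flushes the held-back tail and pads the cell with spaces. A stripped-down sketch of that policy for plain ASCII, where every character is one column wide — the real function works on wide characters via mbtowc/wcwidth, so treat this only as an illustration of the truncation rule:

    #include <stdio.h>
    #include <string.h>

    /* Print at most `width` columns of `str`: pad with spaces when it fits,
     * end with dots when it does not (ASCII-only sketch). */
    static void printFixedWidth(const char *str, int width) {
      int len = (int)strlen(str);
      if (len <= width) {
        printf("%s%*s", str, width - len, "");
        return;
      }
      int keep = width > 3 ? width - 3 : 0;            /* columns kept before the dots */
      for (int i = 0; i < keep; ++i) putchar(str[i]);
      for (int i = keep; i < width; ++i) putchar('.'); /* width may be 1 or 2 */
    }

    int main() {
      printFixedWidth("short", 10);          printf("|\n");
      printFixedWidth("averylongvalue", 10); printf("|\n");   /* prints averylo...| */
      return 0;
    }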
+ exit(EXIT_FAILURE); + } + } + // For time zone + else if (strcmp(argv[i], "-l") == 0) { + if (i < argc - 1) { + arguments->pktLen = atoi(argv[++i]); + } else { + fprintf(stderr, "option -l requires an argument\n"); + exit(EXIT_FAILURE); + } + } // For temperory command TODO else if (strcmp(argv[i], "--help") == 0) { printHelp(); diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index a7258c9724..588d21574b 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -14,6 +14,9 @@ */ #include +#include +#include + #include "os.h" #include "taos.h" #include "taosdef.h" @@ -64,7 +67,10 @@ enum _show_tables_index { TSDB_SHOW_TABLES_NAME_INDEX, TSDB_SHOW_TABLES_CREATED_TIME_INDEX, TSDB_SHOW_TABLES_COLUMNS_INDEX, - TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_SHOW_TABLES_METRIC_INDEX, + TSDB_SHOW_TABLES_UID_INDEX, + TSDB_SHOW_TABLES_TID_INDEX, + TSDB_SHOW_TABLES_VGID_INDEX, TSDB_MAX_SHOW_TABLES }; @@ -92,24 +98,27 @@ typedef struct { extern char version[]; typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; - int32_t tables; + char name[TSDB_DB_NAME_LEN + 1]; + char create_time[32]; + int32_t ntables; int32_t vgroups; - int16_t replications; + int16_t replica; int16_t quorum; - int16_t daysPerFile; - int16_t daysToKeep; - int16_t daysToKeep1; - int16_t daysToKeep2; - int32_t cacheBlockSize; //MB - int32_t totalBlocks; - int32_t minRowsPerFileBlock; - int32_t maxRowsPerFileBlock; - int8_t walLevel; - int32_t fsyncPeriod; - int8_t compression; - int8_t precision; // time resolution + int16_t days; + char keeplist[32]; + //int16_t daysToKeep; + //int16_t daysToKeep1; + //int16_t daysToKeep2; + int32_t cache; //MB + int32_t blocks; + int32_t minrows; + int32_t maxrows; + int8_t wallevel; + int32_t fsync; + int8_t comp; + char precision[8]; // time resolution int8_t update; + char status[16]; } SDbInfo; typedef struct { @@ -128,8 +137,17 @@ typedef struct { int32_t totalThreads; char dbName[TSDB_TABLE_NAME_LEN + 1]; void *taosCon; + int64_t rowsOfDumpOut; + int64_t tablesOfDumpOut; } SThreadParaObj; +typedef struct { + int64_t totalRowsOfDumpOut; + int64_t totalChildTblsOfDumpOut; + int32_t totalSuperTblsOfDumpOut; + int32_t totalDatabasesOfDumpOut; +} resultStatistics; + static int64_t totalDumpOutRows = 0; SDbInfo **dbInfos = NULL; @@ -167,6 +185,7 @@ static struct argp_option options[] = { // input/output file {"outpath", 'o', "OUTPATH", 0, "Output file path.", 1}, {"inpath", 'i', "INPATH", 0, "Input file path.", 1}, + {"resultFile", 'r', "RESULTFILE", 0, "DumpOut/In Result file path and name.", 1}, #ifdef _TD_POWER_ {"config", 'c', "CONFIG_DIR", 0, "Configure directory. Default is /etc/power/taos.cfg.", 1}, #else @@ -200,6 +219,8 @@ struct arguments { // output file char outpath[TSDB_FILENAME_LEN+1]; char inpath[TSDB_FILENAME_LEN+1]; + // result file + char *resultFile; char *encode; // dump unit option bool all_databases; @@ -274,6 +295,9 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { tstrncpy(arguments->inpath, full_path.we_wordv[0], TSDB_FILENAME_LEN); wordfree(&full_path); break; + case 'r': + arguments->resultFile = arg; + break; case 'c': if (wordexp(arg, &full_path, 0) != 0) { fprintf(stderr, "Invalid path %s\n", arg); @@ -343,19 +367,22 @@ static error_t parse_opt(int key, char *arg, struct argp_state *state) { /* Our argp parser. 
*/ static struct argp argp = {options, parse_opt, args_doc, doc}; +static resultStatistics g_resultStatistics = {0}; +static FILE *g_fpOfResult = NULL; +static int g_numOfCores = 1; int taosDumpOut(struct arguments *arguments); int taosDumpIn(struct arguments *arguments); void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp); int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *taosCon); -int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon); -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp); -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp); -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon); -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon); +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName); +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName); +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName); +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName); +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName); int taosCheckParam(struct arguments *arguments); void taosFreeDbInfos(); -static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName); +static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName); struct arguments tsArguments = { // connection option @@ -371,7 +398,8 @@ struct arguments tsArguments = { 0, // outpath and inpath "", - "", + "", + "./dump_result.txt", NULL, // dump unit option false, @@ -392,18 +420,34 @@ struct arguments tsArguments = { 0, false }; - -int queryDB(TAOS *taos, char *command) { - TAOS_RES *pSql = NULL; + +static int queryDbImpl(TAOS *taos, char *command) { + int i; + TAOS_RES *res = NULL; int32_t code = -1; + + for (i = 0; i < 5; i++) { + if (NULL != res) { + taos_free_result(res); + res = NULL; + } - pSql = taos_query(taos, command); - code = taos_errno(pSql); - if (code) { - fprintf(stderr, "sql error: %s, reason:%s\n", command, taos_errstr(pSql)); - } - taos_free_result(pSql); - return code; + res = taos_query(taos, command); + code = taos_errno(res); + if (0 == code) { + break; + } + } + + if (code != 0) { + fprintf(stderr, "Failed to run <%s>, reason: %s\n", command, taos_errstr(res)); + taos_free_result(res); + //taos_close(taos); + return -1; + } + + taos_free_result(res); + return 0; } int main(int argc, char *argv[]) { @@ -430,6 +474,7 @@ int main(int argc, char *argv[]) { printf("mysqlFlag: %d\n", tsArguments.mysqlFlag); printf("outpath: %s\n", tsArguments.outpath); printf("inpath: %s\n", tsArguments.inpath); + printf("resultFile: %s\n", tsArguments.resultFile); printf("encode: %s\n", tsArguments.encode); printf("all_databases: %d\n", tsArguments.all_databases); printf("databases: %d\n", tsArguments.databases); @@ -459,13 +504,80 @@ int main(int argc, char *argv[]) { if (taosCheckParam(&tsArguments) < 0) { exit(EXIT_FAILURE); } + + g_fpOfResult = fopen(tsArguments.resultFile, "a"); + if (NULL == g_fpOfResult) { + fprintf(stderr, "Failed to open %s for save result\n", tsArguments.resultFile); + return 1; + }; - if (tsArguments.isDumpIn) { - if (taosDumpIn(&tsArguments) < 0) return -1; - } else { - if 
(taosDumpOut(&tsArguments) < 0) return -1; + fprintf(g_fpOfResult, "#############################################################################\n"); + fprintf(g_fpOfResult, "============================== arguments config =============================\n"); + { + fprintf(g_fpOfResult, "host: %s\n", tsArguments.host); + fprintf(g_fpOfResult, "user: %s\n", tsArguments.user); + fprintf(g_fpOfResult, "password: %s\n", tsArguments.password); + fprintf(g_fpOfResult, "port: %u\n", tsArguments.port); + fprintf(g_fpOfResult, "cversion: %s\n", tsArguments.cversion); + fprintf(g_fpOfResult, "mysqlFlag: %d\n", tsArguments.mysqlFlag); + fprintf(g_fpOfResult, "outpath: %s\n", tsArguments.outpath); + fprintf(g_fpOfResult, "inpath: %s\n", tsArguments.inpath); + fprintf(g_fpOfResult, "resultFile: %s\n", tsArguments.resultFile); + fprintf(g_fpOfResult, "encode: %s\n", tsArguments.encode); + fprintf(g_fpOfResult, "all_databases: %d\n", tsArguments.all_databases); + fprintf(g_fpOfResult, "databases: %d\n", tsArguments.databases); + fprintf(g_fpOfResult, "schemaonly: %d\n", tsArguments.schemaonly); + fprintf(g_fpOfResult, "with_property: %d\n", tsArguments.with_property); + fprintf(g_fpOfResult, "start_time: %" PRId64 "\n", tsArguments.start_time); + fprintf(g_fpOfResult, "end_time: %" PRId64 "\n", tsArguments.end_time); + fprintf(g_fpOfResult, "data_batch: %d\n", tsArguments.data_batch); + fprintf(g_fpOfResult, "max_sql_len: %d\n", tsArguments.max_sql_len); + fprintf(g_fpOfResult, "table_batch: %d\n", tsArguments.table_batch); + fprintf(g_fpOfResult, "thread_num: %d\n", tsArguments.thread_num); + fprintf(g_fpOfResult, "allow_sys: %d\n", tsArguments.allow_sys); + fprintf(g_fpOfResult, "abort: %d\n", tsArguments.abort); + fprintf(g_fpOfResult, "isDumpIn: %d\n", tsArguments.isDumpIn); + fprintf(g_fpOfResult, "arg_list_len: %d\n", tsArguments.arg_list_len); + + for (int32_t i = 0; i < tsArguments.arg_list_len; i++) { + fprintf(g_fpOfResult, "arg_list[%d]: %s\n", i, tsArguments.arg_list[i]); + } } + g_numOfCores = (int32_t)sysconf(_SC_NPROCESSORS_ONLN); + + time_t tTime = time(NULL); + struct tm tm = *localtime(&tTime); + + if (tsArguments.isDumpIn) { + fprintf(g_fpOfResult, "============================== DUMP IN ============================== \n"); + fprintf(g_fpOfResult, "# DumpIn start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + if (taosDumpIn(&tsArguments) < 0) { + fprintf(g_fpOfResult, "\n"); + fclose(g_fpOfResult); + return -1; + } + } else { + fprintf(g_fpOfResult, "============================== DUMP OUT ============================== \n"); + fprintf(g_fpOfResult, "# DumpOut start time: %d-%02d-%02d %02d:%02d:%02d\n", tm.tm_year + 1900, tm.tm_mon + 1, + tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec); + if (taosDumpOut(&tsArguments) < 0) { + fprintf(g_fpOfResult, "\n"); + fclose(g_fpOfResult); + return -1; + } + + fprintf(g_fpOfResult, "\n============================== TOTAL STATISTICS ============================== \n"); + fprintf(g_fpOfResult, "# total database count: %d\n", g_resultStatistics.totalDatabasesOfDumpOut); + fprintf(g_fpOfResult, "# total super table count: %d\n", g_resultStatistics.totalSuperTblsOfDumpOut); + fprintf(g_fpOfResult, "# total child table count: %"PRId64"\n", g_resultStatistics.totalChildTblsOfDumpOut); + fprintf(g_fpOfResult, "# total row count: %"PRId64"\n", g_resultStatistics.totalRowsOfDumpOut); + } + + fprintf(g_fpOfResult, "\n"); + fclose(g_fpOfResult); + return 0; } @@ -586,64 +698,97 @@ 
int32_t taosSaveTableOfMetricToTempFile(TAOS *taosCon, char* metric, struct argu sprintf(tmpCommand, "select tbname from %s", metric); - TAOS_RES *result = taos_query(taosCon, tmpCommand); - int32_t code = taos_errno(result); + TAOS_RES *res = taos_query(taosCon, tmpCommand); + int32_t code = taos_errno(res); if (code != 0) { fprintf(stderr, "failed to run command %s\n", tmpCommand); free(tmpCommand); - taos_free_result(result); + taos_free_result(res); + return -1; + } + free(tmpCommand); + + char tmpBuf[TSDB_FILENAME_LEN + 1]; + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, ".select-tbname.tmp"); + fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + taos_free_result(res); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(result); - - int32_t numOfTable = 0; - int32_t numOfThread = *totalNumOfThread; - char tmpFileName[TSDB_FILENAME_LEN + 1]; - while ((row = taos_fetch_row(result)) != NULL) { - if (0 == numOfTable) { - memset(tmpFileName, 0, TSDB_FILENAME_LEN); - sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); - fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); - taos_free_result(result); - for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); - (void)remove(tmpFileName); - } - free(tmpCommand); - return -1; - } - - numOfThread++; - } + TAOS_FIELD *fields = taos_fetch_fields(res); + int32_t numOfTable = 0; + while ((row = taos_fetch_row(res)) != NULL) { + memset(&tableRecord, 0, sizeof(STableRecord)); tstrncpy(tableRecord.name, (char *)row[0], fields[0].bytes); tstrncpy(tableRecord.metric, metric, TSDB_TABLE_NAME_LEN); - - taosWrite(fd, &tableRecord, sizeof(STableRecord)); - + + taosWrite(fd, &tableRecord, sizeof(STableRecord)); numOfTable++; - - if (numOfTable >= arguments->table_batch) { - numOfTable = 0; - close(fd); - fd = -1; - } } + taos_free_result(res); + lseek(fd, 0, SEEK_SET); + + int maxThreads = arguments->thread_num; + int tableOfPerFile ; + if (numOfTable <= arguments->thread_num) { + tableOfPerFile = 1; + maxThreads = numOfTable; + } else { + tableOfPerFile = numOfTable / arguments->thread_num; + if (0 != numOfTable % arguments->thread_num) { + tableOfPerFile += 1; + } + } + + char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); + if (NULL == tblBuf){ + fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); + close(fd); + return -1; + } + + int32_t numOfThread = *totalNumOfThread; + int subFd = -1; + for (; numOfThread < maxThreads; numOfThread++) { + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); + subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (subFd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } + sprintf(tmpBuf, ".select-tbname.tmp"); + (void)remove(tmpBuf); + close(fd); + return -1; + } + + // read tableOfPerFile for fd, write to subFd + ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); + if (readLen <= 0) { + close(subFd); + break; + } + taosWrite(subFd, tblBuf, readLen); + close(subFd); + } + + sprintf(tmpBuf, ".select-tbname.tmp"); + 
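The queryDbImpl helper introduced above replaces the old single-shot queryDB: a failing statement is retried up to five times, the error is reported with taos_errstr, and the result set is always freed. A condensed sketch of the same pattern against the TDengine C client (taos_query / taos_errno / taos_errstr / taos_free_result are the calls already used throughout this file); an outline, not the exact taosdump code:

    #include <stdio.h>
    #include "taos.h"   /* TDengine client header */

    /* Retry a statement a few times before giving up; always free the result. */
    static int runWithRetry(TAOS *taos, const char *sql, int maxRetries) {
      TAOS_RES *res  = NULL;
      int       code = -1;

      for (int i = 0; i < maxRetries; ++i) {
        if (res != NULL) { taos_free_result(res); res = NULL; }
        res  = taos_query(taos, sql);
        code = taos_errno(res);
        if (code == 0) break;
      }

      if (code != 0) {
        fprintf(stderr, "failed to run <%s>, reason: %s\n", sql, taos_errstr(res));
      }
      taos_free_result(res);
      return (code == 0) ? 0 : -1;
    }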
(void)remove(tmpBuf); if (fd >= 0) { close(fd); fd = -1; - } - - taos_free_result(result); + } *totalNumOfThread = numOfThread; - - free(tmpCommand); return 0; } @@ -700,7 +845,7 @@ int taosDumpOut(struct arguments *arguments) { int32_t code = taos_errno(result); if (code != 0) { - fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(taos)); + fprintf(stderr, "failed to run command: %s, reason: %s\n", command, taos_errstr(result)); goto _exit_failure; } @@ -736,27 +881,29 @@ int taosDumpOut(struct arguments *arguments) { } strncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); -#if 0 if (arguments->with_property) { - dbInfos[count]->tables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); + dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); - dbInfos[count]->replications = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); + dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->daysPerFile = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); - dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); - dbInfos[count]->daysToKeep1; - dbInfos[count]->daysToKeep2; - dbInfos[count]->cacheBlockSize = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); - dbInfos[count]->totalBlocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); - dbInfos[count]->minRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); - dbInfos[count]->maxRowsPerFileBlock = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); - dbInfos[count]->walLevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); - dbInfos[count]->fsyncPeriod = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); - dbInfos[count]->compression = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); - dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + + strncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + //dbInfos[count]->daysToKeep = *((int16_t *)row[TSDB_SHOW_DB_KEEP_INDEX]); + //dbInfos[count]->daysToKeep1; + //dbInfos[count]->daysToKeep2; + dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); + dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); + dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); + dbInfos[count]->maxrows = *((int32_t *)row[TSDB_SHOW_DB_MAXROWS_INDEX]); + dbInfos[count]->wallevel = *((int8_t *)row[TSDB_SHOW_DB_WALLEVEL_INDEX]); + dbInfos[count]->fsync = *((int32_t *)row[TSDB_SHOW_DB_FSYNC_INDEX]); + dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); + + strncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + //dbInfos[count]->precision = *((int8_t *)row[TSDB_SHOW_DB_PRECISION_INDEX]); dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); } -#endif count++; if (arguments->databases) { @@ -781,6 +928,8 @@ int taosDumpOut(struct arguments *arguments) { taosDumpDb(dbInfos[0], arguments, fp, taos); } else { // case: taosdump tablex tabley ... 
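Both taosSaveTableOfMetricToTempFile above and taosDumpDb later in this patch first write the whole table list to a single temp file and then split it into one .tables.tmp.N file per worker: the per-file count is the ceiling of numOfTable / thread_num, and when there are fewer tables than threads the thread count is clamped to the table count. The arithmetic in isolation, with a worked case in the comments (the helper name is hypothetical):

    #include <stdint.h>
    #include <stdio.h>

    /* Example: 10 tables, 4 threads -> 3 tables per file (ceil(10/4)), 4 threads;
     *           3 tables, 8 threads -> 1 table per file, only 3 threads used. */
    static void splitTables(int32_t numOfTable, int32_t threadNum,
                            int32_t *tablesPerFile, int32_t *maxThreads) {
      if (numOfTable <= threadNum) {
        *tablesPerFile = 1;
        *maxThreads    = numOfTable;
      } else {
        *tablesPerFile = numOfTable / threadNum;
        if (numOfTable % threadNum != 0) *tablesPerFile += 1;
        *maxThreads = threadNum;
      }
    }

    int main() {
      int32_t perFile = 0, threads = 0;
      splitTables(10, 4, &perFile, &threads);
      printf("10 tables / 4 threads -> %d per file, %d threads\n", perFile, threads);
      return 0;
    }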
taosDumpCreateDbClause(dbInfos[0], arguments->with_property, fp); + fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfos[0]->name); + g_resultStatistics.totalDatabasesOfDumpOut++; sprintf(command, "use %s", dbInfos[0]->name); @@ -796,6 +945,7 @@ int taosDumpOut(struct arguments *arguments) { int32_t totalNumOfThread = 1; // 0: all normal talbe into .tables.tmp.0 int normalTblFd = -1; int32_t retCode; + int superTblCnt = 0 ; for (int i = 1; arguments->arg_list[i]; i++) { if (taosGetTableRecordInfo(arguments->arg_list[i], &tableRecordInfo, taos) < 0) { fprintf(stderr, "input the invalide table %s\n", arguments->arg_list[i]); @@ -803,11 +953,17 @@ int taosDumpOut(struct arguments *arguments) { } if (tableRecordInfo.isMetric) { // dump all table of this metric - (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name); + if (0 == ret) { + superTblCnt++; + } retCode = taosSaveTableOfMetricToTempFile(taos, tableRecordInfo.tableRecord.metric, arguments, &totalNumOfThread); } else { if (tableRecordInfo.tableRecord.metric[0] != '\0') { // dump this sub table and it's metric - (void)taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos); + int ret = taosDumpStable(tableRecordInfo.tableRecord.metric, fp, taos, dbInfos[0]->name); + if (0 == ret) { + superTblCnt++; + } } retCode = taosSaveAllNormalTableToTempFile(taos, tableRecordInfo.tableRecord.name, tableRecordInfo.tableRecord.metric, &normalTblFd); } @@ -819,13 +975,17 @@ int taosDumpOut(struct arguments *arguments) { goto _clean_tmp_file; } } + + // TODO: save dump super table into result_output.txt + fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); + g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; if (-1 != normalTblFd){ taosClose(normalTblFd); } // start multi threads to dumpout - taosStartDumpOutWorkThreads(arguments, totalNumOfThread, dbInfos[0]->name); + taosStartDumpOutWorkThreads(taos, arguments, totalNumOfThread, dbInfos[0]->name); char tmpFileName[TSDB_FILENAME_LEN + 1]; _clean_tmp_file: @@ -855,41 +1015,27 @@ _exit_failure: return -1; } -int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { +int taosGetTableDes(char* dbName, char *table, STableDef *tableDes, TAOS* taosCon, bool isSuperTable) { TAOS_ROW row = NULL; - TAOS_RES *tmpResult = NULL; + TAOS_RES* res = NULL; int count = 0; - char* tempCommand = (char *)malloc(COMMAND_SIZE); - if (tempCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } - - char* tbuf = (char *)malloc(COMMAND_SIZE); - if (tbuf == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - free(tempCommand); - return -1; - } - - sprintf(tempCommand, "describe %s", table); + char sqlstr[COMMAND_SIZE]; + sprintf(sqlstr, "describe %s.%s;", dbName, table); - tmpResult = taos_query(taosCon, tempCommand); - int32_t code = taos_errno(tmpResult); + res = taos_query(taosCon, sqlstr); + int32_t code = taos_errno(res); if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tempCommand); - free(tempCommand); - free(tbuf); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + TAOS_FIELD *fields = taos_fetch_fields(res); tstrncpy(tableDes->name, table, TSDB_COL_NAME_LEN); - while ((row = taos_fetch_row(tmpResult)) != NULL) { + while ((row = 
taos_fetch_row(res)) != NULL) { strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], @@ -901,12 +1047,10 @@ int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSupe count++; } - taos_free_result(tmpResult); - tmpResult = NULL; + taos_free_result(res); + res = NULL; if (isSuperTable) { - free(tempCommand); - free(tbuf); return count; } @@ -915,37 +1059,33 @@ int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSupe if (strcmp(tableDes->cols[i].note, "TAG") != 0) continue; - sprintf(tempCommand, "select %s from %s", tableDes->cols[i].field, table); + sprintf(sqlstr, "select %s from %s.%s", tableDes->cols[i].field, dbName, table); - tmpResult = taos_query(taosCon, tempCommand); - code = taos_errno(tmpResult); + res = taos_query(taosCon, sqlstr); + code = taos_errno(res); if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tempCommand); - free(tempCommand); - free(tbuf); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } - fields = taos_fetch_fields(tmpResult); + fields = taos_fetch_fields(res); - row = taos_fetch_row(tmpResult); + row = taos_fetch_row(res); if (NULL == row) { - fprintf(stderr, " fetch failed to run command %s\n", tempCommand); - free(tempCommand); - free(tbuf); - taos_free_result(tmpResult); + fprintf(stderr, " fetch failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } if (row[0] == NULL) { sprintf(tableDes->cols[i].note, "%s", "NULL"); - taos_free_result(tmpResult); - tmpResult = NULL; + taos_free_result(res); + res = NULL; continue; } - int32_t* length = taos_fetch_lengths(tmpResult); + int32_t* length = taos_fetch_lengths(res); //int32_t* length = taos_fetch_lengths(tmpResult); switch (fields[0].type) { @@ -970,18 +1110,22 @@ int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSupe case TSDB_DATA_TYPE_DOUBLE: sprintf(tableDes->cols[i].note, "%f", GET_DOUBLE_VAL(row[0])); break; - case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_BINARY: { memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); tableDes->cols[i].note[0] = '\''; + char tbuf[COMMAND_SIZE]; converStringToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); char* pstr = stpcpy(&(tableDes->cols[i].note[1]), tbuf); *(pstr++) = '\''; break; - case TSDB_DATA_TYPE_NCHAR: + } + case TSDB_DATA_TYPE_NCHAR: { memset(tableDes->cols[i].note, 0, sizeof(tableDes->cols[i].note)); + char tbuf[COMMAND_SIZE]; convertNCharToReadable((char *)row[0], length[0], tbuf, COMMAND_SIZE); sprintf(tableDes->cols[i].note, "\'%s\'", tbuf); break; + } case TSDB_DATA_TYPE_TIMESTAMP: sprintf(tableDes->cols[i].note, "%" PRId64 "", *(int64_t *)row[0]); #if 0 @@ -1001,17 +1145,14 @@ int taosGetTableDes(char *table, STableDef *tableDes, TAOS* taosCon, bool isSupe break; } - taos_free_result(tmpResult); - tmpResult = NULL; + taos_free_result(res); + res = NULL; } - free(tempCommand); - free(tbuf); - return count; } -int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon) { +int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FILE *fp, TAOS* taosCon, char* dbName) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * 
TSDB_MAX_COLUMNS); @@ -1030,7 +1171,7 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI memset(tableDes, 0, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); */ - count = taosGetTableDes(table, tableDes, taosCon, false); + count = taosGetTableDes(dbName, table, tableDes, taosCon, false); if (count < 0) { free(tableDes); @@ -1038,10 +1179,10 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI } // create child-table using super-table - taosDumpCreateMTableClause(tableDes, metric, count, fp); + taosDumpCreateMTableClause(tableDes, metric, count, fp, dbName); } else { // dump table definition - count = taosGetTableDes(table, tableDes, taosCon, false); + count = taosGetTableDes(dbName, table, tableDes, taosCon, false); if (count < 0) { free(tableDes); @@ -1049,39 +1190,28 @@ int32_t taosDumpTable(char *table, char *metric, struct arguments *arguments, FI } // create normal-table or super-table - taosDumpCreateTableClause(tableDes, count, fp); + taosDumpCreateTableClause(tableDes, count, fp, dbName); } free(tableDes); - return taosDumpTableData(fp, table, arguments, taosCon); + return taosDumpTableData(fp, table, arguments, taosCon, dbName); } void taosDumpCreateDbClause(SDbInfo *dbInfo, bool isDumpProperty, FILE *fp) { + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - char* tmpCommand = (char *)malloc(COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return; - } - - char *pstr = tmpCommand; - - pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s", dbInfo->name); + char *pstr = sqlstr; + pstr += sprintf(pstr, "CREATE DATABASE IF NOT EXISTS %s ", dbInfo->name); if (isDumpProperty) { - #if 0 pstr += sprintf(pstr, - "TABLES %d vgroups %d REPLICA %d quorum %d DAYS %d KEEP %d CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION %s UPDATE %d", - dbInfo->tables, dbInfo->vgroups, dbInfo->replications, dbInfo->quorum, dbInfo->daysPerFile, dbInfo->daysToKeep, dbInfo->cacheBlockSize, - dbInfo->totalBlocks, dbInfo->minRowsPerFileBlock, dbInfo->maxRowsPerFileBlock, dbInfo->walLevel, dbInfo->fsyncPeriod, dbInfo->compression, - dbInfo->precision, dbInfo->update); - #endif + "TABLES %d VGROUPS %d REPLICA %d QUORUM %d DAYS %d KEEP %s CACHE %d BLOCKS %d MINROWS %d MAXROWS %d WALLEVEL %d FYNC %d COMP %d PRECISION '%s' UPDATE %d", + dbInfo->ntables, dbInfo->vgroups, dbInfo->replica, dbInfo->quorum, dbInfo->days, dbInfo->keeplist, dbInfo->cache, + dbInfo->blocks, dbInfo->minrows, dbInfo->maxrows, dbInfo->wallevel, dbInfo->fsync, dbInfo->comp, dbInfo->precision, dbInfo->update); } pstr += sprintf(pstr, ";"); - - fprintf(fp, "%s\n\n", tmpCommand); - free(tmpCommand); + fprintf(fp, "%s\n\n", sqlstr); } void* taosDumpOutWorkThreadFp(void *arg) @@ -1090,34 +1220,34 @@ void* taosDumpOutWorkThreadFp(void *arg) STableRecord tableRecord; int fd; - char tmpFileName[TSDB_FILENAME_LEN*4] = {0}; - sprintf(tmpFileName, ".tables.tmp.%d", pThread->threadIndex); - fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + char tmpBuf[TSDB_FILENAME_LEN*4] = {0}; + sprintf(tmpBuf, ".tables.tmp.%d", pThread->threadIndex); + fd = open(tmpBuf, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); if (fd == -1) { - fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpFileName); + fprintf(stderr, "taosDumpTableFp() failed to open temp file: %s\n", tmpBuf); return NULL; } FILE *fp = NULL; - memset(tmpFileName, 0, TSDB_FILENAME_LEN + 128); + memset(tmpBuf, 0, 
TSDB_FILENAME_LEN + 128); if (tsArguments.outpath[0] != 0) { - sprintf(tmpFileName, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); + sprintf(tmpBuf, "%s/%s.tables.%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex); } else { - sprintf(tmpFileName, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); + sprintf(tmpBuf, "%s.tables.%d.sql", pThread->dbName, pThread->threadIndex); } - fp = fopen(tmpFileName, "w"); + fp = fopen(tmpBuf, "w"); if (fp == NULL) { - fprintf(stderr, "failed to open file %s\n", tmpFileName); + fprintf(stderr, "failed to open file %s\n", tmpBuf); close(fd); return NULL; } - memset(tmpFileName, 0, TSDB_FILENAME_LEN); - sprintf(tmpFileName, "use %s", pThread->dbName); + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, "use %s", pThread->dbName); - TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpFileName); + TAOS_RES* tmpResult = taos_query(pThread->taosCon, tmpBuf); int32_t code = taos_errno(tmpResult); if (code != 0) { fprintf(stderr, "invalid database %s\n", pThread->dbName); @@ -1127,11 +1257,47 @@ void* taosDumpOutWorkThreadFp(void *arg) return NULL; } + int fileNameIndex = 1; + int tablesInOneFile = 0; + int64_t lastRowsPrint = 5000000; fprintf(fp, "USE %s;\n\n", pThread->dbName); while (1) { ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon); + + int ret = taosDumpTable(tableRecord.name, tableRecord.metric, &tsArguments, fp, pThread->taosCon, pThread->dbName); + if (ret >= 0) { + // TODO: sum table count and table rows by self + pThread->tablesOfDumpOut++; + pThread->rowsOfDumpOut += ret; + + if (pThread->rowsOfDumpOut >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from database %s\n", pThread->rowsOfDumpOut, pThread->dbName); + lastRowsPrint += 5000000; + } + + tablesInOneFile++; + if (tablesInOneFile >= tsArguments.table_batch) { + fclose(fp); + tablesInOneFile = 0; + + memset(tmpBuf, 0, TSDB_FILENAME_LEN + 128); + if (tsArguments.outpath[0] != 0) { + sprintf(tmpBuf, "%s/%s.tables.%d-%d.sql", tsArguments.outpath, pThread->dbName, pThread->threadIndex, fileNameIndex); + } else { + sprintf(tmpBuf, "%s.tables.%d-%d.sql", pThread->dbName, pThread->threadIndex, fileNameIndex); + } + fileNameIndex++; + + fp = fopen(tmpBuf, "w"); + if (fp == NULL) { + fprintf(stderr, "failed to open file %s\n", tmpBuf); + close(fd); + taos_free_result(tmpResult); + return NULL; + } + } + } } taos_free_result(tmpResult); @@ -1141,21 +1307,18 @@ void* taosDumpOutWorkThreadFp(void *arg) return NULL; } -static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfThread, char *dbName) +static void taosStartDumpOutWorkThreads(void* taosCon, struct arguments* args, int32_t numOfThread, char *dbName) { pthread_attr_t thattr; SThreadParaObj *threadObj = (SThreadParaObj *)calloc(numOfThread, sizeof(SThreadParaObj)); for (int t = 0; t < numOfThread; ++t) { SThreadParaObj *pThread = threadObj + t; + pThread->rowsOfDumpOut = 0; + pThread->tablesOfDumpOut = 0; pThread->threadIndex = t; pThread->totalThreads = numOfThread; tstrncpy(pThread->dbName, dbName, TSDB_TABLE_NAME_LEN); - pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); - - if (pThread->taosCon == NULL) { - fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); - exit(0); - } + pThread->taosCon = 
taosCon; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); @@ -1170,15 +1333,24 @@ static void taosStartDumpOutWorkThreads(struct arguments* args, int32_t numOfTh pthread_join(threadObj[t].threadID, NULL); } + // TODO: sum all thread dump table count and rows of per table, then save into result_output.txt + int64_t totalRowsOfDumpOut = 0; + int64_t totalChildTblsOfDumpOut = 0; for (int32_t t = 0; t < numOfThread; ++t) { - taos_close(threadObj[t].taosCon); + totalChildTblsOfDumpOut += threadObj[t].tablesOfDumpOut; + totalRowsOfDumpOut += threadObj[t].rowsOfDumpOut; } + + fprintf(g_fpOfResult, "# child table counter: %"PRId64"\n", totalChildTblsOfDumpOut); + fprintf(g_fpOfResult, "# row counter: %"PRId64"\n", totalRowsOfDumpOut); + g_resultStatistics.totalChildTblsOfDumpOut += totalChildTblsOfDumpOut; + g_resultStatistics.totalRowsOfDumpOut += totalRowsOfDumpOut; free(threadObj); } -int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { +int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon, char* dbName) { int count = 0; STableDef *tableDes = (STableDef *)calloc(1, sizeof(STableDef) + sizeof(SColDes) * TSDB_MAX_COLUMNS); @@ -1187,15 +1359,15 @@ int32_t taosDumpStable(char *table, FILE *fp, TAOS* taosCon) { exit(-1); } - count = taosGetTableDes(table, tableDes, taosCon, true); + count = taosGetTableDes(dbName, table, tableDes, taosCon, true); if (count < 0) { free(tableDes); - fprintf(stderr, "failed to get stable schema\n"); + fprintf(stderr, "failed to get stable[%s] schema\n", table); exit(-1); } - taosDumpCreateTableClause(tableDes, count, fp); + taosDumpCreateTableClause(tableDes, count, fp, dbName); free(tableDes); return 0; @@ -1207,38 +1379,19 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) TAOS_ROW row; int fd = -1; STableRecord tableRecord; + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; - char* tmpCommand = (char *)malloc(COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - exit(-1); - } - - sprintf(tmpCommand, "use %s", dbName); + sprintf(sqlstr, "show %s.stables", dbName); - TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); - int32_t code = taos_errno(tmpResult); + TAOS_RES* res = taos_query(taosCon, sqlstr); + int32_t code = taos_errno(res); if (code != 0) { - fprintf(stderr, "invalid database %s, error: %s\n", dbName, taos_errstr(taosCon)); - free(tmpCommand); - taos_free_result(tmpResult); - exit(-1); - } - - taos_free_result(tmpResult); - - sprintf(tmpCommand, "show stables"); - - tmpResult = taos_query(taosCon, tmpCommand); - code = taos_errno(tmpResult); - if (code != 0) { - fprintf(stderr, "failed to run command %s, error: %s\n", tmpCommand, taos_errstr(taosCon)); - free(tmpCommand); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason: %s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); exit(-1); } - TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + TAOS_FIELD *fields = taos_fetch_fields(res); char tmpFileName[TSDB_FILENAME_LEN + 1]; memset(tmpFileName, 0, TSDB_FILENAME_LEN); @@ -1246,32 +1399,38 @@ int32_t taosDumpCreateSuperTableClause(TAOS* taosCon, char* dbName, FILE *fp) fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); if (fd == -1) { fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); - taos_free_result(tmpResult); - free(tmpCommand); + taos_free_result(res); (void)remove(".stables.tmp"); exit(-1); } - while ((row = taos_fetch_row(tmpResult)) != NULL) { + 
while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); strncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); taosWrite(fd, &tableRecord, sizeof(STableRecord)); } - taos_free_result(tmpResult); + taos_free_result(res); (void)lseek(fd, 0, SEEK_SET); + int superTblCnt = 0; while (1) { ssize_t readLen = read(fd, &tableRecord, sizeof(STableRecord)); if (readLen <= 0) break; - (void)taosDumpStable(tableRecord.name, fp, taosCon); + int ret = taosDumpStable(tableRecord.name, fp, taosCon, dbName); + if (0 == ret) { + superTblCnt++; + } } + // TODO: save dump super table into result_output.txt + fprintf(g_fpOfResult, "# super table counter: %d\n", superTblCnt); + g_resultStatistics.totalSuperTblsOfDumpOut += superTblCnt; + close(fd); (void)remove(".stables.tmp"); - free(tmpCommand); return 0; } @@ -1282,111 +1441,126 @@ int taosDumpDb(SDbInfo *dbInfo, struct arguments *arguments, FILE *fp, TAOS *tao STableRecord tableRecord; taosDumpCreateDbClause(dbInfo, arguments->with_property, fp); - - char* tmpCommand = (char *)malloc(COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; - } - - sprintf(tmpCommand, "use %s", dbInfo->name); - TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); - int32_t code = taos_errno(tmpResult); - if (code != 0) { - fprintf(stderr, "invalid database %s\n", dbInfo->name); - free(tmpCommand); - taos_free_result(tmpResult); - return -1; - } - taos_free_result(tmpResult); + fprintf(g_fpOfResult, "\n#### database: %s\n", dbInfo->name); + g_resultStatistics.totalDatabasesOfDumpOut++; + + char sqlstr[TSDB_MAX_SQL_LEN] = {0}; fprintf(fp, "USE %s;\n\n", dbInfo->name); (void)taosDumpCreateSuperTableClause(taosCon, dbInfo->name, fp); - sprintf(tmpCommand, "show tables"); + sprintf(sqlstr, "show %s.tables", dbInfo->name); - tmpResult = taos_query(taosCon, tmpCommand); - code = taos_errno(tmpResult); + TAOS_RES* res = taos_query(taosCon, sqlstr); + int code = taos_errno(res); if (code != 0) { - fprintf(stderr, "failed to run command %s\n", tmpCommand); - free(tmpCommand); - taos_free_result(tmpResult); + fprintf(stderr, "failed to run command <%s>, reason:%s\n", sqlstr, taos_errstr(res)); + taos_free_result(res); return -1; } - TAOS_FIELD *fields = taos_fetch_fields(tmpResult); + char tmpBuf[TSDB_FILENAME_LEN + 1]; + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, ".show-tables.tmp"); + fd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (fd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + taos_free_result(res); + return -1; + } - int32_t numOfTable = 0; - int32_t numOfThread = 0; - char tmpFileName[TSDB_FILENAME_LEN + 1]; - while ((row = taos_fetch_row(tmpResult)) != NULL) { - if (0 == numOfTable) { - memset(tmpFileName, 0, TSDB_FILENAME_LEN); - sprintf(tmpFileName, ".tables.tmp.%d", numOfThread); - fd = open(tmpFileName, O_RDWR | O_CREAT, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); - if (fd == -1) { - fprintf(stderr, "failed to open temp file: %s\n", tmpFileName); - taos_free_result(tmpResult); - for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); - (void)remove(tmpFileName); - } - free(tmpCommand); - return -1; - } - - numOfThread++; - } + TAOS_FIELD *fields = taos_fetch_fields(res); + int32_t numOfTable = 0; + while ((row = taos_fetch_row(res)) != NULL) { memset(&tableRecord, 0, sizeof(STableRecord)); 
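taosStartDumpOutWorkThreads above no longer opens one connection per worker; every thread reuses the taosCon handle passed in, and after pthread_join the per-thread tablesOfDumpOut/rowsOfDumpOut counters are folded into the global result statistics written to the result file. The join-and-accumulate part in isolation — a sketch assuming a SThreadParaObj-like record, not the full dump logic:

    #include <inttypes.h>
    #include <pthread.h>
    #include <stdio.h>

    typedef struct {
      pthread_t threadID;
      int64_t   rowsOfDumpOut;
      int64_t   tablesOfDumpOut;
    } SWorker;

    /* Wait for every dump-out worker, then add its counters to the run totals. */
    static void joinAndAccumulate(SWorker *workers, int n,
                                  int64_t *totalRows, int64_t *totalTables) {
      for (int t = 0; t < n; ++t) {
        pthread_join(workers[t].threadID, NULL);
      }
      for (int t = 0; t < n; ++t) {
        *totalTables += workers[t].tablesOfDumpOut;
        *totalRows   += workers[t].rowsOfDumpOut;
      }
      printf("# child table counter: %" PRId64 "\n", *totalTables);
      printf("# row counter: %" PRId64 "\n", *totalRows);
    }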
tstrncpy(tableRecord.name, (char *)row[TSDB_SHOW_TABLES_NAME_INDEX], fields[TSDB_SHOW_TABLES_NAME_INDEX].bytes); tstrncpy(tableRecord.metric, (char *)row[TSDB_SHOW_TABLES_METRIC_INDEX], fields[TSDB_SHOW_TABLES_METRIC_INDEX].bytes); - + taosWrite(fd, &tableRecord, sizeof(STableRecord)); - + numOfTable++; - - if (numOfTable >= arguments->table_batch) { - numOfTable = 0; - close(fd); - fd = -1; - } } + taos_free_result(res); + lseek(fd, 0, SEEK_SET); + + int maxThreads = tsArguments.thread_num; + int tableOfPerFile ; + if (numOfTable <= tsArguments.thread_num) { + tableOfPerFile = 1; + maxThreads = numOfTable; + } else { + tableOfPerFile = numOfTable / tsArguments.thread_num; + if (0 != numOfTable % tsArguments.thread_num) { + tableOfPerFile += 1; + } + } + + char* tblBuf = (char*)calloc(1, tableOfPerFile * sizeof(STableRecord)); + if (NULL == tblBuf){ + fprintf(stderr, "failed to calloc %" PRIzu "\n", tableOfPerFile * sizeof(STableRecord)); + close(fd); + return -1; + } + + int32_t numOfThread = 0; + int subFd = -1; + for (numOfThread = 0; numOfThread < maxThreads; numOfThread++) { + memset(tmpBuf, 0, TSDB_FILENAME_LEN); + sprintf(tmpBuf, ".tables.tmp.%d", numOfThread); + subFd = open(tmpBuf, O_RDWR | O_CREAT | O_TRUNC, S_IRWXU | S_IRGRP | S_IXGRP | S_IROTH); + if (subFd == -1) { + fprintf(stderr, "failed to open temp file: %s\n", tmpBuf); + for (int32_t loopCnt = 0; loopCnt < numOfThread; loopCnt++) { + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } + sprintf(tmpBuf, ".show-tables.tmp"); + (void)remove(tmpBuf); + close(fd); + return -1; + } + + // read tableOfPerFile for fd, write to subFd + ssize_t readLen = read(fd, tblBuf, tableOfPerFile * sizeof(STableRecord)); + if (readLen <= 0) { + close(subFd); + break; + } + taosWrite(subFd, tblBuf, readLen); + close(subFd); + } + + sprintf(tmpBuf, ".show-tables.tmp"); + (void)remove(tmpBuf); if (fd >= 0) { close(fd); fd = -1; } - taos_free_result(tmpResult); + taos_free_result(res); // start multi threads to dumpout - taosStartDumpOutWorkThreads(arguments, numOfThread, dbInfo->name); + taosStartDumpOutWorkThreads(taosCon, arguments, numOfThread, dbInfo->name); for (int loopCnt = 0; loopCnt < numOfThread; loopCnt++) { - sprintf(tmpFileName, ".tables.tmp.%d", loopCnt); - (void)remove(tmpFileName); - } - - free(tmpCommand); + sprintf(tmpBuf, ".tables.tmp.%d", loopCnt); + (void)remove(tmpBuf); + } return 0; } -void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { +void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp, char* dbName) { int counter = 0; int count_temp = 0; + char sqlstr[COMMAND_SIZE]; - char* tmpBuf = (char *)malloc(COMMAND_SIZE); - if (tmpBuf == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return; - } + char* pstr = sqlstr; - char* pstr = tmpBuf; - - pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s", tableDes->name); + pstr += sprintf(sqlstr, "CREATE TABLE IF NOT EXISTS %s.%s", dbName, tableDes->name); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; @@ -1420,12 +1594,10 @@ void taosDumpCreateTableClause(STableDef *tableDes, int numOfCols, FILE *fp) { pstr += sprintf(pstr, ");"); - fprintf(fp, "%s\n", tmpBuf); - - free(tmpBuf); + fprintf(fp, "%s\n\n", sqlstr); } -void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp) { +void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols, FILE *fp, char* dbName) { int counter = 0; int count_temp = 0; @@ 
-1438,7 +1610,7 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols char *pstr = NULL; pstr = tmpBuf; - pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s USING %s TAGS (", tableDes->name, metric); + pstr += sprintf(tmpBuf, "CREATE TABLE IF NOT EXISTS %s.%s USING %s.%s TAGS (", dbName, tableDes->name, dbName, metric); for (; counter < numOfCols; counter++) { if (tableDes->cols[counter].note[0] != '\0') break; @@ -1479,48 +1651,36 @@ void taosDumpCreateMTableClause(STableDef *tableDes, char *metric, int numOfCols free(tmpBuf); } -int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon) { - /* char temp[MAX_COMMAND_SIZE] = "\0"; */ - int64_t totalRows = 0; +int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* taosCon, char* dbName) { + int64_t lastRowsPrint = 5000000; + int64_t totalRows = 0; int count = 0; char *pstr = NULL; TAOS_ROW row = NULL; int numFields = 0; - char *tbuf = NULL; - - char* tmpCommand = (char *)calloc(1, COMMAND_SIZE); - if (tmpCommand == NULL) { - fprintf(stderr, "failed to allocate memory\n"); - return -1; + + if (arguments->schemaonly) { + return 0; } int32_t sql_buf_len = arguments->max_sql_len; char* tmpBuffer = (char *)calloc(1, sql_buf_len + 128); if (tmpBuffer == NULL) { fprintf(stderr, "failed to allocate memory\n"); - free(tmpCommand); return -1; } pstr = tmpBuffer; - if (arguments->schemaonly) { - free(tmpCommand); - free(tmpBuffer); - return 0; - } + char sqlstr[1024] = {0}; + sprintf(sqlstr, + "select * from %s.%s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc;", + dbName, tbname, arguments->start_time, arguments->end_time); - sprintf(tmpCommand, - "select * from %s where _c0 >= %" PRId64 " and _c0 <= %" PRId64 " order by _c0 asc", - tbname, - arguments->start_time, - arguments->end_time); - - TAOS_RES* tmpResult = taos_query(taosCon, tmpCommand); + TAOS_RES* tmpResult = taos_query(taosCon, sqlstr); int32_t code = taos_errno(tmpResult); if (code != 0) { - fprintf(stderr, "failed to run command %s, reason: %s\n", tmpCommand, taos_errstr(taosCon)); - free(tmpCommand); + fprintf(stderr, "failed to run command %s, reason: %s\n", sqlstr, taos_errstr(tmpResult)); free(tmpBuffer); taos_free_result(tmpResult); return -1; @@ -1529,14 +1689,6 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* numFields = taos_field_count(tmpResult); assert(numFields > 0); TAOS_FIELD *fields = taos_fetch_fields(tmpResult); - tbuf = (char *)malloc(COMMAND_SIZE); - if (tbuf == NULL) { - fprintf(stderr, "No enough memory\n"); - free(tmpCommand); - free(tmpBuffer); - taos_free_result(tmpResult); - return -1; - } int rowFlag = 0; int32_t curr_sqlstr_len = 0; @@ -1550,7 +1702,7 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* if (count == 0) { total_sqlstr_len = 0; - curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s VALUES (", tbname); + curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "INSERT INTO %s.%s VALUES (", dbName, tbname); } else { if (arguments->mysqlFlag) { if (0 == rowFlag) { @@ -1594,17 +1746,21 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* case TSDB_DATA_TYPE_DOUBLE: curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%f", GET_DOUBLE_VAL(row[col])); break; - case TSDB_DATA_TYPE_BINARY: + case TSDB_DATA_TYPE_BINARY: { + char tbuf[COMMAND_SIZE] = {0}; //*(pstr++) = '\''; converStringToReadable((char *)row[col], length[col], tbuf, 
COMMAND_SIZE); //pstr = stpcpy(pstr, tbuf); //*(pstr++) = '\''; pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; - case TSDB_DATA_TYPE_NCHAR: + } + case TSDB_DATA_TYPE_NCHAR: { + char tbuf[COMMAND_SIZE] = {0}; convertNCharToReadable((char *)row[col], length[col], tbuf, COMMAND_SIZE); pstr += sprintf(pstr + curr_sqlstr_len, "\'%s\'", tbuf); break; + } case TSDB_DATA_TYPE_TIMESTAMP: if (!arguments->mysqlFlag) { curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, "%" PRId64 "", *(int64_t *)row[col]); @@ -1624,9 +1780,14 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* curr_sqlstr_len += sprintf(pstr + curr_sqlstr_len, ") "); - totalRows++; + totalRows++; count++; fprintf(fp, "%s", tmpBuffer); + + if (totalRows >= lastRowsPrint) { + printf(" %"PRId64 " rows already be dumpout from %s.%s\n", totalRows, dbName, tbname); + lastRowsPrint += 5000000; + } total_sqlstr_len += curr_sqlstr_len; @@ -1638,19 +1799,12 @@ int taosDumpTableData(FILE *fp, char *tbname, struct arguments *arguments, TAOS* //} } + fprintf(fp, "\n"); atomic_add_fetch_64(&totalDumpOutRows, totalRows); - - fprintf(fp, "\n"); - - if (tbuf) { - free(tbuf); - } taos_free_result(tmpResult); - tmpResult = NULL; - free(tmpCommand); free(tmpBuffer); - return 0; + return totalRows; } int taosCheckParam(struct arguments *arguments) { @@ -1986,159 +2140,6 @@ static FILE* taosOpenDumpInFile(char *fptr) { return f; } -int taosDumpInOneFile_old(TAOS * taos, FILE* fp, char* fcharset, char* encode) { - char *command = NULL; - char *lcommand = NULL; - int tsize = 0; - char *line = NULL; - _Bool isRun = true; - size_t line_size = 0; - char *pstr = NULL; - char *lstr = NULL; - size_t inbytesleft = 0; - size_t outbytesleft = COMMAND_SIZE; - char *tcommand = NULL; - char *charsetOfFile = NULL; - iconv_t cd = (iconv_t)(-1); - - command = (char *)malloc(COMMAND_SIZE); - lcommand = (char *)malloc(COMMAND_SIZE); - if (command == NULL || lcommand == NULL) { - fprintf(stderr, "failed to connect to allocate memory\n"); - goto _dumpin_exit_failure; - } - - // Resolve locale - if (*fcharset != '\0') { - charsetOfFile = fcharset; - } else { - charsetOfFile = encode; - } - - if (charsetOfFile != NULL && strcasecmp(tsCharset, charsetOfFile) != 0) { - cd = iconv_open(tsCharset, charsetOfFile); - if (cd == ((iconv_t)(-1))) { - fprintf(stderr, "Failed to open iconv handle\n"); - goto _dumpin_exit_failure; - } - } - - pstr = command; - int64_t linenu = 0; - while (1) { - ssize_t size = getline(&line, &line_size, fp); - linenu++; - if (size <= 0) break; - if (size == 1) { - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != ((iconv_t)(-1))) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - - taosReplaceCtrlChar(tcommand); - - if (queryDB(taos, tcommand) != 0) { - fprintf(stderr, "error sql: linenu: %" PRId64 " failed\n", linenu); - exit(0); - } - - pstr = command; - pstr[0] = '\0'; - tsize = 0; - isRun = true; - } - - continue; - } - - /* if (line[0] == '-' && line[1] == '-') continue; */ - - line[size - 1] = 0; - - if (tsize + size - 1 > COMMAND_SIZE) { - fprintf(stderr, "command is too long\n"); - goto _dumpin_exit_failure; - } - - if (line[size - 2] == '\\') { - line[size - 2] = ' '; - isRun = false; - } else { - isRun = true; - } - - memcpy(pstr, line, size - 1); - pstr += (size - 1); - *pstr = '\0'; - - if (!isRun) continue; - 
- if (command != pstr && !isEmptyCommand(command)) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != ((iconv_t)(-1))) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(tcommand); - if (queryDB(taos, tcommand) != 0) { - fprintf(stderr, "error sql: linenu:%" PRId64 " failed\n", linenu); - exit(0); - } - } - - pstr = command; - *pstr = '\0'; - tsize = 0; - } - - if (pstr != command) { - inbytesleft = pstr - command; - memset(lcommand, 0, COMMAND_SIZE); - pstr = command; - lstr = lcommand; - outbytesleft = COMMAND_SIZE; - if (cd != ((iconv_t)(-1))) { - iconv(cd, &pstr, &inbytesleft, &lstr, &outbytesleft); - tcommand = lcommand; - } else { - tcommand = command; - } - taosReplaceCtrlChar(lcommand); - if (queryDB(taos, tcommand) != 0) - fprintf(stderr, "error sql: linenu:%" PRId64 " failed \n", linenu); - } - - if (cd != ((iconv_t)(-1))) iconv_close(cd); - tfree(line); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return 0; - -_dumpin_exit_failure: - if (cd != ((iconv_t)(-1))) iconv_close(cd); - tfree(command); - tfree(lcommand); - taos_close(taos); - fclose(fp); - return -1; -} - int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, char* fileName) { int read_len = 0; char * cmd = NULL; @@ -2152,6 +2153,7 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c return -1; } + int lastRowsPrint = 5000000; int lineNo = 0; while ((read_len = getline(&line, &line_len, fp)) != -1) { ++lineNo; @@ -2172,12 +2174,18 @@ int taosDumpInOneFile(TAOS * taos, FILE* fp, char* fcharset, char* encode, c memcpy(cmd + cmd_len, line, read_len); cmd[read_len + cmd_len]= '\0'; - if (queryDB(taos, cmd)) { + if (queryDbImpl(taos, cmd)) { fprintf(stderr, "error sql: linenu:%d, file:%s\n", lineNo, fileName); + fprintf(g_fpOfResult, "error sql: linenu:%d, file:%s\n", lineNo, fileName); } memset(cmd, 0, TSDB_MAX_ALLOWED_SQL_LEN); - cmd_len = 0; + cmd_len = 0; + + if (lineNo >= lastRowsPrint) { + printf(" %d lines already be executed from file %s\n", lineNo, fileName); + lastRowsPrint += 5000000; + } } tfree(cmd); @@ -2204,7 +2212,7 @@ void* taosDumpInWorkThreadFp(void *arg) return NULL; } -static void taosStartDumpInWorkThreads(struct arguments *args) +static void taosStartDumpInWorkThreads(void* taosCon, struct arguments *args) { pthread_attr_t thattr; SThreadParaObj *pThread; @@ -2219,11 +2227,7 @@ static void taosStartDumpInWorkThreads(struct arguments *args) pThread = threadObj + t; pThread->threadIndex = t; pThread->totalThreads = totalThreads; - pThread->taosCon = taos_connect(args->host, args->user, args->password, NULL, args->port); - if (pThread->taosCon == NULL) { - fprintf(stderr, "ERROR: thread:%d failed connect to TDengine, error:%s\n", pThread->threadIndex, taos_errstr(pThread->taosCon)); - exit(0); - } + pThread->taosCon = taosCon; pthread_attr_init(&thattr); pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); @@ -2272,7 +2276,7 @@ int taosDumpIn(struct arguments *arguments) { taosDumpInOneFile(taos, fp, tsfCharset, arguments->encode, tsDbSqlFile); } - taosStartDumpInWorkThreads(arguments); + taosStartDumpInWorkThreads(taos, arguments); taos_close(taos); taosFreeSQLFiles(); diff --git a/src/mnode/inc/mnodeMnode.h b/src/mnode/inc/mnodeMnode.h index 93f2fa11ea..ffdec02eb6 100644 --- a/src/mnode/inc/mnodeMnode.h +++ 
b/src/mnode/inc/mnodeMnode.h @@ -43,8 +43,8 @@ void mnodeIncMnodeRef(struct SMnodeObj *pMnode); void mnodeDecMnodeRef(struct SMnodeObj *pMnode); char * mnodeGetMnodeRoleStr(); -void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet); -void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet); +void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet, bool redirect); +void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet, bool redirect); char* mnodeGetMnodeMasterEp(); void mnodeGetMnodeInfos(void *mnodes); diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index c971a945aa..25dbb10536 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -317,13 +317,6 @@ static int32_t mnodeCheckDbCfg(SDbCfg *pCfg) { return TSDB_CODE_MND_INVALID_DB_OPTION; } -#ifndef _SYNC - if (pCfg->replications != 1) { - mError("invalid db option replications:%d can only be 1 in this version", pCfg->replications); - return TSDB_CODE_MND_INVALID_DB_OPTION; - } -#endif - if (pCfg->update < TSDB_MIN_DB_UPDATE || pCfg->update > TSDB_MAX_DB_UPDATE) { mError("invalid db option update:%d valid range: [%d, %d]", pCfg->update, TSDB_MIN_DB_UPDATE, TSDB_MAX_DB_UPDATE); return TSDB_CODE_MND_INVALID_DB_OPTION; diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 0ff50b2307..037ee2864a 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -111,9 +111,6 @@ static int32_t mnodeDnodeActionInsert(SSdbRow *pRow) { static int32_t mnodeDnodeActionDelete(SSdbRow *pRow) { SDnodeObj *pDnode = pRow->pObj; -#ifndef _SYNC - mnodeDropAllDnodeVgroups(pDnode); -#endif mnodeDropMnodeLocal(pDnode->dnodeId); bnNotify(); mnodeUpdateDnodeEps(); @@ -306,7 +303,7 @@ void mnodeUpdateDnode(SDnodeObj *pDnode) { int32_t code = sdbUpdateRow(&row); if (code != TSDB_CODE_SUCCESS && code != TSDB_CODE_MND_ACTION_IN_PROGRESS) { - mError("dnodeId:%d, failed update", pDnode->dnodeId); + mError("dnode:%d, failed update", pDnode->dnodeId); } } @@ -705,11 +702,7 @@ static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) { mInfo("dnode:%d, start to drop it", pDnode->dnodeId); -#ifndef _SYNC - int32_t code = mnodeDropDnode(pDnode, pMsg); -#else int32_t code = bnDropDnode(pDnode); -#endif mnodeDecDnodeRef(pDnode); return code; } @@ -1179,58 +1172,3 @@ static char* mnodeGetDnodeAlternativeRoleStr(int32_t alternativeRole) { default:return "any"; } } - -#ifndef _SYNC - -int32_t bnInit() { return TSDB_CODE_SUCCESS; } -void bnCleanUp() {} -void bnNotify() {} -void bnCheckModules() {} -void bnReset() {} -int32_t bnAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId) { return TSDB_CODE_SYN_NOT_ENABLED; } - -char* syncRole[] = { - "offline", - "unsynced", - "syncing", - "slave", - "master" -}; - -int32_t bnAllocVnodes(SVgObj *pVgroup) { - void * pIter = NULL; - SDnodeObj *pDnode = NULL; - SDnodeObj *pSelDnode = NULL; - float vnodeUsage = 1000.0; - - while (1) { - pIter = mnodeGetNextDnode(pIter, &pDnode); - if (pDnode == NULL) break; - - if (pDnode->numOfCores > 0 && pDnode->openVnodes < TSDB_MAX_VNODES) { - float openVnodes = pDnode->openVnodes; - if (pDnode->isMgmt) openVnodes += tsMnodeEqualVnodeNum; - - float usage = openVnodes / pDnode->numOfCores; - if (usage <= vnodeUsage) { - pSelDnode = pDnode; - vnodeUsage = usage; - } - } - mnodeDecDnodeRef(pDnode); - } - - if (pSelDnode == NULL) { - mError("failed to alloc vnode to vgroup"); - return TSDB_CODE_MND_NO_ENOUGH_DNODES; - } - - pVgroup->vnodeGid[0].dnodeId = pSelDnode->dnodeId; - pVgroup->vnodeGid[0].pDnode = pSelDnode; - - mDebug("dnode:%d, alloc one vnode to 
vgroup, openVnodes:%d", pSelDnode->dnodeId, pSelDnode->openVnodes); - return TSDB_CODE_SUCCESS; -} - -#endif - diff --git a/src/mnode/src/mnodeMain.c b/src/mnode/src/mnodeMain.c index 86f2c821f9..7b520c6022 100644 --- a/src/mnode/src/mnodeMain.c +++ b/src/mnode/src/mnodeMain.c @@ -37,16 +37,10 @@ #include "mnodeShow.h" #include "mnodeProfile.h" -typedef struct { - const char *const name; - int (*init)(); - void (*cleanup)(); -} SMnodeComponent; - void *tsMnodeTmr = NULL; static bool tsMgmtIsRunning = false; -static const SMnodeComponent tsMnodeComponents[] = { +static SStep tsMnodeSteps[] = { {"sdbref", sdbInitRef, sdbCleanUpRef}, {"profile", mnodeInitProfile, mnodeCleanupProfile}, {"cluster", mnodeInitCluster, mnodeCleanupCluster}, @@ -67,22 +61,14 @@ static void mnodeInitTimer(); static void mnodeCleanupTimer(); static bool mnodeNeedStart() ; -static void mnodeCleanupComponents(int32_t stepId) { - for (int32_t i = stepId; i >= 0; i--) { - tsMnodeComponents[i].cleanup(); - } +static void mnodeCleanupComponents() { + int32_t stepSize = sizeof(tsMnodeSteps) / sizeof(SStep); + dnodeStepCleanup(tsMnodeSteps, stepSize); } static int32_t mnodeInitComponents() { - int32_t code = 0; - for (int32_t i = 0; i < sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]); i++) { - if (tsMnodeComponents[i].init() != 0) { - mnodeCleanupComponents(i); - code = -1; - break; - } - } - return code; + int32_t stepSize = sizeof(tsMnodeSteps) / sizeof(SStep); + return dnodeStepInit(tsMnodeSteps, stepSize); } int32_t mnodeStartSystem() { @@ -132,7 +118,7 @@ void mnodeCleanupSystem() { dnodeFreeMReadQueue(); dnodeFreeMPeerQueue(); mnodeCleanupTimer(); - mnodeCleanupComponents(sizeof(tsMnodeComponents) / sizeof(tsMnodeComponents[0]) - 1); + mnodeCleanupComponents(); mInfo("mnode is cleaned up"); } diff --git a/src/mnode/src/mnodeMnode.c b/src/mnode/src/mnodeMnode.c index 68acae7dec..ea5260c76d 100644 --- a/src/mnode/src/mnodeMnode.c +++ b/src/mnode/src/mnodeMnode.c @@ -273,14 +273,14 @@ void mnodeUpdateMnodeEpSet(SMInfos *pMinfos) { mnodeMnodeUnLock(); } -void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet) { +void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet, bool redirect) { mnodeMnodeRdLock(); *epSet = tsMEpForPeer; mnodeMnodeUnLock(); mTrace("vgId:1, mnodes epSet for peer is returned, num:%d inUse:%d", tsMEpForPeer.numOfEps, tsMEpForPeer.inUse); for (int32_t i = 0; i < epSet->numOfEps; ++i) { - if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort + TSDB_PORT_DNODEDNODE) { + if (redirect && strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort + TSDB_PORT_DNODEDNODE) { epSet->inUse = (i + 1) % epSet->numOfEps; mTrace("vgId:1, mnode:%d, for peer ep:%s:%u, set inUse to %d", i, epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse); } else { @@ -289,14 +289,14 @@ void mnodeGetMnodeEpSetForPeer(SRpcEpSet *epSet) { } } -void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet) { +void mnodeGetMnodeEpSetForShell(SRpcEpSet *epSet, bool redirect) { mnodeMnodeRdLock(); *epSet = tsMEpForShell; mnodeMnodeUnLock(); mTrace("vgId:1, mnodes epSet for shell is returned, num:%d inUse:%d", tsMEpForShell.numOfEps, tsMEpForShell.inUse); for (int32_t i = 0; i < epSet->numOfEps; ++i) { - if (strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) { + if (redirect && strcmp(epSet->fqdn[i], tsLocalFqdn) == 0 && htons(epSet->port[i]) == tsServerPort) { epSet->inUse = (i + 1) % epSet->numOfEps; mTrace("vgId:1, mnode:%d, for shell ep:%s:%u, set inUse to %d", i, 
epSet->fqdn[i], htons(epSet->port[i]), epSet->inUse); } else { diff --git a/src/mnode/src/mnodePeer.c b/src/mnode/src/mnodePeer.c index cfb7b7781b..aaf8b69427 100644 --- a/src/mnode/src/mnodePeer.c +++ b/src/mnode/src/mnodePeer.c @@ -54,7 +54,7 @@ int32_t mnodeProcessPeerReq(SMnodeMsg *pMsg) { if (!sdbIsMaster()) { SMnodeRsp *rpcRsp = &pMsg->rpcRsp; SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); - mnodeGetMnodeEpSetForPeer(epSet); + mnodeGetMnodeEpSetForPeer(epSet, true); rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index 36b6ff7a59..7c35829f88 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -282,27 +282,34 @@ static int32_t mnodeRetrieveConns(SShowObj *pShow, char *data, int32_t rows, voi // not thread safe, need optimized int32_t mnodeSaveQueryStreamList(SConnObj *pConn, SHeartBeatMsg *pHBMsg) { - pConn->numOfQueries = htonl(pHBMsg->numOfQueries); - if (pConn->numOfQueries > 0) { + pConn->numOfQueries = 0; + pConn->numOfStreams = 0; + int32_t numOfQueries = htonl(pHBMsg->numOfQueries); + int32_t numOfStreams = htonl(pHBMsg->numOfStreams); + + if (numOfQueries > 0) { if (pConn->pQueries == NULL) { pConn->pQueries = calloc(sizeof(SQueryDesc), QUERY_STREAM_SAVE_SIZE); } - int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfQueries) * sizeof(SQueryDesc); + pConn->numOfQueries = MIN(QUERY_STREAM_SAVE_SIZE, numOfQueries); + + int32_t saveSize = pConn->numOfQueries * sizeof(SQueryDesc); if (saveSize > 0 && pConn->pQueries != NULL) { memcpy(pConn->pQueries, pHBMsg->pData, saveSize); } } - pConn->numOfStreams = htonl(pHBMsg->numOfStreams); - if (pConn->numOfStreams > 0) { + if (numOfStreams > 0) { if (pConn->pStreams == NULL) { pConn->pStreams = calloc(sizeof(SStreamDesc), QUERY_STREAM_SAVE_SIZE); } - int32_t saveSize = MIN(QUERY_STREAM_SAVE_SIZE, pConn->numOfStreams) * sizeof(SStreamDesc); + pConn->numOfStreams = MIN(QUERY_STREAM_SAVE_SIZE, numOfStreams); + + int32_t saveSize = pConn->numOfStreams * sizeof(SStreamDesc); if (saveSize > 0 && pConn->pStreams != NULL) { - memcpy(pConn->pStreams, pHBMsg->pData + pConn->numOfQueries * sizeof(SQueryDesc), saveSize); + memcpy(pConn->pStreams, pHBMsg->pData + numOfQueries * sizeof(SQueryDesc), saveSize); } } @@ -450,6 +457,12 @@ static int32_t mnodeGetStreamMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p pSchema[cols].bytes = htons(pShow->bytes[cols]); cols++; + pShow->bytes[cols] = TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE; + pSchema[cols].type = TSDB_DATA_TYPE_BINARY; + strcpy(pSchema[cols].name, "dest table"); + pSchema[cols].bytes = htons(pShow->bytes[cols]); + cols++; + pShow->bytes[cols] = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE; pSchema[cols].type = TSDB_DATA_TYPE_BINARY; strcpy(pSchema[cols].name, "ip:port"); @@ -524,6 +537,10 @@ static int32_t mnodeRetrieveStreams(SShowObj *pShow, char *data, int32_t rows, v STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pConnObj->user, pShow->bytes[cols]); cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; + STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDesc->dstTable, pShow->bytes[cols]); + cols++; + pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; snprintf(ipStr, sizeof(ipStr), "%s:%u", taosIpStr(pConnObj->ip), pConnObj->port); STR_WITH_MAXSIZE_TO_VARSTR(pWrite, ipStr, pShow->bytes[cols]); diff --git a/src/mnode/src/mnodeRead.c b/src/mnode/src/mnodeRead.c index c2a70bc01d..200f589b78 100644 --- a/src/mnode/src/mnodeRead.c +++ 
b/src/mnode/src/mnodeRead.c @@ -50,7 +50,7 @@ int32_t mnodeProcessRead(SMnodeMsg *pMsg) { if (!sdbIsMaster()) { SMnodeRsp *rpcRsp = &pMsg->rpcRsp; SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); - mnodeGetMnodeEpSetForShell(epSet); + mnodeGetMnodeEpSetForShell(epSet, true); rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 2da46d5b4b..3c1c92226a 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -282,7 +282,7 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { pRsp->onlineDnodes = htonl(mnodeGetOnlineDnodesNum()); pRsp->totalDnodes = htonl(mnodeGetDnodesNum()); - mnodeGetMnodeEpSetForShell(&pRsp->epSet); + mnodeGetMnodeEpSetForShell(&pRsp->epSet, false); pMsg->rpcRsp.rsp = pRsp; pMsg->rpcRsp.len = sizeof(SHeartBeatRsp); @@ -349,7 +349,7 @@ static int32_t mnodeProcessConnectMsg(SMnodeMsg *pMsg) { pConnectRsp->writeAuth = pUser->writeAuth; pConnectRsp->superAuth = pUser->superAuth; - mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet); + mnodeGetMnodeEpSetForShell(&pConnectRsp->epSet, false); connect_over: if (code != TSDB_CODE_SUCCESS) { diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 2149cb12c0..6297bb21d0 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -396,14 +396,15 @@ static void mnodeAddTableIntoStable(SSTableObj *pStable, SCTableObj *pCtable) { atomic_add_fetch_32(&pStable->numOfTables, 1); if (pStable->vgHash == NULL) { - pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + pStable->vgHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); + mDebug("table:%s, create hash:%p", pStable->info.tableId, pStable->vgHash); } if (pStable->vgHash != NULL) { if (taosHashGet(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)) == NULL) { taosHashPut(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId), &pCtable->vgId, sizeof(pCtable->vgId)); - mDebug("table:%s, vgId:%d is put into stable vgList, sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId, - (int32_t)taosHashGetSize(pStable->vgHash)); + mDebug("table:%s, vgId:%d is put into stable hash:%p, sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId, + pStable->vgHash, taosHashGetSize(pStable->vgHash)); } } } @@ -416,13 +417,14 @@ static void mnodeRemoveTableFromStable(SSTableObj *pStable, SCTableObj *pCtable) SVgObj *pVgroup = mnodeGetVgroup(pCtable->vgId); if (pVgroup == NULL) { taosHashRemove(pStable->vgHash, &pCtable->vgId, sizeof(pCtable->vgId)); - mDebug("table:%s, vgId:%d is remove from stable vgList, sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId, - (int32_t)taosHashGetSize(pStable->vgHash)); + mDebug("table:%s, vgId:%d is remove from stable hash:%p sizeOfVgList:%d", pStable->info.tableId, pCtable->vgId, + pStable->vgHash, taosHashGetSize(pStable->vgHash)); } mnodeDecVgroupRef(pVgroup); } static void mnodeDestroySuperTable(SSTableObj *pStable) { + mDebug("table:%s, is destroyed, stable hash:%p", pStable->info.tableId, pStable->vgHash); if (pStable->vgHash != NULL) { taosHashCleanup(pStable->vgHash); pStable->vgHash = NULL; @@ -464,6 +466,9 @@ static int32_t mnodeSuperTableActionUpdate(SSdbRow *pRow) { SSTableObj *pNew = pRow->pObj; SSTableObj *pTable = mnodeGetSuperTable(pNew->info.tableId); if (pTable != NULL && pTable != pNew) { + mDebug("table:%s, will be updated, hash:%p sizeOfVgList:%d, new hash:%p sizeOfVgList:%d", pTable->info.tableId, + pTable->vgHash, 
taosHashGetSize(pTable->vgHash), pNew->vgHash, taosHashGetSize(pNew->vgHash)); + void *oldTableId = pTable->info.tableId; void *oldSchema = pTable->schema; void *oldVgHash = pTable->vgHash; @@ -479,6 +484,9 @@ static int32_t mnodeSuperTableActionUpdate(SSdbRow *pRow) { free(pNew); free(oldTableId); free(oldSchema); + + mDebug("table:%s, update finished, hash:%p sizeOfVgList:%d", pTable->info.tableId, pTable->vgHash, + taosHashGetSize(pTable->vgHash)); } mnodeDecTableRef(pTable); @@ -783,8 +791,8 @@ static int32_t mnodeProcessDropTableMsg(SMnodeMsg *pMsg) { if (pMsg->pTable->type == TSDB_SUPER_TABLE) { SSTableObj *pSTable = (SSTableObj *)pMsg->pTable; - mInfo("msg:%p, app:%p table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d", - pMsg, pMsg->rpcMsg.ahandle, pDrop->tableId, pSTable->uid, pSTable->numOfTables, (int32_t)taosHashGetSize(pSTable->vgHash)); + mInfo("msg:%p, app:%p table:%s, start to drop stable, uid:%" PRIu64 ", numOfChildTables:%d, sizeOfVgList:%d", pMsg, + pMsg->rpcMsg.ahandle, pDrop->tableId, pSTable->uid, pSTable->numOfTables, taosHashGetSize(pSTable->vgHash)); return mnodeProcessDropSuperTableMsg(pMsg); } else { SCTableObj *pCTable = (SCTableObj *)pMsg->pTable; @@ -925,7 +933,10 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { if (pMsg == NULL) return TSDB_CODE_MND_APP_ERROR; SSTableObj *pStable = (SSTableObj *)pMsg->pTable; - if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) { + mInfo("msg:%p, app:%p stable:%s will be dropped, hash:%p sizeOfVgList:%d", pMsg, pMsg->rpcMsg.ahandle, + pStable->info.tableId, pStable->vgHash, taosHashGetSize(pStable->vgHash)); + + if (pStable->vgHash != NULL /*pStable->numOfTables != 0*/) { int32_t *pVgId = taosHashIterate(pStable->vgHash, NULL); while (pVgId) { SVgObj *pVgroup = mnodeGetVgroup(*pVgId); @@ -938,8 +949,9 @@ static int32_t mnodeProcessDropSuperTableMsg(SMnodeMsg *pMsg) { pDrop->uid = htobe64(pStable->uid); mnodeExtractTableName(pStable->info.tableId, pDrop->tableId); - mInfo("msg:%p, app:%p stable:%s, send drop stable msg to vgId:%d", pMsg, pMsg->rpcMsg.ahandle, - pStable->info.tableId, pVgroup->vgId); + mInfo("msg:%p, app:%p stable:%s, send drop stable msg to vgId:%d, hash:%p sizeOfVgList:%d", pMsg, + pMsg->rpcMsg.ahandle, pStable->info.tableId, pVgroup->vgId, pStable->vgHash, + taosHashGetSize(pStable->vgHash)); SRpcEpSet epSet = mnodeGetEpSetFromVgroup(pVgroup); SRpcMsg rpcMsg = {.pCont = pDrop, .contLen = sizeof(SDropSTableMsg), .msgType = TSDB_MSG_TYPE_MD_DROP_STABLE}; dnodeSendMsgToDnode(&epSet, &rpcMsg); @@ -1482,8 +1494,8 @@ static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) { pMsg->rpcRsp.rsp = pMeta; - mDebug("msg:%p, app:%p stable:%s, uid:%" PRIu64 " table meta is retrieved", pMsg, pMsg->rpcMsg.ahandle, - pTable->info.tableId, pTable->uid); + mDebug("msg:%p, app:%p stable:%s, uid:%" PRIu64 " table meta is retrieved, sizeOfVgList:%d numOfTables:%d", pMsg, + pMsg->rpcMsg.ahandle, pTable->info.tableId, pTable->uid, taosHashGetSize(pTable->vgHash), pTable->numOfTables); return TSDB_CODE_SUCCESS; } @@ -1512,7 +1524,7 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { char *msg = (char *)pRsp + sizeof(SSTableVgroupRspMsg); for (int32_t i = 0; i < numOfTable; ++i) { - char * stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; + char *stableName = (char *)pInfo + sizeof(SSTableVgroupMsg) + (TSDB_TABLE_FNAME_LEN)*i; SSTableObj *pTable = mnodeGetSuperTable(stableName); if (pTable == NULL) { mError("msg:%p, app:%p 
stable:%s, not exist while get stable vgroup info", pMsg, pMsg->rpcMsg.ahandle, stableName); @@ -1533,6 +1545,8 @@ static int32_t mnodeProcessSuperTableVgroupMsg(SMnodeMsg *pMsg) { msg += sizeof(SVgroupsMsg); } else { SVgroupsMsg *pVgroupMsg = (SVgroupsMsg *)msg; + mDebug("msg:%p, app:%p stable:%s, hash:%p sizeOfVgList:%d will be returned", pMsg, pMsg->rpcMsg.ahandle, + pTable->info.tableId, pTable->vgHash, taosHashGetSize(pTable->vgHash)); int32_t *pVgId = taosHashIterate(pTable->vgHash, NULL); int32_t vgSize = 0; @@ -1734,6 +1748,16 @@ static int32_t mnodeDoCreateChildTable(SMnodeMsg *pMsg, int32_t tid) { if (pTable->info.type == TSDB_CHILD_TABLE) { STagData *pTagData = (STagData *)pCreate->schema; // it is a tag key + + char prefix[64] = {0}; + size_t prefixLen = tableIdPrefix(pMsg->pDb->name, prefix, 64); + if (0 != strncasecmp(prefix, pTagData->name, prefixLen)) { + mError("msg:%p, app:%p table:%s, corresponding super table:%s not in this db", pMsg, pMsg->rpcMsg.ahandle, + pCreate->tableId, pTagData->name); + mnodeDestroyChildTable(pTable); + return TSDB_CODE_TDB_INVALID_CREATE_TB_MSG; + } + if (pMsg->pSTable == NULL) pMsg->pSTable = mnodeGetSuperTable(pTagData->name); if (pMsg->pSTable == NULL) { mError("msg:%p, app:%p table:%s, corresponding super table:%s does not exist", pMsg, pMsg->rpcMsg.ahandle, @@ -2629,9 +2653,7 @@ static int32_t mnodeRetrieveShowTables(SShowObj *pShow, char *data, int32_t rows SPatternCompareInfo info = PATTERN_COMPARE_INFO_INITIALIZER; char prefix[64] = {0}; - tstrncpy(prefix, pDb->name, 64); - strcat(prefix, TS_PATH_DELIMITER); - int32_t prefixLen = strlen(prefix); + int32_t prefixLen = tableIdPrefix(pDb->name, prefix, 64); char* pattern = NULL; if (pShow->payloadLen > 0) { diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index d3020de6bd..eec559600f 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -315,7 +315,8 @@ void mnodeUpdateVgroupStatus(SVgObj *pVgroup, SDnodeObj *pDnode, SVnodeLoad *pVl for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { SVnodeGid *pVgid = &pVgroup->vnodeGid[i]; if (pVgid->pDnode == pDnode) { - mTrace("dnode:%d, receive status from dnode, vgId:%d status is %d:%s", pDnode->dnodeId, pVgroup->vgId, pVgid->role, syncRole[pVgid->role]); + mTrace("dnode:%d, receive status from dnode, vgId:%d status:%s last:%s", pDnode->dnodeId, pVgroup->vgId, + syncRole[pVload->role], syncRole[pVgid->role]); pVgid->role = pVload->role; if (pVload->role == TAOS_SYNC_ROLE_MASTER) { pVgroup->inUse = i; diff --git a/src/mnode/src/mnodeWrite.c b/src/mnode/src/mnodeWrite.c index 53981238a7..c0699b05b3 100644 --- a/src/mnode/src/mnodeWrite.c +++ b/src/mnode/src/mnodeWrite.c @@ -50,7 +50,7 @@ int32_t mnodeProcessWrite(SMnodeMsg *pMsg) { if (!sdbIsMaster()) { SMnodeRsp *rpcRsp = &pMsg->rpcRsp; SRpcEpSet *epSet = rpcMallocCont(sizeof(SRpcEpSet)); - mnodeGetMnodeEpSetForShell(epSet); + mnodeGetMnodeEpSetForShell(epSet, true); rpcRsp->rsp = epSet; rpcRsp->len = sizeof(SRpcEpSet); diff --git a/src/plugins/http/src/httpContext.c b/src/plugins/http/src/httpContext.c index 22f464924e..266228e7ac 100644 --- a/src/plugins/http/src/httpContext.c +++ b/src/plugins/http/src/httpContext.c @@ -79,7 +79,7 @@ bool httpInitContexts() { void httpCleanupContexts() { if (tsHttpServer.contextCache != NULL) { SCacheObj *cache = tsHttpServer.contextCache; - httpInfo("context cache is cleanuping, size:%" PRIzu "", taosHashGetSize(cache->pHashTable)); + httpInfo("context cache is cleanuping, size:%d", 
taosHashGetSize(cache->pHashTable)); taosCacheCleanup(tsHttpServer.contextCache); tsHttpServer.contextCache = NULL; } diff --git a/src/plugins/http/src/httpSession.c b/src/plugins/http/src/httpSession.c index a96e4433b0..35ce0160b2 100644 --- a/src/plugins/http/src/httpSession.c +++ b/src/plugins/http/src/httpSession.c @@ -107,7 +107,7 @@ static void httpDestroySession(void *data) { void httpCleanUpSessions() { if (tsHttpServer.sessionCache != NULL) { SCacheObj *cache = tsHttpServer.sessionCache; - httpInfo("session cache is cleanuping, size:%" PRIzu "", taosHashGetSize(cache->pHashTable)); + httpInfo("session cache is cleanuping, size:%d", taosHashGetSize(cache->pHashTable)); taosCacheCleanup(tsHttpServer.sessionCache); tsHttpServer.sessionCache = NULL; } diff --git a/src/plugins/http/src/httpSql.c b/src/plugins/http/src/httpSql.c index 564f555c40..3e517c6fa6 100644 --- a/src/plugins/http/src/httpSql.c +++ b/src/plugins/http/src/httpSql.c @@ -181,7 +181,7 @@ void httpProcessMultiSql(HttpContext *pContext) { char *sql = httpGetCmdsString(pContext, cmd->sql); httpTraceL("context:%p, fd:%d, user:%s, process pos:%d, start query, sql:%s", pContext, pContext->fd, pContext->user, multiCmds->pos, sql); - taosNotePrintHttp(sql); + nPrintHttp(sql); taos_query_a(pContext->session->taos, sql, httpProcessMultiSqlCallBack, (void *)pContext); } @@ -329,7 +329,7 @@ void httpProcessSingleSqlCmd(HttpContext *pContext) { } httpTraceL("context:%p, fd:%d, user:%s, start query, sql:%s", pContext, pContext->fd, pContext->user, sql); - taosNotePrintHttp(sql); + nPrintHttp(sql); taos_query_a(pSession->taos, sql, httpProcessSingleSqlCallBack, (void *)pContext); } diff --git a/src/plugins/http/src/httpSystem.c b/src/plugins/http/src/httpSystem.c index 3b8858b62e..34a70a658b 100644 --- a/src/plugins/http/src/httpSystem.c +++ b/src/plugins/http/src/httpSystem.c @@ -37,7 +37,6 @@ void opInitHandle(HttpServer* pServer) {} #endif HttpServer tsHttpServer; -void taosInitNote(int32_t numOfNoteLines, int32_t maxNotes, char* lable); int32_t httpInitSystem() { strcpy(tsHttpServer.label, "rest"); @@ -48,9 +47,6 @@ int32_t httpInitSystem() { pthread_mutex_init(&tsHttpServer.serverMutex, NULL); - if (tsHttpEnableRecordSql != 0) { - taosInitNote(tsNumOfLogLines / 10, 1, (char*)"http_note"); - } restInitHandle(&tsHttpServer); adminInitHandle(&tsHttpServer); gcInitHandle(&tsHttpServer); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index b73f7ce3f5..9b29ad909a 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -140,6 +140,11 @@ typedef struct SQueryCostInfo { uint64_t numOfTimeWindows; } SQueryCostInfo; +typedef struct { + int64_t vgroupLimit; + int64_t ts; +} SOrderedPrjQueryInfo; + typedef struct SQuery { int16_t numOfCols; int16_t numOfTags; @@ -167,6 +172,7 @@ typedef struct SQuery { tFilePage** sdata; STableQueryInfo* current; + SOrderedPrjQueryInfo prjInfo; // limit value for each vgroup, only available in global order projection query. 
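// prjInfo.ts records the timestamp boundary of rows already returned under the per-vgroup limit,
// so once vgroupLimit is reached, data blocks entirely beyond that boundary can be skipped (see sequentialTableProcess).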
SSingleColumnFilterInfo* pFilterInfo; } SQuery; @@ -185,7 +191,7 @@ typedef struct SQueryRuntimeEnv { void* pQueryHandle; void* pSecQueryHandle; // another thread for bool stableQuery; // super table query or not - bool topBotQuery; // false + bool topBotQuery; // TODO used bitwise flag bool groupbyNormalCol; // denote if this is a groupby normal column query bool hasTagResults; // if there are tag values in final result or not bool timeWindowInterpo;// if the time window start/end required interpolation @@ -210,14 +216,13 @@ enum { typedef struct SQInfo { void* signature; int32_t code; // error code to returned to client - int64_t owner; // if it is in execution + int64_t owner; // if it is in execution void* tsdb; SMemRef memRef; int32_t vgId; STableGroupInfo tableGroupInfo; // table list SArray STableGroupInfo tableqinfoGroupInfo; // this is a group array list, including SArray structure SQueryRuntimeEnv runtimeEnv; -// SArray* arrTableIdInfo; SHashObj* arrTableIdInfo; int32_t groupIndex; @@ -233,6 +238,7 @@ typedef struct SQInfo { tsem_t ready; int32_t dataReady; // denote if query result is ready or not void* rspContext; // response context + int64_t startExecTs; // start to exec timestamp } SQInfo; #endif // TDENGINE_QUERYEXECUTOR_H diff --git a/src/query/inc/tsqlfunction.h b/src/query/inc/tsqlfunction.h index 5a923db52c..38bb0b8a71 100644 --- a/src/query/inc/tsqlfunction.h +++ b/src/query/inc/tsqlfunction.h @@ -250,10 +250,9 @@ enum { }; typedef struct STwaInfo { - TSKEY lastKey; int8_t hasResult; // flag to denote has value double dOutput; - double lastValue; + SPoint1 p; STimeWindow win; } STwaInfo; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index a45c0ac6ef..58f05c9d9d 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -128,11 +128,14 @@ static UNUSED_FUNC void* u_realloc(void* p, size_t __size) { #define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st))) #define GET_NUM_OF_TABLEGROUP(q) taosArrayGetSize((q)->tableqinfoGroupInfo.pGroupList) #define GET_TABLEGROUP(q, _index) ((SArray*) taosArrayGetP((q)->tableqinfoGroupInfo.pGroupList, (_index))) +#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) static void setQueryStatus(SQuery *pQuery, int8_t status); static void finalizeQueryResult(SQueryRuntimeEnv *pRuntimeEnv); -#define QUERY_IS_INTERVAL_QUERY(_q) ((_q)->interval.interval > 0) +static int32_t getMaximumIdleDurationSec() { + return tsShellActivityTimer * 2; +} static void getNextTimeWindow(SQuery* pQuery, STimeWindow* tw) { int32_t factor = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); @@ -700,65 +703,60 @@ static FORCE_INLINE int32_t getForwardStepsInBlock(int32_t numOfRows, __block_se return forwardStep; } +static int32_t updateResultRowCurrentIndex(SResultRowInfo* pWindowResInfo, TSKEY lastKey, bool ascQuery) { + int32_t i = 0; + int64_t skey = TSKEY_INITIAL_VAL; + + int32_t numOfClosed = 0; + for (i = 0; i < pWindowResInfo->size; ++i) { + SResultRow *pResult = pWindowResInfo->pResult[i]; + if (pResult->closed) { + numOfClosed += 1; + continue; + } + + TSKEY ekey = pResult->win.ekey; + if ((ekey <= lastKey && ascQuery) || (pResult->win.skey >= lastKey && !ascQuery)) { + closeTimeWindow(pWindowResInfo, i); + } else { + skey = pResult->win.skey; + break; + } + } + + // all windows are closed, set the last one to be the skey + if (skey == TSKEY_INITIAL_VAL) { + assert(i == pWindowResInfo->size); + pWindowResInfo->curIndex = pWindowResInfo->size - 1; + } else { + pWindowResInfo->curIndex = i; + 
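// the first still-open result window becomes the current one; its start key is kept as prevSKey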
pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex]->win.skey; + } + + return numOfClosed; +} + /** * NOTE: the query status only set for the first scan of master scan. */ static int32_t doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey, SResultRowInfo *pWindowResInfo) { SQuery *pQuery = pRuntimeEnv->pQuery; - if (pRuntimeEnv->scanFlag != MASTER_SCAN) { - return pWindowResInfo->size; - } - - // for group by normal column query, close time window and return. - if (!QUERY_IS_INTERVAL_QUERY(pQuery)) { - closeAllTimeWindow(pWindowResInfo); + if (pRuntimeEnv->scanFlag != MASTER_SCAN || pWindowResInfo->size == 0) { return pWindowResInfo->size; } // no qualified results exist, abort check int32_t numOfClosed = 0; - - if (pWindowResInfo->size == 0) { - return pWindowResInfo->size; - } + bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); // query completed - if ((lastKey >= pQuery->current->win.ekey && QUERY_IS_ASC_QUERY(pQuery)) || - (lastKey <= pQuery->current->win.ekey && !QUERY_IS_ASC_QUERY(pQuery))) { + if ((lastKey >= pQuery->current->win.ekey && ascQuery) || (lastKey <= pQuery->current->win.ekey && (!ascQuery))) { closeAllTimeWindow(pWindowResInfo); pWindowResInfo->curIndex = pWindowResInfo->size - 1; setQueryStatus(pQuery, QUERY_COMPLETED | QUERY_RESBUF_FULL); } else { // set the current index to be the last unclosed window - int32_t i = 0; - int64_t skey = TSKEY_INITIAL_VAL; - - for (i = 0; i < pWindowResInfo->size; ++i) { - SResultRow *pResult = pWindowResInfo->pResult[i]; - if (pResult->closed) { - numOfClosed += 1; - continue; - } - - TSKEY ekey = pResult->win.ekey; - if ((ekey <= lastKey && QUERY_IS_ASC_QUERY(pQuery)) || - (pResult->win.skey >= lastKey && !QUERY_IS_ASC_QUERY(pQuery))) { - closeTimeWindow(pWindowResInfo, i); - } else { - skey = pResult->win.skey; - break; - } - } - - // all windows are closed, set the last one to be the skey - if (skey == TSKEY_INITIAL_VAL) { - assert(i == pWindowResInfo->size); - pWindowResInfo->curIndex = pWindowResInfo->size - 1; - } else { - pWindowResInfo->curIndex = i; - } - - pWindowResInfo->prevSKey = pWindowResInfo->pResult[pWindowResInfo->curIndex]->win.skey; + numOfClosed = updateResultRowCurrentIndex(pWindowResInfo, lastKey, ascQuery); // the number of completed slots are larger than the threshold, return current generated results to client. if (numOfClosed > pQuery->rec.threshold) { @@ -1047,24 +1045,6 @@ static void setNotInterpoWindowKey(SQLFunctionCtx* pCtx, int32_t numOfOutput, in } } -//static double getTSWindowInterpoVal(SColumnInfoData* pColInfo, int16_t srcColIndex, int16_t rowIndex, TSKEY key, char** prevRow, TSKEY* tsCols, int32_t step) { -// TSKEY start = tsCols[rowIndex]; -// TSKEY prevTs = (rowIndex == 0)? *(TSKEY *) prevRow[0] : tsCols[rowIndex - step]; -// -// double v1 = 0, v2 = 0, v = 0; -// char *prevVal = (rowIndex == 0)? 
prevRow[srcColIndex] : ((char*)pColInfo->pData) + (rowIndex - step) * pColInfo->info.bytes; -// -// GET_TYPED_DATA(v1, double, pColInfo->info.type, (char *)prevVal); -// GET_TYPED_DATA(v2, double, pColInfo->info.type, (char *)pColInfo->pData + rowIndex * pColInfo->info.bytes); -// -// SPoint point1 = (SPoint){.key = prevTs, .val = &v1}; -// SPoint point2 = (SPoint){.key = start, .val = &v2}; -// SPoint point = (SPoint){.key = key, .val = &v}; -// taosGetLinearInterpolationVal(TSDB_DATA_TYPE_DOUBLE, &point1, &point2, &point); -// -// return v; -//} - // window start key interpolation static bool setTimeWindowInterpolationStartTs(SQueryRuntimeEnv* pRuntimeEnv, int32_t pos, int32_t numOfRows, SArray* pDataBlock, TSKEY* tsCols, STimeWindow* win) { SQuery* pQuery = pRuntimeEnv->pQuery; @@ -1235,6 +1215,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_START_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP); } done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP); @@ -1246,6 +1228,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_END_INTERP); } } @@ -1286,6 +1270,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_START_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_START_INTERP); } done = resultRowInterpolated(pResult, RESULT_ROW_END_INTERP); @@ -1296,6 +1282,8 @@ static void blockwiseApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SDataStatis * if (interp) { setResultRowInterpo(pResult, RESULT_ROW_END_INTERP); } + } else { + setNotInterpoWindowKey(pRuntimeEnv->pCtx, pQuery->numOfOutput, RESULT_ROW_END_INTERP); } } @@ -1799,9 +1787,12 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl // interval query with limit applied int32_t numOfRes = 0; - if (QUERY_IS_INTERVAL_QUERY(pQuery) || pRuntimeEnv->groupbyNormalCol) { + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { numOfRes = doCheckQueryCompleted(pRuntimeEnv, lastKey, pWindowResInfo); - } else { + } else if (pRuntimeEnv->groupbyNormalCol) { + closeAllTimeWindow(pWindowResInfo); + numOfRes = pWindowResInfo->size; + } else { // projection query numOfRes = (int32_t)getNumOfResult(pRuntimeEnv); // update the number of output result @@ -2138,8 +2129,31 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { pRuntimeEnv->pool = destroyResultRowPool(pRuntimeEnv->pool); } +static bool needBuildResAfterQueryComplete(SQInfo* pQInfo) { + return pQInfo->rspContext != NULL; +} + #define IS_QUERY_KILLED(_q) ((_q)->code == TSDB_CODE_TSC_QUERY_CANCELLED) +static bool isQueryKilled(SQInfo *pQInfo) { + if (IS_QUERY_KILLED(pQInfo)) { + return true; + } + + // query has been executed more than tsShellActivityTimer, and the retrieve has not arrived + // abort current query execution. 
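// i.e. the query has an executing owner, more than getMaximumIdleDurationSec() (tsShellActivityTimer * 2)
// seconds have elapsed since startExecTs, and no retrieve request (rspContext) is pending,
// so the query is treated as abandoned and aborted as if it had been killed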
+ if (pQInfo->owner != 0 && ((taosGetTimestampSec() - pQInfo->startExecTs) > getMaximumIdleDurationSec()) && + (!needBuildResAfterQueryComplete(pQInfo))) { + + assert(pQInfo->startExecTs != 0); + qDebug("QInfo:%p retrieve not arrive beyond %d sec, abort current query execution, start:%"PRId64", current:%d", pQInfo, 1, + pQInfo->startExecTs, taosGetTimestampSec()); + return true; + } + + return false; +} + static void setQueryKilled(SQInfo *pQInfo) { pQInfo->code = TSDB_CODE_TSC_QUERY_CANCELLED;} static bool isFixedOutputQuery(SQueryRuntimeEnv* pRuntimeEnv) { @@ -2864,7 +2878,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) { while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; - if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { + if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -3432,7 +3446,7 @@ int32_t mergeIntoGroupResultImpl(SQInfo *pQInfo, SArray *pGroup) { int64_t startt = taosGetTimestampMs(); while (1) { - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); tfree(pTableList); @@ -4018,7 +4032,7 @@ void scanOneTableDataBlocks(SQueryRuntimeEnv *pRuntimeEnv, TSKEY start) { cond.twindow.skey, cond.twindow.ekey); // check if query is killed or not - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } } @@ -4461,6 +4475,18 @@ static void stableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBloc } else { blockwiseApplyFunctions(pRuntimeEnv, pStatis, pDataBlockInfo, pWindowResInfo, searchFn, pDataBlock); } + + if (QUERY_IS_INTERVAL_QUERY(pQuery)) { + bool ascQuery = QUERY_IS_ASC_QUERY(pQuery); + + // TODO refactor + if ((pTableQueryInfo->lastKey >= pTableQueryInfo->win.ekey && ascQuery) || (pTableQueryInfo->lastKey <= pTableQueryInfo->win.ekey && (!ascQuery))) { + closeAllTimeWindow(pWindowResInfo); + pWindowResInfo->curIndex = pWindowResInfo->size - 1; + } else { + updateResultRowCurrentIndex(pWindowResInfo, pTableQueryInfo->lastKey, ascQuery); + } + } } bool queryHasRemainResForTableQuery(SQueryRuntimeEnv* pRuntimeEnv) { @@ -4675,7 +4701,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { SDataBlockInfo blockInfo = SDATA_BLOCK_INITIALIZER; while (tsdbNextDataBlock(pQueryHandle)) { - if (IS_QUERY_KILLED(GET_QINFO_ADDR(pRuntimeEnv))) { + if (isQueryKilled(GET_QINFO_ADDR(pRuntimeEnv))) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5112,7 +5138,7 @@ static int64_t scanMultiTableDataBlocks(SQInfo *pQInfo) { while (tsdbNextDataBlock(pQueryHandle)) { summary->totalBlocks += 1; - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5479,19 +5505,25 @@ static void sequentialTableProcess(SQInfo *pQInfo) { // return; // } + if (pQuery->prjInfo.vgroupLimit != -1) { + assert(pQuery->limit.limit == -1 && pQuery->limit.offset == 0); + } else if (pQuery->limit.limit != -1) { + assert(pQuery->prjInfo.vgroupLimit == -1); + } + bool hasMoreBlock = true; int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order); SQueryCostInfo *summary = &pRuntimeEnv->summary; while ((hasMoreBlock = tsdbNextDataBlock(pQueryHandle)) == true) { summary->totalBlocks += 1; - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } tsdbRetrieveDataBlockInfo(pQueryHandle, &blockInfo); STableQueryInfo **pTableQueryInfo = 
- (STableQueryInfo **)taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid)); + (STableQueryInfo **) taosHashGet(pQInfo->tableqinfoGroupInfo.map, &blockInfo.tid, sizeof(blockInfo.tid)); if (pTableQueryInfo == NULL) { break; } @@ -5503,6 +5535,25 @@ static void sequentialTableProcess(SQInfo *pQInfo) { setTagVal(pRuntimeEnv, pQuery->current->pTable, pQInfo->tsdb); } + if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->current->windowResInfo.size > pQuery->prjInfo.vgroupLimit) { + pQuery->current->lastKey = + QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step; + continue; + } + + // it is a super table ordered projection query, check for the number of output for each vgroup + if (pQuery->prjInfo.vgroupLimit > 0 && pQuery->rec.rows >= pQuery->prjInfo.vgroupLimit) { + if (QUERY_IS_ASC_QUERY(pQuery) && blockInfo.window.skey >= pQuery->prjInfo.ts) { + pQuery->current->lastKey = + QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step; + continue; + } else if (!QUERY_IS_ASC_QUERY(pQuery) && blockInfo.window.ekey <= pQuery->prjInfo.ts) { + pQuery->current->lastKey = + QUERY_IS_ASC_QUERY(pQuery) ? blockInfo.window.ekey + step : blockInfo.window.skey + step; + continue; + } + } + uint32_t status = 0; SDataStatis *pStatis = NULL; SArray *pDataBlock = NULL; @@ -5520,6 +5571,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) { } ensureOutputBuffer(pRuntimeEnv, &blockInfo); + int64_t prev = getNumOfResult(pRuntimeEnv); + pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : blockInfo.rows - 1; int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock); @@ -5530,17 +5583,30 @@ static void sequentialTableProcess(SQInfo *pQInfo) { pQuery->rec.rows = getNumOfResult(pRuntimeEnv); + int64_t inc = pQuery->rec.rows - prev; + pQuery->current->windowResInfo.size += (int32_t) inc; + // the flag may be set by tableApplyFunctionsOnBlock, clear it here CLEAR_QUERY_STATUS(pQuery, QUERY_COMPLETED); updateTableIdInfo(pQuery, pQInfo->arrTableIdInfo); - skipResults(pRuntimeEnv); - // the limitation of output result is reached, set the query completed - if (limitResults(pRuntimeEnv)) { - setQueryStatus(pQuery, QUERY_COMPLETED); - SET_STABLE_QUERY_OVER(pQInfo); - break; + if (pQuery->prjInfo.vgroupLimit >= 0) { + if (((pQuery->rec.rows + pQuery->rec.total) < pQuery->prjInfo.vgroupLimit) || ((pQuery->rec.rows + pQuery->rec.total) > pQuery->prjInfo.vgroupLimit && prev < pQuery->prjInfo.vgroupLimit)) { + if (QUERY_IS_ASC_QUERY(pQuery) && pQuery->prjInfo.ts < blockInfo.window.ekey) { + pQuery->prjInfo.ts = blockInfo.window.ekey; + } else if (!QUERY_IS_ASC_QUERY(pQuery) && pQuery->prjInfo.ts > blockInfo.window.skey) { + pQuery->prjInfo.ts = blockInfo.window.skey; + } + } + } else { + // the limitation of output result is reached, set the query completed + skipResults(pRuntimeEnv); + if (limitResults(pRuntimeEnv)) { + setQueryStatus(pQuery, QUERY_COMPLETED); + SET_STABLE_QUERY_OVER(pQInfo); + break; + } } // while the output buffer is full or limit/offset is applied, query may be paused here @@ -5582,7 +5648,7 @@ static void sequentialTableProcess(SQInfo *pQInfo) { 1 == taosArrayGetSize(pQInfo->tableqinfoGroupInfo.pGroupList)); while (pQInfo->tableIndex < pQInfo->tableqinfoGroupInfo.numOfTables) { - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5768,7 +5834,7 @@ static void multiTableQueryProcess(SQInfo 
*pQInfo) { qDebug("QInfo:%p master scan completed, elapsed time: %" PRId64 "ms, reverse scan start", pQInfo, el); // query error occurred or query is killed, abort current execution - if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { + if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -5789,7 +5855,7 @@ static void multiTableQueryProcess(SQInfo *pQInfo) { setQueryStatus(pQuery, QUERY_COMPLETED); - if (pQInfo->code != TSDB_CODE_SUCCESS || IS_QUERY_KILLED(pQInfo)) { + if (pQInfo->code != TSDB_CODE_SUCCESS || isQueryKilled(pQInfo)) { qDebug("QInfo:%p query killed or error occurred, code:%s, abort", pQInfo, tstrerror(pQInfo->code)); //TODO finalizeQueryResult may cause SEGSEV, since the memory may not allocated yet, add a cleanup function instead longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); @@ -5905,7 +5971,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) pQuery->rec.rows = getNumOfResult(pRuntimeEnv); doSecondaryArithmeticProcess(pQuery); - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { longjmp(pRuntimeEnv->env, TSDB_CODE_TSC_QUERY_CANCELLED); } @@ -6284,7 +6350,7 @@ static int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SArray **pTableIdList, pQueryMsg->interval.offset = htobe64(pQueryMsg->interval.offset); pQueryMsg->limit = htobe64(pQueryMsg->limit); pQueryMsg->offset = htobe64(pQueryMsg->offset); - pQueryMsg->tableLimit = htobe64(pQueryMsg->tableLimit); + pQueryMsg->vgroupLimit = htobe64(pQueryMsg->vgroupLimit); pQueryMsg->order = htons(pQueryMsg->order); pQueryMsg->orderColId = htons(pQueryMsg->orderColId); @@ -6885,6 +6951,8 @@ static SQInfo *createQInfoImpl(SQueryTableMsg *pQueryMsg, SSqlGroupbyExpr *pGrou pQuery->fillType = pQueryMsg->fillType; pQuery->numOfTags = pQueryMsg->numOfTags; pQuery->tagColList = pTagCols; + pQuery->prjInfo.vgroupLimit = pQueryMsg->vgroupLimit; + pQuery->prjInfo.ts = (pQueryMsg->order == TSDB_ORDER_ASC)? INT64_MIN:INT64_MAX; pQuery->colList = calloc(numOfCols, sizeof(SSingleColumnFilterInfo)); if (pQuery->colList == NULL) { @@ -7479,7 +7547,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_lock(&pQInfo->lock); pQInfo->dataReady = QUERY_RESULT_READY; - buildRes = (pQInfo->rspContext != NULL); + buildRes = needBuildResAfterQueryComplete(pQInfo); // clear qhandle owner, it must be in the secure area. other thread may run ahead before current, after it is // put into task to be executed. @@ -7488,6 +7556,7 @@ static bool doBuildResCheck(SQInfo* pQInfo) { pthread_mutex_unlock(&pQInfo->lock); + // used in retrieve blocking model. 
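// in the blocking retrieve model (tsRetrieveBlockingModel), qRetrieveQueryResultInfo blocks on this
// semaphore until the result is ready, so the post below wakes the waiting retrieve call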
tsem_post(&pQInfo->ready); return buildRes; } @@ -7504,7 +7573,9 @@ bool qTableQuery(qinfo_t qinfo) { return false; } - if (IS_QUERY_KILLED(pQInfo)) { + pQInfo->startExecTs = taosGetTimestampSec(); + + if (isQueryKilled(pQInfo)) { qDebug("QInfo:%p it is already killed, abort", pQInfo); return doBuildResCheck(pQInfo); } @@ -7536,7 +7607,7 @@ bool qTableQuery(qinfo_t qinfo) { } SQuery* pQuery = pRuntimeEnv->pQuery; - if (IS_QUERY_KILLED(pQInfo)) { + if (isQueryKilled(pQInfo)) { qDebug("QInfo:%p query is killed", pQInfo); } else if (pQuery->rec.rows == 0) { qDebug("QInfo:%p over, %" PRIzu " tables queried, %"PRId64" rows are returned", pQInfo, pQInfo->tableqinfoGroupInfo.numOfTables, pQuery->rec.total); @@ -7564,30 +7635,31 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex int32_t code = TSDB_CODE_SUCCESS; -#if _NON_BLOCKING_RETRIEVE - SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - - pthread_mutex_lock(&pQInfo->lock); - assert(pQInfo->rspContext == NULL); - - if (pQInfo->dataReady == QUERY_RESULT_READY) { - *buildRes = true; - qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%"PRId64", code:%d", pQInfo, pQuery->rowSize, pQuery->rec.rows, - pQInfo->code); - } else { - *buildRes = false; - qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); + if (tsRetrieveBlockingModel) { pQInfo->rspContext = pRspContext; - assert(pQInfo->rspContext != NULL); - } + tsem_wait(&pQInfo->ready); + *buildRes = true; + code = pQInfo->code; + } else { + SQuery *pQuery = pQInfo->runtimeEnv.pQuery; - code = pQInfo->code; - pthread_mutex_unlock(&pQInfo->lock); -#else - tsem_wait(&pQInfo->ready); - *buildRes = true; - code = pQInfo->code; -#endif + pthread_mutex_lock(&pQInfo->lock); + + assert(pQInfo->rspContext == NULL); + if (pQInfo->dataReady == QUERY_RESULT_READY) { + *buildRes = true; + qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%" PRId64 ", code:%s", pQInfo, pQuery->rowSize, + pQuery->rec.rows, tstrerror(pQInfo->code)); + } else { + *buildRes = false; + qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); + pQInfo->rspContext = pRspContext; + assert(pQInfo->rspContext != NULL); + } + + code = pQInfo->code; + pthread_mutex_unlock(&pQInfo->lock); + } return code; } @@ -7655,7 +7727,7 @@ int32_t qQueryCompleted(qinfo_t qinfo) { } SQuery* pQuery = pQInfo->runtimeEnv.pQuery; - return IS_QUERY_KILLED(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); + return isQueryKilled(pQInfo) || Q_STATUS_EQUAL(pQuery->status, QUERY_OVER); } int32_t qKillQuery(qinfo_t qinfo) { @@ -7952,8 +8024,6 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } - const int32_t DEFAULT_QHANDLE_LIFE_SPAN = tsShellActivityTimer * 2 * 1000; - SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo); @@ -7969,7 +8039,8 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qInfo) { return NULL; } else { TSDB_CACHE_PTR_TYPE handleVal = (TSDB_CACHE_PTR_TYPE) qInfo; - void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), DEFAULT_QHANDLE_LIFE_SPAN); + void** handle = taosCachePut(pQueryMgmt->qinfoPool, &handleVal, sizeof(TSDB_CACHE_PTR_TYPE), &qInfo, sizeof(TSDB_CACHE_PTR_TYPE), + (getMaximumIdleDurationSec()*1000)); // pthread_mutex_unlock(&pQueryMgmt->lock); return handle; diff --git a/src/query/src/qFilterfunc.c b/src/query/src/qFilterfunc.c index 
b6050dddd8..2a40533e90 100644 --- a/src/query/src/qFilterfunc.c +++ b/src/query/src/qFilterfunc.c @@ -21,6 +21,12 @@ #include "tcompare.h" #include "tsqlfunction.h" +#define FLT_EQUAL(_x, _y) (fabs((_x) - (_y)) <= (4 * FLT_EPSILON)) +#define FLT_GREATER(_x, _y) (!FLT_EQUAL((_x), (_y)) && ((_x) > (_y))) +#define FLT_LESS(_x, _y) (!FLT_EQUAL((_x), (_y)) && ((_x) < (_y))) +#define FLT_GREATEREQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) > (_y))) +#define FLT_LESSEQUAL(_x, _y) (FLT_EQUAL((_x), (_y)) || ((_x) < (_y))) + bool less_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int8_t *)minval < pFilter->filterInfo.upperBndi); } @@ -38,35 +44,35 @@ bool less_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { } bool less_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)minval < pFilter->filterInfo.upperBndd); + return FLT_LESS(*(float*)minval, pFilter->filterInfo.upperBndd); } bool less_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(double *)minval < pFilter->filterInfo.upperBndd); + return *(double *)minval < pFilter->filterInfo.upperBndd; } ////////////////////////////////////////////////////////////////// -bool large_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i8(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int8_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_i16(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i16(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int16_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_i32(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i32(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int32_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(int64_t *)maxval > pFilter->filterInfo.lowerBndi); } -bool large_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)maxval > pFilter->filterInfo.lowerBndd); +bool larger_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { + return FLT_GREATER(*(float*)maxval, pFilter->filterInfo.lowerBndd); } -bool large_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) { +bool larger_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) { return (*(double *)maxval > pFilter->filterInfo.lowerBndd); } ///////////////////////////////////////////////////////////////////// @@ -88,10 +94,14 @@ bool lessEqual_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { } bool lessEqual_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)minval <= pFilter->filterInfo.upperBndd); + return FLT_LESSEQUAL(*(float*)minval, pFilter->filterInfo.upperBndd); } bool lessEqual_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) { + if ((fabs(*(double*)minval) - pFilter->filterInfo.upperBndd) <= 2 * DBL_EPSILON) { + return true; + } + return (*(double *)minval <= pFilter->filterInfo.upperBndd); } @@ -113,11 +123,15 @@ bool largeEqual_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { } bool largeEqual_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)maxval >= pFilter->filterInfo.lowerBndd); + return FLT_GREATEREQUAL(*(float*)maxval, pFilter->filterInfo.lowerBndd); } bool largeEqual_dd(SColumnFilterElem *pFilter, char *minval, char 
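The new FLT_EQUAL/FLT_GREATER/FLT_LESSEQUAL macros compare float column values against the filter bound with a 4*FLT_EPSILON tolerance instead of exact equality. That is what lets a filter such as `col = 0.3` match rows whose value went through float rounding: the bound is parsed as a double, while the stored float widens to a slightly different double. A small standalone check of the same macros (the concrete values are only for illustration):

#include <float.h>
#include <math.h>
#include <stdio.h>

#define FLT_EQUAL(_x, _y)      (fabs((_x) - (_y)) <= (4 * FLT_EPSILON))
#define FLT_GREATER(_x, _y)    (!FLT_EQUAL((_x), (_y)) && ((_x) > (_y)))
#define FLT_LESSEQUAL(_x, _y)  (FLT_EQUAL((_x), (_y)) || ((_x) < (_y)))

int main(void) {
  float  stored = 0.3f;   /* the float value saved in the column */
  double bound  = 0.3;    /* the filter bound the user typed, kept as a double */

  printf("exact ==      : %d\n", (double)stored == bound);      /* 0: widening differs */
  printf("FLT_EQUAL     : %d\n", FLT_EQUAL(stored, bound));      /* 1 */
  printf("FLT_GREATER   : %d\n", FLT_GREATER(stored, bound));    /* 0 */
  printf("FLT_LESSEQUAL : %d\n", FLT_LESSEQUAL(stored, bound));  /* 1 */
  return 0;
}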
*maxval) { - return (*(double *)maxval >= pFilter->filterInfo.lowerBndd); + if (fabs(*(double *)maxval - pFilter->filterInfo.lowerBndd) <= 2 * DBL_EPSILON) { + return true; + } + + return (*(double *)maxval - pFilter->filterInfo.lowerBndd > (2 * DBL_EPSILON)); } //////////////////////////////////////////////////////////////////////// @@ -162,10 +176,12 @@ bool equal_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { } } +// user specified input filter value and the original saved float value may needs to +// increase the tolerance to obtain the correct result. bool equal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { if (*(float *)minval == *(float *)maxval) { - return (fabs(*(float *)minval - pFilter->filterInfo.lowerBndd) <= FLT_EPSILON); - } else { /* range filter */ + return FLT_EQUAL(*(float*)minval, pFilter->filterInfo.lowerBndd); + } else { // range filter assert(*(float *)minval < *(float *)maxval); return *(float *)minval <= pFilter->filterInfo.lowerBndd && *(float *)maxval >= pFilter->filterInfo.lowerBndd; } @@ -173,10 +189,9 @@ bool equal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { bool equal_dd(SColumnFilterElem *pFilter, char *minval, char *maxval) { if (*(double *)minval == *(double *)maxval) { - return (*(double *)minval == pFilter->filterInfo.lowerBndd); - } else { /* range filter */ + return (fabs(*(double *)minval - pFilter->filterInfo.lowerBndd) <= 2 * DBL_EPSILON); + } else { // range filter assert(*(double *)minval < *(double *)maxval); - return *(double *)minval <= pFilter->filterInfo.lowerBndi && *(double *)maxval >= pFilter->filterInfo.lowerBndi; } } @@ -255,7 +270,7 @@ bool nequal_i64(SColumnFilterElem *pFilter, char *minval, char *maxval) { bool nequal_ds(SColumnFilterElem *pFilter, char *minval, char *maxval) { if (*(float *)minval == *(float *)maxval) { - return (*(float *)minval != pFilter->filterInfo.lowerBndd); + return !FLT_EQUAL(*(float *)minval, pFilter->filterInfo.lowerBndd); } return true; @@ -364,7 +379,8 @@ bool rangeFilter_i64_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) //////////////////////////////////////////////////////////////////////// bool rangeFilter_ds_ii(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)minval <= pFilter->filterInfo.upperBndd && *(float *)maxval >= pFilter->filterInfo.lowerBndd); + return FLT_LESSEQUAL(*(float *)minval, pFilter->filterInfo.upperBndd) && + FLT_GREATEREQUAL(*(float *)maxval, pFilter->filterInfo.lowerBndd); } bool rangeFilter_ds_ee(SColumnFilterElem *pFilter, char *minval, char *maxval) { @@ -376,7 +392,8 @@ bool rangeFilter_ds_ie(SColumnFilterElem *pFilter, char *minval, char *maxval) { } bool rangeFilter_ds_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) { - return (*(float *)minval <= pFilter->filterInfo.upperBndd && *(float *)maxval > pFilter->filterInfo.lowerBndd); + return FLT_GREATER(*(float *)maxval, pFilter->filterInfo.lowerBndd) && + FLT_LESSEQUAL(*(float *)minval, pFilter->filterInfo.upperBndd); } ////////////////////////////////////////////////////////////////////////// @@ -400,7 +417,7 @@ bool rangeFilter_dd_ei(SColumnFilterElem *pFilter, char *minval, char *maxval) { bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_i8, - large_i8, + larger_i8, equal_i8, lessEqual_i8, largeEqual_i8, @@ -413,7 +430,7 @@ bool (*filterFunc_i8[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char 
*maxval) = { NULL, less_i16, - large_i16, + larger_i16, equal_i16, lessEqual_i16, largeEqual_i16, @@ -426,7 +443,7 @@ bool (*filterFunc_i16[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_i32, - large_i32, + larger_i32, equal_i32, lessEqual_i32, largeEqual_i32, @@ -439,7 +456,7 @@ bool (*filterFunc_i32[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_i64, - large_i64, + larger_i64, equal_i64, lessEqual_i64, largeEqual_i64, @@ -452,7 +469,7 @@ bool (*filterFunc_i64[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_ds, - large_ds, + larger_ds, equal_ds, lessEqual_ds, largeEqual_ds, @@ -465,7 +482,7 @@ bool (*filterFunc_ds[])(SColumnFilterElem *pFilter, char *minval, char *maxval) bool (*filterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *maxval) = { NULL, less_dd, - large_dd, + larger_dd, equal_dd, lessEqual_dd, largeEqual_dd, @@ -551,7 +568,7 @@ bool (*rangeFilterFunc_dd[])(SColumnFilterElem *pFilter, char *minval, char *max __filter_func_t* getRangeFilterFuncArray(int32_t type) { switch(type) { - case TSDB_DATA_TYPE_BOOL: return rangeFilterFunc_i8; + case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: return rangeFilterFunc_i8; case TSDB_DATA_TYPE_SMALLINT: return rangeFilterFunc_i16; case TSDB_DATA_TYPE_INT: return rangeFilterFunc_i32; @@ -565,7 +582,7 @@ __filter_func_t* getRangeFilterFuncArray(int32_t type) { __filter_func_t* getValueFilterFuncArray(int32_t type) { switch(type) { - case TSDB_DATA_TYPE_BOOL: return filterFunc_i8; + case TSDB_DATA_TYPE_BOOL: case TSDB_DATA_TYPE_TINYINT: return filterFunc_i8; case TSDB_DATA_TYPE_SMALLINT: return filterFunc_i16; case TSDB_DATA_TYPE_INT: return filterFunc_i32; diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index 3bdc0d477f..51125d62b9 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -234,7 +234,13 @@ int32_t tBucketIntHash(tMemBucket *pBucket, const void *value) { } int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value) { - double v = GET_DOUBLE_VAL(value); + double v = 0; + if (pBucket->type == TSDB_DATA_TYPE_FLOAT) { + v = GET_FLOAT_VAL(value); + } else { + v = GET_DOUBLE_VAL(value); + } + int32_t index = -1; if (pBucket->range.dMinVal == DBL_MAX) { diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index 00a97d7bc2..596a3b28e2 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -631,15 +631,19 @@ static void rpcReleaseConn(SRpcConn *pConn) { // if there is an outgoing message, free it if (pConn->outType && pConn->pReqMsg) { SRpcReqContext *pContext = pConn->pContext; - if (pContext->pRsp) { + if (pContext) { + if (pContext->pRsp) { // for synchronous API, post semaphore to unblock app - pContext->pRsp->code = TSDB_CODE_RPC_APP_ERROR; - pContext->pRsp->pCont = NULL; - pContext->pRsp->contLen = 0; - tsem_post(pContext->pSem); + pContext->pRsp->code = TSDB_CODE_RPC_APP_ERROR; + pContext->pRsp->pCont = NULL; + pContext->pRsp->contLen = 0; + tsem_post(pContext->pSem); + } + pContext->pConn = NULL; + taosRemoveRef(tsRpcRefId, pContext->rid); + } else { + assert(0); } - pContext->pConn = NULL; - taosRemoveRef(tsRpcRefId, pContext->rid); } } @@ -1083,16 +1087,13 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) { if (code == 
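The qPercentile.c hunk fixes tBucketDoubleHash to read the incoming bytes as a float when the bucket type is TSDB_DATA_TYPE_FLOAT, instead of always reinterpreting them through a double accessor: the four bytes of a float are not a valid double, so the old code hashed garbage for float columns. A small illustration of the difference, with memcpy standing in for the GET_FLOAT_VAL/GET_DOUBLE_VAL accessors:

#include <stdio.h>
#include <string.h>

int main(void) {
  float fv = 12.5f;
  char  buf[sizeof(double)] = {0};
  memcpy(buf, &fv, sizeof(float));          /* only 4 bytes carry the value */

  /* wrong: treat the float's bytes as a double (old behaviour) */
  double asDouble;
  memcpy(&asDouble, buf, sizeof(double));

  /* right: read it back as a float, then widen (new behaviour) */
  float asFloat;
  memcpy(&asFloat, buf, sizeof(float));

  printf("reinterpreted as double: %g\n", asDouble);        /* garbage, not 12.5 */
  printf("read as float, widened : %g\n", (double)asFloat); /* 12.5 */
  return 0;
}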
TSDB_CODE_RPC_INVALID_TIME_STAMP || code == TSDB_CODE_RPC_AUTH_FAILURE) { rpcCloseConn(pConn); } - tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code); + if (pHead->msgType + 1 > 1 && pHead->msgType+1 < TSDB_MSG_TYPE_MAX) { + tDebug("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code); + } else { + tError("%s %p %p, %s is sent with error code:0x%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType], code); + } } } else { // msg is passed to app only parsing is ok - - if (pHead->msgType == TSDB_MSG_TYPE_NETWORK_TEST) { - rpcSendQuickRsp(pConn, TSDB_CODE_SUCCESS); - rpcFreeMsg(pRecv->msg); - return pConn; - } - rpcProcessIncomingMsg(pConn, pHead, pContext); } } diff --git a/src/rpc/src/rpcTcp.c b/src/rpc/src/rpcTcp.c index 2850046d05..178b96c423 100644 --- a/src/rpc/src/rpcTcp.c +++ b/src/rpc/src/rpcTcp.c @@ -242,7 +242,14 @@ static void *taosAcceptTcpConnection(void *arg) { taosKeepTcpAlive(connFd); struct timeval to={1, 0}; - taosSetSockOpt(connFd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to)); + int32_t ret = taosSetSockOpt(connFd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to)); + if (ret != 0) { + taosCloseSocket(connFd); + tError("%s failed to set recv timeout fd(%s)for connection from:%s:%hu", pServerObj->label, strerror(errno), + taosInetNtoa(caddr.sin_addr), htons(caddr.sin_port)); + continue; + } + // pick up the thread to handle this connection pThreadObj = pServerObj->pThreadObj[threadId]; diff --git a/src/rpc/test/rclient.c b/src/rpc/test/rclient.c index 5721525ade..faa6d40da3 100644 --- a/src/rpc/test/rclient.c +++ b/src/rpc/test/rclient.c @@ -188,7 +188,8 @@ int main(int argc, char *argv[]) { tInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs*appThreads); tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0*numOfReqs*appThreads/usedTime, msgSize); - getchar(); + int ch = getchar(); + UNUSED(ch); taosCloseLog(); diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index 6d0c52284f..2be25447c4 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -62,12 +62,15 @@ typedef struct { typedef struct { SSyncHead syncHead; uint16_t port; + uint16_t tranId; char fqdn[TSDB_FQDN_LEN]; int32_t sourceId; // only for arbitrator } SFirstPkt; typedef struct { - int8_t sync; + int8_t sync; + int8_t reserved; + uint16_t tranId; } SFirstPktRsp; typedef struct { @@ -187,6 +190,7 @@ void syncRestartConnection(SSyncPeer *pPeer); void syncBroadcastStatus(SSyncNode *pNode); void syncAddPeerRef(SSyncPeer *pPeer); int32_t syncDecPeerRef(SSyncPeer *pPeer); +uint16_t syncGenTranId(); #ifdef __cplusplus } diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index b73ca27ce9..080c7d3514 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -226,7 +226,7 @@ int64_t syncStart(const SSyncInfo *pInfo) { } if (pNode->selfIndex < 0) { - sInfo("vgId:%d, this node is not configured", pNode->vgId); + sError("vgId:%d, this node is not configured", pNode->vgId); terrno = TSDB_CODE_SYN_INVALID_CONFIG; syncStop(pNode->rid); return -1; @@ -396,9 +396,7 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) { pFwdRsp->code = code; int32_t msgLen = sizeof(SSyncHead) + sizeof(SFwdRsp); - int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, msgLen); - - if (retLen == msgLen) { + if (taosWriteMsg(pPeer->peerFd, msg, msgLen) == msgLen) { sTrace("%s, forward-rsp is 
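The rpcTcp.c change stops ignoring the return value of taosSetSockOpt when arming the one-second receive timeout on a freshly accepted connection: on failure the fd is closed and the accept loop continues, rather than handing a half-configured socket to a worker thread. A minimal sketch of the same pattern on a plain BSD socket (no accept loop, just the option handling):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <unistd.h>

int main(void) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) { perror("socket"); return 1; }

  struct timeval to = {1, 0};   /* 1s receive timeout, as in taosAcceptTcpConnection */
  if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, &to, sizeof(to)) != 0) {
    fprintf(stderr, "failed to set recv timeout since %s, drop connection\n", strerror(errno));
    close(fd);                  /* do not keep a half-configured fd */
    return 1;
  }

  printf("recv timeout armed on fd %d\n", fd);
  close(fd);
  return 0;
}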
sent, code:%x hver:%" PRIu64, pPeer->id, code, version); } else { sDebug("%s, failed to send forward ack, restart", pPeer->id); @@ -795,7 +793,7 @@ void syncRestartConnection(SSyncPeer *pPeer) { static void syncProcessSyncRequest(char *msg, SSyncPeer *pPeer) { SSyncNode *pNode = pPeer->pSyncNode; - sDebug("%s, sync-req is received", pPeer->id); + sInfo("%s, sync-req is received", pPeer->id); if (pPeer->ip == 0) return; @@ -873,6 +871,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { firstPkt.syncHead.type = TAOS_SMSG_SYNC_REQ; firstPkt.syncHead.vgId = pNode->vgId; firstPkt.syncHead.len = sizeof(firstPkt) - sizeof(SSyncHead); + firstPkt.tranId = syncGenTranId(); tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); firstPkt.port = tsSyncPort; taosTmrReset(syncNotStarted, tsSyncTimer * 1000, pPeer, tsSyncTmrCtrl, &pPeer->timer); @@ -880,8 +879,7 @@ static void syncRecoverFromMaster(SSyncPeer *pPeer) { if (taosWriteMsg(pPeer->peerFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { sError("%s, failed to send sync-req to peer", pPeer->id); } else { - nodeSStatus = TAOS_SYNC_STATUS_START; - sInfo("%s, sync-req is sent to peer, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); + sInfo("%s, sync-req is sent to peer, tranId:%u, sstatus:%s", pPeer->id, firstPkt.tranId, syncStatus[nodeSStatus]); } } @@ -1018,8 +1016,7 @@ static void syncSendPeersStatusMsgToPeer(SSyncPeer *pPeer, char ack, int8_t type pPeersStatus->peersStatus[i].version = pNode->peerInfo[i]->version; } - int32_t retLen = taosWriteMsg(pPeer->peerFd, msg, statusMsgLen); - if (retLen == statusMsgLen) { + if (taosWriteMsg(pPeer->peerFd, msg, statusMsgLen) == statusMsgLen) { sDebug("%s, status is sent, self:%s:%s:%" PRIu64 ", peer:%s:%s:%" PRIu64 ", ack:%d tranId:%u type:%s pfd:%d", pPeer->id, syncRole[nodeRole], syncStatus[nodeSStatus], nodeVersion, syncRole[pPeer->role], syncStatus[pPeer->sstatus], pPeer->version, pPeersStatus->ack, pPeersStatus->tranId, @@ -1053,10 +1050,11 @@ static void syncSetupPeerConnection(SSyncPeer *pPeer) { firstPkt.syncHead.type = TAOS_SMSG_STATUS; tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); firstPkt.port = tsSyncPort; + firstPkt.tranId = syncGenTranId(); firstPkt.sourceId = pNode->vgId; // tell arbitrator its vgId if (taosWriteMsg(connFd, &firstPkt, sizeof(firstPkt)) == sizeof(firstPkt)) { - sDebug("%s, connection to peer server is setup, pfd:%d sfd:%d", pPeer->id, connFd, pPeer->syncFd); + sDebug("%s, connection to peer server is setup, pfd:%d sfd:%d tranId:%u", pPeer->id, connFd, pPeer->syncFd, firstPkt.tranId); pPeer->peerFd = connFd; pPeer->role = TAOS_SYNC_ROLE_UNSYNCED; pPeer->pConn = taosAllocateTcpConn(tsTcpPool, pPeer, connFd); @@ -1093,7 +1091,9 @@ static void syncCreateRestoreDataThread(SSyncPeer *pPeer) { pthread_attr_destroy(&thattr); if (ret < 0) { - sError("%s, failed to create sync thread", pPeer->id); + SSyncNode *pNode = pPeer->pSyncNode; + nodeSStatus = TAOS_SYNC_STATUS_INIT; + sError("%s, failed to create sync thread, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); taosClose(pPeer->syncFd); syncDecPeerRef(pPeer); } else { @@ -1123,6 +1123,8 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { return; } + sDebug("vgId:%d, firstPkt is received, tranId:%u", vgId, firstPkt.tranId); + SSyncNode *pNode = *ppNode; pthread_mutex_lock(&pNode->mutex); @@ -1141,6 +1143,9 @@ static void syncProcessIncommingConnection(int32_t connFd, uint32_t sourceIp) { // first packet tells what kind of link if (firstPkt.syncHead.type == 
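Several sync hunks above (forward-rsp, peers-status) drop the intermediate retLen variable and compare the taosWriteMsg return value directly against the full message length, treating anything else, including a short write, as a failure that triggers a connection restart. A sketch of a write-until-done helper whose return value supports exactly that `== msgLen` check, demonstrated on a pipe (names are illustrative, not the TDengine implementation):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* keep writing until len bytes are out (or a hard error); returns bytes written */
static int writeMsg(int fd, const void *buf, int len) {
  int done = 0;
  while (done < len) {
    ssize_t n = write(fd, (const char *)buf + done, len - done);
    if (n < 0) {
      if (errno == EINTR) continue;   /* retry when interrupted by a signal */
      break;                          /* hard error: return the partial count */
    }
    done += (int)n;
  }
  return done;
}

int main(void) {
  int fds[2];
  if (pipe(fds) != 0) { perror("pipe"); return 1; }

  const char msg[] = "forward-rsp";
  int msgLen = (int)sizeof(msg);

  if (writeMsg(fds[1], msg, msgLen) == msgLen) {
    printf("forward-rsp is sent\n");
  } else {
    printf("failed to send forward-rsp, restart connection\n");
  }

  close(fds[0]); close(fds[1]);
  return 0;
}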
TAOS_SMSG_SYNC_DATA) { pPeer->syncFd = connFd; + nodeSStatus = TAOS_SYNC_STATUS_START; + sInfo("%s, sync-data pkt from master is received, tranId:%u, set sstatus:%s", pPeer->id, firstPkt.tranId, + syncStatus[nodeSStatus]); syncCreateRestoreDataThread(pPeer); } else { sDebug("%s, TCP connection is up, pfd:%d sfd:%d, old pfd:%d", pPeer->id, connFd, pPeer->syncFd, pPeer->peerFd); @@ -1312,7 +1317,7 @@ static int32_t syncForwardToPeerImpl(SSyncNode *pNode, void *data, void *mhandle } // always update version - sTrace("vgId:%d, forward to peer, replica:%d role:%s qtype:%s hver:%" PRIu64, pNode->vgId, pNode->replica, + sTrace("vgId:%d, update version, replica:%d role:%s qtype:%s hver:%" PRIu64, pNode->vgId, pNode->replica, syncRole[nodeRole], qtypeStr[qtype], pWalHead->version); nodeVersion = pWalHead->version; diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index d156c93865..4247468bca 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -36,6 +36,8 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex if (sindex < 0 || eindex < sindex) return; + sDebug("%s, extra files will be removed between sindex:%d and eindex:%d", pPeer->id, sindex, eindex); + while (1) { name[0] = 0; magic = (*pNode->getFileInfo)(pNode->vgId, name, &index, eindex, &size, &fversion); @@ -43,7 +45,7 @@ static void syncRemoveExtraFile(SSyncPeer *pPeer, int32_t sindex, int32_t eindex snprintf(fname, sizeof(fname), "%s/%s", pNode->path, name); (void)remove(fname); - sDebug("%s, %s is removed", pPeer->id, fname); + sInfo("%s, %s is removed for its extra", pPeer->id, fname); index++; if (index > eindex) break; @@ -61,11 +63,12 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { bool fileChanged = false; *fversion = 0; - sinfo.index = 0; + sinfo.index = -1; while (1) { // read file info - int32_t ret = taosReadMsg(pPeer->syncFd, &(minfo), sizeof(minfo)); - if (ret < 0) { + minfo.index = -1; + int32_t ret = taosReadMsg(pPeer->syncFd, &minfo, sizeof(SFileInfo)); + if (ret != sizeof(SFileInfo) || minfo.index == -1) { sError("%s, failed to read file info while restore file since %s", pPeer->id, strerror(errno)); break; } @@ -75,7 +78,7 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { sDebug("%s, no more files to restore", pPeer->id); // remove extra files after the current index - syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX); + if (sinfo.index != -1) syncRemoveExtraFile(pPeer, sinfo.index + 1, TAOS_SYNC_MAX_INDEX); code = 0; break; } @@ -96,7 +99,7 @@ static int32_t syncRestoreFile(SSyncPeer *pPeer, uint64_t *fversion) { // send file ack ret = taosWriteMsg(pPeer->syncFd, &fileAck, sizeof(fileAck)); - if (ret < 0) { + if (ret != sizeof(fileAck)) { sError("%s, failed to write file:%s ack while restore file since %s", pPeer->id, minfo.name, strerror(errno)); break; } @@ -154,7 +157,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) { while (1) { ret = taosReadMsg(pPeer->syncFd, pHead, sizeof(SWalHead)); - if (ret < 0) { + if (ret != sizeof(SWalHead)) { sError("%s, failed to read walhead while restore wal since %s", pPeer->id, strerror(errno)); break; } @@ -166,7 +169,7 @@ static int32_t syncRestoreWal(SSyncPeer *pPeer) { } // wal sync over ret = taosReadMsg(pPeer->syncFd, pHead->cont, pHead->len); - if (ret < 0) { + if (ret != pHead->len) { sError("%s, failed to read walcont, len:%d while restore wal since %s", pPeer->id, pHead->len, strerror(errno)); break; } @@ -286,11 +289,12 @@ static int32_t 
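SFirstPkt and SFirstPktRsp gain a tranId that both peers stamp into every handshake-related log line, so one particular sync attempt can be followed across nodes. syncGenTranId() is only declared in this diff, not implemented, so the generator below is a plausible sketch rather than the real function, and the packet layout is reduced to the fields that matter for the example:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* assumption: any roughly-unique 16-bit value is enough to correlate log lines */
static uint16_t syncGenTranId(void) {
  return (uint16_t)(rand() & 0xFFFF);
}

typedef struct {
  uint16_t port;
  uint16_t tranId;
  char     fqdn[128];
} FirstPkt;   /* reduced stand-in for SFirstPkt */

int main(void) {
  srand((unsigned)time(NULL));

  FirstPkt pkt = { .port = 6040, .tranId = syncGenTranId() };
  snprintf(pkt.fqdn, sizeof(pkt.fqdn), "node1.example.com");

  /* every message about this handshake carries the same tranId */
  printf("send sync-req to %s:%u, tranId:%u\n", pkt.fqdn, pkt.port, pkt.tranId);
  printf("recv firstPktRsp from peer, tranId:%u\n", pkt.tranId);
  return 0;
}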
syncRestoreDataStepByStep(SSyncPeer *pPeer) { uint64_t fversion = 0; sInfo("%s, start to restore, sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); - SFirstPktRsp firstPktRsp = {.sync = 1}; - if (taosWriteMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) < 0) { + SFirstPktRsp firstPktRsp = {.sync = 1, .tranId = syncGenTranId()}; + if (taosWriteMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) != sizeof(SFirstPktRsp)) { sError("%s, failed to send sync firstPkt rsp since %s", pPeer->id, strerror(errno)); return -1; } + sDebug("%s, send firstPktRsp to peer, tranId:%u", pPeer->id, firstPktRsp.tranId); sInfo("%s, start to restore file, set sstatus:%s", pPeer->id, syncStatus[nodeSStatus]); int32_t code = syncRestoreFile(pPeer, &fversion); diff --git a/src/sync/src/syncRetrieve.c b/src/sync/src/syncRetrieve.c index 36b197dd46..82e3700c7a 100644 --- a/src/sync/src/syncRetrieve.c +++ b/src/sync/src/syncRetrieve.c @@ -58,7 +58,7 @@ static int32_t syncGetFileVersion(SSyncNode *pNode, SSyncPeer *pPeer) { uint64_t fver, wver; int32_t code = (*pNode->getVersion)(pNode->vgId, &fver, &wver); if (code != 0) { - sDebug("%s, vnode is commiting while retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer); + sDebug("%s, vnode is commiting while get fver for retrieve, last fver:%" PRIu64, pPeer->id, pPeer->lastFileVer); return -1; } @@ -92,7 +92,10 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) { int32_t code = -1; char name[TSDB_FILENAME_LEN * 2] = {0}; - if (syncGetFileVersion(pNode, pPeer) < 0) return -1; + if (syncGetFileVersion(pNode, pPeer) < 0) { + pPeer->fileChanged = 1; + return -1; + } while (1) { // retrieve file info @@ -100,12 +103,11 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) { fileInfo.size = 0; fileInfo.magic = (*pNode->getFileInfo)(pNode->vgId, fileInfo.name, &fileInfo.index, TAOS_SYNC_MAX_INDEX, &fileInfo.size, &fileInfo.fversion); - // fileInfo.size = htonl(size); sDebug("%s, file:%s info is sent, size:%" PRId64, pPeer->id, fileInfo.name, fileInfo.size); // send the file info int32_t ret = taosWriteMsg(pPeer->syncFd, &(fileInfo), sizeof(fileInfo)); - if (ret < 0) { + if (ret != sizeof(fileInfo)) { code = -1; sError("%s, failed to write file:%s info while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno)); break; @@ -119,8 +121,8 @@ static int32_t syncRetrieveFile(SSyncPeer *pPeer) { } // wait for the ack from peer - ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(fileAck)); - if (ret < 0) { + ret = taosReadMsg(pPeer->syncFd, &fileAck, sizeof(SFileAck)); + if (ret != sizeof(SFileAck)) { code = -1; sError("%s, failed to read file:%s ack while retrieve file since %s", pPeer->id, fileInfo.name, strerror(errno)); break; @@ -384,12 +386,15 @@ static int32_t syncRetrieveWal(SSyncPeer *pPeer) { } if (code == 0) { - pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; - sInfo("%s, wal retrieve is finished, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); - SWalHead walHead; memset(&walHead, 0, sizeof(walHead)); - taosWriteMsg(pPeer->syncFd, &walHead, sizeof(walHead)); + if (taosWriteMsg(pPeer->syncFd, &walHead, sizeof(walHead)) == sizeof(walHead)) { + pPeer->sstatus = TAOS_SYNC_STATUS_CACHE; + sInfo("%s, wal retrieve is finished, set sstatus:%s", pPeer->id, syncStatus[pPeer->sstatus]); + } else { + sError("%s, failed to send last wal record since %s", pPeer->id, strerror(errno)); + code = -1; + } } else { sError("%s, failed to send wal since %s, code:0x%x", pPeer->id, strerror(errno), code); } @@ -404,20 +409,23 @@ static int32_t 
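syncRetrieveWal now only switches the peer to TAOS_SYNC_STATUS_CACHE after the zeroed SWalHead, which acts as the end-of-wal marker, has actually been written in full; the restoring side keeps reading headers and stops at the first record whose length is zero (that check sits outside the hunks shown here). A toy version of the sentinel protocol over an in-memory record stream, with the struct trimmed to the two fields that matter:

#include <stdint.h>
#include <stdio.h>

typedef struct {
  uint64_t version;
  int32_t  len;        /* 0 marks "wal retrieve is finished" */
} WalHead;             /* reduced stand-in for SWalHead */

int main(void) {
  /* what the master sends: two real records, then a zeroed terminator */
  WalHead stream[] = { {101, 24}, {102, 56}, {0, 0} };

  for (int i = 0; ; i++) {
    if (stream[i].len == 0) {
      printf("wal restore over, set status to cache\n");
      break;
    }
    printf("restore wal record, hver:%llu len:%d\n",
           (unsigned long long)stream[i].version, stream[i].len);
  }
  return 0;
}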
syncRetrieveFirstPkt(SSyncPeer *pPeer) { memset(&firstPkt, 0, sizeof(firstPkt)); firstPkt.syncHead.type = TAOS_SMSG_SYNC_DATA; firstPkt.syncHead.vgId = pNode->vgId; + firstPkt.tranId = syncGenTranId(); tstrncpy(firstPkt.fqdn, tsNodeFqdn, sizeof(firstPkt.fqdn)); firstPkt.port = tsSyncPort; - if (taosWriteMsg(pPeer->syncFd, &firstPkt, sizeof(firstPkt)) < 0) { - sError("%s, failed to send sync firstPkt since %s", pPeer->id, strerror(errno)); + if (taosWriteMsg(pPeer->syncFd, &firstPkt, sizeof(firstPkt)) != sizeof(firstPkt)) { + sError("%s, failed to send sync firstPkt since %s, tranId:%u", pPeer->id, strerror(errno), firstPkt.tranId); return -1; } + sDebug("%s, send sync-data pkt to peer, tranId:%u", pPeer->id, firstPkt.tranId); SFirstPktRsp firstPktRsp; - if (taosReadMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) < 0) { - sError("%s, failed to read sync firstPkt rsp since %s", pPeer->id, strerror(errno)); + if (taosReadMsg(pPeer->syncFd, &firstPktRsp, sizeof(SFirstPktRsp)) != sizeof(SFirstPktRsp)) { + sError("%s, failed to read sync firstPkt rsp since %s, tranId:%u", pPeer->id, strerror(errno), firstPkt.tranId); return -1; } + sDebug("%s, recv firstPktRsp from peer, tranId:%u", pPeer->id, firstPkt.tranId); return 0; } diff --git a/src/tsdb/src/tsdbCommitQueue.c b/src/tsdb/src/tsdbCommitQueue.c index c86b8f32b7..75a2cbcb8d 100644 --- a/src/tsdb/src/tsdbCommitQueue.c +++ b/src/tsdb/src/tsdbCommitQueue.c @@ -14,6 +14,7 @@ */ #include "os.h" +#include "tglobal.h" #include "tlist.h" #include "tref.h" #include "tsdbMain.h" @@ -36,7 +37,8 @@ static void *tsdbLoopCommit(void *arg); SCommitQueue tsCommitQueue = {0}; -int tsdbInitCommitQueue(int nthreads) { +int tsdbInitCommitQueue() { + int nthreads = tsNumOfCommitThreads; SCommitQueue *pQueue = &tsCommitQueue; if (nthreads < 1) nthreads = 1; diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index 3990c0c516..9d65325001 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -872,7 +872,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) { for (int i = 0; i < pMeta->maxTables; i++) { STable *pTable = pMeta->tables[i]; if (pTable && pTable->type == TSDB_STREAM_TABLE) { - pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, + pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, tsdbGetTableSchemaImpl(pTable, false, false, -1)); } } diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c index 25c815b74e..9dfa147c8f 100644 --- a/src/tsdb/src/tsdbMeta.c +++ b/src/tsdb/src/tsdbMeta.c @@ -828,7 +828,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo if (lock && tsdbUnlockRepoMeta(pRepo) < 0) return -1; if (TABLE_TYPE(pTable) == TSDB_STREAM_TABLE && addIdx) { - pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql, + pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), TABLE_NAME(pTable)->data, pTable->sql, tsdbGetTableSchemaImpl(pTable, false, false, -1)); } diff --git a/src/util/inc/hash.h b/src/util/inc/hash.h index b0319d3e13..5bada93d1c 100644 --- a/src/util/inc/hash.h +++ b/src/util/inc/hash.h @@ -82,7 +82,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp * @param pHashObj * @return */ -size_t taosHashGetSize(const SHashObj *pHashObj); +int32_t taosHashGetSize(const SHashObj *pHashObj); 
/** * put element into hash table, if the element with the same key exists, update it diff --git a/src/util/inc/tconfig.h b/src/util/inc/tconfig.h index 33819f6a20..665528f140 100644 --- a/src/util/inc/tconfig.h +++ b/src/util/inc/tconfig.h @@ -78,6 +78,7 @@ extern char * tsCfgStatusStr[]; void taosReadGlobalLogCfg(); bool taosReadGlobalCfg(); void taosPrintGlobalCfg(); +void taosDumpGlobalCfg(); void taosInitConfigOption(SGlobalCfg cfg); SGlobalCfg * taosGetConfigOption(const char *option); diff --git a/src/util/inc/tnettest.h b/src/util/inc/tnettest.h index 426df5cbb2..b7585bd715 100644 --- a/src/util/inc/tnettest.h +++ b/src/util/inc/tnettest.h @@ -20,27 +20,7 @@ extern "C" { #endif -typedef struct CmdArguments { - char* host; - char* password; - char* user; - char* auth; - char* database; - char* timezone; - bool is_raw_time; - bool is_use_passwd; - char file[TSDB_FILENAME_LEN]; - char dir[TSDB_FILENAME_LEN]; - int threadNum; - char* commands; - int abort; - int port; - int endPort; - int pktLen; - char* netTestRole; -} CmdArguments; - -void taosNetTest(CmdArguments* args); +void taosNetTest(char *role, char *host, int port, int pkgLen); #ifdef __cplusplus } diff --git a/src/util/inc/tnote.h b/src/util/inc/tnote.h index 552224abf0..7511b61f41 100644 --- a/src/util/inc/tnote.h +++ b/src/util/inc/tnote.h @@ -20,41 +20,42 @@ extern "C" { #endif -#include "os.h" -#include "tutil.h" -#include "tglobal.h" - #define MAX_NOTE_LINE_SIZE 66000 #define NOTE_FILE_NAME_LEN 300 - -typedef struct _taosNoteInfo { - int taosNoteFileNum ; - int taosNoteMaxLines; - int taosNoteLines; - char taosNoteName[NOTE_FILE_NAME_LEN]; - int taosNoteFlag; - int taosNoteFd; - int taosNoteOpenInProgress; - pthread_mutex_t taosNoteMutex; -}taosNoteInfo; - -void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...); -extern taosNoteInfo m_HttpNote; -extern taosNoteInfo m_TscNote; +typedef struct { + int32_t fileNum; + int32_t maxLines; + int32_t lines; + int32_t flag; + int32_t fd; + int32_t openInProgress; + char name[NOTE_FILE_NAME_LEN]; + pthread_mutex_t mutex; +} SNoteObj; -extern int tsHttpEnableRecordSql; -extern int tsTscEnableRecordSql; +extern SNoteObj tsHttpNote; +extern SNoteObj tsTscNote; +extern SNoteObj tsInfoNote; -#define taosNotePrintHttp(...) \ +void taosInitNotes(); +void taosNotePrint(SNoteObj* pNote, const char* const format, ...); +void taosNotePrintBuffer(SNoteObj *pNote, char *buffer, int32_t len); + +#define nPrintHttp(...) \ if (tsHttpEnableRecordSql) { \ - taosNotePrint(&m_HttpNote, __VA_ARGS__); \ + taosNotePrint(&tsHttpNote, __VA_ARGS__); \ + } + +#define nPrintTsc(...) \ + if (tsTscEnableRecordSql) { \ + taosNotePrint(&tsTscNote, __VA_ARGS__); \ + } + +#define nInfo(buffer, len) \ + if (tscEmbedded == 1) { \ + taosNotePrintBuffer(&tsInfoNote, buffer, len); \ } - -#define taosNotePrintTsc(...) \ - if (tsTscEnableRecordSql) { \ - taosNotePrint(&m_TscNote, __VA_ARGS__); \ - } #ifdef __cplusplus } diff --git a/src/util/inc/tworker.h b/src/util/inc/tworker.h new file mode 100644 index 0000000000..7bc1eba2fd --- /dev/null +++ b/src/util/inc/tworker.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_TWORKER_H +#define TDENGINE_TWORKER_H + +#ifdef __cplusplus +extern "C" { +#endif + +typedef void *(*FWorkerThread)(void *pWorker); +struct SWorkerPool; + +typedef struct { + pthread_t thread; // thread + int32_t id; // worker ID + struct SWorkerPool *pPool; +} SWorker; + +typedef struct SWorkerPool { + int32_t max; // max number of workers + int32_t min; // min number of workers + int32_t num; // current number of workers + void * qset; + char * name; + SWorker *worker; + FWorkerThread workerFp; + pthread_mutex_t mutex; +} SWorkerPool; + +int32_t tWorkerInit(SWorkerPool *pPool); +void tWorkerCleanup(SWorkerPool *pPool); +void * tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle); +void tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/util/src/hash.c b/src/util/src/hash.c index 0e3e0d3e24..7a835e87e7 100644 --- a/src/util/src/hash.c +++ b/src/util/src/hash.c @@ -189,7 +189,7 @@ SHashObj *taosHashInit(size_t capacity, _hash_fn_t fn, bool update, SHashLockTyp return pHashObj; } -size_t taosHashGetSize(const SHashObj *pHashObj) { return (pHashObj == NULL) ? 0 : pHashObj->size; } +int32_t taosHashGetSize(const SHashObj *pHashObj) { return (int32_t)((pHashObj == NULL) ? 0 : pHashObj->size); } int32_t taosHashPut(SHashObj *pHashObj, const void *key, size_t keyLen, void *data, size_t size) { uint32_t hashVal = (*pHashObj->hashFp)(key, (uint32_t)keyLen); diff --git a/src/util/src/tconfig.c b/src/util/src/tconfig.c index 173de294cf..1d2bd6252f 100644 --- a/src/util/src/tconfig.c +++ b/src/util/src/tconfig.c @@ -240,9 +240,6 @@ void taosReadGlobalLogCfg() { int olen, vlen; char fileName[PATH_MAX] = {0}; - mDebugFlag = 135; - sdbDebugFlag = 135; - wordexp_t full_path; if ( 0 != wordexp(configDir, &full_path, 0)) { printf("\nconfig file: %s wordexp fail! reason:%s\n", configDir, strerror(errno)); @@ -400,3 +397,57 @@ void taosPrintGlobalCfg() { taosPrintOsInfo(); } + +static void taosDumpCfg(SGlobalCfg *cfg) { + int optionLen = (int)strlen(cfg->option); + int blankLen = TSDB_CFG_PRINT_LEN - optionLen; + blankLen = blankLen < 0 ? 
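tworker.h introduces a small reusable worker-pool abstraction: an SWorkerPool with min/max worker counts, a shared queue set, and one FWorkerThread entry per SWorker. Only the header lands in this diff, so the sketch below is a generic pthread pool of the same shape, not the TDengine implementation; every name in it is invented for illustration:

#include <pthread.h>
#include <stdio.h>

#define NUM_WORKERS 2
#define NUM_JOBS    6

typedef struct {
  int             jobs[16];
  int             head, tail, stopped;
  pthread_mutex_t mutex;
  pthread_cond_t  notEmpty;
} Pool;   /* invented stand-in for SWorkerPool plus its queue set */

static void *workerFp(void *arg) {     /* plays the role of FWorkerThread */
  Pool *p = arg;
  while (1) {
    pthread_mutex_lock(&p->mutex);
    while (p->head == p->tail && !p->stopped) pthread_cond_wait(&p->notEmpty, &p->mutex);
    if (p->head == p->tail && p->stopped) { pthread_mutex_unlock(&p->mutex); break; }
    int job = p->jobs[p->head++ % 16];
    pthread_mutex_unlock(&p->mutex);
    printf("worker %lu processed job %d\n", (unsigned long)pthread_self(), job);
  }
  return NULL;
}

int main(void) {
  Pool p = { .head = 0, .tail = 0, .stopped = 0,
             .mutex = PTHREAD_MUTEX_INITIALIZER, .notEmpty = PTHREAD_COND_INITIALIZER };
  pthread_t workers[NUM_WORKERS];

  for (int i = 0; i < NUM_WORKERS; i++) pthread_create(&workers[i], NULL, workerFp, &p);

  for (int i = 0; i < NUM_JOBS; i++) {           /* producer side: queue work items */
    pthread_mutex_lock(&p.mutex);
    p.jobs[p.tail++ % 16] = i;
    pthread_cond_signal(&p.notEmpty);
    pthread_mutex_unlock(&p.mutex);
  }

  pthread_mutex_lock(&p.mutex);                   /* no more work: let workers drain and exit */
  p.stopped = 1;
  pthread_cond_broadcast(&p.notEmpty);
  pthread_mutex_unlock(&p.mutex);

  for (int i = 0; i < NUM_WORKERS; i++) pthread_join(workers[i], NULL);
  return 0;
}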
0 : blankLen; + + char blank[TSDB_CFG_PRINT_LEN]; + memset(blank, ' ', TSDB_CFG_PRINT_LEN); + blank[blankLen] = 0; + + switch (cfg->valType) { + case TAOS_CFG_VTYPE_INT16: + printf(" %s:%s%d%s\n", cfg->option, blank, *((int16_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TAOS_CFG_VTYPE_INT32: + printf(" %s:%s%d%s\n", cfg->option, blank, *((int32_t *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TAOS_CFG_VTYPE_FLOAT: + printf(" %s:%s%f%s\n", cfg->option, blank, *((float *)cfg->ptr), tsGlobalUnit[cfg->unitType]); + break; + case TAOS_CFG_VTYPE_STRING: + case TAOS_CFG_VTYPE_IPSTR: + case TAOS_CFG_VTYPE_DIRECTORY: + printf(" %s:%s%s%s\n", cfg->option, blank, (char *)cfg->ptr, tsGlobalUnit[cfg->unitType]); + break; + default: + break; + } +} + +void taosDumpGlobalCfg() { + printf("taos global config:\n"); + printf("==================================\n"); + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalCfg *cfg = tsGlobalConfig + i; + if (tscEmbedded == 0 && !(cfg->cfgType & TSDB_CFG_CTYPE_B_CLIENT)) continue; + if (cfg->cfgType & TSDB_CFG_CTYPE_B_NOT_PRINT) continue; + if (!(cfg->cfgType & TSDB_CFG_CTYPE_B_SHOW)) continue; + + taosDumpCfg(cfg); + } + + printf("\ntaos local config:\n"); + printf("==================================\n"); + + for (int i = 0; i < tsGlobalConfigNum; ++i) { + SGlobalCfg *cfg = tsGlobalConfig + i; + if (tscEmbedded == 0 && !(cfg->cfgType & TSDB_CFG_CTYPE_B_CLIENT)) continue; + if (cfg->cfgType & TSDB_CFG_CTYPE_B_NOT_PRINT) continue; + if (cfg->cfgType & TSDB_CFG_CTYPE_B_SHOW) continue; + + taosDumpCfg(cfg); + } +} diff --git a/src/util/src/tlog.c b/src/util/src/tlog.c index ad3a922304..c0f89e8465 100644 --- a/src/util/src/tlog.c +++ b/src/util/src/tlog.c @@ -17,6 +17,7 @@ #include "os.h" #include "tulog.h" #include "tlog.h" +#include "tnote.h" #include "tutil.h" #define MAX_LOGLINE_SIZE (1000) @@ -287,7 +288,6 @@ static int32_t taosOpenLogFile(char *fn, int32_t maxLines, int32_t maxFileNum) { tsLogObj.fileNum = maxFileNum; taosGetLogFileName(fn); - if (strlen(fn) < LOG_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".0"); @@ -401,6 +401,7 @@ void taosPrintLog(const char *flags, int32_t dflag, const char *format, ...) { } if (dflag & DEBUG_SCREEN) taosWrite(1, buffer, (uint32_t)len); + if (dflag == 255) nInfo(buffer, len); } void taosDumpData(unsigned char *msg, int32_t len) { diff --git a/src/util/src/tnettest.c b/src/util/src/tnettest.c index 6fd5265983..89601147a5 100644 --- a/src/util/src/tnettest.c +++ b/src/util/src/tnettest.c @@ -13,50 +13,43 @@ * along with this program. If not, see . 
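The new taosDumpCfg pads each option name with blanks so the values of all dumped options line up in one column. A tiny standalone version of the same padding technique; the column width and the sample option names here are placeholders, not the real TSDB_CFG_PRINT_LEN or config entries:

#include <stdio.h>
#include <string.h>

#define CFG_PRINT_LEN 23   /* assumed column width; the real value comes from TSDB_CFG_PRINT_LEN */

static void dumpCfg(const char *option, const char *value, const char *unit) {
  int optionLen = (int)strlen(option);
  int blankLen  = CFG_PRINT_LEN - optionLen;
  if (blankLen < 0) blankLen = 0;

  char blank[CFG_PRINT_LEN + 1];
  memset(blank, ' ', sizeof(blank));
  blank[blankLen] = 0;                 /* pad so the values line up in one column */

  printf(" %s:%s%s%s\n", option, blank, value, unit);
}

int main(void) {
  dumpCfg("numOfCommitThreads", "4", "");
  dumpCfg("rpcMaxUdpSize", "15000", "(byte)");
  dumpCfg("locale", "en_US.UTF-8", "");
  return 0;
}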
*/ +#define _DEFAULT_SOURCE #include "os.h" #include "taosdef.h" #include "taosmsg.h" #include "taoserror.h" #include "tulog.h" -#include "tconfig.h" #include "tglobal.h" #include "tsocket.h" #include "trpc.h" #include "rpcHead.h" -#include "tutil.h" -#include "tnettest.h" -#define MAX_PKG_LEN (64*1000) -#define BUFFER_SIZE (MAX_PKG_LEN + 1024) +#define MAX_PKG_LEN (64 * 1000) +#define BUFFER_SIZE (MAX_PKG_LEN + 1024) + +extern int32_t tsRpcMaxUdpSize; typedef struct { + char * hostFqdn; uint32_t hostIp; - uint16_t port; - uint16_t pktLen; -} info_s; + int32_t port; + int32_t pktLen; +} STestInfo; -extern int tsRpcMaxUdpSize; - -static char g_user[TSDB_USER_LEN+1] = {0}; -static char g_pass[TSDB_PASSWORD_LEN+1] = {0}; -static char g_serverFqdn[TSDB_FQDN_LEN] = {0}; -static uint16_t g_startPort = 0; -static uint16_t g_endPort = 6042; -static uint32_t g_pktLen = 0; - - -static void *bindUdpPort(void *sarg) { - info_s *pinfo = (info_s *)sarg; - int port = pinfo->port; - SOCKET serverSocket; +static void *taosNetBindUdpPort(void *sarg) { + STestInfo *pinfo = (STestInfo *)sarg; + int32_t port = pinfo->port; + SOCKET serverSocket; + char buffer[BUFFER_SIZE]; + int32_t iDataNum; + socklen_t sin_size; + int32_t bufSize = 1024000; struct sockaddr_in server_addr; struct sockaddr_in clientAddr; - char buffer[BUFFER_SIZE]; - int iDataNum; - + if ((serverSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { - perror("socket"); + uError("failed to create UDP socket since %s", strerror(errno)); return NULL; } @@ -66,49 +59,60 @@ static void *bindUdpPort(void *sarg) { server_addr.sin_addr.s_addr = htonl(INADDR_ANY); if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { - perror("connect"); + uError("failed to bind UDP port:%d since %s", port, strerror(errno)); return NULL; } - socklen_t sin_size; + if (taosSetSockOpt(serverSocket, SOL_SOCKET, SO_SNDBUF, (void *)&bufSize, sizeof(bufSize)) != 0) { + uError("failed to set the send buffer size for UDP socket\n"); + taosCloseSocket(serverSocket); + return NULL; + } + + if (taosSetSockOpt(serverSocket, SOL_SOCKET, SO_RCVBUF, (void *)&bufSize, sizeof(bufSize)) != 0) { + uError("failed to set the receive buffer size for UDP socket\n"); + taosCloseSocket(serverSocket); + return NULL; + } + + uInfo("UDP server at port:%d is listening", port); while (1) { memset(buffer, 0, BUFFER_SIZE); - sin_size = sizeof(*(struct sockaddr *)&server_addr); - iDataNum = recvfrom(serverSocket, buffer, BUFFER_SIZE, 0, (struct sockaddr *)&clientAddr, &sin_size); if (iDataNum < 0) { - perror("recvfrom null"); + uDebug("failed to perform recvfrom func at %d since %s", port, strerror(errno)); continue; } - if (iDataNum > 0) { - printf("recv Client: %s pkg from UDP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); - //printf("Read msg from udp:%s ... 
%s\n", buffer, buffer+iDataNum-16); - sendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int)sin_size); + uInfo("UDP: recv:%d bytes from %s at %d", iDataNum, taosInetNtoa(clientAddr.sin_addr), port); + + if (iDataNum > 0) { + iDataNum = taosSendto(serverSocket, buffer, iDataNum, 0, (struct sockaddr *)&clientAddr, (int32_t)sin_size); } + + uInfo("UDP: send:%d bytes to %s at %d", iDataNum, taosInetNtoa(clientAddr.sin_addr), port); } taosCloseSocket(serverSocket); return NULL; } -static void *bindTcpPort(void *sarg) { - info_s *pinfo = (info_s *)sarg; - int port = pinfo->port; - SOCKET serverSocket; - +static void *taosNetBindTcpPort(void *sarg) { struct sockaddr_in server_addr; struct sockaddr_in clientAddr; - int addr_len = sizeof(clientAddr); - SOCKET client; - char buffer[BUFFER_SIZE]; - int iDataNum = 0; + + STestInfo *pinfo = sarg; + int32_t port = pinfo->port; + SOCKET serverSocket; + int32_t addr_len = sizeof(clientAddr); + SOCKET client; + char buffer[BUFFER_SIZE]; if ((serverSocket = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)) < 0) { - printf("socket() fail: %s", strerror(errno)); + uError("failed to create TCP socket since %s", strerror(errno)); return NULL; } @@ -117,235 +121,205 @@ static void *bindTcpPort(void *sarg) { server_addr.sin_port = htons(port); server_addr.sin_addr.s_addr = htonl(INADDR_ANY); + int32_t reuse = 1; + if (taosSetSockOpt(serverSocket, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, sizeof(reuse)) < 0) { + uError("setsockopt SO_REUSEADDR failed: %d (%s)", errno, strerror(errno)); + taosCloseSocket(serverSocket); + return NULL; + } + if (bind(serverSocket, (struct sockaddr *)&server_addr, sizeof(server_addr)) < 0) { - printf("port:%d bind() fail: %s", port, strerror(errno)); + uError("failed to bind TCP port:%d since %s", port, strerror(errno)); return NULL; } - if (listen(serverSocket, 5) < 0) { - printf("listen() fail: %s", strerror(errno)); + + if (taosKeepTcpAlive(serverSocket) < 0) { + uError("failed to set tcp server keep-alive option since %s", strerror(errno)); + taosCloseSocket(serverSocket); return NULL; } - //printf("Bind port: %d success\n", port); + if (listen(serverSocket, 10) < 0) { + uError("failed to listen TCP port:%d since %s", port, strerror(errno)); + return NULL; + } + + uInfo("TCP server at port:%d is listening", port); + while (1) { client = accept(serverSocket, (struct sockaddr *)&clientAddr, (socklen_t *)&addr_len); if (client < 0) { - printf("accept() fail: %s", strerror(errno)); + uDebug("TCP: failed to accept at port:%d since %s", port, strerror(errno)); continue; } - iDataNum = 0; - memset(buffer, 0, BUFFER_SIZE); - int nleft, nread; - char *ptr = buffer; - nleft = pinfo->pktLen; - while (nleft > 0) { - nread = recv(client, ptr, BUFFER_SIZE, 0); + int32_t ret = taosReadMsg(client, buffer, pinfo->pktLen); + if (ret < 0 || ret != pinfo->pktLen) { + uError("TCP: failed to read %d bytes at port:%d since %s", pinfo->pktLen, port, strerror(errno)); + taosCloseSocket(serverSocket); + return NULL; + } - if (nread == 0) { - break; - } else if (nread < 0) { - if (errno == EINTR) { - continue; - } else { - printf("recv Client: %s pkg from TCP port: %d fail:%s.\n", taosInetNtoa(clientAddr.sin_addr), port, strerror(errno)); - taosCloseSocket(serverSocket); - return NULL; - } - } else { - nleft -= nread; - ptr += nread; - iDataNum += nread; - } - } - - printf("recv Client: %s pkg from TCP port: %d, pkg len: %d\n", taosInetNtoa(clientAddr.sin_addr), port, iDataNum); - if (iDataNum > 0) { - send(client, buffer, iDataNum, 0); + 
uInfo("TCP: read:%d bytes from %s at %d", pinfo->pktLen, taosInetNtoa(clientAddr.sin_addr), port); + + ret = taosWriteMsg(client, buffer, pinfo->pktLen); + if (ret < 0) { + uError("TCP: failed to write %d bytes at %d since %s", pinfo->pktLen, port, strerror(errno)); + taosCloseSocket(serverSocket); + return NULL; } + + uInfo("TCP: write:%d bytes to %s at %d", pinfo->pktLen, taosInetNtoa(clientAddr.sin_addr), port); } - + taosCloseSocket(serverSocket); return NULL; } -static int checkTcpPort(info_s *info) { - struct sockaddr_in serverAddr; - SOCKET clientSocket; - char sendbuf[BUFFER_SIZE]; - char recvbuf[BUFFER_SIZE]; - int iDataNum = 0; +static int32_t taosNetCheckTcpPort(STestInfo *info) { + SOCKET clientSocket; + char buffer[BUFFER_SIZE] = {0}; + if ((clientSocket = socket(AF_INET, SOCK_STREAM, 0)) < 0) { - printf("socket() fail: %s\n", strerror(errno)); + uError("failed to create TCP client socket since %s", strerror(errno)); return -1; } - // set send and recv overtime - struct timeval timeout; - timeout.tv_sec = 2; //s - timeout.tv_usec = 0; //us - if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) { - perror("setsockopt send timer failed:"); - } - if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) { - perror("setsockopt recv timer failed:"); + int32_t reuse = 1; + if (taosSetSockOpt(clientSocket, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, sizeof(reuse)) < 0) { + uError("setsockopt SO_REUSEADDR failed: %d (%s)", errno, strerror(errno)); + taosCloseSocket(clientSocket); + return -1; } + struct sockaddr_in serverAddr; + memset((char *)&serverAddr, 0, sizeof(serverAddr)); serverAddr.sin_family = AF_INET; - serverAddr.sin_port = htons(info->port); - + serverAddr.sin_port = (uint16_t)htons((uint16_t)info->port); serverAddr.sin_addr.s_addr = info->hostIp; - //printf("=================================\n"); if (connect(clientSocket, (struct sockaddr *)&serverAddr, sizeof(serverAddr)) < 0) { - printf("connect() fail: %s\t", strerror(errno)); + uError("TCP: failed to connect port %s:%d since %s", taosIpStr(info->hostIp), info->port, strerror(errno)); return -1; } - //printf("Connect to: %s:%d...success\n", host, port); - memset(sendbuf, 0, BUFFER_SIZE); - memset(recvbuf, 0, BUFFER_SIZE); - struct in_addr ipStr; - memcpy(&ipStr, &info->hostIp, 4); - sprintf(sendbuf, "client send tcp pkg to %s:%d, content: 1122334455", taosInetNtoa(ipStr), info->port); - sprintf(sendbuf + info->pktLen - 16, "1122334455667788"); + taosKeepTcpAlive(clientSocket); - send(clientSocket, sendbuf, info->pktLen, 0); + sprintf(buffer, "client send TCP pkg to %s:%d, content: 1122334455", taosIpStr(info->hostIp), info->port); + sprintf(buffer + info->pktLen - 16, "1122334455667788"); - memset(recvbuf, 0, BUFFER_SIZE); - int nleft, nread; - char *ptr = recvbuf; - nleft = info->pktLen; - while (nleft > 0) { - nread = recv(clientSocket, ptr, BUFFER_SIZE, 0);; - - if (nread == 0) { - break; - } else if (nread < 0) { - if (errno == EINTR) { - continue; - } else { - printf("recv ack pkg from TCP port: %d fail:%s.\n", info->port, strerror(errno)); - taosCloseSocket(clientSocket); - return -1; - } - } else { - nleft -= nread; - ptr += nread; - iDataNum += nread; - } - } - - if (iDataNum < info->pktLen) { - printf("recv ack pkg len: %d, less than req pkg len: %d from tcp port: %d\n", iDataNum, info->pktLen, info->port); + int32_t ret = taosWriteMsg(clientSocket, buffer, info->pktLen); + if (ret < 0) { + uError("TCP: failed to write 
msg to %s:%d since %s", taosIpStr(info->hostIp), info->port, strerror(errno)); + return -1; + } + + ret = taosReadMsg(clientSocket, buffer, info->pktLen); + if (ret < 0) { + uError("TCP: failed to read msg from %s:%d since %s", taosIpStr(info->hostIp), info->port, strerror(errno)); return -1; } - //printf("Read ack pkg len:%d from tcp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8); taosCloseSocket(clientSocket); return 0; } -static int checkUdpPort(info_s *info) { +static int32_t taosNetCheckUdpPort(STestInfo *info) { + SOCKET clientSocket; + char buffer[BUFFER_SIZE] = {0}; + int32_t iDataNum = 0; + int32_t bufSize = 1024000; + struct sockaddr_in serverAddr; - SOCKET clientSocket; - char sendbuf[BUFFER_SIZE]; - char recvbuf[BUFFER_SIZE]; - int iDataNum = 0; + if ((clientSocket = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP)) < 0) { - perror("socket"); + uError("failed to create udp client socket since %s", strerror(errno)); return -1; } - // set overtime - struct timeval timeout; - timeout.tv_sec = 2; //s - timeout.tv_usec = 0; //us - if (setsockopt(clientSocket, SOL_SOCKET,SO_SNDTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) { - perror("setsockopt send timer failed:"); + if (taosSetSockOpt(clientSocket, SOL_SOCKET, SO_SNDBUF, (void *)&bufSize, sizeof(bufSize)) != 0) { + uError("failed to set the send buffer size for UDP socket\n"); + return -1; } - if (setsockopt(clientSocket, SOL_SOCKET,SO_RCVTIMEO, (char *)&timeout, sizeof(struct timeval)) == -1) { - perror("setsockopt recv timer failed:"); + + if (taosSetSockOpt(clientSocket, SOL_SOCKET, SO_RCVBUF, (void *)&bufSize, sizeof(bufSize)) != 0) { + uError("failed to set the receive buffer size for UDP socket\n"); + return -1; } - + serverAddr.sin_family = AF_INET; serverAddr.sin_port = htons(info->port); serverAddr.sin_addr.s_addr = info->hostIp; - - memset(sendbuf, 0, BUFFER_SIZE); - memset(recvbuf, 0, BUFFER_SIZE); struct in_addr ipStr; memcpy(&ipStr, &info->hostIp, 4); - sprintf(sendbuf, "client send udp pkg to %s:%d, content: 1122334455", taosInetNtoa(ipStr), info->port); - sprintf(sendbuf + info->pktLen - 16, "1122334455667788"); + sprintf(buffer, "client send udp pkg to %s:%d, content: 1122334455", taosInetNtoa(ipStr), info->port); + sprintf(buffer + info->pktLen - 16, "1122334455667788"); socklen_t sin_size = sizeof(*(struct sockaddr *)&serverAddr); - int code = sendto(clientSocket, sendbuf, info->pktLen, 0, (struct sockaddr *)&serverAddr, (int)sin_size); - if (code < 0) { - perror("sendto"); + iDataNum = taosSendto(clientSocket, buffer, info->pktLen, 0, (struct sockaddr *)&serverAddr, (int32_t)sin_size); + if (iDataNum < 0 || iDataNum != info->pktLen) { + uError("UDP: failed to perform sendto func since %s", strerror(errno)); return -1; } - iDataNum = recvfrom(clientSocket, recvbuf, BUFFER_SIZE, 0, (struct sockaddr *)&serverAddr, &sin_size); + memset(buffer, 0, BUFFER_SIZE); + sin_size = sizeof(*(struct sockaddr *)&serverAddr); + iDataNum = recvfrom(clientSocket, buffer, BUFFER_SIZE, 0, (struct sockaddr *)&serverAddr, &sin_size); - if (iDataNum < info->pktLen) { - printf("Read ack pkg len: %d, less than req pkg len: %d from udp port: %d\t\t", iDataNum, info->pktLen, info->port); + if (iDataNum < 0 || iDataNum != info->pktLen) { + uError("UDP: received ack:%d bytes(expect:%d) from port:%d since %s", iDataNum, info->pktLen, info->port, strerror(errno)); return -1; } - - //printf("Read ack pkg len:%d from udp port: %d, buffer: %s %s\n", info->pktLen, port, recvbuf, recvbuf+iDataNum-8); + 
taosCloseSocket(clientSocket); return 0; } -static void checkPort(uint32_t hostIp, uint16_t startPort, uint16_t maxPort, uint16_t pktLen) { - int ret; - info_s info; - memset(&info, 0, sizeof(info_s)); - info.hostIp = hostIp; - info.pktLen = pktLen; +static void taosNetCheckPort(uint32_t hostIp, int32_t startPort, int32_t endPort, int32_t pktLen) { + int32_t ret; + STestInfo info; - for (uint16_t port = startPort; port <= maxPort; port++) { - //printf("test: %s:%d\n", info.host, port); - printf("\n"); + memset(&info, 0, sizeof(STestInfo)); + info.hostIp = hostIp; + info.pktLen = pktLen; + for (int32_t port = startPort; port <= endPort; port++) { info.port = port; - ret = checkTcpPort(&info); + ret = taosNetCheckTcpPort(&info); if (ret != 0) { - printf("tcp port:%d test fail.\t\n", port); + uError("failed to test TCP port:%d", port); } else { - printf("tcp port:%d test ok.\t\t", port); + uInfo("successed to test TCP port:%d", port); } - - ret = checkUdpPort(&info); + + ret = taosNetCheckUdpPort(&info); if (ret != 0) { - printf("udp port:%d test fail.\t\n", port); + uError("failed to test UDP port:%d", port); } else { - printf("udp port:%d test ok.\t\t", port); + uInfo("successed to test UDP port:%d", port); } } - - printf("\n"); - return ; } -void* tnetInitRpc(char* secretEncrypt, char spi) { +void *taosNetInitRpc(char *secretEncrypt, char spi) { SRpcInit rpcInit; - void* pRpcConn = NULL; + void * pRpcConn = NULL; + + char user[] = "nettestinternal"; + char pass[] = "nettestinternal"; + taosEncryptPass((uint8_t *)pass, strlen(pass), secretEncrypt); - taosEncryptPass((uint8_t *)g_pass, strlen(g_pass), secretEncrypt); - memset(&rpcInit, 0, sizeof(rpcInit)); rpcInit.localPort = 0; - rpcInit.label = "NET-TEST"; + rpcInit.label = "NT"; rpcInit.numOfThreads = 1; // every DB connection has only one thread rpcInit.cfp = NULL; rpcInit.sessions = 16; rpcInit.connType = TAOS_CONN_CLIENT; - rpcInit.user = g_user; + rpcInit.user = user; rpcInit.idleTime = 2000; rpcInit.ckey = "key"; rpcInit.spi = spi; @@ -355,17 +329,18 @@ void* tnetInitRpc(char* secretEncrypt, char spi) { return pRpcConn; } -static int rpcCheckPortImpl(const char* serverFqdn, uint16_t port, uint16_t pktLen, char spi) { +static int32_t taosNetCheckRpc(const char* serverFqdn, uint16_t port, uint16_t pktLen, char spi, SStartupStep *pStep) { SRpcEpSet epSet; SRpcMsg reqMsg; SRpcMsg rspMsg; - void* pRpcConn; + void * pRpcConn; char secretEncrypt[32] = {0}; - pRpcConn = tnetInitRpc(secretEncrypt, spi); + pRpcConn = taosNetInitRpc(secretEncrypt, spi); if (NULL == pRpcConn) { - return -1; + uError("failed to init client rpc"); + return TSDB_CODE_RPC_NETWORK_UNAVAIL; } memset(&epSet, 0, sizeof(SRpcEpSet)); @@ -373,205 +348,172 @@ static int rpcCheckPortImpl(const char* serverFqdn, uint16_t port, uint16_t pktL epSet.numOfEps = 1; epSet.port[0] = port; strcpy(epSet.fqdn[0], serverFqdn); - + reqMsg.msgType = TSDB_MSG_TYPE_NETWORK_TEST; reqMsg.pCont = rpcMallocCont(pktLen); reqMsg.contLen = pktLen; reqMsg.code = 0; reqMsg.handle = NULL; // rpc handle returned to app - reqMsg.ahandle = NULL; // app handle set by client + reqMsg.ahandle = NULL; // app handle set by client + strcpy(reqMsg.pCont, "nettest"); rpcSendRecv(pRpcConn, &epSet, &reqMsg, &rspMsg); - // handle response if ((rspMsg.code != 0) || (rspMsg.msgType != TSDB_MSG_TYPE_NETWORK_TEST + 1)) { - //printf("code:%d[%s]\n", rspMsg.code, tstrerror(rspMsg.code)); - return -1; + uDebug("ret code 0x%x %s", rspMsg.code, tstrerror(rspMsg.code)); + return rspMsg.code; } - + + int32_t code = 0; + if 
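The reworked taosNetCheckTcpPort/taosNetCheckUdpPort replace the hand-rolled recv loops with taosWriteMsg/taosReadMsg and require that exactly pktLen bytes make the round trip; anything shorter is reported as a failed port. A reduced, standalone version of the TCP side of that check; the loopback address and echo port 7 are placeholders, so point it at any echo service you actually run:

#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

#define PKT_LEN 64

static int readFull(int fd, char *buf, int len) {   /* plays the role of taosReadMsg */
  int done = 0;
  while (done < len) {
    ssize_t n = read(fd, buf + done, len - done);
    if (n <= 0) break;
    done += (int)n;
  }
  return done;
}

int main(void) {
  int fd = socket(AF_INET, SOCK_STREAM, 0);
  if (fd < 0) { perror("socket"); return 1; }

  struct sockaddr_in addr = {0};
  addr.sin_family = AF_INET;
  addr.sin_port   = htons(7);                      /* placeholder: a TCP echo port */
  inet_pton(AF_INET, "127.0.0.1", &addr.sin_addr);

  if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
    fprintf(stderr, "failed to test TCP port:7 (%s)\n", strerror(errno));
    close(fd);
    return 1;
  }

  char buf[PKT_LEN] = "client send TCP pkg, content: 1122334455";
  if (write(fd, buf, PKT_LEN) != PKT_LEN ||        /* must push the whole packet */
      readFull(fd, buf, PKT_LEN) != PKT_LEN) {     /* and get the whole packet back */
    fprintf(stderr, "TCP port:7 test failed\n");
    close(fd);
    return 1;
  }

  printf("TCP port:7 test ok\n");
  close(fd);
  return 0;
}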
(pStep != NULL && rspMsg.pCont != NULL && rspMsg.contLen > 0 && rspMsg.contLen <= sizeof(SStartupStep)) { + memcpy(pStep, rspMsg.pCont, rspMsg.contLen); + code = 1; + } + rpcFreeCont(rspMsg.pCont); - rpcClose(pRpcConn); - - return 0; + return code; } -static void rpcCheckPort(uint32_t hostIp) { - int ret; - char spi; +static int32_t taosNetParseStartup(SStartupStep *pCont) { + SStartupStep *pStep = pCont; + uInfo("step:%s desc:%s", pStep->name, pStep->desc); - for (uint16_t port = g_startPort; port <= g_endPort; port++) { - //printf("test: %s:%d\n", info.host, port); - printf("\n"); + if (pStep->finished) { + uInfo("check startup finished"); + } - //================ check tcp port ================ - int32_t pktLen; - if (g_pktLen <= tsRpcMaxUdpSize) { - pktLen = tsRpcMaxUdpSize + 1000; - } else { - pktLen = g_pktLen; + return pStep->finished ? 0 : 1; +} + +static void taosNetTestStartup(char *host, int32_t port) { + uInfo("check startup, host:%s port:%d\n", host, port); + + SStartupStep *pStep = malloc(sizeof(SStartupStep)); + while (1) { + int32_t code = taosNetCheckRpc(host, port + TSDB_PORT_DNODEDNODE, 20, 0, pStep); + if (code > 0) { + code = taosNetParseStartup(pStep); } - spi = 1; - ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); - if (ret != 0) { - spi = 0; - ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); - if (ret != 0) { - printf("TCP port:%d test fail.\t\t", port); - } else { - //printf("tcp port:%d test ok.\t\t", port); - printf("TCP port:\033[32m%d test OK\033[0m\t\t", port); - } + if (code > 0) { + uDebug("continue check startup step"); } else { - //printf("tcp port:%d test ok.\t\t", port); - printf("TCP port:\033[32m%d test OK\033[0m\t\t", port); - } - - //================ check udp port ================ - if (g_pktLen >= tsRpcMaxUdpSize) { - pktLen = tsRpcMaxUdpSize - 1000; - } else { - pktLen = g_pktLen; - } - - spi = 0; - ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); - if (ret != 0) { - spi = 1; - ret = rpcCheckPortImpl(g_serverFqdn, port, pktLen, spi); - if (ret != 0) { - printf("udp port:%d test fail.\t\n", port); - } else { - //printf("udp port:%d test ok.\t\n", port); - printf("UDP port:\033[32m%d test OK\033[0m\t\n", port); - } - } else { - //printf("udp port:%d test ok.\t\n", port); - printf("UDP port:\033[32m%d test OK\033[0m\t\n", port); + if (code < 0) { + uError("failed to check startup step, code:0x%x %s", code, tstrerror(code)); + } + break; } } - - printf("\n"); - return ; + + free(pStep); } -static void taosNetTestClient(int flag) { - uint32_t serverIp = taosGetIpFromFqdn(g_serverFqdn); +static void taosNetTestRpc(char *host, int32_t startPort, int32_t pkgLen) { + int32_t endPort = startPort + 9; + char spi = 0; + + uInfo("check rpc, host:%s startPort:%d endPort:%d pkgLen:%d\n", host, startPort, endPort, pkgLen); + + for (uint16_t port = startPort; port <= endPort; port++) { + int32_t sendpkgLen; + if (pkgLen <= tsRpcMaxUdpSize) { + sendpkgLen = tsRpcMaxUdpSize + 1000; + } else { + sendpkgLen = pkgLen; + } + + int32_t ret = taosNetCheckRpc(host, port, sendpkgLen, spi, NULL); + if (ret < 0) { + uError("failed to test TCP port:%d", port); + } else { + uInfo("successed to test TCP port:%d", port); + } + + if (pkgLen >= tsRpcMaxUdpSize) { + sendpkgLen = tsRpcMaxUdpSize - 1000; + } else { + sendpkgLen = pkgLen; + } + + ret = taosNetCheckRpc(host, port, pkgLen, spi, NULL); + if (ret < 0) { + uError("failed to test UDP port:%d", port); + } else { + uInfo("successed to test UDP port:%d", port); + } + } +} + +static void 
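taosNetTestStartup polls the dnode with TSDB_MSG_TYPE_NETWORK_TEST requests, copies the returned SStartupStep out of each response, and keeps looping until the step reports finished or an error comes back. A toy version of that poll loop with the RPC replaced by a counter so only the control flow is shown; every name below is illustrative:

#include <stdio.h>
#include <string.h>
#include <unistd.h>

typedef struct {
  char name[32];
  char desc[64];
  char finished;
} StartupStep;                       /* reduced stand-in for SStartupStep */

/* pretend RPC: after a few polls the server reports that startup finished */
static int checkStartup(StartupStep *step) {
  static int calls = 0;
  calls++;
  snprintf(step->name, sizeof(step->name), "open-vnodes");
  snprintf(step->desc, sizeof(step->desc), "%d of 3 vnodes opened", calls);
  step->finished = (calls >= 3);
  return 0;                          /* 0 = reply received; <0 would be an rpc error */
}

int main(void) {
  StartupStep step;
  memset(&step, 0, sizeof(step));

  while (1) {
    if (checkStartup(&step) < 0) {
      fprintf(stderr, "failed to check startup step\n");
      break;
    }
    printf("step:%s desc:%s\n", step.name, step.desc);
    if (step.finished) {
      printf("check startup finished\n");
      break;
    }
    sleep(1);                        /* poll again until the server is up */
  }
  return 0;
}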
taosNetTestClient(char *host, int32_t startPort, int32_t pkgLen) { + int32_t endPort = startPort + 11; + uInfo("work as client, host:%s startPort:%d endPort:%d pkgLen:%d\n", host, startPort, endPort, pkgLen); + + uint32_t serverIp = taosGetIpFromFqdn(host); if (serverIp == 0xFFFFFFFF) { - printf("Failed to resolve FQDN:%s", g_serverFqdn); + uError("failed to resolve fqdn:%s", host); exit(-1); } - if (0 == flag) { - checkPort(serverIp, g_startPort, g_endPort, g_pktLen); - } else { - rpcCheckPort(serverIp); - } - - return; + uInfo("server ip:%s is resolved from host:%s", taosIpStr(serverIp), host); + taosNetCheckPort(serverIp, startPort, endPort, pkgLen); } -static void taosNetTestServer(uint16_t startPort, uint16_t endPort, int pktLen) { +static void taosNetTestServer(char *host, int32_t startPort, int32_t pkgLen) { + int32_t endPort = startPort + 11; + uInfo("work as server, host:%s startPort:%d endPort:%d pkgLen:%d\n", host, startPort, endPort, pkgLen); - int port = startPort; - int num = endPort - startPort + 1; + int32_t port = startPort; + int32_t num = endPort - startPort + 1; + if (num < 0) num = 1; - if (num < 0) { - num = 1; - } - pthread_t *pids = malloc(2 * num * sizeof(pthread_t)); - info_s * tinfos = malloc(num * sizeof(info_s)); - info_s * uinfos = malloc(num * sizeof(info_s)); + STestInfo *tinfos = malloc(num * sizeof(STestInfo)); + STestInfo *uinfos = malloc(num * sizeof(STestInfo)); - for (size_t i = 0; i < num; i++) { - info_s *tcpInfo = tinfos + i; - tcpInfo->port = (uint16_t)(port + i); - tcpInfo->pktLen = pktLen; + for (int32_t i = 0; i < num; i++) { + STestInfo *tcpInfo = tinfos + i; + tcpInfo->port = port + i; + tcpInfo->pktLen = pkgLen; - if (pthread_create(pids + i, NULL, bindTcpPort, tcpInfo) != 0) - { - printf("create thread fail, port:%d.\n", port); + if (pthread_create(pids + i, NULL, taosNetBindTcpPort, tcpInfo) != 0) { + uInfo("failed to create TCP test thread, %s:%d", tcpInfo->hostFqdn, tcpInfo->port); exit(-1); } - info_s *udpInfo = uinfos + i; - udpInfo->port = (uint16_t)(port + i); - if (pthread_create(pids + num + i, NULL, bindUdpPort, udpInfo) != 0) - { - printf("create thread fail, port:%d.\n", port); + STestInfo *udpInfo = uinfos + i; + udpInfo->port = port + i; + tcpInfo->pktLen = pkgLen; + if (pthread_create(pids + num + i, NULL, taosNetBindUdpPort, udpInfo) != 0) { + uInfo("failed to create UDP test thread, %s:%d", tcpInfo->hostFqdn, tcpInfo->port); exit(-1); } } - - for (int i = 0; i < num; i++) { + + for (int32_t i = 0; i < num; i++) { pthread_join(pids[i], NULL); pthread_join(pids[(num + i)], NULL); } } +void taosNetTest(char *role, char *host, int32_t port, int32_t pkgLen) { + tscEmbedded = 1; + if (host == NULL) host = tsLocalFqdn; + if (port == 0) port = tsServerPort; + if (pkgLen <= 10) pkgLen = 1000; + if (pkgLen > MAX_PKG_LEN) pkgLen = MAX_PKG_LEN; -void taosNetTest(CmdArguments *args) { - if (0 == args->pktLen) { - g_pktLen = 1000; + if (0 == strcmp("client", role)) { + taosNetTestClient(host, port, pkgLen); + } else if (0 == strcmp("server", role)) { + taosNetTestServer(host, port, pkgLen); + } else if (0 == strcmp("rpc", role)) { + taosNetTestRpc(host, port, pkgLen); + } else if (0 == strcmp("startup", role)) { + taosNetTestStartup(host, port); } else { - g_pktLen = args->pktLen; - } - - if (args->port && args->endPort) { - if (args->port > args->endPort) { - printf("endPort[%d] must not lesss port[%d]\n", args->endPort, args->port); - exit(-1); - } - } - - if (args->host && args->host[0] != 0) { - if (strlen(args->host) >= TSDB_EP_LEN) 
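// --- Illustrative usage sketch, not part of the patch above ---
// The refactored net test now exposes one role-based entry point,
// taosNetTest(role, host, port, pkgLen).  This sketch assumes that declaration
// is visible from a shared header; the host/port/pkgLen values are examples
// only (a NULL host falls back to tsLocalFqdn, port 0 to tsServerPort, a
// pkgLen of 10 or less becomes 1000, and anything above MAX_PKG_LEN is capped).
void netTestUsageSketch(void) {
  taosNetTest("client",  "localhost", 6030, 1000);  // probe TCP/UDP on ports 6030..6041
  taosNetTest("server",  "localhost", 6030, 1000);  // bind the same port range and echo packets
  taosNetTest("rpc",     "localhost", 6030, 1000);  // exercise the rpc layer on ports 6030..6039
  taosNetTest("startup", "localhost", 6030, 1000);  // poll dnode startup steps until finished
}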
{ - printf("host invalid: %s\n", args->host); - exit(-1); - } - - taosGetFqdnPortFromEp(args->host, g_serverFqdn, &g_startPort); - } else { - tstrncpy(g_serverFqdn, "127.0.0.1", TSDB_IPv4ADDR_LEN); - g_startPort = tsServerPort; - } - - if (args->port) { - g_startPort = args->port; - } - - if (args->endPort) { - g_endPort = args->endPort; - } - - if (g_startPort > g_endPort) { - printf("endPort[%d] must not lesss port[%d]\n", g_endPort, g_startPort); - exit(-1); + taosNetTestStartup(host, port); } - - if (args->is_use_passwd) { - if (args->password == NULL) args->password = getpass("Enter password: "); - } else { - args->password = TSDB_DEFAULT_PASS; - } - tstrncpy(g_pass, args->password, TSDB_PASSWORD_LEN); - - if (args->user == NULL) { - args->user = TSDB_DEFAULT_USER; - } - tstrncpy(g_user, args->user, TSDB_USER_LEN); - - if (0 == strcmp("client", args->netTestRole)) { - printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", g_serverFqdn, g_startPort, g_endPort, g_pktLen); - taosNetTestClient(0); - } else if (0 == strcmp("clients", args->netTestRole)) { - printf("host: %s\tstart port: %d\tend port: %d\tpacket len: %d\n", g_serverFqdn, g_startPort, g_endPort, g_pktLen); - taosNetTestClient(1); - } else if (0 == strcmp("server", args->netTestRole)) { - taosNetTestServer(g_startPort, g_endPort, g_pktLen); - } + tscEmbedded = 0; } - diff --git a/src/util/src/tnote.c b/src/util/src/tnote.c index 9536f6fb70..f2db0b3316 100644 --- a/src/util/src/tnote.c +++ b/src/util/src/tnote.c @@ -13,277 +13,260 @@ * along with this program. If not, see . */ +#define _DEFAULT_SOURCE #include "os.h" +#include "tutil.h" +#include "tglobal.h" #include "tnote.h" -taosNoteInfo m_HttpNote; -taosNoteInfo m_TscNote; +SNoteObj tsHttpNote; +SNoteObj tsTscNote; +SNoteObj tsInfoNote; -int taosOpenNoteWithMaxLines(char *fn, int maxLines, int maxNoteNum, taosNoteInfo * pNote); +static int32_t taosOpenNoteWithMaxLines(char *fn, int32_t maxLines, int32_t maxNoteNum, SNoteObj *pNote); +static void taosCloseNoteByFd(int32_t oldFd, SNoteObj *pNote); -void taosInitNote(int numOfNoteLines, int maxNotes, char* lable) -{ - taosNoteInfo * pNote = NULL; - char temp[128] = { 0 }; +static void taosInitNote(int32_t numOfLines, int32_t maxNotes, SNoteObj *pNote, char *name) { + memset(pNote, 0, sizeof(SNoteObj)); + pNote->fileNum = 1; + pNote->fd = -1; - if (strcasecmp(lable, "http_note") == 0) { - pNote = &m_HttpNote; - sprintf(temp, "%s/httpnote", tsLogDir); - } else if (strcasecmp(lable, "tsc_note") == 0) { - pNote = &m_TscNote; - sprintf(temp, "%s/tscnote-%d", tsLogDir, getpid()); - } else { - return; - } + if (taosOpenNoteWithMaxLines(name, numOfLines, maxNotes, pNote) < 0) { + fprintf(stderr, "failed to init note file\n"); + } - memset(pNote, 0, sizeof(taosNoteInfo)); - pNote->taosNoteFileNum = 1; - //pNote->taosNoteMaxLines = 0; - //pNote->taosNoteLines = 0; - //pNote->taosNoteFlag = 0; - pNote->taosNoteFd = -1; - //pNote->taosNoteOpenInProgress = 0; - - if (taosOpenNoteWithMaxLines(temp, numOfNoteLines, maxNotes, pNote) < 0) - fprintf(stderr, "failed to init note file\n"); - - taosNotePrint(pNote, "=================================================="); - taosNotePrint(pNote, "=================== new note ==================="); - taosNotePrint(pNote, "=================================================="); + taosNotePrint(pNote, "=================================================="); + taosNotePrint(pNote, "=================== new note ==================="); + taosNotePrint(pNote, 
"=================================================="); } -void taosCloseNoteByFd(int oldFd, taosNoteInfo * pNote); -bool taosLockNote(int fd, taosNoteInfo * pNote) -{ - if (fd < 0) return false; +void taosInitNotes() { + char name[TSDB_FILENAME_LEN * 2] = {0}; - if (pNote->taosNoteFileNum > 1) { - int ret = (int)(flock(fd, LOCK_EX | LOCK_NB)); - if (ret == 0) { - return true; - } - } + if (tsTscEnableRecordSql) { + snprintf(name, TSDB_FILENAME_LEN * 2, "%s/tscsql-%d", tsLogDir, taosGetPId()); + taosInitNote(tsNumOfLogLines, 1, &tsTscNote, name); + } - return false; + if (tsHttpEnableRecordSql) { + snprintf(name, TSDB_FILENAME_LEN * 2, "%s/httpsql", tsLogDir); + taosInitNote(tsNumOfLogLines, 1, &tsHttpNote, name); + } + + if (tscEmbedded == 1) { + snprintf(name, TSDB_FILENAME_LEN * 2, "%s/taosinfo", tsLogDir); + taosInitNote(tsNumOfLogLines, 1, &tsInfoNote, name); + } } -void taosUnLockNote(int fd, taosNoteInfo * pNote) -{ - if (fd < 0) return; +static bool taosLockNote(int32_t fd, SNoteObj *pNote) { + if (fd < 0) return false; - if (pNote->taosNoteFileNum > 1) { - flock(fd, LOCK_UN | LOCK_NB); + if (pNote->fileNum > 1) { + int32_t ret = (int32_t)(flock(fd, LOCK_EX | LOCK_NB)); + if (ret == 0) { + return true; } + } + + return false; } -void *taosThreadToOpenNewNote(void *param) -{ - char name[NOTE_FILE_NAME_LEN * 2]; - taosNoteInfo * pNote = (taosNoteInfo *)param; +static void taosUnLockNote(int32_t fd, SNoteObj *pNote) { + if (fd < 0) return; - pNote->taosNoteFlag ^= 1; - pNote->taosNoteLines = 0; - sprintf(name, "%s.%d", pNote->taosNoteName, pNote->taosNoteFlag); + if (pNote->fileNum > 1) { + flock(fd, LOCK_UN | LOCK_NB); + } +} - umask(0); +static void *taosThreadToOpenNewNote(void *param) { + char name[NOTE_FILE_NAME_LEN * 2]; + SNoteObj *pNote = (SNoteObj *)param; - int fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); - if (fd < 0) { - return NULL; - } + pNote->flag ^= 1; + pNote->lines = 0; + sprintf(name, "%s.%d", pNote->name, pNote->flag); - taosLockNote(fd, pNote); - (void)lseek(fd, 0, SEEK_SET); + umask(0); - int oldFd = pNote->taosNoteFd; - pNote->taosNoteFd = fd; - pNote->taosNoteLines = 0; - pNote->taosNoteOpenInProgress = 0; - taosNotePrint(pNote, "=============== new note is opened ============="); - - taosCloseNoteByFd(oldFd, pNote); + int32_t fd = open(name, O_WRONLY | O_CREAT | O_TRUNC, S_IRWXU | S_IRWXG | S_IRWXO); + if (fd < 0) { return NULL; + } + + taosLockNote(fd, pNote); + (void)lseek(fd, 0, SEEK_SET); + + int32_t oldFd = pNote->fd; + pNote->fd = fd; + pNote->lines = 0; + pNote->openInProgress = 0; + taosNotePrint(pNote, "=============== new note is opened ============="); + + taosCloseNoteByFd(oldFd, pNote); + return NULL; } -int taosOpenNewNote(taosNoteInfo * pNote) -{ - pthread_mutex_lock(&pNote->taosNoteMutex); +static int32_t taosOpenNewNote(SNoteObj *pNote) { + pthread_mutex_lock(&pNote->mutex); - if (pNote->taosNoteLines > pNote->taosNoteMaxLines && pNote->taosNoteOpenInProgress == 0) { - pNote->taosNoteOpenInProgress = 1; + if (pNote->lines > pNote->maxLines && pNote->openInProgress == 0) { + pNote->openInProgress = 1; - taosNotePrint(pNote, "=============== open new note =================="); - pthread_t pattern; - pthread_attr_t attr; - pthread_attr_init(&attr); - pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + taosNotePrint(pNote, "=============== open new note =================="); + pthread_t pattern; + pthread_attr_t attr; + pthread_attr_init(&attr); + pthread_attr_setdetachstate(&attr, 
PTHREAD_CREATE_DETACHED); - pthread_create(&pattern, &attr, taosThreadToOpenNewNote, (void*)pNote); - pthread_attr_destroy(&attr); - } + pthread_create(&pattern, &attr, taosThreadToOpenNewNote, (void *)pNote); + pthread_attr_destroy(&attr); + } - pthread_mutex_unlock(&pNote->taosNoteMutex); + pthread_mutex_unlock(&pNote->mutex); - return pNote->taosNoteFd; + return pNote->fd; } -bool taosCheckNoteIsOpen(char *noteName, taosNoteInfo * pNote) -{ - /* - int exist = access(noteName, F_OK); - if (exist != 0) { - return false; - } - */ +static bool taosCheckNoteIsOpen(char *noteName, SNoteObj *pNote) { + int32_t fd = open(noteName, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + if (fd < 0) { + fprintf(stderr, "failed to open note:%s reason:%s\n", noteName, strerror(errno)); + return true; + } - int fd = open(noteName, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); - if (fd < 0) { - fprintf(stderr, "failed to open note:%s reason:%s\n", noteName, strerror(errno)); - return true; - } - - if (taosLockNote(fd, pNote)) { - taosUnLockNote(fd, pNote); - close(fd); - return false; - } - else { - close(fd); - return true; - } + if (taosLockNote(fd, pNote)) { + taosUnLockNote(fd, pNote); + close(fd); + return false; + } else { + close(fd); + return true; + } } -void taosGetNoteName(char *fn, taosNoteInfo * pNote) -{ - if (pNote->taosNoteFileNum > 1) { - for (int i = 0; i < pNote->taosNoteFileNum; i++) { - char fileName[NOTE_FILE_NAME_LEN]; +static void taosGetNoteName(char *fn, SNoteObj *pNote) { + if (pNote->fileNum > 1) { + for (int32_t i = 0; i < pNote->fileNum; i++) { + char fileName[NOTE_FILE_NAME_LEN]; - sprintf(fileName, "%s%d.0", fn, i); - bool file1open = taosCheckNoteIsOpen(fileName, pNote); + sprintf(fileName, "%s%d.0", fn, i); + bool file1open = taosCheckNoteIsOpen(fileName, pNote); - sprintf(fileName, "%s%d.1", fn, i); - bool file2open = taosCheckNoteIsOpen(fileName, pNote); + sprintf(fileName, "%s%d.1", fn, i); + bool file2open = taosCheckNoteIsOpen(fileName, pNote); - if (!file1open && !file2open) { - sprintf(pNote->taosNoteName, "%s%d", fn, i); - return; - } - } + if (!file1open && !file2open) { + sprintf(pNote->name, "%s%d", fn, i); + return; + } } + } - if (strlen(fn) < NOTE_FILE_NAME_LEN) { - strcpy(pNote->taosNoteName, fn); - } + if (strlen(fn) < NOTE_FILE_NAME_LEN) { + strcpy(pNote->name, fn); + } } -int taosOpenNoteWithMaxLines(char *fn, int maxLines, int maxNoteNum, taosNoteInfo * pNote) -{ - char name[NOTE_FILE_NAME_LEN * 2] = "\0"; - struct stat notestat0, notestat1; - int size; +static int32_t taosOpenNoteWithMaxLines(char *fn, int32_t maxLines, int32_t maxNoteNum, SNoteObj *pNote) { + char name[NOTE_FILE_NAME_LEN * 2] = {0}; + int32_t size; + struct stat logstat0, logstat1; - pNote->taosNoteMaxLines = maxLines; - pNote->taosNoteFileNum = maxNoteNum; - taosGetNoteName(fn, pNote); - - if (strlen(fn) > NOTE_FILE_NAME_LEN * 2 - 2) { - fprintf(stderr, "the len of file name overflow:%s\n", fn); - return -1; - } + pNote->maxLines = maxLines; + pNote->fileNum = maxNoteNum; + taosGetNoteName(fn, pNote); + if (strlen(fn) < NOTE_FILE_NAME_LEN + 50 - 2) { strcpy(name, fn); strcat(name, ".0"); + } + bool log0Exist = stat(name, &logstat0) >= 0; - // if none of the note files exist, open 0, if both exists, open the old one - if (stat(name, ¬estat0) < 0) { - pNote->taosNoteFlag = 0; - } else { - strcpy(name, fn); - strcat(name, ".1"); - if (stat(name, ¬estat1) < 0) { - pNote->taosNoteFlag = 1; - } - else { - pNote->taosNoteFlag = (notestat0.st_mtime > notestat1.st_mtime) ? 
0 : 1; - } - } + if (strlen(fn) < NOTE_FILE_NAME_LEN + 50 - 2) { + strcpy(name, fn); + strcat(name, ".1"); + } + bool log1Exist = stat(name, &logstat1) >= 0; - char noteName[NOTE_FILE_NAME_LEN * 2] = "\0"; - sprintf(noteName, "%s.%d", pNote->taosNoteName, pNote->taosNoteFlag); - pthread_mutex_init(&pNote->taosNoteMutex, NULL); + if (!log0Exist && !log1Exist) { + pNote->flag = 0; + } else if (!log1Exist) { + pNote->flag = 0; + } else if (!log0Exist) { + pNote->flag = 1; + } else { + pNote->flag = (logstat0.st_mtime > logstat1.st_mtime) ? 0 : 1; + } - umask(0); - pNote->taosNoteFd = open(noteName, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); + char noteName[NOTE_FILE_NAME_LEN * 2] = {0}; + sprintf(noteName, "%s.%d", pNote->name, pNote->flag); + pthread_mutex_init(&pNote->mutex, NULL); - if (pNote->taosNoteFd < 0) { - fprintf(stderr, "failed to open note file:%s reason:%s\n", noteName, strerror(errno)); - return -1; - } - taosLockNote(pNote->taosNoteFd, pNote); + umask(0); + pNote->fd = open(noteName, O_WRONLY | O_CREAT, S_IRWXU | S_IRWXG | S_IRWXO); - // only an estimate for number of lines - struct stat filestat; - if (fstat(pNote->taosNoteFd, &filestat) < 0) { - fprintf(stderr, "failed to fstat note file:%s reason:%s\n", noteName, strerror(errno)); - return -1; - } - size = (int)filestat.st_size; - pNote->taosNoteLines = size / 60; + if (pNote->fd < 0) { + fprintf(stderr, "failed to open note file:%s reason:%s\n", noteName, strerror(errno)); + return -1; + } + taosLockNote(pNote->fd, pNote); - lseek(pNote->taosNoteFd, 0, SEEK_END); + // only an estimate for number of lines + struct stat filestat; + if (fstat(pNote->fd, &filestat) < 0) { + fprintf(stderr, "failed to fstat note file:%s reason:%s\n", noteName, strerror(errno)); + return -1; + } + size = (int32_t)filestat.st_size; + pNote->lines = size / 60; - return 0; + lseek(pNote->fd, 0, SEEK_END); + + return 0; } -void taosNotePrint(taosNoteInfo * pNote, const char * const format, ...) 
-{ - va_list argpointer; - char buffer[MAX_NOTE_LINE_SIZE+2]; - int len; - struct tm Tm, *ptm; - struct timeval timeSecs; - time_t curTime; +void taosNotePrintBuffer(SNoteObj *pNote, char *buffer, int32_t len) { + if (pNote->fd <= 0) return; + taosWrite(pNote->fd, buffer, len); - gettimeofday(&timeSecs, NULL); - curTime = timeSecs.tv_sec; - ptm = localtime_r(&curTime, &Tm); -#ifndef LINUX - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%lld ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, - ptm->tm_min, ptm->tm_sec, (int)timeSecs.tv_usec, taosGetPthreadId()); -#else - len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d %lx ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, - ptm->tm_sec, (int)timeSecs.tv_usec, (unsigned long int)pthread_self()); -#endif - va_start(argpointer, format); - len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); - va_end(argpointer); - - if (len >= MAX_NOTE_LINE_SIZE) len = MAX_NOTE_LINE_SIZE - 2; - - buffer[len++] = '\n'; - buffer[len] = 0; - - if (pNote->taosNoteFd >= 0) { - taosWrite(pNote->taosNoteFd, buffer, (unsigned int)len); - - if (pNote->taosNoteMaxLines > 0) { - pNote->taosNoteLines++; - if ((pNote->taosNoteLines > pNote->taosNoteMaxLines) && (pNote->taosNoteOpenInProgress == 0)) - taosOpenNewNote(pNote); - } - } + if (pNote->maxLines > 0) { + pNote->lines++; + if ((pNote->lines > pNote->maxLines) && (pNote->openInProgress == 0)) taosOpenNewNote(pNote); + } } -void taosCloseNote(taosNoteInfo * pNote) -{ - taosCloseNoteByFd(pNote->taosNoteFd, pNote); +void taosNotePrint(SNoteObj *pNote, const char *const format, ...) { + va_list argpointer; + char buffer[MAX_NOTE_LINE_SIZE + 2]; + int32_t len; + struct tm Tm, *ptm; + struct timeval timeSecs; + time_t curTime; + + gettimeofday(&timeSecs, NULL); + curTime = timeSecs.tv_sec; + ptm = localtime_r(&curTime, &Tm); + len = sprintf(buffer, "%02d/%02d %02d:%02d:%02d.%06d 0x%08" PRIx64 " ", ptm->tm_mon + 1, ptm->tm_mday, ptm->tm_hour, + ptm->tm_min, ptm->tm_sec, (int32_t)timeSecs.tv_usec, taosGetPthreadId()); + va_start(argpointer, format); + len += vsnprintf(buffer + len, MAX_NOTE_LINE_SIZE - len, format, argpointer); + va_end(argpointer); + + if (len >= MAX_NOTE_LINE_SIZE) len = MAX_NOTE_LINE_SIZE - 2; + + buffer[len++] = '\n'; + buffer[len] = 0; + + taosNotePrintBuffer(pNote, buffer, len); } -void taosCloseNoteByFd(int fd, taosNoteInfo * pNote) -{ - if (fd >= 0) { - taosUnLockNote(fd, pNote); - close(fd); - } +// static void taosCloseNote(SNoteObj *pNote) { taosCloseNoteByFd(pNote->fd, pNote); } + +static void taosCloseNoteByFd(int32_t fd, SNoteObj *pNote) { + if (fd >= 0) { + taosUnLockNote(fd, pNote); + close(fd); + } } diff --git a/src/util/src/tqueue.c b/src/util/src/tqueue.c index 143ce7c474..d72bc5f412 100644 --- a/src/util/src/tqueue.c +++ b/src/util/src/tqueue.c @@ -358,15 +358,15 @@ int taosReadQitemFromQset(taos_qset param, int *type, void **pitem, void **phand if (queue->head) { pNode = queue->head; *pitem = pNode->item; - *type = pNode->type; - *phandle = queue->ahandle; + if (type) *type = pNode->type; + if (phandle) *phandle = queue->ahandle; queue->head = pNode->next; if (queue->head == NULL) queue->tail = NULL; queue->numOfItems--; atomic_sub_fetch_32(&qset->numOfItems, 1); code = 1; - uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, *type, queue->numOfItems); + uTrace("item:%p is read out from queue:%p, type:%d items:%d", *pitem, queue, pNode->type, queue->numOfItems); } pthread_mutex_unlock(&queue->mutex); diff --git 
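// --- Illustrative usage sketch, not part of the patch above ---
// After the rewrite, taosInitNotes() decides which note files to open from
// tsTscEnableRecordSql / tsHttpEnableRecordSql / tscEmbedded, and
// taosNotePrint() appends a timestamped line, rotating between the ".0" and
// ".1" files once maxLines is exceeded.  This sketch assumes tnote.h exports
// the tsTscNote/tsInfoNote globals; printing to a note that was never opened
// is a no-op because taosNotePrintBuffer() checks fd <= 0.
void noteUsageSketch(void) {
  taosInitNotes();                                            // open the notes enabled by config
  taosNotePrint(&tsInfoNote, "dnode started, pid:%d", 1234);  // written only when tscEmbedded == 1
  taosNotePrint(&tsTscNote, "slow sql:%s", "select count(*) from meters");
}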
a/src/util/src/ttimer.c b/src/util/src/ttimer.c index 0222a6d80a..4eafbd1ae9 100644 --- a/src/util/src/ttimer.c +++ b/src/util/src/ttimer.c @@ -225,10 +225,11 @@ static void addToWheel(tmr_obj_t* timer, uint32_t delay) { } static bool removeFromWheel(tmr_obj_t* timer) { - if (timer->wheel >= tListLen(wheels)) { + uint8_t wheelIdx = timer->wheel; + if (wheelIdx >= tListLen(wheels)) { return false; } - time_wheel_t* wheel = wheels + timer->wheel; + time_wheel_t* wheel = wheels + wheelIdx; bool removed = false; pthread_mutex_lock(&wheel->mutex); diff --git a/src/util/src/tworker.c b/src/util/src/tworker.c new file mode 100644 index 0000000000..8b4053bccd --- /dev/null +++ b/src/util/src/tworker.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "tulog.h" +#include "tqueue.h" +#include "tworker.h" + +int32_t tWorkerInit(SWorkerPool *pPool) { + pPool->qset = taosOpenQset(); + pPool->worker = calloc(sizeof(SWorker), pPool->max); + pthread_mutex_init(&pPool->mutex, NULL); + for (int i = 0; i < pPool->max; ++i) { + SWorker *pWorker = pPool->worker + i; + pWorker->id = i; + pWorker->pPool = pPool; + } + + uInfo("worker:%s is initialized, min:%d max:%d", pPool->name, pPool->min, pPool->max); + return 0; +} + +void tWorkerCleanup(SWorkerPool *pPool) { + for (int i = 0; i < pPool->max; ++i) { + SWorker *pWorker = pPool->worker + i; + if(taosCheckPthreadValid(pWorker->thread)) { + taosQsetThreadResume(pPool->qset); + } + } + + for (int i = 0; i < pPool->max; ++i) { + SWorker *pWorker = pPool->worker + i; + if (taosCheckPthreadValid(pWorker->thread)) { + pthread_join(pWorker->thread, NULL); + } + } + + free(pPool->worker); + taosCloseQset(pPool->qset); + pthread_mutex_destroy(&pPool->mutex); + + uInfo("worker:%s is closed", pPool->name); +} + +void *tWorkerAllocQueue(SWorkerPool *pPool, void *ahandle) { + pthread_mutex_lock(&pPool->mutex); + taos_queue pQueue = taosOpenQueue(); + if (pQueue == NULL) { + pthread_mutex_unlock(&pPool->mutex); + return NULL; + } + + taosAddIntoQset(pPool->qset, pQueue, ahandle); + + // spawn a thread to process queue + if (pPool->num < pPool->max) { + do { + SWorker *pWorker = pPool->worker + pPool->num; + + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&pWorker->thread, &thAttr, pPool->workerFp, pWorker) != 0) { + uError("worker:%s:%d failed to create thread to process since %s", pPool->name, pWorker->id, strerror(errno)); + } + + pthread_attr_destroy(&thAttr); + pPool->num++; + uDebug("worker:%s:%d is launched, total:%d", pPool->name, pWorker->id, pPool->num); + } while (pPool->num < pPool->min); + } + + pthread_mutex_unlock(&pPool->mutex); + uDebug("worker:%s, queue:%p is allocated, ahandle:%p", pPool->name, pQueue, ahandle); + + return pQueue; +} + +void tWorkerFreeQueue(SWorkerPool *pPool, void *pQueue) { + taosCloseQueue(pQueue); + uDebug("worker:%s, queue:%p is freed", 
pPool->name, pQueue); +} diff --git a/src/vnode/inc/vnodeCfg.h b/src/vnode/inc/vnodeCfg.h index c5887fef5d..ba148c07c1 100644 --- a/src/vnode/inc/vnodeCfg.h +++ b/src/vnode/inc/vnodeCfg.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "vnodeInt.h" int32_t vnodeReadCfg(SVnodeObj *pVnode); int32_t vnodeWriteCfg(SCreateVnodeMsg *pVnodeCfg); diff --git a/src/vnode/inc/vnodeInt.h b/src/vnode/inc/vnodeInt.h index e468c2e83e..b28eb690fe 100644 --- a/src/vnode/inc/vnodeInt.h +++ b/src/vnode/inc/vnodeInt.h @@ -19,11 +19,11 @@ #ifdef __cplusplus extern "C" { #endif - #include "tlog.h" #include "tsync.h" -#include "twal.h" #include "tcq.h" +#include "tsdb.h" +#include "vnode.h" extern int32_t vDebugFlag; @@ -35,39 +35,39 @@ extern int32_t vDebugFlag; #define vTrace(...) { if (vDebugFlag & DEBUG_TRACE) { taosPrintLog("VND ", vDebugFlag, __VA_ARGS__); }} typedef struct { - int32_t vgId; // global vnode group ID - int32_t refCount; // reference count - int32_t queuedWMsg; - int32_t queuedRMsg; - int32_t flowctrlLevel; - int8_t status; - int8_t role; - int8_t accessState; - int8_t isFull; - int8_t isCommiting; - uint64_t version; // current version - uint64_t fversion; // version on saved data file - void *wqueue; - void *rqueue; - void *wal; - void *tsdb; - int64_t sync; - void *events; - void *cq; // continuous query - int32_t cfgVersion; - STsdbCfg tsdbCfg; - SSyncCfg syncCfg; - SWalCfg walCfg; - void *qMgmt; - char *rootDir; - tsem_t sem; - int8_t dropped; - char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; + int32_t vgId; // global vnode group ID + int32_t refCount; // reference count + int32_t queuedWMsg; + int32_t queuedRMsg; + int32_t flowctrlLevel; + int8_t status; + int8_t role; + int8_t accessState; + int8_t isFull; + int8_t isCommiting; + uint64_t version; // current version + uint64_t cversion; // version while commit start + uint64_t fversion; // version on saved data file + void * wqueue; // write queue + void * qqueue; // read query queue + void * fqueue; // read fetch/cancel queue + void * wal; + void * tsdb; + int64_t sync; + void * events; + void * cq; // continuous query + int32_t cfgVersion; + STsdbCfg tsdbCfg; + SSyncCfg syncCfg; + SWalCfg walCfg; + void * qMgmt; + char * rootDir; + tsem_t sem; + int8_t dropped; + char db[TSDB_ACCT_LEN + TSDB_DB_NAME_LEN]; + pthread_mutex_t statusMutex; } SVnodeObj; -void vnodeInitWriteFp(void); -void vnodeInitReadFp(void); - #ifdef __cplusplus } #endif diff --git a/src/vnode/inc/vnodeMain.h b/src/vnode/inc/vnodeMain.h new file mode 100644 index 0000000000..e1ddcdc36a --- /dev/null +++ b/src/vnode/inc/vnodeMain.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
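// --- Illustrative usage sketch, not part of the patch above ---
// The new tworker.c drives a pool of threads over a single qset.  The exact
// SWorkerPool/SWorker layout lives in tworker.h (not shown here); this sketch
// assumes 'name' is a string pointer and 'workerFp' is a pthread start
// routine, which is how tworker.c itself uses them.  The worker loop relies on
// the tqueue.c change above that allows 'type'/'phandle' to be passed as NULL.
static void *exampleWorkerLoop(void *param) {
  SWorker     *pWorker = param;
  SWorkerPool *pPool = pWorker->pPool;
  void *pItem = NULL, *ahandle = NULL;
  // blocks on the qset semaphore; returns 0 when resumed for shutdown
  while (taosReadQitemFromQset(pPool->qset, NULL, &pItem, &ahandle) > 0) {
    // ... process pItem for the owner identified by ahandle ...
    taosFreeQitem(pItem);
  }
  return NULL;
}

void workerPoolUsageSketch(void *owner) {
  SWorkerPool pool = {0};
  pool.name = "example";
  pool.min = 2;
  pool.max = 4;
  pool.workerFp = exampleWorkerLoop;

  tWorkerInit(&pool);
  void *pQueue = tWorkerAllocQueue(&pool, owner);  // lazily spawns up to 'min' worker threads
  // producers then call taosWriteQitem(pQueue, qtype, pItem) to hand work over
  tWorkerFreeQueue(&pool, pQueue);
  tWorkerCleanup(&pool);                           // resumes the qset and joins the threads
}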
+ */ + +#ifndef TDENGINE_VNODE_MAIN_H +#define TDENGINE_VNODE_MAIN_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg); +int32_t vnodeDrop(int32_t vgId); +int32_t vnodeOpen(int32_t vgId); +int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg); +int32_t vnodeClose(int32_t vgId); + +int32_t vnodeReset(SVnodeObj *pVnode); +void vnodeCleanUp(SVnodeObj *pVnode); +void vnodeDestroy(SVnodeObj *pVnode); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/vnode/inc/vnodeMgmt.h b/src/vnode/inc/vnodeMgmt.h new file mode 100644 index 0000000000..5a7e745619 --- /dev/null +++ b/src/vnode/inc/vnodeMgmt.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_MGMT_H +#define TDENGINE_VNODE_MGMT_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeInitMgmt(); +void vnodeCleanupMgmt(); + +void* vnodeAcquire(int32_t vgId); +void vnodeRelease(void *pVnode); +void* vnodeGetWal(void *pVnode); + +int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes); +void vnodeBuildStatusMsg(void *pStatus); +void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes); + +void vnodeAddIntoHash(SVnodeObj* pVnode); +void vnodeRemoveFromHash(SVnodeObj * pVnode); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/vnode/inc/vnodeRead.h b/src/vnode/inc/vnodeRead.h new file mode 100644 index 0000000000..f2953d79f4 --- /dev/null +++ b/src/vnode/inc/vnodeRead.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_READ_H +#define TDENGINE_VNODE_READ_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeInitRead(void); +void vnodeCleanupRead(void); + +int32_t vnodeWriteToRQueue(void *pVnode, void *pCont, int32_t contLen, int8_t qtype, void *rparam); +void vnodeFreeFromRQueue(void *pVnode, SVReadMsg *pRead); +int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/vnode/inc/vnodeStatus.h b/src/vnode/inc/vnodeStatus.h new file mode 100644 index 0000000000..00ac47df65 --- /dev/null +++ b/src/vnode/inc/vnodeStatus.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. 
+ * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_STATUS_H +#define TDENGINE_VNODE_STATUS_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +typedef enum _VN_STATUS { + TAOS_VN_STATUS_INIT = 0, + TAOS_VN_STATUS_READY = 1, + TAOS_VN_STATUS_CLOSING = 2, + TAOS_VN_STATUS_UPDATING = 3, + TAOS_VN_STATUS_RESET = 4, +} EVnodeStatus; + +bool vnodeSetInitStatus(SVnodeObj* pVnode); +bool vnodeSetReadyStatus(SVnodeObj* pVnode); +bool vnodeSetClosingStatus(SVnodeObj* pVnode); +bool vnodeSetUpdatingStatus(SVnodeObj* pVnode); +bool vnodeSetResetStatus(SVnodeObj* pVnode); + +bool vnodeInInitStatus(SVnodeObj* pVnode); +bool vnodeInReadyStatus(SVnodeObj* pVnode); +bool vnodeInClosingStatus(SVnodeObj* pVnode); +bool vnodeInResetStatus(SVnodeObj* pVnode); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/vnode/inc/vnodeSync.h b/src/vnode/inc/vnodeSync.h new file mode 100644 index 0000000000..ae02ca17cb --- /dev/null +++ b/src/vnode/inc/vnodeSync.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
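// --- Illustrative usage sketch, not part of the patch above ---
// The helpers declared in vnodeStatus.h replace the old direct
// atomic_val_compare_exchange_8 calls on pVnode->status.  The intended
// pattern, which vnodeAlter() in vnodeMain.c below also follows, is: move the
// vnode into a transient state, do the work, then restore READY.
static int32_t guardedUpdateSketch(SVnodeObj *pVnode) {
  if (!vnodeSetUpdatingStatus(pVnode)) {
    return TSDB_CODE_SUCCESS;  // not READY yet; the caller retries via a later status msg
  }

  int32_t code = 0;
  // ... apply cfg/wal/sync changes while the vnode stays in UPDATING state ...

  vnodeSetReadyStatus(pVnode);
  return code;
}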
+ */ + +#ifndef TDENGINE_VNODE_SYNC_H +#define TDENGINE_VNODE_SYNC_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fver); +int32_t vnodeGetWalInfo(int32_t vgId, char *fileName, int64_t *fileId); +void vnodeNotifyRole(int32_t vgId, int8_t role); +void vnodeCtrlFlow(int32_t vgId, int32_t level); +int32_t vnodeNotifyFileSynced(int32_t vgId, uint64_t fversion); +void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code); +int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam); +int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver); + +void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/vnode/inc/vnodeVersion.h b/src/vnode/inc/vnodeVersion.h index 1d086cb21f..913e3915ab 100644 --- a/src/vnode/inc/vnodeVersion.h +++ b/src/vnode/inc/vnodeVersion.h @@ -19,6 +19,7 @@ #ifdef __cplusplus extern "C" { #endif +#include "vnodeInt.h" int32_t vnodeReadVersion(SVnodeObj *pVnode); int32_t vnodeSaveVersion(SVnodeObj *pVnode); diff --git a/src/vnode/inc/vnodeWorker.h b/src/vnode/inc/vnodeWorker.h new file mode 100644 index 0000000000..01d9d42900 --- /dev/null +++ b/src/vnode/inc/vnodeWorker.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#ifndef TDENGINE_VNODE_WORKER_H +#define TDENGINE_VNODE_WORKER_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeInitMWorker(); +void vnodeCleanupMWorker(); +int32_t vnodeCleanupInMWorker(SVnodeObj *pVnode); +int32_t vnodeDestroyInMWorker(SVnodeObj *pVnode); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/vnode/inc/vnodeWrite.h b/src/vnode/inc/vnodeWrite.h new file mode 100644 index 0000000000..8b3f0fdb58 --- /dev/null +++ b/src/vnode/inc/vnodeWrite.h @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_VNODE_WRITE_H +#define TDENGINE_VNODE_WRITE_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeInitWrite(void); +void vnodeCleanupWrite(void); + +int32_t vnodeWriteToWQueue(void *pVnode, void *pHead, int32_t qtype, void *pRpcMsg); +void vnodeFreeFromWQueue(void *pVnode, SVWriteMsg *pWrite); +int32_t vnodeProcessWrite(void *pVnode, void *pHead, int32_t qtype, void *pRspRet); + +#ifdef __cplusplus +} +#endif + +#endif \ No newline at end of file diff --git a/src/vnode/src/vnodeCfg.c b/src/vnode/src/vnodeCfg.c index 2d56157328..e0881db000 100644 --- a/src/vnode/src/vnodeCfg.c +++ b/src/vnode/src/vnodeCfg.c @@ -15,13 +15,9 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taosmsg.h" -#include "taoserror.h" #include "cJSON.h" #include "tglobal.h" -#include "tsdb.h" #include "dnode.h" -#include "vnodeInt.h" #include "vnodeCfg.h" static void vnodeLoadCfg(SVnodeObj *pVnode, SCreateVnodeMsg* vnodeMsg) { @@ -242,9 +238,8 @@ int32_t vnodeReadCfg(SVnodeObj *pVnode) { } tstrncpy(node->nodeEp, nodeEp->valuestring, TSDB_EP_LEN); - if (!nodeChanged) { - nodeChanged = dnodeCheckEpChanged(node->nodeId, node->nodeEp); - } + bool changed = dnodeCheckEpChanged(node->nodeId, node->nodeEp); + if (changed) nodeChanged = changed; } ret = TSDB_CODE_SUCCESS; diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index b516c9d90e..d7e33deb15 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -18,77 +18,18 @@ #include "taoserror.h" #include "taosmsg.h" #include "tglobal.h" -#include "trpc.h" -#include "tsdb.h" -#include "tutil.h" -#include "vnode.h" -#include "vnodeInt.h" +// #include "tfs.h" #include "query.h" #include "dnode.h" #include "vnodeCfg.h" +#include "vnodeStatus.h" +#include "vnodeSync.h" #include "vnodeVersion.h" +#include "vnodeMgmt.h" +#include "vnodeWorker.h" +#include "vnodeMain.h" -static SHashObj*tsVnodesHash; -static void vnodeCleanUp(SVnodeObj *pVnode); -static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno); -static uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fversion); -static int32_t vnodeGetWalInfo(int32_t vgId, char *fileName, int64_t *fileId); -static void vnodeNotifyRole(int32_t vgId, int8_t role); -static void vnodeCtrlFlow(int32_t vgId, int32_t level); -static int32_t vnodeNotifyFileSynced(int32_t vgId, uint64_t fversion); -static void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code); -static int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam); -static int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver); - -#ifndef _SYNC -int64_t syncStart(const SSyncInfo *info) { return NULL; } -int32_t syncForwardToPeer(int64_t rid, void *pHead, void *mhandle, int32_t qtype) { return 0; } -void syncStop(int64_t rid) {} -int32_t syncReconfig(int64_t rid, const SSyncCfg *cfg) { return 0; } -int32_t syncGetNodesRole(int64_t rid, SNodesRole *cfg) { return 0; } -void syncConfirmForward(int64_t rid, uint64_t version, int32_t code) {} -#endif - -char* vnodeStatus[] = { - "init", - "ready", - "closing", - "updating", - "reset" -}; - -int32_t vnodeInitResources() { - int32_t code = syncInit(); - if (code != 0) return code; - - vnodeInitWriteFp(); - vnodeInitReadFp(); - - tsVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); - if (tsVnodesHash == NULL) { - vError("failed to init vnode list"); - return 
TSDB_CODE_VND_OUT_OF_MEMORY; - } - - if (tsdbInitCommitQueue(tsNumOfCommitThreads) < 0) { - vError("failed to init vnode commit queue"); - return terrno; - } - - return TSDB_CODE_SUCCESS; -} - -void vnodeCleanupResources() { - tsdbDestroyCommitQueue(); - - if (tsVnodesHash != NULL) { - vDebug("vnode list is cleanup"); - taosHashCleanup(tsVnodesHash); - tsVnodesHash = NULL; - } - - syncCleanUp(); -} +static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno); int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { int32_t code; @@ -155,7 +96,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { vInfo("vgId:%d, vnode dir is created, walLevel:%d fsyncPeriod:%d", pVnodeCfg->cfg.vgId, pVnodeCfg->cfg.walLevel, pVnodeCfg->cfg.fsyncPeriod); - code = vnodeOpen(pVnodeCfg->cfg.vgId, rootDir); + code = vnodeOpen(pVnodeCfg->cfg.vgId); return code; } @@ -170,79 +111,89 @@ int32_t vnodeDrop(int32_t vgId) { vInfo("vgId:%d, vnode will be dropped, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); pVnode->dropped = 1; + // remove from hash, so new messages wont be consumed + vnodeRemoveFromHash(pVnode); vnodeRelease(pVnode); - vnodeCleanUp(pVnode); + vnodeCleanupInMWorker(pVnode); return TSDB_CODE_SUCCESS; } +static int32_t vnodeAlterImp(SVnodeObj *pVnode, SCreateVnodeMsg *pVnodeCfg) { + int32_t code = vnodeWriteCfg(pVnodeCfg); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = vnodeReadCfg(pVnode); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = walAlter(pVnode->wal, &pVnode->walCfg); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = syncReconfig(pVnode->sync, &pVnode->syncCfg); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + if (pVnode->tsdb) { + code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + } + + return 0; +} + int32_t vnodeAlter(void *vparam, SCreateVnodeMsg *pVnodeCfg) { SVnodeObj *pVnode = vparam; // vnode in non-ready state and still needs to return success instead of TSDB_CODE_VND_INVALID_STATUS // cfgVersion can be corrected by status msg - if (atomic_val_compare_exchange_8(&pVnode->status, TAOS_VN_STATUS_READY, TAOS_VN_STATUS_UPDATING) != TAOS_VN_STATUS_READY) { + if (!vnodeSetUpdatingStatus(pVnode)) { vDebug("vgId:%d, vnode is not ready, do alter operation later", pVnode->vgId); return TSDB_CODE_SUCCESS; } - int32_t code = vnodeWriteCfg(pVnodeCfg); - if (code != TSDB_CODE_SUCCESS) { - pVnode->status = TAOS_VN_STATUS_READY; - return code; + int32_t code = vnodeAlterImp(pVnode, pVnodeCfg); + vnodeSetReadyStatus(pVnode); + + if (code != 0) { + vError("vgId:%d, failed to alter vnode, code:0x%x", pVnode->vgId, code); + } else { + vDebug("vgId:%d, vnode is altered", pVnode->vgId); } - code = vnodeReadCfg(pVnode); - if (code != TSDB_CODE_SUCCESS) { - pVnode->status = TAOS_VN_STATUS_READY; - return code; - } - - code = walAlter(pVnode->wal, &pVnode->walCfg); - if (code != TSDB_CODE_SUCCESS) { - pVnode->status = TAOS_VN_STATUS_READY; - return code; - } - - code = syncReconfig(pVnode->sync, &pVnode->syncCfg); - if (code != TSDB_CODE_SUCCESS) { - pVnode->status = TAOS_VN_STATUS_READY; - return code; - } - - if (pVnode->tsdb) { - code = tsdbConfigRepo(pVnode->tsdb, &pVnode->tsdbCfg); - if (code != TSDB_CODE_SUCCESS) { - pVnode->status = TAOS_VN_STATUS_READY; - return code; - } - } - - pVnode->status = TAOS_VN_STATUS_READY; - vDebug("vgId:%d, vnode is altered", pVnode->vgId); - - return TSDB_CODE_SUCCESS; + return code; } -int32_t vnodeOpen(int32_t vnode, char 
*rootDir) { - char temp[TSDB_FILENAME_LEN]; +int32_t vnodeOpen(int32_t vgId) { + char temp[TSDB_FILENAME_LEN * 3]; + char rootDir[TSDB_FILENAME_LEN * 2]; + snprintf(rootDir, TSDB_FILENAME_LEN * 2, "%s/vnode%d", tsVnodeDir, vgId); SVnodeObj *pVnode = calloc(sizeof(SVnodeObj), 1); if (pVnode == NULL) { - vError("vgId:%d, failed to open vnode since no enough memory", vnode); + vError("vgId:%d, failed to open vnode since no enough memory", vgId); return TAOS_SYSTEM_ERROR(errno); } atomic_add_fetch_32(&pVnode->refCount, 1); - pVnode->vgId = vnode; - pVnode->status = TAOS_VN_STATUS_INIT; + pVnode->vgId = vgId; pVnode->fversion = 0; pVnode->version = 0; pVnode->tsdbCfg.tsdbId = pVnode->vgId; pVnode->rootDir = strdup(rootDir); pVnode->accessState = TSDB_VN_ALL_ACCCESS; tsem_init(&pVnode->sem, 0, 0); + pthread_mutex_init(&pVnode->statusMutex, NULL); + vnodeSetInitStatus(pVnode); int32_t code = vnodeReadCfg(pVnode); if (code != TSDB_CODE_SUCCESS) { @@ -252,8 +203,8 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { code = vnodeReadVersion(pVnode); if (code != TSDB_CODE_SUCCESS) { - vError("vgId:%d, failed to read version, generate it from data file", pVnode->vgId); - // Allow vnode start even when read version fails, set version as walVersion or zero + vError("vgId:%d, failed to read file version, generate it from data file", pVnode->vgId); + // Allow vnode start even when read file version fails, set file version as wal version or zero // vnodeCleanUp(pVnode); // return code; } @@ -261,8 +212,9 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { pVnode->fversion = pVnode->version; pVnode->wqueue = dnodeAllocVWriteQueue(pVnode); - pVnode->rqueue = dnodeAllocVReadQueue(pVnode); - if (pVnode->wqueue == NULL || pVnode->rqueue == NULL) { + pVnode->qqueue = dnodeAllocVQueryQueue(pVnode); + pVnode->fqueue = dnodeAllocVFetchQueue(pVnode); + if (pVnode->wqueue == NULL || pVnode->qqueue == NULL || pVnode->fqueue == NULL) { vnodeCleanUp(pVnode); return terrno; } @@ -272,7 +224,7 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { sprintf(cqCfg.user, "_root"); strcpy(cqCfg.pass, tsInternalPass); strcpy(cqCfg.db, pVnode->db); - cqCfg.vgId = vnode; + cqCfg.vgId = vgId; cqCfg.cqWrite = vnodeWriteToCache; pVnode->cq = cqOpen(pVnode, &cqCfg); if (pVnode->cq == NULL) { @@ -341,13 +293,13 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { vDebug("vgId:%d, vnode is opened in %s, pVnode:%p", pVnode->vgId, rootDir, pVnode); tsdbIncCommitRef(pVnode->vgId); - taosHashPut(tsVnodesHash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *)); + vnodeAddIntoHash(pVnode); SSyncInfo syncInfo; syncInfo.vgId = pVnode->vgId; syncInfo.version = pVnode->version; syncInfo.syncCfg = pVnode->syncCfg; - sprintf(syncInfo.path, "%s", rootDir); + tstrncpy(syncInfo.path, rootDir, TSDB_FILENAME_LEN); syncInfo.getWalInfo = vnodeGetWalInfo; syncInfo.getFileInfo = vnodeGetFileInfo; syncInfo.writeToCache = vnodeWriteToCache; @@ -358,19 +310,15 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) { syncInfo.getVersion = vnodeGetVersion; pVnode->sync = syncStart(&syncInfo); -#ifndef _SYNC - pVnode->role = TAOS_SYNC_ROLE_MASTER; -#else if (pVnode->sync <= 0) { - vError("vgId:%d, failed to open sync module, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, + vError("vgId:%d, failed to open sync, replica:%d reason:%s", pVnode->vgId, pVnode->syncCfg.replica, tstrerror(terrno)); - vnodeRelease(pVnode); + vnodeRemoveFromHash(pVnode); vnodeCleanUp(pVnode); return terrno; } -#endif - pVnode->status = TAOS_VN_STATUS_READY; + 
vnodeSetReadyStatus(pVnode); return TSDB_CODE_SUCCESS; } @@ -379,30 +327,16 @@ int32_t vnodeClose(int32_t vgId) { if (pVnode == NULL) return 0; vDebug("vgId:%d, vnode will be closed, pVnode:%p", pVnode->vgId, pVnode); + vnodeRemoveFromHash(pVnode); vnodeRelease(pVnode); vnodeCleanUp(pVnode); return 0; } -void vnodeRelease(void *vparam) { - if (vparam == NULL) return; - SVnodeObj *pVnode = vparam; - int32_t code = 0; - int32_t vgId = pVnode->vgId; - - int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1); - vTrace("vgId:%d, release vnode, refCount:%d pVnode:%p", vgId, refCount, pVnode); - assert(refCount >= 0); - - if (refCount > 0) { - if (pVnode->status == TAOS_VN_STATUS_RESET && refCount <= 3) { - tsem_post(&pVnode->sem); - } - return; - } - - vDebug("vgId:%d, vnode will be destroyed, refCount:%d pVnode:%p", vgId, refCount, pVnode); +void vnodeDestroy(SVnodeObj *pVnode) { + int32_t code = 0; + int32_t vgId = pVnode->vgId; if (pVnode->qMgmt) { qCleanupQueryMgmt(pVnode->qMgmt); @@ -440,9 +374,14 @@ void vnodeRelease(void *vparam) { pVnode->wqueue = NULL; } - if (pVnode->rqueue) { - dnodeFreeVReadQueue(pVnode->rqueue); - pVnode->rqueue = NULL; + if (pVnode->qqueue) { + dnodeFreeVQueryQueue(pVnode->qqueue); + pVnode->qqueue = NULL; + } + + if (pVnode->fqueue) { + dnodeFreeVFetchQueue(pVnode->fqueue); + pVnode->fqueue = NULL; } tfree(pVnode->rootDir); @@ -465,121 +404,16 @@ void vnodeRelease(void *vparam) { } tsem_destroy(&pVnode->sem); + pthread_mutex_destroy(&pVnode->statusMutex); free(pVnode); tsdbDecCommitRef(vgId); - - int32_t count = taosHashGetSize(tsVnodesHash); - vDebug("vgId:%d, vnode is destroyed, vnodes:%d", vgId, count); } -static void vnodeIncRef(void *ptNode) { - assert(ptNode != NULL); - - SVnodeObj **ppVnode = (SVnodeObj **)ptNode; - assert(ppVnode); - assert(*ppVnode); - - SVnodeObj *pVnode = *ppVnode; - atomic_add_fetch_32(&pVnode->refCount, 1); - vTrace("vgId:%d, get vnode, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); -} - -void *vnodeAcquire(int32_t vgId) { - SVnodeObj **ppVnode = taosHashGetCB(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *)); - - if (ppVnode == NULL || *ppVnode == NULL) { - terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; - vDebug("vgId:%d, not exist", vgId); - return NULL; - } - - return *ppVnode; -} - -void *vnodeGetWal(void *pVnode) { - return ((SVnodeObj *)pVnode)->wal; -} - -static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) { - int64_t totalStorage = 0; - int64_t compStorage = 0; - int64_t pointsWritten = 0; - - if (pVnode->status != TAOS_VN_STATUS_READY) return; - if (pStatus->openVnodes >= TSDB_MAX_VNODES) return; - - if (pVnode->tsdb) { - tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage); - } - - SVnodeLoad *pLoad = &pStatus->load[pStatus->openVnodes++]; - pLoad->vgId = htonl(pVnode->vgId); - pLoad->cfgVersion = htonl(pVnode->cfgVersion); - pLoad->totalStorage = htobe64(totalStorage); - pLoad->compStorage = htobe64(compStorage); - pLoad->pointsWritten = htobe64(pointsWritten); - pLoad->status = pVnode->status; - pLoad->role = pVnode->role; - pLoad->replica = pVnode->syncCfg.replica; -} - -int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { - void *pIter = taosHashIterate(tsVnodesHash, NULL); - while (pIter) { - SVnodeObj **pVnode = pIter; - if (*pVnode) { - - (*numOfVnodes)++; - if (*numOfVnodes >= TSDB_MAX_VNODES) { - vError("vgId:%d, too many open vnodes, exist:%d max:%d", (*pVnode)->vgId, *numOfVnodes, TSDB_MAX_VNODES); - continue; - } 
else { - vnodeList[*numOfVnodes - 1] = (*pVnode)->vgId; - } - - } - - pIter = taosHashIterate(tsVnodesHash, pIter); - } - return TSDB_CODE_SUCCESS; -} - -void vnodeBuildStatusMsg(void *param) { - SStatusMsg *pStatus = param; - - void *pIter = taosHashIterate(tsVnodesHash, NULL); - while (pIter) { - SVnodeObj **pVnode = pIter; - if (*pVnode) { - vnodeBuildVloadMsg(*pVnode, pStatus); - } - pIter = taosHashIterate(tsVnodesHash, pIter); - } -} - -void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) { - for (int32_t i = 0; i < numOfVnodes; ++i) { - pAccess[i].vgId = htonl(pAccess[i].vgId); - SVnodeObj *pVnode = vnodeAcquire(pAccess[i].vgId); - if (pVnode != NULL) { - pVnode->accessState = pAccess[i].accessState; - if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) { - vDebug("vgId:%d, access state is set to %d", pAccess[i].vgId, pVnode->accessState); - } - vnodeRelease(pVnode); - } - } -} - -static void vnodeCleanUp(SVnodeObj *pVnode) { - // remove from hash, so new messages wont be consumed - taosHashRemove(tsVnodesHash, &pVnode->vgId, sizeof(int32_t)); - - if (pVnode->status != TAOS_VN_STATUS_INIT) { +void vnodeCleanUp(SVnodeObj *pVnode) { + if (!vnodeInInitStatus(pVnode)) { // it may be in updateing or reset state, then it shall wait int32_t i = 0; - while (atomic_val_compare_exchange_8(&pVnode->status, TAOS_VN_STATUS_READY, TAOS_VN_STATUS_CLOSING) != - TAOS_VN_STATUS_READY) { + while (!vnodeSetClosingStatus(pVnode)) { if (++i % 1000 == 0) { sched_yield(); } @@ -613,19 +447,20 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { if (status == TSDB_STATUS_COMMIT_START) { pVnode->isCommiting = 1; - pVnode->fversion = pVnode->version; + pVnode->cversion = pVnode->version; vDebug("vgId:%d, start commit, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); - if (pVnode->status != TAOS_VN_STATUS_INIT) { + if (!vnodeInInitStatus(pVnode)) { return walRenew(pVnode->wal); } return 0; } if (status == TSDB_STATUS_COMMIT_OVER) { - vDebug("vgId:%d, commit over, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); pVnode->isCommiting = 0; pVnode->isFull = 0; - if (pVnode->status != TAOS_VN_STATUS_INIT) { + pVnode->fversion = pVnode->cversion; + vDebug("vgId:%d, commit over, fver:%" PRIu64 " vver:%" PRIu64, pVnode->vgId, pVnode->fversion, pVnode->version); + if (!vnodeInInitStatus(pVnode)) { walRemoveOneOldFile(pVnode->wal); } return vnodeSaveVersion(pVnode); @@ -634,75 +469,12 @@ static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno) { return 0; } -static uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, - uint64_t *fversion) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while get file info", vgId); - return 0; - } - - *fversion = pVnode->fversion; - uint32_t ret = tsdbGetFileInfo(pVnode->tsdb, name, index, eindex, size); - - vnodeRelease(pVnode); - return ret; -} - -static int32_t vnodeGetWalInfo(int32_t vgId, char *fileName, int64_t *fileId) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while get wal info", vgId); - return -1; - } - - int32_t code = walGetWalFile(pVnode->wal, fileName, fileId); - - vnodeRelease(pVnode); - return code; -} - -static void vnodeNotifyRole(int32_t vgId, int8_t role) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vTrace("vgId:%d, vnode not found while notify role", vgId); 
- return; - } - - vInfo("vgId:%d, sync role changed from %s to %s", pVnode->vgId, syncRole[pVnode->role], syncRole[role]); - pVnode->role = role; - dnodeSendStatusMsgToMnode(); - - if (pVnode->role == TAOS_SYNC_ROLE_MASTER) { - cqStart(pVnode->cq); - } else { - cqStop(pVnode->cq); - } - - vnodeRelease(pVnode); -} - -static void vnodeCtrlFlow(int32_t vgId, int32_t level) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vTrace("vgId:%d, vnode not found while flow ctrl", vgId); - return; - } - - if (pVnode->flowctrlLevel != level) { - vDebug("vgId:%d, set flowctrl level from %d to %d", pVnode->vgId, pVnode->flowctrlLevel, level); - pVnode->flowctrlLevel = level; - } - - vnodeRelease(pVnode); -} - -static int32_t vnodeResetTsdb(SVnodeObj *pVnode) { +int32_t vnodeReset(SVnodeObj *pVnode) { char rootDir[128] = "\0"; sprintf(rootDir, "%s/tsdb", pVnode->rootDir); - if (pVnode->status != TAOS_VN_STATUS_CLOSING && pVnode->status != TAOS_VN_STATUS_INIT) { - pVnode->status = TAOS_VN_STATUS_RESET; + if (!vnodeSetResetStatus(pVnode)) { + return -1; } void *tsdb = pVnode->tsdb; @@ -725,70 +497,8 @@ static int32_t vnodeResetTsdb(SVnodeObj *pVnode) { appH.cqDropFunc = cqDrop; pVnode->tsdb = tsdbOpenRepo(rootDir, &appH); - pVnode->status = TAOS_VN_STATUS_READY; + vnodeSetReadyStatus(pVnode); vnodeRelease(pVnode); return 0; } - -static int32_t vnodeNotifyFileSynced(int32_t vgId, uint64_t fversion) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while notify file synced", vgId); - return 0; - } - - pVnode->fversion = fversion; - pVnode->version = fversion; - vnodeSaveVersion(pVnode); - - vDebug("vgId:%d, data file is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); - int32_t code = vnodeResetTsdb(pVnode); - - vnodeRelease(pVnode); - return code; -} - -void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) { - void *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while confirm forward", vgId); - return; - } - - dnodeSendRpcVWriteRsp(pVnode, wparam, code); - vnodeRelease(pVnode); -} - -static int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while write to cache", vgId); - return TSDB_CODE_VND_INVALID_VGROUP_ID; - } - - int32_t code = vnodeWriteToWQueue(pVnode, wparam, qtype, rparam); - - vnodeRelease(pVnode); - return code; -} - -static int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while write to cache", vgId); - return -1; - } - - int32_t code = 0; - if (pVnode->isCommiting) { - vDebug("vgId:%d, vnode is commiting while get version", vgId); - code = -1; - } else { - *fver = pVnode->fversion; - *wver = pVnode->version; - } - - vnodeRelease(pVnode); - return code; -} diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c new file mode 100644 index 0000000000..cf42690d7d --- /dev/null +++ b/src/vnode/src/vnodeMgmt.c @@ -0,0 +1,192 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "dnode.h" +#include "vnodeStatus.h" +#include "vnodeWorker.h" +#include "vnodeRead.h" +#include "vnodeWrite.h" +#include "vnodeMain.h" + +static SHashObj *tsVnodesHash = NULL; + +static int32_t vnodeInitHash(void); +static void vnodeCleanupHash(void); +static void vnodeIncRef(void *ptNode); + +static SStep tsVnodeSteps[] = { + {"vnode-worker", vnodeInitMWorker, vnodeCleanupMWorker}, + {"vnode-write", vnodeInitWrite, vnodeCleanupWrite}, + {"vnode-read", vnodeInitRead, vnodeCleanupRead}, + {"vnode-hash", vnodeInitHash, vnodeCleanupHash}, + {"tsdb-queue", tsdbInitCommitQueue, tsdbDestroyCommitQueue} +}; + +int32_t vnodeInitMgmt() { + int32_t stepSize = sizeof(tsVnodeSteps) / sizeof(SStep); + return dnodeStepInit(tsVnodeSteps, stepSize); +} + +void vnodeCleanupMgmt() { + int32_t stepSize = sizeof(tsVnodeSteps) / sizeof(SStep); + dnodeStepCleanup(tsVnodeSteps, stepSize); +} + +static int32_t vnodeInitHash() { + tsVnodesHash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); + if (tsVnodesHash == NULL) { + vError("failed to init vnode mgmt"); + return -1; + } + + return 0; +} + +static void vnodeCleanupHash() { + if (tsVnodesHash != NULL) { + vDebug("vnode mgmt is cleanup"); + taosHashCleanup(tsVnodesHash); + tsVnodesHash = NULL; + } +} + +void *vnodeGetWal(void *pVnode) { + return ((SVnodeObj *)pVnode)->wal; +} + +void vnodeAddIntoHash(SVnodeObj *pVnode) { + taosHashPut(tsVnodesHash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *)); +} + +void vnodeRemoveFromHash(SVnodeObj *pVnode) { + taosHashRemove(tsVnodesHash, &pVnode->vgId, sizeof(int32_t)); +} + +static void vnodeIncRef(void *ptNode) { + assert(ptNode != NULL); + + SVnodeObj **ppVnode = (SVnodeObj **)ptNode; + assert(ppVnode); + assert(*ppVnode); + + SVnodeObj *pVnode = *ppVnode; + atomic_add_fetch_32(&pVnode->refCount, 1); + vTrace("vgId:%d, get vnode, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); +} + +void *vnodeAcquire(int32_t vgId) { + SVnodeObj **ppVnode = taosHashGetCB(tsVnodesHash, &vgId, sizeof(int32_t), vnodeIncRef, NULL, sizeof(void *)); + + if (ppVnode == NULL || *ppVnode == NULL) { + terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; + vDebug("vgId:%d, not exist", vgId); + return NULL; + } + + return *ppVnode; +} + +void vnodeRelease(void *vparam) { + SVnodeObj *pVnode = vparam; + if (vparam == NULL) return; + + int32_t refCount = atomic_sub_fetch_32(&pVnode->refCount, 1); + vTrace("vgId:%d, release vnode, refCount:%d pVnode:%p", pVnode->vgId, refCount, pVnode); + assert(refCount >= 0); + + if (refCount > 0) { + if (vnodeInResetStatus(pVnode) && refCount <= 3) { + tsem_post(&pVnode->sem); + } + } else { + vDebug("vgId:%d, vnode will be destroyed, refCount:%d pVnode:%p", pVnode->vgId, refCount, pVnode); + vnodeDestroyInMWorker(pVnode); + int32_t count = taosHashGetSize(tsVnodesHash); + vDebug("vgId:%d, vnode is destroyed, vnodes:%d", pVnode->vgId, count); + } +} + +static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) { + int64_t totalStorage = 0; + int64_t compStorage = 0; + int64_t pointsWritten = 0; + + if (!vnodeInReadyStatus(pVnode)) return; + if 
(pStatus->openVnodes >= TSDB_MAX_VNODES) return; + + if (pVnode->tsdb) { + tsdbReportStat(pVnode->tsdb, &pointsWritten, &totalStorage, &compStorage); + } + + SVnodeLoad *pLoad = &pStatus->load[pStatus->openVnodes++]; + pLoad->vgId = htonl(pVnode->vgId); + pLoad->cfgVersion = htonl(pVnode->cfgVersion); + pLoad->totalStorage = htobe64(totalStorage); + pLoad->compStorage = htobe64(compStorage); + pLoad->pointsWritten = htobe64(pointsWritten); + pLoad->status = pVnode->status; + pLoad->role = pVnode->role; + pLoad->replica = pVnode->syncCfg.replica; +} + +int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { + void *pIter = taosHashIterate(tsVnodesHash, NULL); + while (pIter) { + SVnodeObj **pVnode = pIter; + if (*pVnode) { + + (*numOfVnodes)++; + if (*numOfVnodes >= TSDB_MAX_VNODES) { + vError("vgId:%d, too many open vnodes, exist:%d max:%d", (*pVnode)->vgId, *numOfVnodes, TSDB_MAX_VNODES); + continue; + } else { + vnodeList[*numOfVnodes - 1] = (*pVnode)->vgId; + } + + } + + pIter = taosHashIterate(tsVnodesHash, pIter); + } + return TSDB_CODE_SUCCESS; +} + +void vnodeBuildStatusMsg(void *param) { + SStatusMsg *pStatus = param; + + void *pIter = taosHashIterate(tsVnodesHash, NULL); + while (pIter) { + SVnodeObj **pVnode = pIter; + if (*pVnode) { + vnodeBuildVloadMsg(*pVnode, pStatus); + } + pIter = taosHashIterate(tsVnodesHash, pIter); + } +} + +void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) { + for (int32_t i = 0; i < numOfVnodes; ++i) { + pAccess[i].vgId = htonl(pAccess[i].vgId); + SVnodeObj *pVnode = vnodeAcquire(pAccess[i].vgId); + if (pVnode != NULL) { + pVnode->accessState = pAccess[i].accessState; + if (pVnode->accessState != TSDB_VN_ALL_ACCCESS) { + vDebug("vgId:%d, access state is set to %d", pAccess[i].vgId, pVnode->accessState); + } + vnodeRelease(pVnode); + } + } +} \ No newline at end of file diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index ed6d29505f..637d470f8a 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -14,28 +14,27 @@ */ #define _DEFAULT_SOURCE -#define _NON_BLOCKING_RETRIEVE 0 #include "os.h" -#include "tglobal.h" -#include "taoserror.h" #include "taosmsg.h" -#include "query.h" -#include "trpc.h" -#include "tsdb.h" -#include "vnode.h" -#include "vnodeInt.h" #include "tqueue.h" +#include "tglobal.h" +#include "query.h" +#include "vnodeStatus.h" static int32_t (*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SVReadMsg *pRead); static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead); static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead); + static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId); -void vnodeInitReadFp(void) { +int32_t vnodeInitRead(void) { vnodeProcessReadMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg; vnodeProcessReadMsgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessFetchMsg; + return 0; } +void vnodeCleanupRead() {} + // // After the fetch request enters the vnode queue, if the vnode cannot provide services, the process function are // still required, or there will be a deadlock, so we don’t do any check here, but put the check codes before the @@ -54,7 +53,7 @@ int32_t vnodeProcessRead(void *vparam, SVReadMsg *pRead) { } static int32_t vnodeCheckRead(SVnodeObj *pVnode) { - if (pVnode->status != TAOS_VN_STATUS_READY) { + if (!vnodeInReadyStatus(pVnode)) { vDebug("vgId:%d, vnode status is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], pVnode->refCount, pVnode); return 
TSDB_CODE_APP_NOT_READY; @@ -115,13 +114,16 @@ int32_t vnodeWriteToRQueue(void *vparam, void *pCont, int32_t contLen, int8_t qt } pRead->qtype = qtype; - atomic_add_fetch_32(&pVnode->refCount, 1); atomic_add_fetch_32(&pVnode->queuedRMsg, 1); - vTrace("vgId:%d, write into vrqueue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); - taosWriteQitem(pVnode->rqueue, qtype, pRead); - return TSDB_CODE_SUCCESS; + if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL || pRead->msgType == TSDB_MSG_TYPE_FETCH) { + vTrace("vgId:%d, write into vfetch queue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + return taosWriteQitem(pVnode->fqueue, qtype, pRead); + } else { + vTrace("vgId:%d, write into vquery queue, refCount:%d queued:%d", pVnode->vgId, pVnode->refCount, pVnode->queuedRMsg); + return taosWriteQitem(pVnode->qqueue, qtype, pRead); + } } static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void *ahandle) { @@ -131,7 +133,7 @@ static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void int32_t code = vnodeWriteToRQueue(pVnode, qhandle, 0, TAOS_QTYPE_QUERY, &rpcMsg); if (code == TSDB_CODE_SUCCESS) { - vDebug("QInfo:%p add to vread queue for exec query", *qhandle); + vTrace("QInfo:%p add to vread queue for exec query", *qhandle); } return code; @@ -162,7 +164,7 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle, } } else { *freeHandle = true; - vDebug("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); + vTrace("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); } } else { SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -197,26 +199,29 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { // qHandle needs to be freed correctly if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { - SRetrieveTableMsg *killQueryMsg = (SRetrieveTableMsg *)pRead->pCont; - killQueryMsg->free = htons(killQueryMsg->free); - killQueryMsg->qhandle = htobe64(killQueryMsg->qhandle); - - vWarn("QInfo:%p connection %p broken, kill query", (void *)killQueryMsg->qhandle, pRead->rpcHandle); - assert(pRead->contLen > 0 && killQueryMsg->free == 1); - - void **qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t)killQueryMsg->qhandle); - if (qhandle == NULL || *qhandle == NULL) { - vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void *)killQueryMsg->qhandle, - pRead->rpcHandle); - } else { - assert(*qhandle == (void *)killQueryMsg->qhandle); - - qKillQuery(*qhandle); - qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, true); - } - - return TSDB_CODE_TSC_QUERY_CANCELLED; + vError("error rpc msg in query, %s", tstrerror(pRead->code)); } +// assert(pRead->code != TSDB_CODE_RPC_NETWORK_UNAVAIL); +// if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { +// SCancelQueryMsg *pMsg = (SCancelQueryMsg *)pRead->pCont; +//// pMsg->free = htons(killQueryMsg->free); +// pMsg->qhandle = htobe64(pMsg->qhandle); +// +// vWarn("QInfo:%p connection %p broken, kill query", (void *)pMsg->qhandle, pRead->rpcHandle); +//// assert(pRead->contLen > 0 && pMsg->free == 1); +// +// void **qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t)pMsg->qhandle); +// if (qhandle == NULL || *qhandle == NULL) { +// vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void *)pMsg->qhandle, pRead->rpcHandle); +// } else { +// assert(*qhandle == (void *)pMsg->qhandle); +// +// qKillQuery(*qhandle); +// 
qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, true); +// } +// +// return TSDB_CODE_TSC_QUERY_CANCELLED; +// } int32_t code = TSDB_CODE_SUCCESS; void ** handle = NULL; @@ -261,7 +266,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } if (handle != NULL) { - vDebug("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); + vTrace("vgId:%d, QInfo:%p, dnode query msg disposed, create qhandle and returns to app", vgId, *handle); code = vnodePutItemIntoReadQueue(pVnode, handle, pRead->rpcHandle); if (code != TSDB_CODE_SUCCESS) { pRsp->code = code; @@ -273,10 +278,10 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { assert(pCont != NULL); void **qhandle = (void **)pRead->qhandle; - vDebug("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); + vTrace("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); // In the retrieve blocking model, only 50% CPU will be used in query processing - if (tsHalfCoresForQuery) { + if (tsRetrieveBlockingModel) { qTableQuery(*qhandle); // do execute query qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false); } else { @@ -289,7 +294,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { pRead->rpcHandle = qGetResultRetrieveMsg(*qhandle); assert(pRead->rpcHandle != NULL); - vDebug("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, + vTrace("vgId:%d, QInfo:%p, start to build retrieval rsp after query paused, %p", pVnode->vgId, *qhandle, pRead->rpcHandle); // set the real rsp error code @@ -322,7 +327,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { pRetrieve->free = htons(pRetrieve->free); pRetrieve->qhandle = htobe64(pRetrieve->qhandle); - vDebug("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void *)pRetrieve->qhandle, + vTrace("vgId:%d, QInfo:%p, retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, (void *)pRetrieve->qhandle, pRetrieve->free, pRead->rpcHandle); memset(pRet, 0, sizeof(SRspRet)); @@ -338,11 +343,12 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } if (code != TSDB_CODE_SUCCESS) { - vError("vgId:%d, invalid handle in retrieving result, code:0x%08x, QInfo:%p", pVnode->vgId, code, (void *)pRetrieve->qhandle); + vError("vgId:%d, invalid handle in retrieving result, code:%s, QInfo:%p", pVnode->vgId, tstrerror(code), (void *)pRetrieve->qhandle); vnodeBuildNoResultQueryRsp(pRet); return code; } - + + // kill current query and free corresponding resources. if (pRetrieve->free == 1) { vWarn("vgId:%d, QInfo:%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, *handle); qKillQuery(*handle); @@ -373,10 +379,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { memset(pRet->rsp, 0, sizeof(SRetrieveTableRsp)); freeHandle = true; } else { // result is not ready, return immediately - assert(buildRes == true); - // Only effects in the non-blocking model - if (!tsHalfCoresForQuery) { + if (!tsRetrieveBlockingModel) { if (!buildRes) { assert(pRead->rpcHandle != NULL); @@ -401,12 +405,11 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { // notify connection(handle) that current qhandle is created, if current connection from // client is broken, the query needs to be killed immediately. 
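// Descriptive note: vnodeNotifyCurrentQhandle (below) registers the qhandle with the rpc connection via
// rpcReportProgress, so the rpc layer can report back and the query handle can be killed/freed when the client link breaks.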
int32_t vnodeNotifyCurrentQhandle(void *handle, void *qhandle, int32_t vgId) { - SRetrieveTableMsg *killQueryMsg = rpcMallocCont(sizeof(SRetrieveTableMsg)); - killQueryMsg->qhandle = htobe64((uint64_t)qhandle); - killQueryMsg->free = htons(1); - killQueryMsg->header.vgId = htonl(vgId); - killQueryMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg)); + SRetrieveTableMsg *pMsg = rpcMallocCont(sizeof(SRetrieveTableMsg)); + pMsg->qhandle = htobe64((uint64_t)qhandle); + pMsg->header.vgId = htonl(vgId); + pMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg)); - vDebug("QInfo:%p register qhandle to connect:%p", qhandle, handle); - return rpcReportProgress(handle, (char *)killQueryMsg, sizeof(SRetrieveTableMsg)); + vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle); + return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg)); } diff --git a/src/vnode/src/vnodeStatus.c b/src/vnode/src/vnodeStatus.c new file mode 100644 index 0000000000..d09a6a8663 --- /dev/null +++ b/src/vnode/src/vnodeStatus.c @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "vnodeStatus.h" + +char* vnodeStatus[] = { + "init", + "ready", + "closing", + "updating", + "reset" +}; + +bool vnodeSetInitStatus(SVnodeObj* pVnode) { + pthread_mutex_lock(&pVnode->statusMutex); + pVnode->status = TAOS_VN_STATUS_INIT; + pthread_mutex_unlock(&pVnode->statusMutex); + return true; +} + +bool vnodeSetReadyStatus(SVnodeObj* pVnode) { + bool set = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_INIT || pVnode->status == TAOS_VN_STATUS_READY || + pVnode->status == TAOS_VN_STATUS_UPDATING || pVnode->status == TAOS_VN_STATUS_RESET) { + pVnode->status = TAOS_VN_STATUS_READY; + set = true; + } else { + vDebug("vgId:%d, cannot set status:ready, old:%s", pVnode->vgId, vnodeStatus[pVnode->status]); + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return set; +} + +bool vnodeSetClosingStatus(SVnodeObj* pVnode) { + bool set = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_READY) { + pVnode->status = TAOS_VN_STATUS_CLOSING; + set = true; + } else { + vTrace("vgId:%d, cannot set status:closing, old:%s", pVnode->vgId, vnodeStatus[pVnode->status]); + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return set; +} + +bool vnodeSetUpdatingStatus(SVnodeObj* pVnode) { + bool set = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_READY) { + pVnode->status = TAOS_VN_STATUS_UPDATING; + set = true; + } else { + vDebug("vgId:%d, cannot set status:updating, old:%s", pVnode->vgId, vnodeStatus[pVnode->status]); + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return set; +} + +bool vnodeSetResetStatus(SVnodeObj* pVnode) { + bool set = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status != TAOS_VN_STATUS_CLOSING && pVnode->status != TAOS_VN_STATUS_INIT) { + pVnode->status 
= TAOS_VN_STATUS_RESET; + set = true; + } else { + vDebug("vgId:%d, cannot set status:reset, old:%s", pVnode->vgId, vnodeStatus[pVnode->status]); + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return set; +} + +bool vnodeInInitStatus(SVnodeObj* pVnode) { + bool in = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_INIT) { + in = true; + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return in; +} + +bool vnodeInReadyStatus(SVnodeObj* pVnode) { + bool in = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_READY) { + in = true; + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return in; +} + +bool vnodeInClosingStatus(SVnodeObj* pVnode) { + bool in = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_CLOSING) { + in = true; + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return in; +} + +bool vnodeInResetStatus(SVnodeObj* pVnode) { + bool in = false; + pthread_mutex_lock(&pVnode->statusMutex); + + if (pVnode->status == TAOS_VN_STATUS_RESET) { + in = true; + } + + pthread_mutex_unlock(&pVnode->statusMutex); + return in; +} diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c new file mode 100644 index 0000000000..c67132c41f --- /dev/null +++ b/src/vnode/src/vnodeSync.c @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taosmsg.h" +#include "query.h" +#include "dnode.h" +#include "vnodeVersion.h" +#include "vnodeMain.h" + +uint32_t vnodeGetFileInfo(int32_t vgId, char *name, uint32_t *index, uint32_t eindex, int64_t *size, uint64_t *fver) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vError("vgId:%d, vnode not found while get file info", vgId); + return 0; + } + + *fver = pVnode->fversion; + uint32_t ret = tsdbGetFileInfo(pVnode->tsdb, name, index, eindex, size); + + vnodeRelease(pVnode); + return ret; +} + +int32_t vnodeGetWalInfo(int32_t vgId, char *fileName, int64_t *fileId) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vError("vgId:%d, vnode not found while get wal info", vgId); + return -1; + } + + int32_t code = walGetWalFile(pVnode->wal, fileName, fileId); + + vnodeRelease(pVnode); + return code; +} + +void vnodeNotifyRole(int32_t vgId, int8_t role) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vTrace("vgId:%d, vnode not found while notify role", vgId); + return; + } + + vInfo("vgId:%d, sync role changed from %s to %s", pVnode->vgId, syncRole[pVnode->role], syncRole[role]); + pVnode->role = role; + dnodeSendStatusMsgToMnode(); + + if (pVnode->role == TAOS_SYNC_ROLE_MASTER) { + cqStart(pVnode->cq); + } else { + cqStop(pVnode->cq); + } + + vnodeRelease(pVnode); +} + +void vnodeCtrlFlow(int32_t vgId, int32_t level) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vTrace("vgId:%d, vnode not found while flow ctrl", vgId); + return; + } + + if (pVnode->flowctrlLevel != level) { + vDebug("vgId:%d, set flowctrl level from %d to %d", pVnode->vgId, pVnode->flowctrlLevel, level); + pVnode->flowctrlLevel = level; + } + + vnodeRelease(pVnode); +} + +int32_t vnodeNotifyFileSynced(int32_t vgId, uint64_t fversion) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vError("vgId:%d, vnode not found while notify file synced", vgId); + return 0; + } + + pVnode->fversion = fversion; + pVnode->version = fversion; + vnodeSaveVersion(pVnode); + + vDebug("vgId:%d, data file is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); + int32_t code = vnodeReset(pVnode); + + vnodeRelease(pVnode); + return code; +} + +void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code) { + void *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vError("vgId:%d, vnode not found while confirm forward", vgId); + return; + } + + dnodeSendRpcVWriteRsp(pVnode, wparam, code); + vnodeRelease(pVnode); +} + +int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vError("vgId:%d, vnode not found while write to cache", vgId); + return TSDB_CODE_VND_INVALID_VGROUP_ID; + } + + int32_t code = vnodeWriteToWQueue(pVnode, wparam, qtype, rparam); + + vnodeRelease(pVnode); + return code; +} + +int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vError("vgId:%d, vnode not found while write to cache", vgId); + return -1; + } + + int32_t code = 0; + if (pVnode->isCommiting) { + vDebug("vgId:%d, vnode is commiting while get version", vgId); + code = -1; + } else { + *fver = pVnode->fversion; + *wver = pVnode->version; + } + + vnodeRelease(pVnode); + return code; +} + +void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code) { + SVnodeObj *pVnode = vparam; + 
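+  // descriptive note: the confirmed version and result code are passed on to this vnode's sync module (syncConfirmForward below)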
syncConfirmForward(pVnode->sync, version, code); +} diff --git a/src/vnode/src/vnodeVersion.c b/src/vnode/src/vnodeVersion.c index 8f6360b4f9..fb3b3ebd9e 100644 --- a/src/vnode/src/vnodeVersion.c +++ b/src/vnode/src/vnodeVersion.c @@ -15,11 +15,8 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "taoserror.h" #include "cJSON.h" #include "tglobal.h" -#include "tsdb.h" -#include "vnodeInt.h" #include "vnodeVersion.h" int32_t vnodeReadVersion(SVnodeObj *pVnode) { diff --git a/src/vnode/src/vnodeWorker.c b/src/vnode/src/vnodeWorker.c new file mode 100644 index 0000000000..d6053cf18e --- /dev/null +++ b/src/vnode/src/vnodeWorker.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . + */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "taosmsg.h" +#include "tutil.h" +#include "tqueue.h" +#include "tglobal.h" +#include "vnodeWorker.h" +#include "vnodeMain.h" + +typedef enum { + VNODE_WORKER_ACTION_CLEANUP, + VNODE_WORKER_ACTION_DESTROUY +} EVMWorkerAction; + +typedef struct { + int32_t vgId; + int32_t code; + void * rpcHandle; + SVnodeObj *pVnode; + EVMWorkerAction action; +} SVMWorkerMsg; + +typedef struct { + pthread_t thread; + int32_t workerId; +} SVMWorker; + +typedef struct { + int32_t curNum; + int32_t maxNum; + SVMWorker *worker; +} SVMWorkerPool; + +static SVMWorkerPool tsVMWorkerPool; +static taos_qset tsVMWorkerQset; +static taos_queue tsVMWorkerQueue; + +static void *vnodeMWorkerFunc(void *param); + +static int32_t vnodeStartMWorker() { + tsVMWorkerQueue = taosOpenQueue(); + if (tsVMWorkerQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; + + taosAddIntoQset(tsVMWorkerQset, tsVMWorkerQueue, NULL); + + for (int32_t i = tsVMWorkerPool.curNum; i < tsVMWorkerPool.maxNum; ++i) { + SVMWorker *pWorker = tsVMWorkerPool.worker + i; + pWorker->workerId = i; + + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&pWorker->thread, &thAttr, vnodeMWorkerFunc, pWorker) != 0) { + vError("failed to create thread to process vmworker queue, reason:%s", strerror(errno)); + } + + pthread_attr_destroy(&thAttr); + + tsVMWorkerPool.curNum = i + 1; + vDebug("vmworker:%d is launched, total:%d", pWorker->workerId, tsVMWorkerPool.maxNum); + } + + vDebug("vmworker queue:%p is allocated", tsVMWorkerQueue); + return TSDB_CODE_SUCCESS; +} + +int32_t vnodeInitMWorker() { + tsVMWorkerQset = taosOpenQset(); + + tsVMWorkerPool.maxNum = 1; + tsVMWorkerPool.curNum = 0; + tsVMWorkerPool.worker = calloc(sizeof(SVMWorker), tsVMWorkerPool.maxNum); + + if (tsVMWorkerPool.worker == NULL) return -1; + for (int32_t i = 0; i < tsVMWorkerPool.maxNum; ++i) { + SVMWorker *pWorker = tsVMWorkerPool.worker + i; + pWorker->workerId = i; + vDebug("vmworker:%d is created", i); + } + + vDebug("vmworker is initialized, num:%d qset:%p", tsVMWorkerPool.maxNum, tsVMWorkerQset); + + return vnodeStartMWorker(); +} + +static void vnodeStopMWorker() { + vDebug("vmworker queue:%p is 
freed", tsVMWorkerQueue); + taosCloseQueue(tsVMWorkerQueue); + tsVMWorkerQueue = NULL; +} + +void vnodeCleanupMWorker() { + for (int32_t i = 0; i < tsVMWorkerPool.maxNum; ++i) { + SVMWorker *pWorker = tsVMWorkerPool.worker + i; + if (pWorker->thread) { + taosQsetThreadResume(tsVMWorkerQset); + } + vDebug("vmworker:%d is closed", i); + } + + for (int32_t i = 0; i < tsVMWorkerPool.maxNum; ++i) { + SVMWorker *pWorker = tsVMWorkerPool.worker + i; + vDebug("vmworker:%d start to join", i); + if (pWorker->thread) { + pthread_join(pWorker->thread, NULL); + } + vDebug("vmworker:%d join success", i); + } + + vDebug("vmworker is closed, qset:%p", tsVMWorkerQset); + + taosCloseQset(tsVMWorkerQset); + tsVMWorkerQset = NULL; + tfree(tsVMWorkerPool.worker); + + vnodeStopMWorker(); +} + +static int32_t vnodeWriteIntoMWorker(SVnodeObj *pVnode, EVMWorkerAction action, void *rpcHandle) { + SVMWorkerMsg *pMsg = taosAllocateQitem(sizeof(SVMWorkerMsg)); + if (pMsg == NULL) return TSDB_CODE_VND_OUT_OF_MEMORY; + + pMsg->vgId = pVnode->vgId; + pMsg->pVnode = pVnode; + pMsg->rpcHandle = rpcHandle; + pMsg->action = action; + + int32_t code = taosWriteQitem(tsVMWorkerQueue, TAOS_QTYPE_RPC, pMsg); + if (code == 0) code = TSDB_CODE_DND_ACTION_IN_PROGRESS; + + return code; +} + +int32_t vnodeCleanupInMWorker(SVnodeObj *pVnode) { + vTrace("vgId:%d, will cleanup in vmworker", pVnode->vgId); + return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_CLEANUP, NULL); +} + +int32_t vnodeDestroyInMWorker(SVnodeObj *pVnode) { + vTrace("vgId:%d, will destroy in vmworker", pVnode->vgId); + return vnodeWriteIntoMWorker(pVnode, VNODE_WORKER_ACTION_DESTROUY, NULL); +} + +static void vnodeFreeMWorkerMsg(SVMWorkerMsg *pMsg) { + vTrace("vgId:%d, disposed in vmworker", pMsg->vgId); + taosFreeQitem(pMsg); +} + +static void vnodeSendVMWorkerRpcRsp(SVMWorkerMsg *pMsg) { + if (pMsg->rpcHandle != NULL) { + SRpcMsg rpcRsp = {.handle = pMsg->rpcHandle, .code = pMsg->code}; + rpcSendResponse(&rpcRsp); + } + + vnodeFreeMWorkerMsg(pMsg); +} + +static void vnodeProcessMWorkerMsg(SVMWorkerMsg *pMsg) { + pMsg->code = 0; + + switch (pMsg->action) { + case VNODE_WORKER_ACTION_CLEANUP: + vnodeCleanUp(pMsg->pVnode); + break; + case VNODE_WORKER_ACTION_DESTROUY: + vnodeDestroy(pMsg->pVnode); + break; + default: + break; + } +} + +static void *vnodeMWorkerFunc(void *param) { + while (1) { + SVMWorkerMsg *pMsg = NULL; + if (taosReadQitemFromQset(tsVMWorkerQset, NULL, (void **)&pMsg, NULL) == 0) { + vDebug("qset:%p, vmworker got no message from qset, exiting", tsVMWorkerQset); + break; + } + + vTrace("vgId:%d, action:%d will be processed in vmworker queue", pMsg->vgId, pMsg->action); + vnodeProcessMWorkerMsg(pMsg); + vnodeSendVMWorkerRpcRsp(pMsg); + } + + return NULL; +} diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index 268d1fb53b..80e2dc422e 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -19,17 +19,9 @@ #include "taoserror.h" #include "tglobal.h" #include "tqueue.h" -#include "trpc.h" -#include "tsdb.h" -#include "twal.h" -#include "tsync.h" #include "ttimer.h" -#include "tdataformat.h" -#include "vnode.h" -#include "vnodeInt.h" -#include "syncInt.h" -#include "tcq.h" #include "dnode.h" +#include "vnodeStatus.h" #define MAX_QUEUED_MSG_NUM 10000 @@ -43,15 +35,19 @@ static int32_t vnodeProcessDropStableMsg(SVnodeObj *pVnode, void *pCont, SRspRet static int32_t vnodeProcessUpdateTagValMsg(SVnodeObj *pVnode, void *pCont, SRspRet *); static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite); -void 
vnodeInitWriteFp(void) { +int32_t vnodeInitWrite(void) { vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessSubmitMsg; vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE] = vnodeProcessCreateTableMsg; vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = vnodeProcessDropTableMsg; vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = vnodeProcessAlterTableMsg; vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = vnodeProcessDropStableMsg; vnodeProcessWriteMsgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessUpdateTagValMsg; + + return 0; } +void vnodeCleanupWrite() {} + int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rparam) { int32_t code = 0; SVnodeObj *pVnode = vparam; @@ -68,7 +64,7 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara taosMsg[pHead->msgType], qtypeStr[qtype], pHead->version, pVnode->version); if (pHead->version == 0) { // from client or CQ - if (pVnode->status != TAOS_VN_STATUS_READY) { + if (!vnodeInReadyStatus(pVnode)) { vDebug("vgId:%d, msg:%s not processed since vstatus:%d, qtype:%s hver:%" PRIu64, pVnode->vgId, taosMsg[pHead->msgType], pVnode->status, qtypeStr[qtype], pHead->version); return TSDB_CODE_APP_NOT_READY; // it may be in deleting or closing state @@ -118,7 +114,7 @@ static int32_t vnodeCheckWrite(void *vparam) { return TSDB_CODE_APP_NOT_READY; } - if (pVnode->status == TAOS_VN_STATUS_CLOSING) { + if (vnodeInClosingStatus(pVnode)) { vDebug("vgId:%d, vnode status is %s, refCount:%d pVnode:%p", pVnode->vgId, vnodeStatus[pVnode->status], pVnode->refCount, pVnode); return TSDB_CODE_APP_NOT_READY; @@ -132,11 +128,6 @@ static int32_t vnodeCheckWrite(void *vparam) { return TSDB_CODE_SUCCESS; } -void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code) { - SVnodeObj *pVnode = vparam; - syncConfirmForward(pVnode->sync, version, code); -} - static int32_t vnodeProcessSubmitMsg(SVnodeObj *pVnode, void *pCont, SRspRet *pRet) { int32_t code = TSDB_CODE_SUCCESS; @@ -252,8 +243,10 @@ int32_t vnodeWriteToWQueue(void *vparam, void *wparam, int32_t qtype, void *rpar int32_t queued = atomic_add_fetch_32(&pVnode->queuedWMsg, 1); if (queued > MAX_QUEUED_MSG_NUM) { - vDebug("vgId:%d, too many msg:%d in vwqueue, flow control", pVnode->vgId, queued); - taosMsleep(1); + int32_t ms = (queued / MAX_QUEUED_MSG_NUM) * 10 + 3; + if (ms > 100) ms = 100; + vDebug("vgId:%d, too many msg:%d in vwqueue, flow control %dms", pVnode->vgId, queued, ms); + taosMsleep(ms); } code = vnodePerformFlowCtrl(pWrite); @@ -280,6 +273,8 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { SVnodeObj * pVnode = pWrite->pVnode; int32_t code = TSDB_CODE_VND_SYNCING; + if (pVnode->flowctrlLevel <= 0) code = TSDB_CODE_VND_IS_FLOWCTRL; + pWrite->processedCount++; if (pWrite->processedCount > 100) { vError("vgId:%d, msg:%p, failed to process since %s, retry:%d", pVnode->vgId, pWrite, tstrerror(code), @@ -299,8 +294,8 @@ static void vnodeFlowCtrlMsgToWQueue(void *param, void *tmrId) { static int32_t vnodePerformFlowCtrl(SVWriteMsg *pWrite) { SVnodeObj *pVnode = pWrite->pVnode; - if (pVnode->flowctrlLevel <= 0) return 0; if (pWrite->qtype != TAOS_QTYPE_RPC) return 0; + if (pVnode->queuedWMsg < MAX_QUEUED_MSG_NUM && pVnode->flowctrlLevel <= 0) return 0; if (tsFlowCtrl == 0) { int32_t ms = pow(2, pVnode->flowctrlLevel + 2); diff --git a/src/wal/inc/walInt.h b/src/wal/inc/walInt.h index 06748d885f..890b404ce9 100644 --- a/src/wal/inc/walInt.h +++ b/src/wal/inc/walInt.h @@ -38,7 +38,7 @@ extern int32_t wDebugFlag; #define 
WAL_SIGNATURE ((uint32_t)(0xFAFBFDFE)) #define WAL_PATH_LEN (TSDB_FILENAME_LEN + 12) #define WAL_FILE_LEN (WAL_PATH_LEN + 32) -#define WAL_FILE_NUM 3 +#define WAL_FILE_NUM 1 // 3 typedef struct { uint64_t version; diff --git a/src/wal/src/walWrite.c b/src/wal/src/walWrite.c index 10e1b4dd61..2253ad5c33 100644 --- a/src/wal/src/walWrite.c +++ b/src/wal/src/walWrite.c @@ -166,14 +166,14 @@ int32_t walRestore(void *handle, void *pVnode, FWalWrite writeFp) { char walName[WAL_FILE_LEN]; snprintf(walName, sizeof(pWal->name), "%s/%s%" PRId64, pWal->path, WAL_PREFIX, fileId); - wDebug("vgId:%d, file:%s, will be restored", pWal->vgId, walName); + wInfo("vgId:%d, file:%s, will be restored", pWal->vgId, walName); int32_t code = walRestoreWalFile(pWal, pVnode, writeFp, walName, fileId); if (code != TSDB_CODE_SUCCESS) { wError("vgId:%d, file:%s, failed to restore since %s", pWal->vgId, walName, tstrerror(code)); continue; } - wDebug("vgId:%d, file:%s, restore success", pWal->vgId, walName); + wInfo("vgId:%d, file:%s, restore success, wver:%" PRIu64, pWal->vgId, walName, pWal->version); count++; } @@ -267,8 +267,6 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch return TAOS_SYSTEM_ERROR(errno); } - wDebug("vgId:%d, file:%s, start to restore", pWal->vgId, name); - int32_t code = TSDB_CODE_SUCCESS; int64_t offset = 0; SWalHead *pHead = buffer; @@ -326,7 +324,7 @@ static int32_t walRestoreWalFile(SWal *pWal, void *pVnode, FWalWrite writeFp, ch offset = offset + sizeof(SWalHead) + pHead->len; - wDebug("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId, + wTrace("vgId:%d, restore wal, fileId:%" PRId64 " hver:%" PRIu64 " wver:%" PRIu64 " len:%d", pWal->vgId, fileId, pHead->version, pWal->version, pHead->len); pWal->version = pHead->version; diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile new file mode 100644 index 0000000000..e343de789e --- /dev/null +++ b/tests/Jenkinsfile @@ -0,0 +1,267 @@ +def pre_test(){ + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + sudo rmtaos + ''' + } + sh ''' + cd ${WKC} + git reset --hard + git checkout ${BRANCH} + git pull + git submodule update + cd ${WK} + git reset --hard + git checkout ${BRANCH} + git pull + export TZ=Asia/Harbin + date + rm -rf ${WK}/debug + mkdir debug + cd debug + cmake .. 
> /dev/null + make > /dev/null + make install > /dev/null + ''' + return 1 +} +pipeline { + agent none + environment{ + BRANCH = 'develop' + WK = '/var/lib/jenkins/workspace/TDinternal' + WKC= '/var/lib/jenkins/workspace/TDinternal/community' + } + + stages { + stage('Parallel test stage') { + parallel { + stage('pytest') { + agent{label '184'} + steps { + pre_test() + sh ''' + cd ${WKC}/tests + ./test-all.sh pytest + date''' + } + } + stage('test_b1') { + agent{label 'master'} + steps { + pre_test() + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + python3 concurrent_inquiry.py -c 1 + ''' + } + sh ''' + cd ${WKC}/tests + ./test-all.sh b1 + date''' + } + } + + stage('test_crash_gen') { + agent{label "185"} + steps { + pre_test() + sh ''' + cd ${WKC}/tests/pytest + ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./crash_gen.sh -a -p -t 4 -s 2000 + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + ./handle_crash_gen_val_log.sh + ''' + } + sh ''' + cd ${WKC}/tests + ./test-all.sh b2 + date + ''' + } + } + + stage('test_valgrind') { + agent{label "186"} + + steps { + pre_test() + sh ''' + cd ${WKC}/tests/pytest + ./valgrind-test.sh 2>&1 > mem-error-out.log + ./handle_val_log.sh + + date + cd ${WKC}/tests + ./test-all.sh b3 + date''' + } + } + stage('connector'){ + agent{label "release"} + steps{ + sh''' + cd ${WORKSPACE} + git checkout develop + ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/gotest + bash batchtest.sh + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/python/PYTHONConnectorChecker + python3 PythonChecker.py + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WORKSPACE}/tests/examples/JDBC/JDBCDemo/ + mvn clean package assembly:single >/dev/null + java -jar target/jdbcChecker-SNAPSHOT-jar-with-dependencies.jar -host 127.0.0.1 + ''' + } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${JENKINS_HOME}/workspace/C#NET/src/CheckC# + dotnet run + ''' + } + + } + } + stage('arm64_build'){ + agent{label 'arm64'} + steps{ + sh ''' + cd ${WK} + git fetch + git checkout develop + git pull + cd ${WKC} + git fetch + git checkout develop + git pull + git submodule update + cd ${WKC}/packaging + ./release.sh -v cluster -c aarch64 -n 2.0.0.0 -m 2.0.0.0 + + ''' + } + } + stage('arm32_build'){ + agent{label 'arm32'} + steps{ + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WK} + git fetch + git checkout develop + git pull + cd ${WKC} + git fetch + git checkout develop + git pull + git submodule update + cd ${WKC}/packaging + ./release.sh -v cluster -c aarch32 -n 2.0.0.0 -m 2.0.0.0 + + ''' + } + + } + } + } + } + + } + post { + success { + emailext ( + subject: "SUCCESSFUL: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+ 构建信息 +
+
    +
    +
  • 构建名称>>分支:${PROJECT_NAME}
  • +
  • 构建结果: Successful
  • +
  • 构建编号:${BUILD_NUMBER}
  • +
  • 触发用户:${CAUSE}
  • +
  • 变更概要:${CHANGES}
  • +
  • 构建地址:${BUILD_URL}
  • +
  • 构建日志:${BUILD_URL}console
  • +
  • 变更集:${JELLY_SCRIPT}
  • +
    +
+
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + failure { + emailext ( + subject: "FAILED: Job '${env.JOB_NAME} [${env.BUILD_NUMBER}]'", + body: ''' + + + + + + + + + + + + +

+ 构建信息 +
+
    +
    +
  • 构建名称>>分支:${PROJECT_NAME}
  • +
  • 构建结果: Failure
  • +
  • +
  • 构建编号:${BUILD_NUMBER}
  • +
  • 触发用户:${CAUSE}
  • +
  • 变更概要:${CHANGES}
  • +
  • 构建地址:${BUILD_URL}
  • +
  • 构建日志:${BUILD_URL}console
  • +
  • 变更集:${JELLY_SCRIPT}
  • +
    +
+
+ + ''', + to: "yqliu@taosdata.com,pxiao@taosdata.com", + from: "support@taosdata.com" + ) + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/mybatisplus-demo/pom.xml b/tests/examples/JDBC/mybatisplus-demo/pom.xml index 8535f3b797..e59b915d2c 100644 --- a/tests/examples/JDBC/mybatisplus-demo/pom.xml +++ b/tests/examples/JDBC/mybatisplus-demo/pom.xml @@ -38,11 +38,16 @@ h2 runtime
+ + com.alibaba + druid + 1.1.17 + com.taosdata.jdbc taos-jdbcdriver - 2.0.11 + 2.0.14 diff --git a/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml b/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml index 96667f28b8..71e518602e 100644 --- a/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml +++ b/tests/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml @@ -1,17 +1,5 @@ spring: datasource: - # driver-class-name: org.h2.Driver - # schema: classpath:db/schema-mysql.sql - # data: classpath:db/data-mysql.sql - # url: jdbc:h2:mem:test - # username: root - # password: test - - # driver-class-name: com.mysql.jdbc.Driver - # url: jdbc:mysql://master:3306/test?useSSL=false - # username: root - # password: 123456 - driver-class-name: com.taosdata.jdbc.TSDBDriver url: jdbc:TAOS://localhost:6030/mp_test user: root @@ -20,6 +8,12 @@ spring: locale: en_US.UTF-8 timezone: UTC-8 + druid: + initial-size: 5 + min-idle: 5 + max-active: 5 + + mybatis-plus: configuration: map-underscore-to-camel-case: false diff --git a/tests/examples/JDBC/taosdemo/.gitignore b/tests/examples/JDBC/taosdemo/.gitignore new file mode 100644 index 0000000000..549e00a2a9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/.gitignore @@ -0,0 +1,33 @@ +HELP.md +target/ +!.mvn/wrapper/maven-wrapper.jar +!**/src/main/**/target/ +!**/src/test/**/target/ + +### STS ### +.apt_generated +.classpath +.factorypath +.project +.settings +.springBeans +.sts4-cache + +### IntelliJ IDEA ### +.idea +*.iws +*.iml +*.ipr + +### NetBeans ### +/nbproject/private/ +/nbbuild/ +/dist/ +/nbdist/ +/.nb-gradle/ +build/ +!**/src/main/**/build/ +!**/src/test/**/build/ + +### VS Code ### +.vscode/ diff --git a/tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java b/tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java new file mode 100644 index 0000000000..a45eb6ba26 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/.mvn/wrapper/MavenWrapperDownloader.java @@ -0,0 +1,118 @@ +/* + * Copyright 2007-present the original author or authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import java.net.*; +import java.io.*; +import java.nio.channels.*; +import java.util.Properties; + +public class MavenWrapperDownloader { + + private static final String WRAPPER_VERSION = "0.5.6"; + /** + * Default URL to download the maven-wrapper.jar from, if no 'downloadUrl' is provided. + */ + private static final String DEFAULT_DOWNLOAD_URL = "https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/" + + WRAPPER_VERSION + "/maven-wrapper-" + WRAPPER_VERSION + ".jar"; + + /** + * Path to the maven-wrapper.properties file, which might contain a downloadUrl property to + * use instead of the default one. + */ + private static final String MAVEN_WRAPPER_PROPERTIES_PATH = + ".mvn/wrapper/maven-wrapper.properties"; + + /** + * Path where the maven-wrapper.jar will be saved to. 
+ */ + private static final String MAVEN_WRAPPER_JAR_PATH = + ".mvn/wrapper/maven-wrapper.jar"; + + /** + * Name of the property which should be used to override the default download url for the wrapper. + */ + private static final String PROPERTY_NAME_WRAPPER_URL = "wrapperUrl"; + + public static void main(String args[]) { + System.out.println("- Downloader started"); + File baseDirectory = new File(args[0]); + System.out.println("- Using base directory: " + baseDirectory.getAbsolutePath()); + + // If the maven-wrapper.properties exists, read it and check if it contains a custom + // wrapperUrl parameter. + File mavenWrapperPropertyFile = new File(baseDirectory, MAVEN_WRAPPER_PROPERTIES_PATH); + String url = DEFAULT_DOWNLOAD_URL; + if (mavenWrapperPropertyFile.exists()) { + FileInputStream mavenWrapperPropertyFileInputStream = null; + try { + mavenWrapperPropertyFileInputStream = new FileInputStream(mavenWrapperPropertyFile); + Properties mavenWrapperProperties = new Properties(); + mavenWrapperProperties.load(mavenWrapperPropertyFileInputStream); + url = mavenWrapperProperties.getProperty(PROPERTY_NAME_WRAPPER_URL, url); + } catch (IOException e) { + System.out.println("- ERROR loading '" + MAVEN_WRAPPER_PROPERTIES_PATH + "'"); + } finally { + try { + if (mavenWrapperPropertyFileInputStream != null) { + mavenWrapperPropertyFileInputStream.close(); + } + } catch (IOException e) { + // Ignore ... + } + } + } + System.out.println("- Downloading from: " + url); + + File outputFile = new File(baseDirectory.getAbsolutePath(), MAVEN_WRAPPER_JAR_PATH); + if (!outputFile.getParentFile().exists()) { + if (!outputFile.getParentFile().mkdirs()) { + System.out.println( + "- ERROR creating output directory '" + outputFile.getParentFile().getAbsolutePath() + "'"); + } + } + System.out.println("- Downloading to: " + outputFile.getAbsolutePath()); + try { + downloadFileFromURL(url, outputFile); + System.out.println("Done"); + System.exit(0); + } catch (Throwable e) { + System.out.println("- Error downloading"); + e.printStackTrace(); + System.exit(1); + } + } + + private static void downloadFileFromURL(String urlString, File destination) throws Exception { + if (System.getenv("MVNW_USERNAME") != null && System.getenv("MVNW_PASSWORD") != null) { + String username = System.getenv("MVNW_USERNAME"); + char[] password = System.getenv("MVNW_PASSWORD").toCharArray(); + Authenticator.setDefault(new Authenticator() { + @Override + protected PasswordAuthentication getPasswordAuthentication() { + return new PasswordAuthentication(username, password); + } + }); + } + URL website = new URL(urlString); + ReadableByteChannel rbc; + rbc = Channels.newChannel(website.openStream()); + FileOutputStream fos = new FileOutputStream(destination); + fos.getChannel().transferFrom(rbc, 0, Long.MAX_VALUE); + fos.close(); + rbc.close(); + } + +} diff --git a/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar new file mode 100644 index 0000000000..2cc7d4a55c Binary files /dev/null and b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.jar differ diff --git a/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties new file mode 100644 index 0000000000..642d572ce9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/.mvn/wrapper/maven-wrapper.properties @@ -0,0 +1,2 @@ +distributionUrl=https://repo.maven.apache.org/maven2/org/apache/maven/apache-maven/3.6.3/apache-maven-3.6.3-bin.zip 
+wrapperUrl=https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar diff --git a/tests/examples/JDBC/taosdemo/mvnw b/tests/examples/JDBC/taosdemo/mvnw new file mode 100755 index 0000000000..3c8a553731 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/mvnw @@ -0,0 +1,322 @@ +#!/bin/sh +# ---------------------------------------------------------------------------- +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- + +# ---------------------------------------------------------------------------- +# Maven Start Up Batch script +# +# Required ENV vars: +# ------------------ +# JAVA_HOME - location of a JDK home dir +# +# Optional ENV vars +# ----------------- +# M2_HOME - location of maven2's installed home dir +# MAVEN_OPTS - parameters passed to the Java VM when running Maven +# e.g. to debug Maven itself, use +# set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +# MAVEN_SKIP_RC - flag to disable loading of mavenrc files +# ---------------------------------------------------------------------------- + +if [ -z "$MAVEN_SKIP_RC" ]; then + + if [ -f /etc/mavenrc ]; then + . /etc/mavenrc + fi + + if [ -f "$HOME/.mavenrc" ]; then + . "$HOME/.mavenrc" + fi + +fi + +# OS specific support. $var _must_ be set to either true or false. +cygwin=false +darwin=false +mingw=false +case "$(uname)" in +CYGWIN*) cygwin=true ;; +MINGW*) mingw=true ;; +Darwin*) + darwin=true + # Use /usr/libexec/java_home if available, otherwise fall back to /Library/Java/Home + # See https://developer.apple.com/library/mac/qa/qa1170/_index.html + if [ -z "$JAVA_HOME" ]; then + if [ -x "/usr/libexec/java_home" ]; then + export JAVA_HOME="$(/usr/libexec/java_home)" + else + export JAVA_HOME="/Library/Java/Home" + fi + fi + ;; +esac + +if [ -z "$JAVA_HOME" ]; then + if [ -r /etc/gentoo-release ]; then + JAVA_HOME=$(java-config --jre-home) + fi +fi + +if [ -z "$M2_HOME" ]; then + ## resolve links - $0 may be a link to maven's home + PRG="$0" + + # need this for relative symlinks + while [ -h "$PRG" ]; do + ls=$(ls -ld "$PRG") + link=$(expr "$ls" : '.*-> \(.*\)$') + if expr "$link" : '/.*' >/dev/null; then + PRG="$link" + else + PRG="$(dirname "$PRG")/$link" + fi + done + + saveddir=$(pwd) + + M2_HOME=$(dirname "$PRG")/.. 
+ + # make it fully qualified + M2_HOME=$(cd "$M2_HOME" && pwd) + + cd "$saveddir" + # echo Using m2 at $M2_HOME +fi + +# For Cygwin, ensure paths are in UNIX format before anything is touched +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --unix "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --unix "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --unix "$CLASSPATH") +fi + +# For Mingw, ensure paths are in UNIX format before anything is touched +if $mingw; then + [ -n "$M2_HOME" ] && + M2_HOME="$( ( + cd "$M2_HOME" + pwd + ))" + [ -n "$JAVA_HOME" ] && + JAVA_HOME="$( ( + cd "$JAVA_HOME" + pwd + ))" +fi + +if [ -z "$JAVA_HOME" ]; then + javaExecutable="$(which javac)" + if [ -n "$javaExecutable" ] && ! [ "$(expr \"$javaExecutable\" : '\([^ ]*\)')" = "no" ]; then + # readlink(1) is not available as standard on Solaris 10. + readLink=$(which readlink) + if [ ! $(expr "$readLink" : '\([^ ]*\)') = "no" ]; then + if $darwin; then + javaHome="$(dirname \"$javaExecutable\")" + javaExecutable="$(cd \"$javaHome\" && pwd -P)/javac" + else + javaExecutable="$(readlink -f \"$javaExecutable\")" + fi + javaHome="$(dirname \"$javaExecutable\")" + javaHome=$(expr "$javaHome" : '\(.*\)/bin') + JAVA_HOME="$javaHome" + export JAVA_HOME + fi + fi +fi + +if [ -z "$JAVACMD" ]; then + if [ -n "$JAVA_HOME" ]; then + if [ -x "$JAVA_HOME/jre/sh/java" ]; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + else + JAVACMD="$(which java)" + fi +fi + +if [ ! -x "$JAVACMD" ]; then + echo "Error: JAVA_HOME is not defined correctly." >&2 + echo " We cannot execute $JAVACMD" >&2 + exit 1 +fi + +if [ -z "$JAVA_HOME" ]; then + echo "Warning: JAVA_HOME environment variable is not set." +fi + +CLASSWORLDS_LAUNCHER=org.codehaus.plexus.classworlds.launcher.Launcher + +# traverses directory structure from process work directory to filesystem root +# first directory with .mvn subdirectory is considered project base directory +find_maven_basedir() { + + if [ -z "$1" ]; then + echo "Path not specified to find_maven_basedir" + return 1 + fi + + basedir="$1" + wdir="$1" + while [ "$wdir" != '/' ]; do + if [ -d "$wdir"/.mvn ]; then + basedir=$wdir + break + fi + # workaround for JBEAP-8937 (on Solaris 10/Sparc) + if [ -d "${wdir}" ]; then + wdir=$( + cd "$wdir/.." + pwd + ) + fi + # end of workaround + done + echo "${basedir}" +} + +# concatenates all lines of a file +concat_lines() { + if [ -f "$1" ]; then + echo "$(tr -s '\n' ' ' <"$1")" + fi +} + +BASE_DIR=$(find_maven_basedir "$(pwd)") +if [ -z "$BASE_DIR" ]; then + exit 1 +fi + +########################################################################################## +# Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +# This allows using the maven wrapper in projects that prohibit checking in binary data. +########################################################################################## +if [ -r "$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found .mvn/wrapper/maven-wrapper.jar" + fi +else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Couldn't find .mvn/wrapper/maven-wrapper.jar, downloading it ..." 
+ fi + if [ -n "$MVNW_REPOURL" ]; then + jarUrl="$MVNW_REPOURL/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + else + jarUrl="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + fi + while IFS="=" read key value; do + case "$key" in wrapperUrl) + jarUrl="$value" + break + ;; + esac + done <"$BASE_DIR/.mvn/wrapper/maven-wrapper.properties" + if [ "$MVNW_VERBOSE" = true ]; then + echo "Downloading from: $jarUrl" + fi + wrapperJarPath="$BASE_DIR/.mvn/wrapper/maven-wrapper.jar" + if $cygwin; then + wrapperJarPath=$(cygpath --path --windows "$wrapperJarPath") + fi + + if command -v wget >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found wget ... using wget" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + wget "$jarUrl" -O "$wrapperJarPath" + else + wget --http-user=$MVNW_USERNAME --http-password=$MVNW_PASSWORD "$jarUrl" -O "$wrapperJarPath" + fi + elif command -v curl >/dev/null; then + if [ "$MVNW_VERBOSE" = true ]; then + echo "Found curl ... using curl" + fi + if [ -z "$MVNW_USERNAME" ] || [ -z "$MVNW_PASSWORD" ]; then + curl -o "$wrapperJarPath" "$jarUrl" -f + else + curl --user $MVNW_USERNAME:$MVNW_PASSWORD -o "$wrapperJarPath" "$jarUrl" -f + fi + + else + if [ "$MVNW_VERBOSE" = true ]; then + echo "Falling back to using Java to download" + fi + javaClass="$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.java" + # For Cygwin, switch paths to Windows format before running javac + if $cygwin; then + javaClass=$(cygpath --path --windows "$javaClass") + fi + if [ -e "$javaClass" ]; then + if [ ! -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Compiling MavenWrapperDownloader.java ..." + fi + # Compiling the Java class + ("$JAVA_HOME/bin/javac" "$javaClass") + fi + if [ -e "$BASE_DIR/.mvn/wrapper/MavenWrapperDownloader.class" ]; then + # Running the downloader + if [ "$MVNW_VERBOSE" = true ]; then + echo " - Running MavenWrapperDownloader.java ..." + fi + ("$JAVA_HOME/bin/java" -cp .mvn/wrapper MavenWrapperDownloader "$MAVEN_PROJECTBASEDIR") + fi + fi + fi +fi +########################################################################################## +# End of extension +########################################################################################## + +export MAVEN_PROJECTBASEDIR=${MAVEN_BASEDIR:-"$BASE_DIR"} +if [ "$MVNW_VERBOSE" = true ]; then + echo $MAVEN_PROJECTBASEDIR +fi +MAVEN_OPTS="$(concat_lines "$MAVEN_PROJECTBASEDIR/.mvn/jvm.config") $MAVEN_OPTS" + +# For Cygwin, switch paths to Windows format before running java +if $cygwin; then + [ -n "$M2_HOME" ] && + M2_HOME=$(cygpath --path --windows "$M2_HOME") + [ -n "$JAVA_HOME" ] && + JAVA_HOME=$(cygpath --path --windows "$JAVA_HOME") + [ -n "$CLASSPATH" ] && + CLASSPATH=$(cygpath --path --windows "$CLASSPATH") + [ -n "$MAVEN_PROJECTBASEDIR" ] && + MAVEN_PROJECTBASEDIR=$(cygpath --path --windows "$MAVEN_PROJECTBASEDIR") +fi + +# Provide a "standardized" way to retrieve the CLI args that will +# work with both Windows and non-Windows executions. 
+MAVEN_CMD_LINE_ARGS="$MAVEN_CONFIG $@" +export MAVEN_CMD_LINE_ARGS + +WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +exec "$JAVACMD" \ + $MAVEN_OPTS \ + -classpath "$MAVEN_PROJECTBASEDIR/.mvn/wrapper/maven-wrapper.jar" \ + "-Dmaven.home=${M2_HOME}" "-Dmaven.multiModuleProjectDirectory=${MAVEN_PROJECTBASEDIR}" \ + ${WRAPPER_LAUNCHER} $MAVEN_CONFIG "$@" diff --git a/tests/examples/JDBC/taosdemo/mvnw.cmd b/tests/examples/JDBC/taosdemo/mvnw.cmd new file mode 100644 index 0000000000..c8d43372c9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/mvnw.cmd @@ -0,0 +1,182 @@ +@REM ---------------------------------------------------------------------------- +@REM Licensed to the Apache Software Foundation (ASF) under one +@REM or more contributor license agreements. See the NOTICE file +@REM distributed with this work for additional information +@REM regarding copyright ownership. The ASF licenses this file +@REM to you under the Apache License, Version 2.0 (the +@REM "License"); you may not use this file except in compliance +@REM with the License. You may obtain a copy of the License at +@REM +@REM https://www.apache.org/licenses/LICENSE-2.0 +@REM +@REM Unless required by applicable law or agreed to in writing, +@REM software distributed under the License is distributed on an +@REM "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +@REM KIND, either express or implied. See the License for the +@REM specific language governing permissions and limitations +@REM under the License. +@REM ---------------------------------------------------------------------------- + +@REM ---------------------------------------------------------------------------- +@REM Maven Start Up Batch script +@REM +@REM Required ENV vars: +@REM JAVA_HOME - location of a JDK home dir +@REM +@REM Optional ENV vars +@REM M2_HOME - location of maven2's installed home dir +@REM MAVEN_BATCH_ECHO - set to 'on' to enable the echoing of the batch commands +@REM MAVEN_BATCH_PAUSE - set to 'on' to wait for a keystroke before ending +@REM MAVEN_OPTS - parameters passed to the Java VM when running Maven +@REM e.g. to debug Maven itself, use +@REM set MAVEN_OPTS=-Xdebug -Xrunjdwp:transport=dt_socket,server=y,suspend=y,address=8000 +@REM MAVEN_SKIP_RC - flag to disable loading of mavenrc files +@REM ---------------------------------------------------------------------------- + +@REM Begin all REM lines with '@' in case MAVEN_BATCH_ECHO is 'on' +@echo off +@REM set title of command window +title %0 +@REM enable echoing by setting MAVEN_BATCH_ECHO to 'on' +@if "%MAVEN_BATCH_ECHO%" == "on" echo %MAVEN_BATCH_ECHO% + +@REM set %HOME% to equivalent of $HOME +if "%HOME%" == "" (set "HOME=%HOMEDRIVE%%HOMEPATH%") + +@REM Execute a user defined script before this one +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPre +@REM check for pre script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_pre.bat" call "%HOME%\mavenrc_pre.bat" +if exist "%HOME%\mavenrc_pre.cmd" call "%HOME%\mavenrc_pre.cmd" +:skipRcPre + +@setlocal + +set ERROR_CODE=0 + +@REM To isolate internal variables from possible post scripts, we use another setlocal +@setlocal + +@REM ==== START VALIDATION ==== +if not "%JAVA_HOME%" == "" goto OkJHome + +echo. +echo Error: JAVA_HOME not found in your environment. >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +:OkJHome +if exist "%JAVA_HOME%\bin\java.exe" goto init + +echo. 
+echo Error: JAVA_HOME is set to an invalid directory. >&2 +echo JAVA_HOME = "%JAVA_HOME%" >&2 +echo Please set the JAVA_HOME variable in your environment to match the >&2 +echo location of your Java installation. >&2 +echo. +goto error + +@REM ==== END VALIDATION ==== + +:init + +@REM Find the project base dir, i.e. the directory that contains the folder ".mvn". +@REM Fallback to current working directory if not found. + +set MAVEN_PROJECTBASEDIR=%MAVEN_BASEDIR% +IF NOT "%MAVEN_PROJECTBASEDIR%"=="" goto endDetectBaseDir + +set EXEC_DIR=%CD% +set WDIR=%EXEC_DIR% +:findBaseDir +IF EXIST "%WDIR%"\.mvn goto baseDirFound +cd .. +IF "%WDIR%"=="%CD%" goto baseDirNotFound +set WDIR=%CD% +goto findBaseDir + +:baseDirFound +set MAVEN_PROJECTBASEDIR=%WDIR% +cd "%EXEC_DIR%" +goto endDetectBaseDir + +:baseDirNotFound +set MAVEN_PROJECTBASEDIR=%EXEC_DIR% +cd "%EXEC_DIR%" + +:endDetectBaseDir + +IF NOT EXIST "%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config" goto endReadAdditionalConfig + +@setlocal EnableExtensions EnableDelayedExpansion +for /F "usebackq delims=" %%a in ("%MAVEN_PROJECTBASEDIR%\.mvn\jvm.config") do set JVM_CONFIG_MAVEN_PROPS=!JVM_CONFIG_MAVEN_PROPS! %%a +@endlocal & set JVM_CONFIG_MAVEN_PROPS=%JVM_CONFIG_MAVEN_PROPS% + +:endReadAdditionalConfig + +SET MAVEN_JAVA_EXE="%JAVA_HOME%\bin\java.exe" +set WRAPPER_JAR="%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.jar" +set WRAPPER_LAUNCHER=org.apache.maven.wrapper.MavenWrapperMain + +set DOWNLOAD_URL="https://repo.maven.apache.org/maven2/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + +FOR /F "tokens=1,2 delims==" %%A IN ("%MAVEN_PROJECTBASEDIR%\.mvn\wrapper\maven-wrapper.properties") DO ( + IF "%%A"=="wrapperUrl" SET DOWNLOAD_URL=%%B +) + +@REM Extension to allow automatically downloading the maven-wrapper.jar from Maven-central +@REM This allows using the maven wrapper in projects that prohibit checking in binary data. +if exist %WRAPPER_JAR% ( + if "%MVNW_VERBOSE%" == "true" ( + echo Found %WRAPPER_JAR% + ) +) else ( + if not "%MVNW_REPOURL%" == "" ( + SET DOWNLOAD_URL="%MVNW_REPOURL%/io/takari/maven-wrapper/0.5.6/maven-wrapper-0.5.6.jar" + ) + if "%MVNW_VERBOSE%" == "true" ( + echo Couldn't find %WRAPPER_JAR%, downloading it ... + echo Downloading from: %DOWNLOAD_URL% + ) + + powershell -Command "&{"^ + "$webclient = new-object System.Net.WebClient;"^ + "if (-not ([string]::IsNullOrEmpty('%MVNW_USERNAME%') -and [string]::IsNullOrEmpty('%MVNW_PASSWORD%'))) {"^ + "$webclient.Credentials = new-object System.Net.NetworkCredential('%MVNW_USERNAME%', '%MVNW_PASSWORD%');"^ + "}"^ + "[Net.ServicePointManager]::SecurityProtocol = [Net.SecurityProtocolType]::Tls12; $webclient.DownloadFile('%DOWNLOAD_URL%', '%WRAPPER_JAR%')"^ + "}" + if "%MVNW_VERBOSE%" == "true" ( + echo Finished downloading %WRAPPER_JAR% + ) +) +@REM End of extension + +@REM Provide a "standardized" way to retrieve the CLI args that will +@REM work with both Windows and non-Windows executions. 
+set MAVEN_CMD_LINE_ARGS=%* + +%MAVEN_JAVA_EXE% %JVM_CONFIG_MAVEN_PROPS% %MAVEN_OPTS% %MAVEN_DEBUG_OPTS% -classpath %WRAPPER_JAR% "-Dmaven.multiModuleProjectDirectory=%MAVEN_PROJECTBASEDIR%" %WRAPPER_LAUNCHER% %MAVEN_CONFIG% %* +if ERRORLEVEL 1 goto error +goto end + +:error +set ERROR_CODE=1 + +:end +@endlocal & set ERROR_CODE=%ERROR_CODE% + +if not "%MAVEN_SKIP_RC%" == "" goto skipRcPost +@REM check for post script, once with legacy .bat ending and once with .cmd ending +if exist "%HOME%\mavenrc_post.bat" call "%HOME%\mavenrc_post.bat" +if exist "%HOME%\mavenrc_post.cmd" call "%HOME%\mavenrc_post.cmd" +:skipRcPost + +@REM pause the script if MAVEN_BATCH_PAUSE is set to 'on' +if "%MAVEN_BATCH_PAUSE%" == "on" pause + +if "%MAVEN_TERMINATE_CMD%" == "on" exit %ERROR_CODE% + +exit /B %ERROR_CODE% diff --git a/tests/examples/JDBC/taosdemo/pom.xml b/tests/examples/JDBC/taosdemo/pom.xml new file mode 100644 index 0000000000..5cbf6cb700 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/pom.xml @@ -0,0 +1,117 @@ + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 2.4.0 + + + com.taosdata + taosdemo + 2.0 + taosdemo + Demo project for TDengine + + + 1.8 + + + + + + + com.taosdata.jdbc + taos-jdbcdriver + 2.0.14 + + + + mysql + mysql-connector-java + 5.1.47 + + + + com.baomidou + mybatis-plus-boot-starter + 3.1.2 + + + + log4j + log4j + 1.2.17 + + + + + org.springframework.boot + spring-boot-starter-jdbc + + + org.springframework.boot + spring-boot-starter-thymeleaf + + + org.springframework.boot + spring-boot-starter-web + + + org.mybatis.spring.boot + mybatis-spring-boot-starter + 2.1.4 + + + junit + junit + 4.12 + test + + + org.springframework.boot + spring-boot-devtools + runtime + true + + + org.projectlombok + lombok + true + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + src/main/resources + + **/*.properties + **/*.xml + + true + + + src/main/java + + **/*.properties + **/*.xml + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + + diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java new file mode 100644 index 0000000000..db1b20527d --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosdemoApplication.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo; + +import org.mybatis.spring.annotation.MapperScan; +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; + +@MapperScan(basePackages = {"com.taosdata.taosdemo.mapper"}) +@SpringBootApplication +public class TaosdemoApplication { + + public static void main(String[] args) { + SpringApplication.run(TaosdemoApplication.class, args); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java new file mode 100644 index 0000000000..e58c68f7a5 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/components/TaosDemoCommandLineRunner.java @@ -0,0 +1,174 @@ +package com.taosdata.taosdemo.components; + +import com.taosdata.taosdemo.domain.*; +import com.taosdata.taosdemo.service.DatabaseService; +import com.taosdata.taosdemo.service.SubTableService; +import com.taosdata.taosdemo.service.SuperTableService; +import 
com.taosdata.taosdemo.service.data.SubTableMetaGenerator; +import com.taosdata.taosdemo.service.data.SubTableValueGenerator; +import com.taosdata.taosdemo.service.data.SuperTableMetaGenerator; +import com.taosdata.taosdemo.utils.JdbcTaosdemoConfig; +import org.apache.log4j.Logger; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.CommandLineRunner; +import org.springframework.stereotype.Component; + +import java.util.*; +import java.util.concurrent.TimeUnit; + + +@Component +public class TaosDemoCommandLineRunner implements CommandLineRunner { + + private static Logger logger = Logger.getLogger(TaosDemoCommandLineRunner.class); + @Autowired + private DatabaseService databaseService; + @Autowired + private SuperTableService superTableService; + @Autowired + private SubTableService subTableService; + + private SuperTableMeta superTableMeta; + private List subTableMetaList; + private List subTableValueList; + private List> dataList; + + + @Override + public void run(String... args) throws Exception { + // 读配置参数 + JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); + boolean isHelp = Arrays.asList(args).contains("--help"); + if (isHelp) { + JdbcTaosdemoConfig.printHelp(); + System.exit(0); + } + // 准备数据 + prepareData(config); + // 创建数据库 + createDatabaseTask(config); + // 建表 + createTableTask(config); + // 插入 + insertTask(config); + // 查询: 1. 生成查询语句, 2. 执行查询 + // 删除表 + if (config.dropTable) { + superTableService.drop(config.database, config.superTable); + } + + System.exit(0); + } + + private void createDatabaseTask(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + + Map databaseParam = new HashMap<>(); + databaseParam.put("database", config.database); + databaseParam.put("keep", Integer.toString(config.keep)); + databaseParam.put("days", Integer.toString(config.days)); + databaseParam.put("replica", Integer.toString(config.replica)); + //TODO: other database parameters + databaseService.dropDatabase(config.database); + databaseService.createDatabase(databaseParam); + databaseService.useDatabase(config.database); + + long end = System.currentTimeMillis(); + logger.info(">>> insert time cost : " + (end - start) + " ms."); + } + + // 建超级表,三种方式:1. 指定SQL,2. 指定field和tags的个数,3. 
默认 + private void createTableTask(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + if (config.doCreateTable) { + superTableService.create(superTableMeta); + // 批量建子表 + subTableService.createSubTable(subTableMetaList, config.numOfThreadsForCreate); + } + long end = System.currentTimeMillis(); + logger.info(">>> create table time cost : " + (end - start) + " ms."); + } + + private void insertTask(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + + int numOfThreadsForInsert = config.numOfThreadsForInsert; + int sleep = config.sleep; + if (config.autoCreateTable) { + // 批量插入,自动建表 + dataList.stream().forEach(subTableValues -> { + subTableService.insertAutoCreateTable(subTableValues, numOfThreadsForInsert); + sleep(sleep); + }); + } else { + dataList.stream().forEach(subTableValues -> { + subTableService.insert(subTableValues, numOfThreadsForInsert); + sleep(sleep); + }); + } + long end = System.currentTimeMillis(); + logger.info(">>> insert time cost : " + (end - start) + " ms."); + } + + private void prepareData(JdbcTaosdemoConfig config) { + long start = System.currentTimeMillis(); + // 超级表的meta + superTableMeta = createSupertable(config); + // 子表的meta + subTableMetaList = SubTableMetaGenerator.generate(superTableMeta, config.numOfTables, config.tablePrefix); + // 子表的data + subTableValueList = SubTableValueGenerator.generate(subTableMetaList, config.numOfRowsPerTable, config.startTime, config.timeGap); + // 如果有乱序,给数据搞乱 + if (config.order != 0) { + SubTableValueGenerator.disrupt(subTableValueList, config.rate, config.range); + } + // 分割数据 + int numOfTables = config.numOfTables; + int numOfTablesPerSQL = config.numOfTablesPerSQL; + int numOfRowsPerTable = config.numOfRowsPerTable; + int numOfValuesPerSQL = config.numOfValuesPerSQL; + dataList = SubTableValueGenerator.split(subTableValueList, numOfTables, numOfTablesPerSQL, numOfRowsPerTable, numOfValuesPerSQL); + long end = System.currentTimeMillis(); + logger.info(">>> prepare data time cost : " + (end - start) + " ms."); + } + + private SuperTableMeta createSupertable(JdbcTaosdemoConfig config) { + SuperTableMeta tableMeta; + // create super table + logger.info(">>> create super table <<<"); + if (config.superTableSQL != null) { + // use a sql to create super table + tableMeta = SuperTableMetaGenerator.generate(config.superTableSQL); + } else if (config.numOfFields == 0) { + // default sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase(config.database); + superTableMeta.setName(config.superTable); + List fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + fields.add(new FieldMeta("temperature", "float")); + fields.add(new FieldMeta("humidity", "int")); + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + tags.add(new TagMeta("location", "nchar(64)")); + tags.add(new TagMeta("groupId", "int")); + superTableMeta.setTags(tags); + return superTableMeta; + } else { + // create super table with specified field size and tag size + tableMeta = SuperTableMetaGenerator.generate(config.database, config.superTable, config.numOfFields, config.prefixOfFields, config.numOfTags, config.prefixOfTags); + } + return tableMeta; + } + + private static void sleep(int sleep) { + if (sleep <= 0) + return; + try { + TimeUnit.MILLISECONDS.sleep(sleep); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + +} 
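
The runner above reduces to a fixed SQL sequence: create the database, create the super table, create the sub tables from it, then batch-insert values (and optionally drop the super table at the end). As a rough, illustration-only sketch of that sequence issued directly over JDBC — not part of this patch, assuming a local TDengine at 127.0.0.1:6030, root/taosdata credentials, the taos-jdbcdriver on the classpath, and made-up sample values — it looks roughly like this:

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.Statement;

public class TaosDemoFlowSketch {
    public static void main(String[] args) throws Exception {
        // assumed taos-jdbcdriver 2.x URL format; if the driver does not auto-register,
        // Class.forName("com.taosdata.jdbc.TSDBDriver") may be needed first
        String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata";
        try (Connection conn = DriverManager.getConnection(url);
             Statement stmt = conn.createStatement()) {
            // createDatabaseTask(): keep/days/replica mirror the JdbcTaosdemoConfig defaults
            stmt.executeUpdate("create database if not exists test keep 3650 days 30 replica 1");
            stmt.executeUpdate("use test");
            // createTableTask(): the default super table built in createSupertable()
            stmt.executeUpdate("create table if not exists test.weather"
                    + " (ts timestamp, temperature float, humidity int)"
                    + " tags(location nchar(64), groupId int)");
            stmt.executeUpdate("create table if not exists test.t1 using test.weather tags('beijing', 1)");
            // insertTask(): one multi-row batch per sub table (timestamps are epoch milliseconds)
            stmt.executeUpdate("insert into test.t1 values (1609459200000, 23.5, 60) (1609459201000, 24.1, 58)");
        }
    }
}

The demo itself drives the same statements through the DatabaseService/SuperTableService/SubTableService layer and the MyBatis mappers added below, spreading sub-table creation and inserts across the configured thread pools.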
diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java new file mode 100644 index 0000000000..1cf1463f0a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/DatabaseController.java @@ -0,0 +1,40 @@ +package com.taosdata.taosdemo.controller; + +import com.taosdata.taosdemo.service.DatabaseService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.*; + +import java.util.Map; + +@RestController +@RequestMapping +public class DatabaseController { + + @Autowired + private DatabaseService databaseService; + + /** + * create database + ***/ + @PostMapping + public int create(@RequestBody Map map) { + return databaseService.createDatabase(map); + } + + + /** + * drop database + **/ + @DeleteMapping("/{dbname}") + public int delete(@PathVariable("dbname") String dbname) { + return databaseService.dropDatabase(dbname); + } + + /** + * use database + **/ + @GetMapping("/{dbname}") + public int use(@PathVariable("dbname") String dbname) { + return databaseService.useDatabase(dbname); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java new file mode 100644 index 0000000000..788f68a30a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/InsertController.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.controller; + +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class InsertController { + + //TODO:多线程写一张表, thread = 10, table = 1 + //TODO:一个批次写多张表, insert into t1 using weather values() t2 using weather values() + //TODO:插入的频率, + //TODO:指定一张表内的records数量 + //TODO:是否乱序, + //TODO:乱序的比例,乱序的范围 + //TODO:先建表,自动建表 + //TODO:一个批次写多张表 + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java new file mode 100644 index 0000000000..797c3708d3 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SubTableController.java @@ -0,0 +1,45 @@ +package com.taosdata.taosdemo.controller; + +import com.taosdata.taosdemo.domain.TableValue; +import com.taosdata.taosdemo.service.SuperTableService; +import com.taosdata.taosdemo.service.TableService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; +import org.springframework.web.bind.annotation.RestController; + +@RestController +public class SubTableController { + + @Autowired + private TableService tableService; + @Autowired + private SuperTableService superTableService; + + //TODO: 使用supertable创建一个子表 + + //TODO:使用supertable创建多个子表 + + //TODO:使用supertable多线程创建子表 + + //TODO:使用supertable多线程创建子表,指定子表的name_prefix,子表的数量,使用线程的个数 + + /** + * 创建表,超级表或者普通表 + **/ + + + /** + * 创建超级表的子表 + **/ + @PostMapping("/{database}/{superTable}") + public int createTable(@PathVariable("database") String database, + @PathVariable("superTable") String superTable, + @RequestBody TableValue 
tableMetadta) { + tableMetadta.setDatabase(database); + return 0; + } + + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java new file mode 100644 index 0000000000..cf53c1440f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/SuperTableController.java @@ -0,0 +1,26 @@ +package com.taosdata.taosdemo.controller; + +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.service.SuperTableService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.web.bind.annotation.PathVariable; +import org.springframework.web.bind.annotation.PostMapping; +import org.springframework.web.bind.annotation.RequestBody; + +public class SuperTableController { + @Autowired + private SuperTableService superTableService; + + + @PostMapping("/{database}") + public int createTable(@PathVariable("database") String database, @RequestBody SuperTableMeta tableMetadta) { + tableMetadta.setDatabase(database); + return superTableService.create(tableMetadta); + } + + //TODO: 删除超级表 + + //TODO:查询超级表 + + //TODO:统计查询表 +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java new file mode 100644 index 0000000000..dbdd978e74 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/controller/TableController.java @@ -0,0 +1,11 @@ +package com.taosdata.taosdemo.controller; + +public class TableController { + + //TODO:创建普通表,create table(ts timestamp, temperature float) + + //TODO:创建普通表,指定表的列数,包括第一列timestamp + + //TODO:创建普通表,指定表每列的name和type + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java new file mode 100644 index 0000000000..8a45e99989 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldMeta.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class FieldMeta { + private String name; + private String type; + + public FieldMeta() { + } + + public FieldMeta(String name, String type) { + this.name = name; + this.type = type; + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java new file mode 100644 index 0000000000..44805c0d7c --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/FieldValue.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class FieldValue { + private String name; + private T value; + + public FieldValue() { + } + + public FieldValue(String name, T value) { + this.name = name; + this.value = value; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java new file mode 100644 index 0000000000..a9f216f679 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/RowValue.java @@ -0,0 +1,15 @@ +package 
com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class RowValue { + private List fields; + + + public RowValue(List fields) { + this.fields = fields; + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java new file mode 100644 index 0000000000..81de882448 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableMeta.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class SubTableMeta { + + private String database; + private String supertable; + private String name; + private List tags; + private List fields; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java new file mode 100644 index 0000000000..74fb9598bc --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SubTableValue.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class SubTableValue { + + private String database; + private String supertable; + private String name; + private List tags; + private List values; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java new file mode 100644 index 0000000000..c5c65a4599 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/SuperTableMeta.java @@ -0,0 +1,14 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class SuperTableMeta { + + private String database; + private String name; + private List fields; + private List tags; +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java new file mode 100644 index 0000000000..3ab0a75c0b --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableMeta.java @@ -0,0 +1,13 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class TableMeta { + + private String database; + private String name; + private List fields; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java new file mode 100644 index 0000000000..d5502aa46f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TableValue.java @@ -0,0 +1,15 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +import java.util.List; + +@Data +public class TableValue { + + private String database; + private String name; + private List columns; + private List values; + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java new file mode 100644 index 0000000000..a385bb4e12 --- /dev/null +++ 
b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagMeta.java @@ -0,0 +1,18 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class TagMeta { + private String name; + private String type; + + public TagMeta() { + + } + + public TagMeta(String name, String type) { + this.name = name; + this.type = type; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java new file mode 100644 index 0000000000..98ea8c0dc9 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/domain/TagValue.java @@ -0,0 +1,17 @@ +package com.taosdata.taosdemo.domain; + +import lombok.Data; + +@Data +public class TagValue { + private String name; + private T value; + + public TagValue() { + } + + public TagValue(String name, T value) { + this.name = name; + this.value = value; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java new file mode 100644 index 0000000000..e535ed1f98 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.java @@ -0,0 +1,27 @@ +package com.taosdata.taosdemo.mapper; + +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.Map; + +@Repository +public interface DatabaseMapper { + + // create database if not exists XXX + int createDatabase(@Param("database") String dbname); + + // drop database if exists XXX + int dropDatabase(@Param("database") String dbname); + + // create database if not exists XXX keep XX days XX replica XX + int createDatabaseWithParameters(Map map); + + // use XXX + int useDatabase(@Param("database") String dbname); + + //TODO: alter database + + //TODO: show database + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml new file mode 100644 index 0000000000..1a1de34842 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/DatabaseMapper.xml @@ -0,0 +1,48 @@ + + + + + + + + create database if not exists ${database} + + + + DROP database if exists ${database} + + + + CREATE database if not exists ${database} + + KEEP ${keep} + + + DAYS ${days} + + + REPLICA ${replica} + + + cache ${cache} + + + blocks ${blocks} + + + minrows ${minrows} + + + maxrows ${maxrows} + + + + + use ${database} + + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java new file mode 100644 index 0000000000..d23473ba31 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.java @@ -0,0 +1,30 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SubTableValue; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface SubTableMapper { + + // 创建:子表 + int createUsingSuperTable(SubTableMeta subTableMeta); + + // 插入:一张子表多个values + int 
insertOneTableMultiValues(SubTableValue subTableValue); + + // 插入:一张子表多个values, 自动建表 + int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue); + + // 插入:多张表多个values + int insertMultiTableMultiValues(@Param("tables") List tables); + + // 插入:多张表多个values,自动建表 + int insertMultiTableMultiValuesUsingSuperTable(@Param("tables") List tables); + + // + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml new file mode 100644 index 0000000000..2fb94e99b7 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SubTableMapper.xml @@ -0,0 +1,81 @@ + + + + + + + + CREATE table IF NOT EXISTS ${database}.${name} USING ${supertable} TAGS + + #{tag.value} + + + + + + INSERT INTO ${database}.${name} + VALUES + + + #{field.value} + + + + + + + INSERT INTO ${database}.${name} USING ${supertable} TAGS + + #{tag.value} + + VALUES + + + #{field.value} + + + + + + + + + + + INSERT INTO + + ${table.database}.${table.name} + VALUES + + + #{field.value} + + + + + + + + INSERT INTO + + ${table.database}.${table.name} USING ${table.supertable} TAGS + + #{tag.value} + + VALUES + + + #{field.value} + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java new file mode 100644 index 0000000000..c8610fac90 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.java @@ -0,0 +1,33 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.SuperTableMeta; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +@Repository +public interface SuperTableMapper { + + // 创建超级表,使用自己定义的SQL语句 + int createSuperTableUsingSQL(@Param("createSuperTableSQL") String sql); + + // 创建超级表 create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...) 
+ int createSuperTable(SuperTableMeta tableMetadata); + + // 删除超级表 drop table if exists xxx; + int dropSuperTable(@Param("database") String database, @Param("name") String name); + + // + + // + + // + + // + + // + + // + + // + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml new file mode 100644 index 0000000000..8b83d57a4b --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/SuperTableMapper.xml @@ -0,0 +1,41 @@ + + + + + + + ${createSuperTableSQL} + + + + + create table if not exists ${database}.${name} + + ${field.name} ${field.type} + + tags + + ${tag.name} ${tag.type} + + + + + + drop table if exists ${database}.${name} + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java new file mode 100644 index 0000000000..f00f6c9694 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.java @@ -0,0 +1,28 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.TableMeta; +import com.taosdata.taosdemo.domain.TableValue; +import org.apache.ibatis.annotations.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface TableMapper { + + // 创建:普通表 + int create(TableMeta tableMeta); + + // 插入:一张表多个value + int insertOneTableMultiValues(TableValue values); + + // 插入: 一张表多个value,指定的列 + int insertOneTableMultiValuesWithColumns(TableValue values); + + // 插入:多个表多个value + int insertMultiTableMultiValues(@Param("tables") List tables); + + // 插入:多个表多个value, 指定的列 + int insertMultiTableMultiValuesWithColumns(@Param("tables") List tables); + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml new file mode 100644 index 0000000000..e2e7cbb30d --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/mapper/TableMapper.xml @@ -0,0 +1,68 @@ + + + + + + + + create table if not exists ${database}.${name} + + ${field.name} ${field.type} + + + + + + insert into ${database}.${name} values + + + ${field.value} + + + + + + + insert into ${database}.${name} + + ${column.name} + + values + + + ${field.value} + + + + + + + insert into + + ${table.database}.${table.name} values + + + ${field.value} + + + + + + + + insert into + + ${table.database}.${table.name} + + ${column.name} + + values + + + ${field.value} + + + + + + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java new file mode 100644 index 0000000000..4afbe9dae8 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/AbstractService.java @@ -0,0 +1,35 @@ +package com.taosdata.taosdemo.service; + +import java.util.List; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; + +public class AbstractService { + + protected int getAffectRows(List> futureList) { + int count = 0; + for (Future future : futureList) { + 
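+            // Future.get() blocks until the submitted create/insert task finishes; a task that
+            // fails or is interrupted only logs its stack trace and contributes 0 to the total.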
try { + count += future.get(); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } + } + return count; + } + + protected int getAffectRows(Future future) { + int count = 0; + try { + count += future.get(); + } catch (InterruptedException e) { + e.printStackTrace(); + } catch (ExecutionException e) { + e.printStackTrace(); + } + return count; + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java new file mode 100644 index 0000000000..e9aa2727a0 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java @@ -0,0 +1,38 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.mapper.DatabaseMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.Map; + +@Service +public class DatabaseService { + + @Autowired + private DatabaseMapper databaseMapper; + + // 建库,指定 name + public int createDatabase(String database) { + return databaseMapper.createDatabase(database); + } + + // 建库,指定参数 keep,days,replica等 + public int createDatabase(Map map) { + if (map.isEmpty()) + return 0; + if (map.containsKey("database") && map.size() == 1) + return databaseMapper.createDatabase(map.get("database")); + return databaseMapper.createDatabaseWithParameters(map); + } + + // drop database + public int dropDatabase(String dbname) { + return databaseMapper.dropDatabase(dbname); + } + + // use database + public int useDatabase(String dbname) { + return databaseMapper.useDatabase(dbname); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java new file mode 100644 index 0000000000..07c315b65a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java @@ -0,0 +1,118 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SubTableValue; +import com.taosdata.taosdemo.mapper.SubTableMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +@Service +public class SubTableService extends AbstractService { + + @Autowired + private SubTableMapper mapper; + + /** + * 1. 选择database,找到所有supertable + * 2. 选择supertable,可以拿到表结构,包括field和tag + * 3. 指定子表的前缀和个数 + * 4. 
指定创建子表的线程数 + */ + //TODO:指定database、supertable、子表前缀、子表个数、线程数 + + // 多线程创建表,指定线程个数 + public int createSubTable(List subTables, int threadSize) { + ExecutorService executor = Executors.newFixedThreadPool(threadSize); + List> futureList = new ArrayList<>(); + for (SubTableMeta subTableMeta : subTables) { + Future future = executor.submit(() -> createSubTable(subTableMeta)); + futureList.add(future); + } + executor.shutdown(); + return getAffectRows(futureList); + } + + + // 创建一张子表,可以指定database,supertable,tablename,tag值 + public int createSubTable(SubTableMeta subTableMeta) { + return mapper.createUsingSuperTable(subTableMeta); + } + + // 单线程创建多张子表,每张子表分别可以指定自己的database,supertable,tablename,tag值 + public int createSubTable(List subTables) { + return createSubTable(subTables, 1); + } + + /*************************************************************************************************************************/ + // 插入:多线程,多表 + public int insert(List subTableValues, int threadSize) { + ExecutorService executor = Executors.newFixedThreadPool(threadSize); + Future future = executor.submit(() -> insert(subTableValues)); + executor.shutdown(); + return getAffectRows(future); + } + + // 插入:多线程,多表, 自动建表 + public int insertAutoCreateTable(List subTableValues, int threadSize) { + ExecutorService executor = Executors.newFixedThreadPool(threadSize); + Future future = executor.submit(() -> insertAutoCreateTable(subTableValues)); + executor.shutdown(); + return getAffectRows(future); + } + + // 插入:单表,insert into xxx values(),()... + public int insert(SubTableValue subTableValue) { + return mapper.insertOneTableMultiValues(subTableValue); + } + + // 插入: 多表,insert into xxx values(),()... xxx values(),()... + public int insert(List subTableValues) { + return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues); + } + + // 插入:单表,自动建表, insert into xxx using xxx tags(...) values(),()... + public int insertAutoCreateTable(SubTableValue subTableValue) { + return mapper.insertOneTableMultiValuesUsingSuperTable(subTableValue); + } + + // 插入:多表,自动建表, insert into xxx using XXX tags(...) values(),()... xxx using XXX tags(...) values(),()... 
+ public int insertAutoCreateTable(List subTableValues) { + return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues); + } + + +// ExecutorService executors = Executors.newFixedThreadPool(threadSize); +// int count = 0; +// +// // +// List subTableValues = new ArrayList<>(); +// for (int tableIndex = 1; tableIndex <= numOfTablesPerSQL; tableIndex++) { +// // each table +// SubTableValue subTableValue = new SubTableValue(); +// subTableValue.setDatabase(); +// subTableValue.setName(); +// subTableValue.setSupertable(); +// +// List values = new ArrayList<>(); +// for (int valueCnt = 0; valueCnt < numOfValuesPerSQL; valueCnt++) { +// List fields = new ArrayList<>(); +// for (int fieldInd = 0; fieldInd <; fieldInd++) { +// FieldValue field = new FieldValue<>("", ""); +// fields.add(field); +// } +// RowValue row = new RowValue(); +// row.setFields(fields); +// values.add(row); +// } +// subTableValue.setValues(values); +// subTableValues.add(subTableValue); +// } + + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java new file mode 100644 index 0000000000..7f6836c999 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java @@ -0,0 +1,22 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.mapper.SuperTableMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +@Service +public class SuperTableService { + + @Autowired + private SuperTableMapper superTableMapper; + + // 创建超级表,指定每个field的名称和类型,每个tag的名称和类型 + public int create(SuperTableMeta superTableMeta) { + return superTableMapper.createSuperTable(superTableMeta); + } + + public void drop(String database, String name) { + superTableMapper.dropSuperTable(database, name); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java new file mode 100644 index 0000000000..bada6de708 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java @@ -0,0 +1,42 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.TableMeta; +import com.taosdata.taosdemo.mapper.TableMapper; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.stereotype.Service; + +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.Future; + +@Service +public class TableService extends AbstractService { + + @Autowired + private TableMapper tableMapper; + + //创建一张表 + public int create(TableMeta tableMeta) { + return tableMapper.create(tableMeta); + } + + //创建多张表 + public int create(List tables) { + return create(tables, 1); + } + + //多线程创建多张表 + public int create(List tables, int threadSize) { + ExecutorService executors = Executors.newFixedThreadPool(threadSize); + List> futures = new ArrayList<>(); + for (TableMeta table : tables) { + Future future = executors.submit(() -> create(table)); + futures.add(future); + } + return getAffectRows(futures); + } + + +} diff --git 
a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java new file mode 100644 index 0000000000..73cd981a46 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java @@ -0,0 +1,48 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.FieldValue; +import com.taosdata.taosdemo.domain.RowValue; +import com.taosdata.taosdemo.utils.DataGenerator; + +import java.util.*; + +public class FieldValueGenerator { + + public static Random random = new Random(System.currentTimeMillis()); + + // 生成start到end的时间序列,时间戳为顺序,不含有乱序,field的value为随机生成 + public static List generate(long start, long end, long timeGap, List fieldMetaList) { + List values = new ArrayList<>(); + + for (long ts = start; ts < end; ts += timeGap) { + List fieldValues = new ArrayList<>(); + // timestamp + fieldValues.add(new FieldValue(fieldMetaList.get(0).getName(), ts)); + // other values + for (int fieldInd = 1; fieldInd < fieldMetaList.size(); fieldInd++) { + FieldMeta fieldMeta = fieldMetaList.get(fieldInd); + fieldValues.add(new FieldValue(fieldMeta.getName(), DataGenerator.randomValue(fieldMeta.getType()))); + } + values.add(new RowValue(fieldValues)); + } + return values; + } + + // 生成start到end的时间序列,时间戳为顺序,含有乱序,rate为乱序的比例,range为乱序前跳范围,field的value为随机生成 + public static List disrupt(List values, int rate, long range) { + long timeGap = (long) (values.get(1).getFields().get(0).getValue()) - (long) (values.get(0).getFields().get(0).getValue()); + int bugSize = values.size() * rate / 100; + Set bugIndSet = new HashSet<>(); + while (bugIndSet.size() < bugSize) { + bugIndSet.add(random.nextInt(values.size())); + } + for (Integer bugInd : bugIndSet) { + Long timestamp = (Long) values.get(bugInd).getFields().get(0).getValue(); + Long newTimestamp = timestamp - timeGap - random.nextInt((int) range); + values.get(bugInd).getFields().get(0).setValue(newTimestamp); + } + + return values; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java new file mode 100644 index 0000000000..d15ad0d8bd --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java @@ -0,0 +1,30 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagValue; + +import java.util.ArrayList; +import java.util.List; + +public class SubTableMetaGenerator { + + // 创建tableSize张子表,使用tablePrefix作为子表名的前缀,使用superTableMeta的元数据 + // create table xxx using XXX tags(XXX) + public static List generate(SuperTableMeta superTableMeta, int tableSize, String tablePrefix) { + List subTableMetaList = new ArrayList<>(); + for (int i = 1; i <= tableSize; i++) { + SubTableMeta subTableMeta = new SubTableMeta(); + // create table xxx.xxx using xxx tags(...) 
+ subTableMeta.setDatabase(superTableMeta.getDatabase()); + subTableMeta.setName(tablePrefix + i); + subTableMeta.setSupertable(superTableMeta.getName()); + subTableMeta.setFields(superTableMeta.getFields()); + List tagValues = TagValueGenerator.generate(superTableMeta.getTags()); + subTableMeta.setTags(tagValues); + subTableMetaList.add(subTableMeta); + } + return subTableMetaList; + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java new file mode 100644 index 0000000000..a36f718f83 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableValueGenerator.java @@ -0,0 +1,84 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.RowValue; +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SubTableValue; +import com.taosdata.taosdemo.utils.TimeStampUtil; +import org.springframework.beans.BeanUtils; + +import java.util.ArrayList; +import java.util.List; + +public class SubTableValueGenerator { + + public static List generate(List subTableMetaList, int numOfRowsPerTable, long start, long timeGap) { + List subTableValueList = new ArrayList<>(); + + subTableMetaList.stream().forEach((subTableMeta) -> { + // insert into xxx.xxx using xxxx tags(...) values(),()... + SubTableValue subTableValue = new SubTableValue(); + subTableValue.setDatabase(subTableMeta.getDatabase()); + subTableValue.setName(subTableMeta.getName()); + subTableValue.setSupertable(subTableMeta.getSupertable()); + subTableValue.setTags(subTableMeta.getTags()); + TimeStampUtil.TimeTuple tuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable); + List values = FieldValueGenerator.generate(tuple.start, tuple.end, tuple.timeGap, subTableMeta.getFields()); + subTableValue.setValues(values); + subTableValueList.add(subTableValue); + }); + return subTableValueList; + } + + public static void disrupt(List subTableValueList, int rate, long range) { + subTableValueList.stream().forEach((tableValue) -> { + List values = tableValue.getValues(); + FieldValueGenerator.disrupt(values, rate, range); + }); + } + + public static List> split(List subTableValueList, int numOfTables, int numOfTablesPerSQL, int numOfRowsPerTable, int numOfValuesPerSQL) { + List> dataList = new ArrayList<>(); + + if (numOfRowsPerTable < numOfValuesPerSQL) + numOfValuesPerSQL = numOfRowsPerTable; + if (numOfTables < numOfTablesPerSQL) + numOfTablesPerSQL = numOfTables; + + //table + for (int tableCnt = 0; tableCnt < numOfTables; ) { + int tableSize = numOfTablesPerSQL; + if (tableCnt + tableSize > numOfTables) { + tableSize = numOfTables - tableCnt; + } + // row + for (int rowCnt = 0; rowCnt < numOfRowsPerTable; ) { + int rowSize = numOfValuesPerSQL; + if (rowCnt + rowSize > numOfRowsPerTable) { + rowSize = numOfRowsPerTable - rowCnt; + } + // System.out.println("rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", tableCnt: " + tableCnt + ", tableSize: " + tableSize); + // split + List blocks = subTableValueList.subList(tableCnt, tableCnt + tableSize); + List newBlocks = new ArrayList<>(); + for (int i = 0; i < blocks.size(); i++) { + SubTableValue subTableValue = blocks.get(i); + SubTableValue newSubTableValue = new SubTableValue(); + BeanUtils.copyProperties(subTableValue, newSubTableValue); + List values = subTableValue.getValues().subList(rowCnt, rowCnt + rowSize); + 
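+                    // copyProperties keeps the table identity (database, name, supertable, tags);
+                    // only the rowCnt..rowCnt+rowSize slice of values goes into this block, so one
+                    // generated INSERT never exceeds numOfValuesPerSQL rows for any single table.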
newSubTableValue.setValues(values); + newBlocks.add(newSubTableValue); + } + dataList.add(newBlocks); + + rowCnt += rowSize; + } + tableCnt += tableSize; + } + return dataList; + } + + public static void main(String[] args) { + split(null, 99, 10, 99, 10); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java new file mode 100644 index 0000000000..05aefd01ac --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java @@ -0,0 +1,80 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import com.taosdata.taosdemo.utils.TaosConstants; + +import java.util.ArrayList; +import java.util.List; + +public class SuperTableMetaGenerator { + + // 创建超级表,使用指定SQL语句 + public static SuperTableMeta generate(String superTableSQL) { + SuperTableMeta tableMeta = new SuperTableMeta(); + // for example : create table superTable (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int) + superTableSQL = superTableSQL.trim().toLowerCase(); + if (!superTableSQL.startsWith("create")) + throw new RuntimeException("invalid create super table SQL"); + + if (superTableSQL.contains("tags")) { + String tagSQL = superTableSQL.substring(superTableSQL.indexOf("tags") + 4).trim(); + tagSQL = tagSQL.substring(tagSQL.indexOf("(") + 1, tagSQL.lastIndexOf(")")); + String[] tagPairs = tagSQL.split(","); + List tagMetaList = new ArrayList<>(); + for (String tagPair : tagPairs) { + String name = tagPair.trim().split("\\s+")[0]; + String type = tagPair.trim().split("\\s+")[1]; + tagMetaList.add(new TagMeta(name, type)); + } + tableMeta.setTags(tagMetaList); + superTableSQL = superTableSQL.substring(0, superTableSQL.indexOf("tags")); + } + if (superTableSQL.contains("(")) { + String fieldSQL = superTableSQL.substring(superTableSQL.indexOf("(") + 1, superTableSQL.indexOf(")")); + String[] fieldPairs = fieldSQL.split(","); + List fieldList = new ArrayList<>(); + for (String fieldPair : fieldPairs) { + String name = fieldPair.trim().split("\\s+")[0]; + String type = fieldPair.trim().split("\\s+")[1]; + fieldList.add(new FieldMeta(name, type)); + } + tableMeta.setFields(fieldList); + superTableSQL = superTableSQL.substring(0, superTableSQL.indexOf("(")); + } + superTableSQL = superTableSQL.substring(superTableSQL.indexOf("table") + 5).trim(); + if (superTableSQL.contains(".")) { + String database = superTableSQL.split("\\.")[0]; + tableMeta.setDatabase(database); + superTableSQL = superTableSQL.substring(superTableSQL.indexOf(".") + 1); + } + tableMeta.setName(superTableSQL.trim()); + + return tableMeta; + } + + // 创建超级表,指定field和tag的个数 + public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize, String tagPrefix) { + if (fieldSize < 2 || tagSize < 1) { + throw new RuntimeException("create super table but fieldSize less than 2 or tagSize less than 1"); + } + SuperTableMeta tableMetadata = new SuperTableMeta(); + tableMetadata.setDatabase(database); + tableMetadata.setName(name); + // fields + List fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + for (int i = 1; i <= fieldSize; i++) { + fields.add(new FieldMeta(fieldPrefix + "" + i, 
TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length])); + } + tableMetadata.setFields(fields); + // tags + List tags = new ArrayList<>(); + for (int i = 1; i <= tagSize; i++) { + tags.add(new TagMeta(tagPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length])); + } + tableMetadata.setTags(tags); + return tableMetadata; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java new file mode 100644 index 0000000000..b8024fea45 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java @@ -0,0 +1,24 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.TagMeta; +import com.taosdata.taosdemo.domain.TagValue; +import com.taosdata.taosdemo.utils.DataGenerator; + +import java.util.ArrayList; +import java.util.List; + +public class TagValueGenerator { + + // 创建标签值:使用tagMetas + public static List generate(List tagMetas) { + List tagValues = new ArrayList<>(); + for (int i = 0; i < tagMetas.size(); i++) { + TagMeta tagMeta = tagMetas.get(i); + TagValue tagValue = new TagValue(); + tagValue.setName(tagMeta.getName()); + tagValue.setValue(DataGenerator.randomValue(tagMeta.getType())); + tagValues.add(tagValue); + } + return tagValues; + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java new file mode 100644 index 0000000000..a200d17ef6 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/DataGenerator.java @@ -0,0 +1,120 @@ +package com.taosdata.taosdemo.utils; + +import java.util.Random; + +public class DataGenerator { + private static Random random = new Random(System.currentTimeMillis()); + private static final String alphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; + + // "timestamp", "int", "bigint", "float", "double", "binary(64)", "smallint", "tinyint", "bool", "nchar(64)", + + public static Object randomValue(String type) { + int length = 64; + if (type.contains("(")) { + length = Integer.parseInt(type.substring(type.indexOf("(") + 1, type.indexOf(")"))); + type = type.substring(0, type.indexOf("(")); + } + switch (type.trim().toLowerCase()) { + case "timestamp": + return randomTimestamp(); + case "int": + return randomInt(); + case "bigint": + return randomBigint(); + case "float": + return randomFloat(); + case "double": + return randomDouble(); + case "binary": + return randomBinary(length); + case "smallint": + return randomSmallint(); + case "tinyint": + return randomTinyint(); + case "bool": + return randomBoolean(); + case "nchar": + return randomNchar(length); + default: + throw new IllegalArgumentException("Unexpected value: " + type); + } + } + + public static Long randomTimestamp() { + long start = System.currentTimeMillis(); + return randomTimestamp(start, start + 60l * 60l * 1000l); + } + + public static Long randomTimestamp(Long start, Long end) { + return start + (long) random.nextInt((int) (end - start)); + } + + public static String randomNchar(int length) { + return randomChinese(length); + } + + public static Boolean randomBoolean() { + return random.nextBoolean(); + } + + public static Integer randomTinyint() { + return randomInt(-127, 127); + } + + public static Integer 
randomSmallint() { + return randomInt(-32767, 32767); + } + + public static String randomBinary(int length) { + return randomString(length); + } + + public static String randomString(int length) { + String zh_en = ""; + for (int i = 0; i < length; i++) { + zh_en += alphabet.charAt(random.nextInt(alphabet.length())); + } + return zh_en; + } + + public static String randomChinese(int length) { + String zh_cn = ""; + int bottom = Integer.parseInt("4e00", 16); + int top = Integer.parseInt("9fa5", 16); + + for (int i = 0; i < length; i++) { + char c = (char) (random.nextInt(top - bottom + 1) + bottom); + zh_cn += new String(new char[]{c}); + } + return zh_cn; + } + + public static Double randomDouble() { + return randomDouble(0, 100); + } + + public static Double randomDouble(double bottom, double top) { + return bottom + (top - bottom) * random.nextDouble(); + } + + public static Float randomFloat() { + return randomFloat(0, 100); + } + + public static Float randomFloat(float bottom, float top) { + return bottom + (top - bottom) * random.nextFloat(); + } + + public static Long randomBigint() { + return random.nextLong(); + } + + public static Integer randomInt(int bottom, int top) { + return bottom + random.nextInt((top - bottom)); + } + + public static Integer randomInt() { + return randomInt(0, 100); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java new file mode 100644 index 0000000000..4e6f640330 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/JdbcTaosdemoConfig.java @@ -0,0 +1,204 @@ +package com.taosdata.taosdemo.utils; + +public final class JdbcTaosdemoConfig { + // instance + public String host; //host + public int port = 6030; //port + public String user = "root"; //user + public String password = "taosdata"; //password + // database + public String database = "test"; //database + public int keep = 3650; //keep + public int days = 30; //days + public int replica = 1; //replica + //super table + public boolean doCreateTable = true; + public String superTable = "weather"; //super table name + public String prefixOfFields = "col"; + public int numOfFields; + public String prefixOfTags = "tag"; + public int numOfTags; + public String superTableSQL; + //sub table + public String tablePrefix = "t"; + public int numOfTables = 100; + public int numOfThreadsForCreate = 1; + // insert task + public boolean autoCreateTable; + public int numOfRowsPerTable = 100; + public int numOfThreadsForInsert = 1; + public int numOfTablesPerSQL = 10; + public int numOfValuesPerSQL = 10; + public long startTime; + public long timeGap; + public int sleep = 0; + public int order = 0; + public int rate = 10; + public long range = 1000l; + // select task + + // drop task + public boolean dropTable = false; + + public static void printHelp() { + System.out.println("Usage: java -jar jdbc-taosdemo-2.0.jar [OPTION...]"); + // instance + System.out.println("-host The host to connect to TDengine which you must specify"); + System.out.println("-port The TCP/IP port number to use for the connection. Default is 6030"); + System.out.println("-user The TDengine user name to use when connecting to the server. Default is 'root'"); + System.out.println("-password The password to use when connecting to the server.Default is 'taosdata'"); + // database + System.out.println("-database Destination database. 
Default is 'test'"); + System.out.println("-keep database keep parameter. Default is 3650"); + System.out.println("-days database days parameter. Default is 30"); + System.out.println("-replica database replica parameter. Default 1, min: 1, max: 3"); + // super table + System.out.println("-doCreateTable do create super table and sub table, true or false, Default true"); + System.out.println("-superTable super table name. Default 'weather'"); + System.out.println("-prefixOfFields The prefix of field in super table. Default is 'col'"); + System.out.println("-numOfFields The number of field in super table. Default is (ts timestamp, temperature float, humidity int)."); + System.out.println("-prefixOfTags The prefix of tag in super table. Default is 'tag'"); + System.out.println("-numOfTags The number of tag in super table. Default is (location nchar(64), groupId int)."); + System.out.println("-superTableSQL specify a sql statement for the super table.\n" + + " Default is 'create table weather(ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int). \n" + + " if you use this parameter, the numOfFields and numOfTags will be invalid'"); + // sub table + System.out.println("-tablePrefix The prefix of sub tables. Default is 't'"); + System.out.println("-numOfTables The number of tables. Default is 1"); + System.out.println("-numOfThreadsForCreate The number of thread during create sub table. Default is 1"); + // insert task + System.out.println("-autoCreateTable Use auto Create sub tables SQL. Default is false"); + System.out.println("-numOfRowsPerTable The number of records per table. Default is 1"); + System.out.println("-numOfThreadsForInsert The number of threads during insert row. Default is 1"); + System.out.println("-numOfTablesPerSQL The number of table per SQL. Default is 1"); + System.out.println("-numOfValuesPerSQL The number of value per SQL. Default is 1"); + System.out.println("-startTime start time for insert task, The format is \"yyyy-MM-dd HH:mm:ss.SSS\"."); + System.out.println("-timeGap the number of time gap. Default is 1000 ms"); + System.out.println("-sleep The number of milliseconds for sleep after each insert. default is 0"); + System.out.println("-order Insert mode--0: In order, 1: Out of order. Default is in order"); + System.out.println("-rate The proportion of data out of order. effective only if order is 1. min 0, max 100, default is 10"); + System.out.println("-range The range of data out of order. effective only if order is 1. default is 1000 ms"); + + // query task +// System.out.println("-sqlFile The select sql file"); + // drop task + System.out.println("-dropTable Drop data before quit. 
Default is false"); + System.out.println("--help Give this help list"); + } + + /** + * parse args from command line + * + * @param args command line args + * @return JdbcTaosdemoConfig + */ + public JdbcTaosdemoConfig(String[] args) { + for (int i = 0; i < args.length; i++) { + // instance + if ("-host".equals(args[i]) && i < args.length - 1) { + host = args[++i]; + } + if ("-port".equals(args[i]) && i < args.length - 1) { + port = Integer.parseInt(args[++i]); + } + if ("-user".equals(args[i]) && i < args.length - 1) { + user = args[++i]; + } + if ("-password".equals(args[i]) && i < args.length - 1) { + password = args[++i]; + } + // database + if ("-database".equals(args[i]) && i < args.length - 1) { + database = args[++i]; + } + if ("-keep".equals(args[i]) && i < args.length - 1) { + keep = Integer.parseInt(args[++i]); + } + if ("-days".equals(args[i]) && i < args.length - 1) { + days = Integer.parseInt(args[++i]); + } + if ("-replica".equals(args[i]) && i < args.length - 1) { + replica = Integer.parseInt(args[++i]); + } + // super table + if ("-doCreateTable".equals(args[i]) && i < args.length - 1) { + doCreateTable = Boolean.parseBoolean(args[++i]); + } + if ("-superTable".equals(args[i]) && i < args.length - 1) { + superTable = args[++i]; + } + if ("-prefixOfFields".equals(args[i]) && i < args.length - 1) { + prefixOfFields = args[++i]; + } + if ("-numOfFields".equals(args[i]) && i < args.length - 1) { + numOfFields = Integer.parseInt(args[++i]); + } + if ("-prefixOfTags".equals(args[i]) && i < args.length - 1) { + prefixOfTags = args[++i]; + } + if ("-numOfTags".equals(args[i]) && i < args.length - 1) { + numOfTags = Integer.parseInt(args[++i]); + } + if ("-superTableSQL".equals(args[i]) && i < args.length - 1) { + superTableSQL = args[++i]; + } + // sub table + if ("-tablePrefix".equals(args[i]) && i < args.length - 1) { + tablePrefix = args[++i]; + } + if ("-numOfTables".equals(args[i]) && i < args.length - 1) { + numOfTables = Integer.parseInt(args[++i]); + } + if ("-autoCreateTable".equals(args[i]) && i < args.length - 1) { + autoCreateTable = Boolean.parseBoolean(args[++i]); + } + if ("-numOfThreadsForCreate".equals(args[i]) && i < args.length - 1) { + numOfThreadsForCreate = Integer.parseInt(args[++i]); + } + // insert task + if ("-numOfRowsPerTable".equals(args[i]) && i < args.length - 1) { + numOfRowsPerTable = Integer.parseInt(args[++i]); + } + if ("-numOfThreadsForInsert".equals(args[i]) && i < args.length - 1) { + numOfThreadsForInsert = Integer.parseInt(args[++i]); + } + if ("-numOfTablesPerSQL".equals(args[i]) && i < args.length - 1) { + numOfTablesPerSQL = Integer.parseInt(args[++i]); + } + if ("-numOfValuesPerSQL".equals(args[i]) && i < args.length - 1) { + numOfValuesPerSQL = Integer.parseInt(args[++i]); + } + if ("-startTime".equals(args[i]) && i < args.length - 1) { + startTime = TimeStampUtil.datetimeToLong(args[++i]); + } + if ("-timeGap".equals(args[i]) && i < args.length - 1) { + timeGap = Long.parseLong(args[++i]); + } + if ("-sleep".equals(args[i]) && i < args.length - 1) { + sleep = Integer.parseInt(args[++i]); + } + if ("-order".equals(args[i]) && i < args.length - 1) { + order = Integer.parseInt(args[++i]); + } + if ("-rate".equals(args[i]) && i < args.length - 1) { + rate = Integer.parseInt(args[++i]); + if (rate < 0 || rate > 100) + throw new IllegalArgumentException("rate must between 0 and 100"); + } + if ("-range".equals(args[i]) && i < args.length - 1) { + range = Integer.parseInt(args[++i]); + } + // select task + + // drop task + if 
("-dropTable".equals(args[i]) && i < args.length - 1) { + dropTable = Boolean.parseBoolean(args[++i]); + } + } + } + + public static void main(String[] args) { + JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java new file mode 100644 index 0000000000..23c3c5279a --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TaosConstants.java @@ -0,0 +1,8 @@ +package com.taosdata.taosdemo.utils; + +public class TaosConstants { + public static final String[] DATA_TYPES = { + "timestamp", "int", "bigint", "float", "double", + "binary(64)", "smallint", "tinyint", "bool", "nchar(64)", + }; +} diff --git a/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java new file mode 100644 index 0000000000..9cfce16d82 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java @@ -0,0 +1,67 @@ +package com.taosdata.taosdemo.utils; + +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.Date; + +public class TimeStampUtil { + + private static final String datetimeFormat = "yyyy-MM-dd HH:mm:ss.SSS"; + + public static long datetimeToLong(String dateTime) { + SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat); + try { + return sdf.parse(dateTime).getTime(); + } catch (ParseException e) { + throw new IllegalArgumentException("invalid datetime string >>> " + dateTime); + } + } + + public static String longToDatetime(long time) { + SimpleDateFormat sdf = new SimpleDateFormat(datetimeFormat); + return sdf.format(new Date(time)); + } + + public static class TimeTuple { + public Long start; + public Long end; + public Long timeGap; + + TimeTuple(long start, long end, long timeGap) { + this.start = start; + this.end = end; + this.timeGap = timeGap; + } + } + + public static TimeTuple range(long start, long timeGap, long size) { + long now = System.currentTimeMillis(); + if (timeGap < 1) + timeGap = 1; + if (start == 0) + start = now - size * timeGap; + + // 如果size小于1异常 + if (size < 1) + throw new IllegalArgumentException("size less than 1."); + // 如果timeGap为1,已经超长,需要前移start + if (start + size > now) { + start = now - size; + return new TimeTuple(start, now, 1); + } + long end = start + (long) (timeGap * size); + if (end > now) { + //压缩timeGap + end = now; + double gap = (end - start) / (size * 1.0f); + if (gap < 1.0f) { + timeGap = 1; + start = end - size; + } else { + timeGap = (long) gap; + end = start + (long) (timeGap * size); + } + } + return new TimeTuple(start, end, timeGap); + } +} diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/application.properties b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties new file mode 100644 index 0000000000..1e7a7de89f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/application.properties @@ -0,0 +1,14 @@ +#spring.datasource.url=jdbc:mysql://master:3306/?useSSL=false&useUnicode=true&characterEncoding=UTF-8 +#spring.datasource.driver-class-name=com.mysql.jdbc.Driver +#spring.datasource.username=root +#spring.datasource.password=123456 + +spring.datasource.url=jdbc:TAOS://master:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8 
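+# NOTE: 'master' in the datasource URL above is the example TDengine hostname; the
+# charset/locale/timezone options are client-side connection settings and may need adjusting per deployment.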
+spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver +spring.datasource.username=root +spring.datasource.password=taosdata + +spring.datasource.hikari.maximum-pool-size=10 +spring.datasource.hikari.minimum-idle=10 +spring.datasource.hikari.max-lifetime=600000 +logging.level.com.taosdata.taosdemo.mapper=debug \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties b/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties new file mode 100644 index 0000000000..1299357be3 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/log4j.properties @@ -0,0 +1,21 @@ +### 设置### +log4j.rootLogger=debug,stdout,DebugLog,ErrorLog +### 输出信息到控制抬 ### +log4j.appender.stdout=org.apache.log4j.ConsoleAppender +log4j.appender.stdout.Target=System.out +log4j.appender.stdout.layout=org.apache.log4j.PatternLayout +log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n +### 输出DEBUG 级别以上的日志到=logs/debug.log +log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender +log4j.appender.DebugLog.File=logs/debug.log +log4j.appender.DebugLog.Append=true +log4j.appender.DebugLog.Threshold=DEBUG +log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout +log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n +### 输出ERROR 级别以上的日志到=logs/error.log +log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender +log4j.appender.ErrorLog.File=logs/error.log +log4j.appender.ErrorLog.Append=true +log4j.appender.ErrorLog.Threshold=ERROR +log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout +log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html b/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html new file mode 100644 index 0000000000..69f8851c9b --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/main/resources/templates/index.html @@ -0,0 +1,10 @@ + + + + + Index + + +
+<body>
+Hello~~~
+</body>
+ + \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java new file mode 100644 index 0000000000..e872509187 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/TaosdemoApplicationTests.java @@ -0,0 +1,13 @@ +package com.taosdata.taosdemo; + +import org.junit.jupiter.api.Test; +import org.springframework.boot.test.context.SpringBootTest; + +@SpringBootTest +class TaosdemoApplicationTests { + + @Test + void contextLoads() { + } + +} diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java new file mode 100644 index 0000000000..8364e16ed0 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/DatabaseMapperTest.java @@ -0,0 +1,42 @@ +package com.taosdata.taosdemo.mapper; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.HashMap; +import java.util.Map; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class DatabaseMapperTest { + @Autowired + private DatabaseMapper databaseMapper; + + @Test + public void createDatabase() { + databaseMapper.createDatabase("db_test"); + } + + @Test + public void dropDatabase() { + databaseMapper.dropDatabase("db_test"); + } + + @Test + public void creatDatabaseWithParameters() { + Map map = new HashMap<>(); + map.put("dbname", "weather"); + map.put("keep", "3650"); + map.put("days", "30"); + map.put("replica", "1"); + databaseMapper.createDatabaseWithParameters(map); + } + + @Test + public void useDatabase() { + databaseMapper.useDatabase("test"); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java new file mode 100644 index 0000000000..90faa20496 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SubTableMapperTest.java @@ -0,0 +1,88 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.*; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SubTableMapperTest { + @Autowired + private SubTableMapper subTableMapper; + private List tables; + + @Test + public void createUsingSuperTable() { + SubTableMeta subTableMeta = new SubTableMeta(); + subTableMeta.setDatabase("test"); + subTableMeta.setSupertable("weather"); + subTableMeta.setName("t1"); + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagValue("tag" + (i + 1), "nchar(64)")); + } + subTableMeta.setTags(tags); + subTableMapper.createUsingSuperTable(subTableMeta); + } + + @Test + public void insertOneTableMultiValues() { + subTableMapper.insertOneTableMultiValues(tables.get(0)); + } + 
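+    // The 'tables' list used by the insert tests below is populated in the @Before method at
+    // the end of this class: three SubTableValue instances for super table 'weather' in database 'test'.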
+ @Test + public void insertOneTableMultiValuesUsingSuperTable() { + subTableMapper.insertOneTableMultiValuesUsingSuperTable(tables.get(0)); + } + + + @Test + public void insertMultiTableMultiValues() { + subTableMapper.insertMultiTableMultiValues(tables); + } + + @Test + public void insertMultiTableMultiValuesUsingSuperTable() { + subTableMapper.insertMultiTableMultiValuesUsingSuperTable(tables); + } + + + @Before + public void before() { + tables = new ArrayList<>(); + for (int ind = 0; ind < 3; ind++) { + + SubTableValue table = new SubTableValue(); + table.setDatabase("test"); + // supertable + table.setSupertable("weather"); + table.setName("t" + (ind + 1)); + // tags + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagValue("tag" + (i + 1), "beijing")); + } + table.setTags(tags); + // values + List values = new ArrayList<>(); + for (int i = 0; i < 2; i++) { + List fields = new ArrayList<>(); + for (int j = 0; j < 4; j++) { + fields.add(new FieldValue("f" + (j + 1), (j + 1) * 10)); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tables.add(table); + } + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java new file mode 100644 index 0000000000..6c97874cfc --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/SuperTableMapperTest.java @@ -0,0 +1,50 @@ +package com.taosdata.taosdemo.mapper; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SuperTableMapperTest { + @Autowired + private SuperTableMapper superTableMapper; + + @Test + public void testCreateSuperTableUsingSQL() { + String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + superTableMapper.createSuperTableUsingSQL(sql); + } + + @Test + public void createSuperTable() { + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase("test"); + superTableMeta.setName("weather"); + List fields = new ArrayList<>(); + for (int i = 0; i < 5; i++) { + fields.add(new FieldMeta("f" + (i + 1), "int")); + } + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + tags.add(new TagMeta("t" + (i + 1), "nchar(64)")); + } + superTableMeta.setTags(tags); + + superTableMapper.createSuperTable(superTableMeta); + } + + @Test + public void dropSuperTable() { + superTableMapper.dropSuperTable("test", "weather"); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java new file mode 100644 index 0000000000..3a051b3112 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/mapper/TableMapperTest.java @@ -0,0 +1,142 @@ +package com.taosdata.taosdemo.mapper; + +import 
com.taosdata.taosdemo.domain.*; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +@SpringBootTest +@RunWith(SpringRunner.class) +public class TableMapperTest { + @Autowired + private TableMapper tableMapper; + private static Random random = new Random(System.currentTimeMillis()); + + @Test + public void create() { + TableMeta table = new TableMeta(); + table.setDatabase("test"); + table.setName("t1"); + List fields = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (i + 1)); + field.setType("nchar(64)"); + fields.add(field); + } + table.setFields(fields); + tableMapper.create(table); + } + + @Test + public void insertOneTableMultiValues() { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t1"); + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < 2; k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 100); + fields.add(field); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tableMapper.insertOneTableMultiValues(table); + } + + @Test + public void insertOneTableMultiValuesWithCoulmns() { + TableValue tableValue = new TableValue(); + tableValue.setDatabase("test"); + tableValue.setName("weather"); + // columns + List columns = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (i + 1)); + columns.add(field); + } + tableValue.setColumns(columns); + // values + List values = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + List fields = new ArrayList<>(); + for (int j = 0; j < 3; j++) { + FieldValue field = new FieldValue(); + field.setValue(j); + fields.add(field); + } + values.add(new RowValue(fields)); + } + tableValue.setValues(values); + tableMapper.insertOneTableMultiValuesWithColumns(tableValue); + } + + @Test + public void insertMultiTableMultiValues() { + List tables = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t" + (i + 1)); + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < 2; k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 10); + fields.add(field); + } + values.add(new RowValue(fields)); + } + table.setValues(values); + + tables.add(table); + } + tableMapper.insertMultiTableMultiValues(tables); + } + + @Test + public void insertMultiTableMultiValuesWithCoulumns() { + List tables = new ArrayList<>(); + for (int i = 0; i < 3; i++) { + TableValue table = new TableValue(); + table.setDatabase("test"); + table.setName("t" + (i + 1)); + // columns + List columns = new ArrayList<>(); + for (int j = 0; j < 3; j++) { + FieldMeta field = new FieldMeta(); + field.setName("f" + (j + 1)); + columns.add(field); + } + table.setColumns(columns); + // values + List values = new ArrayList<>(); + for (int j = 0; j < 5; j++) { + List fields = new ArrayList<>(); + for (int k = 0; k < columns.size(); k++) { + FieldValue field = new FieldValue<>(); + field.setValue((k + 1) * 10); + fields.add(field); + } + values.add(new RowValue(fields)); + } + 
table.setValues(values); + + tables.add(table); + } + tableMapper.insertMultiTableMultiValuesWithColumns(tables); + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java new file mode 100644 index 0000000000..2c1cdf6e00 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java @@ -0,0 +1,29 @@ +package com.taosdata.taosdemo.service; + +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class DatabaseServiceTest { + @Autowired + private DatabaseService service; + + @Test + public void testCreateDatabase1() { + service.createDatabase("testXXXX"); + } + + @Test + public void dropDatabase() { + service.dropDatabase("testXXXX"); + } + + @Test + public void useDatabase() { + service.useDatabase("test"); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java new file mode 100644 index 0000000000..4e54de3f13 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SubTableServiceTest.java @@ -0,0 +1,50 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.TagValue; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SubTableServiceTest { + @Autowired + private SubTableService service; + + private List subTables; + + @Before + public void before() { + subTables = new ArrayList<>(); + for (int i = 1; i <= 1; i++) { + SubTableMeta subTableMeta = new SubTableMeta(); + subTableMeta.setDatabase("test"); + subTableMeta.setSupertable("weather"); + subTableMeta.setName("t" + i); + List tags = new ArrayList<>(); + tags.add(new TagValue("location", "beijing")); + tags.add(new TagValue("groupId", i)); + subTableMeta.setTags(tags); + subTables.add(subTableMeta); + } + } + + @Test + public void testCreateSubTable() { + int count = service.createSubTable(subTables); + System.out.println("count >>> " + count); + } + + @Test + public void testCreateSubTableList() { + int count = service.createSubTable(subTables, 10); + System.out.println("count >>> " + count); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java new file mode 100644 index 0000000000..b9291fceaf --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java @@ -0,0 +1,39 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.FieldMeta; +import 
com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class SuperTableServiceTest { + + @Autowired + private SuperTableService service; + + @Test + public void testCreate() { + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase("test"); + superTableMeta.setName("weather"); + List fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + fields.add(new FieldMeta("temperature", "float")); + fields.add(new FieldMeta("humidity", "int")); + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + tags.add(new TagMeta("location", "nchar(64)")); + tags.add(new TagMeta("groupId", "int")); + superTableMeta.setTags(tags); + service.create(superTableMeta); + } + +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java new file mode 100644 index 0000000000..fdbd554629 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/TableServiceTest.java @@ -0,0 +1,43 @@ +package com.taosdata.taosdemo.service; + +import com.taosdata.taosdemo.domain.TableMeta; +import org.junit.Before; +import org.junit.Test; +import org.junit.runner.RunWith; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.boot.test.context.SpringBootTest; +import org.springframework.test.context.junit4.SpringRunner; + +import java.util.ArrayList; +import java.util.List; + +@RunWith(SpringRunner.class) +@SpringBootTest +public class TableServiceTest { + @Autowired + private TableService tableService; + + private List tables; + + @Before + public void before() { + tables = new ArrayList<>(); + for (int i = 0; i < 1; i++) { + TableMeta tableMeta = new TableMeta(); + tableMeta.setDatabase("test"); + tableMeta.setName("weather" + (i + 1)); + tables.add(tableMeta); + } + } + + @Test + public void testCreate() { + int count = tableService.create(tables); + System.out.println(count); + } + + @Test + public void testCreateMultiThreads() { + System.out.println(tableService.create(tables, 10)); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java new file mode 100644 index 0000000000..aea3cc76ca --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/FieldValueGeneratorTest.java @@ -0,0 +1,59 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.RowValue; +import com.taosdata.taosdemo.utils.TimeStampUtil; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class FieldValueGeneratorTest { + + private List rowValues; + + @Test + public void generate() { + List fieldMetas = new ArrayList<>(); + fieldMetas.add(new FieldMeta("ts", 
"timestamp")); + fieldMetas.add(new FieldMeta("temperature", "float")); + fieldMetas.add(new FieldMeta("humidity", "int")); + + long start = TimeStampUtil.datetimeToLong("2020-01-01 00:00:00.000"); + long end = TimeStampUtil.datetimeToLong("2020-01-01 10:00:00.000"); + + rowValues = FieldValueGenerator.generate(start, end, 1000l * 3600, fieldMetas); + Assert.assertEquals(10, rowValues.size()); + } + + @Test + public void disrupt() { + List fieldMetas = new ArrayList<>(); + fieldMetas.add(new FieldMeta("ts", "timestamp")); + fieldMetas.add(new FieldMeta("temperature", "float")); + fieldMetas.add(new FieldMeta("humidity", "int")); + + long start = TimeStampUtil.datetimeToLong("2020-01-01 00:00:00.000"); + long end = TimeStampUtil.datetimeToLong("2020-01-01 10:00:00.000"); + + rowValues = FieldValueGenerator.generate(start, end, 1000l * 3600l, fieldMetas); + + FieldValueGenerator.disrupt(rowValues, 20, 1000); + Assert.assertEquals(10, rowValues.size()); + } + + @After + public void after() { + for (RowValue row : rowValues) { + row.getFields().stream().forEach(field -> { + if (field.getName().equals("ts")) { + System.out.print(TimeStampUtil.longToDatetime((Long) field.getValue())); + } else + System.out.print(" ," + field.getValue()); + }); + System.out.println(); + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java new file mode 100644 index 0000000000..78c8e9283f --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SubTableMetaGeneratorTest.java @@ -0,0 +1,52 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SubTableMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class SubTableMetaGeneratorTest { + List subTableMetas; + + @Test + public void generate() { + SuperTableMeta superTableMeta = new SuperTableMeta(); + superTableMeta.setDatabase("test"); + superTableMeta.setName("weather"); + List fields = new ArrayList<>(); + fields.add(new FieldMeta("ts", "timestamp")); + fields.add(new FieldMeta("temperature", "float")); + fields.add(new FieldMeta("humidity", "int")); + superTableMeta.setFields(fields); + List tags = new ArrayList<>(); + tags.add(new TagMeta("location", "nchar(64)")); + tags.add(new TagMeta("groupId", "int")); + superTableMeta.setTags(tags); + + subTableMetas = SubTableMetaGenerator.generate(superTableMeta, 10, "t"); + Assert.assertEquals(10, subTableMetas.size()); + Assert.assertEquals("t1", subTableMetas.get(0).getName()); + Assert.assertEquals("t2", subTableMetas.get(1).getName()); + Assert.assertEquals("t3", subTableMetas.get(2).getName()); + Assert.assertEquals("t4", subTableMetas.get(3).getName()); + Assert.assertEquals("t5", subTableMetas.get(4).getName()); + Assert.assertEquals("t6", subTableMetas.get(5).getName()); + Assert.assertEquals("t7", subTableMetas.get(6).getName()); + Assert.assertEquals("t8", subTableMetas.get(7).getName()); + Assert.assertEquals("t9", subTableMetas.get(8).getName()); + Assert.assertEquals("t10", subTableMetas.get(9).getName()); + } + + @After + public void after() { + for (SubTableMeta subTableMeta : 
subTableMetas) { + System.out.println(subTableMeta); + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java new file mode 100644 index 0000000000..11c5312cf6 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/SuperTableMetaGeneratorImplTest.java @@ -0,0 +1,60 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.FieldMeta; +import com.taosdata.taosdemo.domain.SuperTableMeta; +import com.taosdata.taosdemo.domain.TagMeta; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +public class SuperTableMetaGeneratorImplTest { + private SuperTableMeta meta; + + @Test + public void generate() { + String sql = "create table test.weather (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int)"; + meta = SuperTableMetaGenerator.generate(sql); + Assert.assertEquals("test", meta.getDatabase()); + Assert.assertEquals("weather", meta.getName()); + Assert.assertEquals(3, meta.getFields().size()); + Assert.assertEquals("ts", meta.getFields().get(0).getName()); + Assert.assertEquals("timestamp", meta.getFields().get(0).getType()); + Assert.assertEquals("temperature", meta.getFields().get(1).getName()); + Assert.assertEquals("float", meta.getFields().get(1).getType()); + Assert.assertEquals("humidity", meta.getFields().get(2).getName()); + Assert.assertEquals("int", meta.getFields().get(2).getType()); + + Assert.assertEquals("location", meta.getTags().get(0).getName()); + Assert.assertEquals("nchar(64)", meta.getTags().get(0).getType()); + Assert.assertEquals("groupid", meta.getTags().get(1).getName()); + Assert.assertEquals("int", meta.getTags().get(1).getType()); + } + + @Test + public void generate2() { + meta = SuperTableMetaGenerator.generate("test", "weather", 10, "col", 10, "tag"); + Assert.assertEquals("test", meta.getDatabase()); + Assert.assertEquals("weather", meta.getName()); + Assert.assertEquals(11, meta.getFields().size()); + for (FieldMeta fieldMeta : meta.getFields()) { + Assert.assertNotNull(fieldMeta.getName()); + Assert.assertNotNull(fieldMeta.getType()); + } + for (TagMeta tagMeta : meta.getTags()) { + Assert.assertNotNull(tagMeta.getName()); + Assert.assertNotNull(tagMeta.getType()); + } + } + + @After + public void after() { + System.out.println(meta.getDatabase()); + System.out.println(meta.getName()); + for (FieldMeta fieldMeta : meta.getFields()) { + System.out.println(fieldMeta); + } + for (TagMeta tagMeta : meta.getTags()) { + System.out.println(tagMeta); + } + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java new file mode 100644 index 0000000000..37c9051c94 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/data/TagValueGeneratorTest.java @@ -0,0 +1,37 @@ +package com.taosdata.taosdemo.service.data; + +import com.taosdata.taosdemo.domain.TagMeta; +import com.taosdata.taosdemo.domain.TagValue; +import org.junit.After; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +public class TagValueGeneratorTest { + List tagvalues; + 
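+    // TagValueGenerator.generate() (added in service/data/TagValueGenerator.java above) returns
+    // one TagValue per TagMeta, with each value produced by DataGenerator.randomValue(type).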
+ @Test + public void generate() { + List tagMetaList = new ArrayList<>(); + tagMetaList.add(new TagMeta("location", "nchar(10)")); + tagMetaList.add(new TagMeta("groupId", "int")); + tagMetaList.add(new TagMeta("ts", "timestamp")); + tagMetaList.add(new TagMeta("temperature", "float")); + tagMetaList.add(new TagMeta("humidity", "double")); + tagMetaList.add(new TagMeta("text", "binary(10)")); + tagvalues = TagValueGenerator.generate(tagMetaList); + Assert.assertEquals("location", tagvalues.get(0).getName()); + Assert.assertEquals("groupId", tagvalues.get(1).getName()); + Assert.assertEquals("ts", tagvalues.get(2).getName()); + Assert.assertEquals("temperature", tagvalues.get(3).getName()); + Assert.assertEquals("humidity", tagvalues.get(4).getName()); + Assert.assertEquals("text", tagvalues.get(5).getName()); + } + + @After + public void after() { + tagvalues.stream().forEach(System.out::println); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java new file mode 100644 index 0000000000..7d12782526 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/DataGeneratorTest.java @@ -0,0 +1,20 @@ +package com.taosdata.taosdemo.utils; + +import org.junit.Assert; +import org.junit.Test; + +public class DataGeneratorTest { + + @Test + public void randomValue() { + for (int i = 0; i < TaosConstants.DATA_TYPES.length; i++) { + System.out.println(TaosConstants.DATA_TYPES[i] + " >>> " + DataGenerator.randomValue(TaosConstants.DATA_TYPES[i])); + } + } + + @Test + public void randomNchar() { + String s = DataGenerator.randomNchar(10); + Assert.assertEquals(10, s.length()); + } +} \ No newline at end of file diff --git a/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java new file mode 100644 index 0000000000..628594c4b1 --- /dev/null +++ b/tests/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/utils/TimeStampUtilTest.java @@ -0,0 +1,38 @@ +package com.taosdata.taosdemo.utils; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TimeStampUtilTest { + + @Test + public void datetimeToLong() { + final String startTime = "2005-01-01 00:00:00.000"; + long start = TimeStampUtil.datetimeToLong(startTime); + assertEquals(1104508800000l, start); + String dateTimeStr = TimeStampUtil.longToDatetime(start); + assertEquals("2005-01-01 00:00:00.000", dateTimeStr); + } + + @Test + public void longToDatetime() { + String datetime = TimeStampUtil.longToDatetime(1510000000000L); + assertEquals("2017-11-07 04:26:40.000", datetime); + long timestamp = TimeStampUtil.datetimeToLong(datetime); + assertEquals(1510000000000L, timestamp); + } + + @Test + public void range() { + long start = TimeStampUtil.datetimeToLong("2020-10-01 00:00:00.000"); + long timeGap = 1000; + long numOfRowsPerTable = 1000l * 3600l * 24l * 90l; + TimeStampUtil.TimeTuple timeTuple = TimeStampUtil.range(start, timeGap, numOfRowsPerTable); + System.out.println(TimeStampUtil.longToDatetime(timeTuple.start)); + System.out.println(TimeStampUtil.longToDatetime(timeTuple.end)); + System.out.println(timeTuple.timeGap); + + } + +} \ No newline at end of file diff --git a/tests/examples/c/demo.c b/tests/examples/c/demo.c index 74a49288e9..54e81d33b9 100644 --- 
a/tests/examples/c/demo.c +++ b/tests/examples/c/demo.c @@ -50,7 +50,7 @@ static void queryDB(TAOS *taos, char *command) { taos_free_result(pSql); } -void Test(char *qstr, const char *input, int i); +void Test(TAOS *taos, char *qstr, int i); int main(int argc, char *argv[]) { char qstr[1024]; @@ -63,21 +63,22 @@ int main(int argc, char *argv[]) { // init TAOS taos_init(); - for (int i = 0; i < 4000000; i++) { - Test(qstr, argv[1], i); - } - taos_cleanup(); -} -void Test(char *qstr, const char *input, int index) { - TAOS *taos = taos_connect(input, "root", "taosdata", NULL, 0); - printf("==================test at %d\n================================", index); - queryDB(taos, "drop database if exists demo"); - queryDB(taos, "create database demo"); - TAOS_RES *result; + TAOS *taos = taos_connect(argv[1], "root", "taosdata", NULL, 0); if (taos == NULL) { printf("failed to connect to server, reason:%s\n", "null taos"/*taos_errstr(taos)*/); exit(1); } + for (int i = 0; i < 4000000; i++) { + Test(taos, qstr, i); + } + taos_close(taos); + taos_cleanup(); +} +void Test(TAOS *taos, char *qstr, int index) { + printf("==================test at %d\n================================", index); + queryDB(taos, "drop database if exists demo"); + queryDB(taos, "create database demo"); + TAOS_RES *result; queryDB(taos, "use demo"); queryDB(taos, "create table m1 (ts timestamp, ti tinyint, si smallint, i int, bi bigint, f float, d double, b binary(10))"); @@ -131,6 +132,5 @@ void Test(char *qstr, const char *input, int index) { taos_free_result(result); printf("====demo end====\n\n"); - taos_close(taos); } diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index 51bb9b36c3..b96daa5464 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -65,7 +65,18 @@ function runQueryPerfTest { echoInfo "Run Performance Test" cd $WORK_DIR/TDengine/tests/pytest - python3 query/queryPerformance.py 0 | tee -a $PERFORMANCE_TEST_REPORT + python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT + + python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT + + yes | taosdemo -c /etc/taosperf/ -d taosdemo_insert_test -t 1000 -n 1000 > taosdemoperf.txt + + CREATETABLETIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'` + INSERTRECORDSTIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'` + REQUESTSPERSECOND=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $13}'` + + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -t $CREATETABLETIME -i $INSERTRECORDSTIME -r $REQUESTSPERSECOND | tee -a $PERFORMANCE_TEST_REPORT + [ -f taosdemoperf.txt ] && rm taosdemoperf.txt } diff --git a/tests/pytest/bug2265.py b/tests/pytest/bug2265.py new file mode 100644 index 0000000000..e78233928f --- /dev/null +++ b/tests/pytest/bug2265.py @@ -0,0 +1,85 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import taos +if __name__ == "__main__": + + logSql = True + deployPath = "" + testCluster = False + valgrind = 0 + + print("start to execute %s" % __file__) + tdDnodes.init(deployPath) + tdDnodes.setTestCluster(testCluster) + tdDnodes.setValgrind(valgrind) + + tdDnodes.stopAll() + tdDnodes.addSimExtraCfg("maxSQLLength", "1048576") + tdDnodes.deploy(1) + tdDnodes.start(1) + host = '127.0.0.1' + + tdLog.info("Procedures for tdengine deployed in %s" % (host)) + + tdCases.logSql(logSql) + print('1') + conn = taos.connect( + host, + config=tdDnodes.getSimCfgPath()) + + tdSql.init(conn.cursor(), True) + + print("==========step1") + print("create table ") + tdSql.execute("create database db") + tdSql.execute("use db") + tdSql.execute("create table t1 (ts timestamp, c1 int,c2 int ,c3 int)") + + print("==========step2") + print("insert maxSQLLength data ") + data = 'insert into t1 values' + ts = 1604298064000 + i = 0 + while ((len(data)<(1024*1024)) & (i < 32767 - 1) ): + data += '(%s,%d,%d,%d)'%(ts+i,i%1000,i%1000,i%1000) + i+=1 + tdSql.execute(data) + + print("==========step4") + print("insert data batch larger than 32767 ") + i = 0 + while ((len(data)<(1024*1024)) & (i < 32767) ): + data += '(%s,%d,%d,%d)'%(ts+i,i%1000,i%1000,i%1000) + i+=1 + tdSql.error(data) + + print("==========step4") + print("insert data larger than maxSQLLength ") + tdSql.execute("create table t2 (ts timestamp, c1 binary(50))") + data = 'insert into t2 values' + i = 0 + while ((len(data)<(1024*1024)) & (i < 32767 - 1 ) ): + data += '(%s,%s)'%(ts+i,'a'*50) + i+=1 + tdSql.error(data) + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + + diff --git a/tests/pytest/client/noConnectionErrorTest.py b/tests/pytest/client/noConnectionErrorTest.py new file mode 100644 index 0000000000..2c13016cf1 --- /dev/null +++ b/tests/pytest/client/noConnectionErrorTest.py @@ -0,0 +1,48 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import tdDnodes + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + tdDnodes.stop(1) + sql = "use db" + + try: + tdSql.execute(sql) + except Exception as e: + expectError = 'Unable to establish connection' + if expectError in str(e): + pass + else: + caller = inspect.getframeinfo(inspect.stack()[1][1]) + tdLog.exit("%s(%d) failed: sql:%s, expect error not occured" % (caller.filename, caller.lineno, sql)) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py index 6f4258312d..8ae74c5c86 100644 --- a/tests/pytest/concurrent_inquiry.py +++ b/tests/pytest/concurrent_inquiry.py @@ -43,7 +43,8 @@ class ConcurrentInquiry: self.subtb_stru_list=[] self.stb_tag_list=[] self.subtb_tag_list=[] - + self.probabilities = [0.05,0.95] + self.ifjoin = [0,1] def SetThreadsNum(self,num): self.numOfTherads=num @@ -105,7 +106,7 @@ class ConcurrentInquiry: conn.close() #query condition - def con_where(self,tlist): + def con_where(self,tlist,col_list,tag_list): l=[] for i in range(random.randint(0,len(tlist))): c = random.choice(where_list) @@ -115,19 +116,26 @@ class ConcurrentInquiry: l.append(random.choice(tlist)+c) return 'where '+random.choice([' and ',' or ']).join(l) - def con_interval(self,tlist): - return random.choice(['interval(10s)','interval(10d)','interval(1n)']) + def con_interval(self,tlist,col_list,tag_list): + interval = 'interval(' + str(random.randint(0,100)) + random.choice(['a','s','d','w','n','y']) + ')' + return interval - def con_limit(self,tlist): - return random.choice(['limit 10','limit 10 offset 10','slimit 10','slimit 10 offset 10','limit 10 slimit 10','limit 10 offset 5 slimit 5 soffset 10']) + def con_limit(self,tlist,col_list,tag_list): + rand1 = str(random.randint(0,1000)) + rand2 = str(random.randint(0,1000)) + return random.choice(['limit ' + rand1,'limit ' + rand1 + ' offset '+rand2, + ' slimit ' + rand1,' slimit ' + rand1 + ' offset ' + rand2,'limit '+rand1 + ' slimit '+ rand2, + 'limit '+ rand1 + ' offset' + rand2 + ' slimit '+ rand1 + ' soffset ' + rand2 ]) - def con_fill(self,tlist): + def con_fill(self,tlist,col_list,tag_list): return random.choice(['fill(null)','fill(prev)','fill(none)','fill(LINEAR)']) - def con_group(self,tlist): - return 'group by '+random.choice(tlist) + def con_group(self,tlist,col_list,tag_list): + rand_tag = random.randint(0,5) + rand_col = random.randint(0,1) + return 'group by '+','.join(random.sample(col_list,rand_col))+','.join(random.sample(tag_list,rand_tag)) - def con_order(self,tlist): + def con_order(self,tlist,col_list,tag_list): return 'order by '+random.choice(tlist) def gen_query_sql(self): #生成查询语句 @@ -158,22 +166,89 @@ class ConcurrentInquiry: sel_col_list=[] col_rand=random.randint(0,len(col_list)) for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数 + alias = 'as '+ str(i) + pick_func = '' if j == 
'leastsquares': - sel_col_list.append(j+'('+i+',1,1)') + pick_func=j+'('+i+',1,1)' elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile': - sel_col_list.append(j+'('+i+',1)') + pick_func=j+'('+i+',1)' else: - sel_col_list.append(j+'('+i+')') + pick_func=j+'('+i+')' + if bool(random.getrandbits(1)): + pick_func+=alias + sel_col_list.append(pick_func) + sql=sql+','.join(sel_col_list)+' from '+random.choice(self.stb_list+self.subtb_list)+' ' #select col & func con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill] sel_con=random.sample(con_func,random.randint(0,len(con_func))) sel_con_list=[] for i in sel_con: - sel_con_list.append(i(tlist)) #获取对应的条件函数 + sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数 sql+=' '.join(sel_con_list) # condition print(sql) return sql + def gen_query_join(self): #生成join查询语句 + tbname = [] + col_list = [] + tag_list = [] + col_intersection = [] + tag_intersection = [] + subtable = None + + if bool(random.getrandbits(1)): + subtable = True + tbname = random.sample(self.subtb_list,2) + for i in tbname: + col_list.append(self.subtb_stru_list[self.subtb_list.index(i)]) + tag_list.append(self.subtb_stru_list[self.subtb_list.index(i)]) + col_intersection = list(set(col_list[0]).intersection(set(col_list[1]))) + tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1]))) + else: + tbname = random.sample(self.stb_list,2) + for i in tbname: + col_list.append(self.stb_stru_list[self.stb_list.index(i)]) + tag_list.append(self.stb_stru_list[self.stb_list.index(i)]) + col_intersection = list(set(col_list[0]).intersection(set(col_list[1]))) + tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1]))) + + + con_rand=random.randint(0,len(condition_list)) + col_rand=random.randint(0,len(col_list)) + tag_rand=random.randint(0,len(tag_list)) + + sql='select ' #select + + sel_col_tag=[] + col_rand=random.randint(0,len(col_list)) + if bool(random.getrandbits(1)): + sql += '*' + else: + sel_col_tag.append('t1.' + str(random.choice(col_list[0] + tag_list[0]))) + sel_col_tag.append('t2.' + str(random.choice(col_list[1] + tag_list[1]))) + sql += ','.join(sel_col_tag) + + sql = sql + ' from '+ str(tbname[0]) +' t1,' + str(tbname[1]) + ' t2 ' #select col & func + join_section = None + if subtable: + join_section = ''.join(random.choices(col_intersection)) + sql += 'where t1._c0 = t2._c0 and ' + 't1.' + join_section + '=t2.' + join_section + else: + join_section = ''.join(random.choices(col_intersection+tag_intersection)) + sql += 'where t1._c0 = t2._c0 and ' + 't1.' + join_section + '=t2.' 
+ join_section + + print(sql) + return sql + + def random_pick(self): + x = random.uniform(0,1) + cumulative_probability = 0.0 + for item, item_probability in zip(self.ifjoin, self.probabilities): + cumulative_probability += item_probability + if x < cumulative_probability:break + return item + + def rest_query(self,sql): #rest 接口 host = "127.0.0.1" user = "root" @@ -210,6 +285,7 @@ class ConcurrentInquiry: nRows = rj['rows'] if ('rows' in rj) else 0 return nRows + def query_thread_n(self,threadID): #使用原生python接口查询 host = "127.0.0.1" user = "root" @@ -227,7 +303,10 @@ class ConcurrentInquiry: while True: try: - sql=self.gen_query_sql() + if self.random_pick(): + sql=self.gen_query_sql() + else: + sql=self.gen_query_join() print("sql is ",sql) start = time.time() cl.execute(sql) @@ -247,7 +326,10 @@ class ConcurrentInquiry: print("Thread %d: starting" % threadID) while True: try: - sql=self.gen_query_sql() + if self.random_pick(): + sql=self.gen_query_sql() + else: + sql=self.gen_query_join() print("sql is ",sql) start = time.time() self.rest_query(sql) @@ -270,7 +352,6 @@ class ConcurrentInquiry: threads.append(thread) thread.start() for i in range(self.r_numOfTherads): - # for i in range(1): thread = threading.Thread(target=self.query_thread_r, args=(i,)) threads.append(thread) thread.start() diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 42af09e7eb..2b364ee7cd 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -20,13 +20,21 @@ python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py python3 ./test.py -f insert/before_1970.py +python3 bug2265.py +#table python3 ./test.py -f table/alter_wal0.py python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py python3 ./test.py -f table/db_table.py python3 ./test.py -f table/create_sensitive.py #python3 ./test.py -f table/tablename-boundary.py +python3 ./test.py -f table/max_table_length.py +python3 ./test.py -f table/alter_column.py +python3 ./test.py -f table/boundary.py +python3 ./test.py -f table/create.py +python3 ./test.py -f table/del_stable.py + # tag python3 ./test.py -f tag_lite/filter.py @@ -135,9 +143,6 @@ python3 ./test.py -f user/pass_len.py # stable python3 ./test.py -f stable/query_after_reset.py -# table -python3 ./test.py -f table/del_stable.py - #query python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py @@ -162,7 +167,12 @@ python3 ./test.py -f query/bug1876.py python3 ./test.py -f query/bug2218.py python3 ./test.py -f query/bug2117.py python3 ./test.py -f query/bug2143.py -python3 ./test.py -f query/sliding.py +python3 ./test.py -f query/sliding.py +python3 ./test.py -f query/unionAllTest.py +python3 ./test.py -f query/bug2281.py +python3 ./test.py -f query/bug2119.py +python3 ./test.py -f query/isNullTest.py +python3 ./test.py -f query/queryWithTaosdKilled.py #stream python3 ./test.py -f stream/metric_1.py @@ -179,6 +189,7 @@ python3 ./test.py -f alter/alter_table_crash.py python3 ./test.py -f client/client.py python3 ./test.py -f client/version.py python3 ./test.py -f client/alterDatabase.py +python3 ./test.py -f client/noConnectionErrorTest.py # Misc python3 testCompress.py @@ -203,6 +214,7 @@ python3 ./test.py -f functions/function_stddev.py -r 1 python3 ./test.py -f functions/function_sum.py -r 1 python3 ./test.py -f functions/function_top.py -r 1 #python3 ./test.py -f functions/function_twa.py -r 1 +python3 ./test.py -f 
functions/function_twa_test2.py python3 queryCount.py python3 ./test.py -f query/queryGroupbyWithInterval.py python3 client/twoClients.py @@ -213,6 +225,7 @@ python3 test.py -f query/queryFillTest.py python3 test.py -f tools/taosdemoTest.py python3 test.py -f tools/taosdumpTest.py python3 test.py -f tools/lowaTest.py +python3 test.py -f tools/taosdemoTest2.py # subscribe python3 test.py -f subscribe/singlemeter.py diff --git a/tests/pytest/functions/function_twa_test2.py b/tests/pytest/functions/function_twa_test2.py new file mode 100644 index 0000000000..b5cd24ce71 --- /dev/null +++ b/tests/pytest/functions/function_twa_test2.py @@ -0,0 +1,124 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import * +from util.cases import * +from util.sql import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + tdSql.execute("create table t1(ts timestamp, c int)") + for i in range(self.rowNum): + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i * 10000, i + 1)) + + # twa verifacation + tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 5.5) + + tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(10s)") + tdSql.checkRows(10) + tdSql.checkData(0, 1, 1.49995) + tdSql.checkData(1, 1, 2.49995) + tdSql.checkData(2, 1, 3.49995) + tdSql.checkData(3, 1, 4.49995) + tdSql.checkData(4, 1, 5.49995) + tdSql.checkData(5, 1, 6.49995) + tdSql.checkData(6, 1, 7.49995) + tdSql.checkData(7, 1, 8.49995) + tdSql.checkData(8, 1, 9.49995) + tdSql.checkData(9, 1, 10) + + tdSql.query("select twa(c) from t1 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(10s) sliding(5s)") + tdSql.checkRows(20) + tdSql.checkData(0, 1, 1.24995) + tdSql.checkData(1, 1, 1.49995) + tdSql.checkData(2, 1, 1.99995) + tdSql.checkData(3, 1, 2.49995) + tdSql.checkData(4, 1, 2.99995) + tdSql.checkData(5, 1, 3.49995) + tdSql.checkData(6, 1, 3.99995) + tdSql.checkData(7, 1, 4.49995) + tdSql.checkData(8, 1, 4.99995) + tdSql.checkData(9, 1, 5.49995) + tdSql.checkData(10, 1, 5.99995) + tdSql.checkData(11, 1, 6.49995) + tdSql.checkData(12, 1, 6.99995) + tdSql.checkData(13, 1, 7.49995) + tdSql.checkData(14, 1, 7.99995) + tdSql.checkData(15, 1, 8.49995) + tdSql.checkData(16, 1, 8.99995) + tdSql.checkData(17, 1, 9.49995) + tdSql.checkData(18, 1, 9.75000) + tdSql.checkData(19, 1, 10) + + tdSql.execute("create table t2(ts timestamp, c int)") + tdSql.execute("insert into t2 values(%d, 1)" % (self.ts + 3000)) + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' ") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' 
interval(2s) ") + tdSql.checkRows(1) + tdSql.checkData(0, 1, 1) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) sliding(1s) ") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 1) + tdSql.checkData(1, 1, 1) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 09:00:04.000' and ts <= '2018-09-17 09:01:30.000' ") + tdSql.checkRows(0) + + tdSql.query("select twa(c) from t2 where ts >= '2018-09-17 08:00:00.000' and ts <= '2018-09-17 09:00:00.000' ") + tdSql.checkRows(0) + + tdSql.execute("create table t3(ts timestamp, c int)") + tdSql.execute("insert into t3 values(%d, 1)" % (self.ts)) + tdSql.execute("insert into t3 values(%d, -2)" % (self.ts + 3000)) + + tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, -0.5) + + tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000' interval(1s)") + tdSql.checkRows(2) + tdSql.checkData(0, 1, 0.5005) + tdSql.checkData(1, 1, -2) + + tdSql.query("select twa(c) from t3 where ts >= '2018-09-17 08:59:00.000' and ts <= '2018-09-17 09:01:30.000' interval(2s) sliding(1s)") + tdSql.checkRows(4) + tdSql.checkData(0, 1, 0.5005) + tdSql.checkData(1, 1, 0.0005) + tdSql.checkData(2, 1, -1.5) + tdSql.checkData(3, 1, -2) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/insert/insertFromCSVPerformance.py b/tests/pytest/insert/insertFromCSVPerformance.py new file mode 100644 index 0000000000..84fd1d7cca --- /dev/null +++ b/tests/pytest/insert/insertFromCSVPerformance.py @@ -0,0 +1,131 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
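The expected values in functions/function_twa_test2.py above appear to come from a time-weighted average with linear interpolation between samples: across the queried minute and a half the column climbs linearly from 1 to 10, so the whole-range result is 5.5. A minimal sketch of that calculation in plain Python (an illustration of the arithmetic the assertions encode, not the server implementation):

    def twa(points):
        # trapezoid rule: integrate the linearly interpolated value over time,
        # then divide by the time span covered by the samples
        area = 0.0
        for (t0, v0), (t1, v1) in zip(points, points[1:]):
            area += (v0 + v1) / 2.0 * (t1 - t0)
        return area / (points[-1][0] - points[0][0])

    # the rows inserted above: one sample every 10 seconds, values 1..10
    rows = [(i * 10, i + 1) for i in range(10)]
    print(twa(rows))   # -> 5.5, matching checkData(0, 0, 5.5)

The per-window 1.49995-style values follow the same rule applied to each 10 s bucket with interpolation at the bucket edges; the sketch only reproduces the whole-range case.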
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +import time +import datetime +import csv +import random +import pandas as pd +import argparse +import os.path + +class insertFromCSVPerformace: + def __init__(self, commitID, dbName, stbName, branchName): + self.commitID = commitID + self.dbName = dbName + self.stbName = stbName + self.branchName = branchName + self.ts = 1500074556514 + self.host = "127.0.0.1" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taosperf" + self.conn = taos.connect( + self.host, + self.user, + self.password, + self.config) + + def writeCSV(self): + with open('test3.csv','w', encoding='utf-8', newline='') as csvFile: + writer = csv.writer(csvFile, dialect='excel') + for i in range(1000000): + newTimestamp = self.ts + random.randint(10000000, 10000000000) + random.randint(1000, 10000000) + random.randint(1, 1000) + d = datetime.datetime.fromtimestamp(newTimestamp / 1000) + dt = str(d.strftime("%Y-%m-%d %H:%M:%S.%f")) + writer.writerow(["'%s'" % dt, random.randint(1, 100), random.uniform(1, 100), random.randint(1, 100), random.randint(1, 100)]) + + def removCSVHeader(self): + data = pd.read_csv("ordered.csv") + data = data.drop([0]) + data.to_csv("ordered.csv", header = False, index = False) + + def createTables(self): + cursor = self.conn.cursor() + + cursor.execute("create database if not exists %s" % self.dbName) + cursor.execute("use %s" % self.dbName) + cursor.execute("create table if not exists %s(ts timestamp, in_order_time float, out_of_order_time float, commit_id binary(50)) tags(branch binary(50))" % self.stbName) + cursor.execute("create table if not exists %s using %s tags('%s')" % (self.branchName, self.stbName, self.branchName)) + + cursor.execute("create table if not exists t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)") + cursor.execute("create table if not exists t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)") + + cursor.close() + + def run(self): + cursor = self.conn.cursor() + cursor.execute("use %s" % self.dbName) + print("==================== CSV insert performance ====================") + + totalTime = 0 + for i in range(10): + cursor.execute("create table if not exists t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)") + startTime = time.time() + cursor.execute("insert into t1 file 'outoforder.csv'") + totalTime += time.time() - startTime + cursor.execute("drop table if exists t1") + out_of_order_time = (float) (totalTime / 10) + print("Out of Order - Insert time: %f" % out_of_order_time) + + totalTime = 0 + for i in range(10): + cursor.execute("create table if not exists t2(ts timestamp, c1 int, c2 float, c3 int, c4 int)") + startTime = time.time() + cursor.execute("insert into t2 file 'ordered.csv'") + totalTime += time.time() - startTime + cursor.execute("drop table if exists t2") + + in_order_time = (float) (totalTime / 10) + print("In order - Insert time: %f" % in_order_time) + cursor.execute("insert into %s values(now, %f, %f, '%s')" % (self.branchName, in_order_time, out_of_order_time, self.commitID)) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '-c', + '--commit-id', + action='store', + default='null', + type=str, + help='git commit id (default: null)') + parser.add_argument( + '-d', + 
'--database-name', + action='store', + default='perf', + type=str, + help='Database name to be created (default: perf)') + parser.add_argument( + '-t', + '--stable-name', + action='store', + default='csv_insert', + type=str, + help='Database name to be created (default: csv_insert)') + parser.add_argument( + '-b', + '--branch-name', + action='store', + default='develop', + type=str, + help='branch name (default: develop)') + + args = parser.parse_args() + perftest = insertFromCSVPerformace(args.commit_id, args.database_name, args.stable_name, args.branch_name) + + perftest.createTables() + perftest.run() \ No newline at end of file diff --git a/tests/pytest/pytest_1.sh b/tests/pytest/pytest_1.sh index 52f5a30f4e..86d93703b4 100755 --- a/tests/pytest/pytest_1.sh +++ b/tests/pytest/pytest_1.sh @@ -19,13 +19,21 @@ python3 ./test.py -f insert/randomNullCommit.py python3 insert/retentionpolicy.py python3 ./test.py -f insert/alterTableAndInsert.py python3 ./test.py -f insert/insertIntoTwoTables.py +python3 ./test.py -f query/isNullTest.py +#table python3 ./test.py -f table/alter_wal0.py python3 ./test.py -f table/column_name.py python3 ./test.py -f table/column_num.py python3 ./test.py -f table/db_table.py python3 ./test.py -f table/create_sensitive.py #python3 ./test.py -f table/tablename-boundary.py +python3 ./test.py -f table/max_table_length.py +python3 ./test.py -f table/alter_column.py +python3 ./test.py -f table/boundary.py +python3 ./test.py -f table/create.py +python3 ./test.py -f table/del_stable.py +python3 ./test.py -f table/queryWithTaosdKilled.py # tag python3 ./test.py -f tag_lite/filter.py @@ -134,9 +142,6 @@ python3 ./test.py -f user/pass_len.py # stable python3 ./test.py -f stable/query_after_reset.py -# table -python3 ./test.py -f table/del_stable.py - #query python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py @@ -159,7 +164,9 @@ python3 ./test.py -f query/bug1874.py python3 ./test.py -f query/bug1875.py python3 ./test.py -f query/bug1876.py python3 ./test.py -f query/bug2218.py - +python3 ./test.py -f query/bug2281.py +python3 ./test.py -f query/bug2119.py +python3 bug2265.py #stream python3 ./test.py -f stream/metric_1.py python3 ./test.py -f stream/new.py @@ -199,6 +206,7 @@ python3 ./test.py -f functions/function_stddev.py -r 1 python3 ./test.py -f functions/function_sum.py -r 1 python3 ./test.py -f functions/function_top.py -r 1 #python3 ./test.py -f functions/function_twa.py -r 1 +python3 ./test.py -f functions/function_twa_test2.py python3 queryCount.py python3 ./test.py -f query/queryGroupbyWithInterval.py python3 client/twoClients.py diff --git a/tests/pytest/query/bug2119.py b/tests/pytest/query/bug2119.py new file mode 100644 index 0000000000..4224e55596 --- /dev/null +++ b/tests/pytest/query/bug2119.py @@ -0,0 +1,41 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
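insertFromCSVPerformance.py above measures the bulk-load path rather than row-by-row inserts. Each measurement boils down to the loop below (a sketch following the script's own defaults; the CSV files and the /etc/taosperf config are assumed to exist on the host running taosd):

    import time
    import taos

    conn = taos.connect("127.0.0.1", "root", "taosdata", "/etc/taosperf")
    cursor = conn.cursor()
    cursor.execute("create database if not exists perf")
    cursor.execute("use perf")

    total = 0.0
    runs = 10
    for _ in range(runs):
        # recreate the target table so every run loads into an empty table
        cursor.execute("create table if not exists t1(ts timestamp, c1 int, c2 float, c3 int, c4 int)")
        start = time.time()
        cursor.execute("insert into t1 file 'ordered.csv'")   # one statement loads the whole CSV
        total += time.time() - start
        cursor.execute("drop table if exists t1")
    print("in-order insert time: %f s" % (total / runs))

The script runs this once for the ordered file and once for the out-of-order file, then stores both averages, tagged with the branch name, under the csv_insert super table.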
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table t1 (ts timestamp, c1 int, c2 float)") + + + print("==========step2") + print("query percentile from blank table") + tdSql.query('select percentile(c1,1) from t1') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/bug2281.py b/tests/pytest/query/bug2281.py new file mode 100644 index 0000000000..b8eb17f5cd --- /dev/null +++ b/tests/pytest/query/bug2281.py @@ -0,0 +1,47 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + print("==========step1") + print("create table && insert data") + + tdSql.execute("create table t1 (ts timestamp, c1 int, c2 float)") + insertRows = 10 + t0 = 1604298064000 + tdLog.info("insert %d rows" % (insertRows)) + for i in range(insertRows): + ret = tdSql.execute( + "insert into t1 values (%d , %d,%d)" % + (t0+i,i%100,i/2.0)) + + print("==========step2") + print("query diff && top") + tdSql.error('select diff(c1),top(c2) from t1') + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/isNullTest.py b/tests/pytest/query/isNullTest.py new file mode 100644 index 0000000000..7b79679c7d --- /dev/null +++ b/tests/pytest/query/isNullTest.py @@ -0,0 +1,128 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
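bug2119.py above (and bug2281.py next to it) lean on the harness convention that tdSql.query()/tdSql.execute() assert the statement succeeds while tdSql.error() asserts the server rejects it. A minimal sketch of that convention using the two statements these cases exercise (tdSql comes from util.sql, as imported above):

    from util.sql import tdSql

    tdSql.execute("create table t1 (ts timestamp, c1 int, c2 float)")

    # bug2119: percentile over a table with no rows must return normally, not fail
    tdSql.query("select percentile(c1,1) from t1")

    # bug2281: mixing diff() with top() in one projection is expected to be rejected
    tdSql.error("select diff(c1),top(c2) from t1")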
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1537146000000 + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create table st(ts timestamp, c1 int, c2 binary(20), c3 nchar(20)) tags(t1 int, t2 binary(20), t3 nchar(20))") + tdSql.execute("create table t1 using st tags(1, 'binary1', 'nchar1')") + tdSql.execute("insert into t2(ts, c2) using st(t2) tags('') values(%d, '')" % (self.ts + 10)) + tdSql.execute("insert into t3(ts, c2) using st(t3) tags('') values(%d, '')" % (self.ts + 10)) + + for i in range(10): + tdSql.execute("insert into t1 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i)) + tdSql.execute("insert into t2 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i)) + tdSql.execute("insert into t3 values(%d, %d, 'binary%d', 'nchar%d')" % (self.ts + i, i, i, i)) + + tdSql.execute("insert into t1(ts, c2) values(%d, '')" % (self.ts + 10)) + tdSql.execute("insert into t1(ts, c3) values(%d, '')" % (self.ts + 11)) + tdSql.execute("insert into t2(ts, c3) values(%d, '')" % (self.ts + 11)) + tdSql.execute("insert into t3(ts, c3) values(%d, '')" % (self.ts + 11)) + + tdSql.query("select count(*) from st") + tdSql.checkData(0, 0, 36) + + tdSql.query("select count(*) from st where t1 is null") + tdSql.checkData(0, 0, 24) + + tdSql.query("select count(*) from st where t1 is not null") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t2 is null") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t2 is not null") + tdSql.checkData(0, 0, 24) + + tdSql.error("select count(*) from st where t2 <> null") + tdSql.error("select count(*) from st where t2 = null") + + tdSql.query("select count(*) from st where t2 = '' ") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t2 <> '' ") + tdSql.checkData(0, 0, 24) + + tdSql.query("select count(*) from st where t3 is null") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t3 is not null") + tdSql.checkData(0, 0, 24) + + tdSql.error("select count(*) from st where t3 <> null") + tdSql.error("select count(*) from st where t3 = null") + + tdSql.query("select count(*) from st where t3 = '' ") + tdSql.checkData(0, 0, 12) + + tdSql.query("select count(*) from st where t3 <> '' ") + tdSql.checkData(0, 0, 24) + + tdSql.query("select count(*) from st where c1 is not null") + tdSql.checkData(0, 0, 30) + + tdSql.query("select count(*) from st where c1 is null") + tdSql.checkData(0, 0, 6) + + tdSql.query("select count(*) from st where c2 is not null") + tdSql.checkData(0, 0, 33) + + tdSql.query("select count(*) from st where c2 is null") + tdSql.checkData(0, 0, 3) + + tdSql.error("select count(*) from st where c2 <> null") + tdSql.error("select count(*) from st where c2 = null") + + tdSql.query("select count(*) from st where c2 = '' ") + tdSql.checkData(0, 0, 3) + + tdSql.query("select count(*) from st where c2 <> '' ") + tdSql.checkData(0, 0, 30) + + tdSql.query("select count(*) from st where c3 is not null") + 
tdSql.checkData(0, 0, 33) + + tdSql.query("select count(*) from st where c3 is null") + tdSql.checkData(0, 0, 3) + + tdSql.error("select count(*) from st where c3 <> null") + tdSql.error("select count(*) from st where c3 = null") + + tdSql.query("select count(*) from st where c3 = '' ") + tdSql.checkData(0, 0, 3) + + tdSql.query("select count(*) from st where c3 <> '' ") + tdSql.checkData(0, 0, 30) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/queryPerformance.py b/tests/pytest/query/queryPerformance.py index 72af38450c..720ae745cb 100644 --- a/tests/pytest/query/queryPerformance.py +++ b/tests/pytest/query/queryPerformance.py @@ -16,10 +16,16 @@ import sys import os import taos import time +import argparse class taosdemoQueryPerformace: - def initConnection(self): + def __init__(self, clearCache, commitID, dbName, stbName, tbPerfix): + self.clearCache = clearCache + self.commitID = commitID + self.dbName = dbName + self.stbName = stbName + self.tbPerfix = tbPerfix self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" @@ -30,92 +36,109 @@ class taosdemoQueryPerformace: self.password, self.config) + def createPerfTables(self): + cursor = self.conn.cursor() + cursor.execute("create database if not exists %s" % self.dbName) + cursor.execute("use %s" % self.dbName) + cursor.execute("create table if not exists %s(ts timestamp, query_time float, commit_id binary(50)) tags(query_id int, query_sql binary(300))" % self.stbName) + + sql = "select count(*) from test.meters" + tableid = 1 + cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from test.meters" + tableid = 2 + cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select count(*) from test.meters where loc='beijing'" + tableid = 3 + cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from test.meters where areaid=10" + tableid = 4 + cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from test.t10 interval(10s)" + tableid = 5 + cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select last_row(*) from meters" + tableid = 6 + cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select * from meters" + tableid = 7 + cursor.execute("create table if not exists %s%d using %s tags(%d, '%s')" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + sql = "select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'" + tableid = 8 + cursor.execute("create table if not exists %s%d using %s tags(%d, \"%s\")" % (self.tbPerfix, tableid, self.stbName, tableid, sql)) + + cursor.close() def query(self): - cursor = self.conn.cursor() - cursor.execute("use test") + cursor = self.conn.cursor() + print("==================== query performance ====================") - totalTime = 0 - for i in range(100): - 
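The counts asserted by isNullTest.py above follow from how the three tables are populated; a quick accounting of the main figures (a sketch of the arithmetic, not framework code):

    tables = 3                    # t1, t2 and t3
    rows_per_table = 10 + 2       # ten full rows plus the two partial inserts per table

    total = tables * rows_per_table         # 36 -> select count(*) from st
    tagged = rows_per_table                 # 12 -> only table t1 has its int tag t1 set
    print(total, tagged, total - tagged)    # 36 12 24 -> "t1 is not null" vs "t1 is null"

    with_c1 = tables * 10                   # c1 is only filled by the ten-row loop
    print(with_c1, total - with_c1)         # 30 6 -> "c1 is not null" vs "c1 is null"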
if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - cursor.execute("select count(*) from test.meters") - totalTime += time.time() - startTime - print("query time for: select count(*) from test.meters %f seconds" % (totalTime / 100)) + cursor.execute("use %s" % self.dbName) + cursor.execute("select tbname, query_id, query_sql from %s" % self.stbName) - totalTime = 0 - for i in range(100): - if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - cursor.execute("select avg(f1), max(f2), min(f3) from test.meters") - totalTime += time.time() - startTime - print("query time for: select avg(f1), max(f2), min(f3) from test.meters %f seconds" % (totalTime / 100)) + for data in cursor: + table_name = data[0] + query_id = data[1] + sql = data[2] + + totalTime = 0 + cursor2 = self.conn.cursor() + cursor2.execute("use test") + for i in range(100): + if(self.clearCache == True): + # root permission is required + os.system("echo 3 > /proc/sys/vm/drop_caches") + + startTime = time.time() + cursor2.execute(sql) + totalTime += time.time() - startTime + cursor2.close() + print("query time for: %s %f seconds" % (sql, totalTime / 100)) + + cursor3 = self.conn.cursor() + cursor3.execute("insert into %s.%s values(now, %f, '%s')" % (self.dbName, table_name, totalTime / 100, self.commitID)) - totalTime = 0 - for i in range(100): - if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - cursor.execute("select count(*) from test.meters where loc='beijing'") - totalTime += time.time() - startTime - print("query time for: select count(*) from test.meters where loc='beijing' %f seconds" % (totalTime / 100)) - - totalTime = 0 - for i in range(100): - if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - cursor.execute("select avg(f1), max(f2), min(f3) from test.meters where areaid=10") - totalTime += time.time() - startTime - print("query time for: select avg(f1), max(f2), min(f3) from test.meters where areaid=10 %f seconds" % (totalTime / 100)) - - totalTime = 0 - for i in range(100): - if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - cursor.execute("select avg(f1), max(f2), min(f3) from test.t10 interval(10s)") - totalTime += time.time() - startTime - print("query time for: select avg(f1), max(f2), min(f3) from test.t10 interval(10s) %f seconds" % (totalTime / 100)) - - totalTime = 0 - for i in range(100): - if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - cursor.execute("select last_row(*) from meters") - totalTime += time.time() - startTime - print("query time for: select last_row(*) from meters %f seconds" % (totalTime / 100)) - - totalTime = 0 - for i in range(100): - if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - cursor.execute("select * from meters") - totalTime += time.time() - startTime - print("query time for: select * from meters %f seconds" % (totalTime / 100)) - - totalTime = 0 - for i in range(100): - if(sys.argv[1] == '1'): - # root permission is required - os.system("echo 3 > /proc/sys/vm/drop_caches") - startTime = time.time() - 
cursor.execute("select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000'") - totalTime += time.time() - startTime - print("query time for: select avg(f1), max(f2), min(f3) from meters where ts <= '2017-07-15 10:40:01.000' and ts <= '2017-07-15 14:00:40.000' %f seconds" % (totalTime / 100)) + cursor3.close() + cursor.close() if __name__ == '__main__': - perftest = taosdemoQueryPerformace() - perftest.initConnection() - perftest.query() \ No newline at end of file + parser = argparse.ArgumentParser() + parser.add_argument( + '-r', + '--remove-cache', + action='store_true', + default=False, + help='clear cache before query (default: False)') + parser.add_argument( + '-c', + '--commit-id', + action='store', + default='null', + type=str, + help='git commit id (default: null)') + parser.add_argument( + '-d', + '--database-name', + action='store', + default='perf', + type=str, + help='Database name to be created (default: perf)') + parser.add_argument( + '-t', + '--stable-name', + action='store', + default='query_tb', + type=str, + help='table name to be created (default: query_tb)') + parser.add_argument( + '-p', + '--table-perfix', + action='store', + default='q', + type=str, + help='table name perfix (default: q)') + + args = parser.parse_args() + perftest = taosdemoQueryPerformace(args.remove_cache, args.commit_id, args.database_name, args.stable_name, args.table_perfix) + perftest.createPerfTables() + perftest.query() diff --git a/tests/pytest/query/queryWithTaosdKilled.py b/tests/pytest/query/queryWithTaosdKilled.py new file mode 100644 index 0000000000..28f9b87636 --- /dev/null +++ b/tests/pytest/query/queryWithTaosdKilled.py @@ -0,0 +1,68 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
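The reworked queryPerformance.py above drives everything from metadata: each q<N> table's query_sql tag holds the statement to run, so the measurement itself reduces to one loop. A sketch of that loop (clearing the OS page cache needs root, as the original comment notes):

    import os
    import time

    def measure(cursor, sql, clear_cache=False, runs=100):
        total = 0.0
        for _ in range(runs):
            if clear_cache:
                # root permission is required to drop the page cache
                os.system("echo 3 > /proc/sys/vm/drop_caches")
            start = time.time()
            cursor.execute(sql)
            total += time.time() - start
        return total / runs   # average seconds per execution, written back with the commit id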
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import taos +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import numpy as np + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + self.conn = conn + tdSql.init(conn.cursor()) + + self.rowNum = 10 + self.ts = 1537146000000 + + def createOldDir(self): + path = tdDnodes.dnodes[1].getDnodeRootDir(1) + print(path) + tdLog.info("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + os.system("sudo mkdir -p %s/data/vnode/vnode2/wal/old" % path) + + def run(self): + # os.system("rm -rf %s/ " % tdDnodes.getDnodesRootDir()) + tdSql.prepare() + + tdSql.execute("create table st(ts timestamp, speed int)") + tdSql.execute("insert into st values(now, 1)") + tdSql.query("select count(*) from st") + tdSql.checkRows(1) + + + self.createOldDir() + tdLog.sleep(10) + + print("force kill taosd") + os.system("sudo kill -9 $(pgrep -x taosd)") + os.system("") + tdDnodes.start(1) + + tdSql.init(self.conn.cursor()) + tdSql.execute("use db") + tdSql.query("select count(*) from st") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/query/unionAllTest.py b/tests/pytest/query/unionAllTest.py new file mode 100644 index 0000000000..1b69c8ac4d --- /dev/null +++ b/tests/pytest/query/unionAllTest.py @@ -0,0 +1,65 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
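queryWithTaosdKilled.py above checks recovery with a stale wal/old directory in place: write a row, kill taosd hard, restart the dnode through the harness, and confirm the table is still queryable. The restart handshake is the part worth noting; a sketch (tdDnodes and tdSql are the util helpers already imported, and conn stands for the connection handed to init()):

    import os
    from util.sql import tdSql
    from util.dnodes import tdDnodes

    os.system("sudo kill -9 $(pgrep -x taosd)")   # unclean shutdown of the server
    tdDnodes.start(1)                             # bring dnode 1 back up via the harness
    tdSql.init(conn.cursor())                     # the old cursor died with the server; make a new one
    tdSql.execute("use db")
    tdSql.query("select count(*) from st")
    tdSql.checkRows(1)                            # the count query still returns a result row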
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql +import random + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.ts = 1500000000000 + self.num = 10 + + def run(self): + tdSql.prepare() + + tdSql.execute("create table st(ts timestamp, c1 int) tags(loc nchar(20))") + tdSql.execute("create table t0 using st tags('nchar0')") + tdSql.execute("create table t1 using st tags('nchar1')") + tdSql.execute("create table t2 using st tags('nchar2')") + tdSql.execute("create table t3 using st tags('nchar3')") + tdSql.execute("create table t4 using st tags('nchar4')") + tdSql.execute("create table t5 using st tags('nchar5')") + + for i in range(self.num): + tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i)) + tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i)) + tdSql.execute("insert into t2 values(%d, %d)" % (self.ts + i, i)) + tdSql.execute("insert into t3 values(%d, %d)" % (self.ts + i, i)) + tdSql.execute("insert into t4 values(%d, %d)" % (self.ts + i, i)) + tdSql.execute("insert into t5 values(%d, %d)" % (self.ts + i, i)) + + sql = ''' select * from st where loc = 'nchar0' limit 1 union all select * from st where loc = 'nchar1' limit 1 union all select * from st where loc = 'nchar2' limit 1 + union all select * from st where loc = 'nchar3' limit 1 union all select * from st where loc = 'nchar4' limit 1''' + tdSql.query(sql) + tdSql.checkRows(5) + + sql = ''' select * from st where loc = 'nchar0' limit 1 union all select * from st where loc = 'nchar1' limit 1 union all select * from st where loc = 'nchar2' limit 1 + union all select * from st where loc = 'nchar3' limit 1 union all select * from st where loc = 'nchar4' limit 1 union all select * from st where loc = 'nchar5' limit 1''' + tdSql.query(sql) + tdSql.checkRows(6) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/stream/stream2.py b/tests/pytest/stream/stream2.py index 44882f5972..d71742048a 100644 --- a/tests/pytest/stream/stream2.py +++ b/tests/pytest/stream/stream2.py @@ -87,6 +87,10 @@ class TDTestCase: tdSql.checkData(0, 3, rowNum) except Exception as e: tdLog.info(repr(e)) + + tdSql.query("show streams") + tdSql.checkRows(1) + tdSql.checkData(0, 2, 's0') tdLog.info("===== step8 =====") tdSql.query( @@ -142,6 +146,12 @@ class TDTestCase: except Exception as e: tdLog.info(repr(e)) + tdSql.query("show streams") + tdSql.checkRows(2) + tdSql.checkData(0, 2, 's1') + tdSql.checkData(1, 2, 's0') + + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/table/createTableFromAnotherDb.py b/tests/pytest/table/createTableFromAnotherDb.py new file mode 100644 index 0000000000..b40e72404c --- /dev/null +++ b/tests/pytest/table/createTableFromAnotherDb.py @@ -0,0 +1,41 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. 
+# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + tdSql.execute("create table db.cars(ts timestamp, c int) tags(id int);") + tdSql.execute("create database db2") + tdSql.error("create table db2.car1 using db.cars tags(1)") + tdSql.error("insert into db2.car1 using db1.cars tags(1) values(now, 1);") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/table/max_table_length.py b/tests/pytest/table/max_table_length.py new file mode 100644 index 0000000000..ec34f3008f --- /dev/null +++ b/tests/pytest/table/max_table_length.py @@ -0,0 +1,55 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, db_test.stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +from util.log import tdLog +from util.cases import tdCases +from util.sql import tdSql + + +class TDTestCase: + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + + print("==============step1") + + tdLog.info("check nchar") + tdSql.error("create database anal (ts timestamp ,i nchar(4094))") + tdSql.execute( + "create table anal (ts timestamp ,i nchar(4093))") + + print("==============step2") + tdLog.info("check binary") + tdSql.error("create database anal (ts timestamp ,i binary(16375))") + tdSql.execute( + "create table anal1 (ts timestamp ,i binary(16374))") + + print("==============step3") + tdLog.info("check int & binary") + tdSql.error("create table anal2 (ts timestamp ,i binary(16371),j int)") + tdSql.execute("create table anal2 (ts timestamp ,i binary(16370),j int)") + tdSql.execute("create table anal3 (ts timestamp ,i binary(16366), j int, k int)") + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py new file mode 100644 index 0000000000..6b6296e61a --- /dev/null +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -0,0 +1,93 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
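The boundaries exercised by max_table_length.py above follow from the maximum row size. Assuming the 2.x limit of 16384 bytes per row, an 8-byte timestamp, a 2-byte length prefix on binary/nchar values and 4 bytes per nchar character (these constants are this sketch's assumption, not stated in the test), the accepted sizes line up:

    MAX_ROW = 16384
    overhead = 8 + 2                      # timestamp column plus the var-length prefix

    print(MAX_ROW - overhead)             # 16374 -> largest accepted binary()
    print((MAX_ROW - overhead) // 4)      # 4093  -> largest accepted nchar()
    print(MAX_ROW - overhead - 4)         # 16370 -> binary() with one extra int column
    print(MAX_ROW - overhead - 2 * 4)     # 16366 -> binary() with two extra int columns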
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import taos +import time +import datetime +import csv +import random +import pandas as pd +import argparse +import os.path + +class taosdemoPerformace: + def __init__(self, commitID, dbName, createTableTime, insertRecordsTime, recordsPerSecond): + self.commitID = commitID + self.dbName = dbName + self.createTableTime = createTableTime + self.insertRecordsTime = insertRecordsTime + self.recordsPerSecond = recordsPerSecond + self.host = "127.0.0.1" + self.user = "root" + self.password = "taosdata" + self.config = "/etc/taosperf" + self.conn = taos.connect( + self.host, + self.user, + self.password, + self.config) + + def createTablesAndStoreData(self): + cursor = self.conn.cursor() + + cursor.execute("create database if not exists %s" % self.dbName) + cursor.execute("use %s" % self.dbName) + cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50))") + print("==================== taosdemo performance ====================") + print("create tables time: %f" % self.createTableTime) + print("insert records time: %f" % self.insertRecordsTime) + print("records per second: %f" % self.recordsPerSecond) + cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s')" % (self.createTableTime, self.insertRecordsTime, self.recordsPerSecond, self.commitID)) + cursor.execute("drop database if exists taosdemo_insert_test") + + cursor.close() + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument( + '-c', + '--commit-id', + action='store', + type=str, + help='git commit id (default: null)') + parser.add_argument( + '-d', + '--database-name', + action='store', + default='perf', + type=str, + help='Database name to be created (default: perf)') + parser.add_argument( + '-t', + '--create-table', + action='store', + type=float, + help='create table time') + parser.add_argument( + '-i', + '--insert-records', + action='store', + type=float, + help='insert records time') + parser.add_argument( + '-r', + '---records-per-second', + action='store', + type=float, + help='records per request') + + args = parser.parse_args() + + perftest = taosdemoPerformace(args.commit_id, args.database_name, args.create_table, args.insert_records, args.records_per_second) + perftest.createTablesAndStoreData() \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTest2.py new file mode 100644 index 0000000000..7d5627be43 --- /dev/null +++ b/tests/pytest/tools/taosdemoTest2.py @@ -0,0 +1,64 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import threading +import time + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numberOfTables = 10 + self.numberOfRecords = 1000000 + + def insertDataAndAlterTable(self, threadID): + if(threadID == 0): + os.system("yes | taosdemo -t %d -n %d" % (self.numberOfTables, self.numberOfRecords)) + if(threadID == 1): + print("use test") + tdSql.execute("use test") + print("alter table test.meters add column f4 int") + tdSql.execute("alter table test.meters add column f4 int") + print("insert into test.t0 values (now, 1, 2, 3, 4)") + tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4)") + + def run(self): + tdSql.prepare() + + t1 = threading.Thread(target=self.insertDataAndAlterTable, args=(0, )) + t2 = threading.Thread(target=self.insertDataAndAlterTable, args=(1, )) + + t1.start() + time.sleep(2) + t2.start() + t1.join() + t2.join() + + tdSql.query("select count(*) from test.meters") + tdSql.checkData(0, 0, self.numberOfRecords * self.numberOfTables + 1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/script/general/parser/function.sim b/tests/script/general/parser/function.sim index 34e9844f71..3505ad1a28 100644 --- a/tests/script/general/parser/function.sim +++ b/tests/script/general/parser/function.sim @@ -22,7 +22,7 @@ $db = $dbPrefix . $i $mt = $mtPrefix . $i sql drop database if exists $db -sql create database $db +sql create database $db keep 36500 sql use $db print =====================================> test case for twa in single block @@ -111,7 +111,7 @@ if $rows != 2 then return -1 endi -if $data00 != @15-08-18 00:06:00.00@ then +if $data00 != @15-08-18 00:06:00.000@ then return -1 endi @@ -219,10 +219,28 @@ if $data02 != 6 then return -1 endi +sql select twa(k) from t1 where ts>'2015-8-18 00:00:00' and ts<'2015-8-18 00:00:1' +if $rows != 0 then + return -1 +endi + sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts asc sql select twa(k),avg(k),count(1) from t1 where ts>='2015-8-18 00:00:00' and ts<='2015-8-18 00:30:00' interval(10m) order by ts desc -#todo add test case while column filte exists. +#todo add test case while column filter exists. 
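taosdemoTest2.py above checks that a schema change can land while taosdemo is still streaming inserts; the two-second head start presumably gives taosdemo time to create test.meters before the second thread alters it. The final assertion is plain arithmetic (a sketch of the expected count):

    numberOfTables = 10
    numberOfRecords = 1000000

    # taosdemo writes numberOfRecords rows into each sub-table; the second thread
    # adds exactly one more row to test.t0 once the new f4 column exists
    expected = numberOfTables * numberOfRecords + 1
    print(expected)   # 10000001, asserted against select count(*) from test.meters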
-select count(*),TWA(k) from tm0 where ts>='1970-1-1 13:43:00' and ts<='1970-1-1 13:44:10' interval(9s) +#sql select count(*),TWA(k) from tm0 where ts>='1970-1-1 13:43:00' and ts<='1970-1-1 13:44:10' interval(9s) + +sql create table tm0 (ts timestamp, k float); +sql insert into tm0 values(100000000, 5); +sql insert into tm0 values(100003000, -9); +sql select twa(k) from tm0 where tstd-2424 +sql create table t1(ts timestamp, k float) +sql insert into t1 values(now, 8.001) +sql select * from t1 where k=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k<8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k<=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k>8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k>=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k<>8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k>=8.001 and k<=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k>=8.0009999 and k<=8.001 +if $rows != 1 then + return -1 +endi + +sql select * from t1 where k>8.001 and k<=8.001 +if $rows != 0 then + return -1 +endi + +sql select * from t1 where k>=8.001 and k<8.001 +if $rows != 0 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/general/parser/testSuite.sim b/tests/script/general/parser/testSuite.sim index 8593400ce8..cea6d98679 100644 --- a/tests/script/general/parser/testSuite.sim +++ b/tests/script/general/parser/testSuite.sim @@ -103,6 +103,8 @@ sleep 500 run general/parser/timestamp.sim sleep 500 run general/parser/sliding.sim +sleep 500 +run general/parser/function.sim #sleep 500 #run general/parser/repeatStream.sim diff --git a/tests/script/general/stable/values.sim b/tests/script/general/stable/values.sim index d0c1783851..51488aabef 100644 --- a/tests/script/general/stable/values.sim +++ b/tests/script/general/stable/values.sim @@ -18,16 +18,25 @@ sql create table vdb0.vtb00 using vdb0.mt tags( 0 ) sql create table vdb0.vtb01 using vdb0.mt tags( 0 ) sql create database vdb1 -sql create table vdb1.vtb10 using vdb0.mt tags( 1 ) -sql create table vdb1.vtb11 using vdb0.mt tags( 1 ) +sql create table vdb1.mt (ts timestamp, tbcol int) TAGS(tgcol int) +sql_error create table vdb1.vtb10 using vdb0.mt tags( 1 ) +sql_error create table vdb1.vtb11 using vdb0.mt tags( 1 ) +sql create table vdb1.vtb10 using vdb1.mt tags( 1 ) +sql create table vdb1.vtb11 using vdb1.mt tags( 1 ) sql create database vdb2 -sql create table vdb2.vtb20 using vdb0.mt tags( 2 ) -sql create table vdb2.vtb21 using vdb0.mt tags( 2 ) +sql create table vdb2.mt (ts timestamp, tbcol int) TAGS(tgcol int) +sql_error create table vdb2.vtb20 using vdb0.mt tags( 2 ) +sql_error create table vdb2.vtb21 using vdb0.mt tags( 2 ) +sql create table vdb2.vtb20 using vdb2.mt tags( 2 ) +sql create table vdb2.vtb21 using vdb2.mt tags( 2 ) sql create database vdb3 -sql create table vdb3.vtb30 using vdb0.mt tags( 3 ) -sql create table vdb3.vtb31 using vdb0.mt tags( 3 ) +sql create table vdb3.mt (ts timestamp, tbcol int) TAGS(tgcol int) +sql_error create table vdb3.vtb20 using vdb0.mt tags( 2 ) +sql_error create table vdb3.vtb21 using vdb0.mt tags( 2 ) +sql create table vdb3.vtb30 using vdb3.mt tags( 3 ) +sql create table vdb3.vtb31 using vdb3.mt tags( 3 ) print =============== step2 sql insert into vdb0.vtb00 values (1519833600000 , 10) (1519833600001, 20) (1519833600002, 30) @@ -40,7 +49,7 @@ sql 
insert into vdb3.vtb30 values (1519833600000 , 13) (1519833600001, 23) (1519 sql insert into vdb3.vtb31 values (1519833600000 , 13) (1519833600001, 23) (1519833600002, 33) sql select * from vdb0.mt -if $rows != 24 then +if $rows != 6 then return -1 endi @@ -55,7 +64,7 @@ sql insert into vdb3.vtb30 values (1519833600003 , 43) (1519833600005, 53) (1519 sql insert into vdb3.vtb31 values (1519833600003 , 43) (1519833600005, 53) (1519833600004, 63) sql select * from vdb0.mt -if $rows != 48 then +if $rows != 12 then return -1 endi @@ -66,7 +75,7 @@ sql insert into vdb2.vtb20 values(1519833600006, 62) (1519833600007, 72) vdb2.vt sql insert into vdb3.vtb30 values(1519833600006, 63) (1519833600007, 73) vdb3.vtb31 values(1519833600006, 63) (1519833600007, 73) sql select * from vdb0.mt -if $rows != 64 then +if $rows != 16 then return -1 endi @@ -77,7 +86,7 @@ sql insert into vdb2.vtb20 values(1519833600008, 82) (1519833600007, 72) vdb2.vt sql insert into vdb3.vtb30 values(1519833600008, 83) (1519833600007, 73) vdb3.vtb31 values(1519833600006, 83) (1519833600007, 73) sql select * from vdb0.mt -if $rows != 68 then +if $rows != 17 then return -1 endi @@ -87,7 +96,7 @@ sql insert into vdb0.vtb01 values(1519833600009, 90) (1519833600010, 100) vdb1.v sql select * from vdb0.mt -if $rows != 84 then +if $rows != 21 then return -1 endi @@ -97,7 +106,7 @@ sql insert into vdb0.vtb01 values(1519833600012, 120) (1519833600011, 110) vdb1. sql select * from vdb0.mt -if $rows != 100 then +if $rows != 25 then return -1 endi diff --git a/tests/script/unique/arbitrator/insert_duplicationTs.sim b/tests/script/unique/arbitrator/insert_duplicationTs.sim index a873bf02ae..7c6c6e6e92 100644 --- a/tests/script/unique/arbitrator/insert_duplicationTs.sim +++ b/tests/script/unique/arbitrator/insert_duplicationTs.sim @@ -91,8 +91,11 @@ while $i < $tblNum $i = $i + 1 endw +sql show db.vgroups; +print d1: $data04 $data05 , d2: $data06 $data07 + sql select count(*) from $stb -print rows:$rows data00:$data00 +print rtest1==> rows:$rows data00:$data00 if $rows != 1 then return -1 endi @@ -103,6 +106,15 @@ endi $totalRows = $data00 +sql select count(*) from $stb +print test2==> rows:$rows data00:$data00 +sql select count(*) from $stb +print test3==> rows:$rows data00:$data00 +sql select count(*) from $stb +print test4==> rows:$rows data00:$data00 +sql select count(*) from $stb +print test5==> rows:$rows data00:$data00 + print ============== step3: insert old data(now-15d) and new data(now+15d), control data rows in order to save in cache, not falling disc sql insert into $tb values ( now - 20d , -20 ) sql insert into $tb values ( now - 40d , -40 ) @@ -153,12 +165,21 @@ if $data00 != $totalRows then return -1 endi +sql select count(*) from $stb +print data00 $data00 +sql select count(*) from $stb +print data00 $data00 +sql select count(*) from $stb +print data00 $data00 +sql select count(*) from $stb +print data00 $data00 + print ============== step5: insert two data rows: now-16d, now+16d, sql insert into $tb values ( now - 21d , -21 ) sql insert into $tb values ( now - 41d , -41 ) $totalRows = $totalRows + 2 -print ============== step5: restart dnode2, waiting sync end +print ============== step6: restart dnode2, waiting sync end system sh/exec.sh -n dnode2 -s start sleep 3000 $loopCnt = 0 @@ -192,9 +213,81 @@ endi sleep $sleepTimer # check using select +sleep 5000 sql select count(*) from $stb print data00 $data00 if $data00 != $totalRows then return -1 endi +sql select count(*) from $stb +print data00 $data00 +if $data00 != 
$totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi + +sql select count(*) from $stb +print data00 $data00 +if $data00 != $totalRows then + return -1 +endi diff --git a/tests/test/c/hashPerformance.c b/tests/test/c/hashPerformance.c index db3be0e950..111ea25a09 100644 --- a/tests/test/c/hashPerformance.c +++ b/tests/test/c/hashPerformance.c @@ -24,15 +24,17 @@ #define GREEN "\033[1;32m" #define NC "\033[0m" -int32_t capacity = 100000; -int32_t q1Times = 1; -int32_t q2Times = 1; +int32_t capacity = 128; +int32_t q1Times = 10; +int32_t q2Times = 10; int32_t keyNum = 100000; -int32_t printInterval = 10000; +int32_t printInterval = 1000; +void * hashHandle; +pthread_t thread; typedef struct HashTestRow { - int32_t size; - void * ptr; + int32_t keySize; + char key[100]; } HashTestRow; void shellParseArgument(int argc, char *argv[]); @@ -40,7 +42,7 @@ void shellParseArgument(int argc, char *argv[]); void testHashPerformance() { int64_t initialMs = taosGetTimestampMs(); _hash_fn_t hashFp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); - void * hashHandle = taosHashInit(capacity, hashFp, true); + hashHandle = taosHashInit(128, hashFp, true, HASH_NO_LOCK); int64_t startMs = taosGetTimestampMs(); float seconds = (startMs - initialMs) / 1000.0; @@ -48,17 +50,25 @@ void testHashPerformance() { for (int32_t t = 1; t <= keyNum; ++t) { HashTestRow row = {0}; - char key[100] = {0}; - int32_t keySize = sprintf(key, "0.db.st%d", t); + row.keySize = sprintf(row.key, "0.db.st%d", t); for (int32_t q = 0; q < q1Times; q++) { - taosHashGet(hashHandle, &key, keySize); + taosHashGet(hashHandle, row.key, row.keySize); } - taosHashPut(hashHandle, key, keySize, &row, sizeof(HashTestRow)); + taosHashPut(hashHandle, row.key, row.keySize, &row, sizeof(HashTestRow)); for (int32_t q = 0; q < q2Times; q++) { - taosHashGet(hashHandle, &key, keySize); + taosHashGet(hashHandle, row.key, row.keySize); + } + + // test iterator + { + HashTestRow *row = taosHashIterate(hashHandle, NULL); + while (row) { + taosHashGet(hashHandle, row->key, row->keySize); + row = taosHashIterate(hashHandle, row); + } } if (t % printInterval == 0) { @@ -80,9 +90,35 @@ void testHashPerformance() { taosHashCleanup(hashHandle); } +void *multiThreadFunc(void *param) { + for (int i = 0; i < 100; ++i) { + taosMsleep(1000); + HashTestRow *row = taosHashIterate(hashHandle, NULL); + while (row) { + taosHashGet(hashHandle, row->key, row->keySize); + row = taosHashIterate(hashHandle, row); + } + int64_t hashSize = taosHashGetSize(hashHandle); + 
pPrint("i:%d hashSize:%ld", i, hashSize); + } + + return NULL; +} + +void multiThreadTest() { + pthread_attr_t thattr; + pthread_attr_init(&thattr); + pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE); + + // Start threads to write + pthread_create(&thread, &thattr, multiThreadFunc, NULL); +} + int main(int argc, char *argv[]) { shellParseArgument(argc, argv); + multiThreadTest(); testHashPerformance(); + pthread_join(thread, NULL); } void printHelp() {