diff --git a/CMakeLists.txt b/CMakeLists.txt index be97e679d1..e0d6e82923 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -17,6 +17,7 @@ SET(TD_MQTT FALSE) SET(TD_TSDB_PLUGINS FALSE) SET(TD_STORAGE FALSE) SET(TD_TOPIC FALSE) +SET(TD_MODULE FALSE) SET(TD_COVER FALSE) SET(TD_MEM_CHECK FALSE) diff --git a/Jenkinsfile b/Jenkinsfile index d96eeaa724..bac0cce33b 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -6,6 +6,7 @@ node { } def skipstage=0 + def abortPreviousBuilds() { def currentJobName = env.JOB_NAME def currentBuildNumber = env.BUILD_NUMBER.toInteger() @@ -24,7 +25,7 @@ def abortPreviousBuilds() { build.doKill() //doTerm(),doKill(),doTerm() } } -//abort previous build +// abort previous build abortPreviousBuilds() def abort_previous(){ def buildNumber = env.BUILD_NUMBER as int @@ -32,20 +33,21 @@ def abort_previous(){ milestone(buildNumber) } def pre_test(){ - + + sh ''' sudo rmtaos || echo "taosd has not installed" ''' sh ''' - + killall -9 taosd ||echo "no taosd running" + killall -9 gdb || echo "no gdb running" cd ${WKC} - git checkout develop git reset --hard HEAD~10 >/dev/null + git checkout develop git pull >/dev/null git fetch origin +refs/pull/${CHANGE_ID}/merge git checkout -qf FETCH_HEAD - git --no-pager diff --name-only FETCH_HEAD $(git merge-base FETCH_HEAD develop)|grep -v -E '.*md|//src//connector|Jenkinsfile' - find ${WKC}/tests/pytest -name \'*\'.sql -exec rm -rf {} \\; + git clean -dfx cd ${WK} git reset --hard HEAD~10 git checkout develop @@ -53,7 +55,7 @@ def pre_test(){ cd ${WK} export TZ=Asia/Harbin date - rm -rf ${WK}/debug + git clean -dfx mkdir debug cd debug cmake .. > /dev/null @@ -79,6 +81,10 @@ pipeline { changeRequest() } steps { + script{ + abort_previous() + abortPreviousBuilds() + } sh''' cp -r ${WORKSPACE} ${WORKSPACE}.tes cd ${WORKSPACE}.tes @@ -179,6 +185,14 @@ pipeline { rm -rf /var/log/taos/* ./handle_crash_gen_val_log.sh ''' + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + ./handle_taosd_val_log.sh + ''' + } timeout(time: 45, unit: 'MINUTES'){ sh ''' date @@ -209,6 +223,11 @@ pipeline { cd ${WKC}/tests ./test-all.sh b3fq date''' + sh ''' + date + cd ${WKC}/tests + ./test-all.sh full example + date''' } } } @@ -266,7 +285,7 @@ pipeline { date cd ${WKC}/tests ./test-all.sh b7fq - date''' + date''' } } } diff --git a/README-CN.md b/README-CN.md index 9601cde3af..d4c10e71d6 100644 --- a/README-CN.md +++ b/README-CN.md @@ -258,10 +258,16 @@ TDengine 社区生态中也有一些非常友好的第三方连接器,可以 TDengine 的测试框架和所有测试例全部开源。 -点击[这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。 +点击 [这里](tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md),了解如何运行测试例和添加新的测试例。 # 成为社区贡献者 -点击[这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。 -#加入技术交流群 -TDengine官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小T为好友,即可入群。 +点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。 + +# 加入技术交流群 + +TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小T为好友,即可入群。 + +# [谁在使用TDengine](https://github.com/taosdata/TDengine/issues/2432) + +欢迎所有 TDengine 用户及贡献者在 [这里](https://github.com/taosdata/TDengine/issues/2432) 分享您在当前工作中开发/使用 TDengine 的故事。 diff --git a/README.md b/README.md index 79c140c741..45a955f458 100644 --- a/README.md +++ b/README.md @@ -250,3 +250,6 @@ Please follow the [contribution guidelines](CONTRIBUTING.md) to contribute to th Add WeChat “tdengine” to join the group,you can communicate with other users. 
+# [User List](https://github.com/taosdata/TDengine/issues/2432) + +If you are using TDengine and find it helpful, or if you'd like to contribute, please add your company to the [user list](https://github.com/taosdata/TDengine/issues/2432) and let us know your needs. diff --git a/cmake/define.inc b/cmake/define.inc index ff4583d02b..6f49630d5c 100755 --- a/cmake/define.inc +++ b/cmake/define.inc @@ -29,6 +29,10 @@ IF (TD_TOPIC) ADD_DEFINITIONS(-D_TOPIC) ENDIF () +IF (TD_MODULE) + ADD_DEFINITIONS(-D_MODULE) +ENDIF () + IF (TD_GODLL) ADD_DEFINITIONS(-D_TD_GO_DLL_) ENDIF () diff --git a/cmake/input.inc b/cmake/input.inc index b1a993c996..00e0e1bc0f 100755 --- a/cmake/input.inc +++ b/cmake/input.inc @@ -17,6 +17,14 @@ ELSEIF (${TOPIC} MATCHES "false") MESSAGE(STATUS "Build without topic plugins") ENDIF () +IF (${TD_MODULE} MATCHES "true") + SET(TD_MODULE TRUE) + MESSAGE(STATUS "Build with module plugins") +ELSEIF (${TD_MODULE} MATCHES "false") + SET(TD_MODULE FALSE) + MESSAGE(STATUS "Build without module plugins") +ENDIF () + IF (${COVER} MATCHES "true") SET(TD_COVER TRUE) MESSAGE(STATUS "Build with test coverage") diff --git a/cmake/install.inc b/cmake/install.inc index 1d50ca292d..5823ef743e 100755 --- a/cmake/install.inc +++ b/cmake/install.inc @@ -32,7 +32,7 @@ ELSEIF (TD_WINDOWS) #INSTALL(TARGETS taos RUNTIME DESTINATION driver) #INSTALL(TARGETS shell RUNTIME DESTINATION .) IF (TD_MVN_INSTALLED) - INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.22-dist.jar DESTINATION connector/jdbc) + INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.25-dist.jar DESTINATION connector/jdbc) ENDIF () ELSEIF (TD_DARWIN) SET(TD_MAKE_INSTALL_SH "${TD_COMMUNITY_DIR}/packaging/tools/make_install.sh") diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index c16673cba5..aba10a14e3 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -120,7 +120,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 * [TDengine性能对比测试工具](https://www.taosdata.com/blog/2020/01/18/1166.html) * [IDEA数据库管理工具可视化使用TDengine](https://www.taosdata.com/blog/2020/08/27/1767.html) * [基于eletron开发的跨平台TDengine图形化管理工具](https://github.com/skye0207/TDengineGUI) -* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/alibaba/DataX) +* [DataX,支持TDengine的离线数据采集/同步工具](https://github.com/wgzhao/DataX)(文档:[读取插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/reader/tdenginereader.md)、[写入插件](https://github.com/wgzhao/DataX/blob/master/docs/src/main/sphinx/writer/tdenginewriter.md)) ## TDengine与其他数据库的对比测试 diff --git a/documentation20/cn/02.getting-started/docs.md b/documentation20/cn/02.getting-started/docs.md index c9c82942f3..43392e0325 100644 --- a/documentation20/cn/02.getting-started/docs.md +++ b/documentation20/cn/02.getting-started/docs.md @@ -179,19 +179,20 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); ### TDengine服务器支持的平台列表 -| | **CentOS** **6/7/8** | **Ubuntu** **16/18/20** | **Other Linux** | **统信****UOS** | **银河****/****中标麒麟** | **凝思** **V60/V80** | -| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | -| X64 | ● | ● | | ○ | ● | ● | -| 树莓派ARM32 | | ● | ● | | | | -| 龙芯MIPS64 | | | ● | | | | -| 鲲鹏 ARM64 | | ○ | ○ | | ● | | -| 申威 Alpha64 | | | ○ | ● | | | -| 飞腾ARM64 | | ○优麒麟 | | | | | -| 海光X64 | ● | ● | ● | ○ | ● | ● | -| 瑞芯微ARM64/32 | | | ○ | | | | -| 全志ARM64/32 | | | ○ | | | | -| 炬力ARM64/32 | | | ○ | | | | -| TI ARM32 | | | ○ | | |
| +| | **CentOS 6/7/8** | **Ubuntu 16/18/20** | **Other Linux** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **华为 EulerOS** | +| -------------- | --------------------- | ------------------------ | --------------- | --------------- | ------------------------- | --------------------- | --------------------- | +| X64 | ● | ● | | ○ | ● | ● | ● | +| 树莓派 ARM32 | | ● | ● | | | | | +| 龙芯 MIPS64 | | | ● | | | | | +| 鲲鹏 ARM64 | | ○ | ○ | | ● | | | +| 申威 Alpha64 | | | ○ | ● | | | | +| 飞腾 ARM64 | | ○ 优麒麟 | | | | | | +| 海光 X64 | ● | ● | ● | ○ | ● | ● | | +| 瑞芯微 ARM64/32 | | | ○ | | | | | +| 全志 ARM64/32 | | | ○ | | | | | +| 炬力 ARM64/32 | | | ○ | | | | | +| TI ARM32 | | | ○ | | | | | +| 华为云 ARM64 | | | | | | | ● | 注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 @@ -203,7 +204,7 @@ taos> select avg(f1), max(f2), min(f3) from test.t10 interval(10s); 对照矩阵如下: -| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS ** **龙芯** | **Alpha ** **申威** | **X64 ** **海光** | +| **CPU** | **X64 64bit** | | | **X86 32bit** | **ARM64** | **ARM32** | **MIPS 龙芯** | **Alpha 申威** | **X64 海光** | | ----------- | --------------- | --------- | --------- | --------------- | --------- | --------- | ------------------- | -------------------- | ------------------ | | **OS** | **Linux** | **Win64** | **Win32** | **Win32** | **Linux** | **Linux** | **Linux** | **Linux** | **Linux** | | **C/C++** | ● | ● | ● | ○ | ● | ● | ● | ● | ● | diff --git a/documentation20/cn/07.advanced-features/docs.md b/documentation20/cn/07.advanced-features/docs.md index bdf93fdc3d..650a2ca96b 100644 --- a/documentation20/cn/07.advanced-features/docs.md +++ b/documentation20/cn/07.advanced-features/docs.md @@ -1,27 +1,16 @@ # 高级功能 -## 连续查询(Continuous Query) +## 连续查询(Continuous Query) -连续查询是TDengine定期自动执行的查询,采用滑动窗口的方式进行计算,是一种简化的时间驱动的流式计算。 -针对库中的表或超级表,TDengine可提供定期自动执行的连续查询, -用户可让TDengine推送查询的结果,也可以将结果再写回到TDengine中。 -每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。 -在定义连续查询的时候需要指定时间窗口(time window, 参数interval)大小和每次前向增量时间(forward sliding times, 参数sliding)。 +连续查询是TDengine定期自动执行的查询,采用滑动窗口的方式进行计算,是一种简化的时间驱动的流式计算。针对库中的表或超级表,TDengine可提供定期自动执行的连续查询,用户可让TDengine推送查询的结果,也可以将结果再写回到TDengine中。每次执行的查询是一个时间窗口,时间窗口随着时间流动向前滑动。在定义连续查询的时候需要指定时间窗口(time window, 参数interval)大小和每次前向增量时间(forward sliding times, 参数sliding)。 -TDengine的连续查询采用时间驱动模式,可以直接使用TAOS SQL进行定义,不需要额外的操作。 -使用连续查询,可以方便快捷地按照时间窗口生成结果,从而对原始采集数据进行降采样(down sampling)。 -用户通过TAOS SQL定义连续查询以后,TDengine自动在最后的一个完整的时间周期末端拉起查询, -并将计算获得的结果推送给用户或者写回TDengine。 +TDengine的连续查询采用时间驱动模式,可以直接使用TAOS SQL进行定义,不需要额外的操作。使用连续查询,可以方便快捷地按照时间窗口生成结果,从而对原始采集数据进行降采样(down sampling)。用户通过TAOS SQL定义连续查询以后,TDengine自动在最后的一个完整的时间周期末端拉起查询,并将计算获得的结果推送给用户或者写回TDengine。 TDengine提供的连续查询与普通流计算中的时间窗口计算具有以下区别: -- 不同于流计算的实时反馈计算结果,连续查询只在时间窗口关闭以后才开始计算。 -例如时间周期是1天,那么当天的结果只会在23:59:59以后才会生成。 -- 如果有历史记录写入到已经计算完成的时间区间,连续查询并不会重新进行计算, -也不会重新将结果推送给用户。对于写回TDengine的模式,也不会更新已经存在的计算结果。 -- 使用连续查询推送结果的模式,服务端并不缓存客户端计算状态,也不提供Exactly-Once的语意保证。 -如果用户的应用端崩溃,再次拉起的连续查询将只会从再次拉起的时间开始重新计算最近的一个完整的时间窗口。 -如果使用写回模式,TDengine可确保数据写回的有效性和连续性。 +- 不同于流计算的实时反馈计算结果,连续查询只在时间窗口关闭以后才开始计算。例如时间周期是1天,那么当天的结果只会在23:59:59以后才会生成。 +- 如果有历史记录写入到已经计算完成的时间区间,连续查询并不会重新进行计算,也不会重新将结果推送给用户。对于写回TDengine的模式,也不会更新已经存在的计算结果。 +- 使用连续查询推送结果的模式,服务端并不缓存客户端计算状态,也不提供Exactly-Once的语意保证。如果用户的应用端崩溃,再次拉起的连续查询将只会从再次拉起的时间开始重新计算最近的一个完整的时间窗口。如果使用写回模式,TDengine可确保数据写回的有效性和连续性。 ### 使用连续查询 @@ -40,23 +29,19 @@ create table D1002 using meters tags ("Beijing.Haidian", 2); select avg(voltage) from meters interval(1m) sliding(30s); ``` -每次执行这条语句,都会重新计算所有数据。 -如果需要每隔30秒执行一次来增量计算最近一分钟的数据, -可以把上面的语句改进成下面的样子,每次使用不同的 `startTime` 并定期执行: +每次执行这条语句,都会重新计算所有数据。 
如果需要每隔30秒执行一次来增量计算最近一分钟的数据,可以把上面的语句改进成下面的样子,每次使用不同的 `startTime` 并定期执行: ```sql select avg(voltage) from meters where ts > {startTime} interval(1m) sliding(30s); ``` -这样做没有问题,但TDengine提供了更简单的方法, -只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了, 例如: +这样做没有问题,但TDengine提供了更简单的方法,只要在最初的查询语句前面加上 `create table {tableName} as ` 就可以了, 例如: ```sql create table avg_vol as select avg(voltage) from meters interval(1m) sliding(30s); ``` -会自动创建一个名为 `avg_vol` 的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句, -并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。 例如: +会自动创建一个名为 `avg_vol` 的新表,然后每隔30秒,TDengine会增量执行 `as` 后面的 SQL 语句,并将查询结果写入这个表中,用户程序后续只要从 `avg_vol` 中查询数据即可。 例如: ```mysql taos> select * from avg_vol; @@ -70,43 +55,27 @@ taos> select * from avg_vol; 需要注意,查询时间窗口的最小值是10毫秒,没有时间窗口范围的上限。 -此外,TDengine还支持用户指定连续查询的起止时间。 -如果不输入开始时间,连续查询将从第一条原始数据所在的时间窗口开始; -如果没有输入结束时间,连续查询将永久运行; -如果用户指定了结束时间,连续查询在系统时间达到指定的时间以后停止运行。 -比如使用下面的SQL创建的连续查询将运行一小时,之后会自动停止。 +此外,TDengine还支持用户指定连续查询的起止时间。如果不输入开始时间,连续查询将从第一条原始数据所在的时间窗口开始;如果没有输入结束时间,连续查询将永久运行;如果用户指定了结束时间,连续查询在系统时间达到指定的时间以后停止运行。比如使用下面的SQL创建的连续查询将运行一小时,之后会自动停止。 ```mysql create table avg_vol as select avg(voltage) from meters where ts > now and ts <= now + 1h interval(1m) sliding(30s); ``` -需要说明的是,上面例子中的 `now` 是指创建连续查询的时间,而不是查询执行的时间,否则,查询就无法自动停止了。 -另外,为了尽量避免原始数据延迟写入导致的问题,TDengine中连续查询的计算有一定的延迟。 -也就是说,一个时间窗口过去后,TDengine并不会立即计算这个窗口的数据, -所以要稍等一会(一般不会超过1分钟)才能查到计算结果。 +需要说明的是,上面例子中的 `now` 是指创建连续查询的时间,而不是查询执行的时间,否则,查询就无法自动停止了。另外,为了尽量避免原始数据延迟写入导致的问题,TDengine中连续查询的计算有一定的延迟。也就是说,一个时间窗口过去后,TDengine并不会立即计算这个窗口的数据,所以要稍等一会(一般不会超过1分钟)才能查到计算结果。 ### 管理连续查询 -用户可在控制台中通过 `show streams` 命令来查看系统中全部运行的连续查询, -并可以通过 `kill stream` 命令杀掉对应的连续查询。 -后续版本会提供更细粒度和便捷的连续查询管理命令。 +用户可在控制台中通过 `show streams` 命令来查看系统中全部运行的连续查询,并可以通过 `kill stream` 命令杀掉对应的连续查询。后续版本会提供更细粒度和便捷的连续查询管理命令。 -## 数据订阅(Publisher/Subscriber) +## 数据订阅(Publisher/Subscriber) -基于数据天然的时间序列特性,TDengine的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致, -均可视为系统中插入一条带时间戳的新记录。 -同时,TDengine在内部严格按照数据时间序列单调递增的方式保存数据。 -本质上来说,TDengine中里每一张表均可视为一个标准的消息队列。 +基于数据天然的时间序列特性,TDengine的数据写入(insert)与消息系统的数据发布(pub)逻辑上一致,均可视为系统中插入一条带时间戳的新记录。同时,TDengine在内部严格按照数据时间序列单调递增的方式保存数据。本质上来说,TDengine中里每一张表均可视为一个标准的消息队列。 -TDengine内嵌支持轻量级的消息订阅与推送服务。 -使用系统提供的API,用户可使用普通查询语句订阅数据库中的一张或多张表。 -订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达, -有新的记录到达就会将结果反馈到客户。 +TDengine内嵌支持轻量级的消息订阅与推送服务。使用系统提供的API,用户可使用普通查询语句订阅数据库中的一张或多张表。订阅的逻辑和操作状态的维护均是由客户端完成,客户端定时轮询服务器是否有新的记录到达,有新的记录到达就会将结果反馈到客户。 -TDengine的订阅与推送服务的状态是客户端维持,TDengine服务器并不维持。 -因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。 +TDengine的订阅与推送服务的状态是客户端维持,TDengine服务器并不维持。因此如果应用重启,从哪个时间点开始获取最新数据,由应用决定。 TDengine的API中,与订阅相关的主要有以下三个: @@ -116,12 +85,9 @@ taos_consume taos_unsubscribe ``` -这些API的文档请见 [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector/), -下面仍以智能电表场景为例介绍一下它们的具体用法(超级表和子表结构请参考上一节“连续查询”), -完整的示例代码可以在 [这里](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c) 找到。 +这些API的文档请见 [C/C++ Connector](https://www.taosdata.com/cn/documentation/connector#c-cpp),下面仍以智能电表场景为例介绍一下它们的具体用法(超级表和子表结构请参考上一节“连续查询”),完整的示例代码可以在 [这里](https://github.com/taosdata/TDengine/blob/master/tests/examples/c/subscribe.c) 找到。 -如果我们希望当某个电表的电流超过一定限制(比如10A)后能得到通知并进行一些处理, 有两种方法: -一是分别对每张子表进行查询,每次查询后记录最后一条数据的时间戳,后续只查询这个时间戳之后的数据: +如果我们希望当某个电表的电流超过一定限制(比如10A)后能得到通知并进行一些处理, 有两种方法:一是分别对每张子表进行查询,每次查询后记录最后一条数据的时间戳,后续只查询这个时间戳之后的数据: ```sql select * from D1001 where ts > {last_timestamp1} and current > 10; @@ -129,8 +95,7 @@ select * from D1002 where ts > {last_timestamp2} and current > 10; ... 
``` -这确实可行,但随着电表数量的增加,查询数量也会增加,客户端和服务端的性能都会受到影响, -当电表数增长到一定的程度,系统就无法承受了。 +这确实可行,但随着电表数量的增加,查询数量也会增加,客户端和服务端的性能都会受到影响,当电表数增长到一定的程度,系统就无法承受了。 另一种方法是对超级表进行查询。这样,无论有多少电表,都只需一次查询: @@ -138,12 +103,7 @@ select * from D1002 where ts > {last_timestamp2} and current > 10; select * from meters where ts > {last_timestamp} and current > 10; ``` -但是,如何选择 `last_timestamp` 就成了一个新的问题。 -因为,一方面数据的产生时间(也就是数据时间戳)和数据入库的时间一般并不相同,有时偏差还很大; -另一方面,不同电表的数据到达TDengine的时间也会有差异。 -所以,如果我们在查询中使用最慢的那台电表的数据的时间戳作为 `last_timestamp`, -就可能重复读入其它电表的数据; -如果使用最快的电表的时间戳,其它电表的数据就可能被漏掉。 +但是,如何选择 `last_timestamp` 就成了一个新的问题。因为,一方面数据的产生时间(也就是数据时间戳)和数据入库的时间一般并不相同,有时偏差还很大;另一方面,不同电表的数据到达TDengine的时间也会有差异。所以,如果我们在查询中使用最慢的那台电表的数据的时间戳作为 `last_timestamp`,就可能重复读入其它电表的数据;如果使用最快的电表的时间戳,其它电表的数据就可能被漏掉。 TDengine的订阅功能为上面这个问题提供了一个彻底的解决方案。 @@ -160,47 +120,29 @@ if (async) { } ``` -TDengine中的订阅既可以是同步的,也可以是异步的, -上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。 -这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据, -而异步则由API在内部的另一个线程中调用`taos_consume`, -然后把拉取到的数据交给回调函数`subscribe_callback`去处理。 +TDengine中的订阅既可以是同步的,也可以是异步的,上面的代码会根据从命令行获取的参数`async`的值来决定使用哪种方式。这里,同步的意思是用户程序要直接调用`taos_consume`来拉取数据,而异步则由API在内部的另一个线程中调用`taos_consume`,然后把拉取到的数据交给回调函数`subscribe_callback`去处理。 -参数`taos`是一个已经建立好的数据库连接,在同步模式下无特殊要求。 -但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误, -因为回调函数在API的内部线程中被调用,而TDengine的部分API不是线程安全的。 +参数`taos`是一个已经建立好的数据库连接,在同步模式下无特殊要求。但在异步模式下,需要注意它不会被其它线程使用,否则可能导致不可预计的错误,因为回调函数在API的内部线程中被调用,而TDengine的部分API不是线程安全的。 -参数`sql`是查询语句,可以在其中使用where子句指定过滤条件。 -在我们的例子中,如果只想订阅电流超过10A时的数据,可以这样写: +参数`sql`是查询语句,可以在其中使用where子句指定过滤条件。在我们的例子中,如果只想订阅电流超过10A时的数据,可以这样写: ```sql select * from meters where current > 10; ``` -注意,这里没有指定起始时间,所以会读到所有时间的数据。 -如果只想从一天前的数据开始订阅,而不需要更早的历史数据,可以再加上一个时间条件: +注意,这里没有指定起始时间,所以会读到所有时间的数据。如果只想从一天前的数据开始订阅,而不需要更早的历史数据,可以再加上一个时间条件: ```sql select * from meters where ts > now - 1d and current > 10; ``` -订阅的`topic`实际上是它的名字,因为订阅功能是在客户端API中实现的, -所以没必要保证它全局唯一,但需要它在一台客户端机器上唯一。 +订阅的`topic`实际上是它的名字,因为订阅功能是在客户端API中实现的,所以没必要保证它全局唯一,但需要它在一台客户端机器上唯一。 -如果名`topic`的订阅不存在,参数`restart`没有意义; -但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个`topic`时, -`restart`就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。 -本例中,如果`restart`是 **true**(非零值),用户程序肯定会读到所有数据。 -但如果这个订阅之前就存在了,并且已经读取了一部分数据, -且`restart`是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。 +如果名`topic`的订阅不存在,参数`restart`没有意义;但如果用户程序创建这个订阅后退出,当它再次启动并重新使用这个`topic`时,`restart`就会被用于决定是从头开始读取数据,还是接续上次的位置进行读取。本例中,如果`restart`是 **true**(非零值),用户程序肯定会读到所有数据。但如果这个订阅之前就存在了,并且已经读取了一部分数据,且`restart`是 **false**(**0**),用户程序就不会读到之前已经读取的数据了。 -`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。 -在同步模式下,如果前后两次调用`taos_consume`的时间间隔小于此时间, -`taos_consume`会阻塞,直到间隔超过此时间。 -异步模式下,这个时间是两次调用回调函数的最小时间间隔。 +`taos_subscribe`的最后一个参数是以毫秒为单位的轮询周期。在同步模式下,如果前后两次调用`taos_consume`的时间间隔小于此时间,`taos_consume`会阻塞,直到间隔超过此时间。异步模式下,这个时间是两次调用回调函数的最小时间间隔。 -`taos_subscribe`的倒数第二个参数用于用户程序向回调函数传递附加参数, -订阅API不对其做任何处理,只原样传递给回调函数。此参数在同步模式下无意义。 +`taos_subscribe`的倒数第二个参数用于用户程序向回调函数传递附加参数,订阅API不对其做任何处理,只原样传递给回调函数。此参数在同步模式下无意义。 订阅创建以后,就可以消费其数据了,同步模式下,示例代码是下面的 else 部分: @@ -219,9 +161,7 @@ if (async) { } ``` -这里是一个 **while** 循环,用户每按一次回车键就调用一次`taos_consume`, -而`taos_consume`的返回值是查询到的结果集,与`taos_use_result`完全相同, -例子中使用这个结果集的代码是函数`print_result`: +这里是一个 **while** 循环,用户每按一次回车键就调用一次`taos_consume`,而`taos_consume`的返回值是查询到的结果集,与`taos_use_result`完全相同,例子中使用这个结果集的代码是函数`print_result`: ```c void print_result(TAOS_RES* res, int blockFetch) { @@ -247,8 +187,7 @@ void print_result(TAOS_RES* res, int blockFetch) { } ``` -其中的 `taos_print_row` 用于处理订阅到数据,在我们的例子中,它会打印出所有符合条件的记录。 -而异步模式下,消费订阅到的数据则显得更为简单: +其中的 `taos_print_row` 用于处理订阅到数据,在我们的例子中,它会打印出所有符合条件的记录。而异步模式下,消费订阅到的数据则显得更为简单: ```c void 
subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { @@ -262,11 +201,7 @@ void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int code) { taos_unsubscribe(tsub, keep); ``` -其第二个参数,用于决定是否在客户端保留订阅的进度信息。 -如果这个参数是**false**(**0**),那无论下次调用`taos_subscribe`的时的`restart`参数是什么, -订阅都只能重新开始。 -另外,进度信息的保存位置是 *{DataDir}/subscribe/* 这个目录下, -每个订阅有一个与其`topic`同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 +其第二个参数,用于决定是否在客户端保留订阅的进度信息。如果这个参数是**false**(**0**),那无论下次调用`taos_subscribe`时的`restart`参数是什么,订阅都只能重新开始。另外,进度信息的保存位置是 *{DataDir}/subscribe/* 这个目录下,每个订阅有一个与其`topic`同名的文件,删掉某个文件,同样会导致下次创建其对应的订阅时只能重新开始。 代码介绍完毕,我们来看一下实际的运行效果。假设: @@ -289,12 +224,11 @@ $ taos > insert into D1001 values(now, 12, 220, 1); ``` -这时,因为电流超过了10A,您应该可以看到示例程序将它输出到了屏幕上。 -您可以继续插入一些数据观察示例程序的输出。 +这时,因为电流超过了10A,您应该可以看到示例程序将它输出到了屏幕上。您可以继续插入一些数据观察示例程序的输出。 ### Java 使用数据订阅功能 -订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation/connector/)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 +订阅功能也提供了 Java 开发接口,相关说明请见 [Java Connector](https://www.taosdata.com/cn/documentation/connector/java#subscribe)。需要注意的是,目前 Java 接口没有提供异步订阅模式,但用户程序可以通过创建 `TimerTask` 等方式达到同样的效果。 下面以一个示例程序介绍其具体使用方法。它所完成的功能与前面介绍的 C 语言示例基本相同,也是订阅数据库中所有电流超过 10A 的记录。 @@ -404,7 +338,7 @@ ts: 1597466400000 current: 12.4 voltage: 220 phase: 1 location: Beijing.Chaoyang ``` -## 缓存(Cache) +## 缓存(Cache) TDengine采用时间驱动缓存管理策略(First-In-First-Out,FIFO),又称为写驱动的缓存管理机制。这种策略有别于读驱动的数据缓存模式(Least-Recent-Use,LRU),直接将最近写入的数据保存在系统的缓存中。当缓存达到临界值的时候,将最早的数据批量写入磁盘。一般意义上来说,对于物联网数据的使用,用户最为关心最近产生的数据,即当前状态。TDengine充分利用了这一特性,将最近到达的(当前状态)数据保存在缓存中。 @@ -423,7 +357,7 @@ select last_row(voltage) from meters where location='Beijing.Chaoyang'; 该SQL语句将获取所有位于北京朝阳区的电表最后记录的电压值。 -## 报警监测(Alert) +## 报警监测(Alert) 在 TDengine 的应用场景中,报警监测是一个常见需求,从概念上说,它要求程序从最近一段时间的数据中筛选出符合一定条件的数据,并基于这些数据根据定义好的公式计算出一个结果,当这个结果符合某个条件且持续一定时间后,以某种形式通知用户。 diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index 3ed7343579..3442a2248c 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -285,7 +285,7 @@ JDBC连接器可能报错的错误码包括3种:JDBC driver本身的报错( * https://github.com/taosdata/TDengine/blob/develop/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java * https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h -### 订阅 +### 订阅 #### 创建 @@ -471,9 +471,11 @@ TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对 | BIGINT | java.lang.Long | | FLOAT | java.lang.Float | | DOUBLE | java.lang.Double | -| SMALLINT, TINYINT | java.lang.Short | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | | BOOL | java.lang.Boolean | -| BINARY, NCHAR | java.lang.String | +| BINARY | byte array | +| NCHAR | java.lang.String | diff --git a/documentation20/cn/10.cluster/docs.md b/documentation20/cn/10.cluster/docs.md index 7b4073a883..a430ce8277 100644 --- a/documentation20/cn/10.cluster/docs.md +++ b/documentation20/cn/10.cluster/docs.md @@ -111,9 +111,10 @@ taos> **提示:** -- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的firstEP。 -- firstEp这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的mnode的End Point列表,不再依赖这个参数。 -- 两个没有配置firstEp参数的数据节点dnode启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。 +- 任何已经加入集群在线的数据节点,都可以作为后续待加入节点的 firstEP。 +- firstEp 这个参数仅仅在该数据节点首次加入集群时有作用,加入集群后,该数据节点会保存最新的 mnode 的 End Point 列表,不再依赖这个参数。 + - 接下来,配置文件中的 firstEp 参数就主要在客户端连接的时候使用了,例如 taos shell 如果不加参数,会默认连接由 firstEp 指定的节点。 +- 两个没有配置 firstEp 参数的数据节点 dnode 
启动后,会独立运行起来。这个时候,无法将其中一个数据节点加入到另外一个数据节点,形成集群。**无法将两个独立的集群合并成为新的集群**。 ## 数据节点管理 diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 027828d903..cc8689786d 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -6,19 +6,27 @@ ### 内存需求 -每个 DB 可以创建固定数目的 vgroup,默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置;vgroup 中的每个副本会是一个 vnode;每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算: +每个 Database 可以创建固定数目的 vgroup,默认与 CPU 核数相同,可通过 maxVgroupsPerDb 配置;vgroup 中的每个副本会是一个 vnode;每个 vnode 会占用固定大小的内存(大小与数据库的配置参数 blocks 和 cache 有关);每个 Table 会占用与标签总长度有关的内存;此外,系统会有一些固定的内存开销。因此,每个 DB 需要的系统内存可通过如下公式计算: ``` -Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB) +Database Memory Size = maxVgroupsPerDb * (blocks * cache + 10MB) + numOfTables * (tagSizePerTable + 0.5KB) ``` -示例:假设是 4 核机器,cache 是缺省大小 16M, blocks 是缺省值 6,假设有 10 万张表,标签总长度是 256 字节,则总的内存需求为:4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。 +示例:假设是 4 核机器,cache 是缺省大小 16M, blocks 是缺省值 6,并且一个 DB 中有 10 万张表,标签总长度是 256 字节,则这个 DB 总的内存需求为:4 \* (16 \* 6 + 10) + 100000 \* (0.25 + 0.5) / 1000 = 499M。 -注意:从这个公式计算得到的内存容量,应理解为系统的“必要需求”,而不是“内存总数”。在实际运行的生产系统中,由于操作系统缓存、资源管理调度等方面的需要,内存规划应当在计算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。 +在实际的系统运维中,我们通常会更关心 TDengine 服务进程(taosd)会占用的内存量。 +``` +taosd 内存总量 = vnode 内存 + mnode 内存 + 查询内存 +``` -实际运行的系统往往会根据数据特点的不同,将数据存放在不同的 DB 里。因此做规划时,也需要考虑。 +其中: +1. “vnode 内存”指的是集群中所有的 Database 存储分摊到当前 taosd 节点上所占用的内存资源。可以按上文“Database Memory Size”计算公式估算每个 DB 的内存占用量进行加总,再按集群中总共的 TDengine 节点数做平均(如果设置为多副本,则还需要乘以对应的副本倍数)。 +2. “mnode 内存”指的是集群中管理节点所占用的资源。如果一个 taosd 节点上分布有 mnode 管理节点,则内存消耗还需要增加“0.2KB * 集群中数据表总数”。 +3. 
“查询内存”指的是服务端处理查询请求时所需要占用的内存。单条查询语句至少会占用“0.2KB * 查询涉及的数据表总数”的内存量。 -如果内存充裕,可以加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。 +注意:以上内存估算方法,主要讲解了系统的“必须内存需求”,而不是“内存总数上限”。在实际运行的生产环境中,由于操作系统缓存、资源管理调度等方面的原因,内存规划应当在估算结果的基础上保留一定冗余,以维持系统状态和系统性能的稳定性。并且,生产环境通常会配置系统资源的监控工具,以便及时发现硬件资源的紧缺情况。 + +最后,如果内存充裕,可以考虑加大 Blocks 的配置,这样更多数据将保存在内存里,提高查询速度。 ### CPU 需求 @@ -112,17 +120,17 @@ taosd -C 不同应用场景的数据往往具有不同的数据特征,比如保留天数、副本数、采集频次、记录大小、采集点的数量、压缩等都可完全不同。为获得在存储上的最高效率,TDengine提供如下存储相关的系统配置参数: - days:一个数据文件存储数据的时间跨度,单位为天,默认值:10。 -- keep:数据库中数据保留的天数,单位为天,默认值:3650。 +- keep:数据库中数据保留的天数,单位为天,默认值:3650。(可通过 alter database 修改) - minRows:文件块中记录的最小条数,单位为条,默认值:100。 - maxRows:文件块中记录的最大条数,单位为条,默认值:4096。 -- comp:文件压缩标志位,0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。 +- comp:文件压缩标志位,0:关闭;1:一阶段压缩;2:两阶段压缩。默认值:2。(可通过 alter database 修改) - walLevel:WAL级别。1:写wal,但不执行fsync;2:写wal, 而且执行fsync。默认值:1。 - fsync:当wal设置为2时,执行fsync的周期。设置为0,表示每次写入,立即执行fsync。单位为毫秒,默认值:3000。 - cache:内存块的大小,单位为兆字节(MB),默认值:16。 -- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。 -- replica:副本个数,取值范围:1-3。单位为个,默认值:1 -- precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms -- cacheLast:是否在内存中缓存子表 last_row,0:关闭;1:开启。默认值:0。(从 2.0.11 版本开始支持此参数) +- blocks:每个VNODE(TSDB)中有多少cache大小的内存块。因此一个VNODE的用的内存大小粗略为(cache * blocks)。单位为块,默认值:4。(可通过 alter database 修改) +- replica:副本个数,取值范围:1-3。单位为个,默认值:1。(可通过 alter database 修改) +- precision:时间戳精度标识,ms表示毫秒,us表示微秒。默认值:ms。 +- cacheLast:是否在内存中缓存子表 last_row,0:关闭;1:开启。默认值:0。(可通过 alter database 修改)(从 2.0.11 版本开始支持此参数) 对于一个应用场景,可能有多种数据特征的数据并存,最佳的设计是将具有相同数据特征的表放在一个库里,这样一个应用有多个库,而每个库可以配置不同的存储参数,从而保证系统有最优的性能。TDengine允许应用在创建库时指定上述存储参数,如果指定,该参数就将覆盖对应的系统配置参数。举例,有下述SQL: diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index b4fa2b160a..58191e0bd8 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -29,21 +29,21 @@ taos> DESCRIBE meters; ## 支持的数据类型 -使用TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则: +使用 TDengine,最重要的是时间戳。创建并插入记录、查询历史记录的时候,均需要指定时间戳。时间戳有如下规则: -- 时间格式为```YYYY-MM-DD HH:mm:ss.MS```, 默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` -- 内部函数now是服务器的当前时间 -- 插入记录时,如果时间戳为now,插入数据时使用服务器当前时间 -- Epoch Time: 时间戳也可以是一个长整数,表示从1970-01-01 08:00:00.000开始的毫秒数 -- 时间可以加减,比如 now-2h,表明查询时刻向前推2个小时(最近2小时)。 数字后面的时间单位可以是 a(毫秒)、s(秒)、 m(分)、h(小时)、d(天)、w(周)。 比如select * from t1 where ts > now-2w and ts <= now-1w, 表示查询两周前整整一周的数据。 在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 +- 时间格式为 ```YYYY-MM-DD HH:mm:ss.MS```,默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` +- 内部函数 now 是客户端的当前时间 +- 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间 +- Epoch Time:时间戳也可以是一个长整数,表示从 1970-01-01 08:00:00.000 开始的毫秒数 +- 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 -TDengine缺省的时间戳是毫秒精度,但通过修改配置参数enableMicrosecond就可支持微秒。 +TDengine 缺省的时间戳是毫秒精度,但通过修改配置参数 enableMicrosecond 就可以支持微秒。 -在TDengine中,普通表的数据模型中可使用以下10种数据类型。 +在TDengine中,普通表的数据模型中可使用以下 10 种数据类型。 | | 类型 | Bytes | 说明 | | ---- | :-------: | ------ | ------------------------------------------------------------ | -| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。 | +| 1 | TIMESTAMP | 8 | 时间戳。缺省精度毫秒,可支持微秒。从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始,计时不能早于该时间。(从 2.0.18 版本开始,已经去除了这一时间范围限制) | | 2 | INT | 4 | 整型,范围 [-2^31+1, 2^31-1], -2^31 用作 NULL | | 3 | BIGINT | 8 | 长整型,范围 [-2^63+1, 2^63-1], -2^63 用于 NULL | | 4 | FLOAT | 4 | 浮点型,有效位数 6-7,范围 
[-3.4E38, 3.4E38] | diff --git a/documentation20/cn/13.faq/docs.md b/documentation20/cn/13.faq/docs.md index e2285b29e2..c01247d345 100644 --- a/documentation20/cn/13.faq/docs.md +++ b/documentation20/cn/13.faq/docs.md @@ -156,3 +156,13 @@ ALTER LOCAL RESETLOG; ``` 其含义是,清空本机所有由客户端生成的日志文件。 + +## 18. 时间戳的时区信息是怎样处理的? + +TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 + +客户端在处理时间戳字符串时,会采取如下逻辑: +1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 +2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 +3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 +4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 diff --git a/packaging/cfg/taos.cfg b/packaging/cfg/taos.cfg index a1178e2eef..83b70ed9f8 100644 --- a/packaging/cfg/taos.cfg +++ b/packaging/cfg/taos.cfg @@ -29,6 +29,9 @@ # number of threads per CPU core # numOfThreadsPerCore 1.0 +# number of threads to commit cache data +# numOfCommitThreads 4 + # the proportion of total CPU cores available for query processing # 2.0: the query threads will be set to double of the CPU cores. # 1.0: all CPU cores are available for query processing [default]. diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index e63889aff1..9241f01efa 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -120,7 +120,7 @@ function clean_service_on_systemd() { if [ "$verMode" == "cluster" ]; then nginx_service_config="${service_config_dir}/${nginx_service_name}.service" - if [ -d ${bin_dir}/web ]; then + if [ -d ${install_nginxd_dir} ]; then if systemctl is-active --quiet ${nginx_service_name}; then echo "Nginx for TDengine is running, stopping it..." 
${csudo} systemctl stop ${nginx_service_name} &> /dev/null || echo &> /dev/null diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 6c7cbbc615..b8291a923a 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -83,6 +83,22 @@ typedef struct SJoinSupporter { SArray* pVgroupTables; } SJoinSupporter; + +typedef struct SMergeCtx { + SJoinSupporter* p; + int32_t idx; + SArray* res; + int8_t compared; +}SMergeCtx; + +typedef struct SMergeTsCtx { + SJoinSupporter* p; + STSBuf* res; + int64_t numOfInput; + int8_t compared; +}SMergeTsCtx; + + typedef struct SVgroupTableInfo { SVgroupInfo vgInfo; SArray* itemList; //SArray @@ -192,7 +208,9 @@ void tscSqlExprAssign(SExprInfo* dst, const SExprInfo* src); void tscSqlExprInfoDestroy(SArray* pExprInfo); SColumn* tscColumnClone(const SColumn* src); +bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex); SColumn* tscColumnListInsert(SArray* pColList, SColumnIndex* colIndex, SSchema* pSchema); +SArray* tscColumnListClone(const SArray* src, int16_t tableIndex); void tscColumnListDestroy(SArray* pColList); void tscDequoteAndTrimToken(SStrToken* pToken); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 29718ea267..0856efe518 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -126,15 +126,15 @@ typedef struct SCond { } SCond; typedef struct SJoinNode { - char tableName[TSDB_TABLE_FNAME_LEN]; uint64_t uid; int16_t tagColId; + SArray* tsJoin; + SArray* tagJoin; } SJoinNode; typedef struct SJoinInfo { bool hasJoin; - SJoinNode left; - SJoinNode right; + SJoinNode* joinTables[TSDB_MAX_JOIN_TABLE_NUM]; } SJoinInfo; typedef struct STagCond { @@ -279,7 +279,7 @@ typedef struct { char * pRsp; int32_t rspType; int32_t rspLen; - uint64_t qid; + uint64_t qId; int64_t useconds; int64_t offset; // offset value from vnode during projection query of stable int32_t row; @@ -362,7 +362,7 @@ typedef struct SSqlObj { int64_t svgroupRid; int64_t squeryLock; - + int32_t retryReason; // previous error code struct SSqlObj *prev, *next; int64_t self; } SSqlObj; @@ -435,6 +435,8 @@ void tscFreeSqlResult(SSqlObj *pSql); * @param pObj */ void tscFreeSqlObj(SSqlObj *pSql); +void tscFreeSubobj(SSqlObj* pSql); + void tscFreeRegisteredSqlObj(void *pSql); void tscCloseTscObj(void *pObj); diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index 255b965d77..129bd425f7 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -161,8 +161,8 @@ static void tscProcessAsyncRetrieveImpl(void *param, TAOS_RES *tres, int numOfRo SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - if ((pRes->qid == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) { - if (pRes->qid == 0 && numOfRows != 0) { + if ((pRes->qId == 0 || numOfRows != 0) && pCmd->command < TSDB_SQL_LOCAL) { + if (pRes->qId == 0 && numOfRows != 0) { tscError("qhandle is NULL"); } else { pRes->code = numOfRows; @@ -210,7 +210,7 @@ void taos_fetch_rows_a(TAOS_RES *tres, __async_cb_func_t fp, void *param) { pSql->fp = tscAsyncFetchRowsProxy; pSql->param = param; - if (pRes->qid == 0) { + if (pRes->qId == 0) { tscError("qhandle is invalid"); pRes->code = TSDB_CODE_TSC_INVALID_QHANDLE; tscAsyncResultOnError(pSql); @@ -301,7 +301,7 @@ static void tscAsyncResultCallback(SSchedMsg *pMsg) { taosReleaseRef(tscObjRef, pSql->self); } -void tscAsyncResultOnError(SSqlObj* pSql) { +void tscAsyncResultOnError(SSqlObj* pSql) { SSchedMsg schedMsg = {0}; schedMsg.fp = tscAsyncResultCallback; schedMsg.ahandle = (void 
*)pSql->self; @@ -310,10 +310,51 @@ void tscAsyncResultOnError(SSqlObj* pSql) { taosScheduleTask(tscQhandle, &schedMsg); } - - int tscSendMsgToServer(SSqlObj *pSql); +static int32_t updateMetaBeforeRetryQuery(SSqlObj* pSql, STableMetaInfo* pTableMetaInfo, SQueryInfo* pQueryInfo) { + // handle the invalid table error code for super table. + // update the pExpr info, colList info, number of table columns + // TODO Re-parse this sql and issue the corresponding subquery as an alternative for this case. + if (pSql->retryReason == TSDB_CODE_TDB_INVALID_TABLE_ID) { + int32_t numOfExprs = (int32_t) tscSqlExprNumOfExprs(pQueryInfo); + int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + int32_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta); + + SSchema *pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); + for (int32_t i = 0; i < numOfExprs; ++i) { + SSqlExpr *pExpr = &(tscSqlExprGet(pQueryInfo, i)->base); + pExpr->uid = pTableMetaInfo->pTableMeta->id.uid; + + if (pExpr->colInfo.colIndex >= 0) { + int32_t index = pExpr->colInfo.colIndex; + + if ((TSDB_COL_IS_NORMAL_COL(pExpr->colInfo.flag) && index >= numOfCols) || + (TSDB_COL_IS_TAG(pExpr->colInfo.flag) && (index < numOfCols || index >= (numOfCols + numOfTags)))) { + return pSql->retryReason; + } + + if ((pSchema[pExpr->colInfo.colIndex].colId != pExpr->colInfo.colId) && + strcasecmp(pExpr->colInfo.name, pSchema[pExpr->colInfo.colIndex].name) != 0) { + return pSql->retryReason; + } + } + } + + // validate the table columns information + for (int32_t i = 0; i < taosArrayGetSize(pQueryInfo->colList); ++i) { + SColumn *pCol = taosArrayGetP(pQueryInfo->colList, i); + if (pCol->colIndex.columnIndex >= numOfCols) { + return pSql->retryReason; + } + } + } else { + // do nothing + } + + return TSDB_CODE_SUCCESS; +} + void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { SSqlObj* pSql = (SSqlObj*)taosAcquireRef(tscObjRef, (int64_t)param); if (pSql == NULL) return; @@ -339,7 +380,8 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { if (TSDB_QUERY_HAS_TYPE(pQueryInfo->type, (TSDB_QUERY_TYPE_STABLE_SUBQUERY|TSDB_QUERY_TYPE_SUBQUERY|TSDB_QUERY_TYPE_TAG_FILTER_QUERY))) { tscDebug("%p update local table meta, continue to process sql and send the corresponding query", pSql); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + code = tscGetTableMeta(pSql, pTableMetaInfo); assert(code == TSDB_CODE_TSC_ACTION_IN_PROGRESS || code == TSDB_CODE_SUCCESS); @@ -349,6 +391,10 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { } assert((tscGetNumOfTags(pTableMetaInfo->pTableMeta) != 0)); + code = updateMetaBeforeRetryQuery(pSql, pTableMetaInfo, pQueryInfo); + if (code != TSDB_CODE_SUCCESS) { + goto _error; + } // tscProcessSql can add error into async res tscProcessSql(pSql, NULL); @@ -463,10 +509,7 @@ void tscTableMetaCallBack(void *param, TAOS_RES *res, int code) { return; _error: - if (code != TSDB_CODE_SUCCESS) { - pSql->res.code = code; - tscAsyncResultOnError(pSql); - } - + pRes->code = code; + tscAsyncResultOnError(pSql); taosReleaseRef(tscObjRef, pSql->self); } diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index aa89512a5e..b4d03ec461 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -309,7 +309,7 @@ TAOS_ROW tscFetchRow(void *param) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - if (pRes->qid == 0 || + if (pRes->qId == 0 || pCmd->command == 
TSDB_SQL_RETRIEVE_EMPTY_RESULT || pCmd->command == TSDB_SQL_INSERT) { return NULL; @@ -905,7 +905,7 @@ int tscProcessLocalCmd(SSqlObj *pSql) { * set the qhandle to be 1 in order to pass the qhandle check, and to call partial release function to * free allocated resources and remove the SqlObj from sql query linked list */ - pRes->qid = 0x1; + pRes->qId = 0x1; pRes->numOfRows = 0; } else if (pCmd->command == TSDB_SQL_SHOW_CREATE_TABLE) { pRes->code = tscProcessShowCreateTable(pSql); diff --git a/src/client/src/tscLocalMerge.c b/src/client/src/tscLocalMerge.c index d181b9ea96..f590082cf0 100644 --- a/src/client/src/tscLocalMerge.c +++ b/src/client/src/tscLocalMerge.c @@ -381,6 +381,8 @@ void tscCreateLocalMerger(tExtMemBuffer **pMemBuffer, int32_t numOfBuffer, tOrde tsCreateSQLFunctionCtx(pQueryInfo, pReducer->pCtx, pschema); setCtxInputOutputBuffer(pQueryInfo, pReducer->pCtx, pReducer, pDesc); + tfree(pschema); + // we change the capacity of schema to denote that there is only one row in temp buffer pReducer->pDesc->pColumnModel->capacity = 1; @@ -1624,7 +1626,7 @@ void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen) tscDestroyLocalMerger(pObj); } - pRes->qid = 1; // hack to pass the safety check in fetch_row function + pRes->qId = 1; // hack to pass the safety check in fetch_row function pRes->numOfRows = 0; pRes->row = 0; diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 0914f3cbb5..05fa264179 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -903,7 +903,7 @@ int taos_stmt_prepare(TAOS_STMT* stmt, const char* sql, unsigned long length) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - pRes->qid = 0; + pRes->qId = 0; pRes->numOfRows = 1; strtolower(pSql->sqlstr, sql); diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c index 848f20c4ad..3222784648 100644 --- a/src/client/src/tscProfile.c +++ b/src/client/src/tscProfile.c @@ -250,7 +250,7 @@ int tscBuildQueryStreamDesc(void *pMsg, STscObj *pObj) { pQdesc->queryId = htonl(pSql->queryId); //pQdesc->useconds = htobe64(pSql->res.useconds); pQdesc->useconds = htobe64(now - pSql->stime); - pQdesc->qid = htobe64(pSql->res.qid); + pQdesc->qId = htobe64(pSql->res.qId); pHeartbeat->numOfQueries++; pQdesc++; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 4a4def1c0c..a80a23240a 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -673,7 +673,18 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { if ((code = setKillInfo(pSql, pInfo, pInfo->type)) != TSDB_CODE_SUCCESS) { return code; } + break; + } + case TSDB_SQL_SYNC_DB_REPLICA: { + const char* msg1 = "invalid db name"; + SStrToken* pzName = taosArrayGet(pInfo->pMiscInfo->a, 0); + + assert(taosArrayGetSize(pInfo->pMiscInfo->a) == 1); + code = tNameSetDbName(&pTableMetaInfo->name, getAccountId(pSql), pzName); + if (code != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); + } break; } @@ -969,12 +980,18 @@ int32_t parseSlidingClause(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SStrToken* pSl int32_t tscSetTableFullName(STableMetaInfo* pTableMetaInfo, SStrToken* pTableName, SSqlObj* pSql) { const char* msg1 = "name too long"; const char* msg2 = "acctId too long"; + const char* msg3 = "no acctId"; SSqlCmd* pCmd = &pSql->cmd; int32_t code = TSDB_CODE_SUCCESS; if (hasSpecifyDB(pTableName)) { // db has been specified in sql string so we ignore current db path - code = tNameSetAcctId(&pTableMetaInfo->name, 
getAccountId(pSql)); + char* acctId = getAccountId(pSql); + if (acctId == NULL || strlen(acctId) <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); + } + + code = tNameSetAcctId(&pTableMetaInfo->name, acctId); if (code != 0) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); } @@ -3288,7 +3305,8 @@ static int32_t extractColumnFilterInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SC } if (pSchema->type == TSDB_DATA_TYPE_BOOL) { - if (pExpr->tokenId != TK_EQ && pExpr->tokenId != TK_NE) { + int32_t t = pExpr->tokenId; + if (t != TK_EQ && t != TK_NE && t != TK_NOTNULL && t != TK_ISNULL) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } } @@ -3354,23 +3372,25 @@ static int32_t getColumnQueryCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSq } } -static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) { - const char* msg1 = "invalid join query condition"; - const char* msg2 = "invalid table name in join query"; +static int32_t checkAndSetJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) { + int32_t code = 0; + const char* msg1 = "timestamp required for join tables"; const char* msg3 = "type of join columns must be identical"; const char* msg4 = "invalid column name in join condition"; + const char* msg5 = "only support one join tag for each table"; if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } if (!tSqlExprIsParentOfLeaf(pExpr)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); - } + code = checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pLeft); + if (code) { + return code; + } - STagCond* pTagCond = &pQueryInfo->tagCond; - SJoinNode* pLeft = &pTagCond->joinInfo.left; - SJoinNode* pRight = &pTagCond->joinInfo.right; + return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr->pRight); + } SColumnIndex index = COLUMN_INDEX_INITIALIZER; if (getColumnIndexByName(pCmd, &pExpr->pLeft->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { @@ -3380,14 +3400,28 @@ static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); SSchema* pTagSchema1 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); - pLeft->uid = pTableMetaInfo->pTableMeta->id.uid; - pLeft->tagColId = pTagSchema1->colId; + assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); - int32_t code = tNameExtractFullName(&pTableMetaInfo->name, pLeft->tableName); - if (code != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + if (*leftNode == NULL) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } + (*leftNode)->uid = pTableMetaInfo->pTableMeta->id.uid; + (*leftNode)->tagColId = pTagSchema1->colId; + + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) { +// tscColumnListInsert(pTableMetaInfo->tagColList, &index, ); + if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + } + } + + int16_t leftIdx = index.tableIndex; + index = (SColumnIndex)COLUMN_INDEX_INITIALIZER; if (getColumnIndexByName(pCmd, &pExpr->pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); @@ -3396,20 +3430,56 @@ static 
int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); SSchema* pTagSchema2 = tscGetTableColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); - pRight->uid = pTableMetaInfo->pTableMeta->id.uid; - pRight->tagColId = pTagSchema2->colId; + assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); - code = tNameExtractFullName(&pTableMetaInfo->name, pRight->tableName); - if (code != TSDB_CODE_SUCCESS) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + if (*rightNode == NULL) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } + (*rightNode)->uid = pTableMetaInfo->pTableMeta->id.uid; + (*rightNode)->tagColId = pTagSchema2->colId; + + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + index.columnIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + if (!tscColumnExists(pTableMetaInfo->tagColList, &index)) { + + tscColumnListInsert(pTableMetaInfo->tagColList, &index, pTagSchema2); + if (taosArrayGetSize(pTableMetaInfo->tagColList) > 1) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg5); + } + } + } + + int16_t rightIdx = index.tableIndex; + if (pTagSchema1->type != pTagSchema2->type) { return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg3); } - pTagCond->joinInfo.hasJoin = true; + if ((*leftNode)->tagJoin == NULL) { + (*leftNode)->tagJoin = taosArrayInit(2, sizeof(int16_t)); + } + + if ((*rightNode)->tagJoin == NULL) { + (*rightNode)->tagJoin = taosArrayInit(2, sizeof(int16_t)); + } + + taosArrayPush((*leftNode)->tagJoin, &rightIdx); + taosArrayPush((*rightNode)->tagJoin, &leftIdx); + + pQueryInfo->tagCond.joinInfo.hasJoin = true; + return TSDB_CODE_SUCCESS; + +} + +static int32_t getJoinCondInfo(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) { + if (pExpr == NULL) { + return TSDB_CODE_SUCCESS; + } + + return checkAndSetJoinCondInfo(pCmd, pQueryInfo, pExpr); } static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQueryInfo, SColumnList* pList, @@ -3436,7 +3506,8 @@ static int32_t validateSQLExpr(SSqlCmd* pCmd, tSqlExpr* pExpr, SQueryInfo* pQuer } pList->ids[pList->num++] = index; - } else if (pExpr->tokenId == TK_FLOAT && (isnan(pExpr->value.dKey) || isinf(pExpr->value.dKey))) { + } else if ((pExpr->tokenId == TK_FLOAT && (isnan(pExpr->value.dKey) || isinf(pExpr->value.dKey))) || + pExpr->tokenId == TK_NULL) { return TSDB_CODE_TSC_INVALID_SQL; } else if (pExpr->type == SQL_NODE_SQLFUNCTION) { if (*type == NON_ARITHMEIC_EXPR) { @@ -3670,17 +3741,49 @@ static int32_t setExprToCond(tSqlExpr** parent, tSqlExpr* pExpr, const char* msg return TSDB_CODE_SUCCESS; } +static int32_t validateNullExpr(tSqlExpr* pExpr, char* msgBuf) { + const char* msg = "only support is [not] null"; + + tSqlExpr* pRight = pExpr->pRight; + if (pRight->tokenId == TK_NULL && (!(pExpr->tokenId == TK_ISNULL || pExpr->tokenId == TK_NOTNULL))) { + return invalidSqlErrMsg(msgBuf, msg); + } + + return TSDB_CODE_SUCCESS; +} + +// check for like expression +static int32_t validateLikeExpr(tSqlExpr* pExpr, STableMeta* pTableMeta, int32_t index, char* msgBuf) { + const char* msg1 = "wildcard string should be less than 20 characters"; + const char* msg2 = "illegal column name"; + + tSqlExpr* pLeft = pExpr->pLeft; + tSqlExpr* pRight = pExpr->pRight; + + if (pExpr->tokenId == TK_LIKE) { + if (pRight->value.nLen > TSDB_PATTERN_STRING_MAX_LEN) { + 
return invalidSqlErrMsg(msgBuf, msg1); + } + + SSchema* pSchema = tscGetTableSchema(pTableMeta); + if ((!isTablenameToken(&pLeft->colInfo)) && !IS_VAR_DATA_TYPE(pSchema[index].type)) { + return invalidSqlErrMsg(msgBuf, msg2); + } + } + + return TSDB_CODE_SUCCESS; +} + static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SCondExpr* pCondExpr, int32_t* type, int32_t parentOptr) { const char* msg1 = "table query cannot use tags filter"; const char* msg2 = "illegal column name"; const char* msg3 = "only one query time range allowed"; - const char* msg4 = "only one join condition allowed"; + const char* msg4 = "too many join tables"; const char* msg5 = "not support ordinary column join"; const char* msg6 = "only one query condition on tbname allowed"; const char* msg7 = "only in/like allowed in filter table name"; - const char* msg8 = "wildcard string should be less than 20 characters"; - + tSqlExpr* pLeft = (*pExpr)->pLeft; tSqlExpr* pRight = (*pExpr)->pRight; @@ -3696,6 +3799,18 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, index.tableIndex); STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + // validate the null expression + int32_t code = validateNullExpr(*pExpr, tscGetErrorMsgPayload(pCmd)); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + // validate the like expression + code = validateLikeExpr(*pExpr, pTableMeta, index.columnIndex, tscGetErrorMsgPayload(pCmd)); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + if (index.columnIndex == PRIMARYKEY_TIMESTAMP_COL_INDEX) { // query on time range if (!validateJoinExprNode(pCmd, pQueryInfo, *pExpr, &index)) { return TSDB_CODE_TSC_INVALID_SQL; @@ -3706,6 +3821,46 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql TSDB_QUERY_SET_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY); pCondExpr->tsJoin = true; + assert(index.tableIndex >= 0 && index.tableIndex < TSDB_MAX_JOIN_TABLE_NUM); + SJoinNode **leftNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + if (*leftNode == NULL) { + *leftNode = calloc(1, sizeof(SJoinNode)); + if (*leftNode == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } + + int16_t leftIdx = index.tableIndex; + + if (getColumnIndexByName(pCmd, &pRight->colInfo, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + + if (index.tableIndex < 0 || index.tableIndex >= TSDB_MAX_JOIN_TABLE_NUM) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); + } + + SJoinNode **rightNode = &pQueryInfo->tagCond.joinInfo.joinTables[index.tableIndex]; + if (*rightNode == NULL) { + *rightNode = calloc(1, sizeof(SJoinNode)); + if (*rightNode == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + } + + int16_t rightIdx = index.tableIndex; + + if ((*leftNode)->tsJoin == NULL) { + (*leftNode)->tsJoin = taosArrayInit(2, sizeof(int16_t)); + } + + if ((*rightNode)->tsJoin == NULL) { + (*rightNode)->tsJoin = taosArrayInit(2, sizeof(int16_t)); + } + + taosArrayPush((*leftNode)->tsJoin, &rightIdx); + taosArrayPush((*rightNode)->tsJoin, &leftIdx); + /* * to release expression, e.g., m1.ts = m2.ts, * since this expression is used to set the join query type @@ -3723,20 +3878,6 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - // check for like expression - if ((*pExpr)->tokenId == TK_LIKE) { 
- if (pRight->value.nLen > TSDB_PATTERN_STRING_MAX_LEN) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg8); - } - - SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); - - if ((!isTablenameToken(&pLeft->colInfo)) && pSchema[index.columnIndex].type != TSDB_DATA_TYPE_BINARY && - pSchema[index.columnIndex].type != TSDB_DATA_TYPE_NCHAR) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - } - // in case of in operator, keep it in a seprate attribute if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { if (!validTableNameOptr(*pExpr)) { @@ -3763,10 +3904,6 @@ static int32_t handleExprInQueryCond(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSql return TSDB_CODE_TSC_INVALID_SQL; } - if (pCondExpr->pJoinExpr != NULL) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); - } - pQueryInfo->type |= TSDB_QUERY_TYPE_JOIN_QUERY; ret = setExprToCond(&pCondExpr->pJoinExpr, *pExpr, NULL, parentOptr, pQueryInfo->msg); *pExpr = NULL; @@ -3994,6 +4131,7 @@ static bool validateFilterExpr(SQueryInfo* pQueryInfo) { static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlExpr* pExpr) { const char* msg0 = "invalid timestamp"; const char* msg1 = "only one time stamp window allowed"; + int32_t code = 0; if (pExpr == NULL) { return TSDB_CODE_SUCCESS; @@ -4004,7 +4142,10 @@ static int32_t getTimeRangeFromExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, tSqlE return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg1); } - getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft); + code = getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pLeft); + if (code) { + return code; + } return getTimeRangeFromExpr(pCmd, pQueryInfo, pExpr->pRight); } else { @@ -4086,6 +4227,7 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) { } } +/* static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondExpr* pCondExpr) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (QUERY_IS_JOIN_QUERY(pQueryInfo->type) && UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { @@ -4112,6 +4254,7 @@ static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SQueryInfo* pQueryInf tscColumnListInsert(pTableMetaInfo->tagColList, &index, &pSchema[index.columnIndex]); } } +*/ static int32_t validateTagCondExpr(SSqlCmd* pCmd, tExprNode *p) { const char *msg1 = "invalid tag operator"; @@ -4257,6 +4400,102 @@ static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SCondE return ret; } +int32_t validateJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { + const char* msg1 = "timestamp required for join tables"; + const char* msg2 = "tag required for join stables"; + + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i]; + + if (node == NULL || node->tsJoin == NULL || taosArrayGetSize(node->tsJoin) <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1); + } + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { + SJoinNode *node = pQueryInfo->tagCond.joinInfo.joinTables[i]; + + if (node == NULL || node->tagJoin == NULL || taosArrayGetSize(node->tagJoin) <= 0) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2); + } + } + } + + return TSDB_CODE_SUCCESS; +} + + +void mergeJoinNodesImpl(int8_t* r, int8_t* p, int16_t* tidx, SJoinNode** nodes, int32_t type) { + SJoinNode *node = nodes[*tidx]; + SArray* arr = (type == 0) ? 
node->tsJoin : node->tagJoin; + size_t size = taosArrayGetSize(arr); + + p[*tidx] = 1; + + for (int32_t j = 0; j < size; j++) { + int16_t* idx = taosArrayGet(arr, j); + r[*idx] = 1; + if (p[*idx] == 0) { + mergeJoinNodesImpl(r, p, idx, nodes, type); + } + } +} + +int32_t mergeJoinNodes(SQueryInfo* pQueryInfo, SSqlObj* pSql) { + const char* msg1 = "not all join tables have same timestamp"; + const char* msg2 = "not all join tables have same tag"; + + int8_t r[TSDB_MAX_JOIN_TABLE_NUM] = {0}; + int8_t p[TSDB_MAX_JOIN_TABLE_NUM] = {0}; + + for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) { + mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 0); + + taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin); + + for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) { + if (r[j]) { + taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tsJoin, &j); + } + } + + memset(r, 0, sizeof(r)); + memset(p, 0, sizeof(p)); + } + + if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tsJoin) != pQueryInfo->numOfTables) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg1); + } + + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + for (int16_t i = 0; i < pQueryInfo->numOfTables; ++i) { + mergeJoinNodesImpl(r, p, &i, pQueryInfo->tagCond.joinInfo.joinTables, 1); + + taosArrayClear(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin); + + for (int32_t j = 0; j < TSDB_MAX_JOIN_TABLE_NUM; ++j) { + if (r[j]) { + taosArrayPush(pQueryInfo->tagCond.joinInfo.joinTables[i]->tagJoin, &j); + } + } + + memset(r, 0, sizeof(r)); + memset(p, 0, sizeof(p)); + } + + if (taosArrayGetSize(pQueryInfo->tagCond.joinInfo.joinTables[0]->tagJoin) != pQueryInfo->numOfTables) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2); + } + + } + + return TSDB_CODE_SUCCESS; +} + + int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSql) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; @@ -4302,17 +4541,17 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq // 4. get the table name query condition if ((ret = getTablenameCond(&pSql->cmd, pQueryInfo, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { - return ret; + goto PARSE_WHERE_EXIT; } // 5. other column query condition if ((ret = getColumnQueryCondInfo(&pSql->cmd, pQueryInfo, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { - return ret; + goto PARSE_WHERE_EXIT; } // 6. join condition if ((ret = getJoinCondInfo(&pSql->cmd, pQueryInfo, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { - return ret; + goto PARSE_WHERE_EXIT; } // 7. 
query condition for table name @@ -4320,12 +4559,29 @@ int32_t validateWhereNode(SQueryInfo* pQueryInfo, tSqlExpr** pExpr, SSqlObj* pSq ret = setTableCondForSTableQuery(&pSql->cmd, pQueryInfo, getAccountId(pSql), condExpr.pTableCond, condExpr.tableCondIndex, &sb); taosStringBuilderDestroy(&sb); - - if (!validateFilterExpr(pQueryInfo)) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2); + if (ret) { + goto PARSE_WHERE_EXIT; } - doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr); + if (!validateFilterExpr(pQueryInfo)) { + ret = invalidSqlErrMsg(tscGetErrorMsgPayload(&pSql->cmd), msg2); + goto PARSE_WHERE_EXIT; + } + + //doAddJoinTagsColumnsIntoTagList(&pSql->cmd, pQueryInfo, &condExpr); + if (condExpr.tsJoin) { + ret = validateJoinNodes(pQueryInfo, pSql); + if (ret) { + goto PARSE_WHERE_EXIT; + } + + ret = mergeJoinNodes(pQueryInfo, pSql); + if (ret) { + goto PARSE_WHERE_EXIT; + } + } + +PARSE_WHERE_EXIT: cleanQueryExpr(&condExpr); return ret; @@ -6648,14 +6904,13 @@ int32_t validateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t ind const char* msg1 = "point interpolation query needs timestamp"; const char* msg2 = "too many tables in from clause"; const char* msg3 = "start(end) time of query range required or time range too large"; - const char* msg4 = "illegal number of tables in from clause"; int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; - SQueryInfo* pQueryInfo = tscGetQueryInfo(pCmd, index); - STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, index); + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); if (pTableMetaInfo == NULL) { pTableMetaInfo = tscAddEmptyMetaInfo(pQueryInfo); } @@ -6675,48 +6930,6 @@ int32_t validateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t ind return doLocalQueryProcess(pCmd, pQueryInfo, pQuerySqlNode); } - //validate the subquery recursively - if (pQuerySqlNode->from->type == SQL_NODE_FROM_SUBQUERY) { - for(int32_t j = 0; j < pQuerySqlNode->from->pNode.numOfClause; ++j) { - int32_t ret = validateSqlNode(pSql, pQuerySqlNode->from->pNode.pClause[j], 0); - if (ret != TSDB_CODE_SUCCESS) { - return ret; - } - } - - SQueryInfo* pQueryInfo1 = calloc(1, sizeof(SQueryInfo)); - tscInitQueryInfo(pQueryInfo1); - - pQueryInfo1->pUpstream = taosArrayInit(4, POINTER_BYTES); - pQueryInfo1->pDownstream = taosArrayInit(4, POINTER_BYTES); - - for(int32_t x = 0; x < pCmd->numOfClause; ++x) { - taosArrayPush(pQueryInfo1->pUpstream, &pCmd->pQueryInfo[x]); - taosArrayPush(pCmd->pQueryInfo[x]->pDownstream, &pQueryInfo1); - } - - pQueryInfo1->numOfTables = 1; - pCmd->numOfClause = 1; - pQueryInfo1->command = TSDB_SQL_SELECT; - - pCmd->pQueryInfo[0] = pQueryInfo1; - } - - size_t numOfTables = 0; - if (pQuerySqlNode->from->type == SQL_NODE_FROM_SUBQUERY) { - numOfTables = 1; - } else { - numOfTables = taosArrayGetSize(pQuerySqlNode->from->tableList); - if (numOfTables > TSDB_MAX_JOIN_TABLE_NUM) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg4); - } - } - - pQueryInfo->command = TSDB_SQL_SELECT; - if (numOfTables > 2) { - return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); - } - if (pQuerySqlNode->from->type == SQL_NODE_FROM_SUBQUERY) { // support only one nestquery pQueryInfo = pCmd->pQueryInfo[0]; @@ -6734,8 +6947,15 @@ int32_t validateSqlNode(SSqlObj* pSql, SQuerySqlNode* pQuerySqlNode, int32_t ind } } else { + pQueryInfo->command = TSDB_SQL_SELECT; + + size_t fromSize = 
taosArrayGetSize(pQuerySqlNode->from->tableList); + if (fromSize > TSDB_MAX_JOIN_TABLE_NUM) { + return invalidSqlErrMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + // set all query tables, which are maybe more than one. - code = doLoadAllTableMeta(pSql, index, pQuerySqlNode, numOfTables); + code = doLoadAllTableMeta(pSql, index, pQuerySqlNode, fromSize); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 4fcb6914c7..768a68217f 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -351,8 +351,8 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) { taosMsleep(duration); } + pSql->retryReason = rpcMsg->code; rpcMsg->code = tscRenewTableMeta(pSql, 0); - // if there is an error occurring, proceed to the following error handling procedure. if (rpcMsg->code == TSDB_CODE_TSC_ACTION_IN_PROGRESS) { taosReleaseRef(tscObjRef, handle); @@ -510,7 +510,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SQueryInfo *pQueryInfo = tscGetActiveQueryInfo(&pSql->cmd); pRetrieveMsg->free = htons(pQueryInfo->type); - pRetrieveMsg->qid = htobe64(pSql->res.qid); + pRetrieveMsg->qId = htobe64(pSql->res.qId); // todo valid the vgroupId at the client side STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); @@ -522,7 +522,7 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { assert(pVgroupInfo->vgroups[vgIndex].vgId > 0 && vgIndex < pTableMetaInfo->vgroupList->numOfVgroups); pRetrieveMsg->header.vgId = htonl(pVgroupInfo->vgroups[vgIndex].vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qid:%" PRIu64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qid); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pVgroupInfo->vgroups[vgIndex].vgId, vgIndex, pSql->res.qId); } else { int32_t numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); assert(vgIndex >= 0 && vgIndex < numOfVgroups); @@ -530,12 +530,12 @@ int tscBuildFetchMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SVgroupTableInfo* pTableIdList = taosArrayGet(pTableMetaInfo->pVgroupTables, vgIndex); pRetrieveMsg->header.vgId = htonl(pTableIdList->vgInfo.vgId); - tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qid:%" PRIu64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qid); + tscDebug("%p build fetch msg from vgId:%d, vgIndex:%d, qId:%" PRIu64, pSql, pTableIdList->vgInfo.vgId, vgIndex, pSql->res.qId); } } else { STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; pRetrieveMsg->header.vgId = htonl(pTableMeta->vgId); - tscDebug("%p build fetch msg from only one vgroup, vgId:%d, qid:%" PRIu64, pSql, pTableMeta->vgId, pSql->res.qid); + tscDebug("%p build fetch msg from only one vgroup, vgId:%d, qId:%" PRIu64, pSql, pTableMeta->vgId, pSql->res.qId); } pSql->cmd.payloadLen = sizeof(SRetrieveTableMsg); @@ -615,10 +615,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql, int32_t clauseIndex) { tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } -static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg) { - SQueryInfo* pQueryInfo = tscGetActiveQueryInfo(&pSql->cmd); - STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - +static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char *pMsg, int32_t *succeed) { + STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(&pSql->cmd, pSql->cmd.clauseIndex, 0); TSKEY dfltKey = htobe64(pQueryMsg->window.skey); STableMeta * pTableMeta = 
pTableMetaInfo->pTableMeta; @@ -630,9 +628,14 @@ static char *doSerializeTableInfo(SQueryTableMsg* pQueryMsg, SSqlObj *pSql, char assert(index >= 0); SVgroupInfo* pVgroupInfo = NULL; - if (pTableMetaInfo->vgroupList->numOfVgroups > 0) { + if (pTableMetaInfo->vgroupList && pTableMetaInfo->vgroupList->numOfVgroups > 0) { assert(index < pTableMetaInfo->vgroupList->numOfVgroups); pVgroupInfo = &pTableMetaInfo->vgroupList->vgroups[index]; + } else { + tscError("%p No vgroup info found", pSql); + + *succeed = 0; + return pMsg; } vgId = pVgroupInfo->vgId; @@ -711,7 +714,7 @@ static int32_t serializeSqlExpr(SSqlExpr* pExpr, STableMetaInfo* pTableMetaInfo, SSqlExpr* pSqlExpr = (SSqlExpr *)(*pMsg); SColIndex* pIndex = &pSqlExpr->colInfo; - + pIndex->colId = htons(pExpr->colInfo.colId); pIndex->colIndex = htons(pExpr->colInfo.colIndex); pIndex->flag = htons(pExpr->colInfo.flag); @@ -876,11 +879,16 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { } } - // serialize the table info (sid, uid, tags) - pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg); + int32_t succeed = 1; + // serialize the table info (sid, uid, tags) + pMsg = doSerializeTableInfo(pQueryMsg, pSql, pMsg, &succeed); + if (succeed == 0) { + return TSDB_CODE_TSC_APP_ERROR; + } + SSqlGroupbyExpr *pGroupbyExpr = query.pGroupbyExpr; - if (pGroupbyExpr != NULL && pGroupbyExpr->numOfGroupCols > 0) { + if (pGroupbyExpr->numOfGroupCols > 0) { pQueryMsg->orderByIdx = htons(pGroupbyExpr->orderIndex); pQueryMsg->orderType = htons(pGroupbyExpr->orderType); @@ -1206,6 +1214,23 @@ int32_t tscBuildUseDbMsg(SSqlObj *pSql, SSqlInfo *pInfo) { return TSDB_CODE_SUCCESS; } +int32_t tscBuildSyncDbReplicaMsg(SSqlObj* pSql, SSqlInfo *pInfo) { + SSqlCmd *pCmd = &pSql->cmd; + pCmd->payloadLen = sizeof(SSyncDbMsg); + + if (TSDB_CODE_SUCCESS != tscAllocPayload(pCmd, pCmd->payloadLen)) { + tscError("%p failed to malloc for query msg", pSql); + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + SSyncDbMsg *pSyncMsg = (SSyncDbMsg *)pCmd->payload; + STableMetaInfo *pTableMetaInfo = tscGetTableMetaInfoFromCmd(pCmd, pCmd->clauseIndex, 0); + tNameExtractFullName(&pTableMetaInfo->name, pSyncMsg->db); + pCmd->msgType = TSDB_MSG_TYPE_CM_SYNC_DB; + + return TSDB_CODE_SUCCESS; +} + int32_t tscBuildShowMsg(SSqlObj *pSql, SSqlInfo *pInfo) { STscObj *pObj = pSql->pTscObj; SSqlCmd *pCmd = &pSql->cmd; @@ -1478,7 +1503,7 @@ int tscBuildRetrieveFromMgmtMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, 0); SRetrieveTableMsg *pRetrieveMsg = (SRetrieveTableMsg*)pCmd->payload; - pRetrieveMsg->qid = htobe64(pSql->res.qid); + pRetrieveMsg->qId = htobe64(pSql->res.qId); pRetrieveMsg->free = htons(pQueryInfo->type); return TSDB_CODE_SUCCESS; @@ -1986,19 +2011,24 @@ int tscProcessSTableVgroupRsp(SSqlObj *pSql) { assert(pInfo->vgroupList != NULL); pInfo->vgroupList->numOfVgroups = pVgroupMsg->numOfVgroups; - for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { - //just init, no need to lock - SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; + if (pInfo->vgroupList->numOfVgroups <= 0) { + //tfree(pInfo->vgroupList); + tscError("%p empty vgroup info", pSql); + } else { + for (int32_t j = 0; j < pInfo->vgroupList->numOfVgroups; ++j) { + //just init, no need to lock + SVgroupInfo *pVgroups = &pInfo->vgroupList->vgroups[j]; - SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; - pVgroups->vgId = htonl(vmsg->vgId); - pVgroups->numOfEps = vmsg->numOfEps; + SVgroupMsg *vmsg = &pVgroupMsg->vgroups[j]; + pVgroups->vgId = htonl(vmsg->vgId); + 
pVgroups->numOfEps = vmsg->numOfEps; - assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1); + assert(pVgroups->numOfEps >= 1 && pVgroups->vgId >= 1); - for (int32_t k = 0; k < pVgroups->numOfEps; ++k) { - pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port); - pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn)); + for (int32_t k = 0; k < pVgroups->numOfEps; ++k) { + pVgroups->epAddr[k].port = htons(vmsg->epAddr[k].port); + pVgroups->epAddr[k].fqdn = strndup(vmsg->epAddr[k].fqdn, tListLen(vmsg->epAddr[k].fqdn)); + } } } @@ -2024,7 +2054,7 @@ int tscProcessShowRsp(SSqlObj *pSql) { pShow = (SShowRsp *)pRes->pRsp; pShow->qhandle = htobe64(pShow->qhandle); - pRes->qid = pShow->qhandle; + pRes->qId = pShow->qhandle; tscResetForNextRetrieve(pRes); pMetaMsg = &(pShow->tableMeta); @@ -2206,12 +2236,13 @@ int tscProcessQueryRsp(SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; SQueryTableRsp *pQueryAttr = (SQueryTableRsp *)pRes->pRsp; - pQueryAttr->qid = htobe64(pQueryAttr->qid); + pQueryAttr->qId = htobe64(pQueryAttr->qId); - pRes->qid = pQueryAttr->qid; + pRes->qId = pQueryAttr->qId; pRes->data = NULL; tscResetForNextRetrieve(pRes); + tscDebug("%p query rsp received, qId:%"PRIu64, pSql, pRes->qId); return 0; } @@ -2271,7 +2302,8 @@ int tscProcessRetrieveRspFromNode(SSqlObj *pSql) { } pRes->row = 0; - tscDebug("%p numOfRows:%d, offset:%" PRId64 ", complete:%d", pSql, pRes->numOfRows, pRes->offset, pRes->completed); + tscDebug("%p numOfRows:%d, offset:%" PRId64 ", complete:%d, qId:%"PRIu64, pSql, pRes->numOfRows, pRes->offset, + pRes->completed, pRes->qId); return 0; } @@ -2484,6 +2516,7 @@ void tscInitMsgsFp() { tscBuildMsg[TSDB_SQL_DROP_USER] = tscBuildDropUserAcctMsg; tscBuildMsg[TSDB_SQL_DROP_ACCT] = tscBuildDropUserAcctMsg; tscBuildMsg[TSDB_SQL_DROP_DB] = tscBuildDropDbMsg; + tscBuildMsg[TSDB_SQL_SYNC_DB_REPLICA] = tscBuildSyncDbReplicaMsg; tscBuildMsg[TSDB_SQL_DROP_TABLE] = tscBuildDropTableMsg; tscBuildMsg[TSDB_SQL_ALTER_USER] = tscBuildUserMsg; tscBuildMsg[TSDB_SQL_CREATE_DNODE] = tscBuildCreateDnodeMsg; @@ -2536,7 +2569,6 @@ void tscInitMsgsFp() { tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_TABLE] = tscProcessShowCreateRsp; tscProcessMsgRsp[TSDB_SQL_SHOW_CREATE_DATABASE] = tscProcessShowCreateRsp; - tscKeepConn[TSDB_SQL_SHOW] = 1; tscKeepConn[TSDB_SQL_RETRIEVE] = 1; diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index f11d419229..3fad00977b 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -476,7 +476,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - if (pRes->qid == 0 || + if (pRes->qId == 0 || pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED || pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pCmd->command == TSDB_SQL_INSERT) { @@ -508,7 +508,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; - if (pRes->qid == 0 || + if (pRes->qId == 0 || pRes->code == TSDB_CODE_TSC_QUERY_CANCELLED || pCmd->command == TSDB_SQL_RETRIEVE_EMPTY_RESULT || pCmd->command == TSDB_SQL_INSERT) { @@ -554,7 +554,7 @@ static bool tscKillQueryInDnode(SSqlObj* pSql) { SSqlCmd* pCmd = &pSql->cmd; SSqlRes* pRes = &pSql->res; - if (pRes == NULL || pRes->qid == 0) { + if (pRes == NULL || pRes->qId == 0) { return true; } @@ -1050,7 +1050,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { * If qhandle is NOT set 0, the function of taos_free_result() will send message to server by calling tscProcessSql() * to free connection, 
which may cause segment fault, when the parse phrase is not even successfully executed. */ - pRes->qid = 0; + pRes->qId = 0; free(str); if (code != TSDB_CODE_SUCCESS) { diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c index 75f2b9c46c..41d2e89838 100644 --- a/src/client/src/tscStream.c +++ b/src/client/src/tscStream.c @@ -299,6 +299,7 @@ static void tscProcessStreamRetrieveResult(void *param, TAOS_RES *res, int numOf tfree(pTableMetaInfo->pTableMeta); tscFreeSqlResult(pSql); + tscFreeSubobj(pSql); tfree(pSql->pSubs); pSql->subState.numOfSub = 0; pTableMetaInfo->vgroupList = tscVgroupInfoClear(pTableMetaInfo->vgroupList); diff --git a/src/client/src/tscSub.c b/src/client/src/tscSub.c index 7f7d9abe74..05d3cedf35 100644 --- a/src/client/src/tscSub.c +++ b/src/client/src/tscSub.c @@ -149,7 +149,7 @@ static SSub* tscCreateSubscription(STscObj* pObj, const char* topic, const char* } strtolower(pSql->sqlstr, pSql->sqlstr); - pRes->qid = 0; + pRes->qId = 0; pRes->numOfRows = 1; code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE); @@ -448,7 +448,7 @@ SSqlObj* recreateSqlObj(SSub* pSub) { return NULL; } - pRes->qid = 0; + pRes->qId = 0; pRes->numOfRows = 1; int code = tscAllocPayload(pCmd, TSDB_DEFAULT_PAYLOAD_SIZE); @@ -548,7 +548,7 @@ TAOS_RES *taos_consume(TAOS_SUB *tsub) { uint32_t type = pQueryInfo->type; tscFreeSqlResult(pSql); pRes->numOfRows = 1; - pRes->qid = 0; + pRes->qId = 0; pSql->cmd.command = TSDB_SQL_SELECT; pQueryInfo->type = type; diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index 995eea30a3..d8ccceea14 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -47,6 +47,13 @@ static int32_t tsCompare(int32_t order, int64_t left, int64_t right) { } static void skipRemainValue(STSBuf* pTSBuf, tVariant* tag1) { + STSElem el1 = tsBufGetElem(pTSBuf); + + int32_t res = tVariantCompare(el1.tag, tag1); + if (res != 0) { // it is a record with new tag + return; + } + while (tsBufNextPos(pTSBuf)) { STSElem el1 = tsBufGetElem(pTSBuf); @@ -117,123 +124,235 @@ static bool subAndCheckDone(SSqlObj *pSql, SSqlObj *pParentSql, int idx) { return done; } -static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJoinSupporter* pSupporter2, STimeWindow * win) { - SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); - STSBuf* output1 = tsBufCreate(true, pQueryInfo->order.order); - STSBuf* output2 = tsBufCreate(true, pQueryInfo->order.order); + +static int64_t doTSBlockIntersect(SSqlObj* pSql, STimeWindow * win) { + SQueryInfo* pQueryInfo = tscGetQueryInfo(&pSql->cmd, pSql->cmd.clauseIndex); win->skey = INT64_MAX; win->ekey = INT64_MIN; SLimitVal* pLimit = &pQueryInfo->limit; int32_t order = pQueryInfo->order.order; + int32_t joinNum = pSql->subState.numOfSub; + SMergeTsCtx ctxlist[TSDB_MAX_JOIN_TABLE_NUM] = {{0}}; + SMergeTsCtx* ctxStack[TSDB_MAX_JOIN_TABLE_NUM] = {0}; + int32_t slot = 0; + size_t tableNum = 0; + int16_t* tableMIdx = 0; + int32_t equalNum = 0; + int32_t stackidx = 0; + SMergeTsCtx* ctx = NULL; + SMergeTsCtx* pctx = NULL; + SMergeTsCtx* mainCtx = NULL; + STSElem cur; + STSElem prev; + SArray* tsCond = NULL; + int32_t mergeDone = 0; - SQueryInfo* pSubQueryInfo1 = tscGetQueryInfo(&pSql->pSubs[0]->cmd, 0); - SQueryInfo* pSubQueryInfo2 = tscGetQueryInfo(&pSql->pSubs[1]->cmd, 0); + for (int32_t i = 0; i < joinNum; ++i) { + STSBuf* output = tsBufCreate(true, pQueryInfo->order.order); + SQueryInfo* pSubQueryInfo = tscGetQueryInfo(&pSql->pSubs[i]->cmd, 0); - 
pSubQueryInfo1->tsBuf = output1; - pSubQueryInfo2->tsBuf = output2; + pSubQueryInfo->tsBuf = output; + + SJoinSupporter* pSupporter = pSql->pSubs[i]->param; + + if (pSupporter->pTSBuf == NULL) { + tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql); + return 0; + } + + tsBufResetPos(pSupporter->pTSBuf); + + if (!tsBufNextPos(pSupporter->pTSBuf)) { + tscDebug("%p input1 is empty, 0 for secondary query after ts blocks intersecting", pSql); + return 0; + } + + tscDebug("%p sub:%p table idx:%d, input group number:%d", pSql, pSql->pSubs[i], i, pSupporter->pTSBuf->numOfGroups); + + ctxlist[i].p = pSupporter; + ctxlist[i].res = output; + } TSKEY st = taosGetTimestampUs(); - // no result generated, return directly - if (pSupporter1->pTSBuf == NULL || pSupporter2->pTSBuf == NULL) { - tscDebug("%p at least one ts-comp is empty, 0 for secondary query after ts blocks intersecting", pSql); - return 0; - } - - tsBufResetPos(pSupporter1->pTSBuf); - tsBufResetPos(pSupporter2->pTSBuf); - - if (!tsBufNextPos(pSupporter1->pTSBuf)) { - tsBufFlush(output1); - tsBufFlush(output2); - - tscDebug("%p input1 is empty, 0 for secondary query after ts blocks intersecting", pSql); - return 0; - } - - if (!tsBufNextPos(pSupporter2->pTSBuf)) { - tsBufFlush(output1); - tsBufFlush(output2); - - tscDebug("%p input2 is empty, 0 for secondary query after ts blocks intersecting", pSql); - return 0; - } - - int64_t numOfInput1 = 1; - int64_t numOfInput2 = 1; - - while(1) { - STSElem elem = tsBufGetElem(pSupporter1->pTSBuf); - - // no data in pSupporter1 anymore, jump out of loop - if (!tsBufIsValidElem(&elem)) { - break; + for (int16_t tidx = 0; tidx < joinNum; tidx++) { + pctx = &ctxlist[tidx]; + if (pctx->compared) { + continue; } - // find the data in supporter2 with the same tag value - STSElem e2 = tsBufFindElemStartPosByTag(pSupporter2->pTSBuf, elem.tag); + assert(pctx->numOfInput == 0); - /** - * there are elements in pSupporter2 with the same tag, continue - */ - tVariant tag1 = {0}; - tVariantAssign(&tag1, elem.tag); + tsCond = pQueryInfo->tagCond.joinInfo.joinTables[tidx]->tsJoin; + + tableNum = taosArrayGetSize(tsCond); + assert(tableNum >= 2); + + for (int32_t i = 0; i < tableNum; ++i) { + tableMIdx = taosArrayGet(tsCond, i); + SMergeTsCtx* tctx = &ctxlist[*tableMIdx]; + tctx->compared = 1; + } + + tableMIdx = taosArrayGet(tsCond, 0); + pctx = &ctxlist[*tableMIdx]; + + mainCtx = pctx; + + while (1) { + pctx = mainCtx; + + prev = tsBufGetElem(pctx->p->pTSBuf); + + ctxStack[stackidx++] = pctx; + + if (!tsBufIsValidElem(&prev)) { + break; + } + + tVariant tag = {0}; + tVariantAssign(&tag, prev.tag); + + int32_t skipped = 0; + + for (int32_t i = 1; i < tableNum; ++i) { + SMergeTsCtx* tctx = &ctxlist[i]; + + // find the data in supporter2 with the same tag value + STSElem e2 = tsBufFindElemStartPosByTag(tctx->p->pTSBuf, &tag); + + if (!tsBufIsValidElem(&e2)) { + skipRemainValue(pctx->p->pTSBuf, &tag); + skipped = 1; + break; + } + } + + if (skipped) { + slot = 0; + stackidx = 0; + continue; + } + + tableMIdx = taosArrayGet(tsCond, ++slot); + equalNum = 1; - if (tsBufIsValidElem(&e2)) { while (1) { - STSElem elem1 = tsBufGetElem(pSupporter1->pTSBuf); - STSElem elem2 = tsBufGetElem(pSupporter2->pTSBuf); + ctx = &ctxlist[*tableMIdx]; + + prev = tsBufGetElem(pctx->p->pTSBuf); + cur = tsBufGetElem(ctx->p->pTSBuf); // data with current are exhausted - if (!tsBufIsValidElem(&elem1) || tVariantCompare(elem1.tag, &tag1) != 0) { + if (!tsBufIsValidElem(&prev) || 
tVariantCompare(prev.tag, &tag) != 0) { break; } - if (!tsBufIsValidElem(&elem2) || tVariantCompare(elem2.tag, &tag1) != 0) { // ignore all records with the same tag - skipRemainValue(pSupporter1->pTSBuf, &tag1); + if (!tsBufIsValidElem(&cur) || tVariantCompare(cur.tag, &tag) != 0) { // ignore all records with the same tag break; } - /* - * in case of stable query, limit/offset is not applied here. the limit/offset is applied to the - * final results which is acquired after the secondary merge of in the client. - */ - int32_t re = tsCompare(order, elem1.ts, elem2.ts); - if (re < 0) { - tsBufNextPos(pSupporter1->pTSBuf); - numOfInput1++; - } else if (re > 0) { - tsBufNextPos(pSupporter2->pTSBuf); - numOfInput2++; - } else { + ctxStack[stackidx++] = ctx; + + int32_t ret = tsCompare(order, prev.ts, cur.ts); + if (ret == 0) { + if (++equalNum < tableNum) { + pctx = ctx; + + if (++slot >= tableNum) { + slot = 0; + } + + tableMIdx = taosArrayGet(tsCond, slot); + continue; + } + + assert(stackidx == tableNum); + if (pLimit->offset == 0 || pQueryInfo->interval.interval > 0 || QUERY_IS_STABLE_QUERY(pQueryInfo->type)) { - if (win->skey > elem1.ts) { - win->skey = elem1.ts; + if (win->skey > prev.ts) { + win->skey = prev.ts; } - if (win->ekey < elem1.ts) { - win->ekey = elem1.ts; + if (win->ekey < prev.ts) { + win->ekey = prev.ts; } - tsBufAppend(output1, elem1.id, elem1.tag, (const char*)&elem1.ts, sizeof(elem1.ts)); - tsBufAppend(output2, elem2.id, elem2.tag, (const char*)&elem2.ts, sizeof(elem2.ts)); + for (int32_t i = 0; i < stackidx; ++i) { + SMergeTsCtx* tctx = ctxStack[i]; + prev = tsBufGetElem(tctx->p->pTSBuf); + + tsBufAppend(tctx->res, prev.id, prev.tag, (const char*)&prev.ts, sizeof(prev.ts)); + } } else { pLimit->offset -= 1;//offset apply to projection? } - tsBufNextPos(pSupporter1->pTSBuf); - numOfInput1++; + for (int32_t i = 0; i < stackidx; ++i) { + SMergeTsCtx* tctx = ctxStack[i]; - tsBufNextPos(pSupporter2->pTSBuf); - numOfInput2++; + if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) { + mergeDone = 1; + } + tctx->numOfInput++; + } + + if (mergeDone) { + break; + } + + stackidx = 0; + equalNum = 1; + + ctxStack[stackidx++] = pctx; + } else if (ret > 0) { + if (!tsBufNextPos(ctx->p->pTSBuf) && ctx == mainCtx) { + mergeDone = 1; + break; + } + + ctx->numOfInput++; + stackidx--; + } else { + stackidx--; + + for (int32_t i = 0; i < stackidx; ++i) { + SMergeTsCtx* tctx = ctxStack[i]; + + if (!tsBufNextPos(tctx->p->pTSBuf) && tctx == mainCtx) { + mergeDone = 1; + } + tctx->numOfInput++; + } + + if (mergeDone) { + break; + } + + stackidx = 0; + equalNum = 1; + + ctxStack[stackidx++] = pctx; } + } - } else { // no data in pSupporter2, ignore current data in pSupporter2 - skipRemainValue(pSupporter1->pTSBuf, &tag1); + + if (mergeDone) { + break; + } + + slot = 0; + stackidx = 0; + + skipRemainValue(mainCtx->p->pTSBuf, &tag); } + + stackidx = 0; + slot = 0; + mergeDone = 0; } /* @@ -241,28 +360,32 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSupporter* pSupporter1, SJ * 1. only one element * 2. only one element for each tag. 
*/ - if (output1->tsOrder == -1) { - output1->tsOrder = TSDB_ORDER_ASC; - output2->tsOrder = TSDB_ORDER_ASC; + if (ctxlist[0].res->tsOrder == -1) { + for (int32_t i = 0; i < joinNum; ++i) { + ctxlist[i].res->tsOrder = TSDB_ORDER_ASC; + } } - tsBufFlush(output1); - tsBufFlush(output2); + for (int32_t i = 0; i < joinNum; ++i) { + tsBufFlush(ctxlist[i].res); - tsBufDestroy(pSupporter1->pTSBuf); - pSupporter1->pTSBuf = NULL; - tsBufDestroy(pSupporter2->pTSBuf); - pSupporter2->pTSBuf = NULL; + tsBufDestroy(ctxlist[i].p->pTSBuf); + ctxlist[i].p->pTSBuf = NULL; + } TSKEY et = taosGetTimestampUs(); - tscDebug("%p input1:%" PRId64 ", input2:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks " - "intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us", - pSql, numOfInput1, numOfInput2, output1->numOfTotal, output1->numOfGroups, win->skey, win->ekey, - tsBufGetNumOfGroup(output1), et - st); - return output1->numOfTotal; + for (int32_t i = 0; i < joinNum; ++i) { + tscDebug("%p sub:%p tblidx:%d, input:%" PRId64 ", final:%" PRId64 " in %d vnodes for secondary query after ts blocks " + "intersecting, skey:%" PRId64 ", ekey:%" PRId64 ", numOfVnode:%d, elapsed time:%" PRId64 " us", + pSql, pSql->pSubs[i], i, ctxlist[i].numOfInput, ctxlist[i].res->numOfTotal, ctxlist[i].res->numOfGroups, win->skey, win->ekey, + tsBufGetNumOfGroup(ctxlist[i].res), et - st); + } + + return ctxlist[0].res->numOfTotal; } + // todo handle failed to create sub query SJoinSupporter* tscCreateJoinSupporter(SSqlObj* pSql, int32_t index) { SJoinSupporter* pSupporter = calloc(1, sizeof(SJoinSupporter)); @@ -771,76 +894,218 @@ static bool checkForDuplicateTagVal(SSchema* pColSchema, SJoinSupporter* p1, SSq return true; } -static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray** s1, SArray** s2) { - SJoinSupporter* p1 = pParentSql->pSubs[0]->param; - SJoinSupporter* p2 = pParentSql->pSubs[1]->param; - - tscDebug("%p all subquery retrieve complete, do tags match, %d, %d", pParentSql, p1->num, p2->num); - - // sort according to the tag value - qsort(p1->pIdTagList, p1->num, p1->tagSize, tagValCompar); - qsort(p2->pIdTagList, p2->num, p2->tagSize, tagValCompar); +static int32_t getIntersectionOfTableTuple(SQueryInfo* pQueryInfo, SSqlObj* pParentSql, SArray* resList) { + int16_t joinNum = pParentSql->subState.numOfSub; STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); int16_t tagColId = tscGetJoinTagColIdByUid(&pQueryInfo->tagCond, pTableMetaInfo->pTableMeta->id.uid); + SJoinSupporter* p0 = pParentSql->pSubs[0]->param; + SMergeCtx ctxlist[TSDB_MAX_JOIN_TABLE_NUM] = {{0}}; + SMergeCtx* ctxStack[TSDB_MAX_JOIN_TABLE_NUM] = {0}; + + // int16_t for padding + int32_t size = p0->tagSize - sizeof(int16_t); SSchema* pColSchema = tscGetColumnSchemaById(pTableMetaInfo->pTableMeta, tagColId); - // int16_t for padding - int32_t size = p1->tagSize - sizeof(int16_t); - *s1 = taosArrayInit(p1->num, size); - *s2 = taosArrayInit(p2->num, size); + tscDebug("%p all subquery retrieve complete, do tags match", pParentSql); - if (!(checkForDuplicateTagVal(pColSchema, p1, pParentSql) && checkForDuplicateTagVal(pColSchema, p2, pParentSql))) { - return TSDB_CODE_QRY_DUP_JOIN_KEY; - } + for (int32_t i = 0; i < joinNum; i++) { + SJoinSupporter* p = pParentSql->pSubs[i]->param; - int32_t i = 0, j = 0; - while(i < p1->num && j < p2->num) { - STidTags* pp1 = (STidTags*) varDataVal(p1->pIdTagList + i * p1->tagSize); - STidTags* pp2 = (STidTags*) 
varDataVal(p2->pIdTagList + j * p2->tagSize); - assert(pp1->tid != 0 && pp2->tid != 0); + ctxlist[i].p = p; + ctxlist[i].res = taosArrayInit(p->num, size); - int32_t ret = doCompare(pp1->tag, pp2->tag, pColSchema->type, pColSchema->bytes); - if (ret == 0) { - tscDebug("%p tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql, pp1->vgId, - *(int*) pp1->tag, pp1->tid, pp1->uid, pp2->tid, pp2->uid); + tscDebug("Join %d - num:%d", i, p->num); - taosArrayPush(*s1, pp1); - taosArrayPush(*s2, pp2); - j++; - i++; - } else if (ret > 0) { - j++; - } else { - i++; + // sort according to the tag valu + qsort(p->pIdTagList, p->num, p->tagSize, tagValCompar); + + if (!checkForDuplicateTagVal(pColSchema, p, pParentSql)) { + for (int32_t j = 0; j <= i; j++) { + taosArrayDestroy(ctxlist[j].res); + } + return TSDB_CODE_QRY_DUP_JOIN_KEY; } } - // reorganize the tid-tag value according to both the vgroup id and tag values - // sort according to the tag value - size_t t1 = taosArrayGetSize(*s1); - size_t t2 = taosArrayGetSize(*s2); + int32_t slot = 0; + size_t tableNum = 0; + int16_t* tableMIdx = 0; + int32_t equalNum = 0; + int32_t stackidx = 0; + int32_t mergeDone = 0; + SMergeCtx* ctx = NULL; + SMergeCtx* pctx = NULL; + STidTags* cur = NULL; + STidTags* prev = NULL; + SArray* tagCond = NULL; - qsort((*s1)->pData, t1, size, tidTagsCompar); - qsort((*s2)->pData, t2, size, tidTagsCompar); + for (int16_t tidx = 0; tidx < joinNum; tidx++) { + pctx = &ctxlist[tidx]; + if (pctx->compared) { + continue; + } -#if 0 - for(int32_t k = 0; k < t1; ++k) { - STidTags* p = (*s1)->pData + size * k; - printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data); + assert(pctx->idx == 0 && taosArrayGetSize(pctx->res) == 0); + + tagCond = pQueryInfo->tagCond.joinInfo.joinTables[tidx]->tagJoin; + + tableNum = taosArrayGetSize(tagCond); + assert(tableNum >= 2); + + for (int32_t i = 0; i < tableNum; ++i) { + tableMIdx = taosArrayGet(tagCond, i); + SMergeCtx* tctx = &ctxlist[*tableMIdx]; + tctx->compared = 1; + } + + for (int32_t i = 0; i < tableNum; ++i) { + tableMIdx = taosArrayGet(tagCond, i); + SMergeCtx* tctx = &ctxlist[*tableMIdx]; + if (tctx->p->num <= 0 || tctx->p->pIdTagList == NULL) { + mergeDone = 1; + break; + } + } + + if (mergeDone) { + mergeDone = 0; + continue; + } + + tableMIdx = taosArrayGet(tagCond, slot); + + pctx = &ctxlist[*tableMIdx]; + + prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize); + + ctxStack[stackidx++] = pctx; + + tableMIdx = taosArrayGet(tagCond, ++slot); + + equalNum = 1; + + while (1) { + ctx = &ctxlist[*tableMIdx]; + + cur = (STidTags*) varDataVal(ctx->p->pIdTagList + ctx->idx * ctx->p->tagSize); + + assert(cur->tid != 0 && prev->tid != 0); + + ctxStack[stackidx++] = ctx; + + int32_t ret = doCompare(prev->tag, cur->tag, pColSchema->type, pColSchema->bytes); + if (ret == 0) { + if (++equalNum < tableNum) { + prev = cur; + pctx = ctx; + + if (++slot >= tableNum) { + slot = 0; + } + + tableMIdx = taosArrayGet(tagCond, slot); + continue; + } + + tscDebug("%p tag matched, vgId:%d, val:%d, tid:%d, uid:%"PRIu64", tid:%d, uid:%"PRIu64, pParentSql, prev->vgId, + *(int*) prev->tag, prev->tid, prev->uid, cur->tid, cur->uid); + + assert(stackidx == tableNum); + + for (int32_t i = 0; i < stackidx; ++i) { + SMergeCtx* tctx = ctxStack[i]; + prev = (STidTags*) varDataVal(tctx->p->pIdTagList + tctx->idx * tctx->p->tagSize); + + taosArrayPush(tctx->res, prev); + } + + for (int32_t i = 0; i < stackidx; ++i) { + SMergeCtx* tctx = ctxStack[i]; + + if 
(++tctx->idx >= tctx->p->num) { + mergeDone = 1; + break; + } + } + + if (mergeDone) { + break; + } + + stackidx = 0; + equalNum = 1; + + prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize); + + ctxStack[stackidx++] = pctx; + } else if (ret > 0) { + stackidx--; + + if (++ctx->idx >= ctx->p->num) { + break; + } + } else { + stackidx--; + + for (int32_t i = 0; i < stackidx; ++i) { + SMergeCtx* tctx = ctxStack[i]; + if (++tctx->idx >= tctx->p->num) { + mergeDone = 1; + break; + } + } + + if (mergeDone) { + break; + } + + stackidx = 0; + equalNum = 1; + + prev = (STidTags*) varDataVal(pctx->p->pIdTagList + pctx->idx * pctx->p->tagSize); + ctxStack[stackidx++] = pctx; + } + + } + + slot = 0; + mergeDone = 0; + stackidx = 0; } - for(int32_t k = 0; k < t1; ++k) { - STidTags* p = (*s2)->pData + size * k; - printf("%d, tag:%s\n", p->vgId, ((tstr*)(p->tag))->data); - } -#endif + for (int32_t i = 0; i < joinNum; ++i) { + // reorganize the tid-tag value according to both the vgroup id and tag values + // sort according to the tag value + size_t num = taosArrayGetSize(ctxlist[i].res); + + qsort((ctxlist[i].res)->pData, num, size, tidTagsCompar); + + taosArrayPush(resList, &ctxlist[i].res); + + tscDebug("%p tags match complete, result num: %"PRIzu, pParentSql, num); + } - tscDebug("%p tags match complete, result: %"PRIzu", %"PRIzu, pParentSql, t1, t2); return TSDB_CODE_SUCCESS; } +bool emptyTagList(SArray* resList, int32_t size) { + size_t rsize = taosArrayGetSize(resList); + if (rsize != size) { + return true; + } + + for (int32_t i = 0; i < size; ++i) { + SArray** s = taosArrayGet(resList, i); + if (taosArrayGetSize(*s) <= 0) { + return true; + } + } + + return false; +} + static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) { SJoinSupporter* pSupporter = (SJoinSupporter*)param; @@ -942,19 +1207,19 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow return; } - SArray *s1 = NULL, *s2 = NULL; - int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, &s1, &s2); + SArray* resList = taosArrayInit(pParentSql->subState.numOfSub, sizeof(SArray *)); + + int32_t code = getIntersectionOfTableTuple(pQueryInfo, pParentSql, resList); if (code != TSDB_CODE_SUCCESS) { freeJoinSubqueryObj(pParentSql); pParentSql->res.code = code; tscAsyncResultOnError(pParentSql); - taosArrayDestroy(s1); - taosArrayDestroy(s2); + taosArrayDestroy(resList); return; } - if (taosArrayGetSize(s1) == 0 || taosArrayGetSize(s2) == 0) { // no results,return. + if (emptyTagList(resList, pParentSql->subState.numOfSub)) { // no results,return. 
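/*
 * Editor's note: the block below is an illustrative sketch, not part of this
 * patch. It shows the core idea shared by getIntersectionOfTableTuple() and
 * doTSBlockIntersect() above: keep one cursor per sorted input, emit a value
 * only when every cursor agrees, and otherwise advance the smallest cursors.
 * All names and types here are invented for the example.
 */
#include <stdbool.h>
#include <stddef.h>

/* Intersect n ascending int arrays; emit() is called once per common value. */
static void intersectSortedLists(const int **lists, const size_t *lens, size_t n,
                                 void (*emit)(int value)) {
  size_t idx[16] = {0};                 /* one cursor per input, n <= 16 assumed */
  if (n == 0 || n > 16) return;

  for (;;) {
    for (size_t i = 0; i < n; ++i) {
      if (idx[i] >= lens[i]) return;    /* any exhausted input ends the merge */
    }

    int  minVal   = lists[0][idx[0]];
    bool allEqual = true;
    for (size_t i = 1; i < n; ++i) {
      int v = lists[i][idx[i]];
      if (v != minVal) allEqual = false;
      if (v < minVal)  minVal = v;
    }

    if (allEqual) {
      emit(minVal);                     /* every table shares this tag/timestamp */
      for (size_t i = 0; i < n; ++i) idx[i]++;
    } else {
      for (size_t i = 0; i < n; ++i) {  /* advance only the cursors at the minimum */
        if (lists[i][idx[i]] == minVal) idx[i]++;
      }
    }
  }
}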
assert(pParentSql->fp != tscJoinQueryCallback); tscDebug("%p tag intersect does not generated qualified tables for join, free all sub SqlObj and quit", pParentSql); @@ -966,37 +1231,34 @@ static void tidTagRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow (*pParentSql->fp)(pParentSql->param, pParentSql, 0); } else { - // proceed to for ts_comp query - SSqlCmd* pSubCmd1 = &pParentSql->pSubs[0]->cmd; - SSqlCmd* pSubCmd2 = &pParentSql->pSubs[1]->cmd; - - SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pSubCmd1, 0); - STableMetaInfo* pTableMetaInfo1 = tscGetMetaInfo(pQueryInfo1, 0); - tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo1, s1); - - SQueryInfo* pQueryInfo2 = tscGetQueryInfo(pSubCmd2, 0); - STableMetaInfo* pTableMetaInfo2 = tscGetMetaInfo(pQueryInfo2, 0); - tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo2, s2); - - SSqlObj* psub1 = pParentSql->pSubs[0]; - ((SJoinSupporter*)psub1->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo1->pVgroupTables); - - SSqlObj* psub2 = pParentSql->pSubs[1]; - ((SJoinSupporter*)psub2->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo2->pVgroupTables); - - pParentSql->subState.numOfSub = 2; - - memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub); - tscDebug("%p reset all sub states to 0", pParentSql); - for (int32_t m = 0; m < pParentSql->subState.numOfSub; ++m) { - SSqlObj* sub = pParentSql->pSubs[m]; - issueTsCompQuery(sub, sub->param, pParentSql); + // proceed to for ts_comp query + SSqlCmd* pSubCmd = &pParentSql->pSubs[m]->cmd; + SArray** s = taosArrayGet(resList, m); + + SQueryInfo* pQueryInfo1 = tscGetQueryInfo(pSubCmd, 0); + STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo1, 0); + tscBuildVgroupTableInfo(pParentSql, pTableMetaInfo, *s); + + SSqlObj* psub = pParentSql->pSubs[m]; + ((SJoinSupporter*)psub->param)->pVgroupTables = tscVgroupTableInfoDup(pTableMetaInfo->pVgroupTables); + + memset(pParentSql->subState.states, 0, sizeof(pParentSql->subState.states[0]) * pParentSql->subState.numOfSub); + tscDebug("%p reset all sub states to 0", pParentSql); + + issueTsCompQuery(psub, psub->param, pParentSql); } } - taosArrayDestroy(s1); - taosArrayDestroy(s2); + size_t rsize = taosArrayGetSize(resList); + for (int32_t i = 0; i < rsize; ++i) { + SArray** s = taosArrayGet(resList, i); + if (*s) { + taosArrayDestroy(*s); + } + } + + taosArrayDestroy(resList); } static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRows) { @@ -1127,12 +1389,8 @@ static void tsCompRetrieveCallback(void* param, TAOS_RES* tres, int32_t numOfRow tscDebug("%p all subquery retrieve ts complete, do ts block intersect", pParentSql); - // proceeds to launched secondary query to retrieve final data - SJoinSupporter* p1 = pParentSql->pSubs[0]->param; - SJoinSupporter* p2 = pParentSql->pSubs[1]->param; - STimeWindow win = TSWINDOW_INITIALIZER; - int64_t num = doTSBlockIntersect(pParentSql, p1, p2, &win); + int64_t num = doTSBlockIntersect(pParentSql, &win); if (num <= 0) { // no result during ts intersect tscDebug("%p no results generated in ts intersection, free all sub SqlObj and quit", pParentSql); freeJoinSubqueryObj(pParentSql); @@ -1585,7 +1843,7 @@ int32_t tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter SSqlCmd * pCmd = &pSql->cmd; SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); - pSql->res.qid = 0x1; + pSql->res.qId = 0x1; assert(pSql->res.numOfRows == 0); if (pSql->pSubs == NULL) { @@ -1640,6 +1898,8 @@ int32_t 
tscCreateJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSupporter pNewQueryInfo->limit.limit = -1; pNewQueryInfo->limit.offset = 0; + pNewQueryInfo->order.orderColId = INT32_MIN; + // backup the data and clear it in the sqlcmd object memset(&pNewQueryInfo->groupbyExpr, 0, sizeof(SSqlGroupbyExpr)); @@ -1744,7 +2004,7 @@ void tscHandleMasterJoinQuery(SSqlObj* pSql) { memset(pSql->subState.states, 0, sizeof(*pSql->subState.states) * pSql->subState.numOfSub); tscDebug("%p reset all sub states to 0", pSql); - + tscDebug("%p start subquery, total:%d", pSql, pQueryInfo->numOfTables); for (int32_t i = 0; i < pQueryInfo->numOfTables; ++i) { @@ -2179,7 +2439,7 @@ int32_t tscHandleMasterSTableQuery(SSqlObj *pSql) { SColumnModel *pModel = NULL; SColumnModel *pFinalModel = NULL; - pRes->qid = 0x1; // hack the qhandle check + pRes->qId = 0x1; // hack the qhandle check const uint32_t nBufferSize = (1u << 16u); // 64KB @@ -2727,7 +2987,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { tscDebug("%p sub:%p query complete, ep:%s, vgId:%d, orderOfSub:%d, retrieve data", trsupport->pParentSql, pSql, pVgroup->epAddr[0].fqdn, pVgroup->vgId, trsupport->subqueryIndex); - if (pSql->res.qid == 0) { // qhandle is 0, code is TSDB_CODE_SUCCESS means no results generated from this vnode + if (pSql->res.qId == 0) { // qhandle is NULL, code is TSDB_CODE_SUCCESS means no results generated from this vnode tscRetrieveFromDnodeCallBack(param, pSql, 0); } else { taos_fetch_rows_a(tres, tscRetrieveFromDnodeCallBack, param); @@ -3325,9 +3585,9 @@ void* createQueryInfoFromQueryNode(SQueryInfo* pQueryInfo, SExprInfo* pExprs, ST STsBufInfo bufInfo = {0}; SQueryParam param = {.pOperator = pa}; /*int32_t code = */initQInfo(&bufInfo, NULL, pQInfo, ¶m, NULL, 0); - pQInfo->runtimeEnv.proot->upstream = pOperator; - qTableQuery(pQInfo); + + qTableQuery(pQInfo, NULL); return pQInfo; diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 209c3223aa..29fd7306a3 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -711,7 +711,7 @@ void tscFreeSqlResult(SSqlObj* pSql) { memset(&pSql->res, 0, sizeof(SSqlRes)); } -static void tscFreeSubobj(SSqlObj* pSql) { +void tscFreeSubobj(SSqlObj* pSql) { if (pSql->subState.numOfSub == 0) { return; } @@ -1533,6 +1533,33 @@ int32_t tscSqlExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepco return 0; } +bool tscColumnExists(SArray* pColumnList, SColumnIndex* pColIndex) { + // ignore the tbname columnIndex to be inserted into source list + if (pColIndex->columnIndex < 0) { + return false; + } + + size_t numOfCols = taosArrayGetSize(pColumnList); + int16_t col = pColIndex->columnIndex; + + int32_t i = 0; + while (i < numOfCols) { + SColumn* pCol = taosArrayGetP(pColumnList, i); + if ((pCol->colIndex.columnIndex != col) || (pCol->colIndex.tableIndex != pColIndex->tableIndex)) { + ++i; + continue; + } else { + break; + } + } + + if (i >= numOfCols || numOfCols == 0) { + return false; + } + + return true; +} + void tscSqlExprAssign(SExprInfo* dst, const SExprInfo* src) { assert(dst != NULL && src != NULL); @@ -1860,7 +1887,25 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { dest->tbnameCond.uid = src->tbnameCond.uid; dest->tbnameCond.len = src->tbnameCond.len; - memcpy(&dest->joinInfo, &src->joinInfo, sizeof(SJoinInfo)); + dest->joinInfo.hasJoin = src->joinInfo.hasJoin; + + for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) { + if (src->joinInfo.joinTables[i]) { + dest->joinInfo.joinTables[i] = calloc(1, 
sizeof(SJoinNode)); + + memcpy(dest->joinInfo.joinTables[i], src->joinInfo.joinTables[i], sizeof(SJoinNode)); + + if (src->joinInfo.joinTables[i]->tsJoin) { + dest->joinInfo.joinTables[i]->tsJoin = taosArrayDup(src->joinInfo.joinTables[i]->tsJoin); + } + + if (src->joinInfo.joinTables[i]->tagJoin) { + dest->joinInfo.joinTables[i]->tagJoin = taosArrayDup(src->joinInfo.joinTables[i]->tagJoin); + } + } + } + + dest->relType = src->relType; if (src->pCond == NULL) { @@ -1906,6 +1951,23 @@ void tscTagCondRelease(STagCond* pTagCond) { taosArrayDestroy(pTagCond->pCond); } + for (int32_t i = 0; i < TSDB_MAX_JOIN_TABLE_NUM; ++i) { + SJoinNode *node = pTagCond->joinInfo.joinTables[i]; + if (node == NULL) { + continue; + } + + if (node->tsJoin != NULL) { + taosArrayDestroy(node->tsJoin); + } + + if (node->tagJoin != NULL) { + taosArrayDestroy(node->tagJoin); + } + + tfree(node); + } + memset(pTagCond, 0, sizeof(STagCond)); } @@ -2037,7 +2099,7 @@ void tscInitQueryInfo(SQueryInfo* pQueryInfo) { pQueryInfo->resColumnId = TSDB_RES_COL_ID; pQueryInfo->limit.limit = -1; pQueryInfo->limit.offset = 0; - + pQueryInfo->slimit.limit = -1; pQueryInfo->slimit.offset = 0; pQueryInfo->pUpstream = taosArrayInit(4, POINTER_BYTES); @@ -2598,7 +2660,7 @@ void tscDoQuery(SSqlObj* pSql) { } else { SQueryInfo *pQueryInfo = tscGetQueryInfo(pCmd, pCmd->clauseIndex); uint16_t type = pQueryInfo->type; - + if (QUERY_IS_JOIN_QUERY(type)) { if (!TSDB_QUERY_HAS_TYPE(type, TSDB_QUERY_TYPE_SUBQUERY)) { tscHandleMasterJoinQuery(pSql); @@ -2632,16 +2694,21 @@ void tscDoQuery(SSqlObj* pSql) { } int16_t tscGetJoinTagColIdByUid(STagCond* pTagCond, uint64_t uid) { - if (pTagCond->joinInfo.left.uid == uid) { - return pTagCond->joinInfo.left.tagColId; - } else if (pTagCond->joinInfo.right.uid == uid) { - return pTagCond->joinInfo.right.tagColId; - } else { - assert(0); - return -1; + int32_t i = 0; + while (i < TSDB_MAX_JOIN_TABLE_NUM) { + SJoinNode* node = pTagCond->joinInfo.joinTables[i]; + if (node && node->uid == uid) { + return node->tagColId; + } + + i++; } + + assert(0); + return -1; } + int16_t tscGetTagColIndexById(STableMeta* pTableMeta, int16_t colId) { int32_t numOfTags = tscGetNumOfTags(pTableMeta); @@ -3240,7 +3307,7 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt pQueryAttr->queryBlockDist = isBlockDistQuery(pQueryInfo); pQueryAttr->pointInterpQuery = tscIsPointInterpQuery(pQueryInfo); pQueryAttr->timeWindowInterpo = timeWindowInterpoRequired(pQueryInfo); - + pQueryAttr->numOfCols = numOfCols; pQueryAttr->numOfOutput = numOfOutput; pQueryAttr->limit = pQueryInfo->limit; @@ -3256,6 +3323,12 @@ int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAt pQueryAttr->pGroupbyExpr = calloc(1, sizeof(SSqlGroupbyExpr)); *(pQueryAttr->pGroupbyExpr) = pQueryInfo->groupbyExpr; + if (pQueryInfo->groupbyExpr.numOfGroupCols > 0) { + pQueryAttr->pGroupbyExpr->columnInfo = taosArrayDup(pQueryInfo->groupbyExpr.columnInfo); + } else { + assert(pQueryInfo->groupbyExpr.columnInfo == NULL); + } + pQueryAttr->pExpr1 = calloc(pQueryAttr->numOfOutput, sizeof(SExprInfo)); for(int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { SExprInfo* pExpr = tscSqlExprGet(pQueryInfo, i); diff --git a/src/common/inc/tcmdtype.h b/src/common/inc/tcmdtype.h index bec8590536..be16e80124 100644 --- a/src/common/inc/tcmdtype.h +++ b/src/common/inc/tcmdtype.h @@ -51,6 +51,7 @@ enum { TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_ACCT, "alter-acct" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_TABLE, "alter-table" ) 
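/*
 * Editor's note: illustrative sketch only, not part of this patch; the names
 * below are invented. The TSDB_SQL_SYNC_DB_REPLICA entry added to this enum
 * follows the existing pattern in the diff: a new command value plus a matching
 * builder registered in the tscBuildMsg[] table inside tscInitMsgsFp() (see the
 * tscServer.c hunk earlier). A minimal version of that pattern looks like this:
 */
typedef int (*msgBuilderFn)(void *pSql, void *pInfo);

enum { CMD_SELECT = 0, CMD_SYNC_DB_REPLICA, CMD_MAX };

static msgBuilderFn buildMsg[CMD_MAX];

static int buildSyncDbReplicaMsg(void *pSql, void *pInfo) {
  (void)pSql;
  (void)pInfo;
  /* allocate the payload, copy the fully qualified db name, set the msg type */
  return 0;
}

static void initMsgBuilders(void) {
  /* one dispatch entry per command keeps SQL parsing and wire format decoupled */
  buildMsg[CMD_SYNC_DB_REPLICA] = buildSyncDbReplicaMsg;
}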
TSDB_DEFINE_SQL_TYPE( TSDB_SQL_ALTER_DB, "alter-db" ) + TSDB_DEFINE_SQL_TYPE(TSDB_SQL_SYNC_DB_REPLICA, "sync db-replica") TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_MNODE, "create-mnode" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_DROP_MNODE, "drop-mnode" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CREATE_DNODE, "create-dnode" ) @@ -87,13 +88,13 @@ enum { */ TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RETRIEVE_EMPTY_RESULT, "retrieve-empty-result" ) - TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" ) - TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" ) - TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_RESET_CACHE, "reset-cache" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_STATUS, "serv-status" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_DB, "current-db" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_SERV_VERSION, "serv-version" ) - TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CLI_VERSION, "cli-version" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CURRENT_USER, "current-user ") - TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" ) + TSDB_DEFINE_SQL_TYPE( TSDB_SQL_CFG_LOCAL, "cfg-local" ) TSDB_DEFINE_SQL_TYPE( TSDB_SQL_MAX, "max" ) }; diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c index 4a5df9361b..5f4ce046ed 100644 --- a/src/common/src/tglobal.c +++ b/src/common/src/tglobal.c @@ -51,7 +51,7 @@ int32_t tsMaxShellConns = 50000; int32_t tsMaxConnections = 5000; int32_t tsShellActivityTimer = 3; // second float tsNumOfThreadsPerCore = 1.0f; -int32_t tsNumOfCommitThreads = 1; +int32_t tsNumOfCommitThreads = 4; float tsRatioOfQueryCores = 1.0f; int8_t tsDaylight = 0; char tsTimezone[TSDB_TIMEZONE_LEN] = {0}; @@ -71,7 +71,7 @@ int32_t tsMaxBinaryDisplayWidth = 30; int32_t tsCompressMsgSize = -1; // client -int32_t tsMaxSQLStringLen = TSDB_MAX_SQL_LEN; +int32_t tsMaxSQLStringLen = TSDB_MAX_ALLOWED_SQL_LEN; int8_t tsTscEnableRecordSql = 0; // the maximum number of results for projection query on super table that are returned from diff --git a/src/connector/jdbc/CMakeLists.txt b/src/connector/jdbc/CMakeLists.txt index 86ddfcb022..eb158b1f76 100644 --- a/src/connector/jdbc/CMakeLists.txt +++ b/src/connector/jdbc/CMakeLists.txt @@ -8,7 +8,7 @@ IF (TD_MVN_INSTALLED) ADD_CUSTOM_COMMAND(OUTPUT ${JDBC_CMD_NAME} POST_BUILD COMMAND mvn -Dmaven.test.skip=true install -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml - COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.22-dist.jar ${LIBRARY_OUTPUT_PATH} + COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/target/taos-jdbcdriver-2.0.25-dist.jar ${LIBRARY_OUTPUT_PATH} COMMAND mvn -Dmaven.test.skip=true clean -f ${CMAKE_CURRENT_SOURCE_DIR}/pom.xml COMMENT "build jdbc driver") ADD_CUSTOM_TARGET(${JDBC_TARGET_NAME} ALL WORKING_DIRECTORY ${EXECUTABLE_OUTPUT_PATH} DEPENDS ${JDBC_CMD_NAME}) diff --git a/src/connector/jdbc/deploy-pom.xml b/src/connector/jdbc/deploy-pom.xml index fe93e54c69..eb8c92575c 100755 --- a/src/connector/jdbc/deploy-pom.xml +++ b/src/connector/jdbc/deploy-pom.xml @@ -5,7 +5,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.22 + 2.0.25 jar JDBCDriver @@ -37,17 +37,6 @@ - - commons-logging - commons-logging - 1.2 - - - * - * - - - junit junit @@ -61,21 +50,20 @@ httpclient 4.5.8 - - org.apache.commons - commons-lang3 - 3.9 - com.alibaba fastjson 1.2.58 + + com.google.guava + guava + 29.0-jre + - org.apache.maven.plugins maven-assembly-plugin diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 51cb0b3808..1f75754b0c 100755 --- 
a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -3,7 +3,7 @@ 4.0.0 com.taosdata.jdbc taos-jdbcdriver - 2.0.22 + 2.0.25 jar JDBCDriver https://github.com/taosdata/TDengine/tree/master/src/connector/jdbc @@ -43,7 +43,6 @@ 4.13 test - org.apache.httpcomponents @@ -55,10 +54,22 @@ fastjson 1.2.58 - + + com.google.guava + guava + 29.0-jre + + + + src/main/resources + + **/*.md + + + org.apache.maven.plugins diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java index a2496b0099..976078da95 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractConnection.java @@ -30,9 +30,12 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); // do nothing + return sql; } + + @Override public void setAutoCommit(boolean autoCommit) throws SQLException { if (isClosed()) @@ -448,7 +451,6 @@ public abstract class AbstractConnection extends WrapperImpl implements Connecti if (isClosed) throw (SQLClientInfoException) TSDBError.createSQLException(TSDBErrorNumbers.ERROR_SQLCLIENT_EXCEPTION_ON_CONNECTION_CLOSED); - for (Enumeration enumer = properties.keys(); enumer.hasMoreElements(); ) { String name = (String) enumer.nextElement(); clientInfoProps.put(name, properties.getProperty(name)); diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java new file mode 100644 index 0000000000..7df7252ae2 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractParameterMetaData.java @@ -0,0 +1,138 @@ +package com.taosdata.jdbc; + +import java.sql.ParameterMetaData; +import java.sql.SQLException; +import java.sql.Timestamp; +import java.sql.Types; + +public abstract class AbstractParameterMetaData extends WrapperImpl implements ParameterMetaData { + + private final Object[] parameters; + + public AbstractParameterMetaData(Object[] parameters) { + this.parameters = parameters; + } + + @Override + public int getParameterCount() throws SQLException { + return parameters == null ? 
0 : parameters.length; + } + + @Override + public int isNullable(int param) throws SQLException { + return ParameterMetaData.parameterNullableUnknown; + } + + @Override + public boolean isSigned(int param) throws SQLException { + if (param < 1 && param >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + if (parameters[param - 1] instanceof Byte) + return true; + if (parameters[param - 1] instanceof Short) + return true; + if (parameters[param - 1] instanceof Integer) + return true; + if (parameters[param - 1] instanceof Long) + return true; + if (parameters[param - 1] instanceof Float) + return true; + if (parameters[param - 1] instanceof Double) + return true; + + return false; + } + + @Override + public int getPrecision(int param) throws SQLException { + if (param < 1 && param >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + if (parameters[param - 1] instanceof String) + return ((String) parameters[param - 1]).length(); + if (parameters[param - 1] instanceof byte[]) + return ((byte[]) parameters[param - 1]).length; + return 0; + } + + @Override + public int getScale(int param) throws SQLException { + if (param < 1 && param >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + return 0; + } + + @Override + public int getParameterType(int param) throws SQLException { + if (param < 1 && param >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + if (parameters[param - 1] instanceof Timestamp) + return Types.TIMESTAMP; + if (parameters[param - 1] instanceof Byte) + return Types.TINYINT; + if (parameters[param - 1] instanceof Short) + return Types.SMALLINT; + if (parameters[param - 1] instanceof Integer) + return Types.INTEGER; + if (parameters[param - 1] instanceof Long) + return Types.BIGINT; + if (parameters[param - 1] instanceof Float) + return Types.FLOAT; + if (parameters[param - 1] instanceof Double) + return Types.DOUBLE; + if (parameters[param - 1] instanceof String) + return Types.NCHAR; + if (parameters[param - 1] instanceof byte[]) + return Types.BINARY; + if (parameters[param - 1] instanceof Boolean) + return Types.BOOLEAN; + return Types.OTHER; + } + + @Override + public String getParameterTypeName(int param) throws SQLException { + if (param < 1 && param >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + if (parameters[param - 1] instanceof Timestamp) + return TSDBConstants.jdbcType2TaosTypeName(Types.TIMESTAMP); + if (parameters[param - 1] instanceof Byte) + return TSDBConstants.jdbcType2TaosTypeName(Types.TINYINT); + if (parameters[param - 1] instanceof Short) + return TSDBConstants.jdbcType2TaosTypeName(Types.SMALLINT); + if (parameters[param - 1] instanceof Integer) + return TSDBConstants.jdbcType2TaosTypeName(Types.INTEGER); + if (parameters[param - 1] instanceof Long) + return TSDBConstants.jdbcType2TaosTypeName(Types.BIGINT); + if (parameters[param - 1] instanceof Float) + return TSDBConstants.jdbcType2TaosTypeName(Types.FLOAT); + if (parameters[param - 1] instanceof Double) + return TSDBConstants.jdbcType2TaosTypeName(Types.DOUBLE); + if (parameters[param - 1] instanceof String) + return TSDBConstants.jdbcType2TaosTypeName(Types.NCHAR); + if (parameters[param - 1] instanceof byte[]) + return TSDBConstants.jdbcType2TaosTypeName(Types.BINARY); + if (parameters[param - 
1] instanceof Boolean) + return TSDBConstants.jdbcType2TaosTypeName(Types.BOOLEAN); + + return parameters[param - 1].getClass().getName(); + } + + @Override + public String getParameterClassName(int param) throws SQLException { + if (param < 1 && param >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + return parameters[param - 1].getClass().getName(); + } + + @Override + public int getParameterMode(int param) throws SQLException { + if (param < 1 && param >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + return ParameterMetaData.parameterModeUnknown; + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java index 238d18039d..abd348e68c 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractResultSet.java @@ -11,6 +11,15 @@ import java.util.Map; public abstract class AbstractResultSet extends WrapperImpl implements ResultSet { private int fetchSize; + protected void checkAvailability(int columnIndex, int bounds) throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + if (columnIndex < 1) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " < 1"); + if (columnIndex > bounds) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + bounds); + } + @Override public abstract boolean next() throws SQLException; @@ -46,38 +55,20 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet @Override public abstract double getDouble(int columnIndex) throws SQLException; + @Deprecated @Override public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + return getBigDecimal(columnIndex); } @Override - public byte[] getBytes(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + public abstract byte[] getBytes(int columnIndex) throws SQLException; @Override - public Date getDate(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - - } + public abstract Date getDate(int columnIndex) throws SQLException; @Override - public Time getTime(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + public abstract Time getTime(int columnIndex) throws SQLException; @Override public abstract Timestamp getTimestamp(int columnIndex) throws SQLException; @@ -147,9 +138,10 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet return getDouble(findColumn(columnLabel)); } + @Deprecated @Override public BigDecimal 
getBigDecimal(String columnLabel, int scale) throws SQLException { - return getBigDecimal(findColumn(columnLabel)); + return getBigDecimal(findColumn(columnLabel), scale); } @Override @@ -214,12 +206,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet public abstract ResultSetMetaData getMetaData() throws SQLException; @Override - public Object getObject(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + public abstract Object getObject(int columnIndex) throws SQLException; @Override public Object getObject(String columnLabel) throws SQLException { @@ -243,12 +230,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet } @Override - public BigDecimal getBigDecimal(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + public abstract BigDecimal getBigDecimal(int columnIndex) throws SQLException; @Override public BigDecimal getBigDecimal(String columnLabel) throws SQLException { @@ -718,9 +700,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet @Override public Object getObject(String columnLabel, Map> map) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + return getObject(findColumn(columnLabel), map); } @Override @@ -760,9 +740,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet @Override public Date getDate(String columnLabel, Calendar cal) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + return getDate(findColumn(columnLabel), cal); } @Override @@ -774,23 +752,15 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet @Override public Time getTime(String columnLabel, Calendar cal) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + return getTime(findColumn(columnLabel), cal); } @Override - public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + public abstract Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException; @Override public Timestamp getTimestamp(String columnLabel, Calendar cal) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + return getTimestamp(findColumn(columnLabel), cal); } @Override @@ -1198,9 +1168,7 @@ public abstract class AbstractResultSet extends WrapperImpl implements ResultSet @Override public T getObject(String columnLabel, Class type) throws SQLException { - if (isClosed()) - throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + return getObject(findColumn(columnLabel), type); } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java index 8b6c074d1b..9dc559339a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/AbstractStatement.java @@ -9,6 +9,8 @@ public abstract class AbstractStatement extends WrapperImpl implements Statement protected List batchedArgs; private int fetchSize; + + @Override public abstract ResultSet executeQuery(String sql) throws SQLException; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SavedPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SavedPreparedStatement.java deleted file mode 100644 index 512fcd26b8..0000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/SavedPreparedStatement.java +++ /dev/null @@ -1,452 +0,0 @@ -package com.taosdata.jdbc; - -import com.taosdata.jdbc.bean.TSDBPreparedParam; - -import java.sql.SQLException; -import java.sql.Timestamp; -import java.util.ArrayList; -import java.util.List; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * this class is used to precompile the sql of tdengine insert or import ops - */ -public class SavedPreparedStatement { - - private TSDBPreparedStatement tsdbPreparedStatement; - - /** - * sql param List - */ - private List sqlParamList; - - /** - * init param according the sql - */ - private TSDBPreparedParam initPreparedParam; - - /** - * is table name dynamic in the prepared sql - */ - private boolean isTableNameDynamic; - - /** - * insert or import sql template pattern, the template are the following: - *

- * insert/import into tableName [(field1, field2, ...)] [using stables tags(?, ?, ...) ] values(?, ?, ...) (?, ?, ...) - *

- * we split it to three part: - * 1. prefix, insert/import - * 2. middle, tableName [(field1, field2, ...)] [using stables tags(?, ?, ...) ] - * 3. valueList, the content after values, for example (?, ?, ...) (?, ?, ...) - */ - private Pattern sqlPattern = Pattern.compile("(?s)(?i)^\\s*(INSERT|IMPORT)\\s+INTO\\s+((?\\S+)\\s*(\\(.*\\))?\\s+(USING\\s+(?\\S+)\\s+TAGS\\s*\\((?.+)\\))?)\\s*VALUES\\s*(?\\(.*\\)).*"); - - /** - * the raw sql template - */ - private String sql; - - /** - * the prefix part of sql - */ - private String prefix; - - /** - * the middle part of sql - */ - private String middle; - - private int middleParamSize; - - /** - * the valueList part of sql - */ - private String valueList; - - private int valueListSize; - - /** - * default param value - */ - private static final String DEFAULT_VALUE = "NULL"; - - private static final String PLACEHOLDER = "?"; - - private String tableName; - - /** - * is the parameter add to batch list - */ - private boolean isAddBatch; - - public SavedPreparedStatement(String sql, TSDBPreparedStatement tsdbPreparedStatement) throws SQLException { - this.sql = sql; - this.tsdbPreparedStatement = tsdbPreparedStatement; - this.sqlParamList = new ArrayList<>(); - - parsePreparedParam(this.sql); - } - - /** - * parse the init param according the sql param - * - * @param sql - */ - private void parsePreparedParam(String sql) throws SQLException { - - Matcher matcher = sqlPattern.matcher(sql); - - if (matcher.find()) { - - tableName = matcher.group("tablename"); - - if (tableName != null && PLACEHOLDER.equals(tableName)) { - // the table name is dynamic - this.isTableNameDynamic = true; - } - - prefix = matcher.group(1); - middle = matcher.group(2); - valueList = matcher.group("valueList"); - - if (middle != null && !"".equals(middle)) { - middleParamSize = parsePlaceholder(middle); - } - - if (valueList != null && !"".equals(valueList)) { - valueListSize = parsePlaceholder(valueList); - } - - initPreparedParam = initDefaultParam(tableName, middleParamSize, valueListSize); - - } else { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_INVALID_SQL); - } - - } - - private TSDBPreparedParam initDefaultParam(String tableName, int middleParamSize, int valueListSize) { - - TSDBPreparedParam tsdbPreparedParam = new TSDBPreparedParam(tableName); - - tsdbPreparedParam.setMiddleParamList(getDefaultParamList(middleParamSize)); - - tsdbPreparedParam.setValueList(getDefaultParamList(valueListSize)); - - return tsdbPreparedParam; - } - - /** - * generate the default param value list - * - * @param paramSize - * @return - */ - private List getDefaultParamList(int paramSize) { - - List paramList = new ArrayList<>(paramSize); - if (paramSize > 0) { - for (int i = 0; i < paramSize; i++) { - paramList.add(i, DEFAULT_VALUE); - } - } - - return paramList; - } - - /** - * calculate the placeholder num - * - * @param value - * @return - */ - private int parsePlaceholder(String value) { - - Pattern pattern = Pattern.compile("[?]"); - - Matcher matcher = pattern.matcher(value); - - int result = 0; - while (matcher.find()) { - result++; - } - return result; - } - - /** - * set current row params - * - * @param parameterIndex the first parameter is 1, the second is 2, ... 
- * @param x the parameter value - */ - public void setParam(int parameterIndex, Object x) throws SQLException { - - int paramSize = this.middleParamSize + this.valueListSize; - - String errorMsg = String.format("the parameterIndex %s out of the range [1, %s]", parameterIndex, paramSize); - - if (parameterIndex < 1 || parameterIndex > paramSize) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE,errorMsg); - } - - this.isAddBatch = false; //set isAddBatch to false - - if (x == null) { - x = DEFAULT_VALUE; // set default null string - } - - parameterIndex = parameterIndex - 1; // start from 0 in param list - - if (this.middleParamSize > 0 && parameterIndex >= 0 && parameterIndex < this.middleParamSize) { - - this.initPreparedParam.setMiddleParam(parameterIndex, x); - return; - } - - if (this.valueListSize > 0 && parameterIndex >= this.middleParamSize && parameterIndex < paramSize) { - - this.initPreparedParam.setValueParam(parameterIndex - this.middleParamSize, x); - return; - } - - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE,errorMsg); - } - - public void addBatch() { - - addCurrentRowParamToList(); - this.initPreparedParam = initDefaultParam(tableName, middleParamSize, valueListSize); - } - - /** - * add current param to batch list - */ - private void addCurrentRowParamToList() { - - if (initPreparedParam != null && (this.middleParamSize > 0 || this.valueListSize > 0)) { - this.sqlParamList.add(initPreparedParam); // add current param to batch list - } - this.isAddBatch = true; - } - - - /** - * execute the sql with batch sql - * - * @return - * @throws SQLException - */ - public int[] executeBatch() throws SQLException { - - int result = executeBatchInternal(); - - return new int[]{result}; - } - - - public int executeBatchInternal() throws SQLException { - - if (!isAddBatch) { - addCurrentRowParamToList(); // add current param to batch list - } - - //1. generate batch sql - String sql = generateExecuteSql(); - //2. execute batch sql - int result = executeSql(sql); - - //3. 
clear batch param list - this.sqlParamList.clear(); - - return result; - } - - /** - * generate the batch sql - * - * @return - */ - private String generateExecuteSql() { - - StringBuilder stringBuilder = new StringBuilder(); - - stringBuilder.append(prefix); - stringBuilder.append(" into "); - - if (!isTableNameDynamic) { - // tablename will not need to be replaced - String middleValue = replaceMiddleListParam(middle, sqlParamList); - stringBuilder.append(middleValue); - stringBuilder.append(" values"); - - stringBuilder.append(replaceValueListParam(valueList, sqlParamList)); - - } else { - // need to replace tablename - - if (sqlParamList.size() > 0) { - - TSDBPreparedParam firstPreparedParam = sqlParamList.get(0); - - //replace middle part and value part of first row - String firstRow = replaceMiddleAndValuePart(firstPreparedParam); - stringBuilder.append(firstRow); - - //the first param in the middleParamList is the tableName - String lastTableName = firstPreparedParam.getMiddleParamList().get(0).toString(); - - if (sqlParamList.size() > 1) { - - for (int i = 1; i < sqlParamList.size(); i++) { - TSDBPreparedParam currentParam = sqlParamList.get(i); - String currentTableName = currentParam.getMiddleParamList().get(0).toString(); - if (lastTableName.equalsIgnoreCase(currentTableName)) { - // tablename is same with the last row ,so only need to append the part of value - - String values = replaceTemplateParam(valueList, currentParam.getValueList()); - stringBuilder.append(values); - - } else { - // tablename difference with the last row - //need to replace middle part and value part - String row = replaceMiddleAndValuePart(currentParam); - stringBuilder.append(row); - lastTableName = currentTableName; - } - } - } - - } else { - - stringBuilder.append(middle); - stringBuilder.append(" values"); - stringBuilder.append(valueList); - } - - } - - return stringBuilder.toString(); - } - - /** - * replace the middle and value part - * - * @param tsdbPreparedParam - * @return - */ - private String replaceMiddleAndValuePart(TSDBPreparedParam tsdbPreparedParam) { - - StringBuilder stringBuilder = new StringBuilder(" "); - - String middlePart = replaceTemplateParam(middle, tsdbPreparedParam.getMiddleParamList()); - - stringBuilder.append(middlePart); - stringBuilder.append(" values "); - - String valuePart = replaceTemplateParam(valueList, tsdbPreparedParam.getValueList()); - stringBuilder.append(valuePart); - stringBuilder.append(" "); - - return stringBuilder.toString(); - } - - /** - * replace the placeholder of the middle part of sql template with TSDBPreparedParam list - * - * @param template - * @param sqlParamList - * @return - */ - private String replaceMiddleListParam(String template, List sqlParamList) { - - if (sqlParamList.size() > 0) { - - //becase once the subTableName is static then will be ignore the tag which after the first setTag - return replaceTemplateParam(template, sqlParamList.get(0).getMiddleParamList()); - - } - - return template; - } - - - /** - * replace the placeholder of the template with TSDBPreparedParam list - * - * @param template - * @param sqlParamList - * @return - */ - private String replaceValueListParam(String template, List sqlParamList) { - - StringBuilder stringBuilder = new StringBuilder(); - - if (sqlParamList.size() > 0) { - - for (TSDBPreparedParam tsdbPreparedParam : sqlParamList) { - - String tmp = replaceTemplateParam(template, tsdbPreparedParam.getValueList()); - - stringBuilder.append(tmp); - } - - } else { - stringBuilder.append(template); - } - - 
return stringBuilder.toString(); - } - - /** - * replace the placeholder of the template with paramList - * - * @param template - * @param paramList - * @return - */ - private String replaceTemplateParam(String template, List paramList) { - - if (paramList.size() > 0) { - - String tmp = template; - - for (int i = 0; i < paramList.size(); ++i) { - - String paraStr = getParamString(paramList.get(i)); - - tmp = tmp.replaceFirst("[" + PLACEHOLDER + "]", paraStr); - - } - - return tmp; - - } else { - return template; - } - - } - - /** - * get the string of param object - * - * @param paramObj - * @return - */ - private String getParamString(Object paramObj) { - - String paraStr = paramObj.toString(); - if (paramObj instanceof Timestamp || (paramObj instanceof String && !DEFAULT_VALUE.equalsIgnoreCase(paraStr))) { - paraStr = "'" + paraStr + "'"; - } - return paraStr; - } - - - private int executeSql(String sql) throws SQLException { - - return tsdbPreparedStatement.executeUpdate(sql); - } - -} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java index a8653eb172..5b4615485d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBConnection.java @@ -27,10 +27,6 @@ public class TSDBConnection extends AbstractConnection { return this.batchFetch; } - public void setBatchFetch(Boolean batchFetch) { - this.batchFetch = batchFetch; - } - public TSDBConnection(Properties info, TSDBDatabaseMetaData meta) throws SQLException { this.databaseMetaData = meta; connect(info.getProperty(TSDBDriver.PROPERTY_KEY_HOST), @@ -61,9 +57,7 @@ public class TSDBConnection extends AbstractConnection { throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); } - TSDBStatement statement = new TSDBStatement(this, this.connector); - statement.setConnection(this); - return statement; + return new TSDBStatement(this, this.connector); } public TSDBSubscribe subscribe(String topic, String sql, boolean restart) throws SQLException { @@ -79,10 +73,8 @@ public class TSDBConnection extends AbstractConnection { } public PreparedStatement prepareStatement(String sql) throws SQLException { - if (isClosed()) { + if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - return new TSDBPreparedStatement(this, this.connector, sql); } @@ -104,11 +96,4 @@ public class TSDBConnection extends AbstractConnection { return this.databaseMetaData; } - public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { - if (isClosed()) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - } - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } - -} +} \ No newline at end of file diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 7d3741917c..5e3ffffa4f 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -29,45 +29,25 @@ public class TSDBJNIConnector { private static volatile Boolean isInitialized = false; private TaosInfo taosInfo = TaosInfo.getInstance(); + // Connection pointer used in C + private long taos = TSDBConstants.JNI_NULL_POINTER; + // result set status in current 
connection + private boolean isResultsetClosed = true; + private int affectedRows = -1; static { System.loadLibrary("taos"); System.out.println("java.library.path:" + System.getProperty("java.library.path")); } - /** - * Connection pointer used in C - */ - private long taos = TSDBConstants.JNI_NULL_POINTER; - - /** - * Result set pointer for the current connection - */ -// private long taosResultSetPointer = TSDBConstants.JNI_NULL_POINTER; - - /** - * result set status in current connection - */ - private boolean isResultsetClosed = true; - private int affectedRows = -1; - - /** - * Whether the connection is closed - */ public boolean isClosed() { return this.taos == TSDBConstants.JNI_NULL_POINTER; } - /** - * Returns the status of last result set in current connection - */ public boolean isResultsetClosed() { return this.isResultsetClosed; } - /** - * Initialize static variables in JNI to optimize performance - */ public static void init(String configDir, String locale, String charset, String timezone) throws SQLWarning { synchronized (isInitialized) { if (!isInitialized) { @@ -93,11 +73,6 @@ public class TSDBJNIConnector { public static native String getTsCharset(); - /** - * Get connection pointer - * - * @throws SQLException - */ public boolean connect(String host, int port, String dbName, String user, String password) throws SQLException { if (this.taos != TSDBConstants.JNI_NULL_POINTER) { // this.closeConnectionImp(this.taos); @@ -185,13 +160,6 @@ public class TSDBJNIConnector { private native String getErrMsgImp(long pSql); - /** - * Get resultset pointer - * Each connection should have a single open result set at a time - */ -// public long getResultSet() { -// return taosResultSetPointer; -// } private native long getResultSetImp(long connection, long pSql); public boolean isUpdateQuery(long pSql) { @@ -231,6 +199,7 @@ public class TSDBJNIConnector { // } // return resCode; // } + private native int freeResultSetImp(long connection, long result); /** @@ -323,8 +292,7 @@ public class TSDBJNIConnector { * Validate if a create table sql statement is correct without actually creating that table */ public boolean validateCreateTableSql(String sql) { - long connection = taos; - int res = validateCreateTableSqlImp(connection, sql.getBytes()); + int res = validateCreateTableSqlImp(taos, sql.getBytes()); return res != 0 ? 
false : true; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java new file mode 100644 index 0000000000..9ee23e3a26 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBParameterMetaData.java @@ -0,0 +1,8 @@ +package com.taosdata.jdbc; + +public class TSDBParameterMetaData extends AbstractParameterMetaData { + + public TSDBParameterMetaData(Object[] parameters) { + super(parameters); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java index d294c89745..728b537f71 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBPreparedStatement.java @@ -18,6 +18,7 @@ import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.net.URL; +import java.nio.charset.Charset; import java.sql.*; import java.util.ArrayList; import java.util.Calendar; @@ -30,36 +31,49 @@ import java.util.regex.Pattern; * compatibility needs. */ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStatement { - protected String rawSql; - protected String sql; - protected ArrayList parameters = new ArrayList<>(); + + private String rawSql; + private String sql; + // private ArrayList parameters = new ArrayList<>(); + private Object[] parameters; + private boolean isPrepared; //start with insert or import and is case-insensitive private static Pattern savePattern = Pattern.compile("(?i)^\\s*(insert|import)"); - // is insert or import private boolean isSaved; - private SavedPreparedStatement savedPreparedStatement; - private ParameterMetaData parameterMetaData; + // private SavedPreparedStatement savedPreparedStatement; + private volatile TSDBParameterMetaData parameterMetaData; TSDBPreparedStatement(TSDBConnection connection, TSDBJNIConnector connecter, String sql) { super(connection, connecter); init(sql); + + if (sql.contains("?")) { + int parameterCnt = 0; + for (int i = 0; i < sql.length(); i++) { + if ('?' 
== sql.charAt(i)) { + parameterCnt++; + } + } + parameters = new Object[parameterCnt]; + this.isPrepared = true; + } } private void init(String sql) { this.rawSql = sql; preprocessSql(); +// this.isSaved = isSavedSql(this.rawSql); +// if (this.isSaved) { +// try { +// this.savedPreparedStatement = new SavedPreparedStatement(this.rawSql, this); +// } catch (SQLException e) { +// e.printStackTrace(); +// } +// } - this.isSaved = isSavedSql(this.rawSql); - if (this.isSaved) { - try { - this.savedPreparedStatement = new SavedPreparedStatement(this.rawSql, this); - } catch (SQLException e) { - e.printStackTrace(); - } - } } /** @@ -75,19 +89,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat @Override public int[] executeBatch() throws SQLException { - if (isSaved) { - return this.savedPreparedStatement.executeBatch(); - } else { - return super.executeBatch(); - } - } - - public ArrayList getParameters() { - return parameters; - } - - public void setParameters(ArrayList parameters) { - this.parameters = parameters; +// if (isSaved) { +// return this.savedPreparedStatement.executeBatch(); +// } else { + return super.executeBatch(); +// } } /* @@ -151,41 +157,73 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat * * @return a string of the native sql statement for TSDB */ - private String getNativeSql() { - this.sql = this.rawSql; - for (int i = 0; i < parameters.size(); ++i) { - Object para = parameters.get(i); +// private String getNativeSql(String rawSql) { +// for (int i = 0; i < parameters.length; i++) { +// Object para = parameters[i]; +// if (para != null) { +// String paraStr = para.toString(); +// if (para instanceof Timestamp || para instanceof String) { +// paraStr = "'" + paraStr + "'"; +// } +// this.sql = this.sql.replaceFirst("[?]", paraStr); +// } else { +// this.sql = this.sql.replaceFirst("[?]", "NULL"); +// } +// } +// parameters = new Object[parameters.length]; +// return sql; +// } + + private String getNativeSql(String rawSql) throws SQLException { + String sql = rawSql; + for (int i = 0; i < parameters.length; ++i) { + Object para = parameters[i]; if (para != null) { - String paraStr = para.toString(); - if (para instanceof Timestamp || para instanceof String) { + String paraStr; + if (para instanceof byte[]) { + paraStr = new String((byte[]) para, Charset.forName("UTF-8")); + } else { + paraStr = para.toString(); + } + // if para is timestamp or String or byte[] need to translate ' character + if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) { + paraStr = paraStr.replaceAll("'", "\\\\\\\\'"); paraStr = "'" + paraStr + "'"; } - this.sql = this.sql.replaceFirst("[?]", paraStr); + sql = sql.replaceFirst("[?]", paraStr); } else { - this.sql = this.sql.replaceFirst("[?]", "NULL"); + sql = sql.replaceFirst("[?]", "NULL"); } } - parameters.clear(); + clearParameters(); return sql; } @Override public ResultSet executeQuery() throws SQLException { - if (isSaved) { - this.savedPreparedStatement.executeBatchInternal(); - return null; - } else { - return super.executeQuery(getNativeSql()); - } +// if (isSaved) { +// this.savedPreparedStatement.executeBatchInternal(); +// return null; +// } else { + + if (!isPrepared) + return executeQuery(this.rawSql); + + final String sql = getNativeSql(this.rawSql); + return executeQuery(sql); +// } } @Override public int executeUpdate() throws SQLException { - if (isSaved) { - return this.savedPreparedStatement.executeBatchInternal(); - } else { - 
return super.executeUpdate(getNativeSql()); - } +// if (isSaved) { +// return this.savedPreparedStatement.executeBatchInternal(); +// } else { + if (!isPrepared) + return executeUpdate(this.rawSql); + String sql = getNativeSql(this.rawSql); + return executeUpdate(sql); +// } } private boolean isSupportedSQLType(int sqlType) { @@ -201,35 +239,6 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat case Types.BINARY: case Types.NCHAR: return true; - case Types.ARRAY: - case Types.BIT: - case Types.BLOB: - case Types.CHAR: - case Types.CLOB: - case Types.DATALINK: - case Types.DATE: - case Types.DECIMAL: - case Types.DISTINCT: - case Types.JAVA_OBJECT: - case Types.LONGNVARCHAR: - case Types.LONGVARBINARY: - case Types.LONGVARCHAR: - case Types.NCLOB: - case Types.NULL: - case Types.NUMERIC: - case Types.NVARCHAR: - case Types.OTHER: - case Types.REAL: - case Types.REF: - case Types.REF_CURSOR: - case Types.ROWID: - case Types.SQLXML: - case Types.STRUCT: - case Types.TIME: - case Types.TIME_WITH_TIMEZONE: - case Types.TIMESTAMP_WITH_TIMEZONE: - case Types.VARBINARY: - case Types.VARCHAR: default: return false; } @@ -259,7 +268,7 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setByte(int parameterIndex, byte x) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + setObject(parameterIndex,x); } @Override @@ -315,7 +324,8 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void setBytes(int parameterIndex, byte[] x) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + setObject(parameterIndex,x); } @Override @@ -365,45 +375,63 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public void clearParameters() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - parameters.clear(); + +// parameters.clear(); + parameters = new Object[parameters.length]; } @Override public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); } @Override public void setObject(int parameterIndex, Object x) throws SQLException { - if (isSaved) { - this.savedPreparedStatement.setParam(parameterIndex, x); - } else { - parameters.add(x); - } +// if (isSaved) { +// this.savedPreparedStatement.setParam(parameterIndex, x); +// } else { + if (parameterIndex < 1 && parameterIndex >= parameters.length) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); + + parameters[parameterIndex - 1] = x; +// parameters.add(x); +// } } @Override public boolean execute() throws SQLException { - if (isSaved) { - int result = this.savedPreparedStatement.executeBatchInternal(); - return result > 0; - } else { - return super.execute(getNativeSql()); - } +// if (isSaved) { +// int result = this.savedPreparedStatement.executeBatchInternal(); +// return result > 0; +// } else { + if (!isPrepared) + return execute(this.rawSql); + + final String sql = getNativeSql(this.rawSql); + + return execute(sql); +// } } 
@Override public void addBatch() throws SQLException { - if (isSaved) { - this.savedPreparedStatement.addBatch(); - } else { - if (this.batchedArgs == null) { - batchedArgs = new ArrayList<>(); - } - super.addBatch(getNativeSql()); +// if (isSaved) { +// this.savedPreparedStatement.addBatch(); +// } else { + if (this.batchedArgs == null) { + batchedArgs = new ArrayList<>(); } + + if (!isPrepared) { + addBatch(this.rawSql); + } else { + String sql = this.getConnection().nativeSQL(this.rawSql); + addBatch(sql); + } +// } } @Override @@ -491,9 +519,11 @@ public class TSDBPreparedStatement extends TSDBStatement implements PreparedStat public ParameterMetaData getParameterMetaData() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - //TODO: parameterMetaData not supported -// return null; - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + if (parameterMetaData == null) { + this.parameterMetaData = new TSDBParameterMetaData(parameters); + } + return this.parameterMetaData; } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java index 7c2940fdc2..a20ddaa836 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSet.java @@ -14,9 +14,14 @@ *****************************************************************************/ package com.taosdata.jdbc; +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import com.google.common.primitives.Shorts; + import java.math.BigDecimal; import java.sql.*; import java.util.ArrayList; +import java.util.Calendar; import java.util.List; public class TSDBResultSet extends AbstractResultSet implements ResultSet { @@ -120,158 +125,189 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { } public String getString(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + String res = null; - int colIndex = getTrueColumnIndex(columnIndex); - if (this.getBatchFetch()) - return this.blockData.getString(colIndex); + return this.blockData.getString(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getString(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = this.rowData.getString(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } public boolean getBoolean(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + boolean res = false; - int colIndex = getTrueColumnIndex(columnIndex); - if (this.getBatchFetch()) - return this.blockData.getBoolean(colIndex); + return this.blockData.getBoolean(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getBoolean(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = this.rowData.getBoolean(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } public byte getByte(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + byte res = 0; - int colIndex = getTrueColumnIndex(columnIndex); - 
if (this.getBatchFetch()) - return (byte) this.blockData.getInt(colIndex); + return (byte) this.blockData.getInt(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = (byte) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = (byte) this.rowData.getInt(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } public short getShort(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + short res = 0; - int colIndex = getTrueColumnIndex(columnIndex); - if (this.getBatchFetch()) - return (short) this.blockData.getInt(colIndex); + return (short) this.blockData.getInt(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = (short) this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = (short) this.rowData.getInt(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } public int getInt(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + int res = 0; - int colIndex = getTrueColumnIndex(columnIndex); - if (this.getBatchFetch()) - return this.blockData.getInt(colIndex); + return this.blockData.getInt(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getInt(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = this.rowData.getInt(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } public long getLong(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + long res = 0L; - int colIndex = getTrueColumnIndex(columnIndex); - if (this.getBatchFetch()) - return this.blockData.getLong(colIndex); + return this.blockData.getLong(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = this.rowData.getLong(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } public float getFloat(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + float res = 0; - int colIndex = getTrueColumnIndex(columnIndex); - if (this.getBatchFetch()) - return (float) this.blockData.getDouble(colIndex); + return (float) this.blockData.getDouble(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) - res = this.rowData.getFloat(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = this.rowData.getFloat(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); return res; } public double getDouble(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + double res = 0; - int colIndex = getTrueColumnIndex(columnIndex); - if (this.getBatchFetch()) - return this.blockData.getDouble(colIndex); + return this.blockData.getDouble(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + 
this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getDouble(colIndex, this.columnMetaDataList.get(colIndex).getColType()); + res = this.rowData.getDouble(columnIndex - 1, this.columnMetaDataList.get(columnIndex - 1).getColType()); } return res; } - @Deprecated - public BigDecimal getBigDecimal(int columnIndex, int scale) throws SQLException { - return new BigDecimal(getLong(columnIndex)); + public byte[] getBytes(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + + Object value = this.rowData.get(columnIndex - 1); + if (value == null) + return null; + + int colType = this.columnMetaDataList.get(columnIndex - 1).getColType(); + switch (colType) { + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + return Longs.toByteArray((Long) value); + case TSDBConstants.TSDB_DATA_TYPE_INT: + return Ints.toByteArray((int) value); + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + return Shorts.toByteArray((Short) value); + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + return new byte[]{(byte) value}; + } + return value.toString().getBytes(); } - public byte[] getBytes(int columnIndex) throws SQLException { - return getString(columnIndex).getBytes(); + @Override + public Date getDate(int columnIndex) throws SQLException { + Timestamp timestamp = getTimestamp(columnIndex); + return timestamp == null ? null : new Date(timestamp.getTime()); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + Timestamp timestamp = getTimestamp(columnIndex); + return timestamp == null ? null : new Time(timestamp.getTime()); } public Timestamp getTimestamp(int columnIndex) throws SQLException { + checkAvailability(columnIndex, this.columnMetaDataList.size()); + Timestamp res = null; - int colIndex = getTrueColumnIndex(columnIndex); if (this.getBatchFetch()) - return this.blockData.getTimestamp(columnIndex); + return this.blockData.getTimestamp(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); if (!lastWasNull) { - res = this.rowData.getTimestamp(colIndex); + res = this.rowData.getTimestamp(columnIndex - 1); } return res; } public ResultSetMetaData getMetaData() throws SQLException { + if (isClosed()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + return new TSDBResultSetMetaData(this.columnMetaDataList); } @Override public Object getObject(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); + checkAvailability(columnIndex, this.columnMetaDataList.size()); + Object res = null; if (this.getBatchFetch()) - return this.blockData.get(colIndex); + return this.blockData.get(columnIndex - 1); - this.lastWasNull = this.rowData.wasNull(colIndex); - return this.rowData.get(colIndex); - } - - @Override - public Object getObject(String columnLabel) throws SQLException { - return this.getObject(this.findColumn(columnLabel)); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); + if (!lastWasNull) { + int colType = this.columnMetaDataList.get(columnIndex - 1).getColType(); + if (colType == TSDBConstants.TSDB_DATA_TYPE_BINARY) + res = ((String) this.rowData.get(columnIndex - 1)).getBytes(); + else + res = this.rowData.get(columnIndex - 1); + } + return res; } public int findColumn(String columnLabel) throws SQLException { @@ -285,14 +321,31 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { @Override public BigDecimal getBigDecimal(int 
columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); + if (this.getBatchFetch()) + return new BigDecimal(this.blockData.getLong(columnIndex - 1)); - if (!this.getBatchFetch()) { - this.lastWasNull = this.rowData.wasNull(colIndex); - return new BigDecimal(this.rowData.getLong(colIndex, this.columnMetaDataList.get(colIndex).getColType())); - } else { - return new BigDecimal(this.blockData.getLong(colIndex)); + this.lastWasNull = this.rowData.wasNull(columnIndex - 1); + BigDecimal res = null; + if (!lastWasNull) { + int colType = this.columnMetaDataList.get(columnIndex - 1).getColType(); + switch (colType) { + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_INT: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + res = new BigDecimal(Long.valueOf(this.rowData.get(columnIndex - 1).toString())); + break; + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + res = new BigDecimal(Double.valueOf(this.rowData.get(columnIndex - 1).toString())); + break; + case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: + return new BigDecimal(((Timestamp) this.rowData.get(columnIndex - 1)).getTime()); + default: + res = new BigDecimal(this.rowData.get(columnIndex - 1).toString()); + } } + return res; } @Override @@ -398,6 +451,12 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { return this.statement; } + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + //TODO:did not use the specified timezone in cal + return getTimestamp(columnIndex); + } + public boolean isClosed() throws SQLException { if (isClosed) return true; @@ -408,17 +467,7 @@ public class TSDBResultSet extends AbstractResultSet implements ResultSet { } public String getNString(int columnIndex) throws SQLException { - int colIndex = getTrueColumnIndex(columnIndex); - return (String) rowData.get(colIndex); + return getString(columnIndex); } - private int getTrueColumnIndex(int columnIndex) throws SQLException { - if (columnIndex < 1) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "columnIndex(" + columnIndex + "): < 1"); - - int numOfCols = this.columnMetaDataList.size(); - if (columnIndex > numOfCols) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "columnIndex: " + columnIndex); - return columnIndex - 1; - } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java index 7abd997459..e4ac5303d0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetMetaData.java @@ -22,136 +22,160 @@ import java.util.List; public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaData { - List colMetaDataList = null; + List colMetaDataList; - public TSDBResultSetMetaData(List metaDataList) { - this.colMetaDataList = metaDataList; - } + public TSDBResultSetMetaData(List metaDataList) { + this.colMetaDataList = metaDataList; + } - public int getColumnCount() throws SQLException { - return colMetaDataList.size(); - } + public int getColumnCount() throws SQLException { + return colMetaDataList.size(); + } - public boolean isAutoIncrement(int column) throws SQLException { - return false; - } + public boolean isAutoIncrement(int column) throws SQLException 
{ + return false; + } - public boolean isCaseSensitive(int column) throws SQLException { - return false; - } + public boolean isCaseSensitive(int column) throws SQLException { + return false; + } - public boolean isSearchable(int column) throws SQLException { - if (column == 1) { - return true; - } - return false; - } + public boolean isSearchable(int column) throws SQLException { + if (column == 1) { + return true; + } + return false; + } - public boolean isCurrency(int column) throws SQLException { - return false; - } + public boolean isCurrency(int column) throws SQLException { + return false; + } - public int isNullable(int column) throws SQLException { - if (column == 1) { - return columnNoNulls; - } - return columnNullable; - } + public int isNullable(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public boolean isSigned(int column) throws SQLException { - ColumnMetaData meta = this.colMetaDataList.get(column - 1); - switch (meta.getColType()) { - case TSDBConstants.TSDB_DATA_TYPE_TINYINT: - case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: - case TSDBConstants.TSDB_DATA_TYPE_INT: - case TSDBConstants.TSDB_DATA_TYPE_BIGINT: - case TSDBConstants.TSDB_DATA_TYPE_FLOAT: - case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: - return true; - default: - return false; - } - } + if (column == 1) { + return columnNoNulls; + } + return columnNullable; + } - public int getColumnDisplaySize(int column) throws SQLException { - return colMetaDataList.get(column - 1).getColSize(); - } + public boolean isSigned(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public String getColumnLabel(int column) throws SQLException { - return colMetaDataList.get(column - 1).getColName(); - } + ColumnMetaData meta = this.colMetaDataList.get(column - 1); + switch (meta.getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_TINYINT: + case TSDBConstants.TSDB_DATA_TYPE_SMALLINT: + case TSDBConstants.TSDB_DATA_TYPE_INT: + case TSDBConstants.TSDB_DATA_TYPE_BIGINT: + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return true; + default: + return false; + } + } - public String getColumnName(int column) throws SQLException { - return colMetaDataList.get(column - 1).getColName(); - } + public int getColumnDisplaySize(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public String getSchemaName(int column) throws SQLException { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + return colMetaDataList.get(column - 1).getColSize(); + } - public int getPrecision(int column) throws SQLException { - ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1); - switch (columnMetaData.getColType()) { - case TSDBConstants.TSDB_DATA_TYPE_FLOAT: - return 5; - case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: - return 9; - case TSDBConstants.TSDB_DATA_TYPE_BINARY: - case TSDBConstants.TSDB_DATA_TYPE_NCHAR: - return columnMetaData.getColSize(); - default: - return 0; - } - } + public String getColumnLabel(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public int 
getScale(int column) throws SQLException { - ColumnMetaData meta = this.colMetaDataList.get(column - 1); - switch (meta.getColType()) { - case TSDBConstants.TSDB_DATA_TYPE_FLOAT: - return 5; - case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: - return 9; - default: - return 0; - } - } + return colMetaDataList.get(column - 1).getColName(); + } - public String getTableName(int column) throws SQLException { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + public String getColumnName(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public String getCatalogName(int column) throws SQLException { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + return colMetaDataList.get(column - 1).getColName(); + } - public int getColumnType(int column) throws SQLException { - ColumnMetaData meta = this.colMetaDataList.get(column - 1); - return TSDBConstants.taosType2JdbcType(meta.getColType()); - } + public String getSchemaName(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public String getColumnTypeName(int column) throws SQLException { - ColumnMetaData meta = this.colMetaDataList.get(column - 1); - return TSDBConstants.taosType2JdbcTypeName(meta.getColType()); - } + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } - public boolean isReadOnly(int column) throws SQLException { - return true; - } + public int getPrecision(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public boolean isWritable(int column) throws SQLException { - return false; - } + ColumnMetaData columnMetaData = this.colMetaDataList.get(column - 1); + switch (columnMetaData.getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + return 5; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return 9; + case TSDBConstants.TSDB_DATA_TYPE_BINARY: + case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + return columnMetaData.getColSize(); + default: + return 0; + } + } - public boolean isDefinitelyWritable(int column) throws SQLException { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); - } + public int getScale(int column) throws SQLException { + if (column < 1 && column >= colMetaDataList.size()) + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); - public String getColumnClassName(int column) throws SQLException { - int columnType = getColumnType(column); - String columnClassName = ""; - switch (columnType) { - case Types.TIMESTAMP: - columnClassName = Timestamp.class.getName(); + ColumnMetaData meta = this.colMetaDataList.get(column - 1); + switch (meta.getColType()) { + case TSDBConstants.TSDB_DATA_TYPE_FLOAT: + return 5; + case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: + return 9; + default: + return 0; + } + } + + public String getTableName(int column) throws SQLException { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + public String getCatalogName(int column) throws SQLException { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + public int getColumnType(int column) throws SQLException { + ColumnMetaData meta = 
this.colMetaDataList.get(column - 1); + return TSDBConstants.taosType2JdbcType(meta.getColType()); + } + + public String getColumnTypeName(int column) throws SQLException { + ColumnMetaData meta = this.colMetaDataList.get(column - 1); + return TSDBConstants.taosType2JdbcTypeName(meta.getColType()); + } + + public boolean isReadOnly(int column) throws SQLException { + return true; + } + + public boolean isWritable(int column) throws SQLException { + return false; + } + + public boolean isDefinitelyWritable(int column) throws SQLException { + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + } + + public String getColumnClassName(int column) throws SQLException { + int columnType = getColumnType(column); + String columnClassName = ""; + switch (columnType) { + case Types.TIMESTAMP: + columnClassName = Timestamp.class.getName(); break; case Types.CHAR: columnClassName = String.class.getName(); @@ -168,8 +192,8 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD case Types.INTEGER: columnClassName = Integer.class.getName(); break; - case Types.SMALLINT: - columnClassName = Short.class.getName(); + case Types.SMALLINT: + columnClassName = Short.class.getName(); break; case Types.TINYINT: columnClassName = Byte.class.getName(); @@ -177,7 +201,7 @@ public class TSDBResultSetMetaData extends WrapperImpl implements ResultSetMetaD case Types.BIT: columnClassName = Boolean.class.getName(); break; - } + } return columnClassName; - } + } } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java index 44d760897b..7cf5f0d79a 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBResultSetRowData.java @@ -21,18 +21,13 @@ import java.util.ArrayList; import java.util.Collections; public class TSDBResultSetRowData { - private ArrayList data = null; + private ArrayList data; private int colSize = 0; public TSDBResultSetRowData(int colSize) { this.setColSize(colSize); } - public TSDBResultSetRowData() { - this.data = new ArrayList<>(); - this.setColSize(0); - } - public void clear() { if (this.data != null) { this.data.clear(); @@ -71,9 +66,9 @@ public class TSDBResultSetRowData { case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: case TSDBConstants.TSDB_DATA_TYPE_BIGINT: return ((Long) obj) == 1L ? 
Boolean.TRUE : Boolean.FALSE; + default: + return false; } - - return Boolean.TRUE; } public void setByte(int col, byte value) { @@ -198,7 +193,7 @@ public class TSDBResultSetRowData { data.set(col, value); } - public float getFloat(int col, int srcType) throws SQLException { + public float getFloat(int col, int srcType) { Object obj = data.get(col); switch (srcType) { @@ -226,7 +221,7 @@ public class TSDBResultSetRowData { data.set(col, value); } - public double getDouble(int col, int srcType) throws SQLException { + public double getDouble(int col, int srcType) { Object obj = data.get(col); switch (srcType) { @@ -267,9 +262,8 @@ public class TSDBResultSetRowData { * * @param col column index * @return - * @throws SQLException */ - public String getString(int col, int srcType) throws SQLException { + public String getString(int col, int srcType) { switch (srcType) { case TSDBConstants.TSDB_DATA_TYPE_BINARY: case TSDBConstants.TSDB_DATA_TYPE_NCHAR: @@ -305,11 +299,11 @@ public class TSDBResultSetRowData { } public void setTimestamp(int col, long ts) { - data.set(col, ts); + data.set(col, new Timestamp(ts)); } public Timestamp getTimestamp(int col) { - return new Timestamp((Long) data.get(col)); + return (Timestamp) data.get(col); } public Object get(int col) { @@ -320,7 +314,7 @@ public class TSDBResultSetRowData { return colSize; } - public void setColSize(int colSize) { + private void setColSize(int colSize) { this.colSize = colSize; this.clear(); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/bean/TSDBPreparedParam.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/bean/TSDBPreparedParam.java deleted file mode 100644 index ef78292de6..0000000000 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/bean/TSDBPreparedParam.java +++ /dev/null @@ -1,62 +0,0 @@ -package com.taosdata.jdbc.bean; - -import java.util.List; - -/** - * tdengine batch insert or import param object - */ -public class TSDBPreparedParam { - - /** - * tableName, if sTable Name is not null, and this is sub table name. 
- */ - private String tableName; - - /** - * sub middle param list - */ - private List middleParamList; - - /** - * value list - */ - private List valueList; - - public TSDBPreparedParam(String tableName) { - this.tableName = tableName; - } - - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public List getMiddleParamList() { - return middleParamList; - } - - public void setMiddleParamList(List middleParamList) { - this.middleParamList = middleParamList; - } - - public void setMiddleParam(int parameterIndex, Object x) { - this.middleParamList.set(parameterIndex, x); - } - - public List getValueList() { - return valueList; - } - - public void setValueList(List valueList) { - this.valueList = valueList; - } - - - public void setValueParam(int parameterIndex, Object x) { - this.valueList.set(parameterIndex, x); - } - -} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java index c1834a7b80..1f3ed2d144 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulConnection.java @@ -1,8 +1,14 @@ package com.taosdata.jdbc.rs; -import com.taosdata.jdbc.*; +import com.taosdata.jdbc.AbstractConnection; +import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.TSDBError; +import com.taosdata.jdbc.TSDBErrorNumbers; -import java.sql.*; +import java.sql.DatabaseMetaData; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; import java.util.Properties; public class RestfulConnection extends AbstractConnection { @@ -55,7 +61,6 @@ public class RestfulConnection extends AbstractConnection { public DatabaseMetaData getMetaData() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_CONNECTION_CLOSED); - ; return this.metadata; } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java index a94cfa6e07..6efe13561d 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulDriver.java @@ -17,7 +17,7 @@ public class RestfulDriver extends AbstractDriver { static { try { - DriverManager.registerDriver(new RestfulDriver()); + java.sql.DriverManager.registerDriver(new RestfulDriver()); } catch (SQLException e) { throw TSDBError.createRuntimeException(TSDBErrorNumbers.ERROR_URL_NOT_SET, e); } diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulParameterMetaData.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulParameterMetaData.java new file mode 100644 index 0000000000..7a130eb72b --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulParameterMetaData.java @@ -0,0 +1,10 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.AbstractParameterMetaData; + +public class RestfulParameterMetaData extends AbstractParameterMetaData { + + RestfulParameterMetaData(Object[] parameters) { + super(parameters); + } +} diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java index 3a0ff56dd7..f82955ca9d 100644 --- 
a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulPreparedStatement.java @@ -7,6 +7,7 @@ import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.net.URL; +import java.nio.charset.Charset; import java.sql.*; import java.util.Calendar; @@ -30,7 +31,9 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar parameters = new Object[parameterCnt]; this.isPrepared = true; } - //TODO: build parameterMetaData + + // build parameterMetaData + this.parameterMetaData = new RestfulParameterMetaData(parameters); } @Override @@ -60,8 +63,15 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar for (int i = 0; i < parameters.length; ++i) { Object para = parameters[i]; if (para != null) { - String paraStr = para.toString(); - if (para instanceof Timestamp || para instanceof String) { + String paraStr; + if (para instanceof byte[]) { + paraStr = new String((byte[]) para, Charset.forName("UTF-8")); + } else { + paraStr = para.toString(); + } + // if para is timestamp or String or byte[] need to translate ' character + if (para instanceof Timestamp || para instanceof String || para instanceof byte[]) { + paraStr = paraStr.replaceAll("'", "\\\\\\\\'"); paraStr = "'" + paraStr + "'"; } sql = sql.replaceFirst("[?]", paraStr); @@ -92,7 +102,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar public void setByte(int parameterIndex, byte x) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + setObject(parameterIndex, x); } @Override @@ -153,7 +163,7 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar public void setBytes(int parameterIndex, byte[] x) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + setObject(parameterIndex, x); } @Override @@ -210,19 +220,16 @@ public class RestfulPreparedStatement extends RestfulStatement implements Prepar public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_UNSUPPORTED_METHOD); + + setObject(parameterIndex,x); } @Override public void setObject(int parameterIndex, Object x) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_STATEMENT_CLOSED); - if (parameterIndex < 1 && parameterIndex >= parameters.length) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE); parameters[parameterIndex - 1] = x; - } @Override diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java index 0cbb40dbb6..5c2d4c45b0 100644 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/rs/RestfulResultSet.java @@ -2,13 +2,18 @@ package com.taosdata.jdbc.rs; import com.alibaba.fastjson.JSONArray; import com.alibaba.fastjson.JSONObject; +import com.google.common.primitives.Ints; +import 
com.google.common.primitives.Longs; +import com.google.common.primitives.Shorts; import com.taosdata.jdbc.AbstractResultSet; import com.taosdata.jdbc.TSDBConstants; import com.taosdata.jdbc.TSDBError; import com.taosdata.jdbc.TSDBErrorNumbers; +import java.math.BigDecimal; import java.sql.*; import java.util.ArrayList; +import java.util.Calendar; public class RestfulResultSet extends AbstractResultSet implements ResultSet { private volatile boolean isClosed; @@ -16,7 +21,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { private final String database; private final Statement statement; -// private final JSONObject resultJson; // data private final ArrayList> resultSet; // meta @@ -32,7 +36,6 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { public RestfulResultSet(String database, Statement statement, JSONObject resultJson) throws SQLException { this.database = database; this.statement = statement; -// this.resultJson = resultJson; // column metadata JSONArray columnMeta = resultJson.getJSONArray("column_meta"); @@ -73,7 +76,7 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { case TSDBConstants.TSDB_DATA_TYPE_INT: return row.getInteger(colIndex); case TSDBConstants.TSDB_DATA_TYPE_BIGINT: - return row.getBigInteger(colIndex); + return row.getLong(colIndex); case TSDBConstants.TSDB_DATA_TYPE_FLOAT: return row.getFloat(colIndex); case TSDBConstants.TSDB_DATA_TYPE_DOUBLE: @@ -81,9 +84,11 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { case TSDBConstants.TSDB_DATA_TYPE_TIMESTAMP: return new Timestamp(row.getDate(colIndex).getTime()); case TSDBConstants.TSDB_DATA_TYPE_BINARY: + return row.getString(colIndex) == null ? null : row.getString(colIndex).getBytes(); case TSDBConstants.TSDB_DATA_TYPE_NCHAR: + return row.getString(colIndex) == null ? null : row.getString(colIndex); default: - return row.getString(colIndex); + return row.get(colIndex); } } @@ -130,37 +135,33 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { @Override public String getString(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - Object value = resultSet.get(pos).get(columnIndex); - return value == null ? null : value.toString(); + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return null; + if (value instanceof byte[]) + return new String((byte[]) value); + return value.toString(); } @Override public boolean getBoolean(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - int result = getInt(columnIndex); - return result == 0 ? 
false : true; + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return false; + if (value instanceof Boolean) + return (boolean) value; + return Boolean.valueOf(value.toString()); } @Override public byte getByte(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - Object value = resultSet.get(pos).get(columnIndex); + Object value = resultSet.get(pos).get(columnIndex - 1); if (value == null) return 0; long valueAsLong = Long.parseLong(value.toString()); @@ -179,13 +180,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { @Override public short getShort(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - Object value = resultSet.get(pos).get(columnIndex); + Object value = resultSet.get(pos).get(columnIndex - 1); if (value == null) return 0; long valueAsLong = Long.parseLong(value.toString()); @@ -198,13 +195,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { @Override public int getInt(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - Object value = resultSet.get(pos).get(columnIndex); + Object value = resultSet.get(pos).get(columnIndex - 1); if (value == null) return 0; long valueAsLong = Long.parseLong(value.toString()); @@ -217,13 +210,9 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { @Override public long getLong(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - Object value = resultSet.get(pos).get(columnIndex); + Object value = resultSet.get(pos).get(columnIndex - 1); if (value == null) return 0; @@ -240,64 +229,99 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { @Override public float getFloat(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw 
TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - return Float.parseFloat(resultSet.get(pos).get(columnIndex).toString()); + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return 0; + if (value instanceof Float || value instanceof Double) + return (float) value; + return Float.parseFloat(value.toString()); } @Override public double getDouble(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - if (columnIndex > resultSet.get(pos).size()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE, "Column Index out of range, " + columnIndex + " > " + resultSet.get(pos).size()); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); - return Double.parseDouble(resultSet.get(pos).get(columnIndex).toString()); + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return 0; + if (value instanceof Double || value instanceof Float) + return (double) value; + return Double.parseDouble(value.toString()); } - private int getTrueColumnIndex(int columnIndex) throws SQLException { - if (columnIndex < 1) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE - , "Column Index out of range, " + columnIndex + " < 1"); - } + @Override + public byte[] getBytes(int columnIndex) throws SQLException { + checkAvailability(columnIndex, resultSet.get(pos).size()); - int numOfCols = resultSet.get(pos).size(); - if (columnIndex > numOfCols) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_PARAMETER_INDEX_OUT_RANGE - , "Column Index out of range, " + columnIndex + " > " + numOfCols); - } + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return null; + if (value instanceof byte[]) + return (byte[]) value; + if (value instanceof String) + return ((String) value).getBytes(); + if (value instanceof Long) + return Longs.toByteArray((long) value); + if (value instanceof Integer) + return Ints.toByteArray((int) value); + if (value instanceof Short) + return Shorts.toByteArray((short) value); + if (value instanceof Byte) + return new byte[]{(byte) value}; - return columnIndex - 1; + return value.toString().getBytes(); + } + + @Override + public Date getDate(int columnIndex) throws SQLException { + checkAvailability(columnIndex, resultSet.get(pos).size()); + + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return null; + if (value instanceof Timestamp) + return new Date(((Timestamp) value).getTime()); + return Date.valueOf(value.toString()); + } + + @Override + public Time getTime(int columnIndex) throws SQLException { + checkAvailability(columnIndex, resultSet.get(pos).size()); + + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return null; + if (value instanceof Timestamp) + return new Time(((Timestamp) value).getTime()); + return Time.valueOf(value.toString()); } @Override public Timestamp getTimestamp(int columnIndex) throws SQLException { - if (isClosed()) - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); + checkAvailability(columnIndex, resultSet.get(pos).size()); - columnIndex = getTrueColumnIndex(columnIndex); 
- String strDate = resultSet.get(pos).get(columnIndex).toString(); -// strDate = strDate.substring(1, strDate.length() - 1); - return Timestamp.valueOf(strDate); + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return null; + if (value instanceof Timestamp) + return (Timestamp) value; + return Timestamp.valueOf(value.toString()); } - /*************************************************************************************************************/ @Override public ResultSetMetaData getMetaData() throws SQLException { if (isClosed()) throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_RESULTSET_CLOSED); - return this.metaData; } @Override - public Object getObject(String columnLabel) throws SQLException { - return getObject(findColumn(columnLabel)); + public Object getObject(int columnIndex) throws SQLException { + checkAvailability(columnIndex, resultSet.get(pos).size()); + + return resultSet.get(pos).get(columnIndex - 1); } @Override @@ -311,6 +335,23 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return columnIndex + 1; } + @Override + public BigDecimal getBigDecimal(int columnIndex) throws SQLException { + checkAvailability(columnIndex, resultSet.get(pos).size()); + + Object value = resultSet.get(pos).get(columnIndex - 1); + if (value == null) + return null; + + if (value instanceof Long || value instanceof Integer || value instanceof Short || value instanceof Byte) + return new BigDecimal(Long.valueOf(value.toString())); + if (value instanceof Double || value instanceof Float) + return new BigDecimal(Double.valueOf(value.toString())); + if (value instanceof Timestamp) + return new BigDecimal(((Timestamp) value).getTime()); + return new BigDecimal(value.toString()); + } + @Override public boolean isBeforeFirst() throws SQLException { if (isClosed()) @@ -471,6 +512,12 @@ public class RestfulResultSet extends AbstractResultSet implements ResultSet { return this.statement; } + @Override + public Timestamp getTimestamp(int columnIndex, Calendar cal) throws SQLException { + //TODO:did not use the specified timezone in cal + return getTimestamp(columnIndex); + } + @Override public boolean isClosed() throws SQLException { return isClosed; diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java new file mode 100644 index 0000000000..a67b4763f9 --- /dev/null +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/utils/OSUtils.java @@ -0,0 +1,17 @@ +package com.taosdata.jdbc.utils; + +public class OSUtils { + private static final String OS = System.getProperty("os.name").toLowerCase(); + + public static boolean isWindows() { + return OS.indexOf("win") >= 0; + } + + public static boolean isMac() { + return OS.indexOf("mac") >= 0; + } + + public static boolean isLinux() { + return OS.indexOf("nux") >= 0; + } +} diff --git a/src/connector/jdbc/src/main/resources/META-INF/services/java.sql.Driver b/src/connector/jdbc/src/main/resources/META-INF/services/java.sql.Driver index e65b4e3b22..893f7cdf34 100644 --- a/src/connector/jdbc/src/main/resources/META-INF/services/java.sql.Driver +++ b/src/connector/jdbc/src/main/resources/META-INF/services/java.sql.Driver @@ -1 +1,2 @@ com.taosdata.jdbc.TSDBDriver +com.taosdata.jdbc.rs.RestfulDriver diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java deleted file mode 100644 index 
87348f9026..0000000000 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/ResultSetTest.java +++ /dev/null @@ -1,266 +0,0 @@ -package com.taosdata.jdbc; - -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; - -import javax.sql.rowset.serial.SerialBlob; -import javax.sql.rowset.serial.SerialClob; -import java.io.UnsupportedEncodingException; -import java.sql.*; -import java.util.HashMap; -import java.util.Properties; - -import static org.junit.Assert.assertEquals; - -public class ResultSetTest { - static Connection connection; - static Statement statement; - static String dbName = "test"; - static String tName = "t0"; - static String host = "localhost"; - static ResultSet resSet; - - @BeforeClass - public static void createDatabaseAndTable() { - try { - Class.forName("com.taosdata.jdbc.TSDBDriver"); - Properties properties = new Properties(); - properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); - properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); - connection = DriverManager.getConnection("jdbc:TAOS://" + host + ":0/", properties); - statement = connection.createStatement(); - statement.executeUpdate("drop database if exists " + dbName); - statement.executeUpdate("create database if not exists " + dbName); - statement.execute("use " + dbName); - statement.executeUpdate("create table if not exists " + dbName + "." + tName + " (ts timestamp, k1 int, k2 bigint, k3 float, k4 double, k5 binary(30), k6 smallint, k7 bool, k8 nchar(20))"); - } catch (ClassNotFoundException | SQLException e) { - return; - } - } - - @Test - public void testResultSet() { - String sql; - long ts = 1496732686000l; - int v1 = 2147483600; - long v2 = ts + 1000; - float v3 = 3.1415926f; - double v4 = 3.1415926535897; - String v5 = "涛思数据,强~!"; - short v6 = 12; - boolean v7 = false; - String v8 = "TDengine is powerful"; - sql = "insert into " + dbName + "." + tName + " values (" + ts + "," + v1 + "," + v2 + "," + v3 + "," + v4 - + ",\"" + v5 + "\"," + v6 + "," + v7 + ",\"" + v8 + "\")"; - try { - statement.executeUpdate(sql); - assertEquals(1, statement.getUpdateCount()); - } catch (SQLException e) { - assert false : "insert error " + e.getMessage(); - } - try { - statement.execute("select * from " + dbName + "." 
+ tName + " where ts = " + ts); - resSet = statement.getResultSet(); - System.out.println(((TSDBResultSet) resSet).getRowData()); - while (resSet.next()) { - assertEquals(ts, resSet.getLong(1)); - assertEquals(ts, resSet.getLong("ts")); - System.out.println(resSet.getTimestamp(1)); - assertEquals(v1, resSet.getInt(2)); - assertEquals(v1, resSet.getInt("k1")); - assertEquals(v2, resSet.getLong(3)); - assertEquals(v2, resSet.getLong("k2")); - assertEquals(v3, resSet.getFloat(4), 7); - assertEquals(v3, resSet.getFloat("k3"), 7); - assertEquals(v4, resSet.getDouble(5), 13); - assertEquals(v4, resSet.getDouble("k4"), 13); - assertEquals(v5, resSet.getString(6)); - assertEquals(v5, resSet.getString("k5")); - assertEquals(v6, resSet.getShort(7)); - assertEquals(v6, resSet.getShort("k6")); - assertEquals(v7, resSet.getBoolean(8)); - assertEquals(v7, resSet.getBoolean("k7")); - assertEquals(v8, resSet.getString(9)); - assertEquals(v8, resSet.getString("k8")); - resSet.getBytes(9); - resSet.getObject(6); - resSet.getObject("k8"); - } - if (!resSet.isClosed()) { - resSet.close(); - } - } catch (SQLException e) { - assert false : "insert error " + e.getMessage(); - } - } - - @Test(expected = SQLException.class) - public void testUnsupport() throws SQLException, UnsupportedEncodingException { - statement.execute("show databases"); - resSet = statement.getResultSet(); - Assert.assertNotNull(resSet.unwrap(TSDBResultSet.class)); - Assert.assertTrue(resSet.isWrapperFor(TSDBResultSet.class)); - resSet.getUnicodeStream(null); - resSet.getBinaryStream(null); - resSet.getAsciiStream(""); - resSet.getUnicodeStream(null); - resSet.getBinaryStream(null); - resSet.getWarnings(); - resSet.clearWarnings(); - resSet.getCursorName(); - resSet.getCharacterStream(null); - resSet.getCharacterStream(null); - resSet.isBeforeFirst(); - resSet.isAfterLast(); - resSet.isFirst(); - resSet.isLast(); - resSet.beforeFirst(); - resSet.afterLast(); - resSet.first(); - resSet.last(); - resSet.getRow(); - resSet.absolute(1); - resSet.relative(1); - resSet.previous(); - resSet.setFetchDirection(0); - resSet.getFetchDirection(); - resSet.setFetchSize(0); - resSet.getFetchSize(); - resSet.getConcurrency(); - resSet.rowUpdated(); - resSet.rowInserted(); - resSet.rowDeleted(); - resSet.updateNull(null); - resSet.updateBoolean(0, true); - resSet.updateByte(0, (byte) 2); - resSet.updateShort(0, (short) 1); - resSet.updateInt(0, 0); - resSet.updateLong(0, 0l); - resSet.updateFloat(0, 3.14f); - resSet.updateDouble(0, 3.1415); - resSet.updateBigDecimal(null, null); - resSet.updateString(null, null); - resSet.updateBytes(null, null); - resSet.updateDate(null, null); - resSet.updateTime(null, null); - resSet.updateTimestamp(null, null); - resSet.updateAsciiStream(null, null); - resSet.updateBinaryStream(null, null); - resSet.updateCharacterStream(null, null); - resSet.updateObject(null, null); - resSet.updateObject(null, null); - resSet.updateNull(null); - resSet.updateBoolean("", false); - resSet.updateByte("", (byte) 1); - resSet.updateShort("", (short) 1); - resSet.updateInt("", 0); - resSet.updateLong("", 0l); - resSet.updateFloat("", 3.14f); - resSet.updateDouble("", 3.1415); - resSet.updateBigDecimal(null, null); - resSet.updateString(null, null); - resSet.updateBytes(null, null); - resSet.updateDate(null, null); - resSet.updateTime(null, null); - resSet.updateTimestamp(null, null); - resSet.updateAsciiStream(null, null); - resSet.updateBinaryStream(null, null); - resSet.updateCharacterStream(null, null); - resSet.updateObject(null, null); 
- resSet.updateObject(null, null); - resSet.insertRow(); - resSet.updateRow(); - resSet.deleteRow(); - resSet.refreshRow(); - resSet.cancelRowUpdates(); - resSet.moveToInsertRow(); - resSet.moveToCurrentRow(); - resSet.getStatement(); - resSet.getObject(0, new HashMap<>()); - resSet.getRef(null); - resSet.getBlob(null); - resSet.getClob(null); - resSet.getArray(null); - resSet.getObject("", new HashMap<>()); - resSet.getRef(null); - resSet.getBlob(null); - resSet.getClob(null); - resSet.getArray(null); - resSet.getDate(null, null); - resSet.getDate(null, null); - resSet.getTime(null, null); - resSet.getTime(null, null); - resSet.getTimestamp(null, null); - resSet.getTimestamp(null, null); - resSet.getURL(null); - resSet.getURL(null); - resSet.updateRef(null, null); - resSet.updateRef(null, null); - resSet.updateBlob(0, new SerialBlob("".getBytes("UTF8"))); - resSet.updateBlob("", new SerialBlob("".getBytes("UTF8"))); - resSet.updateClob("", new SerialClob("".toCharArray())); - resSet.updateClob(0, new SerialClob("".toCharArray())); - resSet.updateArray(null, null); - resSet.updateArray(null, null); - resSet.getRowId(null); - resSet.getRowId(null); - resSet.updateRowId(null, null); - resSet.updateRowId(null, null); - resSet.getHoldability(); - resSet.updateNString(null, null); - resSet.updateNString(null, null); - resSet.getNClob(null); - resSet.getNClob(null); - resSet.getSQLXML(null); - resSet.getSQLXML(null); - resSet.updateSQLXML(null, null); - resSet.updateSQLXML(null, null); - resSet.getNCharacterStream(null); - resSet.getNCharacterStream(null); - resSet.updateNCharacterStream(null, null); - resSet.updateNCharacterStream(null, null); - resSet.updateAsciiStream(null, null); - resSet.updateBinaryStream(null, null); - resSet.updateCharacterStream(null, null); - resSet.updateAsciiStream(null, null); - resSet.updateBinaryStream(null, null); - resSet.updateCharacterStream(null, null); - resSet.updateNCharacterStream(null, null); - resSet.updateNCharacterStream(null, null); - resSet.updateAsciiStream(null, null); - resSet.updateBinaryStream(null, null); - resSet.updateCharacterStream(null, null); - resSet.updateAsciiStream(null, null); - resSet.updateBinaryStream(null, null); - resSet.updateCharacterStream(null, null); - } - - @Test - public void testBatch() throws SQLException { - String[] sqls = new String[]{"insert into test.t0 values (1496732686001,2147483600,1496732687000,3.1415925,3.1415926535897," + - "'涛思数据,强~',12,0,'TDengine is powerful')", "insert into test.t0 values (1496732686002,2147483600,1496732687000,3.1415925,3.1415926535897," + - "'涛思数据,强~',12,1,'TDengine is powerful')"}; - for (String sql : sqls) { - statement.addBatch(sql); - } - int[] res = statement.executeBatch(); - assertEquals(res.length, 2); - statement.clearBatch(); - } - - @AfterClass - public static void close() { - try { - statement.executeUpdate("drop database " + dbName); - if (statement != null) - statement.close(); - if (connection != null) - connection.close(); - } catch (SQLException e) { - e.printStackTrace(); - } - } -} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java index 0a4ecb739c..ca7251bb0e 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBConnectionTest.java @@ -233,7 +233,7 @@ public class TSDBConnectionTest { int status = rs.getInt("server_status()"); Assert.assertEquals(1, status); - 
conn.createStatement(ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); + conn.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, ResultSet.CONCUR_READ_ONLY, ResultSet.HOLD_CURSORS_OVER_COMMIT); } @Test(expected = SQLFeatureNotSupportedException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBParameterMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBParameterMetaDataTest.java new file mode 100644 index 0000000000..12bcc43917 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBParameterMetaDataTest.java @@ -0,0 +1,194 @@ +package com.taosdata.jdbc; + +import com.taosdata.jdbc.rs.RestfulParameterMetaData; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; + +public class TSDBParameterMetaDataTest { + + private static final String host = "127.0.0.1"; + private static Connection conn; + private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; + private static PreparedStatement pstmt_insert; + private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and f1 >= ?"; + private static PreparedStatement pstmt_select; + private static ParameterMetaData parameterMetaData_insert; + private static ParameterMetaData parameterMetaData_select; + + @Test + public void getParameterCount() throws SQLException { + Assert.assertEquals(10, parameterMetaData_insert.getParameterCount()); + } + + @Test + public void isNullable() throws SQLException { + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(1)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(2)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(3)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(4)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(5)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(6)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(7)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(8)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(9)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(10)); + } + + @Test + public void isSigned() throws SQLException { + Assert.assertEquals(false, parameterMetaData_insert.isSigned(1)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(2)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(3)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(4)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(5)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(6)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(7)); + Assert.assertEquals(false, parameterMetaData_insert.isSigned(8)); + Assert.assertEquals(false, parameterMetaData_insert.isSigned(9)); + Assert.assertEquals(false, parameterMetaData_insert.isSigned(10)); + } + + @Test + public void getPrecision() throws SQLException { + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(1)); + 
Assert.assertEquals(0, parameterMetaData_insert.getPrecision(2)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(3)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(4)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(5)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(6)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(7)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(8)); + Assert.assertEquals(5, parameterMetaData_insert.getPrecision(9)); + Assert.assertEquals(5, parameterMetaData_insert.getPrecision(10)); + } + + @Test + public void getScale() throws SQLException { + Assert.assertEquals(0, parameterMetaData_insert.getScale(1)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(2)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(3)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(4)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(5)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(6)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(7)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(8)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(9)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(10)); + } + + @Test + public void getParameterType() throws SQLException { + Assert.assertEquals(Types.TIMESTAMP, parameterMetaData_insert.getParameterType(1)); + Assert.assertEquals(Types.INTEGER, parameterMetaData_insert.getParameterType(2)); + Assert.assertEquals(Types.BIGINT, parameterMetaData_insert.getParameterType(3)); + Assert.assertEquals(Types.FLOAT, parameterMetaData_insert.getParameterType(4)); + Assert.assertEquals(Types.DOUBLE, parameterMetaData_insert.getParameterType(5)); + Assert.assertEquals(Types.SMALLINT, parameterMetaData_insert.getParameterType(6)); + Assert.assertEquals(Types.TINYINT, parameterMetaData_insert.getParameterType(7)); + Assert.assertEquals(Types.BOOLEAN, parameterMetaData_insert.getParameterType(8)); + Assert.assertEquals(Types.BINARY, parameterMetaData_insert.getParameterType(9)); + Assert.assertEquals(Types.NCHAR, parameterMetaData_insert.getParameterType(10)); + } + + @Test + public void getParameterTypeName() throws SQLException { + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.TIMESTAMP), parameterMetaData_insert.getParameterTypeName(1)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.INTEGER), parameterMetaData_insert.getParameterTypeName(2)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.BIGINT), parameterMetaData_insert.getParameterTypeName(3)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.FLOAT), parameterMetaData_insert.getParameterTypeName(4)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.DOUBLE), parameterMetaData_insert.getParameterTypeName(5)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.SMALLINT), parameterMetaData_insert.getParameterTypeName(6)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.TINYINT), parameterMetaData_insert.getParameterTypeName(7)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.BOOLEAN), parameterMetaData_insert.getParameterTypeName(8)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.BINARY), parameterMetaData_insert.getParameterTypeName(9)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.NCHAR), parameterMetaData_insert.getParameterTypeName(10)); + } + + @Test + 
public void getParameterClassName() throws SQLException { + Assert.assertEquals(Timestamp.class.getName(), parameterMetaData_insert.getParameterClassName(1)); + Assert.assertEquals(Integer.class.getName(), parameterMetaData_insert.getParameterClassName(2)); + Assert.assertEquals(Long.class.getName(), parameterMetaData_insert.getParameterClassName(3)); + Assert.assertEquals(Float.class.getName(), parameterMetaData_insert.getParameterClassName(4)); + Assert.assertEquals(Double.class.getName(), parameterMetaData_insert.getParameterClassName(5)); + Assert.assertEquals(Short.class.getName(), parameterMetaData_insert.getParameterClassName(6)); + Assert.assertEquals(Byte.class.getName(), parameterMetaData_insert.getParameterClassName(7)); + Assert.assertEquals(Boolean.class.getName(), parameterMetaData_insert.getParameterClassName(8)); + Assert.assertEquals(byte[].class.getName(), parameterMetaData_insert.getParameterClassName(9)); + Assert.assertEquals(String.class.getName(), parameterMetaData_insert.getParameterClassName(10)); + } + + @Test + public void getParameterMode() throws SQLException { + for (int i = 1; i <= parameterMetaData_insert.getParameterCount(); i++) { + int parameterMode = parameterMetaData_insert.getParameterMode(i); + Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMode); + } + } + + @Test + public void unwrap() throws SQLException { + TSDBParameterMetaData unwrap = parameterMetaData_insert.unwrap(TSDBParameterMetaData.class); + Assert.assertNotNull(unwrap); + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(parameterMetaData_insert.isWrapperFor(TSDBParameterMetaData.class)); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_pstmt"); + stmt.execute("create database if not exists test_pstmt"); + stmt.execute("use test_pstmt"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))"); + stmt.execute("create table t1 using weather tags('beijing')"); + } + pstmt_insert = conn.prepareStatement(sql_insert); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(2, 111); + pstmt_insert.setObject(3, Long.MAX_VALUE); + pstmt_insert.setObject(4, 3.14159265354f); + pstmt_insert.setObject(5, Double.MAX_VALUE); + pstmt_insert.setObject(6, Short.MAX_VALUE); + pstmt_insert.setObject(7, Byte.MAX_VALUE); + pstmt_insert.setObject(8, true); + pstmt_insert.setObject(9, "hello".getBytes()); + pstmt_insert.setObject(10, "Hello"); + parameterMetaData_insert = pstmt_insert.getParameterMetaData(); + + pstmt_select = conn.prepareStatement(sql_select); + pstmt_select.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_select.setTimestamp(2, new Timestamp(System.currentTimeMillis() + 10000)); + pstmt_select.setInt(3, 0); + parameterMetaData_select = pstmt_select.getParameterMetaData(); + + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (pstmt_insert != null) + pstmt_insert.close(); + if (pstmt_select != null) + pstmt_select.close(); + if (conn != null) + conn.close(); + } catch (SQLException e) { + 
e.printStackTrace(); + } + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java index 475cfef060..434095efa2 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBPreparedStatementTest.java @@ -5,14 +5,16 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import java.io.IOException; +import java.io.Serializable; import java.sql.*; public class TSDBPreparedStatementTest { private static final String host = "127.0.0.1"; private static Connection conn; - private static final String sql_insert = "insert into t1 values(?, ?)"; + private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; private static PreparedStatement pstmt_insert; - private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and temperature >= ?"; + private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and f1 >= ?"; private static PreparedStatement pstmt_select; @Test @@ -21,7 +23,7 @@ public class TSDBPreparedStatementTest { long start = end - 1000 * 60 * 60; pstmt_select.setTimestamp(1, new Timestamp(start)); pstmt_select.setTimestamp(2, new Timestamp(end)); - pstmt_select.setFloat(3, 0); + pstmt_select.setInt(3, 0); ResultSet rs = pstmt_select.executeQuery(); Assert.assertNotNull(rs); @@ -37,48 +39,73 @@ public class TSDBPreparedStatementTest { @Test public void executeUpdate() throws SQLException { pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); - pstmt_insert.setFloat(2, 3.14f); + pstmt_insert.setFloat(4, 3.14f); int result = pstmt_insert.executeUpdate(); Assert.assertEquals(1, result); } @Test public void setNull() throws SQLException { - pstmt_insert.setNull(2, Types.FLOAT); + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(2, Types.INTEGER); + int result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); } @Test public void setBoolean() throws SQLException { - pstmt_insert.setBoolean(2, true); + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setBoolean(8, true); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void setByte() throws SQLException { - pstmt_insert.setByte(1, (byte) 0x001); + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setByte(7, (byte) 0x001); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setShort() { - + public void setShort() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setShort(6, (short) 2); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setInt() { - + public void setInt() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setInt(2, 10086); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setLong() { - + public void setLong() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setLong(3, Long.MAX_VALUE); + int ret = 
pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setFloat() { - + public void setFloat() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setFloat(4, 3.14f); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setDouble() { + public void setDouble() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setDouble(5, 3.14444); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -87,12 +114,56 @@ public class TSDBPreparedStatementTest { } @Test - public void setString() { + public void setString() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setString(10, "aaaa"); + boolean execute = pstmt_insert.execute(); + Assert.assertFalse(execute); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setString(10, new Person("john", 33, true).toString()); + Assert.assertFalse(pstmt_insert.execute()); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setString(10, new Person("john", 33, true).toString().replaceAll("'", "\"")); + Assert.assertFalse(pstmt_insert.execute()); } - @Test(expected = SQLFeatureNotSupportedException.class) - public void setBytes() throws SQLException { - pstmt_insert.setBytes(1, new byte[]{}); + class Person implements Serializable { + String name; + int age; + boolean sex; + + public Person(String name, int age, boolean sex) { + this.name = name; + this.age = age; + this.sex = sex; + } + + @Override + public String toString() { + return "Person{" + + "name='" + name + '\'' + + ", age=" + age + + ", sex=" + sex + + '}'; + } + } + + @Test + public void setBytes() throws SQLException, IOException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + +// ByteArrayOutputStream baos = new ByteArrayOutputStream(); +// ObjectOutputStream oos = new ObjectOutputStream(baos); +// oos.writeObject(new Person("john", 33, true)); +// oos.flush(); +// byte[] bytes = baos.toByteArray(); +// pstmt_insert.setBytes(9, bytes); + + pstmt_insert.setBytes(9, new Person("john", 33, true).toString().getBytes()); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -106,8 +177,10 @@ public class TSDBPreparedStatementTest { } @Test - public void setTimestamp() { - //TODO + public void setTimestamp() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -121,24 +194,69 @@ public class TSDBPreparedStatementTest { } @Test - public void clearParameters() { - //TODO + public void clearParameters() throws SQLException { + pstmt_insert.clearParameters(); } @Test public void setObject() throws SQLException { - pstmt_insert.setObject(1, System.currentTimeMillis()); - //TODO + pstmt_insert.setObject(1, new Timestamp(System.currentTimeMillis())); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(2, 111); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + 
pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(3, Long.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(4, 3.14159265354f); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(5, Double.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(6, Short.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(7, Byte.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(8, true); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(9, "hello".getBytes()); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(10, "Hello"); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void execute() { - //TODO - } + public void execute() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); - @Test - public void addBatch() { - //TODO: + executeQuery(); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -176,11 +294,11 @@ public class TSDBPreparedStatementTest { pstmt_insert.setURL(1, null); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getParameterMetaData() throws SQLException { ParameterMetaData parameterMetaData = pstmt_insert.getParameterMetaData(); -// Assert.assertNotNull(parameterMetaData); - //TODO: + Assert.assertNotNull(parameterMetaData); + //TODO: modify the test case } @Test(expected = SQLFeatureNotSupportedException.class) @@ -215,10 +333,10 @@ public class TSDBPreparedStatementTest { Class.forName("com.taosdata.jdbc.TSDBDriver"); conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); try (Statement stmt = conn.createStatement()) { - stmt.execute("drop database if exists test_pstmt"); - stmt.execute("create database if not exists test_pstmt"); - stmt.execute("use test_pstmt"); - stmt.execute("create table weather(ts timestamp, temperature float) tags(loc nchar(64))"); + stmt.execute("drop database if exists test_pstmt_jni"); + stmt.execute("create database if not exists test_pstmt_jni"); + stmt.execute("use test_pstmt_jni"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))"); stmt.execute("create table t1 using weather tags('beijing')"); } pstmt_insert = conn.prepareStatement(sql_insert); @@ -231,7 +349,10 @@ public class TSDBPreparedStatementTest { @AfterClass public static void afterClass() { try { - + if (pstmt_insert != null) + pstmt_insert.close(); + if (pstmt_select != null) + pstmt_select.close(); if (conn != null) 
conn.close(); } catch (SQLException e) { diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java new file mode 100644 index 0000000000..374b1335fc --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/TSDBResultSetTest.java @@ -0,0 +1,678 @@ +package com.taosdata.jdbc; + +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import com.google.common.primitives.Shorts; +import com.taosdata.jdbc.rs.RestfulResultSet; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.sql.*; +import java.text.ParseException; +import java.text.SimpleDateFormat; + +public class TSDBResultSetTest { + + private static final String host = "127.0.0.1"; + private static Connection conn; + private static Statement stmt; + private static ResultSet rs; + + @Test + public void wasNull() throws SQLException { + Assert.assertFalse(rs.wasNull()); + } + + @Test + public void getString() throws SQLException { + String f10 = rs.getString("f10"); + Assert.assertEquals("涛思数据", f10); + f10 = rs.getString(10); + Assert.assertEquals("涛思数据", f10); + } + + @Test + public void getBoolean() throws SQLException { + Boolean f9 = rs.getBoolean("f9"); + Assert.assertEquals(true, f9); + f9 = rs.getBoolean(9); + Assert.assertEquals(true, f9); + } + + @Test + public void getByte() throws SQLException { + byte f8 = rs.getByte("f8"); + Assert.assertEquals(10, f8); + f8 = rs.getByte(8); + Assert.assertEquals(10, f8); + } + + @Test + public void getShort() throws SQLException { + short f7 = rs.getShort("f7"); + Assert.assertEquals(10, f7); + f7 = rs.getShort(7); + Assert.assertEquals(10, f7); + } + + @Test + public void getInt() throws SQLException { + int f2 = rs.getInt("f2"); + Assert.assertEquals(1, f2); + f2 = rs.getInt(2); + Assert.assertEquals(1, f2); + } + + @Test + public void getLong() throws SQLException { + long f3 = rs.getLong("f3"); + Assert.assertEquals(100, f3); + f3 = rs.getLong(3); + Assert.assertEquals(100, f3); + } + + @Test + public void getFloat() throws SQLException { + float f4 = rs.getFloat("f4"); + Assert.assertEquals(3.1415f, f4, 0f); + f4 = rs.getFloat(4); + Assert.assertEquals(3.1415f, f4, 0f); + } + + @Test + public void getDouble() throws SQLException { + double f5 = rs.getDouble("f5"); + Assert.assertEquals(3.1415926, f5, 0.0); + f5 = rs.getDouble(5); + Assert.assertEquals(3.1415926, f5, 0.0); + } + + @Test + public void getBigDecimal() throws SQLException { + BigDecimal f1 = rs.getBigDecimal("f1"); + Assert.assertEquals(1609430400000l, f1.longValue()); + + BigDecimal f2 = rs.getBigDecimal("f2"); + Assert.assertEquals(1, f2.intValue()); + + BigDecimal f3 = rs.getBigDecimal("f3"); + Assert.assertEquals(100l, f3.longValue()); + + BigDecimal f4 = rs.getBigDecimal("f4"); + Assert.assertEquals(3.1415f, f4.floatValue(), 0.00000f); + + BigDecimal f5 = rs.getBigDecimal("f5"); + Assert.assertEquals(3.1415926, f5.doubleValue(), 0.0000000); + + BigDecimal f7 = rs.getBigDecimal("f7"); + Assert.assertEquals(10, f7.intValue()); + + BigDecimal f8 = rs.getBigDecimal("f8"); + Assert.assertEquals(10, f8.intValue()); + } + + @Test + public void getBytes() throws SQLException { + byte[] f1 = rs.getBytes("f1"); + Assert.assertEquals("2021-01-01 00:00:00.0", new String(f1)); + + byte[] f2 = rs.getBytes("f2"); + 
Assert.assertEquals(1, Ints.fromByteArray(f2)); + + byte[] f3 = rs.getBytes("f3"); + Assert.assertEquals(100l, Longs.fromByteArray(f3)); + + byte[] f4 = rs.getBytes("f4"); + Assert.assertEquals(3.1415f, Float.valueOf(new String(f4)), 0.000000f); + + byte[] f5 = rs.getBytes("f5"); + Assert.assertEquals(3.1415926, Double.valueOf(new String(f5)), 0.000000f); + + byte[] f6 = rs.getBytes("f6"); + Assert.assertEquals("abc", new String(f6)); + + byte[] f7 = rs.getBytes("f7"); + Assert.assertEquals((short) 10, Shorts.fromByteArray(f7)); + + byte[] f8 = rs.getBytes("f8"); + Assert.assertEquals(1, f8.length); + Assert.assertEquals((byte) 10, f8[0]); + + byte[] f9 = rs.getBytes("f9"); + Assert.assertEquals("true", new String(f9)); + + byte[] f10 = rs.getBytes("f10"); + Assert.assertEquals("涛思数据", new String(f10)); + } + + @Test + public void getDate() throws SQLException, ParseException { + Date f1 = rs.getDate("f1"); + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd"); + Assert.assertEquals(sdf.parse("2021-01-01"), f1); + } + + @Test + public void getTime() throws SQLException { + Time f1 = rs.getTime("f1"); + Assert.assertEquals("00:00:00", f1.toString()); + } + + @Test + public void getTimestamp() throws SQLException { + Timestamp f1 = rs.getTimestamp("f1"); + Assert.assertEquals("2021-01-01 00:00:00.0", f1.toString()); + f1 = rs.getTimestamp(1); + Assert.assertEquals("2021-01-01 00:00:00.0", f1.toString()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getAsciiStream() throws SQLException { + rs.getAsciiStream("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getUnicodeStream() throws SQLException { + rs.getUnicodeStream("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getBinaryStream() throws SQLException { + rs.getBinaryStream("f1"); + } + + @Test + public void getWarnings() throws SQLException { + Assert.assertNull(rs.getWarnings()); + } + + @Test + public void clearWarnings() throws SQLException { + rs.clearWarnings(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getCursorName() throws SQLException { + rs.getCursorName(); + } + + @Test + public void getMetaData() throws SQLException { + ResultSetMetaData meta = rs.getMetaData(); + Assert.assertNotNull(meta); + } + + @Test + public void getObject() throws SQLException, ParseException { + Object f1 = rs.getObject("f1"); + Assert.assertEquals(Timestamp.class, f1.getClass()); + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss.sss"); + java.util.Date date = sdf.parse("2021-01-01 00:00:00.000"); + Assert.assertEquals(new Timestamp(date.getTime()), f1); + + Object f2 = rs.getObject("f2"); + Assert.assertEquals(Integer.class, f2.getClass()); + Assert.assertEquals(1, f2); + + Object f3 = rs.getObject("f3"); + Assert.assertEquals(Long.class, f3.getClass()); + Assert.assertEquals(100l, f3); + + Object f4 = rs.getObject("f4"); + Assert.assertEquals(Float.class, f4.getClass()); + Assert.assertEquals(3.1415f, f4); + + Object f5 = rs.getObject("f5"); + Assert.assertEquals(Double.class, f5.getClass()); + Assert.assertEquals(3.1415926, f5); + + Object f6 = rs.getObject("f6"); + Assert.assertEquals(byte[].class, f6.getClass()); + Assert.assertEquals("abc", new String((byte[]) f6)); + + Object f7 = rs.getObject("f7"); + Assert.assertEquals(Short.class, f7.getClass()); + Assert.assertEquals((short) 10, f7); + + Object f8 = rs.getObject("f8"); + Assert.assertEquals(Byte.class, f8.getClass()); + 
Assert.assertEquals((byte) 10, f8); + + Object f9 = rs.getObject("f9"); + Assert.assertEquals(Boolean.class, f9.getClass()); + Assert.assertEquals(true, f9); + + Object f10 = rs.getObject("f10"); + Assert.assertEquals(String.class, f10.getClass()); + Assert.assertEquals("涛思数据", f10); + } + + @Test(expected = SQLException.class) + public void findColumn() throws SQLException { + int columnIndex = rs.findColumn("f1"); + Assert.assertEquals(1, columnIndex); + columnIndex = rs.findColumn("f2"); + Assert.assertEquals(2, columnIndex); + columnIndex = rs.findColumn("f3"); + Assert.assertEquals(3, columnIndex); + columnIndex = rs.findColumn("f4"); + Assert.assertEquals(4, columnIndex); + columnIndex = rs.findColumn("f5"); + Assert.assertEquals(5, columnIndex); + columnIndex = rs.findColumn("f6"); + Assert.assertEquals(6, columnIndex); + columnIndex = rs.findColumn("f7"); + Assert.assertEquals(7, columnIndex); + columnIndex = rs.findColumn("f8"); + Assert.assertEquals(8, columnIndex); + columnIndex = rs.findColumn("f9"); + Assert.assertEquals(9, columnIndex); + columnIndex = rs.findColumn("f10"); + Assert.assertEquals(10, columnIndex); + + rs.findColumn("f11"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getCharacterStream() throws SQLException { + rs.getCharacterStream(1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void isBeforeFirst() throws SQLException { + rs.isBeforeFirst(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void isAfterLast() throws SQLException { + rs.isAfterLast(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void isFirst() throws SQLException { + rs.isFirst(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void isLast() throws SQLException { + rs.isLast(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void beforeFirst() throws SQLException { + rs.beforeFirst(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void afterLast() throws SQLException { + rs.afterLast(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void first() throws SQLException { + rs.first(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void last() throws SQLException { + rs.last(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getRow() throws SQLException { + int row = rs.getRow(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void absolute() throws SQLException { + rs.absolute(-1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void relative() throws SQLException { + rs.relative(-1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void previous() throws SQLException { + rs.previous(); + } + + @Test + public void setFetchDirection() throws SQLException { + rs.setFetchDirection(ResultSet.FETCH_FORWARD); + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); + rs.setFetchDirection(ResultSet.FETCH_UNKNOWN); + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); + } + + @Test + public void getFetchDirection() throws SQLException { + Assert.assertEquals(ResultSet.FETCH_FORWARD, rs.getFetchDirection()); + } + + @Test + public void setFetchSize() throws SQLException { + rs.setFetchSize(0); + Assert.assertEquals(0, rs.getFetchSize()); + } + + @Test + public void getFetchSize() throws SQLException { + Assert.assertEquals(0, 
rs.getFetchSize()); + } + + @Test + public void getType() throws SQLException { + Assert.assertEquals(ResultSet.TYPE_FORWARD_ONLY, rs.getType()); + } + + @Test + public void getConcurrency() throws SQLException { + Assert.assertEquals(ResultSet.CONCUR_READ_ONLY, rs.getConcurrency()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void rowUpdated() throws SQLException { + rs.rowUpdated(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void rowInserted() throws SQLException { + rs.rowInserted(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void rowDeleted() throws SQLException { + rs.rowDeleted(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateNull() throws SQLException { + rs.updateNull("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateBoolean() throws SQLException { + rs.updateBoolean(1, false); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateByte() throws SQLException { + rs.updateByte(1, new Byte("0")); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateShort() throws SQLException { + rs.updateShort(1, new Short("0")); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateInt() throws SQLException { + rs.updateInt(1, 1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateLong() throws SQLException { + rs.updateLong(1, 1l); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateFloat() throws SQLException { + rs.updateFloat(1, 1f); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateDouble() throws SQLException { + rs.updateDouble(1, 1.0); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateBigDecimal() throws SQLException { + rs.updateBigDecimal(1, new BigDecimal(1)); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateString() throws SQLException { + rs.updateString(1, "abc"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateBytes() throws SQLException { + rs.updateBytes(1, new byte[]{}); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateDate() throws SQLException { + rs.updateDate(1, new Date(System.currentTimeMillis())); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateTime() throws SQLException { + rs.updateTime(1, new Time(System.currentTimeMillis())); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateTimestamp() throws SQLException { + rs.updateTimestamp(1, new Timestamp(System.currentTimeMillis())); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateAsciiStream() throws SQLException { + rs.updateAsciiStream(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateBinaryStream() throws SQLException { + rs.updateBinaryStream(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateCharacterStream() throws SQLException { + rs.updateCharacterStream(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateObject() throws SQLException { + rs.updateObject(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void insertRow() throws SQLException { + rs.insertRow(); + } + + 
@Test(expected = SQLFeatureNotSupportedException.class) + public void updateRow() throws SQLException { + rs.updateRow(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void deleteRow() throws SQLException { + rs.deleteRow(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void refreshRow() throws SQLException { + rs.refreshRow(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void cancelRowUpdates() throws SQLException { + rs.cancelRowUpdates(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void moveToInsertRow() throws SQLException { + rs.moveToInsertRow(); + } + + @Test + public void getStatement() throws SQLException { + Statement stmt = rs.getStatement(); + Assert.assertNotNull(stmt); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void moveToCurrentRow() throws SQLException { + rs.moveToCurrentRow(); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getRef() throws SQLException { + rs.getRef(1); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getBlob() throws SQLException { + rs.getBlob("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getClob() throws SQLException { + rs.getClob("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getArray() throws SQLException { + rs.getArray("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getURL() throws SQLException { + rs.getURL("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateRef() throws SQLException { + rs.updateRef("f1", null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateBlob() throws SQLException { + rs.updateBlob(1, (InputStream) null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateClob() throws SQLException { + rs.updateClob(1, (Reader) null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateArray() throws SQLException { + rs.updateArray(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getRowId() throws SQLException { + rs.getRowId("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateRowId() throws SQLException { + rs.updateRowId(1, null); + } + + @Test + public void getHoldability() throws SQLException { + Assert.assertEquals(ResultSet.HOLD_CURSORS_OVER_COMMIT, rs.getHoldability()); + } + + @Test + public void isClosed() throws SQLException { + Assert.assertFalse(rs.isClosed()); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateNString() throws SQLException { + rs.updateNString(1, null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateNClob() throws SQLException { + rs.updateNClob(1, (Reader) null); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getNClob() throws SQLException { + rs.getNClob("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getSQLXML() throws SQLException { + rs.getSQLXML("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateSQLXML() throws SQLException { + rs.updateSQLXML(1, null); + } + + @Test + public void getNString() throws SQLException { + String f10 = rs.getNString("f10"); + Assert.assertEquals("涛思数据", f10); + f10 = 
rs.getNString(10); + Assert.assertEquals("涛思数据", f10); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void getNCharacterStream() throws SQLException { + rs.getNCharacterStream("f1"); + } + + @Test(expected = SQLFeatureNotSupportedException.class) + public void updateNCharacterStream() throws SQLException { + rs.updateNCharacterStream(1, null); + } + + @Test + public void unwrap() throws SQLException { + TSDBResultSet unwrap = rs.unwrap(TSDBResultSet.class); + Assert.assertNotNull(unwrap); + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(rs.isWrapperFor(TSDBResultSet.class)); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.TSDBDriver"); + conn = DriverManager.getConnection("jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"); + stmt = conn.createStatement(); + stmt.execute("create database if not exists restful_test"); + stmt.execute("use restful_test"); + stmt.execute("drop table if exists weather"); + stmt.execute("create table if not exists weather(f1 timestamp, f2 int, f3 bigint, f4 float, f5 double, f6 binary(64), f7 smallint, f8 tinyint, f9 bool, f10 nchar(64))"); + stmt.execute("insert into restful_test.weather values('2021-01-01 00:00:00.000', 1, 100, 3.1415, 3.1415926, 'abc', 10, 10, true, '涛思数据')"); + rs = stmt.executeQuery("select * from restful_test.weather"); + rs.next(); + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + + } + + @AfterClass + public static void afterClass() { + try { + if (rs != null) + rs.close(); + if (stmt != null) + stmt.close(); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java new file mode 100644 index 0000000000..9826e6ed76 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/DriverAutoloadTest.java @@ -0,0 +1,41 @@ +package com.taosdata.jdbc.cases; + +import com.taosdata.jdbc.TSDBDriver; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.SQLException; +import java.util.Properties; + +public class DriverAutoloadTest { + + private Properties properties; + private String host = "127.0.0.1"; + + @Test + public void testRestful() throws SQLException { + final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + Connection conn = DriverManager.getConnection(url, properties); + Assert.assertNotNull(conn); + } + + @Test + public void testJni() throws SQLException { + final String url = "jdbc:TAOS://" + host + ":6030/?user=root&password=taosdata"; + Connection conn = DriverManager.getConnection(url, properties); + Assert.assertNotNull(conn); + } + + + @Before + public void before() { + properties = new Properties(); + properties.setProperty(TSDBDriver.PROPERTY_KEY_CHARSET, "UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_LOCALE, "en_US.UTF-8"); + properties.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + } + +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java index 54ee8fd6eb..84149775c3 100644 --- 
a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/InsertDbwithoutUseDbTest.java @@ -14,7 +14,6 @@ import java.util.Random; public class InsertDbwithoutUseDbTest { private static String host = "127.0.0.1"; - // private static String host = "master"; private static Properties properties; private static Random random = new Random(System.currentTimeMillis()); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java new file mode 100644 index 0000000000..f2ac94adc1 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/NullValueInResultSetForJdbcRestfulTest.java @@ -0,0 +1,64 @@ +package com.taosdata.jdbc.cases; + +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import java.sql.*; + +public class NullValueInResultSetForJdbcRestfulTest { + + private static final String host = "127.0.0.1"; + Connection conn; + + @Test + public void test() { + try (Statement stmt = conn.createStatement()) { + ResultSet rs = stmt.executeQuery("select * from weather"); + ResultSetMetaData meta = rs.getMetaData(); + while (rs.next()) { + for (int i = 1; i <= meta.getColumnCount(); i++) { + Object value = rs.getObject(i); + System.out.print(meta.getColumnLabel(i) + ": " + value + "\t"); + } + System.out.println(); + } + + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @Before + public void before() throws SQLException { + final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; + conn = DriverManager.getConnection(url); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_null"); + stmt.execute("create database if not exists test_null"); + stmt.execute("use test_null"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64))"); + stmt.executeUpdate("insert into weather(ts, f1) values(now+1s, 1)"); + stmt.executeUpdate("insert into weather(ts, f2) values(now+2s, 2)"); + stmt.executeUpdate("insert into weather(ts, f3) values(now+3s, 3.0)"); + stmt.executeUpdate("insert into weather(ts, f4) values(now+4s, 4.0)"); + stmt.executeUpdate("insert into weather(ts, f5) values(now+5s, 5)"); + stmt.executeUpdate("insert into weather(ts, f6) values(now+6s, 6)"); + stmt.executeUpdate("insert into weather(ts, f7) values(now+7s, true)"); + stmt.executeUpdate("insert into weather(ts, f8) values(now+8s, 'hello')"); + stmt.executeUpdate("insert into weather(ts, f9) values(now+9s, '涛思数据')"); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + @After + public void after() { + try { + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java index d0e6306704..4ae2f36fe9 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/UnsignedNumberRestfulTest.java @@ -10,8 +10,8 @@ import java.util.Properties; @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class UnsignedNumberRestfulTest { + private static final 
String host = "127.0.0.1"; -// private static final String host = "master"; private static Connection conn; @Test diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java index b0666989ba..a6fb6cfda0 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/AuthenticationTest.java @@ -8,7 +8,6 @@ import java.sql.*; public class AuthenticationTest { private static final String host = "127.0.0.1"; - // private static final String host = "master"; private static final String user = "root"; private static final String password = "taos?data"; private Connection conn; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java index 4e005d1291..abd60f5b63 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulConnectionTest.java @@ -12,7 +12,6 @@ import java.util.Properties; public class RestfulConnectionTest { private static final String host = "127.0.0.1"; - // private static final String host = "master"; private static Connection conn; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java index 1991c17065..a052fbbdcb 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulDatabaseMetaDataTest.java @@ -10,7 +10,6 @@ import java.sql.*; import java.util.Properties; public class RestfulDatabaseMetaDataTest { - // private static final String host = "master"; private static final String host = "127.0.0.1"; private static final String url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; private static Connection connection; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java index 451f5d4916..c8bb69d827 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulJDBCTest.java @@ -10,7 +10,6 @@ import java.util.Random; public class RestfulJDBCTest { private static final String host = "127.0.0.1"; -// private static final String host = "master"; private static Connection connection; private Random random = new Random(System.currentTimeMillis()); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulParameterMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulParameterMetaDataTest.java new file mode 100644 index 0000000000..8bb2532ce8 --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulParameterMetaDataTest.java @@ -0,0 +1,194 @@ +package com.taosdata.jdbc.rs; + +import com.taosdata.jdbc.TSDBConstants; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.sql.*; + +public class RestfulParameterMetaDataTest { + + private static final String host = "127.0.0.1"; + private static Connection conn; + private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, 
?, ?, ?, ?, ?, ?)"; + private static PreparedStatement pstmt_insert; + private static final String sql_select = "select * from t1 where ts > ? and ts <= ? and f1 >= ?"; + private static PreparedStatement pstmt_select; + private static ParameterMetaData parameterMetaData_insert; + private static ParameterMetaData parameterMetaData_select; + + @Test + public void getParameterCount() throws SQLException { + Assert.assertEquals(10, parameterMetaData_insert.getParameterCount()); + } + + @Test + public void isNullable() throws SQLException { + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(1)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(2)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(3)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(4)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(5)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(6)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(7)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(8)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(9)); + Assert.assertEquals(ParameterMetaData.parameterNullableUnknown, parameterMetaData_insert.isNullable(10)); + } + + @Test + public void isSigned() throws SQLException { + Assert.assertEquals(false, parameterMetaData_insert.isSigned(1)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(2)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(3)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(4)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(5)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(6)); + Assert.assertEquals(true, parameterMetaData_insert.isSigned(7)); + Assert.assertEquals(false, parameterMetaData_insert.isSigned(8)); + Assert.assertEquals(false, parameterMetaData_insert.isSigned(9)); + Assert.assertEquals(false, parameterMetaData_insert.isSigned(10)); + } + + @Test + public void getPrecision() throws SQLException { + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(1)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(2)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(3)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(4)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(5)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(6)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(7)); + Assert.assertEquals(0, parameterMetaData_insert.getPrecision(8)); + Assert.assertEquals(5, parameterMetaData_insert.getPrecision(9)); + Assert.assertEquals(5, parameterMetaData_insert.getPrecision(10)); + } + + @Test + public void getScale() throws SQLException { + Assert.assertEquals(0, parameterMetaData_insert.getScale(1)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(2)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(3)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(4)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(5)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(6)); + 
Assert.assertEquals(0, parameterMetaData_insert.getScale(7)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(8)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(9)); + Assert.assertEquals(0, parameterMetaData_insert.getScale(10)); + } + + @Test + public void getParameterType() throws SQLException { + Assert.assertEquals(Types.TIMESTAMP, parameterMetaData_insert.getParameterType(1)); + Assert.assertEquals(Types.INTEGER, parameterMetaData_insert.getParameterType(2)); + Assert.assertEquals(Types.BIGINT, parameterMetaData_insert.getParameterType(3)); + Assert.assertEquals(Types.FLOAT, parameterMetaData_insert.getParameterType(4)); + Assert.assertEquals(Types.DOUBLE, parameterMetaData_insert.getParameterType(5)); + Assert.assertEquals(Types.SMALLINT, parameterMetaData_insert.getParameterType(6)); + Assert.assertEquals(Types.TINYINT, parameterMetaData_insert.getParameterType(7)); + Assert.assertEquals(Types.BOOLEAN, parameterMetaData_insert.getParameterType(8)); + Assert.assertEquals(Types.BINARY, parameterMetaData_insert.getParameterType(9)); + Assert.assertEquals(Types.NCHAR, parameterMetaData_insert.getParameterType(10)); + } + + @Test + public void getParameterTypeName() throws SQLException { + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.TIMESTAMP), parameterMetaData_insert.getParameterTypeName(1)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.INTEGER), parameterMetaData_insert.getParameterTypeName(2)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.BIGINT), parameterMetaData_insert.getParameterTypeName(3)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.FLOAT), parameterMetaData_insert.getParameterTypeName(4)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.DOUBLE), parameterMetaData_insert.getParameterTypeName(5)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.SMALLINT), parameterMetaData_insert.getParameterTypeName(6)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.TINYINT), parameterMetaData_insert.getParameterTypeName(7)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.BOOLEAN), parameterMetaData_insert.getParameterTypeName(8)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.BINARY), parameterMetaData_insert.getParameterTypeName(9)); + Assert.assertEquals(TSDBConstants.jdbcType2TaosTypeName(Types.NCHAR), parameterMetaData_insert.getParameterTypeName(10)); + } + + @Test + public void getParameterClassName() throws SQLException { + Assert.assertEquals(Timestamp.class.getName(), parameterMetaData_insert.getParameterClassName(1)); + Assert.assertEquals(Integer.class.getName(), parameterMetaData_insert.getParameterClassName(2)); + Assert.assertEquals(Long.class.getName(), parameterMetaData_insert.getParameterClassName(3)); + Assert.assertEquals(Float.class.getName(), parameterMetaData_insert.getParameterClassName(4)); + Assert.assertEquals(Double.class.getName(), parameterMetaData_insert.getParameterClassName(5)); + Assert.assertEquals(Short.class.getName(), parameterMetaData_insert.getParameterClassName(6)); + Assert.assertEquals(Byte.class.getName(), parameterMetaData_insert.getParameterClassName(7)); + Assert.assertEquals(Boolean.class.getName(), parameterMetaData_insert.getParameterClassName(8)); + Assert.assertEquals(byte[].class.getName(), parameterMetaData_insert.getParameterClassName(9)); + Assert.assertEquals(String.class.getName(), parameterMetaData_insert.getParameterClassName(10)); + } + + @Test + 
public void getParameterMode() throws SQLException { + for (int i = 1; i <= parameterMetaData_insert.getParameterCount(); i++) { + int parameterMode = parameterMetaData_insert.getParameterMode(i); + Assert.assertEquals(ParameterMetaData.parameterModeUnknown, parameterMode); + } + } + + @Test + public void unwrap() throws SQLException { + RestfulParameterMetaData unwrap = parameterMetaData_insert.unwrap(RestfulParameterMetaData.class); + Assert.assertNotNull(unwrap); + } + + @Test + public void isWrapperFor() throws SQLException { + Assert.assertTrue(parameterMetaData_insert.isWrapperFor(RestfulParameterMetaData.class)); + } + + @BeforeClass + public static void beforeClass() { + try { + Class.forName("com.taosdata.jdbc.rs.RestfulDriver"); + conn = DriverManager.getConnection("jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"); + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop database if exists test_pstmt"); + stmt.execute("create database if not exists test_pstmt"); + stmt.execute("use test_pstmt"); + stmt.execute("create table weather(ts timestamp, f1 int, f2 bigint, f3 float, f4 double, f5 smallint, f6 tinyint, f7 bool, f8 binary(64), f9 nchar(64)) tags(loc nchar(64))"); + stmt.execute("create table t1 using weather tags('beijing')"); + } + pstmt_insert = conn.prepareStatement(sql_insert); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(2, 111); + pstmt_insert.setObject(3, Long.MAX_VALUE); + pstmt_insert.setObject(4, 3.14159265354f); + pstmt_insert.setObject(5, Double.MAX_VALUE); + pstmt_insert.setObject(6, Short.MAX_VALUE); + pstmt_insert.setObject(7, Byte.MAX_VALUE); + pstmt_insert.setObject(8, true); + pstmt_insert.setObject(9, "hello".getBytes()); + pstmt_insert.setObject(10, "Hello"); + parameterMetaData_insert = pstmt_insert.getParameterMetaData(); + + pstmt_select = conn.prepareStatement(sql_select); + pstmt_select.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_select.setTimestamp(2, new Timestamp(System.currentTimeMillis() + 10000)); + pstmt_select.setInt(3, 0); + parameterMetaData_select = pstmt_select.getParameterMetaData(); + + } catch (ClassNotFoundException | SQLException e) { + e.printStackTrace(); + } + } + + @AfterClass + public static void afterClass() { + try { + if (pstmt_insert != null) + pstmt_insert.close(); + if (pstmt_select != null) + pstmt_select.close(); + if (conn != null) + conn.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } +} \ No newline at end of file diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java index a3867c1b6e..094dff8c8d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulPreparedStatementTest.java @@ -5,11 +5,12 @@ import org.junit.Assert; import org.junit.BeforeClass; import org.junit.Test; +import java.io.IOException; +import java.io.Serializable; import java.sql.*; public class RestfulPreparedStatementTest { private static final String host = "127.0.0.1"; - // private static final String host = "master"; private static Connection conn; private static final String sql_insert = "insert into t1 values(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"; private static PreparedStatement pstmt_insert; @@ -38,48 +39,73 @@ public class RestfulPreparedStatementTest { @Test public void 
executeUpdate() throws SQLException { pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); - pstmt_insert.setFloat(2, 3.14f); + pstmt_insert.setFloat(4, 3.14f); int result = pstmt_insert.executeUpdate(); Assert.assertEquals(1, result); } @Test public void setNull() throws SQLException { - pstmt_insert.setNull(2, Types.FLOAT); + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setNull(2, Types.INTEGER); + int result = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, result); } @Test public void setBoolean() throws SQLException { - pstmt_insert.setBoolean(2, true); + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setBoolean(8, true); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void setByte() throws SQLException { - pstmt_insert.setByte(1, (byte) 0x001); + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setByte(7, (byte) 0x001); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setShort() { - + public void setShort() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setShort(6, (short) 2); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setInt() { - + public void setInt() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setInt(2, 10086); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setLong() { - + public void setLong() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setLong(3, Long.MAX_VALUE); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setFloat() { - + public void setFloat() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setFloat(4, 3.14f); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void setDouble() { + public void setDouble() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setDouble(5, 3.14444); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -88,12 +114,56 @@ public class RestfulPreparedStatementTest { } @Test - public void setString() { + public void setString() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setString(10, "aaaa"); + boolean execute = pstmt_insert.execute(); + Assert.assertFalse(execute); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setString(10, new Person("john", 33, true).toString()); + Assert.assertFalse(pstmt_insert.execute()); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setString(10, new Person("john", 33, true).toString().replaceAll("'", "\"")); + Assert.assertFalse(pstmt_insert.execute()); } - @Test(expected = SQLFeatureNotSupportedException.class) - public void setBytes() throws SQLException { - pstmt_insert.setBytes(1, new byte[]{}); + class Person implements Serializable { + String name; + int age; + boolean sex; + + public 
Person(String name, int age, boolean sex) { + this.name = name; + this.age = age; + this.sex = sex; + } + + @Override + public String toString() { + return "Person{" + + "name='" + name + '\'' + + ", age=" + age + + ", sex=" + sex + + '}'; + } + } + + @Test + public void setBytes() throws SQLException, IOException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + +// ByteArrayOutputStream baos = new ByteArrayOutputStream(); +// ObjectOutputStream oos = new ObjectOutputStream(baos); +// oos.writeObject(new Person("john", 33, true)); +// oos.flush(); +// byte[] bytes = baos.toByteArray(); +// pstmt_insert.setBytes(9, bytes); + + pstmt_insert.setBytes(9, new Person("john", 33, true).toString().getBytes()); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -107,8 +177,10 @@ public class RestfulPreparedStatementTest { } @Test - public void setTimestamp() { - //TODO + public void setTimestamp() throws SQLException { + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -122,24 +194,69 @@ public class RestfulPreparedStatementTest { } @Test - public void clearParameters() { - //TODO + public void clearParameters() throws SQLException { + pstmt_insert.clearParameters(); } @Test public void setObject() throws SQLException { - pstmt_insert.setObject(1, System.currentTimeMillis()); - //TODO + pstmt_insert.setObject(1, new Timestamp(System.currentTimeMillis())); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(2, 111); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(3, Long.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(4, 3.14159265354f); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(5, Double.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(6, Short.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(7, Byte.MAX_VALUE); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(8, true); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(9, "hello".getBytes()); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); + + pstmt_insert.setTimestamp(1, new Timestamp(System.currentTimeMillis())); + pstmt_insert.setObject(10, "Hello"); + ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); } @Test - public void execute() { - //TODO - } + public void execute() throws SQLException { + pstmt_insert.setTimestamp(1, new 
Timestamp(System.currentTimeMillis())); + int ret = pstmt_insert.executeUpdate(); + Assert.assertEquals(1, ret); - @Test - public void addBatch() { - //TODO: + executeQuery(); } @Test(expected = SQLFeatureNotSupportedException.class) @@ -180,8 +297,8 @@ public class RestfulPreparedStatementTest { @Test public void getParameterMetaData() throws SQLException { ParameterMetaData parameterMetaData = pstmt_insert.getParameterMetaData(); - Assert.assertNull(parameterMetaData); - //TODO: + Assert.assertNotNull(parameterMetaData); + //TODO: modify the test case } @Test(expected = SQLFeatureNotSupportedException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java index 13973d8b6b..c7fc812972 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetMetaDataTest.java @@ -10,7 +10,6 @@ import java.sql.*; public class RestfulResultSetMetaDataTest { private static final String host = "127.0.0.1"; -// private static final String host = "master"; private static Connection conn; private static Statement stmt; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java index a15b1964ea..d6b2a58127 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulResultSetTest.java @@ -1,5 +1,8 @@ package com.taosdata.jdbc.rs; +import com.google.common.primitives.Ints; +import com.google.common.primitives.Longs; +import com.google.common.primitives.Shorts; import org.junit.AfterClass; import org.junit.Assert; import org.junit.BeforeClass; @@ -9,11 +12,12 @@ import java.io.InputStream; import java.io.Reader; import java.math.BigDecimal; import java.sql.*; +import java.text.ParseException; +import java.text.SimpleDateFormat; public class RestfulResultSetTest { private static final String host = "127.0.0.1"; -// private static final String host = "master"; private static Connection conn; private static Statement stmt; @@ -88,24 +92,75 @@ public class RestfulResultSetTest { Assert.assertEquals(3.1415926, f5, 0.0); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getBigDecimal() throws SQLException { - rs.getBigDecimal("f1"); + BigDecimal f1 = rs.getBigDecimal("f1"); + Assert.assertEquals(1609430400000l, f1.longValue()); + + BigDecimal f2 = rs.getBigDecimal("f2"); + Assert.assertEquals(1, f2.intValue()); + + BigDecimal f3 = rs.getBigDecimal("f3"); + Assert.assertEquals(100l, f3.longValue()); + + BigDecimal f4 = rs.getBigDecimal("f4"); + Assert.assertEquals(3.1415f, f4.floatValue(), 0.00000f); + + BigDecimal f5 = rs.getBigDecimal("f5"); + Assert.assertEquals(3.1415926, f5.doubleValue(), 0.0000000); + + BigDecimal f7 = rs.getBigDecimal("f7"); + Assert.assertEquals(10, f7.intValue()); + + BigDecimal f8 = rs.getBigDecimal("f8"); + Assert.assertEquals(10, f8.intValue()); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getBytes() throws SQLException { - rs.getBytes("f1"); + byte[] f1 = rs.getBytes("f1"); + Assert.assertEquals("2021-01-01 00:00:00.0", new String(f1)); + + byte[] f2 = rs.getBytes("f2"); + Assert.assertEquals(1, Ints.fromByteArray(f2)); + + byte[] f3 = rs.getBytes("f3"); + 
Assert.assertEquals(100l, Longs.fromByteArray(f3)); + + byte[] f4 = rs.getBytes("f4"); + Assert.assertEquals(3.1415f, Float.valueOf(new String(f4)), 0.000000f); + + byte[] f5 = rs.getBytes("f5"); + Assert.assertEquals(3.1415926, Double.valueOf(new String(f5)), 0.000000f); + + byte[] f6 = rs.getBytes("f6"); + Assert.assertEquals("abc", new String(f6)); + + byte[] f7 = rs.getBytes("f7"); + Assert.assertEquals((short) 10, Shorts.fromByteArray(f7)); + + byte[] f8 = rs.getBytes("f8"); + Assert.assertEquals(1, f8.length); + Assert.assertEquals((byte) 10, f8[0]); + + byte[] f9 = rs.getBytes("f9"); + Assert.assertEquals("true", new String(f9)); + + byte[] f10 = rs.getBytes("f10"); + Assert.assertEquals("涛思数据", new String(f10)); } - @Test(expected = SQLFeatureNotSupportedException.class) - public void getDate() throws SQLException { - rs.getDate("f1"); + @Test + public void getDate() throws SQLException, ParseException { + Date f1 = rs.getDate("f1"); + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd"); + Assert.assertEquals(sdf.parse("2021-01-01"), f1); } - @Test(expected = SQLFeatureNotSupportedException.class) + @Test public void getTime() throws SQLException { - rs.getTime("f1"); + Time f1 = rs.getTime("f1"); + Assert.assertEquals("00:00:00", f1.toString()); } @Test @@ -152,9 +207,49 @@ public class RestfulResultSetTest { Assert.assertNotNull(meta); } - @Test(expected = SQLFeatureNotSupportedException.class) - public void getObject() throws SQLException { - rs.getObject("f1"); + @Test + public void getObject() throws SQLException, ParseException { + Object f1 = rs.getObject("f1"); + Assert.assertEquals(Timestamp.class, f1.getClass()); + SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd hh:mm:ss.sss"); + java.util.Date date = sdf.parse("2021-01-01 00:00:00.000"); + Assert.assertEquals(new Timestamp(date.getTime()), f1); + + Object f2 = rs.getObject("f2"); + Assert.assertEquals(Integer.class, f2.getClass()); + Assert.assertEquals(1, f2); + + Object f3 = rs.getObject("f3"); + Assert.assertEquals(Long.class, f3.getClass()); + Assert.assertEquals(100l, f3); + + Object f4 = rs.getObject("f4"); + Assert.assertEquals(Float.class, f4.getClass()); + Assert.assertEquals(3.1415f, f4); + + Object f5 = rs.getObject("f5"); + Assert.assertEquals(Double.class, f5.getClass()); + Assert.assertEquals(3.1415926, f5); + + Object f6 = rs.getObject("f6"); + Assert.assertEquals(byte[].class, f6.getClass()); + Assert.assertEquals("abc", new String((byte[]) f6)); + + Object f7 = rs.getObject("f7"); + Assert.assertEquals(Short.class, f7.getClass()); + Assert.assertEquals((short) 10, f7); + + Object f8 = rs.getObject("f8"); + Assert.assertEquals(Byte.class, f8.getClass()); + Assert.assertEquals((byte) 10, f8); + + Object f9 = rs.getObject("f9"); + Assert.assertEquals(Boolean.class, f9.getClass()); + Assert.assertEquals(true, f9); + + Object f10 = rs.getObject("f10"); + Assert.assertEquals(String.class, f10.getClass()); + Assert.assertEquals("涛思数据", f10); } @Test(expected = SQLException.class) diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java index 653e480413..1be32b502d 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/RestfulStatementTest.java @@ -12,7 +12,6 @@ import java.util.UUID; public class RestfulStatementTest { private static final String host = "127.0.0.1"; - // private static 
final String host = "master"; private static Connection conn; private static Statement stmt; diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java index fe4d04775d..4ad9826384 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/rs/SQLTest.java @@ -12,7 +12,6 @@ import java.sql.*; @FixMethodOrder(MethodSorters.NAME_ASCENDING) public class SQLTest { private static final String host = "127.0.0.1"; -// private static final String host = "master"; private static Connection connection; @Test diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java new file mode 100644 index 0000000000..fd6c83ad1c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/utils/OSUtilsTest.java @@ -0,0 +1,30 @@ +package com.taosdata.jdbc.utils; + +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +public class OSUtilsTest { + + private String OS; + + @Test + public void inWindows() { + Assert.assertEquals(OS.indexOf("win") >= 0, OSUtils.isWindows()); + } + + @Test + public void isMac() { + Assert.assertEquals(OS.indexOf("mac") >= 0, OSUtils.isMac()); + } + + @Test + public void isLinux() { + Assert.assertEquals(OS.indexOf("nux") >= 0, OSUtils.isLinux()); + } + + @Before + public void before() { + OS = System.getProperty("os.name").toLowerCase(); + } +} \ No newline at end of file diff --git a/src/dnode/CMakeLists.txt b/src/dnode/CMakeLists.txt index 516e5b4d1f..dd18f00920 100644 --- a/src/dnode/CMakeLists.txt +++ b/src/dnode/CMakeLists.txt @@ -35,6 +35,10 @@ IF (TD_TOPIC) TARGET_LINK_LIBRARIES(taosd topic) ENDIF () +IF (TD_MODULE AND TD_LINUX) + TARGET_LINK_LIBRARIES(taosd module dl) +ENDIF () + SET(PREPARE_ENV_CMD "prepare_env_cmd") SET(PREPARE_ENV_TARGET "prepare_env_target") ADD_CUSTOM_COMMAND(OUTPUT ${PREPARE_ENV_CMD} diff --git a/src/dnode/src/dnodeMain.c b/src/dnode/src/dnodeMain.c index c24eac84cf..410e6bb188 100644 --- a/src/dnode/src/dnodeMain.c +++ b/src/dnode/src/dnodeMain.c @@ -39,6 +39,13 @@ #include "dnodeMPeer.h" #include "dnodeShell.h" #include "dnodeTelemetry.h" +#include "module.h" + +#if !defined(_MODULE) || !defined(_TD_LINUX) +int32_t moduleStart() { return 0; } +void moduleStop() {} +#endif + void *tsDnodeTmr = NULL; static SRunStatus tsRunStatus = TSDB_RUN_STATUS_STOPPED; @@ -63,13 +70,16 @@ static SStep tsDnodeSteps[] = { {"dnode-vread", dnodeInitVRead, dnodeCleanupVRead}, {"dnode-vwrite", dnodeInitVWrite, dnodeCleanupVWrite}, {"dnode-vmgmt", dnodeInitVMgmt, dnodeCleanupVMgmt}, - {"dnode-mread", dnodeInitMRead, dnodeCleanupMRead}, - {"dnode-mwrite", dnodeInitMWrite, dnodeCleanupMWrite}, - {"dnode-mpeer", dnodeInitMPeer, dnodeCleanupMPeer}, + {"dnode-mread", dnodeInitMRead, NULL}, + {"dnode-mwrite", dnodeInitMWrite, NULL}, + {"dnode-mpeer", dnodeInitMPeer, NULL}, {"dnode-client", dnodeInitClient, dnodeCleanupClient}, {"dnode-server", dnodeInitServer, dnodeCleanupServer}, {"dnode-vnodes", dnodeInitVnodes, dnodeCleanupVnodes}, {"dnode-modules", dnodeInitModules, dnodeCleanupModules}, + {"dnode-mread", NULL, dnodeCleanupMRead}, + {"dnode-mwrite", NULL, dnodeCleanupMWrite}, + {"dnode-mpeer", NULL, dnodeCleanupMPeer}, {"dnode-shell", dnodeInitShell, dnodeCleanupShell}, {"dnode-statustmr", dnodeInitStatusTimer,dnodeCleanupStatusTimer}, {"dnode-telemetry", dnodeInitTelemetry, 
dnodeCleanupTelemetry}, @@ -146,6 +156,7 @@ int32_t dnodeInitSystem() { } dnodeSetRunStatus(TSDB_RUN_STATUS_RUNING); + moduleStart(); dnodeReportStep("TDengine", "initialized successfully", 1); dInfo("TDengine is initialized successfully"); @@ -155,6 +166,7 @@ int32_t dnodeInitSystem() { void dnodeCleanUpSystem() { if (dnodeGetRunStatus() != TSDB_RUN_STATUS_STOPPED) { + moduleStop(); dnodeSetRunStatus(TSDB_RUN_STATUS_STOPPED); dnodeCleanupTmr(); dnodeCleanupComponents(); @@ -225,6 +237,20 @@ static int32_t dnodeInitStorage() { return -1; } + TDIR *tdir = tfsOpendir("vnode_bak/.staging"); + bool stagingNotEmpty = tfsReaddir(tdir) != NULL; + tfsClosedir(tdir); + + if (stagingNotEmpty) { + dError("vnode_bak/.staging dir not empty, fix it first."); + return -1; + } + + if (tfsMkdir("vnode_bak/.staging") < 0) { + dError("failed to create vnode_bak/.staging dir since %s", tstrerror(terrno)); + return -1; + } + dnodeCheckDataDirOpenned(tsDnodeDir); dInfo("dnode storage is initialized at %s", tsDnodeDir); diff --git a/src/dnode/src/dnodePeer.c b/src/dnode/src/dnodePeer.c index 79c60874f9..b8ce1c802b 100644 --- a/src/dnode/src/dnodePeer.c +++ b/src/dnode/src/dnodePeer.c @@ -43,6 +43,7 @@ int32_t dnodeInitServer() { dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeDispatchToVMgmtQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeDispatchToVMgmtQueue; + dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = dnodeDispatchToVMgmtQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeDispatchToVMgmtQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeDispatchToVMgmtQueue; dnodeProcessReqMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeDispatchToVMgmtQueue; diff --git a/src/dnode/src/dnodeShell.c b/src/dnode/src/dnodeShell.c index 60d9c38c05..50343cfd32 100644 --- a/src/dnode/src/dnodeShell.c +++ b/src/dnode/src/dnodeShell.c @@ -49,6 +49,7 @@ int32_t dnodeInitShell() { dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_DB] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_CREATE_TP] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_DB] = dnodeDispatchToMWriteQueue; + dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_SYNC_DB] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_DROP_TP] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_DB] = dnodeDispatchToMWriteQueue; dnodeProcessShellMsgFp[TSDB_MSG_TYPE_CM_ALTER_TP] = dnodeDispatchToMWriteQueue; diff --git a/src/dnode/src/dnodeVMgmt.c b/src/dnode/src/dnodeVMgmt.c index 1e428fc8b1..66c94bf675 100644 --- a/src/dnode/src/dnodeVMgmt.c +++ b/src/dnode/src/dnodeVMgmt.c @@ -30,6 +30,7 @@ static taos_queue tsVMgmtQueue = NULL; static void * dnodeProcessMgmtQueue(void *param); static int32_t dnodeProcessCreateVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *pMsg); +static int32_t dnodeProcessSyncVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *pMsg); static int32_t dnodeProcessAlterStreamMsg(SRpcMsg *pMsg); static int32_t dnodeProcessConfigDnodeMsg(SRpcMsg *pMsg); @@ -39,6 +40,7 @@ static int32_t (*dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *pMsg); int32_t dnodeInitVMgmt() { dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = dnodeProcessCreateVnodeMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = dnodeProcessAlterVnodeMsg; + dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = dnodeProcessSyncVnodeMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = dnodeProcessDropVnodeMsg; 
dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = dnodeProcessAlterStreamMsg; dnodeProcessMgmtMsgFp[TSDB_MSG_TYPE_MD_CONFIG_DNODE] = dnodeProcessConfigDnodeMsg; @@ -179,6 +181,13 @@ static int32_t dnodeProcessAlterVnodeMsg(SRpcMsg *rpcMsg) { } } +static int32_t dnodeProcessSyncVnodeMsg(SRpcMsg *rpcMsg) { + SSyncVnodeMsg *pSyncVnode = rpcMsg->pCont; + pSyncVnode->vgId = htonl(pSyncVnode->vgId); + + return vnodeSync(pSyncVnode->vgId); +} + static int32_t dnodeProcessDropVnodeMsg(SRpcMsg *rpcMsg) { SDropVnodeMsg *pDrop = rpcMsg->pCont; pDrop->vgId = htonl(pDrop->vgId); diff --git a/src/dnode/src/dnodeVWrite.c b/src/dnode/src/dnodeVWrite.c index 87b31e4604..84fd260d91 100644 --- a/src/dnode/src/dnodeVWrite.c +++ b/src/dnode/src/dnodeVWrite.c @@ -205,7 +205,7 @@ static void *dnodeProcessVWriteQueue(void *wparam) { pWrite->rpcMsg.ahandle, taosMsg[pWrite->pHead.msgType], qtypeStr[qtype], pWrite->pHead.version); pWrite->code = vnodeProcessWrite(pVnode, &pWrite->pHead, qtype, pWrite); - if (pWrite->code <= 0) pWrite->processedCount = 1; + if (pWrite->code <= 0) atomic_add_fetch_32(&pWrite->processedCount, 1); if (pWrite->code > 0) pWrite->code = 0; if (pWrite->code == 0 && pWrite->pHead.msgType != TSDB_MSG_TYPE_SUBMIT) forceFsync = true; diff --git a/src/dnode/src/dnodeVnodes.c b/src/dnode/src/dnodeVnodes.c index d00314fcbc..d96251cebe 100644 --- a/src/dnode/src/dnodeVnodes.c +++ b/src/dnode/src/dnodeVnodes.c @@ -202,10 +202,11 @@ static void dnodeProcessStatusRsp(SRpcMsg *pMsg) { char clusterId[TSDB_CLUSTER_ID_LEN]; dnodeGetClusterId(clusterId); if (clusterId[0] != '\0') { - dError("exit zombie dropped dnode"); - exit(EXIT_FAILURE); + dError("exit zombie dropped dnode"); + exit(EXIT_FAILURE); } } + taosTmrReset(dnodeSendStatusMsg, tsStatusInterval * 1000, NULL, tsDnodeTmr, &tsStatusTimer); return; } diff --git a/src/inc/module.h b/src/inc/module.h new file mode 100644 index 0000000000..b9b64c493e --- /dev/null +++ b/src/inc/module.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see . 
+ */ + +#ifndef TDENGINE_MODULE +#define TDENGINE_MODULE + +#ifdef __cplusplus +extern "C" { +#endif + +int32_t moduleStart(); +void moduleStop(); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/inc/query.h b/src/inc/query.h index 77a12ebfc5..c9dabcef54 100644 --- a/src/inc/query.h +++ b/src/inc/query.h @@ -38,7 +38,7 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryTableMs * @param qinfo * @return */ -bool qTableQuery(qinfo_t qinfo); +bool qTableQuery(qinfo_t qinfo, uint64_t *qId); /** * Retrieve the produced results information, if current query is not paused or completed, diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h index 6bd4c967f5..e9170860a6 100644 --- a/src/inc/taosdef.h +++ b/src/inc/taosdef.h @@ -318,7 +318,7 @@ do { \ #define TSDB_MAX_DB_QUORUM_OPTION 2 #define TSDB_DEFAULT_DB_QUORUM_OPTION 1 -#define TSDB_MAX_JOIN_TABLE_NUM 5 +#define TSDB_MAX_JOIN_TABLE_NUM 10 #define TSDB_MAX_UNION_CLAUSE 5 #define TSDB_MAX_BINARY_LEN (TSDB_MAX_BYTES_PER_ROW-TSDB_KEYSIZE) diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index 8a92c6dedb..53b4180511 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -59,6 +59,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_DROP_STABLE, "drop-stable" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_STREAM, "alter-stream" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CONFIG_DNODE, "config-dnode" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_ALTER_VNODE, "alter-vnode" ) +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_SYNC_VNODE, "sync-vnode" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_MD_CREATE_MNODE, "create-mnode" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY6, "dummy6" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_DUMMY7, "dummy7" ) @@ -77,6 +78,7 @@ TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_DB, "create-db" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_DB, "drop-db" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_USE_DB, "use-db" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_DB, "alter-db" ) +TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_SYNC_DB, "sync-db-replica" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_CREATE_TABLE, "create-table" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_DROP_TABLE, "drop-table" ) TAOS_DEFINE_MESSAGE_TYPE( TSDB_MSG_TYPE_CM_ALTER_TABLE, "alter-table" ) @@ -388,7 +390,7 @@ typedef struct { typedef struct { int32_t vgId; -} SDropVnodeMsg; +} SDropVnodeMsg, SSyncVnodeMsg; typedef struct SColIndex { int16_t colId; // column id @@ -500,12 +502,13 @@ typedef struct { typedef struct { int32_t code; - uint64_t qid; // dnode generated query id + union{uint64_t qhandle; uint64_t qId;}; // query handle } SQueryTableRsp; +// todo: the show handle should be replaced with id typedef struct { SMsgHead header; - union{uint64_t qid; uint64_t qhandle;}; + union{uint64_t qhandle; uint64_t qId;}; // query handle uint16_t free; } SRetrieveTableMsg; @@ -561,7 +564,7 @@ typedef struct { typedef struct { char db[TSDB_TABLE_FNAME_LEN]; uint8_t ignoreNotExists; -} SDropDbMsg, SUseDbMsg; +} SDropDbMsg, SUseDbMsg, SSyncDbMsg; // IMPORTANT: sizeof(SVnodeStatisticInfo) should not exceed // TSDB_FILE_HEADER_LEN/4 - TSDB_FILE_HEADER_VERSION_SIZE @@ -805,7 +808,7 @@ typedef struct { uint32_t queryId; int64_t useconds; int64_t stime; - uint64_t qid; + uint64_t qId; } SQueryDesc; typedef struct { diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 493bdbe5de..495bfa2384 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -245,7 +245,7 @@ typedef struct { * @param qinfo query info handle from query processor * 
@return */ -TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, void *qinfo, +TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfoGroup, uint64_t qId, SMemRef *pRef); /** @@ -258,7 +258,7 @@ TsdbQueryHandleT *tsdbQueryTables(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable * @param tableInfo table list. * @return */ -TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, void *qinfo, +TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *tableInfo, uint64_t qId, SMemRef *pRef); /** @@ -277,7 +277,7 @@ SArray *tsdbGetQueriedTableList(TsdbQueryHandleT *pHandle); * @return */ TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, - void *qinfo, SMemRef *pRef); + uint64_t qId, SMemRef *pRef); /** diff --git a/src/inc/tsync.h b/src/inc/tsync.h index 99dfd3a6a3..d1b68e3f5a 100644 --- a/src/inc/tsync.h +++ b/src/inc/tsync.h @@ -79,9 +79,6 @@ typedef void (*FStopSyncFile)(int32_t vgId, uint64_t fversion); // get file version typedef int32_t (*FGetVersion)(int32_t vgId, uint64_t *fver, uint64_t *vver); -// reset version -typedef int32_t (*FResetVersion)(int32_t vgId, uint64_t fver); - typedef int32_t (*FSendFile)(void *tsdb, SOCKET socketFd); typedef int32_t (*FRecvFile)(void *tsdb, SOCKET socketFd); @@ -99,7 +96,6 @@ typedef struct { FStartSyncFile startSyncFileFp; FStopSyncFile stopSyncFileFp; FGetVersion getVersionFp; - FResetVersion resetVersionFp; FSendFile sendFileFp; FRecvFile recvFileFp; } SSyncInfo; diff --git a/src/inc/ttokendef.h b/src/inc/ttokendef.h index 91c08fa26a..e9f95660f7 100644 --- a/src/inc/ttokendef.h +++ b/src/inc/ttokendef.h @@ -152,56 +152,57 @@ #define TK_NOW 133 #define TK_RESET 134 #define TK_QUERY 135 -#define TK_ADD 136 -#define TK_COLUMN 137 -#define TK_TAG 138 -#define TK_CHANGE 139 -#define TK_SET 140 -#define TK_KILL 141 -#define TK_CONNECTION 142 -#define TK_STREAM 143 -#define TK_COLON 144 -#define TK_ABORT 145 -#define TK_AFTER 146 -#define TK_ATTACH 147 -#define TK_BEFORE 148 -#define TK_BEGIN 149 -#define TK_CASCADE 150 -#define TK_CLUSTER 151 -#define TK_CONFLICT 152 -#define TK_COPY 153 -#define TK_DEFERRED 154 -#define TK_DELIMITERS 155 -#define TK_DETACH 156 -#define TK_EACH 157 -#define TK_END 158 -#define TK_EXPLAIN 159 -#define TK_FAIL 160 -#define TK_FOR 161 -#define TK_IGNORE 162 -#define TK_IMMEDIATE 163 -#define TK_INITIALLY 164 -#define TK_INSTEAD 165 -#define TK_MATCH 166 -#define TK_KEY 167 -#define TK_OF 168 -#define TK_RAISE 169 -#define TK_REPLACE 170 -#define TK_RESTRICT 171 -#define TK_ROW 172 -#define TK_STATEMENT 173 -#define TK_TRIGGER 174 -#define TK_VIEW 175 -#define TK_SEMI 176 -#define TK_NONE 177 -#define TK_PREV 178 -#define TK_LINEAR 179 -#define TK_IMPORT 180 -#define TK_TBNAME 181 -#define TK_JOIN 182 -#define TK_INSERT 183 -#define TK_INTO 184 -#define TK_VALUES 185 +#define TK_SYNCDB 136 +#define TK_ADD 137 +#define TK_COLUMN 138 +#define TK_TAG 139 +#define TK_CHANGE 140 +#define TK_SET 141 +#define TK_KILL 142 +#define TK_CONNECTION 143 +#define TK_STREAM 144 +#define TK_COLON 145 +#define TK_ABORT 146 +#define TK_AFTER 147 +#define TK_ATTACH 148 +#define TK_BEFORE 149 +#define TK_BEGIN 150 +#define TK_CASCADE 151 +#define TK_CLUSTER 152 +#define TK_CONFLICT 153 +#define TK_COPY 154 +#define TK_DEFERRED 155 +#define TK_DELIMITERS 156 +#define TK_DETACH 157 +#define TK_EACH 158 +#define 
TK_END 159 +#define TK_EXPLAIN 160 +#define TK_FAIL 161 +#define TK_FOR 162 +#define TK_IGNORE 163 +#define TK_IMMEDIATE 164 +#define TK_INITIALLY 165 +#define TK_INSTEAD 166 +#define TK_MATCH 167 +#define TK_KEY 168 +#define TK_OF 169 +#define TK_RAISE 170 +#define TK_REPLACE 171 +#define TK_RESTRICT 172 +#define TK_ROW 173 +#define TK_STATEMENT 174 +#define TK_TRIGGER 175 +#define TK_VIEW 176 +#define TK_SEMI 177 +#define TK_NONE 178 +#define TK_PREV 179 +#define TK_LINEAR 180 +#define TK_IMPORT 181 +#define TK_TBNAME 182 +#define TK_JOIN 183 +#define TK_INSERT 184 +#define TK_INTO 185 +#define TK_VALUES 186 #define TK_SPACE 300 diff --git a/src/inc/vnode.h b/src/inc/vnode.h index dddec83da8..39bd2f46c3 100644 --- a/src/inc/vnode.h +++ b/src/inc/vnode.h @@ -60,6 +60,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg); int32_t vnodeDrop(int32_t vgId); int32_t vnodeOpen(int32_t vgId); int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg); +int32_t vnodeSync(int32_t vgId); int32_t vnodeClose(int32_t vgId); // vnodeMgmt @@ -89,4 +90,4 @@ int32_t vnodeProcessRead(void *pVnode, SVReadMsg *pRead); } #endif -#endif \ No newline at end of file +#endif diff --git a/src/kit/shell/inc/shell.h b/src/kit/shell/inc/shell.h index 019f3e5d92..d0b7149541 100644 --- a/src/kit/shell/inc/shell.h +++ b/src/kit/shell/inc/shell.h @@ -74,6 +74,7 @@ void source_file(TAOS* con, char* fptr); void source_dir(TAOS* con, SShellArguments* args); void shellCheck(TAOS* con, SShellArguments* args); void get_history_path(char* history); +void shellCheck(TAOS* con, SShellArguments* args); void cleanup_handler(void* arg); void exitShell(); int shellDumpResult(TAOS_RES* con, char* fname, int* error_no, bool printMode); diff --git a/src/kit/shell/src/shellEngine.c b/src/kit/shell/src/shellEngine.c index b9529aac8e..0eb1248fad 100644 --- a/src/kit/shell/src/shellEngine.c +++ b/src/kit/shell/src/shellEngine.c @@ -132,7 +132,6 @@ TAOS *shellInit(SShellArguments *args) { return con; } - static bool isEmptyCommand(const char* cmd) { for (char c = *cmd++; c != 0; c = *cmd++) { if (c != ' ' && c != '\t' && c != ';') { diff --git a/src/kit/taosdemo/CMakeLists.txt b/src/kit/taosdemo/CMakeLists.txt index a75157b94c..ba27044a87 100644 --- a/src/kit/taosdemo/CMakeLists.txt +++ b/src/kit/taosdemo/CMakeLists.txt @@ -3,6 +3,55 @@ PROJECT(TDengine) INCLUDE_DIRECTORIES(${TD_COMMUNITY_DIR}/src/client/inc) +FIND_PACKAGE(Git) +IF (GIT_FOUND) + MESSAGE("Git found") + EXECUTE_PROCESS( + COMMAND ${GIT_EXECUTABLE} log --pretty=oneline -n 1 ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c + RESULT_VARIABLE RESULT + OUTPUT_VARIABLE TAOSDEMO_COMMIT) + EXECUTE_PROCESS( + COMMAND bash "-c" "echo '${TAOSDEMO_COMMIT}' | awk '{print $1}' | cut -c -9" + RESULT_VARIABLE RESULT + OUTPUT_VARIABLE TAOSDEMO_COMMIT_SHA1) + EXECUTE_PROCESS( + COMMAND ${GIT_EXECUTABLE} status -z -s ${CMAKE_CURRENT_LIST_DIR}/taosdemo.c + RESULT_VARIABLE RESULT + OUTPUT_VARIABLE TAOSDEMO_STATUS) + EXECUTE_PROCESS( + COMMAND bash "-c" "echo '${TAOSDEMO_STATUS}' | awk '{print $1}'" + RESULT_VARIABLE RESULT + OUTPUT_VARIABLE TAOSDEMO_STATUS) + MESSAGE("taosdemo.c status: " ${TAOSDEMO_STATUS}) +ELSE() + MESSAGE("Git not found") + SET(TAOSDEMO_COMMIT_SHA1 "unknown") + SET(TAOSDEMO_STATUS "unknown") +ENDIF (GIT_FOUND) + +STRING(STRIP ${TAOSDEMO_COMMIT_SHA1} TAOSDEMO_COMMIT_SHA1) +MESSAGE("taosdemo's latest commit in short is:" ${TAOSDEMO_COMMIT_SHA1}) +STRING(STRIP ${TAOSDEMO_STATUS} TAOSDEMO_STATUS) + +IF (TAOSDEMO_STATUS MATCHES "M") + SET(TAOSDEMO_STATUS "modified") +ELSE() + 
SET(TAOSDEMO_STATUS "") +ENDIF () +MESSAGE("taosdemo's status is:" ${TAOSDEMO_STATUS}) + +ADD_DEFINITIONS(-DTAOSDEMO_COMMIT_SHA1="${TAOSDEMO_COMMIT_SHA1}") +ADD_DEFINITIONS(-DTAOSDEMO_STATUS="${TAOSDEMO_STATUS}") + +MESSAGE("VERNUMBER is:" ${VERNUMBER}) +IF ("${VERNUMBER}" STREQUAL "") + SET(TD_VERSION_NUMBER "TDengine-version-unknown") +ELSE() + SET(TD_VERSION_NUMBER ${VERNUMBER}) +ENDIF () +MESSAGE("TD_VERSION_NUMBER is:" ${TD_VERSION_NUMBER}) +ADD_DEFINITIONS(-DTD_VERNUMBER="${TD_VERSION_NUMBER}") + IF (TD_LINUX) AUX_SOURCE_DIRECTORY(. SRC) ADD_EXECUTABLE(taosdemo ${SRC}) diff --git a/src/kit/taosdemo/insert-interlace.json b/src/kit/taosdemo/insert-interlace.json index 0f54f008fb..344db4fd00 100644 --- a/src/kit/taosdemo/insert-interlace.json +++ b/src/kit/taosdemo/insert-interlace.json @@ -41,7 +41,7 @@ "insert_mode": "taosc", "insert_rows": 1000, "multi_thread_write_one_tbl": "no", - "rows_per_tbl": 20, + "interlace_rows": 20, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/src/kit/taosdemo/insert.json b/src/kit/taosdemo/insert.json index e6b1895043..f0e3ab1d50 100644 --- a/src/kit/taosdemo/insert.json +++ b/src/kit/taosdemo/insert.json @@ -41,7 +41,7 @@ "insert_mode": "taosc", "insert_rows": 100000, "multi_thread_write_one_tbl": "no", - "rows_per_tbl": 0, + "interlace_rows": 0, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6534196603..7eb55acda8 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -37,10 +37,10 @@ #include #include #include -#else +#else #include #include -#endif +#endif #include #include @@ -69,7 +69,10 @@ enum TEST_MODE { #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) +#define MAX_USERNAME_SIZE 64 +#define MAX_PASSWORD_SIZE 64 #define MAX_DB_NAME_SIZE 64 +#define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE 16000 #define MAX_NUM_DATATYPE 10 @@ -82,11 +85,11 @@ enum TEST_MODE { #define MAX_NUM_DATATYPE 10 #define MAX_DB_COUNT 8 -#define MAX_SUPER_TABLE_COUNT 8 +#define MAX_SUPER_TABLE_COUNT 200 #define MAX_COLUMN_COUNT 1024 #define MAX_TAG_COUNT 128 -#define MAX_QUERY_SQL_COUNT 10 +#define MAX_QUERY_SQL_COUNT 100 #define MAX_QUERY_SQL_LENGTH 256 #define MAX_DATABASE_COUNT 256 @@ -94,12 +97,13 @@ enum TEST_MODE { #define DEFAULT_TIMESTAMP_STEP 1 + typedef enum CREATE_SUB_TALBE_MOD_EN { PRE_CREATE_SUBTBL, AUTO_CREATE_SUBTBL, NO_CREATE_SUBTBL } CREATE_SUB_TALBE_MOD_EN; - + typedef enum TALBE_EXISTS_EN { TBL_NO_EXISTS, TBL_ALREADY_EXISTS, @@ -107,7 +111,7 @@ typedef enum TALBE_EXISTS_EN { } TALBE_EXISTS_EN; enum MODE { - SYNC, + SYNC, ASYNC, MODE_BUT }; @@ -118,11 +122,11 @@ typedef enum enum_INSERT_MODE { INVALID_INSERT_MODE } INSERT_MODE; -enum QUERY_TYPE { +typedef enum enumQUERY_TYPE { NO_INSERT_TYPE, - INSERT_TYPE, + INSERT_TYPE, QUERY_TYPE_BUT -} ; +} QUERY_TYPE; enum _show_db_index { TSDB_SHOW_DB_NAME_INDEX, @@ -130,7 +134,7 @@ enum _show_db_index { TSDB_SHOW_DB_NTABLES_INDEX, TSDB_SHOW_DB_VGROUPS_INDEX, TSDB_SHOW_DB_REPLICA_INDEX, - TSDB_SHOW_DB_QUORUM_INDEX, + TSDB_SHOW_DB_QUORUM_INDEX, TSDB_SHOW_DB_DAYS_INDEX, TSDB_SHOW_DB_KEEP_INDEX, TSDB_SHOW_DB_CACHE_INDEX, @@ -141,7 +145,7 @@ enum _show_db_index { TSDB_SHOW_DB_FSYNC_INDEX, TSDB_SHOW_DB_COMP_INDEX, TSDB_SHOW_DB_CACHELAST_INDEX, - TSDB_SHOW_DB_PRECISION_INDEX, + TSDB_SHOW_DB_PRECISION_INDEX, TSDB_SHOW_DB_UPDATE_INDEX, TSDB_SHOW_DB_STATUS_INDEX, TSDB_MAX_SHOW_DB @@ -152,10 +156,10 @@ enum _show_stables_index { 
TSDB_SHOW_STABLES_NAME_INDEX, TSDB_SHOW_STABLES_CREATED_TIME_INDEX, TSDB_SHOW_STABLES_COLUMNS_INDEX, - TSDB_SHOW_STABLES_METRIC_INDEX, - TSDB_SHOW_STABLES_UID_INDEX, + TSDB_SHOW_STABLES_METRIC_INDEX, + TSDB_SHOW_STABLES_UID_INDEX, TSDB_SHOW_STABLES_TID_INDEX, - TSDB_SHOW_STABLES_VGID_INDEX, + TSDB_SHOW_STABLES_VGID_INDEX, TSDB_MAX_SHOW_STABLES }; @@ -187,6 +191,7 @@ typedef struct SArguments_S { char * tb_prefix; char * sqlFile; bool use_metric; + bool drop_database; bool insert_only; bool answer_yes; bool debug_print; @@ -199,7 +204,8 @@ typedef struct SArguments_S { int num_of_CPR; int num_of_threads; int insert_interval; - int rows_per_tbl; + int query_times; + int interlace_rows; int num_of_RPR; int max_sql_len; int num_of_tables; @@ -217,14 +223,13 @@ typedef struct SColumn_S { char field[TSDB_COL_NAME_LEN + 1]; char dataType[MAX_TB_NAME_SIZE]; int dataLen; - char note[128]; + char note[128]; } StrColumn; typedef struct SSuperTable_S { char sTblName[MAX_TB_NAME_SIZE+1]; int childTblCount; - bool superTblExists; // 0: no, 1: yes - bool childTblExists; // 0: no, 1: yes + bool childTblExists; // 0: no, 1: yes int batchCreateTableNum; // 0: no batch, > 0: batch table number in one sql int8_t autoCreateTable; // 0: create sub table, 1: auto create sub table char childTblPrefix[MAX_TB_NAME_SIZE]; @@ -234,15 +239,15 @@ typedef struct SSuperTable_S { int childTblOffset; int multiThreadWriteOneTbl; // 0: no, 1: yes - int rowsPerTbl; // + int interlaceRows; // int disorderRatio; // 0: no disorder, >0: x% int disorderRange; // ms or us by database precision - int maxSqlLen; // + int maxSqlLen; // int insertInterval; // insert interval, will override global insert interval int64_t insertRows; // 0: no limit int timeStampStep; - char startTimestamp[MAX_TB_NAME_SIZE]; // + char startTimestamp[MAX_TB_NAME_SIZE]; // char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json char sampleFile[MAX_FILE_NAME_LEN+1]; char tagsFile[MAX_FILE_NAME_LEN+1]; @@ -267,7 +272,7 @@ typedef struct SSuperTable_S { int tagSampleCount; int tagUsePos; - // statistics + // statistics int64_t totalInsertRows; int64_t totalAffectedRows; } SSuperTable; @@ -276,10 +281,10 @@ typedef struct { char name[TSDB_DB_NAME_LEN + 1]; char create_time[32]; int32_t ntables; - int32_t vgroups; + int32_t vgroups; int16_t replica; int16_t quorum; - int16_t days; + int16_t days; char keeplist[32]; int32_t cache; //MB int32_t blocks; @@ -294,14 +299,14 @@ typedef struct { char status[16]; } SDbInfo; -typedef struct SDbCfg_S { +typedef struct SDbCfg_S { // int maxtablesPerVnode; - int minRows; + int minRows; int maxRows; int comp; int walLevel; int cacheLast; - int fsync; + int fsync; int replica; int update; int keep; @@ -309,12 +314,12 @@ typedef struct SDbCfg_S { int cache; int blocks; int quorum; - char precision[MAX_TB_NAME_SIZE]; + char precision[MAX_TB_NAME_SIZE]; } SDbCfg; typedef struct SDataBase_S { char dbName[MAX_DB_NAME_SIZE]; - int drop; // 0: use exists, 1: if exists, drop then new create + bool drop; // 0: use exists, 1: if exists, drop then new create SDbCfg dbCfg; int superTblCount; SSuperTable superTbls[MAX_SUPER_TABLE_COUNT]; @@ -322,16 +327,16 @@ typedef struct SDataBase_S { typedef struct SDbs_S { char cfgDir[MAX_FILE_NAME_LEN+1]; - char host[MAX_DB_NAME_SIZE]; + char host[MAX_HOSTNAME_SIZE]; uint16_t port; - char user[MAX_DB_NAME_SIZE]; - char password[MAX_DB_NAME_SIZE]; + char user[MAX_USERNAME_SIZE]; + char password[MAX_PASSWORD_SIZE]; char resultFile[MAX_FILE_NAME_LEN+1]; bool use_metric; bool insert_only; bool do_aggreFunc; 
bool queryMode; - + int threadCount; int threadCountByCreateTbl; int dbCount; @@ -343,68 +348,71 @@ typedef struct SDbs_S { } SDbs; -typedef struct SuperQueryInfo_S { +typedef struct SpecifiedQueryInfo_S { int rate; // 0: unlimit > 0 loop/s int concurrent; int sqlCount; int subscribeMode; // 0: sync, 1: async int subscribeInterval; // ms + int queryTimes; int subscribeRestart; int subscribeKeepProgress; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; -} SuperQueryInfo; +} SpecifiedQueryInfo; -typedef struct SubQueryInfo_S { +typedef struct SuperQueryInfo_S { char sTblName[MAX_TB_NAME_SIZE+1]; int rate; // 0: unlimit > 0 loop/s - int threadCnt; + int threadCnt; int subscribeMode; // 0: sync, 1: async int subscribeInterval; // ms int subscribeRestart; int subscribeKeepProgress; + int queryTimes; int childTblCount; char childTblPrefix[MAX_TB_NAME_SIZE]; int sqlCount; - char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; + char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; - + char* childTblName; -} SubQueryInfo; +} SuperQueryInfo; typedef struct SQueryMetaInfo_S { char cfgDir[MAX_FILE_NAME_LEN+1]; - char host[MAX_DB_NAME_SIZE]; + char host[MAX_HOSTNAME_SIZE]; uint16_t port; - char user[MAX_DB_NAME_SIZE]; - char password[MAX_DB_NAME_SIZE]; + char user[MAX_USERNAME_SIZE]; + char password[MAX_PASSWORD_SIZE]; char dbName[MAX_DB_NAME_SIZE+1]; char queryMode[MAX_TB_NAME_SIZE]; // taosc, restful - SuperQueryInfo superQueryInfo; - SubQueryInfo subQueryInfo; + SpecifiedQueryInfo specifiedQueryInfo; + SuperQueryInfo superQueryInfo; } SQueryMetaInfo; typedef struct SThreadInfo_S { TAOS *taos; int threadID; char db_name[MAX_DB_NAME_SIZE+1]; + uint32_t time_precision; char fp[4096]; char tb_prefix[MAX_TB_NAME_SIZE]; int start_table_from; int end_table_to; int ntables; int data_of_rate; - uint64_t start_time; - char* cols; - bool use_metric; + uint64_t start_time; + char* cols; + bool use_metric; SSuperTable* superTblInfo; // for async insert tsem_t lock_sem; - int64_t counter; + int64_t counter; uint64_t st; uint64_t et; int64_t lastTs; @@ -421,7 +429,7 @@ typedef struct SThreadInfo_S { int64_t avgDelay; int64_t maxDelay; int64_t minDelay; - + } threadInfo; #ifdef WINDOWS @@ -460,12 +468,12 @@ static void setupForAnsiEscape(void) { if(!SetConsoleMode(g_stdoutHandle, mode)) { exit(GetLastError()); - } + } } static void resetAfterAnsiEscape(void) { // Reset colors - printf("\x1b[0m"); + printf("\x1b[0m"); // Reset console mode if(!SetConsoleMode(g_stdoutHandle, g_consoleMode)) { @@ -488,16 +496,19 @@ static void resetAfterAnsiEscape(void) { printf("\x1b[0m"); } +#include + static int taosRandom() { - return random(); + srand(time(NULL)); + return rand(); } #endif -static int createDatabases(); +static int createDatabasesAndStables(); static void createChildTables(); -static int queryDbExec(TAOS *taos, char *command, int type); +static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet); /* ************ Global variables ************ */ @@ -505,7 +516,7 @@ int32_t randint[MAX_PREPARED_RAND]; int64_t randbigint[MAX_PREPARED_RAND]; float randfloat[MAX_PREPARED_RAND]; double randdouble[MAX_PREPARED_RAND]; -char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", +char *aggreFunc[] = {"*", "count(*)", "avg(col0)", "sum(col0)", 
"max(col0)", "min(col0)", "first(col0)", "last(col0)"}; SArguments g_args = { @@ -514,7 +525,7 @@ SArguments g_args = { "127.0.0.1", // host 6030, // port "root", // user - #ifdef _TD_POWER_ + #ifdef _TD_POWER_ "powerdb", // password #else "taosdata", // password @@ -524,10 +535,11 @@ SArguments g_args = { "t", // tb_prefix NULL, // sqlFile true, // use_metric + true, // drop_database true, // insert_only false, // debug_print false, // verbose_print - false, // performance statistic print + false, // performance statistic print false, // answer_yes; "./output.txt", // output_file 0, // mode : sync or async @@ -547,7 +559,8 @@ SArguments g_args = { 10, // num_of_CPR 10, // num_of_connections/thread 0, // insert_interval - 0, // rows_per_tbl; + 1, // query_times + 0, // interlace_rows; 100, // num_of_RPR TSDB_PAYLOAD_SIZE, // max_sql_len 10000, // num_of_tables @@ -586,64 +599,92 @@ static FILE * g_fpOfInsertResult = NULL; static void ERROR_EXIT(const char *msg) { perror(msg); exit(-1); } +#ifndef TAOSDEMO_COMMIT_SHA1 +#define TAOSDEMO_COMMIT_SHA1 "unknown" +#endif + +#ifndef TD_VERNUMBER +#define TD_VERNUMBER "unknown" +#endif + +#ifndef TAOSDEMO_STATUS +#define TAOSDEMO_STATUS "unknown" +#endif + +static void printVersion() { + char tdengine_ver[] = TD_VERNUMBER; + char taosdemo_ver[] = TAOSDEMO_COMMIT_SHA1; + char taosdemo_status[] = TAOSDEMO_STATUS; + + if (strlen(taosdemo_status) == 0) { + printf("taosdemo verison %s-%s\n", + tdengine_ver, taosdemo_ver); + } else { + printf("taosdemo verison %s-%s, status:%s\n", + tdengine_ver, taosdemo_ver, taosdemo_status); + } +} + static void printHelp() { char indent[10] = " "; - printf("%s%s%s%s\n", indent, "-f", indent, + printf("%s%s%s%s\n", indent, "-f", indent, "The meta file to the execution procedure. Default is './meta.json'."); - printf("%s%s%s%s\n", indent, "-u", indent, + printf("%s%s%s%s\n", indent, "-u", indent, "The TDengine user name to use when connecting to the server. Default is 'root'."); #ifdef _TD_POWER_ - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-P", indent, "The password to use when connecting to the server. Default is 'powerdb'."); - printf("%s%s%s%s\n", indent, "-c", indent, + printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/power/'."); #else - printf("%s%s%s%s\n", indent, "-P", indent, + printf("%s%s%s%s\n", indent, "-P", indent, "The password to use when connecting to the server. Default is 'taosdata'."); - printf("%s%s%s%s\n", indent, "-c", indent, + printf("%s%s%s%s\n", indent, "-c", indent, "Configuration directory. Default is '/etc/taos/'."); -#endif - printf("%s%s%s%s\n", indent, "-h", indent, +#endif + printf("%s%s%s%s\n", indent, "-h", indent, "The host to connect to TDengine. Default is localhost."); - printf("%s%s%s%s\n", indent, "-p", indent, + printf("%s%s%s%s\n", indent, "-p", indent, "The TCP/IP port number to use for the connection. Default is 0."); - printf("%s%s%s%s\n", indent, "-d", indent, + printf("%s%s%s%s\n", indent, "-d", indent, "Destination database. Default is 'test'."); - printf("%s%s%s%s\n", indent, "-a", indent, + printf("%s%s%s%s\n", indent, "-a", indent, "Set the replica parameters of the database, Default 1, min: 1, max: 3."); - printf("%s%s%s%s\n", indent, "-m", indent, + printf("%s%s%s%s\n", indent, "-m", indent, "Table prefix name. 
Default is 't'."); printf("%s%s%s%s\n", indent, "-s", indent, "The select sql file."); printf("%s%s%s%s\n", indent, "-N", indent, "Use normal table flag."); - printf("%s%s%s%s\n", indent, "-o", indent, + printf("%s%s%s%s\n", indent, "-o", indent, "Direct output to the named file. Default is './output.txt'."); - printf("%s%s%s%s\n", indent, "-q", indent, + printf("%s%s%s%s\n", indent, "-q", indent, "Query mode--0: SYNC, 1: ASYNC. Default is SYNC."); - printf("%s%s%s%s\n", indent, "-b", indent, + printf("%s%s%s%s\n", indent, "-b", indent, "The data_type of columns, default: TINYINT,SMALLINT,INT,BIGINT,FLOAT,DOUBLE,BINARY,NCHAR,BOOL,TIMESTAMP."); - printf("%s%s%s%s\n", indent, "-w", indent, + printf("%s%s%s%s\n", indent, "-w", indent, "The length of data_type 'BINARY' or 'NCHAR'. Default is 16"); - printf("%s%s%s%s\n", indent, "-l", indent, + printf("%s%s%s%s\n", indent, "-l", indent, "The number of columns per record. Default is 10."); - printf("%s%s%s%s\n", indent, "-T", indent, + printf("%s%s%s%s\n", indent, "-T", indent, "The number of threads. Default is 10."); - printf("%s%s%s%s\n", indent, "-i", indent, + printf("%s%s%s%s\n", indent, "-i", indent, "The sleep time (ms) between insertion. Default is 0."); - printf("%s%s%s%s\n", indent, "-r", indent, + printf("%s%s%s%s\n", indent, "-r", indent, "The number of records per request. Default is 100."); - printf("%s%s%s%s\n", indent, "-t", indent, + printf("%s%s%s%s\n", indent, "-t", indent, "The number of tables. Default is 10000."); - printf("%s%s%s%s\n", indent, "-n", indent, + printf("%s%s%s%s\n", indent, "-n", indent, "The number of records per table. Default is 10000."); printf("%s%s%s%s\n", indent, "-x", indent, "Not insert only flag."); printf("%s%s%s%s\n", indent, "-y", indent, "Default input yes for prompt."); - printf("%s%s%s%s\n", indent, "-O", indent, + printf("%s%s%s%s\n", indent, "-O", indent, "Insert mode--0: In order, > 0: disorder ratio. Default is in order."); - printf("%s%s%s%s\n", indent, "-R", indent, + printf("%s%s%s%s\n", indent, "-R", indent, "Out of order data's range, ms, default is 1000."); - printf("%s%s%s%s\n", indent, "-g", indent, + printf("%s%s%s%s\n", indent, "-g", indent, "Print debug info."); -/* printf("%s%s%s%s\n", indent, "-D", indent, + printf("%s%s%s%s\n", indent, "-V, --version", indent, + "Print version info."); +/* printf("%s%s%s%s\n", indent, "-D", indent, "if elete database if exists. 
0: no, 1: yes, default is 1"); */ } @@ -663,6 +704,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } taos_options(TSDB_OPTION_CONFIGDIR, full_path.we_wordv[0]); wordfree(&full_path); + } else if (strcmp(argv[i], "-h") == 0) { arguments->host = argv[++i]; } else if (strcmp(argv[i], "-p") == 0) { @@ -681,8 +723,10 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->num_of_threads = atoi(argv[++i]); } else if (strcmp(argv[i], "-i") == 0) { arguments->insert_interval = atoi(argv[++i]); + } else if (strcmp(argv[i], "-qt") == 0) { + arguments->query_times = atoi(argv[++i]); } else if (strcmp(argv[i], "-B") == 0) { - arguments->rows_per_tbl = atoi(argv[++i]); + arguments->interlace_rows = atoi(argv[++i]); } else if (strcmp(argv[i], "-r") == 0) { arguments->num_of_RPR = atoi(argv[++i]); } else if (strcmp(argv[i], "-t") == 0) { @@ -705,7 +749,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { && strcasecmp(argv[i], "SMALLINT") && strcasecmp(argv[i], "BIGINT") && strcasecmp(argv[i], "DOUBLE") - && strcasecmp(argv[i], "BINARY") + && strcasecmp(argv[i], "BINARY") && strcasecmp(argv[i], "NCHAR")) { printHelp(); ERROR_EXIT( "Invalid data_type!\n"); @@ -758,7 +802,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { strcpy(configDir, argv[++i]); } else if (strcmp(argv[i], "-O") == 0) { arguments->disorderRatio = atoi(argv[++i]); - if (arguments->disorderRatio > 1 + if (arguments->disorderRatio > 1 || arguments->disorderRatio < 0) { arguments->disorderRatio = 0; } else if (arguments->disorderRatio == 1) { @@ -766,8 +810,8 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { } } else if (strcmp(argv[i], "-R") == 0) { arguments->disorderRange = atoi(argv[++i]); - if (arguments->disorderRange == 1 - && (arguments->disorderRange > 50 + if (arguments->disorderRange == 1 + && (arguments->disorderRange > 50 || arguments->disorderRange <= 0)) { arguments->disorderRange = 10; } @@ -782,6 +826,10 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { || arguments->method_of_delete > 3) { arguments->method_of_delete = 0; } + } else if ((strcmp(argv[i], "--version") == 0) || + (strcmp(argv[i], "-V") == 0)){ + printVersion(); + exit(0); } else if (strcmp(argv[i], "--help") == 0) { printHelp(); exit(0); @@ -796,7 +844,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { || arguments->verbose_print) { printf("###################################################################\n"); printf("# meta file: %s\n", arguments->metaFile); - printf("# Server IP: %s:%hu\n", + printf("# Server IP: %s:%hu\n", arguments->host == NULL ? 
"localhost" : arguments->host, arguments->port ); printf("# User: %s\n", arguments->user); @@ -823,7 +871,7 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { if (arguments->disorderRatio) { printf("# Data order: %d\n", arguments->disorderRatio); printf("# Data out of order rate: %d\n", arguments->disorderRange); - + } printf("# Delete method: %d\n", arguments->method_of_delete); printf("# Answer yes when prompt: %d\n", arguments->answer_yes); @@ -853,7 +901,7 @@ static void tmfree(char *buf) { } } -static int queryDbExec(TAOS *taos, char *command, int type) { +static int queryDbExec(TAOS *taos, char *command, QUERY_TYPE type, bool quiet) { int i; TAOS_RES *res = NULL; int32_t code = -1; @@ -868,12 +916,14 @@ static int queryDbExec(TAOS *taos, char *command, int type) { code = taos_errno(res); if (0 == code) { break; - } + } } if (code != 0) { - debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); - errorPrint( "Failed to run %s, reason: %s\n", command, taos_errstr(res)); + if (!quiet) { + debugPrint("%s() LN%d - command: %s\n", __func__, __LINE__, command); + errorPrint("Failed to run %s, reason: %s\n", command, taos_errstr(res)); + } taos_free_result(res); //taos_close(taos); return -1; @@ -884,12 +934,12 @@ static int queryDbExec(TAOS *taos, char *command, int type) { taos_free_result(res); return affectedRows; } - + taos_free_result(res); return 0; } -static void getResult(TAOS_RES *res, char* resultFileName) { +static void getResult(TAOS_RES *res, char* resultFileName) { TAOS_ROW row = NULL; int num_rows = 0; int num_fields = taos_field_count(res); @@ -899,13 +949,15 @@ static void getResult(TAOS_RES *res, char* resultFileName) { if (resultFileName[0] != 0) { fp = fopen(resultFileName, "at"); if (fp == NULL) { - errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n", __func__, __LINE__, resultFileName); + errorPrint("%s() LN%d, failed to open result file: %s, result will not save to file\n", + __func__, __LINE__, resultFileName); } } - + char* databuf = (char*) calloc(1, 100*1024*1024); if (databuf == NULL) { - errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to malloc, warning: save result to file slowly!\n", + __func__, __LINE__); if (fp) fclose(fp); return ; @@ -941,7 +993,7 @@ static void selectAndGetResult(TAOS *taos, char *command, char* resultFileName) taos_free_result(res); return; } - + getResult(res, resultFileName); taos_free_result(res); } @@ -989,14 +1041,13 @@ static int64_t rand_bigint(){ cursor++; cursor = cursor % MAX_PREPARED_RAND; return randbigint[cursor]; - } static float rand_float(){ static int cursor; cursor++; cursor = cursor % MAX_PREPARED_RAND; - return randfloat[cursor]; + return randfloat[cursor]; } static const char charset[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"; @@ -1059,18 +1110,19 @@ static int printfInsertMeta() { printf("resultFile: \033[33m%s\033[0m\n", g_Dbs.resultFile); printf("thread num of insert data: \033[33m%d\033[0m\n", g_Dbs.threadCount); printf("thread num of create table: \033[33m%d\033[0m\n", g_Dbs.threadCountByCreateTbl); - printf("insert interval: \033[33m%d\033[0m\n", g_args.insert_interval); + printf("top insert interval: \033[33m%d\033[0m\n", g_args.insert_interval); printf("number of records per req: \033[33m%d\033[0m\n", g_args.num_of_RPR); printf("max sql length: \033[33m%d\033[0m\n", g_args.max_sql_len); printf("database count: 
\033[33m%d\033[0m\n", g_Dbs.dbCount); + for (int i = 0; i < g_Dbs.dbCount; i++) { printf("database[\033[33m%d\033[0m]:\n", i); printf(" database[%d] name: \033[33m%s\033[0m\n", i, g_Dbs.db[i].dbName); if (0 == g_Dbs.db[i].drop) { - printf(" drop: \033[33mno\033[0m\n"); - }else { - printf(" drop: \033[33myes\033[0m\n"); + printf(" drop: \033[33mno\033[0m\n"); + } else { + printf(" drop: \033[33myes\033[0m\n"); } if (g_Dbs.db[i].dbCfg.blocks > 0) { @@ -1123,8 +1175,8 @@ static int printfInsertMeta() { printf(" super table count: \033[33m%d\033[0m\n", g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { printf(" super table[\033[33m%d\033[0m]:\n", j); - - printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); + + printf(" stbName: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].sTblName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { printf(" autoCreateTable: \033[33m%s\033[0m\n", "no"); @@ -1133,7 +1185,7 @@ static int printfInsertMeta() { } else { printf(" autoCreateTable: \033[33m%s\033[0m\n", "error"); } - + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { printf(" childTblExists: \033[33m%s\033[0m\n", "no"); } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { @@ -1142,25 +1194,38 @@ static int printfInsertMeta() { printf(" childTblExists: \033[33m%s\033[0m\n", "error"); } - printf(" childTblCount: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblCount); - printf(" childTblPrefix: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].childTblPrefix); - printf(" dataSource: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].dataSource); - printf(" insertMode: \033[33m%s\033[0m\n", g_Dbs.db[i].superTbls[j].insertMode); + printf(" childTblCount: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblCount); + printf(" childTblPrefix: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblPrefix); + printf(" dataSource: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].dataSource); + printf(" insertMode: \033[33m%s\033[0m\n", + g_Dbs.db[i].superTbls[j].insertMode); if (g_Dbs.db[i].superTbls[j].childTblLimit > 0) { - printf(" childTblLimit: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblLimit); + printf(" childTblLimit: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblLimit); } if (g_Dbs.db[i].superTbls[j].childTblOffset >= 0) { - printf(" childTblOffset: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].childTblOffset); + printf(" childTblOffset: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].childTblOffset); } - printf(" insertRows: \033[33m%"PRId64"\033[0m\n", g_Dbs.db[i].superTbls[j].insertRows); + printf(" insertRows: \033[33m%"PRId64"\033[0m\n", + g_Dbs.db[i].superTbls[j].insertRows); if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { printf(" multiThreadWriteOneTbl: \033[33mno\033[0m\n"); }else { printf(" multiThreadWriteOneTbl: \033[33myes\033[0m\n"); } - printf(" rowsPerTbl: \033[33m%d\033[0m\n", - g_Dbs.db[i].superTbls[j].rowsPerTbl); + printf(" interlaceRows: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + + if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { + printf(" stable insert interval: \033[33m%d\033[0m\n", + g_Dbs.db[i].superTbls[j].insertInterval); + } + printf(" disorderRange: \033[33m%d\033[0m\n", g_Dbs.db[i].superTbls[j].disorderRange); printf(" disorderRatio: \033[33m%d\033[0m\n", @@ -1199,8 +1264,10 @@ static int printfInsertMeta() { g_Dbs.db[i].superTbls[j].tagCount); for (int k = 0; k < 
g_Dbs.db[i].superTbls[j].tagCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); - if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "binary", 6)) - || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, "nchar", 5))) { + if ((0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "binary", strlen("binary"))) + || (0 == strncasecmp(g_Dbs.db[i].superTbls[j].tags[k].dataType, + "nchar", strlen("nchar")))) { printf("tag[%d]:\033[33m%s(%d)\033[0m ", k, g_Dbs.db[i].superTbls[j].tags[k].dataType, g_Dbs.db[i].superTbls[j].tags[k].dataLen); @@ -1220,23 +1287,25 @@ static int printfInsertMeta() { } static void printfInsertMetaToFile(FILE* fp) { - SHOW_PARSE_RESULT_START_TO_FILE(fp); + + SHOW_PARSE_RESULT_START_TO_FILE(fp); fprintf(fp, "host: %s:%u\n", g_Dbs.host, g_Dbs.port); fprintf(fp, "user: %s\n", g_Dbs.user); - fprintf(fp, "password: %s\n", g_Dbs.password); fprintf(fp, "resultFile: %s\n", g_Dbs.resultFile); fprintf(fp, "thread num of insert data: %d\n", g_Dbs.threadCount); fprintf(fp, "thread num of create table: %d\n", g_Dbs.threadCountByCreateTbl); - + fprintf(fp, "number of records per req: %d\n", g_args.num_of_RPR); + fprintf(fp, "max sql length: %d\n", g_args.max_sql_len); fprintf(fp, "database count: %d\n", g_Dbs.dbCount); + for (int i = 0; i < g_Dbs.dbCount; i++) { fprintf(fp, "database[%d]:\n", i); fprintf(fp, " database[%d] name: %s\n", i, g_Dbs.db[i].dbName); if (0 == g_Dbs.db[i].drop) { - fprintf(fp, " drop: no\n"); + fprintf(fp, " drop: no\n"); }else { - fprintf(fp, " drop: yes\n"); + fprintf(fp, " drop: yes\n"); } if (g_Dbs.db[i].dbCfg.blocks > 0) { @@ -1287,8 +1356,8 @@ static void printfInsertMetaToFile(FILE* fp) { fprintf(fp, " super table count: %d\n", g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { fprintf(fp, " super table[%d]:\n", j); - - fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName); + + fprintf(fp, " stbName: %s\n", g_Dbs.db[i].superTbls[j].sTblName); if (PRE_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable) { fprintf(fp, " autoCreateTable: %s\n", "no"); @@ -1297,7 +1366,7 @@ static void printfInsertMetaToFile(FILE* fp) { } else { fprintf(fp, " autoCreateTable: %s\n", "error"); } - + if (TBL_NO_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { fprintf(fp, " childTblExists: %s\n", "no"); } else if (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists) { @@ -1305,30 +1374,41 @@ static void printfInsertMetaToFile(FILE* fp) { } else { fprintf(fp, " childTblExists: %s\n", "error"); } - - fprintf(fp, " childTblCount: %d\n", g_Dbs.db[i].superTbls[j].childTblCount); - fprintf(fp, " childTblPrefix: %s\n", g_Dbs.db[i].superTbls[j].childTblPrefix); - fprintf(fp, " dataSource: %s\n", g_Dbs.db[i].superTbls[j].dataSource); - fprintf(fp, " insertMode: %s\n", g_Dbs.db[i].superTbls[j].insertMode); - fprintf(fp, " insertRows: %"PRId64"\n", g_Dbs.db[i].superTbls[j].insertRows); - fprintf(fp, " insert interval: %d\n", g_Dbs.db[i].superTbls[j].insertInterval); + + fprintf(fp, " childTblCount: %d\n", + g_Dbs.db[i].superTbls[j].childTblCount); + fprintf(fp, " childTblPrefix: %s\n", + g_Dbs.db[i].superTbls[j].childTblPrefix); + fprintf(fp, " dataSource: %s\n", + g_Dbs.db[i].superTbls[j].dataSource); + fprintf(fp, " insertMode: %s\n", + g_Dbs.db[i].superTbls[j].insertMode); + fprintf(fp, " insertRows: %"PRId64"\n", + g_Dbs.db[i].superTbls[j].insertRows); + fprintf(fp, " interlace rows: %d\n", + 
g_Dbs.db[i].superTbls[j].interlaceRows); + if (g_Dbs.db[i].superTbls[j].interlaceRows > 0) { + fprintf(fp, " stable insert interval: %d\n", + g_Dbs.db[i].superTbls[j].insertInterval); + } if (0 == g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl) { - fprintf(fp, " multiThreadWriteOneTbl: no\n"); + fprintf(fp, " multiThreadWriteOneTbl: no\n"); }else { - fprintf(fp, " multiThreadWriteOneTbl: yes\n"); + fprintf(fp, " multiThreadWriteOneTbl: yes\n"); } - fprintf(fp, " rowsPerTbl: %d\n", g_Dbs.db[i].superTbls[j].rowsPerTbl); - fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); + fprintf(fp, " interlaceRows: %d\n", + g_Dbs.db[i].superTbls[j].interlaceRows); + fprintf(fp, " disorderRange: %d\n", g_Dbs.db[i].superTbls[j].disorderRange); fprintf(fp, " disorderRatio: %d\n", g_Dbs.db[i].superTbls[j].disorderRatio); - fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); - - fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep); - fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); + fprintf(fp, " maxSqlLen: %d\n", g_Dbs.db[i].superTbls[j].maxSqlLen); + + fprintf(fp, " timeStampStep: %d\n", g_Dbs.db[i].superTbls[j].timeStampStep); + fprintf(fp, " startTimestamp: %s\n", g_Dbs.db[i].superTbls[j].startTimestamp); fprintf(fp, " sampleFormat: %s\n", g_Dbs.db[i].superTbls[j].sampleFormat); - fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); - fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); - + fprintf(fp, " sampleFile: %s\n", g_Dbs.db[i].superTbls[j].sampleFile); + fprintf(fp, " tagsFile: %s\n", g_Dbs.db[i].superTbls[j].tagsFile); + fprintf(fp, " columnCount: %d\n ", g_Dbs.db[i].superTbls[j].columnCount); for (int k = 0; k < g_Dbs.db[i].superTbls[j].columnCount; k++) { //printf("dataType:%s, dataLen:%d\t", g_Dbs.db[i].superTbls[j].columns[k].dataType, g_Dbs.db[i].superTbls[j].columns[k].dataLen); @@ -1364,22 +1444,45 @@ static void printfInsertMetaToFile(FILE* fp) { } fprintf(fp, "\n"); } + SHOW_PARSE_RESULT_END_TO_FILE(fp); } static void printfQueryMeta() { + SHOW_PARSE_RESULT_START(); + printf("host: \033[33m%s:%u\033[0m\n", g_queryInfo.host, g_queryInfo.port); printf("user: \033[33m%s\033[0m\n", g_queryInfo.user); - printf("password: \033[33m%s\033[0m\n", g_queryInfo.password); printf("database name: \033[33m%s\033[0m\n", g_queryInfo.dbName); printf("\n"); - printf("specified table query info: \n"); + printf("specified table query info: \n"); + printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.rate); + printf("top query times:\033[33m%d\033[0m\n", g_args.query_times); + printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.concurrent); + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.sqlCount); + printf("specified tbl query times:\n"); + printf(" \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.queryTimes); + + if (SUBSCRIBE_TEST == g_args.test_mode) { + printf("mod: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeMode); + printf("interval: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeInterval); + printf("restart: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeRestart); + printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + } + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.specifiedQueryInfo.sql[i]); + } + printf("\n"); + 
printf("super table query info: \n"); printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.rate); - printf("concurrent: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.concurrent); - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); + printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.threadCnt); + printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.childTblCount); + printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.superQueryInfo.sTblName); + printf("stb query times:\033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.queryTimes); if (SUBSCRIBE_TEST == g_args.test_mode) { printf("mod: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeMode); @@ -1388,34 +1491,16 @@ static void printfQueryMeta() { printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.subscribeKeepProgress); } + printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.superQueryInfo.sqlCount); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.superQueryInfo.sql[i]); } printf("\n"); - printf("super table query info: \n"); - printf("query interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.rate); - printf("threadCnt: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.threadCnt); - printf("childTblCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.childTblCount); - printf("stable name: \033[33m%s\033[0m\n", g_queryInfo.subQueryInfo.sTblName); - if (SUBSCRIBE_TEST == g_args.test_mode) { - printf("mod: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeMode); - printf("interval: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeInterval); - printf("restart: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeRestart); - printf("keepProgress: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.subscribeKeepProgress); - } - - printf("sqlCount: \033[33m%d\033[0m\n", g_queryInfo.subQueryInfo.sqlCount); - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - printf(" sql[%d]: \033[33m%s\033[0m\n", i, g_queryInfo.subQueryInfo.sql[i]); - } - printf("\n"); - - SHOW_PARSE_RESULT_END(); + SHOW_PARSE_RESULT_END(); } - -static char* xFormatTimestamp(char* buf, int64_t val, int precision) { +static char* formatTimestamp(char* buf, int64_t val, int precision) { time_t tt; if (precision == TSDB_TIME_PRECISION_MICRO) { tt = (time_t)(val / 1000000); @@ -1447,7 +1532,9 @@ static char* xFormatTimestamp(char* buf, int64_t val, int precision) { return buf; } -static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32_t length, int precision) { +static void xDumpFieldToFile(FILE* fp, const char* val, + TAOS_FIELD* field, int32_t length, int precision) { + if (val == NULL) { fprintf(fp, "%s", TSDB_DATA_NULL_STR); return; @@ -1483,7 +1570,7 @@ static void xDumpFieldToFile(FILE* fp, const char* val, TAOS_FIELD* field, int32 fprintf(fp, "\'%s\'", buf); break; case TSDB_DATA_TYPE_TIMESTAMP: - xFormatTimestamp(buf, *(int64_t*)val, precision); + formatTimestamp(buf, *(int64_t*)val, precision); fprintf(fp, "'%s'", buf); break; default: @@ -1514,7 +1601,7 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { fprintf(fp, "%s", fields[col].name); } fputc('\n', fp); - + int numOfRows = 0; do { int32_t* length = taos_fetch_lengths(tres); @@ -1535,14 +1622,14 @@ static int xDumpResultToFile(const char* fname, TAOS_RES* tres) { return numOfRows; } -static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { - TAOS_RES * res; +static 
int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { + TAOS_RES * res; TAOS_ROW row = NULL; int count = 0; - - res = taos_query(taos, "show databases;"); + + res = taos_query(taos, "show databases;"); int32_t code = taos_errno(res); - + if (code != 0) { errorPrint( "failed to run , reason: %s\n", taos_errstr(res)); return -1; @@ -1552,7 +1639,10 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { while ((row = taos_fetch_row(res)) != NULL) { // sys database name : 'log' - if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) continue; + if (strncasecmp(row[TSDB_SHOW_DB_NAME_INDEX], "log", + fields[TSDB_SHOW_DB_NAME_INDEX].bytes) == 0) { + continue; + } dbInfos[count] = (SDbInfo *)calloc(1, sizeof(SDbInfo)); if (dbInfos[count] == NULL) { @@ -1562,17 +1652,17 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { tstrncpy(dbInfos[count]->name, (char *)row[TSDB_SHOW_DB_NAME_INDEX], fields[TSDB_SHOW_DB_NAME_INDEX].bytes); - xFormatTimestamp(dbInfos[count]->create_time, + formatTimestamp(dbInfos[count]->create_time, *(int64_t*)row[TSDB_SHOW_DB_CREATED_TIME_INDEX], TSDB_TIME_PRECISION_MILLI); dbInfos[count]->ntables = *((int32_t *)row[TSDB_SHOW_DB_NTABLES_INDEX]); - dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); + dbInfos[count]->vgroups = *((int32_t *)row[TSDB_SHOW_DB_VGROUPS_INDEX]); dbInfos[count]->replica = *((int16_t *)row[TSDB_SHOW_DB_REPLICA_INDEX]); dbInfos[count]->quorum = *((int16_t *)row[TSDB_SHOW_DB_QUORUM_INDEX]); - dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); + dbInfos[count]->days = *((int16_t *)row[TSDB_SHOW_DB_DAYS_INDEX]); tstrncpy(dbInfos[count]->keeplist, (char *)row[TSDB_SHOW_DB_KEEP_INDEX], - fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); + fields[TSDB_SHOW_DB_KEEP_INDEX].bytes); dbInfos[count]->cache = *((int32_t *)row[TSDB_SHOW_DB_CACHE_INDEX]); dbInfos[count]->blocks = *((int32_t *)row[TSDB_SHOW_DB_BLOCKS_INDEX]); dbInfos[count]->minrows = *((int32_t *)row[TSDB_SHOW_DB_MINROWS_INDEX]); @@ -1582,16 +1672,16 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { dbInfos[count]->comp = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_COMP_INDEX])); dbInfos[count]->cachelast = (int8_t)(*((int8_t *)row[TSDB_SHOW_DB_CACHELAST_INDEX])); - tstrncpy(dbInfos[count]->precision, + tstrncpy(dbInfos[count]->precision, (char *)row[TSDB_SHOW_DB_PRECISION_INDEX], - fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); + fields[TSDB_SHOW_DB_PRECISION_INDEX].bytes); dbInfos[count]->update = *((int8_t *)row[TSDB_SHOW_DB_UPDATE_INDEX]); tstrncpy(dbInfos[count]->status, (char *)row[TSDB_SHOW_DB_STATUS_INDEX], - fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); + fields[TSDB_SHOW_DB_STATUS_INDEX].bytes); count++; if (count > MAX_DATABASE_COUNT) { - errorPrint( "The database count overflow than %d\n", MAX_DATABASE_COUNT); + errorPrint( "The database count overflow than %d\n", MAX_DATABASE_COUNT); break; } } @@ -1599,7 +1689,8 @@ static int getDbFromServer(TAOS * taos, SDbInfo** dbInfos) { return count; } -static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int index) { +static void printfDbInfoForQueryToFile( + char* filename, SDbInfo* dbInfos, int index) { if (filename[0] == 0) return; @@ -1613,11 +1704,11 @@ static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int ind fprintf(fp, "name: %s\n", dbInfos->name); fprintf(fp, "created_time: %s\n", dbInfos->create_time); fprintf(fp, "ntables: %d\n", dbInfos->ntables); - fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); + 
fprintf(fp, "vgroups: %d\n", dbInfos->vgroups); fprintf(fp, "replica: %d\n", dbInfos->replica); fprintf(fp, "quorum: %d\n", dbInfos->quorum); - fprintf(fp, "days: %d\n", dbInfos->days); - fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); + fprintf(fp, "days: %d\n", dbInfos->days); + fprintf(fp, "keep0,keep1,keep(D): %s\n", dbInfos->keeplist); fprintf(fp, "cache(MB): %d\n", dbInfos->cache); fprintf(fp, "blocks: %d\n", dbInfos->blocks); fprintf(fp, "minrows: %d\n", dbInfos->minrows); @@ -1625,10 +1716,10 @@ static void printfDbInfoForQueryToFile(char* filename, SDbInfo* dbInfos, int ind fprintf(fp, "wallevel: %d\n", dbInfos->wallevel); fprintf(fp, "fsync: %d\n", dbInfos->fsync); fprintf(fp, "comp: %d\n", dbInfos->comp); - fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); - fprintf(fp, "precision: %s\n", dbInfos->precision); + fprintf(fp, "cachelast: %d\n", dbInfos->cachelast); + fprintf(fp, "precision: %s\n", dbInfos->precision); fprintf(fp, "update: %d\n", dbInfos->update); - fprintf(fp, "status: %s\n", dbInfos->status); + fprintf(fp, "status: %s\n", dbInfos->status); fprintf(fp, "\n"); fclose(fp); @@ -1671,14 +1762,14 @@ static void printfQuerySystemInfo(TAOS * taos) { } for (int i = 0; i < dbCount; i++) { - // printf database info + // printf database info printfDbInfoForQueryToFile(filename, dbInfos[i], i); // show db.vgroups snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.vgroups;", dbInfos[i]->name); res = taos_query(taos, buffer); xDumpResultToFile(filename, res); - + // show db.stables snprintf(buffer, MAX_QUERY_SQL_LENGTH, "show %s.stables;", dbInfos[i]->name); res = taos_query(taos, buffer); @@ -1688,7 +1779,6 @@ static void printfQuerySystemInfo(TAOS * taos) { } free(dbInfos); - } static int postProceSql(char* host, uint16_t port, char* sqlstr) @@ -1792,7 +1882,8 @@ static int postProceSql(char* host, uint16_t port, char* sqlstr) for (int l = 0; l < mod_table[userpass_buf_len % 3]; l++) base64_buf[encoded_len - 1 - l] = '='; - debugPrint("%s() LN%d: auth string base64 encoded: %s\n", __func__, __LINE__, base64_buf); + debugPrint("%s() LN%d: auth string base64 encoded: %s\n", + __func__, __LINE__, base64_buf); char *auth = base64_buf; int r = snprintf(request_buf, @@ -1871,7 +1962,7 @@ static char* getTagValueFromTagSample(SSuperTable* stbInfo, int tagUsePos) { return dataBuf; } -static char* generateTagVaulesForStb(SSuperTable* stbInfo) { +static char* generateTagVaulesForStb(SSuperTable* stbInfo, int32_t tableSeq) { char* dataBuf = (char*)calloc(TSDB_MAX_SQL_LEN+1, 1); if (NULL == dataBuf) { printf("calloc failed! size:%d\n", TSDB_MAX_SQL_LEN+1); @@ -1890,20 +1981,27 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo) { return NULL; } - char* buf = (char*)calloc(stbInfo->tags[i].dataLen+1, 1); + int tagBufLen = stbInfo->tags[i].dataLen + 1; + char* buf = (char*)calloc(tagBufLen, 1); if (NULL == buf) { printf("calloc failed! 
size:%d\n", stbInfo->tags[i].dataLen); tmfree(dataBuf); return NULL; } - rand_string(buf, stbInfo->tags[i].dataLen); + + if (tableSeq % 2) { + tstrncpy(buf, "beijing", tagBufLen); + } else { + tstrncpy(buf, "shanghai", tagBufLen); + } + //rand_string(buf, stbInfo->tags[i].dataLen); dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, "\'%s\', ", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "int", strlen("int"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, - "%d, ", rand_int()); + "%d, ", tableSeq); } else if (0 == strncasecmp(stbInfo->tags[i].dataType, "bigint", strlen("bigint"))) { dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, @@ -1940,17 +2038,17 @@ static char* generateTagVaulesForStb(SSuperTable* stbInfo) { } dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); + dataLen += snprintf(dataBuf + dataLen, TSDB_MAX_SQL_LEN - dataLen, ")"); return dataBuf; } -static int calcRowLen(SSuperTable* superTbls) { +static int calcRowLen(SSuperTable* superTbls) { int colIndex; int lenOfOneRow = 0; - + for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { char* dataType = superTbls->columns[colIndex].dataType; - + if (strcasecmp(dataType, "BINARY") == 0) { lenOfOneRow += superTbls->columns[colIndex].dataLen + 3; } else if (strcasecmp(dataType, "NCHAR") == 0) { @@ -1967,9 +2065,9 @@ static int calcRowLen(SSuperTable* superTbls) { lenOfOneRow += 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { lenOfOneRow += 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { + } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfOneRow += 42; - } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { + } else if (strcasecmp(dataType, "TIMESTAMP") == 0) { lenOfOneRow += 21; } else { printf("get error data type : %s\n", dataType); @@ -2000,7 +2098,7 @@ static int calcRowLen(SSuperTable* superTbls) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 6; } else if (strcasecmp(dataType, "FLOAT") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { + } else if (strcasecmp(dataType, "DOUBLE") == 0) { lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; } else { printf("get error tag type : %s\n", dataType); @@ -2009,7 +2107,7 @@ static int calcRowLen(SSuperTable* superTbls) { } superTbls->lenOfTagOfOneRow = lenOfTagOfOneRow; - + return 0; } @@ -2021,7 +2119,7 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char command[BUFFER_SIZE] = "\0"; char limitBuf[100] = "\0"; - TAOS_RES * res; + TAOS_RES * res; TAOS_ROW row = NULL; char* childTblName = *childTblNameOfSuperTbl; @@ -2030,21 +2128,32 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, snprintf(limitBuf, 100, " limit %d offset %d", limit, offset); } - //get all child table name use cmd: select tbname from superTblName; - snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", dbName, sTblName, limitBuf); + //get all child table name use cmd: select tbname from superTblName; + snprintf(command, BUFFER_SIZE, "select tbname from %s.%s %s", + dbName, sTblName, limitBuf); - res = taos_query(taos, command); + res = taos_query(taos, command); int32_t code = taos_errno(res); if (code != 0) { - printf("failed to run command %s\n", command); taos_free_result(res); taos_close(taos); + errorPrint("%s() LN%d, failed to run command %s\n", + __func__, __LINE__, command); exit(-1); } int childTblCount = (limit < 
0)?10000:limit; int count = 0; -// childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); + if (childTblName == NULL) { + childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); + if (NULL == childTblName) { + taos_free_result(res); + taos_close(taos); + errorPrint("%s() LN%d, failed to allocate memory!\n", __func__, __LINE__); + exit(-1); + } + } + char* pTblName = childTblName; while ((row = taos_fetch_row(res)) != NULL) { int32_t* len = taos_fetch_lengths(res); @@ -2090,8 +2199,9 @@ static int getAllChildNameOfSuperTable(TAOS * taos, char* dbName, static int getSuperTableFromServer(TAOS * taos, char* dbName, SSuperTable* superTbls) { + char command[BUFFER_SIZE] = "\0"; - TAOS_RES * res; + TAOS_RES * res; TAOS_ROW row = NULL; int count = 0; @@ -2152,7 +2262,7 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, /* if (TBL_ALREADY_EXISTS == superTbls->childTblExists) { - //get all child table name use cmd: select tbname from superTblName; + //get all child table name use cmd: select tbname from superTblName; int childTblCount = 10000; superTbls->childTblName = (char*)calloc(1, childTblCount * TSDB_TABLE_NAME_LEN); if (superTbls->childTblName == NULL) { @@ -2168,7 +2278,8 @@ static int getSuperTableFromServer(TAOS * taos, char* dbName, return 0; } -static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, bool use_metric) { +static int createSuperTable(TAOS * taos, char* dbName, + SSuperTable* superTbls, bool use_metric) { char command[BUFFER_SIZE] = "\0"; char cols[STRING_LEN] = "\0"; @@ -2178,7 +2289,7 @@ static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, int lenOfOneRow = 0; for (colIndex = 0; colIndex < superTbls->columnCount; colIndex++) { char* dataType = superTbls->columns[colIndex].dataType; - + if (strcasecmp(dataType, "BINARY") == 0) { len += snprintf(cols + len, STRING_LEN - len, ", col%d %s(%d)", colIndex, "BINARY", @@ -2275,7 +2386,7 @@ static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "FLOAT"); lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 22; - } else if (strcasecmp(dataType, "DOUBLE") == 0) { + } else if (strcasecmp(dataType, "DOUBLE") == 0) { len += snprintf(tags + len, STRING_LEN - len, "t%d %s, ", tagIndex, "DOUBLE"); lenOfTagOfOneRow += superTbls->tags[tagIndex].dataLen + 42; @@ -2295,7 +2406,7 @@ static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, dbName, superTbls->sTblName, cols, tags); verbosePrint("%s() LN%d: %s\n", __func__, __LINE__, command); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { errorPrint( "create supertable %s failed!\n\n", superTbls->sTblName); return -1; @@ -2305,7 +2416,7 @@ static int createSuperTable(TAOS * taos, char* dbName, SSuperTable* superTbls, return 0; } -static int createDatabases() { +static int createDatabasesAndStables() { TAOS * taos = NULL; int ret = 0; taos = taos_connect(g_Dbs.host, g_Dbs.user, g_Dbs.password, NULL, g_Dbs.port); @@ -2315,18 +2426,18 @@ static int createDatabases() { } char command[BUFFER_SIZE] = "\0"; - for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int i = 0; i < g_Dbs.dbCount; i++) { if (g_Dbs.db[i].drop) { sprintf(command, "drop database if exists %s;", g_Dbs.db[i].dbName); verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + if (0 != 
queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); return -1; } } int dataLen = 0; - dataLen += snprintf(command + dataLen, + dataLen += snprintf(command + dataLen, BUFFER_SIZE - dataLen, "create database if not exists %s", g_Dbs.db[i].dbName); if (g_Dbs.db[i].dbCfg.blocks > 0) { @@ -2393,7 +2504,7 @@ static int createDatabases() { } debugPrint("%s() %d command: %s\n", __func__, __LINE__, command); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { + if (0 != queryDbExec(taos, command, NO_INSERT_TYPE, false)) { taos_close(taos); errorPrint( "\ncreate database %s failed!\n\n", g_Dbs.db[i].dbName); return -1; @@ -2403,38 +2514,41 @@ static int createDatabases() { debugPrint("%s() %d supertbl count:%d\n", __func__, __LINE__, g_Dbs.db[i].superTblCount); for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { - // describe super table, if exists sprintf(command, "describe %s.%s;", g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); verbosePrint("%s() %d command: %s\n", __func__, __LINE__, command); - if (0 != queryDbExec(taos, command, NO_INSERT_TYPE)) { - g_Dbs.db[i].superTbls[j].superTblExists = TBL_NO_EXISTS; + + ret = queryDbExec(taos, command, NO_INSERT_TYPE, true); + + if ((ret != 0) || (g_Dbs.db[i].drop)) { ret = createSuperTable(taos, g_Dbs.db[i].dbName, &g_Dbs.db[i].superTbls[j], g_Dbs.use_metric); - } else { - g_Dbs.db[i].superTbls[j].superTblExists = TBL_ALREADY_EXISTS; - if (g_Dbs.db[i].superTbls[j].childTblExists != TBL_ALREADY_EXISTS) { - ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, - &g_Dbs.db[i].superTbls[j]); + if (0 != ret) { + errorPrint("\ncreate super table %d failed!\n\n", j); + taos_close(taos); + return -1; } } + ret = getSuperTableFromServer(taos, g_Dbs.db[i].dbName, + &g_Dbs.db[i].superTbls[j]); if (0 != ret) { - printf("\ncreate super table %d failed!\n\n", j); + errorPrint("\nget super table %s.%s info failed!\n\n", + g_Dbs.db[i].dbName, g_Dbs.db[i].superTbls[j].sTblName); taos_close(taos); return -1; } - } + } } taos_close(taos); return 0; } -static void* createTable(void *sarg) -{ - threadInfo *winfo = (threadInfo *)sarg; +static void* createTable(void *sarg) +{ + threadInfo *winfo = (threadInfo *)sarg; SSuperTable* superTblInfo = winfo->superTblInfo; int64_t lastPrintTime = taosGetTimestampMs(); @@ -2454,57 +2568,64 @@ static void* createTable(void *sarg) int len = 0; int batchNum = 0; - verbosePrint("%s() LN%d: Creating table from %d to %d\n", + verbosePrint("%s() LN%d: Creating table from %d to %d\n", __func__, __LINE__, winfo->start_table_from, winfo->end_table_to); for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) { if (0 == g_Dbs.use_metric) { - snprintf(buffer, buff_len, + snprintf(buffer, buff_len, "create table if not exists %s.%s%d %s;", winfo->db_name, g_args.tb_prefix, i, winfo->cols); } else { - if (0 == len) { - batchNum = 0; - memset(buffer, 0, buff_len); - len += snprintf(buffer + len, - buff_len - len, "create table "); - } - - char* tagsValBuf = NULL; - if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo); - } else { - tagsValBuf = getTagValueFromTagSample( - superTblInfo, - i % superTblInfo->tagSampleCount); - } - if (NULL == tagsValBuf) { + if (superTblInfo == NULL) { + errorPrint("%s() LN%d, use metric, but super table info is NULL\n", + __func__, __LINE__); free(buffer); - return NULL; - } - - len += snprintf(buffer + len, - superTblInfo->maxSqlLen - len, - "if not exists %s.%s%d using %s.%s tags %s ", - winfo->db_name, superTblInfo->childTblPrefix, - 
i, winfo->db_name, - superTblInfo->sTblName, tagsValBuf); - free(tagsValBuf); - batchNum++; + exit(-1); + } else { + if (0 == len) { + batchNum = 0; + memset(buffer, 0, buff_len); + len += snprintf(buffer + len, + buff_len - len, "create table "); + } - if ((batchNum < superTblInfo->batchCreateTableNum) - && ((superTblInfo->maxSqlLen - len) - >= (superTblInfo->lenOfTagOfOneRow + 256))) { - continue; + char* tagsValBuf = NULL; + if (0 == superTblInfo->tagSource) { + tagsValBuf = generateTagVaulesForStb(superTblInfo, i); + } else { + tagsValBuf = getTagValueFromTagSample( + superTblInfo, + i % superTblInfo->tagSampleCount); + } + if (NULL == tagsValBuf) { + free(buffer); + return NULL; + } + + len += snprintf(buffer + len, + superTblInfo->maxSqlLen - len, + "if not exists %s.%s%d using %s.%s tags %s ", + winfo->db_name, superTblInfo->childTblPrefix, + i, winfo->db_name, + superTblInfo->sTblName, tagsValBuf); + free(tagsValBuf); + batchNum++; + + if ((batchNum < superTblInfo->batchCreateTableNum) + && ((superTblInfo->maxSqlLen - len) + >= (superTblInfo->lenOfTagOfOneRow + 256))) { + continue; + } } } len = 0; verbosePrint("%s() LN%d %s\n", __func__, __LINE__, buffer); - if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)){ + if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)){ errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); free(buffer); return NULL; @@ -2517,10 +2638,10 @@ static void* createTable(void *sarg) lastPrintTime = currentPrintTime; } } - + if (0 != len) { verbosePrint("%s() %d buffer: %s\n", __func__, __LINE__, buffer); - if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE)) { + if (0 != queryDbExec(winfo->taos, buffer, NO_INSERT_TYPE, false)) { errorPrint( "queryDbExec() failed. buffer:\n%s\n", buffer); } } @@ -2532,6 +2653,7 @@ static void* createTable(void *sarg) static int startMultiThreadCreateChildTable( char* cols, int threads, int startFrom, int ntables, char* db_name, SSuperTable* superTblInfo) { + pthread_t *pids = malloc(threads * sizeof(pthread_t)); threadInfo *infos = malloc(threads * sizeof(threadInfo)); @@ -2552,7 +2674,7 @@ static int startMultiThreadCreateChildTable( int b = 0; b = ntables % threads; - + for (int i = 0; i < threads; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; @@ -2568,7 +2690,7 @@ static int startMultiThreadCreateChildTable( if (t_info->taos == NULL) { errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); free(pids); - free(infos); + free(infos); return -1; } @@ -2581,7 +2703,7 @@ static int startMultiThreadCreateChildTable( t_info->minDelay = INT16_MAX; pthread_create(pids + i, NULL, createTable, t_info); } - + for (int i = 0; i < threads; i++) { pthread_join(pids[i], NULL); } @@ -2615,13 +2737,13 @@ static void createChildTables() { int startFrom = 0; g_totalChildTables += g_Dbs.db[i].superTbls[j].childTblCount; - verbosePrint("%s() LN%d: create %d child tables from %d\n", __func__, __LINE__, - g_totalChildTables, startFrom); + verbosePrint("%s() LN%d: create %d child tables from %d\n", + __func__, __LINE__, g_totalChildTables, startFrom); startMultiThreadCreateChildTable( g_Dbs.db[i].superTbls[j].colsOfCreateChildTable, g_Dbs.threadCountByCreateTbl, startFrom, - g_totalChildTables, + g_Dbs.db[i].superTbls[j].childTblCount, g_Dbs.db[i].dbName, &(g_Dbs.db[i].superTbls[j])); } } else { @@ -2632,10 +2754,10 @@ static void createChildTables() { if ((strncasecmp(g_args.datatype[j], "BINARY", strlen("BINARY")) == 0) || (strncasecmp(g_args.datatype[j], "NCHAR", 
strlen("NCHAR")) == 0)) { - len = snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ", COL%d %s(60)", j, g_args.datatype[j]); } else { - len = snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, + snprintf(tblColsBuf + len, MAX_SQL_SIZE - len, ", COL%d %s", j, g_args.datatype[j]); } len = strlen(tblColsBuf); @@ -2740,7 +2862,7 @@ static int readSampleFromCsvFileToMem( ssize_t readLen = 0; char * line = NULL; int getRows = 0; - + FILE* fp = fopen(superTblInfo->sampleFile, "r"); if (fp == NULL) { errorPrint( "Failed to open sample file: %s, reason:%s\n", @@ -2762,7 +2884,7 @@ static int readSampleFromCsvFileToMem( } continue; } - + if (('\r' == line[readLen - 1]) || ('\n' == line[readLen - 1])) { line[--readLen] = 0; } @@ -2798,7 +2920,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( cJSON* stbInfo, SSuperTable* superTbls) { bool ret = false; - // columns + // columns cJSON *columns = cJSON_GetObjectItem(stbInfo, "columns"); if (columns && columns->type != cJSON_Array) { printf("ERROR: failed to read json, columns not found\n"); @@ -2808,7 +2930,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( superTbls->tagCount = 0; return true; } - + int columnSize = cJSON_GetArraySize(columns); if (columnSize > MAX_COLUMN_COUNT) { errorPrint("%s() LN%d, failed to read json, column size overflow, max column size is %d\n", @@ -2836,7 +2958,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; } - // column info + // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(column, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { @@ -2864,16 +2986,16 @@ static bool getColumnAndTagTypeFromInsertJsonFile( } } superTbls->columnCount = index; - + count = 1; index = 0; - // tags + // tags cJSON *tags = cJSON_GetObjectItem(stbInfo, "tags"); if (!tags || tags->type != cJSON_Array) { debugPrint("%s() LN%d, failed to read json, tags not found\n", __func__, __LINE__); goto PARSE_OVER; } - + int tagSize = cJSON_GetArraySize(tags); if (tagSize > MAX_TAG_COUNT) { debugPrint("%s() LN%d, failed to read json, tags size overflow, max tag size is %d\n", __func__, __LINE__, MAX_TAG_COUNT); @@ -2896,7 +3018,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( count = 1; } - // column info + // column info memset(&columnCase, 0, sizeof(StrColumn)); cJSON *dataType = cJSON_GetObjectItem(tag, "type"); if (!dataType || dataType->type != cJSON_String || dataType->valuestring == NULL) { @@ -2942,9 +3064,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* host = cJSON_GetObjectItem(root, "host"); if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_Dbs.host, host->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.host, host->valuestring, MAX_HOSTNAME_SIZE); } else if (!host) { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { printf("ERROR: failed to read json, host not found\n"); goto PARSE_OVER; @@ -2959,16 +3081,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* user = cJSON_GetObjectItem(root, "user"); if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_Dbs.user, user->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.user, user->valuestring, MAX_USERNAME_SIZE); } else if (!user) { - tstrncpy(g_Dbs.user, "root", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.user, "root", MAX_USERNAME_SIZE); } cJSON* password = cJSON_GetObjectItem(root, 
"password"); if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_Dbs.password, password->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.password, password->valuestring, MAX_PASSWORD_SIZE); } else if (!password) { - tstrncpy(g_Dbs.password, "taosdata", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.password, "taosdata", MAX_PASSWORD_SIZE); } cJSON* resultfile = cJSON_GetObjectItem(root, "result_file"); @@ -2986,17 +3108,18 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { printf("ERROR: failed to read json, threads not found\n"); goto PARSE_OVER; - } - + } + cJSON* threads2 = cJSON_GetObjectItem(root, "thread_count_create_tbl"); if (threads2 && threads2->type == cJSON_Number) { g_Dbs.threadCountByCreateTbl = threads2->valueint; } else if (!threads2) { - g_Dbs.threadCountByCreateTbl = 1; + g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; } else { - printf("ERROR: failed to read json, threads2 not found\n"); + errorPrint("%s() LN%d, failed to read json, threads2 not found\n", + __func__, __LINE__); goto PARSE_OVER; - } + } cJSON* gInsertInterval = cJSON_GetObjectItem(root, "insert_interval"); if (gInsertInterval && gInsertInterval->type == cJSON_Number) { @@ -3008,15 +3131,26 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON* rowsPerTbl = cJSON_GetObjectItem(root, "rows_per_tbl"); - if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { - g_args.rows_per_tbl = rowsPerTbl->valueint; - } else if (!rowsPerTbl) { - g_args.rows_per_tbl = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req + cJSON* interlaceRows = cJSON_GetObjectItem(root, "interlace_rows"); + if (interlaceRows && interlaceRows->type == cJSON_Number) { + g_args.interlace_rows = interlaceRows->valueint; + + // rows per table need be less than insert batch + if (g_args.interlace_rows > g_args.num_of_RPR) { + printf("NOTICE: interlace rows value %d > num_of_records_per_request %d\n\n", + g_args.interlace_rows, g_args.num_of_RPR); + printf(" interlace rows value will be set to num_of_records_per_request %d\n\n", + g_args.num_of_RPR); + printf(" press Enter key to continue or Ctrl-C to stop."); + (void)getchar(); + g_args.interlace_rows = g_args.num_of_RPR; + } + } else if (!interlaceRows) { + g_args.interlace_rows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - errorPrint("%s() LN%d, failed to read json, rows_per_tbl input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, interlace_rows input mistake\n", __func__, __LINE__); goto PARSE_OVER; - } + } cJSON* maxSqlLen = cJSON_GetObjectItem(root, "max_sql_len"); if (maxSqlLen && maxSqlLen->type == cJSON_Number) { @@ -3027,20 +3161,19 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { errorPrint("%s() LN%d, failed to read json, max_sql_len input mistake\n", __func__, __LINE__); goto PARSE_OVER; } - cJSON* numRecPerReq = cJSON_GetObjectItem(root, "num_of_records_per_req"); if (numRecPerReq && numRecPerReq->type == cJSON_Number) { g_args.num_of_RPR = numRecPerReq->valueint; } else if (!numRecPerReq) { - g_args.num_of_RPR = 100; + g_args.num_of_RPR = 0xffff; } else { errorPrint("%s() LN%d, failed to read json, num_of_records_per_req not found\n", __func__, __LINE__); goto PARSE_OVER; } cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, - if (answerPrompt + if (answerPrompt && answerPrompt->type == cJSON_String && answerPrompt->valuestring != NULL) { if (0 == strncasecmp(answerPrompt->valuestring, "yes", 3)) { @@ -3055,7 +3188,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); goto PARSE_OVER; - } + } cJSON* dbs = cJSON_GetObjectItem(root, "databases"); if (!dbs || dbs->type != cJSON_Array) { @@ -3076,13 +3209,13 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON* dbinfos = cJSON_GetArrayItem(dbs, i); if (dbinfos == NULL) continue; - // dbinfo + // dbinfo cJSON *dbinfo = cJSON_GetObjectItem(dbinfos, "dbinfo"); if (!dbinfo || dbinfo->type != cJSON_Object) { printf("ERROR: failed to read json, dbinfo not found\n"); goto PARSE_OVER; } - + cJSON *dbName = cJSON_GetObjectItem(dbinfo, "name"); if (!dbName || dbName->type != cJSON_String || dbName->valuestring == NULL) { printf("ERROR: failed to read json, db name not found\n"); @@ -3092,15 +3225,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { - if (0 == strncasecmp(drop->valuestring, "yes", 3)) { - g_Dbs.db[i].drop = 1; + if (0 == strncasecmp(drop->valuestring, "yes", strlen("yes"))) { + g_Dbs.db[i].drop = true; } else { - g_Dbs.db[i].drop = 0; - } + g_Dbs.db[i].drop = false; + } } else if (!drop) { - g_Dbs.db[i].drop = 0; + g_Dbs.db[i].drop = g_args.drop_database; } else { - printf("ERROR: failed to read json, drop not found\n"); + errorPrint("%s() LN%d, failed to read json, drop input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3146,7 +3280,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, keep not found\n"); goto PARSE_OVER; } - + cJSON* days = cJSON_GetObjectItem(dbinfo, "days"); if (days && days->type == cJSON_Number) { g_Dbs.db[i].dbCfg.days = days->valueint; @@ -3156,7 +3290,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, days not found\n"); goto PARSE_OVER; } - + cJSON* cache = cJSON_GetObjectItem(dbinfo, "cache"); if (cache && cache->type == cJSON_Number) { g_Dbs.db[i].dbCfg.cache = cache->valueint; @@ -3166,7 +3300,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, cache not found\n"); goto PARSE_OVER; } - + cJSON* blocks= 
cJSON_GetObjectItem(dbinfo, "blocks"); if (blocks && blocks->type == cJSON_Number) { g_Dbs.db[i].dbCfg.blocks = blocks->valueint; @@ -3253,22 +3387,24 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!fsync) { g_Dbs.db[i].dbCfg.fsync = -1; } else { - printf("ERROR: failed to read json, fsync not found\n"); - goto PARSE_OVER; - } + errorPrint("%s() LN%d, failed to read json, fsync input mistake\n", + __func__, __LINE__); + goto PARSE_OVER; + } - // super_talbes + // super_talbes cJSON *stables = cJSON_GetObjectItem(dbinfos, "super_tables"); if (!stables || stables->type != cJSON_Array) { - printf("ERROR: failed to read json, super_tables not found\n"); + errorPrint("%s() LN%d, failed to read json, super_tables not found\n", + __func__, __LINE__); goto PARSE_OVER; - } - + } + int stbSize = cJSON_GetArraySize(stables); if (stbSize > MAX_SUPER_TABLE_COUNT) { errorPrint( - "ERROR: failed to read json, databases size overflow, max database is %d\n", - MAX_SUPER_TABLE_COUNT); + "%s() LN%d, failed to read json, supertable size overflow, max supertable is %d\n", + __func__, __LINE__, MAX_SUPER_TABLE_COUNT); goto PARSE_OVER; } @@ -3276,15 +3412,16 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { for (int j = 0; j < stbSize; ++j) { cJSON* stbInfo = cJSON_GetArrayItem(stables, j); if (stbInfo == NULL) continue; - - // dbinfo + + // dbinfo cJSON *stbName = cJSON_GetObjectItem(stbInfo, "name"); if (!stbName || stbName->type != cJSON_String || stbName->valuestring == NULL) { - printf("ERROR: failed to read json, stb name not found\n"); + errorPrint("%s() LN%d, failed to read json, stb name not found\n", + __func__, __LINE__); goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, MAX_TB_NAME_SIZE); - + cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { printf("ERROR: failed to read json, childtable_prefix not found\n"); @@ -3309,7 +3446,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, auto_create_table not found\n"); goto PARSE_OVER; } - + cJSON* batchCreateTbl = cJSON_GetObjectItem(stbInfo, "batch_create_tbl_num"); if (batchCreateTbl && batchCreateTbl->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].batchCreateTableNum = batchCreateTbl->valueint; @@ -3318,7 +3455,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { printf("ERROR: failed to read json, batch_create_tbl_num not found\n"); goto PARSE_OVER; - } + } cJSON *childTblExists = cJSON_GetObjectItem(stbInfo, "child_table_exists"); // yes, no if (childTblExists @@ -3334,13 +3471,15 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!childTblExists) { g_Dbs.db[i].superTbls[j].childTblExists = TBL_NO_EXISTS; } else { - errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, child_table_exists not found\n", + __func__, __LINE__); goto PARSE_OVER; } - + cJSON* count = cJSON_GetObjectItem(stbInfo, "childtable_count"); if (!count || count->type != cJSON_Number || 0 >= count->valueint) { - errorPrint("%s() LN%d, failed to read json, childtable_count not found\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, childtable_count not found\n", + __func__, __LINE__); goto PARSE_OVER; } g_Dbs.db[i].superTbls[j].childTblCount = count->valueint; @@ -3424,10 +3563,11 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { 
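
For reference, here is a minimal insert-mode configuration exercising the keys parsed in this region (dbinfo, drop, super_tables, childtable_prefix, childtable_count, batch_create_tbl_num, child_table_exists, interlace_rows). The key names match the parser above; the values and the embedded-string form are illustrative only:

#include <stdio.h>
#include "cJSON.h"   /* assumed available, as in taosdemo's build */

static const char *kInsertCfg =
    "{"
    "  \"host\": \"127.0.0.1\","
    "  \"thread_count\": 4,"
    "  \"databases\": [{"
    "    \"dbinfo\": { \"name\": \"db\", \"drop\": \"yes\", \"days\": 10, \"cache\": 16 },"
    "    \"super_tables\": [{"
    "      \"name\": \"meters\","
    "      \"childtable_prefix\": \"d\","
    "      \"childtable_count\": 1000,"
    "      \"batch_create_tbl_num\": 10,"
    "      \"child_table_exists\": \"no\","
    "      \"insert_rows\": 100000,"
    "      \"interlace_rows\": 0"
    "    }]"
    "  }]"
    "}";

int main(void) {
    cJSON *root = cJSON_Parse(kInsertCfg);
    if (root == NULL) { fprintf(stderr, "bad json\n"); return 1; }

    cJSON *dbs = cJSON_GetObjectItem(root, "databases");
    cJSON *db0 = cJSON_GetArrayItem(dbs, 0);
    cJSON *stables = cJSON_GetObjectItem(db0, "super_tables");
    printf("super table count: %d\n", cJSON_GetArraySize(stables));

    cJSON_Delete(root);
    return 0;
}
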
printf("ERROR: failed to read json, sample_buf_size not found\n"); goto PARSE_OVER; - } + } cJSON *sampleFormat = cJSON_GetObjectItem(stbInfo, "sample_format"); - if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) { + if (sampleFormat && sampleFormat->type + == cJSON_String && sampleFormat->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, sampleFormat->valuestring, MAX_DB_NAME_SIZE); } else if (!sampleFormat) { @@ -3435,7 +3575,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { printf("ERROR: failed to read json, sample_format not found\n"); goto PARSE_OVER; - } + } cJSON *sampleFile = cJSON_GetObjectItem(stbInfo, "sample_file"); if (sampleFile && sampleFile->type == cJSON_String && sampleFile->valuestring != NULL) { @@ -3446,7 +3586,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { printf("ERROR: failed to read json, sample_file not found\n"); goto PARSE_OVER; - } + } cJSON *tagsFile = cJSON_GetObjectItem(stbInfo, "tags_file"); if (tagsFile && tagsFile->type == cJSON_String && tagsFile->valuestring != NULL) { @@ -3472,14 +3612,14 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { len = TSDB_MAX_ALLOWED_SQL_LEN; } else if (len < TSDB_MAX_SQL_LEN) { len = TSDB_MAX_SQL_LEN; - } + } g_Dbs.db[i].superTbls[j].maxSqlLen = len; } else if (!maxSqlLen) { - g_Dbs.db[i].superTbls[j].maxSqlLen = TSDB_MAX_SQL_LEN; + g_Dbs.db[i].superTbls[j].maxSqlLen = g_args.max_sql_len; } else { printf("ERROR: failed to read json, maxSqlLen not found\n"); goto PARSE_OVER; - } + } cJSON *multiThreadWriteOneTbl = cJSON_GetObjectItem(stbInfo, "multi_thread_write_one_tbl"); // no , yes @@ -3490,7 +3630,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 1; } else { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; - } + } } else if (!multiThreadWriteOneTbl) { g_Dbs.db[i].superTbls[j].multiThreadWriteOneTbl = 0; } else { @@ -3498,15 +3638,27 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } - cJSON* rowsPerTbl = cJSON_GetObjectItem(stbInfo, "rows_per_tbl"); - if (rowsPerTbl && rowsPerTbl->type == cJSON_Number) { - g_Dbs.db[i].superTbls[j].rowsPerTbl = rowsPerTbl->valueint; - } else if (!rowsPerTbl) { - g_Dbs.db[i].superTbls[j].rowsPerTbl = 0; // 0 means progressive mode, > 0 mean interlace mode. max value is less or equ num_of_records_per_req + cJSON* interlaceRows = cJSON_GetObjectItem(stbInfo, "interlace_rows"); + if (interlaceRows && interlaceRows->type == cJSON_Number) { + g_Dbs.db[i].superTbls[j].interlaceRows = interlaceRows->valueint; + // rows per table need be less than insert batch + if (g_Dbs.db[i].superTbls[j].interlaceRows > g_args.num_of_RPR) { + printf("NOTICE: db[%d].superTbl[%d]'s interlace rows value %d > num_of_records_per_request %d\n\n", + i, j, g_Dbs.db[i].superTbls[j].interlaceRows, g_args.num_of_RPR); + printf(" interlace rows value will be set to num_of_records_per_request %d\n\n", + g_args.num_of_RPR); + printf(" press Enter key to continue or Ctrl-C to stop."); + (void)getchar(); + g_Dbs.db[i].superTbls[j].interlaceRows = g_args.num_of_RPR; + } + } else if (!interlaceRows) { + g_Dbs.db[i].superTbls[j].interlaceRows = 0; // 0 means progressive mode, > 0 mean interlace mode. 
max value is less or equ num_of_records_per_req } else { - errorPrint("%s() LN%d, failed to read json, rowsPerTbl input mistake\n", __func__, __LINE__); + errorPrint( + "%s() LN%d, failed to read json, interlace rows input mistake\n", + __func__, __LINE__); goto PARSE_OVER; - } + } cJSON* disorderRatio = cJSON_GetObjectItem(stbInfo, "disorder_ratio"); if (disorderRatio && disorderRatio->type == cJSON_Number) { @@ -3516,7 +3668,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else { printf("ERROR: failed to read json, disorderRatio not found\n"); goto PARSE_OVER; - } + } cJSON* disorderRange = cJSON_GetObjectItem(stbInfo, "disorder_range"); if (disorderRange && disorderRange->type == cJSON_Number) { @@ -3534,7 +3686,8 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { } else if (!insertRows) { g_Dbs.db[i].superTbls[j].insertRows = 0x7FFFFFFFFFFFFFFF; } else { - errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, insert_rows input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } @@ -3542,26 +3695,21 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (insertInterval && insertInterval->type == cJSON_Number) { g_Dbs.db[i].superTbls[j].insertInterval = insertInterval->valueint; } else if (!insertInterval) { - debugPrint("%s() LN%d: stable insert interval be overrided by global %d.\n", + verbosePrint("%s() LN%d: stable insert interval be overrided by global %d.\n", __func__, __LINE__, g_args.insert_interval); g_Dbs.db[i].superTbls[j].insertInterval = g_args.insert_interval; } else { - errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", __func__, __LINE__); + errorPrint("%s() LN%d, failed to read json, insert_interval input mistake\n", + __func__, __LINE__); goto PARSE_OVER; } -/* CBD if (NO_CREATE_SUBTBL == g_Dbs.db[i].superTbls[j].autoCreateTable - || (TBL_ALREADY_EXISTS == g_Dbs.db[i].superTbls[j].childTblExists)) { - continue; - } - */ - int retVal = getColumnAndTagTypeFromInsertJsonFile( stbInfo, &g_Dbs.db[i].superTbls[j]); if (false == retVal) { goto PARSE_OVER; - } - } + } + } } ret = true; @@ -3583,9 +3731,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* host = cJSON_GetObjectItem(root, "host"); if (host && host->type == cJSON_String && host->valuestring != NULL) { - tstrncpy(g_queryInfo.host, host->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.host, host->valuestring, MAX_HOSTNAME_SIZE); } else if (!host) { - tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } else { printf("ERROR: failed to read json, host not found\n"); goto PARSE_OVER; @@ -3600,16 +3748,16 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* user = cJSON_GetObjectItem(root, "user"); if (user && user->type == cJSON_String && user->valuestring != NULL) { - tstrncpy(g_queryInfo.user, user->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.user, user->valuestring, MAX_USERNAME_SIZE); } else if (!user) { - tstrncpy(g_queryInfo.user, "root", MAX_DB_NAME_SIZE); ; + tstrncpy(g_queryInfo.user, "root", MAX_USERNAME_SIZE); ; } cJSON* password = cJSON_GetObjectItem(root, "password"); if (password && password->type == cJSON_String && password->valuestring != NULL) { - tstrncpy(g_queryInfo.password, password->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.password, password->valuestring, MAX_PASSWORD_SIZE); } else if (!password) { - tstrncpy(g_queryInfo.password, "taosdata", 
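
The disorder_ratio / disorder_range pair parsed above controls out-of-order timestamp injection during row generation: with probability disorder_ratio percent, the timestamp is pushed back by a random offset inside disorder_range. A standalone sketch of that idea, using rand() in place of the tool's own random helpers:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static int64_t next_timestamp(int64_t base_ts, int step_ms, int k,
                              int disorder_ratio, int disorder_range) {
    int64_t ts = base_ts + (int64_t)step_ms * k;
    if (disorder_ratio != 0 && (rand() % 100) < disorder_ratio) {
        ts -= rand() % disorder_range;   /* deliberately out of order */
    }
    return ts;
}

int main(void) {
    srand((unsigned)time(NULL));
    for (int k = 0; k < 5; k++) {
        /* roughly 10% of rows land up to 1000 ms in the past */
        printf("%lld\n", (long long)next_timestamp(1500000000000LL, 1, k, 10, 1000));
    }
    return 0;
}
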
MAX_DB_NAME_SIZE);; + tstrncpy(g_queryInfo.password, "taosdata", MAX_PASSWORD_SIZE);; } cJSON *answerPrompt = cJSON_GetObjectItem(root, "confirm_parameter_prompt"); // yes, no, @@ -3627,7 +3775,17 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else { printf("ERROR: failed to read json, confirm_parameter_prompt not found\n"); goto PARSE_OVER; - } + } + + cJSON* gQueryTimes = cJSON_GetObjectItem(root, "query_times"); + if (gQueryTimes && gQueryTimes->type == cJSON_Number) { + g_args.query_times = gQueryTimes->valueint; + } else if (!gQueryTimes) { + g_args.query_times = 1; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + goto PARSE_OVER; + } cJSON* dbs = cJSON_GetObjectItem(root, "databases"); if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { @@ -3646,35 +3804,186 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { printf("ERROR: failed to read json, query_mode not found\n"); goto PARSE_OVER; } - - // super_table_query - cJSON *superQuery = cJSON_GetObjectItem(root, "specified_table_query"); - if (!superQuery) { - g_queryInfo.superQueryInfo.concurrent = 0; - g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superQuery->type != cJSON_Object) { + + // super_table_query + cJSON *specifiedQuery = cJSON_GetObjectItem(root, "specified_table_query"); + if (!specifiedQuery) { + g_queryInfo.specifiedQueryInfo.concurrent = 0; + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (specifiedQuery->type != cJSON_Object) { printf("ERROR: failed to read json, super_table_query not found\n"); goto PARSE_OVER; - } else { - cJSON* rate = cJSON_GetObjectItem(superQuery, "query_interval"); + } else { + cJSON* rate = cJSON_GetObjectItem(specifiedQuery, "query_interval"); if (rate && rate->type == cJSON_Number) { - g_queryInfo.superQueryInfo.rate = rate->valueint; + g_queryInfo.specifiedQueryInfo.rate = rate->valueint; } else if (!rate) { + g_queryInfo.specifiedQueryInfo.rate = 0; + } + + cJSON* specifiedQueryTimes = cJSON_GetObjectItem(specifiedQuery, "query_times"); + if (specifiedQueryTimes && specifiedQueryTimes->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.queryTimes = specifiedQueryTimes->valueint; + } else if (!specifiedQueryTimes) { + g_queryInfo.specifiedQueryInfo.queryTimes = g_args.query_times; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + goto PARSE_OVER; + } + + cJSON* concurrent = cJSON_GetObjectItem(specifiedQuery, "concurrent"); + if (concurrent && concurrent->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.concurrent = concurrent->valueint; + } else if (!concurrent) { + g_queryInfo.specifiedQueryInfo.concurrent = 1; + } + + cJSON* mode = cJSON_GetObjectItem(specifiedQuery, "mode"); + if (mode && mode->type == cJSON_String && mode->valuestring != NULL) { + if (0 == strcmp("sync", mode->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeMode = 0; + } else if (0 == strcmp("async", mode->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeMode = 1; + } else { + printf("ERROR: failed to read json, subscribe mod error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeMode = 0; + } + + cJSON* interval = cJSON_GetObjectItem(specifiedQuery, "interval"); + if (interval && interval->type == cJSON_Number) { + g_queryInfo.specifiedQueryInfo.subscribeInterval = interval->valueint; + } else if (!interval) { + //printf("failed to read json, subscribe interval no found\n"); + //goto 
PARSE_OVER; + g_queryInfo.specifiedQueryInfo.subscribeInterval = 10000; + } + + cJSON* restart = cJSON_GetObjectItem(specifiedQuery, "restart"); + if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { + if (0 == strcmp("yes", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = 1; + } else if (0 == strcmp("no", restart->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeRestart = 0; + } else { + printf("ERROR: failed to read json, subscribe restart error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeRestart = 1; + } + + cJSON* keepProgress = cJSON_GetObjectItem(specifiedQuery, "keepProgress"); + if (keepProgress + && keepProgress->type == cJSON_String + && keepProgress->valuestring != NULL) { + if (0 == strcmp("yes", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 1; + } else if (0 == strcmp("no", keepProgress->valuestring)) { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } else { + printf("ERROR: failed to read json, subscribe keepProgress error\n"); + goto PARSE_OVER; + } + } else { + g_queryInfo.specifiedQueryInfo.subscribeKeepProgress = 0; + } + + // sqls + cJSON* superSqls = cJSON_GetObjectItem(specifiedQuery, "sqls"); + if (!superSqls) { + g_queryInfo.specifiedQueryInfo.sqlCount = 0; + } else if (superSqls->type != cJSON_Array) { + printf("ERROR: failed to read json, super sqls not found\n"); + goto PARSE_OVER; + } else { + int superSqlSize = cJSON_GetArraySize(superSqls); + if (superSqlSize > MAX_QUERY_SQL_COUNT) { + printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); + goto PARSE_OVER; + } + + g_queryInfo.specifiedQueryInfo.sqlCount = superSqlSize; + for (int j = 0; j < superSqlSize; ++j) { + cJSON* sql = cJSON_GetArrayItem(superSqls, j); + if (sql == NULL) continue; + + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); + if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { + printf("ERROR: failed to read json, sql not found\n"); + goto PARSE_OVER; + } + tstrncpy(g_queryInfo.specifiedQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); + + cJSON *result = cJSON_GetObjectItem(sql, "result"); + if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) { + tstrncpy(g_queryInfo.specifiedQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); + } else if (NULL == result) { + memset(g_queryInfo.specifiedQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); + } else { + printf("ERROR: failed to read json, super query result file not found\n"); + goto PARSE_OVER; + } + } + } + } + + // sub_table_query + cJSON *superQuery = cJSON_GetObjectItem(root, "super_table_query"); + if (!superQuery) { + g_queryInfo.superQueryInfo.threadCnt = 0; + g_queryInfo.superQueryInfo.sqlCount = 0; + } else if (superQuery->type != cJSON_Object) { + printf("ERROR: failed to read json, sub_table_query not found\n"); + ret = true; + goto PARSE_OVER; + } else { + cJSON* subrate = cJSON_GetObjectItem(superQuery, "query_interval"); + if (subrate && subrate->type == cJSON_Number) { + g_queryInfo.superQueryInfo.rate = subrate->valueint; + } else if (!subrate) { g_queryInfo.superQueryInfo.rate = 0; } - - cJSON* concurrent = cJSON_GetObjectItem(superQuery, "concurrent"); - if (concurrent && concurrent->type == cJSON_Number) { - g_queryInfo.superQueryInfo.concurrent = concurrent->valueint; - } else if (!concurrent) { - g_queryInfo.superQueryInfo.concurrent = 1; + + cJSON* 
superQueryTimes = cJSON_GetObjectItem(superQuery, "query_times"); + if (superQueryTimes && superQueryTimes->type == cJSON_Number) { + g_queryInfo.superQueryInfo.queryTimes = superQueryTimes->valueint; + } else if (!superQueryTimes) { + g_queryInfo.superQueryInfo.queryTimes = g_args.query_times; + } else { + errorPrint("%s() LN%d, failed to read json, query_times input mistake\n", __func__, __LINE__); + goto PARSE_OVER; } - - cJSON* mode = cJSON_GetObjectItem(superQuery, "mode"); - if (mode && mode->type == cJSON_String && mode->valuestring != NULL) { - if (0 == strcmp("sync", mode->valuestring)) { + + cJSON* threads = cJSON_GetObjectItem(superQuery, "threads"); + if (threads && threads->type == cJSON_Number) { + g_queryInfo.superQueryInfo.threadCnt = threads->valueint; + } else if (!threads) { + g_queryInfo.superQueryInfo.threadCnt = 1; + } + + //cJSON* subTblCnt = cJSON_GetObjectItem(superQuery, "childtable_count"); + //if (subTblCnt && subTblCnt->type == cJSON_Number) { + // g_queryInfo.superQueryInfo.childTblCount = subTblCnt->valueint; + //} else if (!subTblCnt) { + // g_queryInfo.superQueryInfo.childTblCount = 0; + //} + + cJSON* stblname = cJSON_GetObjectItem(superQuery, "stblname"); + if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { + tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); + } else { + printf("ERROR: failed to read json, super table name not found\n"); + goto PARSE_OVER; + } + + cJSON* submode = cJSON_GetObjectItem(superQuery, "mode"); + if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { + if (0 == strcmp("sync", submode->valuestring)) { g_queryInfo.superQueryInfo.subscribeMode = 0; - } else if (0 == strcmp("async", mode->valuestring)) { + } else if (0 == strcmp("async", submode->valuestring)) { g_queryInfo.superQueryInfo.subscribeMode = 1; } else { printf("ERROR: failed to read json, subscribe mod error\n"); @@ -3683,21 +3992,21 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else { g_queryInfo.superQueryInfo.subscribeMode = 0; } - - cJSON* interval = cJSON_GetObjectItem(superQuery, "interval"); - if (interval && interval->type == cJSON_Number) { - g_queryInfo.superQueryInfo.subscribeInterval = interval->valueint; - } else if (!interval) { + + cJSON* subinterval = cJSON_GetObjectItem(superQuery, "interval"); + if (subinterval && subinterval->type == cJSON_Number) { + g_queryInfo.superQueryInfo.subscribeInterval = subinterval->valueint; + } else if (!subinterval) { //printf("failed to read json, subscribe interval no found\n"); //goto PARSE_OVER; g_queryInfo.superQueryInfo.subscribeInterval = 10000; } - - cJSON* restart = cJSON_GetObjectItem(superQuery, "restart"); - if (restart && restart->type == cJSON_String && restart->valuestring != NULL) { - if (0 == strcmp("yes", restart->valuestring)) { + + cJSON* subrestart = cJSON_GetObjectItem(superQuery, "restart"); + if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { + if (0 == strcmp("yes", subrestart->valuestring)) { g_queryInfo.superQueryInfo.subscribeRestart = 1; - } else if (0 == strcmp("no", restart->valuestring)) { + } else if (0 == strcmp("no", subrestart->valuestring)) { g_queryInfo.superQueryInfo.subscribeRestart = 0; } else { printf("ERROR: failed to read json, subscribe restart error\n"); @@ -3706,14 +4015,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { } else { g_queryInfo.superQueryInfo.subscribeRestart = 1; } - - cJSON* keepProgress = 
cJSON_GetObjectItem(superQuery, "keepProgress"); - if (keepProgress - && keepProgress->type == cJSON_String - && keepProgress->valuestring != NULL) { - if (0 == strcmp("yes", keepProgress->valuestring)) { + + cJSON* subkeepProgress = cJSON_GetObjectItem(superQuery, "keepProgress"); + if (subkeepProgress && + subkeepProgress->type == cJSON_String + && subkeepProgress->valuestring != NULL) { + if (0 == strcmp("yes", subkeepProgress->valuestring)) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", keepProgress->valuestring)) { + } else if (0 == strcmp("no", subkeepProgress->valuestring)) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } else { printf("ERROR: failed to read json, subscribe keepProgress error\n"); @@ -3723,15 +4032,15 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.subscribeKeepProgress = 0; } - // sqls - cJSON* superSqls = cJSON_GetObjectItem(superQuery, "sqls"); - if (!superSqls) { + // sqls + cJSON* subsqls = cJSON_GetObjectItem(superQuery, "sqls"); + if (!subsqls) { g_queryInfo.superQueryInfo.sqlCount = 0; - } else if (superSqls->type != cJSON_Array) { + } else if (subsqls->type != cJSON_Array) { printf("ERROR: failed to read json, super sqls not found\n"); goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(superSqls); + } else { + int superSqlSize = cJSON_GetArraySize(subsqls); if (superSqlSize > MAX_QUERY_SQL_COUNT) { printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); goto PARSE_OVER; @@ -3739,9 +4048,9 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { g_queryInfo.superQueryInfo.sqlCount = superSqlSize; for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(superSqls, j); + cJSON* sql = cJSON_GetArrayItem(subsqls, j); if (sql == NULL) continue; - + cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { printf("ERROR: failed to read json, sql not found\n"); @@ -3750,145 +4059,14 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { tstrncpy(g_queryInfo.superQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (NULL != result && result->type == cJSON_String && result->valuestring != NULL) { + if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ tstrncpy(g_queryInfo.superQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); } else if (NULL == result) { memset(g_queryInfo.superQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); - } else { - printf("ERROR: failed to read json, super query result file not found\n"); - goto PARSE_OVER; - } - } - } - } - - // sub_table_query - cJSON *subQuery = cJSON_GetObjectItem(root, "super_table_query"); - if (!subQuery) { - g_queryInfo.subQueryInfo.threadCnt = 0; - g_queryInfo.subQueryInfo.sqlCount = 0; - } else if (subQuery->type != cJSON_Object) { - printf("ERROR: failed to read json, sub_table_query not found\n"); - ret = true; - goto PARSE_OVER; - } else { - cJSON* subrate = cJSON_GetObjectItem(subQuery, "query_interval"); - if (subrate && subrate->type == cJSON_Number) { - g_queryInfo.subQueryInfo.rate = subrate->valueint; - } else if (!subrate) { - g_queryInfo.subQueryInfo.rate = 0; - } - - cJSON* threads = cJSON_GetObjectItem(subQuery, "threads"); - if (threads && threads->type == cJSON_Number) { - g_queryInfo.subQueryInfo.threadCnt = threads->valueint; - } else if (!threads) { - 
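
For reference, a minimal query-mode configuration exercising both blocks parsed above: "specified_table_query" (fixed SQL statements run by concurrent workers) and "super_table_query" (SQL fanned out over threads against the child tables of stblname). The key names follow the parser; the values are illustrative only:

#include <stdio.h>
#include "cJSON.h"   /* assumed available, as in taosdemo's build */

static const char *kQueryCfg =
    "{"
    "  \"host\": \"127.0.0.1\","
    "  \"databases\": \"db\","
    "  \"query_times\": 2,"
    "  \"specified_table_query\": {"
    "    \"query_interval\": 0,"
    "    \"concurrent\": 4,"
    "    \"sqls\": [{ \"sql\": \"select count(*) from meters\", \"result\": \"\" }]"
    "  },"
    "  \"super_table_query\": {"
    "    \"stblname\": \"meters\","
    "    \"threads\": 4,"
    "    \"sqls\": [{ \"sql\": \"select max(current) from meters\", \"result\": \"\" }]"
    "  }"
    "}";

int main(void) {
    cJSON *root = cJSON_Parse(kQueryCfg);
    if (root == NULL) { fprintf(stderr, "bad json\n"); return 1; }

    cJSON *spec = cJSON_GetObjectItem(root, "specified_table_query");
    cJSON *sqls = cJSON_GetObjectItem(spec, "sqls");
    printf("specified sql count: %d\n", cJSON_GetArraySize(sqls));

    cJSON_Delete(root);
    return 0;
}
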
g_queryInfo.subQueryInfo.threadCnt = 1; - } - - //cJSON* subTblCnt = cJSON_GetObjectItem(subQuery, "childtable_count"); - //if (subTblCnt && subTblCnt->type == cJSON_Number) { - // g_queryInfo.subQueryInfo.childTblCount = subTblCnt->valueint; - //} else if (!subTblCnt) { - // g_queryInfo.subQueryInfo.childTblCount = 0; - //} - - cJSON* stblname = cJSON_GetObjectItem(subQuery, "stblname"); - if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { - tstrncpy(g_queryInfo.subQueryInfo.sTblName, stblname->valuestring, MAX_TB_NAME_SIZE); - } else { - printf("ERROR: failed to read json, super table name not found\n"); - goto PARSE_OVER; - } - - cJSON* submode = cJSON_GetObjectItem(subQuery, "mode"); - if (submode && submode->type == cJSON_String && submode->valuestring != NULL) { - if (0 == strcmp("sync", submode->valuestring)) { - g_queryInfo.subQueryInfo.subscribeMode = 0; - } else if (0 == strcmp("async", submode->valuestring)) { - g_queryInfo.subQueryInfo.subscribeMode = 1; - } else { - printf("ERROR: failed to read json, subscribe mod error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.subQueryInfo.subscribeMode = 0; - } - - cJSON* subinterval = cJSON_GetObjectItem(subQuery, "interval"); - if (subinterval && subinterval->type == cJSON_Number) { - g_queryInfo.subQueryInfo.subscribeInterval = subinterval->valueint; - } else if (!subinterval) { - //printf("failed to read json, subscribe interval no found\n"); - //goto PARSE_OVER; - g_queryInfo.subQueryInfo.subscribeInterval = 10000; - } - - cJSON* subrestart = cJSON_GetObjectItem(subQuery, "restart"); - if (subrestart && subrestart->type == cJSON_String && subrestart->valuestring != NULL) { - if (0 == strcmp("yes", subrestart->valuestring)) { - g_queryInfo.subQueryInfo.subscribeRestart = 1; - } else if (0 == strcmp("no", subrestart->valuestring)) { - g_queryInfo.subQueryInfo.subscribeRestart = 0; - } else { - printf("ERROR: failed to read json, subscribe restart error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.subQueryInfo.subscribeRestart = 1; - } - - cJSON* subkeepProgress = cJSON_GetObjectItem(subQuery, "keepProgress"); - if (subkeepProgress && - subkeepProgress->type == cJSON_String - && subkeepProgress->valuestring != NULL) { - if (0 == strcmp("yes", subkeepProgress->valuestring)) { - g_queryInfo.subQueryInfo.subscribeKeepProgress = 1; - } else if (0 == strcmp("no", subkeepProgress->valuestring)) { - g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; - } else { - printf("ERROR: failed to read json, subscribe keepProgress error\n"); - goto PARSE_OVER; - } - } else { - g_queryInfo.subQueryInfo.subscribeKeepProgress = 0; - } - - // sqls - cJSON* subsqls = cJSON_GetObjectItem(subQuery, "sqls"); - if (!subsqls) { - g_queryInfo.subQueryInfo.sqlCount = 0; - } else if (subsqls->type != cJSON_Array) { - printf("ERROR: failed to read json, super sqls not found\n"); - goto PARSE_OVER; - } else { - int superSqlSize = cJSON_GetArraySize(subsqls); - if (superSqlSize > MAX_QUERY_SQL_COUNT) { - printf("ERROR: failed to read json, query sql size overflow, max is %d\n", MAX_QUERY_SQL_COUNT); - goto PARSE_OVER; - } - - g_queryInfo.subQueryInfo.sqlCount = superSqlSize; - for (int j = 0; j < superSqlSize; ++j) { - cJSON* sql = cJSON_GetArrayItem(subsqls, j); - if (sql == NULL) continue; - - cJSON *sqlStr = cJSON_GetObjectItem(sql, "sql"); - if (!sqlStr || sqlStr->type != cJSON_String || sqlStr->valuestring == NULL) { - printf("ERROR: failed to read json, sql not found\n"); - goto PARSE_OVER; - } - 
tstrncpy(g_queryInfo.subQueryInfo.sql[j], sqlStr->valuestring, MAX_QUERY_SQL_LENGTH); - - cJSON *result = cJSON_GetObjectItem(sql, "result"); - if (result != NULL && result->type == cJSON_String && result->valuestring != NULL){ - tstrncpy(g_queryInfo.subQueryInfo.result[j], result->valuestring, MAX_FILE_NAME_LEN); - } else if (NULL == result) { - memset(g_queryInfo.subQueryInfo.result[j], 0, MAX_FILE_NAME_LEN); } else { printf("ERROR: failed to read json, sub query result file not found\n"); goto PARSE_OVER; - } + } } } } @@ -3912,7 +4090,7 @@ static bool getInfoFromJsonFile(char* file) { } bool ret = false; - int maxLen = 64000; + int maxLen = 6400000; char *content = calloc(1, maxLen + 1); int len = fread(content, 1, maxLen, fp); if (len <= 0) { @@ -3950,14 +4128,14 @@ static bool getInfoFromJsonFile(char* file) { if (INSERT_TEST == g_args.test_mode) { ret = getMetaFromInsertJsonFile(root); - } else if (QUERY_TEST == g_args.test_mode) { - ret = getMetaFromQueryJsonFile(root); - } else if (SUBSCRIBE_TEST == g_args.test_mode) { + } else if ((QUERY_TEST == g_args.test_mode) + || (SUBSCRIBE_TEST == g_args.test_mode)) { ret = getMetaFromQueryJsonFile(root); } else { - errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", __func__, __LINE__); + errorPrint("%s() LN%d, input json file type error! please input correct file type: insert or query or subscribe\n", + __func__, __LINE__); goto PARSE_OVER; - } + } PARSE_OVER: free(content); @@ -3967,7 +4145,7 @@ PARSE_OVER: } static void prepareSampleData() { - for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int i = 0; i < g_Dbs.dbCount; i++) { for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { if (g_Dbs.db[i].superTbls[j].tagsFile[0] != 0) { (void)readTagFromCsvFileToMem(&g_Dbs.db[i].superTbls[j]); @@ -3978,7 +4156,7 @@ static void prepareSampleData() { static void postFreeResource() { tmfclose(g_fpOfInsertResult); - for (int i = 0; i < g_Dbs.dbCount; i++) { + for (int i = 0; i < g_Dbs.dbCount; i++) { for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { if (0 != g_Dbs.db[i].superTbls[j].colsOfCreateChildTable) { free(g_Dbs.db[i].superTbls[j].colsOfCreateChildTable); @@ -4022,71 +4200,77 @@ static int getRowDataFromSample(char* dataBuf, int maxLen, int64_t timestamp, dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); (*sampleUsePos)++; - + return dataLen; } -static int generateRowData(char* dataBuf, int maxLen, int64_t timestamp, SSuperTable* stbInfo) { - int dataLen = 0; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); - for (int i = 0; i < stbInfo->columnCount; i++) { +static int generateRowData(char* recBuf, int64_t timestamp, SSuperTable* stbInfo) { + int dataLen = 0; + char *pstr = recBuf; + int maxLen = MAX_DATA_SIZE; + + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "(%" PRId64 ", ", timestamp); + + for (int i = 0; i < stbInfo->columnCount; i++) { if ((0 == strncasecmp(stbInfo->columns[i].dataType, "binary", 6)) || (0 == strncasecmp(stbInfo->columns[i].dataType, "nchar", 5))) { if (stbInfo->columns[i].dataLen > TSDB_MAX_BINARY_LEN) { errorPrint( "binary or nchar length overflow, max size:%u\n", (uint32_t)TSDB_MAX_BINARY_LEN); - return (-1); + return -1; } char* buf = (char*)calloc(stbInfo->columns[i].dataLen+1, 1); if (NULL == buf) { errorPrint( "calloc failed! 
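
getInfoFromJsonFile above simply slurps the whole configuration file into one buffer (the limit grows to 6400000 bytes in this change) and hands it to cJSON before dispatching on the test mode. A standalone sketch of that load-and-parse step, with the helper name being illustrative:

#include <stdio.h>
#include <stdlib.h>
#include "cJSON.h"   /* assumed available */

static cJSON *load_json_file(const char *path, size_t max_len) {
    FILE *fp = fopen(path, "r");
    if (fp == NULL) { perror(path); return NULL; }

    char *content = calloc(1, max_len + 1);
    if (content == NULL) { fclose(fp); return NULL; }

    size_t len = fread(content, 1, max_len, fp);
    fclose(fp);
    if (len == 0) { free(content); return NULL; }

    content[len] = 0;                       /* make the buffer a C string */
    cJSON *root = cJSON_Parse(content);
    if (root == NULL) {
        fprintf(stderr, "failed to parse %s near: %s\n", path, cJSON_GetErrorPtr());
    }
    free(content);
    return root;
}

int main(int argc, char *argv[]) {
    if (argc < 2) return 1;
    cJSON *root = load_json_file(argv[1], 6400000);
    if (root == NULL) return 1;
    puts("config parsed");
    cJSON_Delete(root);
    return 0;
}
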
size:%d\n", stbInfo->columns[i].dataLen); - return (-1); + return -1; } rand_string(buf, stbInfo->columns[i].dataLen); - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "\'%s\', ", buf); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\', ", buf); tmfree(buf); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "int", 3)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_int()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bigint", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "float", 5)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f, ", rand_float()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "double", 6)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%f, ", rand_double()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "smallint", 8)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_smallint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "tinyint", 7)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_tinyint()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "bool", 4)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%d, ", rand_bool()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%d, ", rand_bool()); } else if (0 == strncasecmp(stbInfo->columns[i].dataType, "timestamp", 9)) { - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "%"PRId64", ", rand_bigint()); } else { errorPrint( "No support data type: %s\n", stbInfo->columns[i].dataType); - return (-1); + return -1; } } dataLen -= 2; - dataLen += snprintf(dataBuf + dataLen, maxLen - dataLen, ")"); + dataLen += snprintf(pstr + dataLen, maxLen - dataLen, ")"); - return dataLen; + verbosePrint("%s() LN%d, recBuf:\n\t%s\n", __func__, __LINE__, recBuf); + + return strlen(recBuf); } -static int32_t generateData(char *res, char **data_type, +static int32_t generateData(char *recBuf, char **data_type, int num_of_cols, int64_t timestamp, int lenOfBinary) { - memset(res, 0, MAX_DATA_SIZE); - char *pstr = res; + memset(recBuf, 0, MAX_DATA_SIZE); + char *pstr = recBuf; pstr += sprintf(pstr, "(%" PRId64, timestamp); int c = 0; @@ -4107,7 +4291,7 @@ static int32_t generateData(char *res, char **data_type, } else if (strcasecmp(data_type[i % c], "smallint") == 0) { pstr += sprintf(pstr, ", %d", rand_smallint()); } else if (strcasecmp(data_type[i % c], "int") == 0) { - pstr += sprintf(pstr, ", %d", rand_int()); + pstr += sprintf(pstr, ", %d", rand_int()); } else if (strcasecmp(data_type[i % c], "bigint") == 0) { pstr += sprintf(pstr, ", %" PRId64, rand_bigint()); } else if (strcasecmp(data_type[i % c], "float") == 0) { @@ -4130,7 +4314,7 @@ static int32_t generateData(char *res, char **data_type, free(s); } - if (pstr - res > MAX_DATA_SIZE) { + if (strlen(recBuf) > MAX_DATA_SIZE) { perror("column length too long, abort"); exit(-1); } @@ -4138,7 
+4322,7 @@ static int32_t generateData(char *res, char **data_type, pstr += sprintf(pstr, ")"); - return (int32_t)(pstr - res); + return (int32_t)strlen(recBuf); } static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { @@ -4147,9 +4331,9 @@ static int prepareSampleDataForSTable(SSuperTable *superTblInfo) { sampleDataBuf = calloc( superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, 1); if (sampleDataBuf == NULL) { - errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", + errorPrint("%s() LN%d, Failed to calloc %d Bytes, reason:%s\n", __func__, __LINE__, - superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, + superTblInfo->lenOfOneRow * MAX_SAMPLES_ONCE_FROM_FILE, strerror(errno)); return -1; } @@ -4176,7 +4360,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) __func__, __LINE__, buffer); if (superTblInfo) { if (0 == strncasecmp(superTblInfo->insertMode, "taosc", strlen("taosc"))) { - affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE); + affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); } else { if (0 != postProceSql(g_Dbs.host, g_Dbs.port, buffer)) { affectedRows = -1; @@ -4186,7 +4370,7 @@ static int execInsert(threadInfo *pThreadInfo, char *buffer, int k) } } } else { - affectedRows = queryDbExec(pThreadInfo->taos, buffer, 1); + affectedRows = queryDbExec(pThreadInfo->taos, buffer, INSERT_TYPE, false); } return affectedRows; @@ -4199,7 +4383,8 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) if ((superTblInfo->childTblOffset >= 0) && (superTblInfo->childTblLimit > 0)) { snprintf(pTblName, TSDB_TABLE_NAME_LEN, "%s", - superTblInfo->childTblName + (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); + superTblInfo->childTblName + + (tableSeq - superTblInfo->childTblOffset) * TSDB_TABLE_NAME_LEN); } else { verbosePrint("[%d] %s() LN%d: from=%d count=%d seq=%d\n", @@ -4217,7 +4402,7 @@ static void getTableName(char *pTblName, threadInfo* pThreadInfo, int tableSeq) static int generateDataTail(char *tableName, int32_t tableSeq, threadInfo* pThreadInfo, SSuperTable* superTblInfo, - int batch, char* buffer, int64_t insertRows, + int batch, char* buffer, int remainderBufLen, int64_t insertRows, int64_t startFrom, uint64_t startTime, int *pSamplePos, int *dataLen) { int len = 0; int ncols_per_record = 1; // count first col ts @@ -4234,80 +4419,77 @@ static int generateDataTail(char *tableName, int32_t tableSeq, int k = 0; for (k = 0; k < batch;) { - if (superTblInfo) { - int retLen = 0; + char data[MAX_DATA_SIZE]; + int retLen = 0; - if (0 == strncasecmp(superTblInfo->dataSource, + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample"))) { retLen = getRowDataFromSample( - buffer + len, - superTblInfo->maxSqlLen - len, + data, + remainderBufLen, startTime + superTblInfo->timeStampStep * k, - superTblInfo, + superTblInfo, pSamplePos); - } else if (0 == strncasecmp(superTblInfo->dataSource, + } else if (0 == strncasecmp(superTblInfo->dataSource, "rand", strlen("rand"))) { int rand_num = rand_tinyint() % 100; - if (0 != superTblInfo->disorderRatio + if (0 != superTblInfo->disorderRatio && rand_num < superTblInfo->disorderRatio) { int64_t d = startTime + superTblInfo->timeStampStep * k - taosRandom() % superTblInfo->disorderRange; retLen = generateRowData( - buffer + len, - superTblInfo->maxSqlLen - len, - d, + data, + d, superTblInfo); } else { retLen = generateRowData( - buffer + len, - superTblInfo->maxSqlLen - len, + 
data, startTime + superTblInfo->timeStampStep * k, superTblInfo); - } - } + } + } - if (retLen < 0) { - return -1; - } + if (retLen > remainderBufLen) { + break; + } - len += retLen; - - if (len >= (superTblInfo->maxSqlLen - 256)) { // reserve for overwrite - k++; - break; - } + buffer += snprintf(buffer, retLen + 1, "%s", data); + k++; + len += retLen; + remainderBufLen -= retLen; } else { int rand_num = taosRandom() % 100; - char data[MAX_DATA_SIZE]; - char **data_type = g_args.datatype; - int lenOfBinary = g_args.len_of_binary; + char **data_type = g_args.datatype; + int lenOfBinary = g_args.len_of_binary; if ((g_args.disorderRatio != 0) && (rand_num < g_args.disorderRange)) { - - int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k + + int64_t d = startTime + DEFAULT_TIMESTAMP_STEP * k - taosRandom() % 1000000 + rand_num; - len = generateData(data, data_type, + retLen = generateData(data, data_type, ncols_per_record, d, lenOfBinary); } else { - len = generateData(data, data_type, + retLen = generateData(data, data_type, ncols_per_record, startTime + DEFAULT_TIMESTAMP_STEP * k, lenOfBinary); } + if (len > remainderBufLen) + break; + buffer += sprintf(buffer, " %s", data); - if (strlen(buffer) >= (g_args.max_sql_len - 256)) { // too long - k++; - break; - } + k++; + len += retLen; + remainderBufLen -= retLen; } verbosePrint("%s() LN%d len=%d k=%d \nbuffer=%s\n", __func__, __LINE__, len, k, buffer); - k++; startFrom ++; if (startFrom >= insertRows) { @@ -4327,7 +4509,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, if (AUTO_CREATE_SUBTBL == superTblInfo->autoCreateTable) { char* tagsValBuf = NULL; if (0 == superTblInfo->tagSource) { - tagsValBuf = generateTagVaulesForStb(superTblInfo); + tagsValBuf = generateTagVaulesForStb(superTblInfo, tableSeq); } else { tagsValBuf = getTagValueFromTagSample( superTblInfo, @@ -4371,7 +4553,7 @@ static int generateSQLHead(char *tableName, int32_t tableSeq, return len; } -static int generateDataBuffer(char *pTblName, +static int generateProgressiveDataBuffer(char *pTblName, int32_t tableSeq, threadInfo *pThreadInfo, char *buffer, int64_t insertRows, @@ -4391,20 +4573,25 @@ static int generateDataBuffer(char *pTblName, assert(buffer != NULL); - memset(buffer, 0, superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len); + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int remainderBufLen = maxSqlLen; + + memset(buffer, 0, maxSqlLen); char *pstr = buffer; int headLen = generateSQLHead(pTblName, tableSeq, pThreadInfo, superTblInfo, buffer); pstr += headLen; + remainderBufLen -= headLen; int k; int dataLen; k = generateDataTail(pTblName, tableSeq, pThreadInfo, superTblInfo, - g_args.num_of_RPR, pstr, insertRows, startFrom, + g_args.num_of_RPR, pstr, remainderBufLen, insertRows, startFrom, startTime, pSamplePos, &dataLen); + return k; } @@ -4413,6 +4600,18 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__); SSuperTable* superTblInfo = pThreadInfo->superTblInfo; + int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; + + int insertMode; + + if (interlaceRows > 0) { + insertMode = INTERLACE_INSERT_MODE; + } else { + insertMode = PROGRESSIVE_INSERT_MODE; + } + + // TODO: prompt tbl count multple interlace rows and batch + // char* buffer = calloc(superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len, 1); if (NULL == buffer) { @@ -4421,27 +4620,16 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { strerror(errno)); return 
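
The remainderBufLen bookkeeping introduced above replaces the old "reserve 256 bytes" heuristic: rows are generated into a scratch buffer first, and appended to the SQL buffer only while the next row still fits, so a batch stops cleanly instead of overflowing. A small standalone sketch of that append loop (names and the demo row generator are illustrative):

#include <stdio.h>
#include <string.h>

static int append_rows(char *buffer, int remainder_len, int batch,
                       int (*gen_row)(char *out, int k)) {
    char data[256];                    /* scratch row, generated before appending */
    int k = 0;
    while (k < batch) {
        int row_len = gen_row(data, k);
        if (row_len > remainder_len) {
            break;                     /* buffer full: stop before overflowing */
        }
        buffer += snprintf(buffer, row_len + 1, "%s", data);
        remainder_len -= row_len;
        k++;
    }
    return k;                          /* number of rows actually appended */
}

static int demo_row(char *out, int k) {
    return snprintf(out, 256, "(%d, %d)", 1500000000 + k, k * 10);
}

int main(void) {
    char sql[128] = "insert into d0 values";
    int used = (int)strlen(sql);
    int n = append_rows(sql + used, (int)sizeof(sql) - used - 1, 100, demo_row);
    printf("appended %d rows: %s\n", n, sql);
    return 0;
}
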
NULL; } - - int insertMode; + + char tableName[TSDB_TABLE_NAME_LEN]; - int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.rows_per_tbl; - - if (rowsPerTbl > 0) { - insertMode = INTERLACE_INSERT_MODE; - } else { - insertMode = PROGRESSIVE_INSERT_MODE; - } - - // rows per table need be less than insert batch - if (rowsPerTbl > g_args.num_of_RPR) - rowsPerTbl = g_args.num_of_RPR; - pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; int64_t insertRows = (superTblInfo)?superTblInfo->insertRows:g_args.num_of_DPT; - int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + int insert_interval = + superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; uint64_t st = 0; uint64_t et = 0xffffffff; @@ -4462,13 +4650,13 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { assert(pThreadInfo->ntables > 0); - if (rowsPerTbl > g_args.num_of_RPR) - rowsPerTbl = g_args.num_of_RPR; + if (interlaceRows > g_args.num_of_RPR) + interlaceRows = g_args.num_of_RPR; - batchPerTbl = rowsPerTbl; - if ((rowsPerTbl > 0) && (pThreadInfo->ntables > 1)) { + batchPerTbl = interlaceRows; + if ((interlaceRows > 0) && (pThreadInfo->ntables > 1)) { batchPerTblTimes = - (g_args.num_of_RPR / (rowsPerTbl * pThreadInfo->ntables)) + 1; + (g_args.num_of_RPR / (interlaceRows * pThreadInfo->ntables)) + 1; } else { batchPerTblTimes = 1; } @@ -4476,19 +4664,30 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { int generatedRecPerTbl = 0; bool flagSleep = true; int sleepTimeTotal = 0; + + int maxSqlLen = superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len; + int remainderBufLen; + while(pThreadInfo->totalInsertRows < pThreadInfo->ntables * insertRows) { if ((flagSleep) && (insert_interval)) { st = taosGetTimestampUs(); flagSleep = false; } // generate data - memset(buffer, 0, superTblInfo?superTblInfo->maxSqlLen:g_args.max_sql_len); + memset(buffer, 0, maxSqlLen); + remainderBufLen = maxSqlLen; char *pstr = buffer; int recOfBatch = 0; for (int i = 0; i < batchPerTblTimes; i ++) { getTableName(tableName, pThreadInfo, tableSeq); + if (0 == strlen(tableName)) { + errorPrint("[%d] %s() LN%d, getTableName return null\n", + pThreadInfo->threadID, __func__, __LINE__); + return NULL; + exit(-1); + } int headLen; if (i == 0) { @@ -4505,21 +4704,38 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { pThreadInfo->threadID, __func__, __LINE__, i, buffer); pstr += headLen; + remainderBufLen -= headLen; + int dataLen = 0; verbosePrint("[%d] %s() LN%d i=%d batchPerTblTimes=%d batchPerTbl = %d\n", pThreadInfo->threadID, __func__, __LINE__, i, batchPerTblTimes, batchPerTbl); - generateDataTail( + + if (superTblInfo) { + if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { + startTime = taosGetTimestamp(pThreadInfo->time_precision); + } + } else { + startTime = 1500000000000; + } + int generated = generateDataTail( tableName, tableSeq, pThreadInfo, superTblInfo, - batchPerTbl, pstr, insertRows, 0, + batchPerTbl, pstr, remainderBufLen, insertRows, 0, startTime, &(pThreadInfo->samplePos), &dataLen); + if (generated < 0) { + debugPrint("[%d] %s() LN%d, generated data is %d\n", + pThreadInfo->threadID, __func__, __LINE__, generated); + goto free_and_statistics_interlace; + } pstr += dataLen; - recOfBatch += batchPerTbl; - pThreadInfo->totalInsertRows += batchPerTbl; + remainderBufLen -= dataLen; + recOfBatch += batchPerTbl; +// startTime += batchPerTbl * superTblInfo->timeStampStep; + pThreadInfo->totalInsertRows += batchPerTbl; 
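
The interlace scheduling above sizes each request as batchPerTbl rows per table visit and batchPerTblTimes table visits per request, so one insert statement interleaves several child tables without exceeding num_of_records_per_req. A tiny illustrative calculation of those two numbers, mirroring the arithmetic in the hunk:

#include <stdio.h>

static void plan_interlace(int interlace_rows, int ntables, int num_of_RPR) {
    /* rows written for one table before moving to the next */
    int batchPerTbl = (interlace_rows > num_of_RPR) ? num_of_RPR : interlace_rows;

    /* how many tables one request visits */
    int batchPerTblTimes;
    if (interlace_rows > 0 && ntables > 1) {
        batchPerTblTimes = num_of_RPR / (batchPerTbl * ntables) + 1;
    } else {
        batchPerTblTimes = 1;
    }

    printf("rows per table per request: %d, table visits per request: %d\n",
           batchPerTbl, batchPerTblTimes);
}

int main(void) {
    plan_interlace(10, 3, 100);   /* -> 10 rows per table, 4 visits per request */
    return 0;
}
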
verbosePrint("[%d] %s() LN%d batchPerTbl=%d recOfBatch=%d\n", pThreadInfo->threadID, __func__, __LINE__, batchPerTbl, recOfBatch); @@ -4528,9 +4744,12 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { if (insertMode == INTERLACE_INSERT_MODE) { if (tableSeq == pThreadInfo->start_table_from + pThreadInfo->ntables) { // turn to first table - startTime += batchPerTbl * superTblInfo->timeStampStep; tableSeq = pThreadInfo->start_table_from; generatedRecPerTbl += batchPerTbl; + + startTime = pThreadInfo->start_time + + generatedRecPerTbl * superTblInfo->timeStampStep; + flagSleep = true; if (generatedRecPerTbl >= insertRows) break; @@ -4608,9 +4827,9 @@ static void* syncWriteInterlace(threadInfo *pThreadInfo) { free_and_statistics_interlace: tmfree(buffer); - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, + printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); return NULL; } @@ -4635,22 +4854,26 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { strerror(errno)); return NULL; } - + int64_t lastPrintTime = taosGetTimestampMs(); int64_t startTs = taosGetTimestampUs(); int64_t endTs; - int timeStampStep = superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; - int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + int timeStampStep = + superTblInfo?superTblInfo->timeStampStep:DEFAULT_TIMESTAMP_STEP; +/* int insert_interval = + superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; uint64_t st = 0; uint64_t et = 0xffffffff; + */ pThreadInfo->totalInsertRows = 0; pThreadInfo->totalAffectedRows = 0; pThreadInfo->samplePos = 0; - for (uint32_t tableSeq = pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; + for (uint32_t tableSeq = + pThreadInfo->start_table_from; tableSeq <= pThreadInfo->end_table_to; tableSeq ++) { int64_t start_time = pThreadInfo->start_time; @@ -4658,9 +4881,11 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { verbosePrint("%s() LN%d insertRows=%"PRId64"\n", __func__, __LINE__, insertRows); for (int64_t i = 0; i < insertRows;) { + /* if (insert_interval) { st = taosGetTimestampUs(); } + */ char tableName[TSDB_TABLE_NAME_LEN]; getTableName(tableName, pThreadInfo, tableSeq); @@ -4668,7 +4893,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { __func__, __LINE__, pThreadInfo->threadID, tableSeq, tableName); - int generated = generateDataBuffer( + int generated = generateProgressiveDataBuffer( tableName, tableSeq, pThreadInfo, buffer, insertRows, i, start_time, &(pThreadInfo->samplePos)); @@ -4710,7 +4935,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { if (i >= insertRows) break; - +/* if (insert_interval) { et = taosGetTimestampUs(); @@ -4721,6 +4946,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { taosMsleep(sleep_time); // ms } } + */ } // num_of_DPT if ((tableSeq == pThreadInfo->ntables - 1) && superTblInfo && @@ -4734,21 +4960,21 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { free_and_statistics_2: tmfree(buffer); - printf("====thread[%d] completed total inserted rows: %"PRId64 ", total affected rows: %"PRId64 "====\n", - pThreadInfo->threadID, - pThreadInfo->totalInsertRows, + printf("====thread[%d] completed total inserted rows: %"PRId64 ", total 
affected rows: %"PRId64 "====\n", + pThreadInfo->threadID, + pThreadInfo->totalInsertRows, pThreadInfo->totalAffectedRows); return NULL; } static void* syncWrite(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; + threadInfo *winfo = (threadInfo *)sarg; SSuperTable* superTblInfo = winfo->superTblInfo; - int rowsPerTbl = superTblInfo?superTblInfo->rowsPerTbl:g_args.rows_per_tbl; + int interlaceRows = superTblInfo?superTblInfo->interlaceRows:g_args.interlace_rows; - if (rowsPerTbl > 0) { + if (interlaceRows > 0) { // interlace mode return syncWriteInterlace(winfo); } else { @@ -4758,19 +4984,20 @@ static void* syncWrite(void *sarg) { } static void callBack(void *param, TAOS_RES *res, int code) { - threadInfo* winfo = (threadInfo*)param; + threadInfo* winfo = (threadInfo*)param; SSuperTable* superTblInfo = winfo->superTblInfo; - int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + int insert_interval = + superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { winfo->et = taosGetTimestampUs(); if (((winfo->et - winfo->st)/1000) < insert_interval) { taosMsleep(insert_interval - (winfo->et - winfo->st)/1000); // ms } } - + char *buffer = calloc(1, winfo->superTblInfo->maxSqlLen); - char *data = calloc(1, MAX_DATA_SIZE); + char data[MAX_DATA_SIZE]; char *pstr = buffer; pstr += sprintf(pstr, "insert into %s.%s%d values", winfo->db_name, winfo->tb_prefix, winfo->start_table_from); @@ -4782,21 +5009,18 @@ static void callBack(void *param, TAOS_RES *res, int code) { if (winfo->start_table_from > winfo->end_table_to) { tsem_post(&winfo->lock_sem); free(buffer); - free(data); taos_free_result(res); return; } - + for (int i = 0; i < g_args.num_of_RPR; i++) { int rand_num = taosRandom() % 100; - if (0 != winfo->superTblInfo->disorderRatio && rand_num < winfo->superTblInfo->disorderRatio) - { + if (0 != winfo->superTblInfo->disorderRatio + && rand_num < winfo->superTblInfo->disorderRatio) { int64_t d = winfo->lastTs - taosRandom() % 1000000 + rand_num; - //generateData(data, datatype, ncols_per_record, d, len_of_binary); - (void)generateRowData(data, MAX_DATA_SIZE, d, winfo->superTblInfo); + generateRowData(data, d, winfo->superTblInfo); } else { - //generateData(data, datatype, ncols_per_record, start_time += 1000, len_of_binary); - (void)generateRowData(data, MAX_DATA_SIZE, winfo->lastTs += 1000, winfo->superTblInfo); + generateRowData(data, winfo->lastTs += 1000, winfo->superTblInfo); } pstr += sprintf(pstr, "%s", data); winfo->counter++; @@ -4811,7 +5035,6 @@ static void callBack(void *param, TAOS_RES *res, int code) { } taos_query_a(winfo->taos, buffer, callBack, winfo); free(buffer); - free(data); taos_free_result(res); } @@ -4823,8 +5046,9 @@ static void *asyncWrite(void *sarg) { winfo->st = 0; winfo->et = 0; winfo->lastTs = winfo->start_time; - - int insert_interval = superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; + + int insert_interval = + superTblInfo?superTblInfo->insertInterval:g_args.insert_interval; if (insert_interval) { winfo->st = taosGetTimestampUs(); } @@ -4838,39 +5062,14 @@ static void *asyncWrite(void *sarg) { static void startMultiThreadInsertData(int threads, char* db_name, char* precision,SSuperTable* superTblInfo) { - pthread_t *pids = malloc(threads * sizeof(pthread_t)); - assert(pids != NULL); + pthread_t *pids = malloc(threads * sizeof(pthread_t)); + assert(pids != NULL); - threadInfo *infos = malloc(threads * sizeof(threadInfo)); - assert(infos != NULL); + threadInfo *infos = 
malloc(threads * sizeof(threadInfo)); + assert(infos != NULL); - memset(pids, 0, threads * sizeof(pthread_t)); - memset(infos, 0, threads * sizeof(threadInfo)); - - int ntables = 0; - if (superTblInfo) { - - if ((superTblInfo->childTblOffset >= 0) - && (superTblInfo->childTblLimit > 0)) { - - ntables = superTblInfo->childTblLimit; - } else { - ntables = superTblInfo->childTblCount; - } - } else { - ntables = g_args.num_of_tables; - } - - int a = ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; - } - - int b = 0; - if (threads != 0) { - b = ntables % threads; - } + memset(pids, 0, threads * sizeof(pthread_t)); + memset(infos, 0, threads * sizeof(threadInfo)); //TAOS* taos; //if (0 == strncasecmp(superTblInfo->insertMode, "taosc", 5)) { @@ -4888,12 +5087,12 @@ static void startMultiThreadInsertData(int threads, char* db_name, } else if (0 == strncasecmp(precision, "us", 2)) { timePrec = TSDB_TIME_PRECISION_MICRO; } else { - errorPrint( "No support precision: %s\n", precision); + errorPrint("Not support precision: %s\n", precision); exit(-1); } } - int64_t start_time; + int64_t start_time; if (superTblInfo) { if (0 == strncasecmp(superTblInfo->startTimestamp, "now", 3)) { start_time = taosGetTimestamp(timePrec); @@ -4911,28 +5110,23 @@ static void startMultiThreadInsertData(int threads, char* db_name, } double start = getCurrentTime(); - - int startFrom; - - if ((superTblInfo) && (superTblInfo->childTblOffset >= 0)) - startFrom = superTblInfo->childTblOffset; - else - startFrom = 0; // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, + if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample")))) { if (0 != prepareSampleDataForSTable(superTblInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); + errorPrint("%s() LN%d, prepare sample data for stable failed!\n", + __func__, __LINE__); exit(-1); } } // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, + if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, "sample", strlen("sample")))) { if (0 != prepareSampleDataForSTable(superTblInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", __func__, __LINE__); + errorPrint("%s() LN%d, prepare sample data for stable failed!\n", + __func__, __LINE__); exit(-1); } } @@ -4946,18 +5140,36 @@ static void startMultiThreadInsertData(int threads, char* db_name, exit(-1); } - if (superTblInfo) { + int ntables = 0; + int startFrom; + if (superTblInfo) { int limit, offset; - if (superTblInfo && (superTblInfo->childTblOffset >= 0) - && (superTblInfo->childTblLimit > 0)) { - limit = superTblInfo->childTblLimit; - offset = superTblInfo->childTblOffset; + + if (superTblInfo->childTblOffset >= superTblInfo->childTblCount) { + printf("WARNING: specified offset >= child table count! 
\n"); + if (!g_args.answer_yes) { + printf(" Press enter key to continue or Ctrl-C to stop\n\n"); + (void)getchar(); + } + } + + if (superTblInfo->childTblOffset >= 0) { + if (superTblInfo->childTblLimit <= 0) { + superTblInfo->childTblLimit = + superTblInfo->childTblCount - superTblInfo->childTblOffset; + } + + offset = superTblInfo->childTblOffset; + limit = superTblInfo->childTblLimit; } else { limit = superTblInfo->childTblCount; offset = 0; } + ntables = limit; + startFrom = offset; + superTblInfo->childTblName = (char*)calloc(1, limit * TSDB_TABLE_NAME_LEN); if (superTblInfo->childTblName == NULL) { @@ -4973,13 +5185,29 @@ static void startMultiThreadInsertData(int threads, char* db_name, &superTblInfo->childTblName, &childTblCount, limit, offset); + } else { + ntables = g_args.num_of_tables; + startFrom = 0; } + taos_close(taos); + int a = ntables / threads; + if (a < 1) { + threads = ntables; + a = 1; + } + + int b = 0; + if (threads != 0) { + b = ntables % threads; + } + for (int i = 0; i < threads; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; tstrncpy(t_info->db_name, db_name, MAX_DB_NAME_SIZE); + t_info->time_precision = timePrec; t_info->superTblInfo = superTblInfo; t_info->start_time = start_time; @@ -4992,7 +5220,8 @@ static void startMultiThreadInsertData(int threads, char* db_name, g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); if (NULL == t_info->taos) { - errorPrint( "connect to server fail from insert sub thread, reason: %s\n", + errorPrint( + "connect to server fail from insert sub thread, reason: %s\n", taos_errstr(NULL)); exit(-1); } @@ -5091,7 +5320,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); fprintf(g_fpOfInsertResult, "insert delay, avg:%10.6fms, max: %10.6fms, min: %10.6fms\n\n", avgDelay/1000.0, (double)maxDelay/1000.0, (double)minDelay/1000.0); - + //taos_close(taos); free(pids); @@ -5099,7 +5328,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, } static void *readTable(void *sarg) { -#if 1 +#if 1 threadInfo *rinfo = (threadInfo *)sarg; TAOS *taos = rinfo->taos; char command[BUFFER_SIZE] = "\0"; @@ -5134,7 +5363,8 @@ static void *readTable(void *sarg) { double totalT = 0; int count = 0; for (int i = 0; i < num_of_tables; i++) { - sprintf(command, "select %s from %s%d where ts>= %" PRId64, aggreFunc[j], tb_prefix, i, sTime); + sprintf(command, "select %s from %s%d where ts>= %" PRId64, + aggreFunc[j], tb_prefix, i, sTime); double t = getCurrentTime(); TAOS_RES *pSql = taos_query(taos, command); @@ -5170,7 +5400,7 @@ static void *readTable(void *sarg) { } static void *readMetric(void *sarg) { -#if 1 +#if 1 threadInfo *rinfo = (threadInfo *)sarg; TAOS *taos = rinfo->taos; char command[BUFFER_SIZE] = "\0"; @@ -5265,11 +5495,11 @@ static int insertTestProcess() { printf("Press enter key to continue\n\n"); (void)getchar(); } - + init_rand_data(); // create database and super tables - if(createDatabases() != 0) { + if(createDatabasesAndStables() != 0) { fclose(g_fpOfInsertResult); return -1; } @@ -5287,10 +5517,10 @@ static int insertTestProcess() { if (g_totalChildTables > 0) { printf("Spent %.4f seconds to create %d tables with %d thread(s)\n\n", - end - start, g_totalChildTables, g_Dbs.threadCount); + end - start, g_totalChildTables, g_Dbs.threadCountByCreateTbl); fprintf(g_fpOfInsertResult, "Spent %.4f seconds to create %d tables with %d thread(s)\n\n", - end - start, g_totalChildTables, g_Dbs.threadCount); + end - 
start, g_totalChildTables, g_Dbs.threadCountByCreateTbl); } taosMsleep(1000); @@ -5304,16 +5534,16 @@ static int insertTestProcess() { continue; } startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, superTblInfo); } } else { startMultiThreadInsertData( - g_Dbs.threadCount, - g_Dbs.db[i].dbName, - g_Dbs.db[i].dbCfg.precision, + g_Dbs.threadCount, + g_Dbs.db[i].dbName, + g_Dbs.db[i].dbCfg.precision, NULL); } } @@ -5321,7 +5551,7 @@ static int insertTestProcess() { //int64_t totalInsertRows = 0; //int64_t totalAffectedRows = 0; - //for (int i = 0; i < g_Dbs.dbCount; i++) { + //for (int i = 0; i < g_Dbs.dbCount; i++) { // for (int j = 0; j < g_Dbs.db[i].superTblCount; j++) { // totalInsertRows+= g_Dbs.db[i].superTbls[j].totalInsertRows; // totalAffectedRows += g_Dbs.db[i].superTbls[j].totalAffectedRows; @@ -5333,48 +5563,74 @@ static int insertTestProcess() { } static void *superQueryProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; + threadInfo *winfo = (threadInfo *)sarg; + + if (winfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + winfo->threadID, taos_errstr(NULL)); + return NULL; + } else { + winfo->taos = taos; + } + } + + char sqlStr[MAX_DB_NAME_SIZE + 5]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(winfo->taos); + errorPrint( "use database %s failed!\n\n", + g_queryInfo.dbName); + return NULL; + } - //char sqlStr[MAX_TB_NAME_SIZE*2]; - //sprintf(sqlStr, "use %s", g_queryInfo.dbName); - //queryDB(winfo->taos, sqlStr); - int64_t st = 0; int64_t et = 0; - while (1) { - if (g_queryInfo.superQueryInfo.rate && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + + int queryTimes = g_queryInfo.specifiedQueryInfo.queryTimes; + + while(queryTimes --) { + if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < + (int64_t)g_queryInfo.specifiedQueryInfo.rate*1000) { + taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); } st = taosGetTimestampUs(); - for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { int64_t t1 = taosGetTimestampUs(); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.superQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); + if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); } - selectAndGetResult(winfo->taos, g_queryInfo.superQueryInfo.sql[i], tmpFile); - int64_t t2 = taosGetTimestampUs(); - printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", + selectAndGetResult(winfo->taos, g_queryInfo.specifiedQueryInfo.sql[i], tmpFile); + int64_t t2 = taosGetTimestampUs(); + printf("=[taosc] thread[%"PRId64"] complete one sql, Spent %f s\n", 
taosGetSelfPthreadId(), (t2 - t1)/1000000.0); } else { int64_t t1 = taosGetTimestampUs(); - int retCode = postProceSql(g_queryInfo.host, - g_queryInfo.port, g_queryInfo.superQueryInfo.sql[i]); - int64_t t2 = taosGetTimestampUs(); - printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", + int retCode = postProceSql(g_queryInfo.host, + g_queryInfo.port, g_queryInfo.specifiedQueryInfo.sql[i]); + int64_t t2 = taosGetTimestampUs(); + printf("=[restful] thread[%"PRId64"] complete one sql, Spent %f s\n", taosGetSelfPthreadId(), (t2 - t1)/1000000.0); - + if (0 != retCode) { printf("====restful return fail, threadID[%d]\n", winfo->threadID); return NULL; } - } + } } et = taosGetTimestampUs(); - printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", + printf("==thread[%"PRId64"] complete all sqls to specify tables once queries duration:%.6fs\n\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); } return NULL; @@ -5383,205 +5639,230 @@ static void *superQueryProcess(void *sarg) { static void replaceSubTblName(char* inSql, char* outSql, int tblIndex) { char sourceString[32] = "xxxx"; char subTblName[MAX_TB_NAME_SIZE*3]; - sprintf(subTblName, "%s.%s", - g_queryInfo.dbName, - g_queryInfo.subQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); + sprintf(subTblName, "%s.%s", + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.childTblName + tblIndex*TSDB_TABLE_NAME_LEN); //printf("inSql: %s\n", inSql); - + char* pos = strstr(inSql, sourceString); if (0 == pos) { - return; + return; } - + tstrncpy(outSql, inSql, pos - inSql + 1); //printf("1: %s\n", outSql); - strcat(outSql, subTblName); - //printf("2: %s\n", outSql); - strcat(outSql, pos+strlen(sourceString)); - //printf("3: %s\n", outSql); + strcat(outSql, subTblName); + //printf("2: %s\n", outSql); + strcat(outSql, pos+strlen(sourceString)); + //printf("3: %s\n", outSql); } static void *subQueryProcess(void *sarg) { char sqlstr[1024]; - threadInfo *winfo = (threadInfo *)sarg; + threadInfo *winfo = (threadInfo *)sarg; + + if (winfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + winfo->threadID, taos_errstr(NULL)); + return NULL; + } else { + winfo->taos = taos; + } + } + int64_t st = 0; - int64_t et = (int64_t)g_queryInfo.subQueryInfo.rate*1000; - while (1) { - if (g_queryInfo.subQueryInfo.rate - && (et - st) < (int64_t)g_queryInfo.subQueryInfo.rate*1000) { - taosMsleep(g_queryInfo.subQueryInfo.rate*1000 - (et - st)); // ms + int64_t et = (int64_t)g_queryInfo.superQueryInfo.rate*1000; + + int queryTimes = g_queryInfo.superQueryInfo.queryTimes; + + while(queryTimes --) { + if (g_queryInfo.superQueryInfo.rate + && (et - st) < (int64_t)g_queryInfo.superQueryInfo.rate*1000) { + taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); } st = taosGetTimestampUs(); for (int i = winfo->start_table_from; i <= winfo->end_table_to; i++) { - for (int j = 0; j < g_queryInfo.subQueryInfo.sqlCount; j++) { + for (int j = 0; j < g_queryInfo.superQueryInfo.sqlCount; j++) { memset(sqlstr,0,sizeof(sqlstr)); - replaceSubTblName(g_queryInfo.subQueryInfo.sql[j], sqlstr, i); + replaceSubTblName(g_queryInfo.superQueryInfo.sql[j], sqlstr, i); char 
tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.subQueryInfo.result[i], + if (g_queryInfo.superQueryInfo.result[j][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.superQueryInfo.result[j], winfo->threadID); } - selectAndGetResult(winfo->taos, sqlstr, tmpFile); + selectAndGetResult(winfo->taos, sqlstr, tmpFile); } } et = taosGetTimestampUs(); - printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", - taosGetSelfPthreadId(), - winfo->start_table_from, - winfo->end_table_to, + printf("####thread[%"PRId64"] complete all sqls to allocate all sub-tables[%d - %d] once queries duration:%.4fs\n\n", + taosGetSelfPthreadId(), + winfo->start_table_from, + winfo->end_table_to, (double)(et - st)/1000000.0); } + return NULL; } static int queryTestProcess() { - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - NULL, + + setupForAnsiEscape(); + printfQueryMeta(); + resetAfterAnsiEscape(); + + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + NULL, g_queryInfo.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); exit(-1); } - if (0 != g_queryInfo.subQueryInfo.sqlCount) { + if (0 != g_queryInfo.superQueryInfo.sqlCount) { getAllChildNameOfSuperTable(taos, g_queryInfo.dbName, - g_queryInfo.subQueryInfo.sTblName, - &g_queryInfo.subQueryInfo.childTblName, - &g_queryInfo.subQueryInfo.childTblCount); - } - - printfQueryMeta(); - + g_queryInfo.superQueryInfo.sTblName, + &g_queryInfo.superQueryInfo.childTblName, + &g_queryInfo.superQueryInfo.childTblCount); + } + if (!g_args.answer_yes) { printf("Press enter key to continue\n\n"); (void)getchar(); } - + printfQuerySystemInfo(taos); - + pthread_t *pids = NULL; threadInfo *infos = NULL; //==== create sub threads for query from specify table - if (g_queryInfo.superQueryInfo.sqlCount > 0 - && g_queryInfo.superQueryInfo.concurrent > 0) { + if (g_queryInfo.specifiedQueryInfo.sqlCount > 0 + && g_queryInfo.specifiedQueryInfo.concurrent > 0) { - pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); + pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); if (NULL == pids) { taos_close(taos); ERROR_EXIT("memory allocation failed\n"); } - infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); + infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); if (NULL == infos) { taos_close(taos); free(pids); ERROR_EXIT("memory allocation failed for create threads\n"); } - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; if (0 == strncasecmp(g_queryInfo.queryMode, "taosc", 5)) { - t_info->taos = taos; char sqlStr[MAX_TB_NAME_SIZE*2]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); verbosePrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(t_info->taos, sqlStr, NO_INSERT_TYPE)) { + if (0 != queryDbExec(taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(taos); free(infos); free(pids); errorPrint( "use database %s failed!\n\n", g_queryInfo.dbName); return -1; } - } else { - t_info->taos = NULL; } + t_info->taos = NULL;// TODO: workaround to 
use separate taos connection; + pthread_create(pids + i, NULL, superQueryProcess, t_info); } - }else { - g_queryInfo.superQueryInfo.concurrent = 0; + } else { + g_queryInfo.specifiedQueryInfo.concurrent = 0; } + taos_close(taos); + pthread_t *pidsOfSub = NULL; threadInfo *infosOfSub = NULL; //==== create sub threads for query from all sub table of the super table - if ((g_queryInfo.subQueryInfo.sqlCount > 0) - && (g_queryInfo.subQueryInfo.threadCnt > 0)) { - pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(pthread_t)); + if ((g_queryInfo.superQueryInfo.sqlCount > 0) + && (g_queryInfo.superQueryInfo.threadCnt > 0)) { + pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); if (NULL == pidsOfSub) { - taos_close(taos); free(infos); free(pids); ERROR_EXIT("memory allocation failed for create threads\n"); } - infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * sizeof(threadInfo)); + infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); if (NULL == infosOfSub) { - taos_close(taos); free(pidsOfSub); free(infos); free(pids); ERROR_EXIT("memory allocation failed for create threads\n"); } - int ntables = g_queryInfo.subQueryInfo.childTblCount; - int threads = g_queryInfo.subQueryInfo.threadCnt; - + int ntables = g_queryInfo.superQueryInfo.childTblCount; + int threads = g_queryInfo.superQueryInfo.threadCnt; + int a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - + int b = 0; if (threads != 0) { b = ntables % threads; } int startFrom = 0; - for (int i = 0; i < threads; i++) { + for (int i = 0; i < threads; i++) { threadInfo *t_info = infosOfSub + i; t_info->threadID = i; - + t_info->start_table_from = startFrom; t_info->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; - t_info->taos = taos; + t_info->taos = NULL; // TODO: workaround to use separate taos connection; pthread_create(pidsOfSub + i, NULL, subQueryProcess, t_info); } - g_queryInfo.subQueryInfo.threadCnt = threads; + g_queryInfo.superQueryInfo.threadCnt = threads; } else { - g_queryInfo.subQueryInfo.threadCnt = 0; - } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + g_queryInfo.superQueryInfo.threadCnt = 0; + } + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { pthread_join(pids[i], NULL); } tmfree((char*)pids); tmfree((char*)infos); - - for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { + + for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { pthread_join(pidsOfSub[i], NULL); } tmfree((char*)pidsOfSub); tmfree((char*)infosOfSub); - - taos_close(taos); + +// taos_close(taos);// TODO: workaround to use separate taos connection; return 0; } @@ -5599,14 +5880,14 @@ static void subscribe_callback(TAOS_SUB* tsub, TAOS_RES *res, void* param, int c static TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultFileName) { TAOS_SUB* tsub = NULL; - if (g_queryInfo.superQueryInfo.subscribeMode) { + if (g_queryInfo.specifiedQueryInfo.subscribeMode) { tsub = taos_subscribe(taos, - g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeRestart, topic, sql, subscribe_callback, (void*)resultFileName, - g_queryInfo.superQueryInfo.subscribeInterval); + g_queryInfo.specifiedQueryInfo.subscribeInterval); } else { tsub = taos_subscribe(taos, - g_queryInfo.superQueryInfo.subscribeRestart, + g_queryInfo.specifiedQueryInfo.subscribeRestart, topic, sql, NULL, NULL, 0); } @@ -5621,85 +5902,38 @@ static 
TAOS_SUB* subscribeImpl(TAOS *taos, char *sql, char* topic, char* resultF static void *subSubscribeProcess(void *sarg) { threadInfo *winfo = (threadInfo *)sarg; char subSqlstr[1024]; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; + + if (winfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + winfo->threadID, taos_errstr(NULL)); + return NULL; + } else { + winfo->taos = taos; + } + } char sqlStr[MAX_TB_NAME_SIZE*2]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); - debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)){ + if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(winfo->taos); + errorPrint( "use database %s failed!\n\n", + g_queryInfo.dbName); return NULL; } - + //int64_t st = 0; //int64_t et = 0; do { - //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms - // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); - //} - - //st = taosGetTimestampMs(); - char topic[32] = {0}; - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - sprintf(topic, "taosdemo-subscribe-%d", i); - memset(subSqlstr,0,sizeof(subSqlstr)); - replaceSubTblName(g_queryInfo.subQueryInfo.sql[i], subSqlstr, i); - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", g_queryInfo.subQueryInfo.result[i], winfo->threadID); - } - g_queryInfo.subQueryInfo.tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); - if (NULL == g_queryInfo.subQueryInfo.tsub[i]) { - return NULL; - } - } - //et = taosGetTimestampMs(); - //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); - } while (0); - - // start loop to consume result - TAOS_RES* res = NULL; - while (1) { - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - if (1 == g_queryInfo.subQueryInfo.subscribeMode) { - continue; - } - - res = taos_consume(g_queryInfo.subQueryInfo.tsub[i]); - if (res) { - char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { - sprintf(tmpFile, "%s-%d", - g_queryInfo.subQueryInfo.result[i], - winfo->threadID); - } - getResult(res, tmpFile); - } - } - } - taos_free_result(res); - - for (int i = 0; i < g_queryInfo.subQueryInfo.sqlCount; i++) { - taos_unsubscribe(g_queryInfo.subQueryInfo.tsub[i], - g_queryInfo.subQueryInfo.subscribeKeepProgress); - } - return NULL; -} - -static void *superSubscribeProcess(void *sarg) { - threadInfo *winfo = (threadInfo *)sarg; - - char sqlStr[MAX_TB_NAME_SIZE*2]; - sprintf(sqlStr, "use %s", g_queryInfo.dbName); - debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); - if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE)) { - return NULL; - } - - //int64_t st = 0; - //int64_t et = 0; - do { - //if (g_queryInfo.superQueryInfo.rate && (et - st) < g_queryInfo.superQueryInfo.rate*1000) { - // taosMsleep(g_queryInfo.superQueryInfo.rate*1000 - (et - st)); // ms + //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { + 
// taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); //} @@ -5707,16 +5941,16 @@ static void *superSubscribeProcess(void *sarg) { char topic[32] = {0}; for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { sprintf(topic, "taosdemo-subscribe-%d", i); + memset(subSqlstr,0,sizeof(subSqlstr)); + replaceSubTblName(g_queryInfo.superQueryInfo.sql[i], subSqlstr, i); char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; - if (g_queryInfo.subQueryInfo.result[i][0] != 0) { + if (g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", g_queryInfo.superQueryInfo.result[i], winfo->threadID); } - g_queryInfo.superQueryInfo.tsub[i] = - subscribeImpl(winfo->taos, - g_queryInfo.superQueryInfo.sql[i], - topic, tmpFile); - if (NULL == g_queryInfo.superQueryInfo.tsub[i]) { + tsub[i] = subscribeImpl(winfo->taos, subSqlstr, topic, tmpFile); + if (NULL == tsub[i]) { + taos_close(winfo->taos); return NULL; } } @@ -5732,12 +5966,13 @@ static void *superSubscribeProcess(void *sarg) { continue; } - res = taos_consume(g_queryInfo.superQueryInfo.tsub[i]); + res = taos_consume(tsub[i]); if (res) { char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; if (g_queryInfo.superQueryInfo.result[i][0] != 0) { sprintf(tmpFile, "%s-%d", - g_queryInfo.superQueryInfo.result[i], winfo->threadID); + g_queryInfo.superQueryInfo.result[i], + winfo->threadID); } getResult(res, tmpFile); } @@ -5746,94 +5981,184 @@ static void *superSubscribeProcess(void *sarg) { taos_free_result(res); for (int i = 0; i < g_queryInfo.superQueryInfo.sqlCount; i++) { - taos_unsubscribe(g_queryInfo.superQueryInfo.tsub[i], - g_queryInfo.superQueryInfo.subscribeKeepProgress); + taos_unsubscribe(tsub[i], g_queryInfo.superQueryInfo.subscribeKeepProgress); } + + taos_close(winfo->taos); + return NULL; +} + +static void *superSubscribeProcess(void *sarg) { + threadInfo *winfo = (threadInfo *)sarg; + TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT] = {0}; + + if (winfo->taos == NULL) { + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, + g_queryInfo.port); + if (taos == NULL) { + errorPrint("[%d] Failed to connect to TDengine, reason:%s\n", + winfo->threadID, taos_errstr(NULL)); + return NULL; + } else { + winfo->taos = taos; + } + } + + char sqlStr[MAX_TB_NAME_SIZE*2]; + sprintf(sqlStr, "use %s", g_queryInfo.dbName); + debugPrint("%s() %d sqlStr: %s\n", __func__, __LINE__, sqlStr); + if (0 != queryDbExec(winfo->taos, sqlStr, NO_INSERT_TYPE, false)) { + taos_close(winfo->taos); + return NULL; + } + + //int64_t st = 0; + //int64_t et = 0; + do { + //if (g_queryInfo.specifiedQueryInfo.rate && (et - st) < g_queryInfo.specifiedQueryInfo.rate*1000) { + // taosMsleep(g_queryInfo.specifiedQueryInfo.rate*1000 - (et - st)); // ms + // //printf("========sleep duration:%"PRId64 "========inserted rows:%d, table range:%d - %d\n", (1000 - (et - st)), i, winfo->start_table_from, winfo->end_table_to); + //} + + //st = taosGetTimestampMs(); + char topic[32] = {0}; + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + sprintf(topic, "taosdemo-subscribe-%d", i); + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.superQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); + } + tsub[i] = subscribeImpl(winfo->taos, 
g_queryInfo.specifiedQueryInfo.sql[i], topic, tmpFile); + if (NULL == g_queryInfo.specifiedQueryInfo.tsub[i]) { + taos_close(winfo->taos); + return NULL; + } + } + //et = taosGetTimestampMs(); + //printf("========thread[%"PRId64"] complete all sqls to super table once queries duration:%.4fs\n", taosGetSelfPthreadId(), (double)(et - st)/1000.0); + } while (0); + + // start loop to consume result + TAOS_RES* res = NULL; + while (1) { + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + if (1 == g_queryInfo.specifiedQueryInfo.subscribeMode) { + continue; + } + + res = taos_consume(tsub[i]); + if (res) { + char tmpFile[MAX_FILE_NAME_LEN*2] = {0}; + if (g_queryInfo.specifiedQueryInfo.result[i][0] != 0) { + sprintf(tmpFile, "%s-%d", + g_queryInfo.specifiedQueryInfo.result[i], winfo->threadID); + } + getResult(res, tmpFile); + } + } + } + taos_free_result(res); + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.sqlCount; i++) { + taos_unsubscribe(tsub[i], g_queryInfo.specifiedQueryInfo.subscribeKeepProgress); + } + + taos_close(winfo->taos); return NULL; } static int subscribeTestProcess() { + setupForAnsiEscape(); printfQueryMeta(); + resetAfterAnsiEscape(); if (!g_args.answer_yes) { printf("Press enter key to continue\n\n"); - (void)getchar(); + (void) getchar(); } - TAOS * taos = NULL; - taos = taos_connect(g_queryInfo.host, - g_queryInfo.user, - g_queryInfo.password, - g_queryInfo.dbName, + TAOS * taos = NULL; + taos = taos_connect(g_queryInfo.host, + g_queryInfo.user, + g_queryInfo.password, + g_queryInfo.dbName, g_queryInfo.port); if (taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); exit(-1); } - if (0 != g_queryInfo.subQueryInfo.sqlCount) { - getAllChildNameOfSuperTable(taos, - g_queryInfo.dbName, - g_queryInfo.subQueryInfo.sTblName, - &g_queryInfo.subQueryInfo.childTblName, - &g_queryInfo.subQueryInfo.childTblCount); + if (0 != g_queryInfo.superQueryInfo.sqlCount) { + getAllChildNameOfSuperTable(taos, + g_queryInfo.dbName, + g_queryInfo.superQueryInfo.sTblName, + &g_queryInfo.superQueryInfo.childTblName, + &g_queryInfo.superQueryInfo.childTblCount); } + taos_close(taos); // TODO: workaround to use separate taos connection; + pthread_t *pids = NULL; threadInfo *infos = NULL; //==== create sub threads for query from super table - if ((g_queryInfo.superQueryInfo.sqlCount <= 0) || - (g_queryInfo.superQueryInfo.concurrent <= 0)) { + if ((g_queryInfo.specifiedQueryInfo.sqlCount <= 0) || + (g_queryInfo.specifiedQueryInfo.concurrent <= 0)) { errorPrint("%s() LN%d, query sqlCount %d or concurrent %d is not correct.\n", - __func__, __LINE__, g_queryInfo.superQueryInfo.sqlCount, - g_queryInfo.superQueryInfo.concurrent); + __func__, __LINE__, g_queryInfo.specifiedQueryInfo.sqlCount, + g_queryInfo.specifiedQueryInfo.concurrent); exit(-1); } - pids = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(pthread_t)); - infos = malloc(g_queryInfo.superQueryInfo.concurrent * sizeof(threadInfo)); + pids = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(pthread_t)); + infos = malloc(g_queryInfo.specifiedQueryInfo.concurrent * sizeof(threadInfo)); if ((NULL == pids) || (NULL == infos)) { - printf("malloc failed for create threads\n"); - taos_close(taos); + errorPrint("%s() LN%d, malloc failed for create threads\n", __func__, __LINE__); exit(-1); } - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + for (int i = 0; i < 
g_queryInfo.specifiedQueryInfo.concurrent; i++) { threadInfo *t_info = infos + i; t_info->threadID = i; - t_info->taos = taos; + t_info->taos = NULL; // TODO: workaround to use separate taos connection; pthread_create(pids + i, NULL, superSubscribeProcess, t_info); } - - //==== create sub threads for query from sub table + + //==== create sub threads for query from sub table pthread_t *pidsOfSub = NULL; threadInfo *infosOfSub = NULL; - if ((g_queryInfo.subQueryInfo.sqlCount > 0) - && (g_queryInfo.subQueryInfo.threadCnt > 0)) { - pidsOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * + if ((g_queryInfo.superQueryInfo.sqlCount > 0) + && (g_queryInfo.superQueryInfo.threadCnt > 0)) { + pidsOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(pthread_t)); - infosOfSub = malloc(g_queryInfo.subQueryInfo.threadCnt * + infosOfSub = malloc(g_queryInfo.superQueryInfo.threadCnt * sizeof(threadInfo)); if ((NULL == pidsOfSub) || (NULL == infosOfSub)) { - printf("malloc failed for create threads\n"); - taos_close(taos); + errorPrint("%s() LN%d, malloc failed for create threads\n", + __func__, __LINE__); + // taos_close(taos); exit(-1); } - int ntables = g_queryInfo.subQueryInfo.childTblCount; - int threads = g_queryInfo.subQueryInfo.threadCnt; - + int ntables = g_queryInfo.superQueryInfo.childTblCount; + int threads = g_queryInfo.superQueryInfo.threadCnt; + int a = ntables / threads; if (a < 1) { threads = ntables; a = 1; } - + int b = 0; if (threads != 0) { b = ntables % threads; } - + int startFrom = 0; for (int i = 0; i < threads; i++) { threadInfo *t_info = infosOfSub + i; @@ -5843,69 +6168,71 @@ static int subscribeTestProcess() { t_info->ntables = iend_table_to = i < b ? startFrom + a : startFrom + a - 1; startFrom = t_info->end_table_to + 1; - t_info->taos = taos; + t_info->taos = NULL; // TODO: workaround to use separate taos connection; pthread_create(pidsOfSub + i, NULL, subSubscribeProcess, t_info); } - g_queryInfo.subQueryInfo.threadCnt = threads; + + g_queryInfo.superQueryInfo.threadCnt = threads; + + for (int i = 0; i < g_queryInfo.superQueryInfo.threadCnt; i++) { + pthread_join(pidsOfSub[i], NULL); + } } - - for (int i = 0; i < g_queryInfo.superQueryInfo.concurrent; i++) { + + for (int i = 0; i < g_queryInfo.specifiedQueryInfo.concurrent; i++) { pthread_join(pids[i], NULL); - } + } tmfree((char*)pids); tmfree((char*)infos); - for (int i = 0; i < g_queryInfo.subQueryInfo.threadCnt; i++) { - pthread_join(pidsOfSub[i], NULL); - } - tmfree((char*)pidsOfSub); tmfree((char*)infosOfSub); - taos_close(taos); +// taos_close(taos); return 0; } static void initOfInsertMeta() { memset(&g_Dbs, 0, sizeof(SDbs)); - + // set default values - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); g_Dbs.port = 6030; - tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); - tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); + tstrncpy(g_Dbs.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); g_Dbs.threadCount = 2; - g_Dbs.use_metric = true; + + g_Dbs.use_metric = g_args.use_metric; } static void initOfQueryMeta() { memset(&g_queryInfo, 0, sizeof(SQueryMetaInfo)); - + // set default values - tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.host, "127.0.0.1", MAX_HOSTNAME_SIZE); g_queryInfo.port = 6030; - tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_DB_NAME_SIZE); - tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_DB_NAME_SIZE); + 
tstrncpy(g_queryInfo.user, TSDB_DEFAULT_USER, MAX_USERNAME_SIZE); + tstrncpy(g_queryInfo.password, TSDB_DEFAULT_PASS, MAX_PASSWORD_SIZE); } static void setParaFromArg(){ if (g_args.host) { - strcpy(g_Dbs.host, g_args.host); + tstrncpy(g_Dbs.host, g_args.host, MAX_HOSTNAME_SIZE); } else { - tstrncpy(g_Dbs.host, "127.0.0.1", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.host, "127.0.0.1", MAX_HOSTNAME_SIZE); } if (g_args.user) { - strcpy(g_Dbs.user, g_args.user); - } + tstrncpy(g_Dbs.user, g_args.user, MAX_USERNAME_SIZE); + } if (g_args.password) { - strcpy(g_Dbs.password, g_args.password); - } - + tstrncpy(g_Dbs.password, g_args.password, MAX_PASSWORD_SIZE); + } + if (g_args.port) { g_Dbs.port = g_args.port; - } + } g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; @@ -5926,11 +6253,11 @@ static void setParaFromArg(){ char dataString[STRING_LEN]; char **data_type = g_args.datatype; - + memset(dataString, 0, STRING_LEN); - if (strcasecmp(data_type[0], "BINARY") == 0 - || strcasecmp(data_type[0], "BOOL") == 0 + if (strcasecmp(data_type[0], "BINARY") == 0 + || strcasecmp(data_type[0], "BOOL") == 0 || strcasecmp(data_type[0], "NCHAR") == 0 ) { g_Dbs.do_aggreFunc = false; } @@ -5940,58 +6267,56 @@ static void setParaFromArg(){ tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.threadCount = g_args.num_of_threads; - g_Dbs.threadCountByCreateTbl = 1; + g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; g_Dbs.queryMode = g_args.mode; - + g_Dbs.db[0].superTbls[0].autoCreateTable = PRE_CREATE_SUBTBL; - g_Dbs.db[0].superTbls[0].superTblExists = TBL_NO_EXISTS; g_Dbs.db[0].superTbls[0].childTblExists = TBL_NO_EXISTS; g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; - tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, + tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); tstrncpy(g_Dbs.db[0].superTbls[0].insertMode, "taosc", MAX_TB_NAME_SIZE); - tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, + tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, "2017-07-14 10:40:00.000", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].timeStampStep = DEFAULT_TIMESTAMP_STEP; - + g_Dbs.db[0].superTbls[0].insertRows = g_args.num_of_DPT; - g_Dbs.db[0].superTbls[0].maxSqlLen = TSDB_PAYLOAD_SIZE; + g_Dbs.db[0].superTbls[0].maxSqlLen = g_args.max_sql_len; g_Dbs.db[0].superTbls[0].columnCount = 0; for (int i = 0; i < MAX_NUM_DATATYPE; i++) { if (data_type[i] == NULL) { break; } - - tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, + + tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, data_type[i], MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; + g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; g_Dbs.db[0].superTbls[0].columnCount++; } - + if (g_Dbs.db[0].superTbls[0].columnCount > g_args.num_of_CPR) { g_Dbs.db[0].superTbls[0].columnCount = g_args.num_of_CPR; } else { for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) { tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, "INT", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; + g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; g_Dbs.db[0].superTbls[0].columnCount++; } } tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, "INT", MAX_TB_NAME_SIZE); - 
g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; - - tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE); - g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; - g_Dbs.db[0].superTbls[0].tagCount = 2; - } else { - g_Dbs.threadCountByCreateTbl = 1; - g_Dbs.db[0].superTbls[0].tagCount = 0; - } + g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; + tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, "BINARY", MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; + g_Dbs.db[0].superTbls[0].tagCount = 2; + } else { + g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; + g_Dbs.db[0].superTbls[0].tagCount = 0; + } } /* Function to do regular expression check */ @@ -6062,8 +6387,9 @@ static void querySqlFile(TAOS* taos, char* sqlFile) memcpy(cmd + cmd_len, line, read_len); verbosePrint("%s() LN%d cmd: %s\n", __func__, __LINE__, cmd); - if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE)) { - printf("queryDbExec %s failed!\n", cmd); + if (0 != queryDbExec(taos, cmd, NO_INSERT_TYPE, false)) { + errorPrint("%s() LN%d, queryDbExec %s failed!\n", + __func__, __LINE__, cmd); tmfree(cmd); tmfree(line); tmfclose(fp); @@ -6084,16 +6410,23 @@ static void querySqlFile(TAOS* taos, char* sqlFile) static void testMetaFile() { if (INSERT_TEST == g_args.test_mode) { - if (g_Dbs.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); + if (g_Dbs.cfgDir[0]) + taos_options(TSDB_OPTION_CONFIGDIR, g_Dbs.cfgDir); + insertTestProcess(); + } else if (QUERY_TEST == g_args.test_mode) { if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); + queryTestProcess(); + } else if (SUBSCRIBE_TEST == g_args.test_mode) { if (g_queryInfo.cfgDir[0]) taos_options(TSDB_OPTION_CONFIGDIR, g_queryInfo.cfgDir); + subscribeTestProcess(); + } else { ; } @@ -6114,27 +6447,28 @@ static void queryResult() { rInfo->ntables = g_Dbs.db[0].superTbls[0].childTblCount; rInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; rInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; - strcpy(rInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix); + tstrncpy(rInfo->tb_prefix, + g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); } else { rInfo->ntables = g_args.num_of_tables; rInfo->end_table_to = g_args.num_of_tables -1; - strcpy(rInfo->tb_prefix, g_args.tb_prefix); + tstrncpy(rInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); } rInfo->taos = taos_connect( - g_Dbs.host, - g_Dbs.user, - g_Dbs.password, - g_Dbs.db[0].dbName, + g_Dbs.host, + g_Dbs.user, + g_Dbs.password, + g_Dbs.db[0].dbName, g_Dbs.port); if (rInfo->taos == NULL) { - errorPrint( "Failed to connect to TDengine, reason:%s\n", taos_errstr(NULL)); + errorPrint( "Failed to connect to TDengine, reason:%s\n", + taos_errstr(NULL)); free(rInfo); exit(-1); } - strcpy(rInfo->fp, g_Dbs.resultFile); + tstrncpy(rInfo->fp, g_Dbs.resultFile, MAX_FILE_NAME_LEN); if (!g_Dbs.use_metric) { pthread_create(&read_id, NULL, readTable, rInfo); diff --git a/src/mnode/inc/mnodeVgroup.h b/src/mnode/inc/mnodeVgroup.h index e052cdb83c..7b798c23f8 100644 --- a/src/mnode/inc/mnodeVgroup.h +++ b/src/mnode/inc/mnodeVgroup.h @@ -49,6 +49,7 @@ void mnodeRemoveTableFromVgroup(SVgObj *pVgroup, SCTableObj *pTable); void mnodeSendDropVnodeMsg(int32_t vgId, SRpcEpSet *epSet, void *ahandle); void mnodeSendCreateVgroupMsg(SVgObj *pVgroup, void *ahandle); void mnodeSendAlterVgroupMsg(SVgObj *pVgroup); +void mnodeSendSyncVgroupMsg(SVgObj *pVgroup); SRpcEpSet mnodeGetEpSetFromVgroup(SVgObj *pVgroup); SRpcEpSet 
mnodeGetEpSetFromIp(char *ep); diff --git a/src/mnode/src/mnodeDb.c b/src/mnode/src/mnodeDb.c index b1c88ca718..909ca7cac6 100644 --- a/src/mnode/src/mnodeDb.c +++ b/src/mnode/src/mnodeDb.c @@ -50,6 +50,7 @@ static int32_t mnodeGetDbMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn static int32_t mnodeRetrieveDbs(SShowObj *pShow, char *data, int32_t rows, void *pConn); static int32_t mnodeProcessCreateDbMsg(SMnodeMsg *pMsg); static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg); +static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg); int32_t mnodeProcessAlterDbMsg(SMnodeMsg *pMsg); #ifndef _TOPIC @@ -178,6 +179,7 @@ int32_t mnodeInitDbs() { mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_CREATE_DB, mnodeProcessCreateDbMsg); mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_ALTER_DB, mnodeProcessAlterDbMsg); mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_DROP_DB, mnodeProcessDropDbMsg); + mnodeAddWriteMsgHandle(TSDB_MSG_TYPE_CM_SYNC_DB, mnodeProcessSyncDbMsg); mnodeAddShowMetaHandle(TSDB_MGMT_TABLE_DB, mnodeGetDbMeta); mnodeAddShowRetrieveHandle(TSDB_MGMT_TABLE_DB, mnodeRetrieveDbs); mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_DB, mnodeCancelGetNextDb); @@ -1184,6 +1186,46 @@ static int32_t mnodeProcessDropDbMsg(SMnodeMsg *pMsg) { return mnodeDropDb(pMsg); } +static int32_t mnodeSyncDb(SDbObj *pDb, SMnodeMsg *pMsg) { + void *pIter = NULL; + SVgObj *pVgroup = NULL; + while (1) { + pIter = mnodeGetNextVgroup(pIter, &pVgroup); + if (pVgroup == NULL) break; + if (pVgroup->pDb == pDb) { + mnodeSendSyncVgroupMsg(pVgroup); + } + mnodeDecVgroupRef(pVgroup); + } + + mLInfo("db:%s, is synced by %s", pDb->name, mnodeGetUserFromMsg(pMsg)); + + return TSDB_CODE_SUCCESS; +} + +static int32_t mnodeProcessSyncDbMsg(SMnodeMsg *pMsg) { + SSyncDbMsg *pSyncDb = pMsg->rpcMsg.pCont; + mDebug("db:%s, syncdb is received from thandle:%p, ignore:%d", pSyncDb->db, pMsg->rpcMsg.handle, pSyncDb->ignoreNotExists); + + if (pMsg->pDb == NULL) pMsg->pDb = mnodeGetDb(pSyncDb->db); + if (pMsg->pDb == NULL) { + if (pSyncDb->ignoreNotExists) { + mDebug("db:%s, db is not exist, treat as success", pSyncDb->db); + return TSDB_CODE_SUCCESS; + } else { + mError("db:%s, failed to sync, invalid db", pSyncDb->db); + return TSDB_CODE_MND_INVALID_DB; + } + } + + if (pMsg->pDb->status != TSDB_DB_STATUS_READY) { + mError("db:%s, status:%d, in dropping", pSyncDb->db, pMsg->pDb->status); + return TSDB_CODE_MND_DB_IN_DROPPING; + } + + return mnodeSyncDb(pMsg->pDb, pMsg); +} + void mnodeDropAllDbs(SAcctObj *pAcct) { int32_t numOfDbs = 0; SDbObj *pDb = NULL; diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index 304096f3ae..db03da4fe1 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -722,6 +722,10 @@ int32_t mnodeDropDnode(SDnodeObj *pDnode, void *pMsg) { static int32_t mnodeDropDnodeByEp(char *ep, SMnodeMsg *pMsg) { SDnodeObj *pDnode = mnodeGetDnodeByEp(ep); if (pDnode == NULL) { + if (strspn(ep, "0123456789 ;") != strlen(ep)) { + return TSDB_CODE_MND_DNODE_NOT_EXIST; + } + int32_t dnodeId = (int32_t)strtol(ep, NULL, 10); pDnode = mnodeGetDnode(dnodeId); if (pDnode == NULL) { diff --git a/src/mnode/src/mnodeProfile.c b/src/mnode/src/mnodeProfile.c index fe19d10963..459d981138 100644 --- a/src/mnode/src/mnodeProfile.c +++ b/src/mnode/src/mnodeProfile.c @@ -420,7 +420,7 @@ static int32_t mnodeRetrieveQueries(SShowObj *pShow, char *data, int32_t rows, v cols++; char handleBuf[24] = {0}; - snprintf(handleBuf, tListLen(handleBuf), "%p", (void*)htobe64(pDesc->qid)); + snprintf(handleBuf, tListLen(handleBuf), "%"PRIu64, 
htobe64(pDesc->qId)); pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows; STR_WITH_MAXSIZE_TO_VARSTR(pWrite, handleBuf, pShow->bytes[cols]); diff --git a/src/mnode/src/mnodeVgroup.c b/src/mnode/src/mnodeVgroup.c index fdbf7ae398..7eb3122d83 100644 --- a/src/mnode/src/mnodeVgroup.c +++ b/src/mnode/src/mnodeVgroup.c @@ -60,6 +60,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, void *pConn); static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg); static void mnodeProcessAlterVnodeRsp(SRpcMsg *rpcMsg); +static void mnodeProcessSyncVnodeRsp(SRpcMsg *rpcMsg); static void mnodeProcessDropVnodeRsp(SRpcMsg *rpcMsg); static int32_t mnodeProcessVnodeCfgMsg(SMnodeMsg *pMsg) ; static void mnodeSendDropVgroupMsg(SVgObj *pVgroup, void *ahandle); @@ -236,6 +237,7 @@ int32_t mnodeInitVgroups() { mnodeAddShowFreeIterHandle(TSDB_MGMT_TABLE_VGROUP, mnodeCancelGetNextVgroup); mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_CREATE_VNODE_RSP, mnodeProcessCreateVnodeRsp); mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP, mnodeProcessAlterVnodeRsp); + mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_ALTER_VNODE_RSP, mnodeProcessSyncVnodeRsp); mnodeAddPeerRspHandle(TSDB_MSG_TYPE_MD_DROP_VNODE_RSP, mnodeProcessDropVnodeRsp); mnodeAddPeerMsgHandle(TSDB_MSG_TYPE_DM_CONFIG_VNODE, mnodeProcessVnodeCfgMsg); @@ -967,6 +969,38 @@ void mnodeSendAlterVgroupMsg(SVgObj *pVgroup) { } } +static SSyncVnodeMsg *mnodeBuildSyncVnodeMsg(int32_t vgId) { + SSyncVnodeMsg *pSyncVnode = rpcMallocCont(sizeof(SSyncVnodeMsg)); + if (pSyncVnode == NULL) return NULL; + + pSyncVnode->vgId = htonl(vgId); + return pSyncVnode; +} + +static void mnodeSendSyncVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet) { + SSyncVnodeMsg *pSyncVnode = mnodeBuildSyncVnodeMsg(pVgroup->vgId); + SRpcMsg rpcMsg = { + .ahandle = NULL, + .pCont = pSyncVnode, + .contLen = pSyncVnode ? 
sizeof(SSyncVnodeMsg) : 0, + .code = 0, + .msgType = TSDB_MSG_TYPE_MD_SYNC_VNODE + }; + + dnodeSendMsgToDnode(epSet, &rpcMsg); +} + +void mnodeSendSyncVgroupMsg(SVgObj *pVgroup) { + mDebug("vgId:%d, send sync all vnodes msg, numOfVnodes:%d db:%s", pVgroup->vgId, pVgroup->numOfVnodes, + pVgroup->dbName); + for (int32_t i = 0; i < pVgroup->numOfVnodes; ++i) { + SRpcEpSet epSet = mnodeGetEpSetFromIp(pVgroup->vnodeGid[i].pDnode->dnodeEp); + mDebug("vgId:%d, index:%d, send sync vnode msg to dnode %s", pVgroup->vgId, i, + pVgroup->vnodeGid[i].pDnode->dnodeEp); + mnodeSendSyncVnodeMsg(pVgroup, &epSet); + } +} + static void mnodeSendCreateVnodeMsg(SVgObj *pVgroup, SRpcEpSet *epSet, void *ahandle) { SCreateVnodeMsg *pCreate = mnodeBuildVnodeMsg(pVgroup); SRpcMsg rpcMsg = { @@ -994,6 +1028,10 @@ static void mnodeProcessAlterVnodeRsp(SRpcMsg *rpcMsg) { mDebug("alter vnode rsp received"); } +static void mnodeProcessSyncVnodeRsp(SRpcMsg *rpcMsg) { + mDebug("sync vnode rsp received"); +} + static void mnodeProcessCreateVnodeRsp(SRpcMsg *rpcMsg) { if (rpcMsg->ahandle == NULL) return; diff --git a/src/os/inc/osArm32.h b/src/os/inc/osArm32.h index 24ff95522e..54835a1ca8 100644 --- a/src/os/inc/osArm32.h +++ b/src/os/inc/osArm32.h @@ -77,6 +77,7 @@ extern "C" { #include #include #include +#include #define TAOS_OS_FUNC_LZ4 #define BUILDIN_CLZL(val) __builtin_clzll(val) diff --git a/src/os/inc/osArm64.h b/src/os/inc/osArm64.h index 22f0000e96..76098f6846 100644 --- a/src/os/inc/osArm64.h +++ b/src/os/inc/osArm64.h @@ -78,6 +78,7 @@ extern "C" { #include #include #include +#include #ifdef __cplusplus } diff --git a/src/os/inc/osLinux32.h b/src/os/inc/osLinux32.h index cfef05368f..eee4999399 100644 --- a/src/os/inc/osLinux32.h +++ b/src/os/inc/osLinux32.h @@ -77,6 +77,7 @@ extern "C" { #include #include #include +#include #define TAOS_OS_FUNC_LZ4 #define BUILDIN_CLZL(val) __builtin_clzll(val) diff --git a/src/os/inc/osLinux64.h b/src/os/inc/osLinux64.h index a2febd51b7..0dfc819da3 100644 --- a/src/os/inc/osLinux64.h +++ b/src/os/inc/osLinux64.h @@ -75,6 +75,7 @@ extern "C" { #include #include #include +#include #ifndef _ALPINE #include #endif diff --git a/src/os/inc/osSocket.h b/src/os/inc/osSocket.h index 111fca712c..cf75b74a9a 100644 --- a/src/os/inc/osSocket.h +++ b/src/os/inc/osSocket.h @@ -68,6 +68,8 @@ void taosSetMaskSIGPIPE(); // TAOS_OS_FUNC_SOCKET_SETSOCKETOPT int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t optlen); +int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t* optlen); + // TAOS_OS_FUNC_SOCKET_INET uint32_t taosInetAddr(char *ipAddr); const char *taosInetNtoa(struct in_addr ipInt); diff --git a/src/os/src/detail/osSocket.c b/src/os/src/detail/osSocket.c index 1186a6dd0a..db976acccd 100644 --- a/src/os/src/detail/osSocket.c +++ b/src/os/src/detail/osSocket.c @@ -71,6 +71,9 @@ int32_t taosSetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *op return setsockopt(socketfd, level, optname, optval, (socklen_t)optlen); } +int32_t taosGetSockOpt(SOCKET socketfd, int32_t level, int32_t optname, void *optval, int32_t* optlen) { + return getsockopt(socketfd, level, optname, optval, (socklen_t *)optlen); +} #endif #ifndef TAOS_OS_FUNC_SOCKET_INET diff --git a/src/query/inc/qResultbuf.h b/src/query/inc/qResultbuf.h index e46af8c298..ef43a9621a 100644 --- a/src/query/inc/qResultbuf.h +++ b/src/query/inc/qResultbuf.h @@ -72,7 +72,7 @@ typedef struct SDiskbasedResultBuf { bool comp; // compressed before 
flushed to disk int32_t nextPos; // next page flush position - const void* handle; // for debug purpose + uint64_t qId; // for debug purpose SResultBufStatis statis; } SDiskbasedResultBuf; @@ -88,7 +88,7 @@ typedef struct SDiskbasedResultBuf { * @param handle * @return */ -int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle); +int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, uint64_t qId); /** * diff --git a/src/query/inc/qUtil.h b/src/query/inc/qUtil.h index 6bfdd909f9..3ca6d96746 100644 --- a/src/query/inc/qUtil.h +++ b/src/query/inc/qUtil.h @@ -25,6 +25,7 @@ } while (0) #define GET_RES_WINDOW_KEY_LEN(_l) ((_l) + sizeof(uint64_t)) +#define GET_QID(_r) (((SQInfo*)((_r)->qinfo))->qId) #define curTimeWindowIndex(_winres) ((_winres)->curIndex) #define GET_ROW_PARAM_FOR_MULTIOUTPUT(_q, tbq, sq) (((tbq) && (!(sq)))? (_q)->pExpr1[1].base.param[0].i64:1) diff --git a/src/query/inc/sql.y b/src/query/inc/sql.y index 8b537ffe5d..fb1eee1d13 100644 --- a/src/query/inc/sql.y +++ b/src/query/inc/sql.y @@ -668,6 +668,7 @@ expr(A) ::= STRING(X). { A = tSqlExprCreateIdValue(&X, TK_STRING);} expr(A) ::= NOW(X). { A = tSqlExprCreateIdValue(&X, TK_NOW); } expr(A) ::= VARIABLE(X). { A = tSqlExprCreateIdValue(&X, TK_VARIABLE);} expr(A) ::= BOOL(X). { A = tSqlExprCreateIdValue(&X, TK_BOOL);} +expr(A) ::= NULL(X). { A = tSqlExprCreateIdValue(&X, TK_NULL);} // ordinary functions: min(x), max(x), top(k, 20) expr(A) ::= ID(X) LP exprlist(Y) RP(E). { A = tSqlExprCreateFunction(Y, &X, &E, X.type); } @@ -719,6 +720,9 @@ expritem(A) ::= . {A = 0;} ///////////////////////////////////reset query cache////////////////////////////////////// cmd ::= RESET QUERY CACHE. { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} +///////////////////////////////////sync replica database////////////////////////////////// +cmd ::= SYNCDB ids(X) REPLICA.{ setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &X);} + ///////////////////////////////////ALTER TABLE statement////////////////////////////////// cmd ::= ALTER TABLE ids(X) cpxName(F) ADD COLUMN columnlist(A). 
{ X.n += F.n; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 89dabe40dc..d80912db48 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1249,7 +1249,7 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn int16_t type = pColInfoData->info.type; if (type == TSDB_DATA_TYPE_FLOAT || type == TSDB_DATA_TYPE_DOUBLE) { - qError("QInfo:%p group by not supported on double/float columns, abort", pRuntimeEnv->qinfo); + qError("QInfo:%"PRIu64" group by not supported on double/float columns, abort", GET_QID(pRuntimeEnv)); return; } @@ -1636,7 +1636,7 @@ static void* destroySQLFunctionCtx(SQLFunctionCtx* pCtx, int32_t numOfOutput) { } static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfTables, SArray* pOperator) { - qDebug("QInfo:%p setup runtime env", pRuntimeEnv->qinfo); + qDebug("QInfo:%"PRIu64" setup runtime env", GET_QID(pRuntimeEnv)); SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; pRuntimeEnv->prevGroupId = INT32_MIN; @@ -1669,7 +1669,7 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf *(int64_t*) pRuntimeEnv->prevRow[0] = INT64_MIN; } - qDebug("QInfo:%p init runtime environment completed", pRuntimeEnv->qinfo); + qDebug("QInfo:%"PRIu64" init runtime environment completed", GET_QID(pRuntimeEnv)); // group by normal column, sliding window query, interval query are handled by interval query processor // interval (down sampling operation) @@ -1850,7 +1850,7 @@ static void teardownQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv) { SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; SQInfo* pQInfo = (SQInfo*) pRuntimeEnv->qinfo; - qDebug("QInfo:%p teardown runtime env", pQInfo); + qDebug("QInfo:%"PRIu64" teardown runtime env", pQInfo->qId); if (pRuntimeEnv->sasArray != NULL) { for(int32_t i = 0; i < pQueryAttr->numOfOutput; ++i) { @@ -1898,8 +1898,8 @@ bool isQueryKilled(SQInfo *pQInfo) { (!needBuildResAfterQueryComplete(pQInfo))) { assert(pQInfo->startExecTs != 0); - qDebug("QInfo:%p retrieve not arrive beyond %d sec, abort current query execution, start:%"PRId64", current:%d", pQInfo, 1, - pQInfo->startExecTs, taosGetTimestampSec()); + qDebug("QInfo:%" PRIu64 " retrieve not arrive beyond %d sec, abort current query execution, start:%" PRId64 + ", current:%d", pQInfo->qId, 1, pQInfo->startExecTs, taosGetTimestampSec()); return true; } @@ -2017,11 +2017,11 @@ void getAlignQueryTimeWindow(SQueryAttr *pQueryAttr, int64_t key, int64_t keyFir /* * todo add more parameters to check soon.. 
*/ -bool colIdCheck(SQueryAttr *pQueryAttr, void* qinfo) { +bool colIdCheck(SQueryAttr *pQueryAttr, uint64_t qId) { // load data column information is incorrect for (int32_t i = 0; i < pQueryAttr->numOfCols - 1; ++i) { if (pQueryAttr->colList[i].colId == pQueryAttr->colList[i + 1].colId) { - qError("QInfo:%p invalid data load column for query", qinfo); + qError("QInfo:%"PRIu64" invalid data load column for query", qId); return false; } } @@ -2074,13 +2074,14 @@ static void changeExecuteScanOrder(SQInfo *pQInfo, SQueryTableMsg* pQueryMsg, bo SQueryAttr* pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; // in case of point-interpolation query, use asc order scan - char msg[] = "QInfo:%p scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64 + char msg[] = "QInfo:%"PRIu64" scan order changed for %s query, old:%d, new:%d, qrange exchanged, old qrange:%" PRId64 "-%" PRId64 ", new qrange:%" PRId64 "-%" PRId64; // todo handle the case the the order irrelevant query type mixed up with order critical query type // descending order query for last_row query if (isFirstLastRowQuery(pQueryAttr)) { - qDebug("QInfo:%p scan order changed for last_row query, old:%d, new:%d", pQInfo, pQueryAttr->order.order, TSDB_ORDER_ASC); + qDebug("QInfo:%"PRIu64" scan order changed for last_row query, old:%d, new:%d", pQInfo->qId, + pQueryAttr->order.order, TSDB_ORDER_ASC); pQueryAttr->order.order = TSDB_ORDER_ASC; if (pQueryAttr->window.skey > pQueryAttr->window.ekey) { @@ -2555,7 +2556,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa SDataBlockInfo* pBlockInfo = &pBlock->info; if ((*status) == BLK_DATA_NO_NEEDED) { - qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo, pBlockInfo->window.skey, + qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); pCost->discardBlocks += 1; } else if ((*status) == BLK_DATA_STATIS_NEEDED) { @@ -2583,7 +2584,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa (char*)&(pBlock->pBlockStatis[i].max)); if (!load) { // current block has been discard due to filter applied pCost->discardBlocks += 1; - qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo, + qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); (*status) = BLK_DATA_DISCARD; return TSDB_CODE_SUCCESS; @@ -2595,7 +2596,7 @@ int32_t loadDataBlockOnDemand(SQueryRuntimeEnv* pRuntimeEnv, STableScanInfo* pTa // current block has been discard due to filter applied if (!doFilterByBlockStatistics(pRuntimeEnv, pBlock->pBlockStatis, pTableScanInfo->pCtx, pBlockInfo->rows)) { pCost->discardBlocks += 1; - qDebug("QInfo:%p data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo, pBlockInfo->window.skey, + qDebug("QInfo:%"PRIu64" data block discard, brange:%" PRId64 "-%" PRId64 ", rows:%d", pQInfo->qId, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows); (*status) = BLK_DATA_DISCARD; return TSDB_CODE_SUCCESS; @@ -3285,9 +3286,9 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, // failed to find data with the specified tag value and vnodeId if (!tsBufIsValidElem(&elem)) { if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qError("QInfo:%p failed to find tag:%s in 
ts_comp", pRuntimeEnv->qinfo, pTag->pz); + qError("QInfo:%"PRIu64" failed to find tag:%s in ts_comp", GET_QID(pRuntimeEnv), pTag->pz); } else { - qError("QInfo:%p failed to find tag:%" PRId64 " in ts_comp", pRuntimeEnv->qinfo, pTag->i64); + qError("QInfo:%"PRIu64" failed to find tag:%" PRId64 " in ts_comp", GET_QID(pRuntimeEnv), pTag->i64); } return -1; @@ -3296,17 +3297,17 @@ int32_t setTimestampListJoinInfo(SQueryRuntimeEnv* pRuntimeEnv, tVariant* pTag, // Keep the cursor info of current table pTableQueryInfo->cur = tsBufGetCursor(pRuntimeEnv->pTsBuf); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:%"PRIu64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { - qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:%"PRIu64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } } else { tsBufSetCursor(pRuntimeEnv->pTsBuf, &pTableQueryInfo->cur); if (pTag->nType == TSDB_DATA_TYPE_BINARY || pTag->nType == TSDB_DATA_TYPE_NCHAR) { - qDebug("QInfo:%p find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:%"PRIu64" find tag:%s start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->pz, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } else { - qDebug("QInfo:%p find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", pRuntimeEnv->qinfo, pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); + qDebug("QInfo:%"PRIu64" find tag:%"PRId64" start pos in ts_comp, blockIndex:%d, tsIndex:%d", GET_QID(pRuntimeEnv), pTag->i64, pTableQueryInfo->cur.blockIndex, pTableQueryInfo->cur.tsIndex); } } @@ -3348,9 +3349,9 @@ void setParamForStableStddev(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx } void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunctionCtx* pCtx, int32_t numOfOutput, SExprInfo* pExpr, char* val, int16_t bytes) { - SQueryAttr* pQuery = pRuntimeEnv->pQueryAttr; + SQueryAttr* pQueryAttr = pRuntimeEnv->pQueryAttr; - int32_t numOfExprs = pQuery->numOfOutput; + int32_t numOfExprs = pQueryAttr->numOfOutput; for(int32_t i = 0; i < numOfExprs; ++i) { SSqlExpr* pExpr1 = &pExpr[i].base; if (pExpr1->functionId != TSDB_FUNC_STDDEV_DST) { @@ -3377,6 +3378,7 @@ void setParamForStableStddevByColData(SQueryRuntimeEnv* pRuntimeEnv, SQLFunction } } } + /* * There are two cases to handle: * @@ -3440,7 +3442,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* int32_t start = 0; int32_t step = -1; - qDebug("QInfo:%p start to copy data from windowResInfo to output buf", pRuntimeEnv->qinfo); + qDebug("QInfo:%"PRIu64" start to copy data from windowResInfo to output buf", GET_QID(pRuntimeEnv)); if (orderType == TSDB_ORDER_ASC) { start = pGroupResInfo->index; step = 1; @@ -3480,7 +3482,7 @@ static int32_t doCopyToSDataBlock(SQueryRuntimeEnv* pRuntimeEnv, SGroupResInfo* } } - 
qDebug("QInfo:%p copy data to query buf completed", pRuntimeEnv->qinfo); + qDebug("QInfo:%"PRIu64" copy data to query buf completed", GET_QID(pRuntimeEnv)); pBlock->info.rows = numOfResult; return 0; } @@ -3566,11 +3568,11 @@ static void doCopyQueryResultToMsg(SQInfo *pQInfo, int32_t numOfRows, char *data data += sizeof(STableIdInfo); total++; - qDebug("QInfo:%p set subscribe info, tid:%d, uid:%"PRIu64", skey:%"PRId64, pQInfo, item->tid, item->uid, item->key); + qDebug("QInfo:%"PRIu64" set subscribe info, tid:%d, uid:%"PRIu64", skey:%"PRId64, pQInfo->qId, item->tid, item->uid, item->key); item = taosHashIterate(pRuntimeEnv->pTableRetrieveTsMap, item); } - qDebug("QInfo:%p set %d subscribe info", pQInfo, total); + qDebug("QInfo:%"PRIu64" set %d subscribe info", pQInfo->qId, total); // Check if query is completed or not for stable query or normal table query respectively. if (Q_STATUS_EQUAL(pRuntimeEnv->status, QUERY_COMPLETED) && pRuntimeEnv->proot->status == OP_EXEC_DONE) { setQueryStatus(pRuntimeEnv, QUERY_OVER); @@ -3609,12 +3611,12 @@ void queryCostStatis(SQInfo *pQInfo) { pSummary->numOfTimeWindows = 0; } - qDebug("QInfo:%p :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, " + qDebug("QInfo:%"PRIu64" :cost summary: elapsed time:%"PRId64" us, first merge:%"PRId64" us, total blocks:%d, " "load block statis:%d, load data block:%d, total rows:%"PRId64 ", check rows:%"PRId64, - pQInfo, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis, + pQInfo->qId, pSummary->elapsedTime, pSummary->firstStageMergeTime, pSummary->totalBlocks, pSummary->loadBlockStatis, pSummary->loadBlocks, pSummary->totalRows, pSummary->totalCheckedRows); - qDebug("QInfo:%p :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo, pSummary->winInfoSize/1024.0, + qDebug("QInfo:%"PRIu64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0, pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0); } @@ -3650,8 +3652,8 @@ void queryCostStatis(SQInfo *pQInfo) { // // int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); // -// qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, pRuntimeEnv->qinfo, -// pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, pRuntimeEnv->current->lastKey); +// qDebug("QInfo:%"PRIu64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%"PRId64, GET_QID(pRuntimeEnv), +// pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, pQueryAttr->current->lastKey); //} //void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) { @@ -3680,7 +3682,7 @@ void queryCostStatis(SQInfo *pQInfo) { // pTableQueryInfo->lastKey = (QUERY_IS_ASC_QUERY(pQueryAttr)) ? 
blockInfo.window.ekey : blockInfo.window.skey; // pTableQueryInfo->lastKey += step; // -// qDebug("QInfo:%p skip rows:%d, offset:%" PRId64, pRuntimeEnv->qinfo, blockInfo.rows, +// qDebug("QInfo:%"PRIu64" skip rows:%d, offset:%" PRId64, GET_QID(pRuntimeEnv), blockInfo.rows, // pQueryAttr->limit.offset); // } else { // find the appropriated start position in current block // updateOffsetVal(pRuntimeEnv, &blockInfo); @@ -3728,9 +3730,9 @@ void queryCostStatis(SQInfo *pQInfo) { // int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock); // pRuntimeEnv->resultRowInfo.curIndex = index; // restore the window index // -// qDebug("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, -// pRuntimeEnv->qinfo, pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, -// pRuntimeEnv->current->lastKey); +// qDebug("QInfo:%"PRIu64" check data block, brange:%" PRId64 "-%" PRId64 ", numOfRows:%d, numOfRes:%d, lastKey:%" PRId64, +// GET_QID(pRuntimeEnv), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes, +// pQueryAttr->current->lastKey); // // return key; // } else { // do nothing @@ -3884,7 +3886,7 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) terrno = TSDB_CODE_SUCCESS; if (isFirstLastRowQuery(pQueryAttr)) { - pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQueryAttr->tableGroupInfo, pQInfo, &pQueryAttr->memRef); + pRuntimeEnv->pQueryHandle = tsdbQueryLastRow(tsdb, &cond, &pQueryAttr->tableGroupInfo, pQInfo->qId, &pQueryAttr->memRef); // update the query time window pQueryAttr->window = cond.twindow; @@ -3905,9 +3907,9 @@ static int32_t setupQueryHandle(void* tsdb, SQInfo* pQInfo, bool isSTableQuery) } } } else if (pQueryAttr->pointInterpQuery) { - pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQueryAttr->tableGroupInfo, pQInfo, &pQueryAttr->memRef); + pRuntimeEnv->pQueryHandle = tsdbQueryRowsInExternalWindow(tsdb, &cond, &pQueryAttr->tableGroupInfo, pQInfo->qId, &pQueryAttr->memRef); } else { - pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQueryAttr->tableGroupInfo, pQInfo, &pQueryAttr->memRef); + pRuntimeEnv->pQueryHandle = tsdbQueryTables(tsdb, &cond, &pQueryAttr->tableGroupInfo, pQInfo->qId, &pQueryAttr->memRef); } return terrno; @@ -3945,7 +3947,6 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts pQueryAttr->tsdb = tsdb; pRuntimeEnv->prevResult = prevResult; - pRuntimeEnv->qinfo = pQInfo; if (tsdb != NULL) { int32_t code = setupQueryHandle(tsdb, pQInfo, pQueryAttr->stableQuery); @@ -3994,7 +3995,7 @@ int32_t doInitQInfo(SQInfo *pQInfo, STSBuf *pTsBuf, SArray* prevResult, void *ts getIntermediateBufInfo(pRuntimeEnv, &ps, &pQueryAttr->intermediateResultRowSize); int32_t TENMB = 1024*1024*10; - int32_t code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TENMB, pQInfo); + int32_t code = createDiskbasedResultBuffer(&pRuntimeEnv->pResultBuf, ps, TENMB, pQInfo->qId); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -4163,8 +4164,8 @@ static SSDataBlock* doTableScan(void* param) { pResultRowInfo->prevSKey = pResultRowInfo->pResult[0]->win.skey; } - qDebug("QInfo:%p start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, - pRuntimeEnv->qinfo, cond.twindow.skey, cond.twindow.ekey); + qDebug("QInfo:%"PRIu64" start to repeat scan data blocks due to query func required, qrange:%" PRId64 "-%" 
PRId64, + GET_QID(pRuntimeEnv), cond.twindow.skey, cond.twindow.ekey); } if (pTableScanInfo->reverseTimes > 0) { @@ -4173,8 +4174,8 @@ static SSDataBlock* doTableScan(void* param) { STsdbQueryCond cond = createTsdbQueryCond(pQueryAttr, &pQueryAttr->window); tsdbResetQueryHandle(pTableScanInfo->pQueryHandle, &cond); - qDebug("QInfo:%p start to reverse scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, - pRuntimeEnv->qinfo, cond.twindow.skey, cond.twindow.ekey); + qDebug("QInfo:%"PRIu64" start to reverse scan data blocks due to query func required, qrange:%" PRId64 "-%" PRId64, + GET_QID(pRuntimeEnv), cond.twindow.skey, cond.twindow.ekey); pRuntimeEnv->scanFlag = REVERSE_SCAN; @@ -5249,14 +5250,14 @@ static SSDataBlock* doTagScan(void* param) { count += 1; } - qDebug("QInfo:%p create (tableId, tag) info completed, rows:%d", pRuntimeEnv->qinfo, count); + qDebug("QInfo:%"PRIu64" create (tableId, tag) info completed, rows:%d", GET_QID(pRuntimeEnv), count); } else if (functionId == TSDB_FUNC_COUNT) {// handle the "count(tbname)" query SColumnInfoData* pColInfo = taosArrayGet(pRes->pDataBlock, 0); *(int64_t*)pColInfo->pData = pInfo->totalTables; count = 1; pOperator->status = OP_EXEC_DONE; - qDebug("QInfo:%p create count(tbname) query, res:%d rows:1", pRuntimeEnv->qinfo, count); + qDebug("QInfo:%"PRIu64" create count(tbname) query, res:%d rows:1", GET_QID(pRuntimeEnv), count); } else { // return only the tags|table name etc. SExprInfo* pExprInfo = pOperator->pExpr; // todo use the column list instead of exprinfo @@ -5295,7 +5296,7 @@ static SSDataBlock* doTagScan(void* param) { pOperator->status = OP_EXEC_DONE; } - qDebug("QInfo:%p create tag values results completed, rows:%d", pRuntimeEnv->qinfo, count); + qDebug("QInfo:%"PRIu64" create tag values results completed, rows:%d", GET_QID(pRuntimeEnv), count); } pRes->info.rows = count; @@ -5997,7 +5998,7 @@ SSqlGroupbyExpr *createGroupbyExprFromMsg(SQueryTableMsg *pQueryMsg, SColIndex * return pGroupbyExpr; } -static int32_t createFilterInfo(void *pQInfo, SQueryAttr *pQueryAttr) { +static int32_t createFilterInfo(SQueryAttr *pQueryAttr, uint64_t qId) { for (int32_t i = 0; i < pQueryAttr->numOfCols; ++i) { if (pQueryAttr->colList[i].numOfFilters > 0) { pQueryAttr->numOfFilterCols++; @@ -6033,13 +6034,13 @@ static int32_t createFilterInfo(void *pQInfo, SQueryAttr *pQueryAttr) { int32_t lower = pSingleColFilter->filterInfo.lowerRelOptr; int32_t upper = pSingleColFilter->filterInfo.upperRelOptr; if (lower == TSDB_RELATION_INVALID && upper == TSDB_RELATION_INVALID) { - qError("QInfo:%p invalid filter info", pQInfo); + qError("QInfo:%"PRIu64" invalid filter info", qId); return TSDB_CODE_QRY_INVALID_MSG; } pSingleColFilter->fp = getFilterOperator(lower, upper); if (pSingleColFilter->fp == NULL) { - qError("QInfo:%p invalid filter info", pQInfo); + qError("QInfo:%"PRIu64" invalid filter info", qId); return TSDB_CODE_QRY_INVALID_MSG; } @@ -6198,7 +6199,7 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr } doUpdateExprColumnIndex(pQueryAttr); - int32_t ret = createFilterInfo(pQInfo, pQueryAttr); + int32_t ret = createFilterInfo(pQueryAttr, pQInfo->qId); if (ret != TSDB_CODE_SUCCESS) { goto _cleanup; } @@ -6271,7 +6272,10 @@ SQInfo* createQInfoImpl(SQueryTableMsg* pQueryMsg, SSqlGroupbyExpr* pGroupbyExpr } } - colIdCheck(pQueryAttr, pQInfo); + colIdCheck(pQueryAttr, pQInfo->qId); + + // todo refactor + pQInfo->query.queryBlockDist = (numOfOutput == 1 && pExprs[0].base.colInfo.colId == 
TSDB_BLOCK_DIST_COLUMN_INDEX); pQInfo->qId = atomic_add_fetch_64(&queryHandleId, 1); *qId = pQInfo->qId; @@ -6320,7 +6324,9 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, SQInfo* pQInfo, SQueryPara int32_t code = TSDB_CODE_SUCCESS; SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; - SQueryAttr *pQueryAttr = pQInfo->runtimeEnv.pQueryAttr; + pRuntimeEnv->qinfo = pQInfo; + + SQueryAttr *pQueryAttr = pRuntimeEnv->pQueryAttr; STSBuf *pTsBuf = NULL; if (pTsBufInfo->tsLen > 0) { // open new file to save the result @@ -6344,7 +6350,7 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, SQInfo* pQInfo, SQueryPara if ((QUERY_IS_ASC_QUERY(pQueryAttr) && (pQueryAttr->window.skey > pQueryAttr->window.ekey)) || (!QUERY_IS_ASC_QUERY(pQueryAttr) && (pQueryAttr->window.ekey > pQueryAttr->window.skey))) { - qDebug("QInfo:%p no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo, pQueryAttr->window.skey, + qDebug("QInfo:%"PRIu64" no result in time range %" PRId64 "-%" PRId64 ", order %d", pQInfo->qId, pQueryAttr->window.skey, pQueryAttr->window.ekey, pQueryAttr->order.order); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); pRuntimeEnv->tableqinfoGroupInfo.numOfTables = 0; @@ -6353,7 +6359,7 @@ int32_t initQInfo(STsBufInfo* pTsBufInfo, void* tsdb, SQInfo* pQInfo, SQueryPara } if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) { - qDebug("QInfo:%p no table qualified for tag filter, abort query", pQInfo); + qDebug("QInfo:%"PRIu64" no table qualified for tag filter, abort query", pQInfo->qId); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); return TSDB_CODE_SUCCESS; } @@ -6430,7 +6436,7 @@ void freeQInfo(SQInfo *pQInfo) { return; } - qDebug("QInfo:%p start to free QInfo", pQInfo); + qDebug("QInfo:%"PRIu64" start to free QInfo", pQInfo->qId); SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; releaseQueryBuf(pRuntimeEnv->tableqinfoGroupInfo.numOfTables); @@ -6479,7 +6485,7 @@ void freeQInfo(SQInfo *pQInfo) { taosArrayDestroy(pRuntimeEnv->groupResInfo.pRows); pQInfo->signature = 0; - qDebug("QInfo:%p QInfo is freed", pQInfo); + qDebug("QInfo:%"PRIu64" QInfo is freed", pQInfo->qId); tfree(pQInfo); } @@ -6499,7 +6505,7 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { off_t s = lseek(fileno(f), 0, SEEK_END); assert(s == pRuntimeEnv->outputBuf->info.rows); - qDebug("QInfo:%p ts comp data return, file:%p, size:%"PRId64, pQInfo, f, (uint64_t)s); + qDebug("QInfo:%"PRIu64" ts comp data return, file:%p, size:%"PRId64, pQInfo->qId, f, (uint64_t)s); if (fseek(f, 0, SEEK_SET) >= 0) { size_t sz = fread(data, 1, s, f); if(sz < s) { // todo handle error @@ -6530,11 +6536,11 @@ int32_t doDumpQueryResult(SQInfo *pQInfo, char *data) { } pRuntimeEnv->resultInfo.total += pRuntimeEnv->outputBuf->info.rows; - qDebug("QInfo:%p current numOfRes rows:%d, total:%" PRId64, pQInfo, + qDebug("QInfo:%"PRIu64" current numOfRes rows:%d, total:%" PRId64, pQInfo->qId, pRuntimeEnv->outputBuf->info.rows, pRuntimeEnv->resultInfo.total); if (pQueryAttr->limit.limit > 0 && pQueryAttr->limit.limit == pRuntimeEnv->resultInfo.total) { - qDebug("QInfo:%p results limitation reached, limitation:%"PRId64, pQInfo, pQueryAttr->limit.limit); + qDebug("QInfo:%"PRIu64" results limitation reached, limitation:%"PRId64, pQInfo->qId, pQueryAttr->limit.limit); setQueryStatus(pRuntimeEnv, QUERY_OVER); } diff --git a/src/query/src/qPercentile.c b/src/query/src/qPercentile.c index d366646172..e3326cc26b 100644 --- a/src/query/src/qPercentile.c +++ b/src/query/src/qPercentile.c @@ -254,7 +254,7 @@ tMemBucket 
*tMemBucketCreate(int16_t nElemSize, int16_t dataType, double minval,
resetSlotInfo(pBucket);
- int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, NULL);
+ int32_t ret = createDiskbasedResultBuffer(&pBucket->pBuffer, pBucket->bufPageSize, pBucket->bufPageSize * 512, 1);
if (ret != TSDB_CODE_SUCCESS) {
tMemBucketDestroy(pBucket);
return NULL;
}
diff --git a/src/query/src/qResultbuf.c b/src/query/src/qResultbuf.c
index ed7f8c6719..c5dd6b3cac 100644
--- a/src/query/src/qResultbuf.c
+++ b/src/query/src/qResultbuf.c
@@ -9,7 +9,7 @@
#define GET_DATA_PAYLOAD(_p) ((char *)(_p)->pData + POINTER_BYTES)
#define NO_IN_MEM_AVAILABLE_PAGES(_b) (listNEles((_b)->lruList) >= (_b)->inMemPages)
-int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, const void* handle) {
+int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pagesize, int32_t inMemBufSize, uint64_t qId) {
*pResultBuf = calloc(1, sizeof(SDiskbasedResultBuf));
SDiskbasedResultBuf* pResBuf = *pResultBuf;
@@ -24,7 +24,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa
pResBuf->allocateId = -1;
pResBuf->comp = true;
pResBuf->file = NULL;
- pResBuf->handle = handle;
+ pResBuf->qId = qId;
pResBuf->fileSize = 0;
// at least more than 2 pages must be in memory
@@ -43,7 +43,7 @@ int32_t createDiskbasedResultBuffer(SDiskbasedResultBuf** pResultBuf, int32_t pa
pResBuf->emptyDummyIdList = taosArrayInit(1, sizeof(int32_t));
- qDebug("QInfo:%p create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", handle, pResBuf->pageSize,
+ qDebug("QInfo:%"PRIu64" create resBuf for output, page size:%d, inmem buf pages:%d, file:%s", qId, pResBuf->pageSize,
pResBuf->inMemPages, pResBuf->path);
return TSDB_CODE_SUCCESS;
@@ -406,13 +406,13 @@ void destroyResultBuf(SDiskbasedResultBuf* pResultBuf) {
}
if (pResultBuf->file != NULL) {
- qDebug("QInfo:%p res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb",
- pResultBuf->handle, pResultBuf->totalBufSize/1024.0, listNEles(pResultBuf->lruList) * pResultBuf->pageSize / 1024.0,
+ qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, inmem size:%.2f Kb, file size:%.2f Kb",
+ pResultBuf->qId, pResultBuf->totalBufSize/1024.0, listNEles(pResultBuf->lruList) * pResultBuf->pageSize / 1024.0,
pResultBuf->fileSize/1024.0);
fclose(pResultBuf->file);
} else {
- qDebug("QInfo:%p res output buffer closed, total:%.2f Kb, no file created", pResultBuf->handle,
+ qDebug("QInfo:%"PRIu64" res output buffer closed, total:%.2f Kb, no file created", pResultBuf->qId,
pResultBuf->totalBufSize/1024.0);
}
diff --git a/src/query/src/qSqlParser.c b/src/query/src/qSqlParser.c
index 19bd2d35fc..a3a81083f0 100644
--- a/src/query/src/qSqlParser.c
+++ b/src/query/src/qSqlParser.c
@@ -127,7 +127,12 @@ tSqlExpr *tSqlExprCreateIdValue(SStrToken *pToken, int32_t optrType) {
pSqlExpr->token = *pToken;
}
- if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) {
+ if (optrType == TK_NULL) {
+ pToken->type = TSDB_DATA_TYPE_NULL;
+ tVariantCreate(&pSqlExpr->value, pToken);
+ pSqlExpr->tokenId = optrType;
+ pSqlExpr->type = SQL_NODE_VALUE;
+ } else if (optrType == TK_INTEGER || optrType == TK_STRING || optrType == TK_FLOAT || optrType == TK_BOOL) {
toTSDBType(pToken->type);
tVariantCreate(&pSqlExpr->value, pToken);
@@ -356,7 +361,11 @@ void tSqlExprCompact(tSqlExpr** pExpr) {
bool tSqlExprIsLeaf(tSqlExpr*
pExpr) { return (pExpr->pRight == NULL && pExpr->pLeft == NULL) && - (pExpr->tokenId == 0 || pExpr->tokenId == TK_ID || (pExpr->tokenId >= TK_BOOL && pExpr->tokenId <= TK_NCHAR) || pExpr->tokenId == TK_SET); + (pExpr->tokenId == 0 || + (pExpr->tokenId == TK_ID) || + (pExpr->tokenId >= TK_BOOL && pExpr->tokenId <= TK_NCHAR) || + (pExpr->tokenId == TK_NULL) || + (pExpr->tokenId == TK_SET)); } bool tSqlExprIsParentOfLeaf(tSqlExpr* pExpr) { @@ -911,6 +920,7 @@ void setDCLSqlElems(SSqlInfo *pInfo, int32_t type, int32_t nParam, ...) { SStrToken *pToken = va_arg(va, SStrToken *); taosArrayPush(pInfo->pMiscInfo->a, pToken); } + va_end(va); } diff --git a/src/query/src/qTokenizer.c b/src/query/src/qTokenizer.c index 013eaaf2a9..52b2fdbb82 100644 --- a/src/query/src/qTokenizer.c +++ b/src/query/src/qTokenizer.c @@ -100,6 +100,7 @@ static SKeyword keywordTable[] = { {"ACCOUNT", TK_ACCOUNT}, {"USE", TK_USE}, {"DESCRIBE", TK_DESCRIBE}, + {"SYNCDB", TK_SYNCDB}, {"ALTER", TK_ALTER}, {"PASS", TK_PASS}, {"PRIVILEGE", TK_PRIVILEGE}, diff --git a/src/query/src/qUtil.c b/src/query/src/qUtil.c index 930c5bf893..4cd22aadf5 100644 --- a/src/query/src/qUtil.c +++ b/src/query/src/qUtil.c @@ -468,7 +468,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes pTableQueryInfoList = malloc(POINTER_BYTES * size); if (pTableQueryInfoList == NULL || posList == NULL || pGroupResInfo->pRows == NULL || pGroupResInfo->pRows == NULL) { - qError("QInfo:%p failed alloc memory", pRuntimeEnv->qinfo); + qError("QInfo:%"PRIu64" failed alloc memory", GET_QID(pRuntimeEnv)); code = TSDB_CODE_QRY_OUT_OF_MEMORY; goto _end; } @@ -540,7 +540,7 @@ static int32_t mergeIntoGroupResultImpl(SQueryRuntimeEnv *pRuntimeEnv, SGroupRes int64_t endt = taosGetTimestampMs(); - qDebug("QInfo:%p result merge completed for group:%d, elapsed time:%" PRId64 " ms", pRuntimeEnv->qinfo, + qDebug("QInfo:%"PRIu64" result merge completed for group:%d, elapsed time:%" PRId64 " ms", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup, endt - startt); _end: @@ -567,13 +567,13 @@ int32_t mergeIntoGroupResult(SGroupResInfo* pGroupResInfo, SQueryRuntimeEnv* pRu break; } - qDebug("QInfo:%p no result in group %d, continue", pRuntimeEnv->qinfo, pGroupResInfo->currentGroup); + qDebug("QInfo:%"PRIu64" no result in group %d, continue", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup); cleanupGroupResInfo(pGroupResInfo); incNextGroup(pGroupResInfo); } int64_t elapsedTime = taosGetTimestampUs() - st; - qDebug("QInfo:%p merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", pRuntimeEnv->qinfo, + qDebug("QInfo:%"PRIu64" merge res data into group, index:%d, total group:%d, elapsed time:%" PRId64 "us", GET_QID(pRuntimeEnv), pGroupResInfo->currentGroup, pGroupResInfo->totalGroup, elapsedTime); // pQInfo->summary.firstStageMergeTime += elapsedTime; diff --git a/src/query/src/queryMain.c b/src/query/src/queryMain.c index 0b332c6092..64af517e9a 100644 --- a/src/query/src/queryMain.c +++ b/src/query/src/queryMain.c @@ -198,29 +198,30 @@ int32_t qCreateQueryInfo(void* tsdb, int32_t vgId, SQueryTableMsg* pQueryMsg, qi return code; } -bool qTableQuery(qinfo_t qinfo) { +bool qTableQuery(qinfo_t qinfo, uint64_t *qId) { SQInfo *pQInfo = (SQInfo *)qinfo; assert(pQInfo && pQInfo->signature == pQInfo); int64_t threadId = taosGetSelfPthreadId(); int64_t curOwner = 0; if ((curOwner = atomic_val_compare_exchange_64(&pQInfo->owner, 0, threadId)) != 0) { - qError("QInfo:%p qhandle is now executed by thread:%p", pQInfo, (void*) curOwner); + 
qError("QInfo:%"PRIu64"-%p qhandle is now executed by thread:%p", pQInfo->qId, pQInfo, (void*) curOwner); pQInfo->code = TSDB_CODE_QRY_IN_EXEC; return false; } + *qId = pQInfo->qId; pQInfo->startExecTs = taosGetTimestampSec(); if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%p it is already killed, abort", pQInfo); + qDebug("QInfo:%"PRIu64" it is already killed, abort", pQInfo->qId); return doBuildResCheck(pQInfo); } SQueryRuntimeEnv* pRuntimeEnv = &pQInfo->runtimeEnv; if (pRuntimeEnv->tableqinfoGroupInfo.numOfTables == 0) { - qDebug("QInfo:%p no table exists for query, abort", pQInfo); + qDebug("QInfo:%"PRIu64" no table exists for query, abort", pQInfo->qId); setQueryStatus(pRuntimeEnv, QUERY_COMPLETED); return doBuildResCheck(pQInfo); } @@ -229,22 +230,22 @@ bool qTableQuery(qinfo_t qinfo) { int32_t ret = setjmp(pQInfo->runtimeEnv.env); if (ret != TSDB_CODE_SUCCESS) { pQInfo->code = ret; - qDebug("QInfo:%p query abort due to error/cancel occurs, code:%s", pQInfo, tstrerror(pQInfo->code)); + qDebug("QInfo:%"PRIu64" query abort due to error/cancel occurs, code:%s", pQInfo->qId, tstrerror(pQInfo->code)); return doBuildResCheck(pQInfo); } - qDebug("QInfo:%p query task is launched", pQInfo); + qDebug("QInfo:%"PRIu64" query task is launched", pQInfo->qId); pRuntimeEnv->outputBuf = pRuntimeEnv->proot->exec(pRuntimeEnv->proot); if (isQueryKilled(pQInfo)) { - qDebug("QInfo:%p query is killed", pQInfo); + qDebug("QInfo:%"PRIu64" query is killed", pQInfo->qId); } else if (GET_NUM_OF_RESULTS(pRuntimeEnv) == 0) { - qDebug("QInfo:%p over, %u tables queried, %"PRId64" rows are returned", pQInfo, pRuntimeEnv->tableqinfoGroupInfo.numOfTables, + qDebug("QInfo:%"PRIu64" over, %u tables queried, %"PRId64" rows are returned", pQInfo->qId, pRuntimeEnv->tableqinfoGroupInfo.numOfTables, pRuntimeEnv->resultInfo.total); } else { - qDebug("QInfo:%p query paused, %d rows returned, numOfTotal:%" PRId64 " rows", - pQInfo, GET_NUM_OF_RESULTS(pRuntimeEnv), pRuntimeEnv->resultInfo.total + GET_NUM_OF_RESULTS(pRuntimeEnv)); + qDebug("QInfo:%"PRIu64" query paused, %d rows returned, numOfTotal:%" PRId64 " rows", + pQInfo->qId, GET_NUM_OF_RESULTS(pRuntimeEnv), pRuntimeEnv->resultInfo.total + GET_NUM_OF_RESULTS(pRuntimeEnv)); } return doBuildResCheck(pQInfo); @@ -254,13 +255,13 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex SQInfo *pQInfo = (SQInfo *)qinfo; if (pQInfo == NULL || !isValidQInfo(pQInfo)) { - qError("QInfo:%p invalid qhandle", pQInfo); + qError("QInfo:%"PRIu64" invalid qhandle", pQInfo->qId); return TSDB_CODE_QRY_INVALID_QHANDLE; } *buildRes = false; if (IS_QUERY_KILLED(pQInfo)) { - qDebug("QInfo:%p query is killed, code:0x%08x", pQInfo, pQInfo->code); + qDebug("QInfo:%"PRIu64" query is killed, code:0x%08x", pQInfo->qId, pQInfo->code); return pQInfo->code; } @@ -280,11 +281,11 @@ int32_t qRetrieveQueryResultInfo(qinfo_t qinfo, bool* buildRes, void* pRspContex assert(pQInfo->rspContext == NULL); if (pQInfo->dataReady == QUERY_RESULT_READY) { *buildRes = true; - qDebug("QInfo:%p retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo, pQueryAttr->resultRowSize, + qDebug("QInfo:%"PRIu64" retrieve result info, rowsize:%d, rows:%d, code:%s", pQInfo->qId, pQueryAttr->resultRowSize, GET_NUM_OF_RESULTS(pRuntimeEnv), tstrerror(pQInfo->code)); } else { *buildRes = false; - qDebug("QInfo:%p retrieve req set query return result after paused", pQInfo); + qDebug("QInfo:%"PRIu64" retrieve req set query return result after paused", pQInfo->qId); pQInfo->rspContext = pRspContext; 
assert(pQInfo->rspContext != NULL); } @@ -343,9 +344,10 @@ int32_t qDumpRetrieveResult(qinfo_t qinfo, SRetrieveTableRsp **pRsp, int32_t *co // here current thread hold the refcount, so it is safe to free tsdbQueryHandle. *continueExec = false; (*pRsp)->completed = 1; // notify no more result to client + qDebug("QInfo:%"PRIu64" no more results to retrieve", pQInfo->qId); } else { *continueExec = true; - qDebug("QInfo:%p has more results to retrieve", pQInfo); + qDebug("QInfo:%"PRIu64" has more results to retrieve", pQInfo->qId); } // the memory should be freed if the code of pQInfo is not TSDB_CODE_SUCCESS @@ -371,6 +373,7 @@ int32_t qKillQuery(qinfo_t qinfo) { return TSDB_CODE_QRY_INVALID_QHANDLE; } + qDebug("QInfo:%"PRIu64" query killed", pQInfo->qId); setQueryKilled(pQInfo); // Wait for the query executing thread being stopped/ @@ -398,7 +401,7 @@ void qDestroyQueryInfo(qinfo_t qHandle) { return; } - qDebug("QInfo:%p query completed", pQInfo); + qDebug("QInfo:%"PRIu64" query completed", pQInfo->qId); queryCostStatis(pQInfo); // print the query cost summary freeQInfo(pQInfo); } @@ -481,7 +484,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) { SQueryMgmt *pQueryMgmt = pMgmt; if (pQueryMgmt->qinfoPool == NULL) { - qError("QInfo:%p failed to add qhandle into qMgmt, since qMgmt is closed", (void *)qInfo); + qError("QInfo:%"PRIu64"-%p failed to add qhandle into qMgmt, since qMgmt is closed", qId, (void*)qInfo); terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; } @@ -489,7 +492,7 @@ void** qRegisterQInfo(void* pMgmt, uint64_t qId, uint64_t qInfo) { pthread_mutex_lock(&pQueryMgmt->lock); if (pQueryMgmt->closed) { pthread_mutex_unlock(&pQueryMgmt->lock); - qError("QInfo:%p failed to add qhandle into cache, since qMgmt is colsing", (void *)qInfo); + qError("QInfo:%"PRIu64"-%p failed to add qhandle into cache, since qMgmt is colsing", qId, (void*)qInfo); terrno = TSDB_CODE_VND_INVALID_VGROUP_ID; return NULL; } else { diff --git a/src/query/src/sql.c b/src/query/src/sql.c index e976e5ebe2..77f836ad0f 100644 --- a/src/query/src/sql.c +++ b/src/query/src/sql.c @@ -97,28 +97,28 @@ #endif /************* Begin control #defines *****************************************/ #define YYCODETYPE unsigned short int -#define YYNOCODE 263 +#define YYNOCODE 264 #define YYACTIONTYPE unsigned short int #define ParseTOKENTYPE SStrToken typedef union { int yyinit; ParseTOKENTYPE yy0; - SLimitVal yy18; - SFromInfo* yy70; - SSessionWindowVal yy87; - SCreateDbInfo yy94; - int yy116; - SSubclauseInfo* yy141; - tSqlExpr* yy170; - SCreateTableSql* yy194; - tVariant yy218; - SIntervalVal yy220; - SCreatedTableInfo yy252; - SQuerySqlNode* yy254; - SCreateAcctInfo yy419; - SArray* yy429; - TAOS_FIELD yy451; - int64_t yy481; + SCreateTableSql* yy14; + int yy20; + tSqlExpr* yy118; + SArray* yy159; + SIntervalVal yy184; + SCreatedTableInfo yy206; + SSessionWindowVal yy249; + SQuerySqlNode* yy272; + int64_t yy317; + SCreateDbInfo yy322; + SCreateAcctInfo yy351; + SSubclauseInfo* yy391; + TAOS_FIELD yy407; + SLimitVal yy440; + tVariant yy488; + SFromInfo* yy514; } YYMINORTYPE; #ifndef YYSTACKDEPTH #define YYSTACKDEPTH 100 @@ -128,17 +128,17 @@ typedef union { #define ParseARG_FETCH SSqlInfo* pInfo = yypParser->pInfo #define ParseARG_STORE yypParser->pInfo = pInfo #define YYFALLBACK 1 -#define YYNSTATE 313 -#define YYNRULE 265 -#define YYNTOKEN 186 -#define YY_MAX_SHIFT 312 -#define YY_MIN_SHIFTREDUCE 502 -#define YY_MAX_SHIFTREDUCE 766 -#define YY_ERROR_ACTION 767 -#define YY_ACCEPT_ACTION 768 -#define 
YY_NO_ACTION 769 -#define YY_MIN_REDUCE 770 -#define YY_MAX_REDUCE 1034 +#define YYNSTATE 315 +#define YYNRULE 267 +#define YYNTOKEN 187 +#define YY_MAX_SHIFT 314 +#define YY_MIN_SHIFTREDUCE 506 +#define YY_MAX_SHIFTREDUCE 772 +#define YY_ERROR_ACTION 773 +#define YY_ACCEPT_ACTION 774 +#define YY_NO_ACTION 775 +#define YY_MIN_REDUCE 776 +#define YY_MAX_REDUCE 1042 /************* End control #defines *******************************************/ /* Define the yytestcase() macro to be a no-op if is not already defined @@ -204,259 +204,259 @@ typedef union { ** yy_default[] Default action for each state. ** *********** Begin parsing tables **********************************************/ -#define YY_ACTTAB_COUNT (676) +#define YY_ACTTAB_COUNT (680) static const YYACTIONTYPE yy_action[] = { - /* 0 */ 910, 549, 201, 310, 205, 139, 937, 3, 166, 550, - /* 10 */ 768, 312, 17, 47, 48, 139, 51, 52, 30, 180, - /* 20 */ 213, 41, 180, 50, 260, 55, 53, 57, 54, 1016, - /* 30 */ 916, 208, 1017, 46, 45, 178, 180, 44, 43, 42, - /* 40 */ 47, 48, 219, 51, 52, 207, 1017, 213, 41, 549, - /* 50 */ 50, 260, 55, 53, 57, 54, 928, 550, 184, 202, - /* 60 */ 46, 45, 913, 218, 44, 43, 42, 48, 934, 51, - /* 70 */ 52, 240, 968, 213, 41, 549, 50, 260, 55, 53, - /* 80 */ 57, 54, 969, 550, 255, 220, 46, 45, 276, 916, - /* 90 */ 44, 43, 42, 503, 504, 505, 506, 507, 508, 509, - /* 100 */ 510, 511, 512, 513, 514, 515, 311, 628, 84, 230, - /* 110 */ 69, 916, 1013, 47, 48, 30, 51, 52, 296, 30, - /* 120 */ 213, 41, 549, 50, 260, 55, 53, 57, 54, 64, - /* 130 */ 550, 306, 714, 46, 45, 286, 285, 44, 43, 42, - /* 140 */ 47, 49, 904, 51, 52, 224, 1012, 213, 41, 65, - /* 150 */ 50, 260, 55, 53, 57, 54, 216, 916, 902, 913, - /* 160 */ 46, 45, 222, 912, 44, 43, 42, 23, 274, 305, - /* 170 */ 304, 273, 272, 271, 303, 270, 302, 301, 300, 269, - /* 180 */ 299, 298, 876, 139, 864, 865, 866, 867, 868, 869, - /* 190 */ 870, 871, 872, 873, 874, 875, 877, 878, 51, 52, - /* 200 */ 815, 139, 213, 41, 165, 50, 260, 55, 53, 57, - /* 210 */ 54, 1011, 18, 81, 226, 46, 45, 283, 282, 44, - /* 220 */ 43, 42, 212, 727, 928, 25, 718, 68, 721, 189, - /* 230 */ 724, 223, 212, 727, 278, 190, 718, 276, 721, 203, - /* 240 */ 724, 117, 116, 188, 899, 900, 29, 903, 257, 233, - /* 250 */ 77, 44, 43, 42, 209, 210, 237, 236, 259, 901, - /* 260 */ 23, 197, 305, 304, 209, 210, 225, 303, 78, 302, - /* 270 */ 301, 300, 73, 299, 298, 884, 104, 30, 882, 883, - /* 280 */ 36, 296, 720, 885, 723, 887, 888, 886, 667, 889, - /* 290 */ 890, 55, 53, 57, 54, 132, 309, 308, 125, 46, - /* 300 */ 45, 914, 239, 44, 43, 42, 102, 107, 30, 196, - /* 310 */ 664, 73, 96, 106, 112, 115, 105, 24, 217, 36, - /* 320 */ 674, 913, 109, 5, 155, 56, 261, 79, 243, 33, - /* 330 */ 154, 91, 86, 90, 30, 56, 173, 169, 726, 30, - /* 340 */ 70, 30, 171, 168, 120, 119, 118, 12, 726, 279, - /* 350 */ 211, 83, 913, 80, 725, 824, 46, 45, 245, 165, - /* 360 */ 44, 43, 42, 198, 725, 816, 671, 652, 182, 165, - /* 370 */ 649, 719, 650, 722, 651, 280, 1, 153, 913, 716, - /* 380 */ 284, 61, 288, 913, 183, 913, 241, 695, 696, 680, - /* 390 */ 31, 686, 687, 134, 6, 60, 20, 747, 227, 228, - /* 400 */ 728, 19, 638, 62, 19, 263, 31, 640, 265, 31, - /* 410 */ 639, 60, 82, 28, 60, 717, 266, 95, 94, 14, - /* 420 */ 13, 67, 730, 627, 185, 101, 100, 179, 16, 15, - /* 430 */ 979, 656, 654, 657, 655, 114, 113, 130, 128, 186, - /* 440 */ 187, 193, 194, 192, 177, 191, 181, 1026, 915, 978, - /* 450 */ 214, 975, 974, 215, 287, 131, 39, 936, 944, 946, - /* 460 */ 133, 137, 929, 244, 129, 150, 961, 960, 911, 909, - /* 470 */ 149, 
679, 246, 151, 204, 152, 653, 250, 258, 827, - /* 480 */ 140, 66, 141, 268, 37, 63, 175, 926, 34, 277, - /* 490 */ 248, 823, 253, 142, 1031, 58, 92, 1030, 1028, 256, - /* 500 */ 156, 143, 281, 1025, 98, 1024, 1022, 254, 157, 845, - /* 510 */ 35, 32, 38, 252, 176, 812, 108, 810, 110, 111, - /* 520 */ 808, 807, 229, 167, 805, 804, 803, 802, 801, 800, - /* 530 */ 170, 172, 797, 795, 793, 791, 789, 174, 247, 242, - /* 540 */ 71, 74, 249, 962, 40, 297, 103, 289, 290, 291, - /* 550 */ 292, 293, 294, 295, 199, 221, 307, 766, 231, 232, - /* 560 */ 267, 765, 234, 235, 764, 200, 238, 87, 88, 752, - /* 570 */ 195, 243, 75, 8, 262, 806, 72, 659, 681, 135, - /* 580 */ 76, 121, 159, 846, 160, 161, 158, 162, 164, 122, - /* 590 */ 163, 799, 2, 123, 880, 124, 798, 790, 684, 144, - /* 600 */ 147, 145, 146, 4, 136, 148, 892, 206, 251, 26, - /* 610 */ 688, 138, 9, 10, 729, 27, 7, 11, 21, 731, - /* 620 */ 22, 85, 264, 591, 587, 83, 585, 584, 583, 580, - /* 630 */ 553, 275, 93, 89, 31, 630, 59, 97, 629, 99, - /* 640 */ 626, 575, 573, 565, 571, 567, 569, 563, 561, 594, - /* 650 */ 593, 592, 590, 589, 588, 586, 582, 581, 60, 551, - /* 660 */ 519, 517, 770, 769, 769, 769, 769, 769, 769, 769, - /* 670 */ 769, 769, 769, 769, 126, 127, + /* 0 */ 133, 553, 202, 312, 206, 140, 943, 226, 140, 554, + /* 10 */ 774, 314, 17, 47, 48, 140, 51, 52, 30, 181, + /* 20 */ 214, 41, 181, 50, 262, 55, 53, 57, 54, 1023, + /* 30 */ 922, 209, 1024, 46, 45, 179, 181, 44, 43, 42, + /* 40 */ 47, 48, 920, 51, 52, 208, 1024, 214, 41, 553, + /* 50 */ 50, 262, 55, 53, 57, 54, 934, 554, 185, 203, + /* 60 */ 46, 45, 919, 247, 44, 43, 42, 48, 940, 51, + /* 70 */ 52, 242, 974, 214, 41, 79, 50, 262, 55, 53, + /* 80 */ 57, 54, 975, 632, 257, 30, 46, 45, 278, 225, + /* 90 */ 44, 43, 42, 507, 508, 509, 510, 511, 512, 513, + /* 100 */ 514, 515, 516, 517, 518, 519, 313, 553, 85, 231, + /* 110 */ 70, 288, 287, 47, 48, 554, 51, 52, 298, 219, + /* 120 */ 214, 41, 553, 50, 262, 55, 53, 57, 54, 918, + /* 130 */ 554, 105, 718, 46, 45, 1020, 298, 44, 43, 42, + /* 140 */ 47, 49, 910, 51, 52, 922, 140, 214, 41, 234, + /* 150 */ 50, 262, 55, 53, 57, 54, 1019, 238, 237, 227, + /* 160 */ 46, 45, 285, 284, 44, 43, 42, 23, 276, 307, + /* 170 */ 306, 275, 274, 273, 305, 272, 304, 303, 302, 271, + /* 180 */ 301, 300, 882, 30, 870, 871, 872, 873, 874, 875, + /* 190 */ 876, 877, 878, 879, 880, 881, 883, 884, 51, 52, + /* 200 */ 821, 1018, 214, 41, 166, 50, 262, 55, 53, 57, + /* 210 */ 54, 259, 18, 78, 82, 46, 45, 198, 223, 44, + /* 220 */ 43, 42, 213, 731, 217, 25, 722, 919, 725, 190, + /* 230 */ 728, 221, 213, 731, 199, 191, 722, 724, 725, 727, + /* 240 */ 728, 118, 117, 189, 263, 905, 906, 29, 909, 44, + /* 250 */ 43, 42, 30, 74, 210, 211, 308, 922, 261, 30, + /* 260 */ 23, 36, 307, 306, 210, 211, 934, 305, 30, 304, + /* 270 */ 303, 302, 74, 301, 300, 890, 908, 183, 888, 889, + /* 280 */ 36, 204, 922, 891, 916, 893, 894, 892, 224, 895, + /* 290 */ 896, 280, 656, 218, 830, 653, 919, 654, 166, 655, + /* 300 */ 281, 69, 241, 919, 68, 55, 53, 57, 54, 282, + /* 310 */ 197, 671, 919, 46, 45, 30, 822, 44, 43, 42, + /* 320 */ 166, 103, 108, 228, 229, 56, 220, 97, 107, 113, + /* 330 */ 116, 106, 732, 907, 723, 56, 726, 110, 730, 30, + /* 340 */ 735, 12, 732, 5, 156, 84, 184, 81, 730, 33, + /* 350 */ 155, 92, 87, 91, 729, 278, 286, 1, 154, 919, + /* 360 */ 174, 170, 186, 212, 729, 80, 172, 169, 121, 120, + /* 370 */ 119, 46, 45, 3, 167, 44, 43, 42, 71, 720, + /* 380 */ 290, 699, 700, 919, 311, 310, 126, 243, 668, 675, + /* 390 */ 678, 31, 684, 690, 180, 24, 135, 60, 245, 691, + 
/* 400 */ 752, 657, 733, 20, 19, 61, 19, 6, 64, 642, + /* 410 */ 265, 1034, 644, 31, 31, 721, 60, 267, 643, 187, + /* 420 */ 28, 83, 60, 268, 96, 95, 188, 62, 65, 14, + /* 430 */ 13, 102, 101, 660, 67, 661, 631, 194, 16, 15, + /* 440 */ 658, 195, 659, 115, 114, 131, 129, 193, 178, 192, + /* 450 */ 182, 921, 985, 984, 215, 981, 239, 980, 132, 942, + /* 460 */ 216, 289, 39, 950, 952, 967, 134, 966, 138, 935, + /* 470 */ 246, 130, 248, 917, 151, 915, 150, 205, 683, 250, + /* 480 */ 152, 886, 299, 153, 291, 148, 146, 260, 142, 932, + /* 490 */ 141, 58, 833, 270, 66, 255, 143, 37, 63, 176, + /* 500 */ 34, 279, 829, 258, 144, 1039, 93, 256, 1038, 1036, + /* 510 */ 157, 254, 283, 1033, 99, 1032, 1030, 158, 851, 35, + /* 520 */ 252, 145, 32, 38, 177, 818, 109, 816, 111, 40, + /* 530 */ 112, 814, 813, 230, 168, 811, 810, 809, 808, 807, + /* 540 */ 806, 171, 173, 803, 801, 799, 797, 795, 175, 249, + /* 550 */ 244, 72, 75, 104, 251, 968, 292, 293, 294, 295, + /* 560 */ 296, 297, 309, 200, 222, 269, 772, 232, 201, 233, + /* 570 */ 771, 88, 89, 196, 235, 236, 770, 758, 757, 240, + /* 580 */ 245, 8, 264, 73, 812, 663, 805, 161, 852, 159, + /* 590 */ 160, 163, 162, 164, 165, 122, 123, 124, 804, 76, + /* 600 */ 125, 796, 4, 2, 685, 136, 137, 688, 77, 149, + /* 610 */ 147, 207, 253, 86, 692, 898, 139, 9, 10, 26, + /* 620 */ 27, 734, 7, 11, 736, 21, 22, 266, 595, 591, + /* 630 */ 589, 84, 588, 587, 584, 557, 277, 94, 90, 31, + /* 640 */ 634, 633, 59, 630, 579, 577, 98, 569, 575, 571, + /* 650 */ 573, 567, 565, 100, 598, 597, 596, 594, 593, 592, + /* 660 */ 590, 586, 585, 60, 555, 523, 521, 776, 775, 127, + /* 670 */ 775, 775, 775, 775, 775, 775, 775, 775, 775, 128, }; static const YYCODETYPE yy_lookahead[] = { - /* 0 */ 190, 1, 189, 190, 209, 190, 190, 193, 194, 9, - /* 10 */ 187, 188, 251, 13, 14, 190, 16, 17, 190, 251, - /* 20 */ 20, 21, 251, 23, 24, 25, 26, 27, 28, 261, - /* 30 */ 235, 260, 261, 33, 34, 251, 251, 37, 38, 39, - /* 40 */ 13, 14, 232, 16, 17, 260, 261, 20, 21, 1, - /* 50 */ 23, 24, 25, 26, 27, 28, 233, 9, 251, 231, - /* 60 */ 33, 34, 234, 209, 37, 38, 39, 14, 252, 16, - /* 70 */ 17, 248, 257, 20, 21, 1, 23, 24, 25, 26, - /* 80 */ 27, 28, 257, 9, 259, 209, 33, 34, 79, 235, + /* 0 */ 191, 1, 190, 191, 210, 191, 191, 191, 191, 9, + /* 10 */ 188, 189, 252, 13, 14, 191, 16, 17, 191, 252, + /* 20 */ 20, 21, 252, 23, 24, 25, 26, 27, 28, 262, + /* 30 */ 236, 261, 262, 33, 34, 252, 252, 37, 38, 39, + /* 40 */ 13, 14, 226, 16, 17, 261, 262, 20, 21, 1, + /* 50 */ 23, 24, 25, 26, 27, 28, 234, 9, 252, 232, + /* 60 */ 33, 34, 235, 254, 37, 38, 39, 14, 253, 16, + /* 70 */ 17, 249, 258, 20, 21, 258, 23, 24, 25, 26, + /* 80 */ 27, 28, 258, 5, 260, 191, 33, 34, 79, 67, /* 90 */ 37, 38, 39, 45, 46, 47, 48, 49, 50, 51, - /* 100 */ 52, 53, 54, 55, 56, 57, 58, 5, 196, 61, - /* 110 */ 110, 235, 251, 13, 14, 190, 16, 17, 81, 190, - /* 120 */ 20, 21, 1, 23, 24, 25, 26, 27, 28, 109, - /* 130 */ 9, 209, 105, 33, 34, 33, 34, 37, 38, 39, - /* 140 */ 13, 14, 230, 16, 17, 67, 251, 20, 21, 129, - /* 150 */ 23, 24, 25, 26, 27, 28, 231, 235, 0, 234, - /* 160 */ 33, 34, 67, 234, 37, 38, 39, 88, 89, 90, + /* 100 */ 52, 53, 54, 55, 56, 57, 58, 1, 197, 61, + /* 110 */ 110, 33, 34, 13, 14, 9, 16, 17, 81, 210, + /* 120 */ 20, 21, 1, 23, 24, 25, 26, 27, 28, 235, + /* 130 */ 9, 76, 105, 33, 34, 252, 81, 37, 38, 39, + /* 140 */ 13, 14, 231, 16, 17, 236, 191, 20, 21, 135, + /* 150 */ 23, 24, 25, 26, 27, 28, 252, 143, 144, 137, + /* 160 */ 33, 34, 140, 141, 37, 38, 39, 88, 89, 90, /* 170 */ 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, - 
/* 180 */ 101, 102, 208, 190, 210, 211, 212, 213, 214, 215, - /* 190 */ 216, 217, 218, 219, 220, 221, 222, 223, 16, 17, - /* 200 */ 195, 190, 20, 21, 199, 23, 24, 25, 26, 27, - /* 210 */ 28, 251, 44, 196, 136, 33, 34, 139, 140, 37, - /* 220 */ 38, 39, 1, 2, 233, 104, 5, 196, 7, 61, - /* 230 */ 9, 136, 1, 2, 139, 67, 5, 79, 7, 248, - /* 240 */ 9, 73, 74, 75, 227, 228, 229, 230, 255, 135, - /* 250 */ 257, 37, 38, 39, 33, 34, 142, 143, 37, 228, - /* 260 */ 88, 251, 90, 91, 33, 34, 190, 95, 257, 97, - /* 270 */ 98, 99, 104, 101, 102, 208, 76, 190, 211, 212, - /* 280 */ 112, 81, 5, 216, 7, 218, 219, 220, 37, 222, - /* 290 */ 223, 25, 26, 27, 28, 190, 64, 65, 66, 33, - /* 300 */ 34, 225, 134, 37, 38, 39, 62, 63, 190, 141, - /* 310 */ 109, 104, 68, 69, 70, 71, 72, 116, 231, 112, - /* 320 */ 105, 234, 78, 62, 63, 104, 15, 236, 113, 68, - /* 330 */ 69, 70, 71, 72, 190, 104, 62, 63, 117, 190, - /* 340 */ 249, 190, 68, 69, 70, 71, 72, 104, 117, 231, - /* 350 */ 60, 108, 234, 110, 133, 195, 33, 34, 253, 199, - /* 360 */ 37, 38, 39, 251, 133, 195, 115, 2, 251, 199, - /* 370 */ 5, 5, 7, 7, 9, 231, 197, 198, 234, 1, - /* 380 */ 231, 109, 231, 234, 251, 234, 105, 124, 125, 105, - /* 390 */ 109, 105, 105, 109, 104, 109, 109, 105, 33, 34, - /* 400 */ 105, 109, 105, 131, 109, 105, 109, 105, 105, 109, - /* 410 */ 105, 109, 109, 104, 109, 37, 107, 137, 138, 137, - /* 420 */ 138, 104, 111, 106, 251, 137, 138, 251, 137, 138, - /* 430 */ 226, 5, 5, 7, 7, 76, 77, 62, 63, 251, - /* 440 */ 251, 251, 251, 251, 251, 251, 251, 235, 235, 226, - /* 450 */ 226, 226, 226, 226, 226, 190, 250, 190, 190, 190, - /* 460 */ 190, 190, 233, 233, 60, 190, 258, 258, 233, 190, - /* 470 */ 237, 117, 254, 190, 254, 190, 111, 119, 122, 190, - /* 480 */ 246, 128, 245, 190, 190, 130, 190, 247, 190, 190, - /* 490 */ 254, 190, 254, 244, 190, 127, 190, 190, 190, 126, - /* 500 */ 190, 243, 190, 190, 190, 190, 190, 121, 190, 190, - /* 510 */ 190, 190, 190, 120, 190, 190, 190, 190, 190, 190, - /* 520 */ 190, 190, 190, 190, 190, 190, 190, 190, 190, 190, - /* 530 */ 190, 190, 190, 190, 190, 190, 190, 190, 118, 191, - /* 540 */ 191, 191, 191, 191, 132, 103, 87, 86, 50, 83, - /* 550 */ 85, 54, 84, 82, 191, 191, 79, 5, 144, 5, - /* 560 */ 191, 5, 144, 5, 5, 191, 135, 196, 196, 89, - /* 570 */ 191, 113, 109, 104, 107, 191, 114, 105, 105, 104, - /* 580 */ 104, 192, 205, 207, 201, 204, 206, 202, 200, 192, - /* 590 */ 203, 191, 197, 192, 224, 192, 191, 191, 105, 242, - /* 600 */ 239, 241, 240, 193, 109, 238, 224, 1, 104, 109, - /* 610 */ 105, 104, 123, 123, 105, 109, 104, 104, 104, 111, - /* 620 */ 104, 76, 107, 9, 5, 108, 5, 5, 5, 5, - /* 630 */ 80, 15, 138, 76, 109, 5, 16, 138, 5, 138, - /* 640 */ 105, 5, 5, 5, 5, 5, 5, 5, 5, 5, - /* 650 */ 5, 5, 5, 5, 5, 5, 5, 5, 109, 80, - /* 660 */ 60, 59, 0, 262, 262, 262, 262, 262, 262, 262, - /* 670 */ 262, 262, 262, 262, 21, 21, 262, 262, 262, 262, - /* 680 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 690 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 700 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 710 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 720 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 730 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 740 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 750 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 760 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 770 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 780 */ 262, 262, 262, 262, 262, 262, 262, 262, 
262, 262, - /* 790 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 800 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 810 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 820 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 830 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 840 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 850 */ 262, 262, 262, 262, 262, 262, 262, 262, 262, 262, - /* 860 */ 262, 262, + /* 180 */ 101, 102, 209, 191, 211, 212, 213, 214, 215, 216, + /* 190 */ 217, 218, 219, 220, 221, 222, 223, 224, 16, 17, + /* 200 */ 196, 252, 20, 21, 200, 23, 24, 25, 26, 27, + /* 210 */ 28, 256, 44, 258, 197, 33, 34, 252, 67, 37, + /* 220 */ 38, 39, 1, 2, 232, 104, 5, 235, 7, 61, + /* 230 */ 9, 210, 1, 2, 252, 67, 5, 5, 7, 7, + /* 240 */ 9, 73, 74, 75, 15, 228, 229, 230, 231, 37, + /* 250 */ 38, 39, 191, 104, 33, 34, 210, 236, 37, 191, + /* 260 */ 88, 112, 90, 91, 33, 34, 234, 95, 191, 97, + /* 270 */ 98, 99, 104, 101, 102, 209, 0, 252, 212, 213, + /* 280 */ 112, 249, 236, 217, 191, 219, 220, 221, 137, 223, + /* 290 */ 224, 140, 2, 232, 196, 5, 235, 7, 200, 9, + /* 300 */ 232, 197, 134, 235, 136, 25, 26, 27, 28, 232, + /* 310 */ 142, 37, 235, 33, 34, 191, 196, 37, 38, 39, + /* 320 */ 200, 62, 63, 33, 34, 104, 233, 68, 69, 70, + /* 330 */ 71, 72, 111, 229, 5, 104, 7, 78, 117, 191, + /* 340 */ 111, 104, 111, 62, 63, 108, 252, 110, 117, 68, + /* 350 */ 69, 70, 71, 72, 133, 79, 232, 198, 199, 235, + /* 360 */ 62, 63, 252, 60, 133, 237, 68, 69, 70, 71, + /* 370 */ 72, 33, 34, 194, 195, 37, 38, 39, 250, 1, + /* 380 */ 232, 124, 125, 235, 64, 65, 66, 105, 109, 115, + /* 390 */ 105, 109, 105, 105, 252, 116, 109, 109, 113, 105, + /* 400 */ 105, 111, 105, 109, 109, 109, 109, 104, 109, 105, + /* 410 */ 105, 236, 105, 109, 109, 37, 109, 105, 105, 252, + /* 420 */ 104, 109, 109, 107, 138, 139, 252, 131, 129, 138, + /* 430 */ 139, 138, 139, 5, 104, 7, 106, 252, 138, 139, + /* 440 */ 5, 252, 7, 76, 77, 62, 63, 252, 252, 252, + /* 450 */ 252, 236, 227, 227, 227, 227, 191, 227, 191, 191, + /* 460 */ 227, 227, 251, 191, 191, 259, 191, 259, 191, 234, + /* 470 */ 234, 60, 255, 234, 191, 191, 238, 255, 117, 255, + /* 480 */ 191, 225, 103, 191, 86, 240, 242, 122, 246, 248, + /* 490 */ 247, 127, 191, 191, 128, 255, 245, 191, 130, 191, + /* 500 */ 191, 191, 191, 126, 244, 191, 191, 121, 191, 191, + /* 510 */ 191, 120, 191, 191, 191, 191, 191, 191, 191, 191, + /* 520 */ 119, 243, 191, 191, 191, 191, 191, 191, 191, 132, + /* 530 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 191, + /* 540 */ 191, 191, 191, 191, 191, 191, 191, 191, 191, 118, + /* 550 */ 192, 192, 192, 87, 192, 192, 50, 83, 85, 54, + /* 560 */ 84, 82, 79, 192, 192, 192, 5, 145, 192, 5, + /* 570 */ 5, 197, 197, 192, 145, 5, 5, 90, 89, 135, + /* 580 */ 113, 104, 107, 114, 192, 105, 192, 202, 208, 207, + /* 590 */ 206, 203, 205, 204, 201, 193, 193, 193, 192, 109, + /* 600 */ 193, 192, 194, 198, 105, 104, 109, 105, 104, 239, + /* 610 */ 241, 1, 104, 76, 105, 225, 104, 123, 123, 109, + /* 620 */ 109, 105, 104, 104, 111, 104, 104, 107, 9, 5, + /* 630 */ 5, 108, 5, 5, 5, 80, 15, 139, 76, 109, + /* 640 */ 5, 5, 16, 105, 5, 5, 139, 5, 5, 5, + /* 650 */ 5, 5, 5, 139, 5, 5, 5, 5, 5, 5, + /* 660 */ 5, 5, 5, 109, 80, 60, 59, 0, 263, 21, + /* 670 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 21, + /* 680 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 690 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 700 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 710 */ 263, 263, 
263, 263, 263, 263, 263, 263, 263, 263, + /* 720 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 730 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 740 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 750 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 760 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 770 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 780 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 790 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 800 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 810 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 820 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 830 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 840 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 850 */ 263, 263, 263, 263, 263, 263, 263, 263, 263, 263, + /* 860 */ 263, 263, 263, 263, 263, 263, 263, }; -#define YY_SHIFT_COUNT (312) +#define YY_SHIFT_COUNT (314) #define YY_SHIFT_MIN (0) -#define YY_SHIFT_MAX (662) +#define YY_SHIFT_MAX (667) static const unsigned short int yy_shift_ofst[] = { - /* 0 */ 168, 79, 79, 172, 172, 9, 221, 231, 74, 74, - /* 10 */ 74, 74, 74, 74, 74, 74, 74, 0, 48, 231, - /* 20 */ 365, 365, 365, 365, 121, 207, 74, 74, 74, 158, - /* 30 */ 74, 74, 200, 9, 37, 37, 676, 676, 676, 231, + /* 0 */ 168, 79, 79, 172, 172, 9, 221, 231, 106, 106, + /* 10 */ 106, 106, 106, 106, 106, 106, 106, 0, 48, 231, + /* 20 */ 290, 290, 290, 290, 121, 149, 106, 106, 106, 276, + /* 30 */ 106, 106, 55, 9, 37, 37, 680, 680, 680, 231, /* 40 */ 231, 231, 231, 231, 231, 231, 231, 231, 231, 231, - /* 50 */ 231, 231, 231, 231, 231, 231, 231, 231, 231, 365, - /* 60 */ 365, 102, 102, 102, 102, 102, 102, 102, 74, 74, - /* 70 */ 251, 74, 207, 207, 74, 74, 74, 263, 263, 201, - /* 80 */ 207, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 90 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 100 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 110 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 120 */ 74, 74, 74, 74, 74, 74, 74, 74, 74, 74, - /* 130 */ 74, 404, 404, 404, 354, 354, 354, 404, 354, 404, - /* 140 */ 353, 355, 368, 356, 373, 386, 393, 358, 420, 412, - /* 150 */ 404, 404, 404, 442, 9, 9, 404, 404, 459, 461, - /* 160 */ 498, 466, 465, 497, 468, 471, 442, 404, 477, 477, - /* 170 */ 404, 477, 404, 477, 404, 676, 676, 27, 100, 127, - /* 180 */ 100, 100, 53, 182, 266, 266, 266, 266, 244, 261, - /* 190 */ 274, 323, 323, 323, 323, 78, 114, 214, 214, 243, - /* 200 */ 95, 232, 281, 215, 284, 286, 287, 292, 295, 277, - /* 210 */ 366, 378, 290, 311, 272, 20, 297, 300, 302, 303, - /* 220 */ 305, 309, 280, 282, 288, 317, 291, 426, 427, 359, - /* 230 */ 375, 552, 414, 554, 556, 418, 558, 559, 480, 431, - /* 240 */ 458, 467, 469, 462, 472, 463, 473, 475, 493, 495, - /* 250 */ 476, 606, 504, 505, 507, 500, 489, 506, 490, 509, - /* 260 */ 512, 508, 513, 467, 514, 515, 516, 517, 545, 614, - /* 270 */ 619, 621, 622, 623, 624, 550, 616, 557, 494, 525, - /* 280 */ 525, 620, 499, 501, 525, 630, 633, 535, 525, 636, - /* 290 */ 637, 638, 639, 640, 641, 642, 643, 644, 645, 646, - /* 300 */ 647, 648, 649, 650, 651, 652, 549, 579, 653, 654, - /* 310 */ 600, 602, 662, + /* 50 */ 231, 231, 231, 231, 231, 231, 231, 231, 231, 290, + /* 60 */ 290, 78, 78, 78, 78, 78, 78, 78, 106, 106, + /* 70 */ 106, 274, 106, 149, 149, 106, 106, 106, 257, 257, + /* 80 */ 279, 149, 106, 106, 106, 106, 106, 106, 106, 106, + /* 90 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, + /* 100 */ 106, 106, 106, 
106, 106, 106, 106, 106, 106, 106, + /* 110 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, + /* 120 */ 106, 106, 106, 106, 106, 106, 106, 106, 106, 106, + /* 130 */ 106, 106, 411, 411, 411, 361, 361, 361, 411, 361, + /* 140 */ 411, 366, 368, 364, 365, 377, 386, 391, 401, 431, + /* 150 */ 397, 411, 411, 411, 379, 9, 9, 411, 411, 466, + /* 160 */ 398, 506, 474, 473, 505, 476, 479, 379, 411, 483, + /* 170 */ 483, 411, 483, 411, 483, 411, 680, 680, 27, 100, + /* 180 */ 127, 100, 100, 53, 182, 280, 280, 280, 280, 259, + /* 190 */ 281, 298, 338, 338, 338, 338, 22, 14, 212, 212, + /* 200 */ 237, 151, 320, 282, 285, 287, 288, 294, 295, 297, + /* 210 */ 232, 329, 378, 303, 229, 296, 299, 304, 305, 307, + /* 220 */ 312, 313, 316, 286, 291, 293, 330, 300, 428, 435, + /* 230 */ 367, 383, 561, 422, 564, 565, 429, 570, 571, 487, + /* 240 */ 489, 444, 467, 475, 477, 469, 480, 490, 499, 501, + /* 250 */ 502, 497, 504, 610, 508, 509, 512, 510, 494, 511, + /* 260 */ 495, 516, 518, 513, 519, 475, 521, 520, 522, 523, + /* 270 */ 537, 619, 624, 625, 627, 628, 629, 555, 621, 562, + /* 280 */ 498, 530, 530, 626, 507, 514, 530, 635, 636, 538, + /* 290 */ 530, 639, 640, 642, 643, 644, 645, 646, 647, 649, + /* 300 */ 650, 651, 652, 653, 654, 655, 656, 657, 554, 584, + /* 310 */ 648, 658, 605, 607, 667, }; -#define YY_REDUCE_COUNT (176) -#define YY_REDUCE_MIN (-239) -#define YY_REDUCE_MAX (410) +#define YY_REDUCE_COUNT (177) +#define YY_REDUCE_MIN (-240) +#define YY_REDUCE_MAX (409) static const short yy_reduce_ofst[] = { - /* 0 */ -177, -26, -26, 67, 67, 17, -229, -215, -172, -175, - /* 10 */ -7, -75, 87, 118, 144, 149, 151, -184, -187, -232, - /* 20 */ -205, -146, -124, -78, 105, -9, -185, 11, -190, -88, - /* 30 */ 76, -71, 5, 31, 160, 170, 91, 179, -186, -239, - /* 40 */ -216, -193, -139, -105, -40, 10, 112, 117, 133, 173, - /* 50 */ 176, 188, 189, 190, 191, 192, 193, 194, 195, 212, - /* 60 */ 213, 204, 223, 224, 225, 226, 227, 228, 265, 267, - /* 70 */ 206, 268, 229, 230, 269, 270, 271, 208, 209, 233, - /* 80 */ 235, 275, 279, 283, 285, 289, 293, 294, 296, 298, - /* 90 */ 299, 301, 304, 306, 307, 308, 310, 312, 313, 314, - /* 100 */ 315, 316, 318, 319, 320, 321, 322, 324, 325, 326, - /* 110 */ 327, 328, 329, 330, 331, 332, 333, 334, 335, 336, - /* 120 */ 337, 338, 339, 340, 341, 342, 343, 344, 345, 346, - /* 130 */ 347, 348, 349, 350, 218, 220, 236, 351, 238, 352, - /* 140 */ 240, 234, 237, 249, 258, 357, 360, 362, 361, 367, - /* 150 */ 363, 364, 369, 370, 371, 372, 374, 379, 376, 380, - /* 160 */ 377, 383, 381, 385, 387, 388, 382, 384, 389, 397, - /* 170 */ 400, 401, 405, 403, 406, 395, 410, + /* 0 */ -178, -27, -27, 66, 66, 17, -230, -216, -173, -176, + /* 10 */ -45, -8, 61, 68, 77, 124, 148, -185, -188, -233, + /* 20 */ -206, -91, 21, 46, -191, 32, -186, -183, 93, -89, + /* 30 */ -184, -106, 4, 104, 98, 120, 128, 159, 179, -240, + /* 40 */ -217, -194, -117, -96, -51, -35, -18, 25, 94, 110, + /* 50 */ 142, 167, 174, 185, 189, 195, 196, 197, 198, 175, + /* 60 */ 215, 225, 226, 227, 228, 230, 233, 234, 265, 267, + /* 70 */ 268, 211, 272, 235, 236, 273, 275, 277, 206, 208, + /* 80 */ 238, 239, 283, 284, 289, 292, 301, 302, 306, 308, + /* 90 */ 309, 310, 311, 314, 315, 317, 318, 319, 321, 322, + /* 100 */ 323, 324, 325, 326, 327, 328, 331, 332, 333, 334, + /* 110 */ 335, 336, 337, 339, 340, 341, 342, 343, 344, 345, + /* 120 */ 346, 347, 348, 349, 350, 351, 352, 353, 354, 355, + /* 130 */ 356, 357, 358, 359, 360, 217, 222, 224, 362, 240, + /* 140 */ 363, 241, 243, 242, 251, 260, 278, 244, 369, 245, + /* 150 
*/ 370, 371, 372, 373, 256, 374, 375, 376, 381, 380, + /* 160 */ 382, 384, 385, 387, 388, 389, 393, 390, 392, 402, + /* 170 */ 403, 394, 404, 406, 407, 409, 405, 408, }; static const YYACTIONTYPE yy_default[] = { - /* 0 */ 767, 879, 825, 891, 813, 822, 1019, 1019, 767, 767, - /* 10 */ 767, 767, 767, 767, 767, 767, 767, 938, 786, 1019, - /* 20 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 822, - /* 30 */ 767, 767, 828, 822, 828, 828, 933, 863, 881, 767, - /* 40 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 50 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 60 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 70 */ 940, 943, 767, 767, 945, 767, 767, 965, 965, 931, - /* 80 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 90 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 100 */ 767, 767, 767, 767, 767, 767, 767, 767, 811, 767, - /* 110 */ 809, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 120 */ 767, 767, 767, 767, 767, 796, 767, 767, 767, 767, - /* 130 */ 767, 788, 788, 788, 767, 767, 767, 788, 767, 788, - /* 140 */ 972, 976, 970, 958, 966, 957, 953, 951, 950, 980, - /* 150 */ 788, 788, 788, 826, 822, 822, 788, 788, 844, 842, - /* 160 */ 840, 832, 838, 834, 836, 830, 814, 788, 820, 820, - /* 170 */ 788, 820, 788, 820, 788, 863, 881, 767, 981, 767, - /* 180 */ 1018, 971, 1008, 1007, 1014, 1006, 1005, 1004, 767, 767, - /* 190 */ 767, 1000, 1001, 1003, 1002, 767, 767, 1010, 1009, 767, - /* 200 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 210 */ 767, 767, 983, 767, 977, 973, 767, 767, 767, 767, - /* 220 */ 767, 767, 767, 767, 767, 893, 767, 767, 767, 767, - /* 230 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 240 */ 930, 767, 767, 767, 767, 941, 767, 767, 767, 767, - /* 250 */ 767, 767, 767, 767, 767, 967, 767, 959, 767, 767, - /* 260 */ 767, 767, 767, 905, 767, 767, 767, 767, 767, 767, - /* 270 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 1029, - /* 280 */ 1027, 767, 767, 767, 1023, 767, 767, 767, 1021, 767, - /* 290 */ 767, 767, 767, 767, 767, 767, 767, 767, 767, 767, - /* 300 */ 767, 767, 767, 767, 767, 767, 847, 767, 794, 792, - /* 310 */ 767, 784, 767, + /* 0 */ 773, 885, 831, 897, 819, 828, 1026, 1026, 773, 773, + /* 10 */ 773, 773, 773, 773, 773, 773, 773, 944, 792, 1026, + /* 20 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 828, + /* 30 */ 773, 773, 834, 828, 834, 834, 939, 869, 887, 773, + /* 40 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 50 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 60 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 70 */ 773, 946, 949, 773, 773, 951, 773, 773, 971, 971, + /* 80 */ 937, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 90 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 100 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 817, + /* 110 */ 773, 815, 773, 773, 773, 773, 773, 773, 773, 773, + /* 120 */ 773, 773, 773, 773, 773, 773, 802, 773, 773, 773, + /* 130 */ 773, 773, 794, 794, 794, 773, 773, 773, 794, 773, + /* 140 */ 794, 978, 982, 976, 964, 972, 963, 959, 957, 956, + /* 150 */ 986, 794, 794, 794, 832, 828, 828, 794, 794, 850, + /* 160 */ 848, 846, 838, 844, 840, 842, 836, 820, 794, 826, + /* 170 */ 826, 794, 826, 794, 826, 794, 869, 887, 773, 987, + /* 180 */ 773, 1025, 977, 1015, 1014, 1021, 1013, 1012, 1011, 773, + /* 190 */ 773, 773, 1007, 1008, 1010, 1009, 773, 773, 1017, 1016, + /* 200 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 210 */ 773, 773, 773, 989, 773, 983, 979, 773, 773, 773, + /* 220 */ 
773, 773, 773, 773, 773, 773, 899, 773, 773, 773, + /* 230 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 240 */ 773, 773, 936, 773, 773, 773, 773, 947, 773, 773, + /* 250 */ 773, 773, 773, 773, 773, 773, 773, 973, 773, 965, + /* 260 */ 773, 773, 773, 773, 773, 911, 773, 773, 773, 773, + /* 270 */ 773, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 280 */ 773, 1037, 1035, 773, 773, 773, 1031, 773, 773, 773, + /* 290 */ 1029, 773, 773, 773, 773, 773, 773, 773, 773, 773, + /* 300 */ 773, 773, 773, 773, 773, 773, 773, 773, 853, 773, + /* 310 */ 800, 798, 773, 790, 773, }; /********** End of lemon-generated parsing tables *****************************/ @@ -612,6 +612,7 @@ static const YYCODETYPE yyFallback[] = { 1, /* NOW => ID */ 0, /* RESET => nothing */ 0, /* QUERY => nothing */ + 0, /* SYNCDB => nothing */ 0, /* ADD => nothing */ 0, /* COLUMN => nothing */ 0, /* TAG => nothing */ @@ -884,132 +885,133 @@ static const char *const yyTokenName[] = { /* 133 */ "NOW", /* 134 */ "RESET", /* 135 */ "QUERY", - /* 136 */ "ADD", - /* 137 */ "COLUMN", - /* 138 */ "TAG", - /* 139 */ "CHANGE", - /* 140 */ "SET", - /* 141 */ "KILL", - /* 142 */ "CONNECTION", - /* 143 */ "STREAM", - /* 144 */ "COLON", - /* 145 */ "ABORT", - /* 146 */ "AFTER", - /* 147 */ "ATTACH", - /* 148 */ "BEFORE", - /* 149 */ "BEGIN", - /* 150 */ "CASCADE", - /* 151 */ "CLUSTER", - /* 152 */ "CONFLICT", - /* 153 */ "COPY", - /* 154 */ "DEFERRED", - /* 155 */ "DELIMITERS", - /* 156 */ "DETACH", - /* 157 */ "EACH", - /* 158 */ "END", - /* 159 */ "EXPLAIN", - /* 160 */ "FAIL", - /* 161 */ "FOR", - /* 162 */ "IGNORE", - /* 163 */ "IMMEDIATE", - /* 164 */ "INITIALLY", - /* 165 */ "INSTEAD", - /* 166 */ "MATCH", - /* 167 */ "KEY", - /* 168 */ "OF", - /* 169 */ "RAISE", - /* 170 */ "REPLACE", - /* 171 */ "RESTRICT", - /* 172 */ "ROW", - /* 173 */ "STATEMENT", - /* 174 */ "TRIGGER", - /* 175 */ "VIEW", - /* 176 */ "SEMI", - /* 177 */ "NONE", - /* 178 */ "PREV", - /* 179 */ "LINEAR", - /* 180 */ "IMPORT", - /* 181 */ "TBNAME", - /* 182 */ "JOIN", - /* 183 */ "INSERT", - /* 184 */ "INTO", - /* 185 */ "VALUES", - /* 186 */ "error", - /* 187 */ "program", - /* 188 */ "cmd", - /* 189 */ "dbPrefix", - /* 190 */ "ids", - /* 191 */ "cpxName", - /* 192 */ "ifexists", - /* 193 */ "alter_db_optr", - /* 194 */ "alter_topic_optr", - /* 195 */ "acct_optr", - /* 196 */ "ifnotexists", - /* 197 */ "db_optr", - /* 198 */ "topic_optr", - /* 199 */ "pps", - /* 200 */ "tseries", - /* 201 */ "dbs", - /* 202 */ "streams", - /* 203 */ "storage", - /* 204 */ "qtime", - /* 205 */ "users", - /* 206 */ "conns", - /* 207 */ "state", - /* 208 */ "keep", - /* 209 */ "tagitemlist", - /* 210 */ "cache", - /* 211 */ "replica", - /* 212 */ "quorum", - /* 213 */ "days", - /* 214 */ "minrows", - /* 215 */ "maxrows", - /* 216 */ "blocks", - /* 217 */ "ctime", - /* 218 */ "wal", - /* 219 */ "fsync", - /* 220 */ "comp", - /* 221 */ "prec", - /* 222 */ "update", - /* 223 */ "cachelast", - /* 224 */ "partitions", - /* 225 */ "typename", - /* 226 */ "signed", - /* 227 */ "create_table_args", - /* 228 */ "create_stable_args", - /* 229 */ "create_table_list", - /* 230 */ "create_from_stable", - /* 231 */ "columnlist", - /* 232 */ "tagNamelist", - /* 233 */ "select", - /* 234 */ "column", - /* 235 */ "tagitem", - /* 236 */ "selcollist", - /* 237 */ "from", - /* 238 */ "where_opt", - /* 239 */ "interval_opt", - /* 240 */ "session_option", - /* 241 */ "fill_opt", - /* 242 */ "sliding_opt", - /* 243 */ "groupby_opt", - /* 244 */ "orderby_opt", - /* 245 */ "having_opt", - /* 
246 */ "slimit_opt", - /* 247 */ "limit_opt", - /* 248 */ "union", - /* 249 */ "sclp", - /* 250 */ "distinct", - /* 251 */ "expr", - /* 252 */ "as", - /* 253 */ "tablelist", - /* 254 */ "tmvar", - /* 255 */ "sortlist", - /* 256 */ "sortitem", - /* 257 */ "item", - /* 258 */ "sortorder", - /* 259 */ "grouplist", - /* 260 */ "exprlist", - /* 261 */ "expritem", + /* 136 */ "SYNCDB", + /* 137 */ "ADD", + /* 138 */ "COLUMN", + /* 139 */ "TAG", + /* 140 */ "CHANGE", + /* 141 */ "SET", + /* 142 */ "KILL", + /* 143 */ "CONNECTION", + /* 144 */ "STREAM", + /* 145 */ "COLON", + /* 146 */ "ABORT", + /* 147 */ "AFTER", + /* 148 */ "ATTACH", + /* 149 */ "BEFORE", + /* 150 */ "BEGIN", + /* 151 */ "CASCADE", + /* 152 */ "CLUSTER", + /* 153 */ "CONFLICT", + /* 154 */ "COPY", + /* 155 */ "DEFERRED", + /* 156 */ "DELIMITERS", + /* 157 */ "DETACH", + /* 158 */ "EACH", + /* 159 */ "END", + /* 160 */ "EXPLAIN", + /* 161 */ "FAIL", + /* 162 */ "FOR", + /* 163 */ "IGNORE", + /* 164 */ "IMMEDIATE", + /* 165 */ "INITIALLY", + /* 166 */ "INSTEAD", + /* 167 */ "MATCH", + /* 168 */ "KEY", + /* 169 */ "OF", + /* 170 */ "RAISE", + /* 171 */ "REPLACE", + /* 172 */ "RESTRICT", + /* 173 */ "ROW", + /* 174 */ "STATEMENT", + /* 175 */ "TRIGGER", + /* 176 */ "VIEW", + /* 177 */ "SEMI", + /* 178 */ "NONE", + /* 179 */ "PREV", + /* 180 */ "LINEAR", + /* 181 */ "IMPORT", + /* 182 */ "TBNAME", + /* 183 */ "JOIN", + /* 184 */ "INSERT", + /* 185 */ "INTO", + /* 186 */ "VALUES", + /* 187 */ "error", + /* 188 */ "program", + /* 189 */ "cmd", + /* 190 */ "dbPrefix", + /* 191 */ "ids", + /* 192 */ "cpxName", + /* 193 */ "ifexists", + /* 194 */ "alter_db_optr", + /* 195 */ "alter_topic_optr", + /* 196 */ "acct_optr", + /* 197 */ "ifnotexists", + /* 198 */ "db_optr", + /* 199 */ "topic_optr", + /* 200 */ "pps", + /* 201 */ "tseries", + /* 202 */ "dbs", + /* 203 */ "streams", + /* 204 */ "storage", + /* 205 */ "qtime", + /* 206 */ "users", + /* 207 */ "conns", + /* 208 */ "state", + /* 209 */ "keep", + /* 210 */ "tagitemlist", + /* 211 */ "cache", + /* 212 */ "replica", + /* 213 */ "quorum", + /* 214 */ "days", + /* 215 */ "minrows", + /* 216 */ "maxrows", + /* 217 */ "blocks", + /* 218 */ "ctime", + /* 219 */ "wal", + /* 220 */ "fsync", + /* 221 */ "comp", + /* 222 */ "prec", + /* 223 */ "update", + /* 224 */ "cachelast", + /* 225 */ "partitions", + /* 226 */ "typename", + /* 227 */ "signed", + /* 228 */ "create_table_args", + /* 229 */ "create_stable_args", + /* 230 */ "create_table_list", + /* 231 */ "create_from_stable", + /* 232 */ "columnlist", + /* 233 */ "tagNamelist", + /* 234 */ "select", + /* 235 */ "column", + /* 236 */ "tagitem", + /* 237 */ "selcollist", + /* 238 */ "from", + /* 239 */ "where_opt", + /* 240 */ "interval_opt", + /* 241 */ "session_option", + /* 242 */ "fill_opt", + /* 243 */ "sliding_opt", + /* 244 */ "groupby_opt", + /* 245 */ "orderby_opt", + /* 246 */ "having_opt", + /* 247 */ "slimit_opt", + /* 248 */ "limit_opt", + /* 249 */ "union", + /* 250 */ "sclp", + /* 251 */ "distinct", + /* 252 */ "expr", + /* 253 */ "as", + /* 254 */ "tablelist", + /* 255 */ "tmvar", + /* 256 */ "sortlist", + /* 257 */ "sortitem", + /* 258 */ "item", + /* 259 */ "sortorder", + /* 260 */ "grouplist", + /* 261 */ "exprlist", + /* 262 */ "expritem", }; #endif /* defined(YYCOVERAGE) || !defined(NDEBUG) */ @@ -1243,45 +1245,47 @@ static const char *const yyRuleName[] = { /* 223 */ "expr ::= NOW", /* 224 */ "expr ::= VARIABLE", /* 225 */ "expr ::= BOOL", - /* 226 */ "expr ::= ID LP exprlist RP", - /* 227 */ "expr ::= ID LP STAR RP", 
- /* 228 */ "expr ::= expr IS NULL", - /* 229 */ "expr ::= expr IS NOT NULL", - /* 230 */ "expr ::= expr LT expr", - /* 231 */ "expr ::= expr GT expr", - /* 232 */ "expr ::= expr LE expr", - /* 233 */ "expr ::= expr GE expr", - /* 234 */ "expr ::= expr NE expr", - /* 235 */ "expr ::= expr EQ expr", - /* 236 */ "expr ::= expr BETWEEN expr AND expr", - /* 237 */ "expr ::= expr AND expr", - /* 238 */ "expr ::= expr OR expr", - /* 239 */ "expr ::= expr PLUS expr", - /* 240 */ "expr ::= expr MINUS expr", - /* 241 */ "expr ::= expr STAR expr", - /* 242 */ "expr ::= expr SLASH expr", - /* 243 */ "expr ::= expr REM expr", - /* 244 */ "expr ::= expr LIKE expr", - /* 245 */ "expr ::= expr IN LP exprlist RP", - /* 246 */ "exprlist ::= exprlist COMMA expritem", - /* 247 */ "exprlist ::= expritem", - /* 248 */ "expritem ::= expr", - /* 249 */ "expritem ::=", - /* 250 */ "cmd ::= RESET QUERY CACHE", - /* 251 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", - /* 252 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", - /* 253 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", - /* 254 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", - /* 255 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", - /* 256 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", - /* 257 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", - /* 258 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", - /* 259 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", - /* 260 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", - /* 261 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", - /* 262 */ "cmd ::= KILL CONNECTION INTEGER", - /* 263 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", - /* 264 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", + /* 226 */ "expr ::= NULL", + /* 227 */ "expr ::= ID LP exprlist RP", + /* 228 */ "expr ::= ID LP STAR RP", + /* 229 */ "expr ::= expr IS NULL", + /* 230 */ "expr ::= expr IS NOT NULL", + /* 231 */ "expr ::= expr LT expr", + /* 232 */ "expr ::= expr GT expr", + /* 233 */ "expr ::= expr LE expr", + /* 234 */ "expr ::= expr GE expr", + /* 235 */ "expr ::= expr NE expr", + /* 236 */ "expr ::= expr EQ expr", + /* 237 */ "expr ::= expr BETWEEN expr AND expr", + /* 238 */ "expr ::= expr AND expr", + /* 239 */ "expr ::= expr OR expr", + /* 240 */ "expr ::= expr PLUS expr", + /* 241 */ "expr ::= expr MINUS expr", + /* 242 */ "expr ::= expr STAR expr", + /* 243 */ "expr ::= expr SLASH expr", + /* 244 */ "expr ::= expr REM expr", + /* 245 */ "expr ::= expr LIKE expr", + /* 246 */ "expr ::= expr IN LP exprlist RP", + /* 247 */ "exprlist ::= exprlist COMMA expritem", + /* 248 */ "exprlist ::= expritem", + /* 249 */ "expritem ::= expr", + /* 250 */ "expritem ::=", + /* 251 */ "cmd ::= RESET QUERY CACHE", + /* 252 */ "cmd ::= SYNCDB ids REPLICA", + /* 253 */ "cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist", + /* 254 */ "cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids", + /* 255 */ "cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist", + /* 256 */ "cmd ::= ALTER TABLE ids cpxName DROP TAG ids", + /* 257 */ "cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids", + /* 258 */ "cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem", + /* 259 */ "cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist", + /* 260 */ "cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids", + /* 261 */ "cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist", + /* 262 */ "cmd ::= ALTER STABLE ids cpxName DROP TAG ids", + /* 263 */ "cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids", 
+ /* 264 */ "cmd ::= KILL CONNECTION INTEGER", + /* 265 */ "cmd ::= KILL STREAM INTEGER COLON INTEGER", + /* 266 */ "cmd ::= KILL QUERY INTEGER COLON INTEGER", }; #endif /* NDEBUG */ @@ -1402,52 +1406,52 @@ static void yy_destructor( ** inside the C code. */ /********* Begin destructor definitions ***************************************/ - case 208: /* keep */ - case 209: /* tagitemlist */ - case 231: /* columnlist */ - case 232: /* tagNamelist */ - case 241: /* fill_opt */ - case 243: /* groupby_opt */ - case 244: /* orderby_opt */ - case 255: /* sortlist */ - case 259: /* grouplist */ + case 209: /* keep */ + case 210: /* tagitemlist */ + case 232: /* columnlist */ + case 233: /* tagNamelist */ + case 242: /* fill_opt */ + case 244: /* groupby_opt */ + case 245: /* orderby_opt */ + case 256: /* sortlist */ + case 260: /* grouplist */ { -taosArrayDestroy((yypminor->yy429)); +taosArrayDestroy((yypminor->yy159)); } break; - case 229: /* create_table_list */ + case 230: /* create_table_list */ { -destroyCreateTableSql((yypminor->yy194)); +destroyCreateTableSql((yypminor->yy14)); } break; - case 233: /* select */ + case 234: /* select */ { -destroyQuerySqlNode((yypminor->yy254)); +destroyQuerySqlNode((yypminor->yy272)); } break; - case 236: /* selcollist */ - case 249: /* sclp */ - case 260: /* exprlist */ + case 237: /* selcollist */ + case 250: /* sclp */ + case 261: /* exprlist */ { -tSqlExprListDestroy((yypminor->yy429)); +tSqlExprListDestroy((yypminor->yy159)); } break; - case 238: /* where_opt */ - case 245: /* having_opt */ - case 251: /* expr */ - case 261: /* expritem */ + case 239: /* where_opt */ + case 246: /* having_opt */ + case 252: /* expr */ + case 262: /* expritem */ { -tSqlExprDestroy((yypminor->yy170)); +tSqlExprDestroy((yypminor->yy118)); } break; - case 248: /* union */ + case 249: /* union */ { -destroyAllSelectClause((yypminor->yy141)); +destroyAllSelectClause((yypminor->yy391)); } break; - case 256: /* sortitem */ + case 257: /* sortitem */ { -tVariantDestroy(&(yypminor->yy218)); +tVariantDestroy(&(yypminor->yy488)); } break; /********* End destructor definitions *****************************************/ @@ -1741,271 +1745,273 @@ static const struct { YYCODETYPE lhs; /* Symbol on the left-hand side of the rule */ signed char nrhs; /* Negative of the number of RHS symbols in the rule */ } yyRuleInfo[] = { - { 187, -1 }, /* (0) program ::= cmd */ - { 188, -2 }, /* (1) cmd ::= SHOW DATABASES */ - { 188, -2 }, /* (2) cmd ::= SHOW TOPICS */ - { 188, -2 }, /* (3) cmd ::= SHOW MNODES */ - { 188, -2 }, /* (4) cmd ::= SHOW DNODES */ - { 188, -2 }, /* (5) cmd ::= SHOW ACCOUNTS */ - { 188, -2 }, /* (6) cmd ::= SHOW USERS */ - { 188, -2 }, /* (7) cmd ::= SHOW MODULES */ - { 188, -2 }, /* (8) cmd ::= SHOW QUERIES */ - { 188, -2 }, /* (9) cmd ::= SHOW CONNECTIONS */ - { 188, -2 }, /* (10) cmd ::= SHOW STREAMS */ - { 188, -2 }, /* (11) cmd ::= SHOW VARIABLES */ - { 188, -2 }, /* (12) cmd ::= SHOW SCORES */ - { 188, -2 }, /* (13) cmd ::= SHOW GRANTS */ - { 188, -2 }, /* (14) cmd ::= SHOW VNODES */ - { 188, -3 }, /* (15) cmd ::= SHOW VNODES IPTOKEN */ - { 189, 0 }, /* (16) dbPrefix ::= */ - { 189, -2 }, /* (17) dbPrefix ::= ids DOT */ - { 191, 0 }, /* (18) cpxName ::= */ - { 191, -2 }, /* (19) cpxName ::= DOT ids */ - { 188, -5 }, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ - { 188, -4 }, /* (21) cmd ::= SHOW CREATE DATABASE ids */ - { 188, -3 }, /* (22) cmd ::= SHOW dbPrefix TABLES */ - { 188, -5 }, /* (23) cmd ::= SHOW dbPrefix TABLES LIKE ids */ - { 188, -3 }, /* (24) cmd ::= 
SHOW dbPrefix STABLES */ - { 188, -5 }, /* (25) cmd ::= SHOW dbPrefix STABLES LIKE ids */ - { 188, -3 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS */ - { 188, -4 }, /* (27) cmd ::= SHOW dbPrefix VGROUPS ids */ - { 188, -5 }, /* (28) cmd ::= DROP TABLE ifexists ids cpxName */ - { 188, -5 }, /* (29) cmd ::= DROP STABLE ifexists ids cpxName */ - { 188, -4 }, /* (30) cmd ::= DROP DATABASE ifexists ids */ - { 188, -4 }, /* (31) cmd ::= DROP TOPIC ifexists ids */ - { 188, -3 }, /* (32) cmd ::= DROP DNODE ids */ - { 188, -3 }, /* (33) cmd ::= DROP USER ids */ - { 188, -3 }, /* (34) cmd ::= DROP ACCOUNT ids */ - { 188, -2 }, /* (35) cmd ::= USE ids */ - { 188, -3 }, /* (36) cmd ::= DESCRIBE ids cpxName */ - { 188, -5 }, /* (37) cmd ::= ALTER USER ids PASS ids */ - { 188, -5 }, /* (38) cmd ::= ALTER USER ids PRIVILEGE ids */ - { 188, -4 }, /* (39) cmd ::= ALTER DNODE ids ids */ - { 188, -5 }, /* (40) cmd ::= ALTER DNODE ids ids ids */ - { 188, -3 }, /* (41) cmd ::= ALTER LOCAL ids */ - { 188, -4 }, /* (42) cmd ::= ALTER LOCAL ids ids */ - { 188, -4 }, /* (43) cmd ::= ALTER DATABASE ids alter_db_optr */ - { 188, -4 }, /* (44) cmd ::= ALTER TOPIC ids alter_topic_optr */ - { 188, -4 }, /* (45) cmd ::= ALTER ACCOUNT ids acct_optr */ - { 188, -6 }, /* (46) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ - { 190, -1 }, /* (47) ids ::= ID */ - { 190, -1 }, /* (48) ids ::= STRING */ - { 192, -2 }, /* (49) ifexists ::= IF EXISTS */ - { 192, 0 }, /* (50) ifexists ::= */ - { 196, -3 }, /* (51) ifnotexists ::= IF NOT EXISTS */ - { 196, 0 }, /* (52) ifnotexists ::= */ - { 188, -3 }, /* (53) cmd ::= CREATE DNODE ids */ - { 188, -6 }, /* (54) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ - { 188, -5 }, /* (55) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ - { 188, -5 }, /* (56) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ - { 188, -5 }, /* (57) cmd ::= CREATE USER ids PASS ids */ - { 199, 0 }, /* (58) pps ::= */ - { 199, -2 }, /* (59) pps ::= PPS INTEGER */ - { 200, 0 }, /* (60) tseries ::= */ - { 200, -2 }, /* (61) tseries ::= TSERIES INTEGER */ - { 201, 0 }, /* (62) dbs ::= */ - { 201, -2 }, /* (63) dbs ::= DBS INTEGER */ - { 202, 0 }, /* (64) streams ::= */ - { 202, -2 }, /* (65) streams ::= STREAMS INTEGER */ - { 203, 0 }, /* (66) storage ::= */ - { 203, -2 }, /* (67) storage ::= STORAGE INTEGER */ - { 204, 0 }, /* (68) qtime ::= */ - { 204, -2 }, /* (69) qtime ::= QTIME INTEGER */ - { 205, 0 }, /* (70) users ::= */ - { 205, -2 }, /* (71) users ::= USERS INTEGER */ - { 206, 0 }, /* (72) conns ::= */ - { 206, -2 }, /* (73) conns ::= CONNS INTEGER */ - { 207, 0 }, /* (74) state ::= */ - { 207, -2 }, /* (75) state ::= STATE ids */ - { 195, -9 }, /* (76) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ - { 208, -2 }, /* (77) keep ::= KEEP tagitemlist */ - { 210, -2 }, /* (78) cache ::= CACHE INTEGER */ - { 211, -2 }, /* (79) replica ::= REPLICA INTEGER */ - { 212, -2 }, /* (80) quorum ::= QUORUM INTEGER */ - { 213, -2 }, /* (81) days ::= DAYS INTEGER */ - { 214, -2 }, /* (82) minrows ::= MINROWS INTEGER */ - { 215, -2 }, /* (83) maxrows ::= MAXROWS INTEGER */ - { 216, -2 }, /* (84) blocks ::= BLOCKS INTEGER */ - { 217, -2 }, /* (85) ctime ::= CTIME INTEGER */ - { 218, -2 }, /* (86) wal ::= WAL INTEGER */ - { 219, -2 }, /* (87) fsync ::= FSYNC INTEGER */ - { 220, -2 }, /* (88) comp ::= COMP INTEGER */ - { 221, -2 }, /* (89) prec ::= PRECISION STRING */ - { 222, -2 }, /* (90) update ::= UPDATE INTEGER */ - { 223, -2 }, /* (91) cachelast ::= CACHELAST INTEGER */ - { 224, -2 }, /* 
(92) partitions ::= PARTITIONS INTEGER */ - { 197, 0 }, /* (93) db_optr ::= */ - { 197, -2 }, /* (94) db_optr ::= db_optr cache */ - { 197, -2 }, /* (95) db_optr ::= db_optr replica */ - { 197, -2 }, /* (96) db_optr ::= db_optr quorum */ - { 197, -2 }, /* (97) db_optr ::= db_optr days */ - { 197, -2 }, /* (98) db_optr ::= db_optr minrows */ - { 197, -2 }, /* (99) db_optr ::= db_optr maxrows */ - { 197, -2 }, /* (100) db_optr ::= db_optr blocks */ - { 197, -2 }, /* (101) db_optr ::= db_optr ctime */ - { 197, -2 }, /* (102) db_optr ::= db_optr wal */ - { 197, -2 }, /* (103) db_optr ::= db_optr fsync */ - { 197, -2 }, /* (104) db_optr ::= db_optr comp */ - { 197, -2 }, /* (105) db_optr ::= db_optr prec */ - { 197, -2 }, /* (106) db_optr ::= db_optr keep */ - { 197, -2 }, /* (107) db_optr ::= db_optr update */ - { 197, -2 }, /* (108) db_optr ::= db_optr cachelast */ - { 198, -1 }, /* (109) topic_optr ::= db_optr */ - { 198, -2 }, /* (110) topic_optr ::= topic_optr partitions */ - { 193, 0 }, /* (111) alter_db_optr ::= */ - { 193, -2 }, /* (112) alter_db_optr ::= alter_db_optr replica */ - { 193, -2 }, /* (113) alter_db_optr ::= alter_db_optr quorum */ - { 193, -2 }, /* (114) alter_db_optr ::= alter_db_optr keep */ - { 193, -2 }, /* (115) alter_db_optr ::= alter_db_optr blocks */ - { 193, -2 }, /* (116) alter_db_optr ::= alter_db_optr comp */ - { 193, -2 }, /* (117) alter_db_optr ::= alter_db_optr wal */ - { 193, -2 }, /* (118) alter_db_optr ::= alter_db_optr fsync */ - { 193, -2 }, /* (119) alter_db_optr ::= alter_db_optr update */ - { 193, -2 }, /* (120) alter_db_optr ::= alter_db_optr cachelast */ - { 194, -1 }, /* (121) alter_topic_optr ::= alter_db_optr */ - { 194, -2 }, /* (122) alter_topic_optr ::= alter_topic_optr partitions */ - { 225, -1 }, /* (123) typename ::= ids */ - { 225, -4 }, /* (124) typename ::= ids LP signed RP */ - { 225, -2 }, /* (125) typename ::= ids UNSIGNED */ - { 226, -1 }, /* (126) signed ::= INTEGER */ - { 226, -2 }, /* (127) signed ::= PLUS INTEGER */ - { 226, -2 }, /* (128) signed ::= MINUS INTEGER */ - { 188, -3 }, /* (129) cmd ::= CREATE TABLE create_table_args */ - { 188, -3 }, /* (130) cmd ::= CREATE TABLE create_stable_args */ - { 188, -3 }, /* (131) cmd ::= CREATE STABLE create_stable_args */ - { 188, -3 }, /* (132) cmd ::= CREATE TABLE create_table_list */ - { 229, -1 }, /* (133) create_table_list ::= create_from_stable */ - { 229, -2 }, /* (134) create_table_list ::= create_table_list create_from_stable */ - { 227, -6 }, /* (135) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ - { 228, -10 }, /* (136) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ - { 230, -10 }, /* (137) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ - { 230, -13 }, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ - { 232, -3 }, /* (139) tagNamelist ::= tagNamelist COMMA ids */ - { 232, -1 }, /* (140) tagNamelist ::= ids */ - { 227, -5 }, /* (141) create_table_args ::= ifnotexists ids cpxName AS select */ - { 231, -3 }, /* (142) columnlist ::= columnlist COMMA column */ - { 231, -1 }, /* (143) columnlist ::= column */ - { 234, -2 }, /* (144) column ::= ids typename */ - { 209, -3 }, /* (145) tagitemlist ::= tagitemlist COMMA tagitem */ - { 209, -1 }, /* (146) tagitemlist ::= tagitem */ - { 235, -1 }, /* (147) tagitem ::= INTEGER */ - { 235, -1 }, /* (148) tagitem ::= FLOAT */ - { 235, -1 }, /* (149) tagitem ::= STRING 
*/ - { 235, -1 }, /* (150) tagitem ::= BOOL */ - { 235, -1 }, /* (151) tagitem ::= NULL */ - { 235, -2 }, /* (152) tagitem ::= MINUS INTEGER */ - { 235, -2 }, /* (153) tagitem ::= MINUS FLOAT */ - { 235, -2 }, /* (154) tagitem ::= PLUS INTEGER */ - { 235, -2 }, /* (155) tagitem ::= PLUS FLOAT */ - { 233, -13 }, /* (156) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ - { 233, -3 }, /* (157) select ::= LP select RP */ - { 248, -1 }, /* (158) union ::= select */ - { 248, -4 }, /* (159) union ::= union UNION ALL select */ - { 188, -1 }, /* (160) cmd ::= union */ - { 233, -2 }, /* (161) select ::= SELECT selcollist */ - { 249, -2 }, /* (162) sclp ::= selcollist COMMA */ - { 249, 0 }, /* (163) sclp ::= */ - { 236, -4 }, /* (164) selcollist ::= sclp distinct expr as */ - { 236, -2 }, /* (165) selcollist ::= sclp STAR */ - { 252, -2 }, /* (166) as ::= AS ids */ - { 252, -1 }, /* (167) as ::= ids */ - { 252, 0 }, /* (168) as ::= */ - { 250, -1 }, /* (169) distinct ::= DISTINCT */ - { 250, 0 }, /* (170) distinct ::= */ - { 237, -2 }, /* (171) from ::= FROM tablelist */ - { 237, -4 }, /* (172) from ::= FROM LP union RP */ - { 253, -2 }, /* (173) tablelist ::= ids cpxName */ - { 253, -3 }, /* (174) tablelist ::= ids cpxName ids */ - { 253, -4 }, /* (175) tablelist ::= tablelist COMMA ids cpxName */ - { 253, -5 }, /* (176) tablelist ::= tablelist COMMA ids cpxName ids */ - { 254, -1 }, /* (177) tmvar ::= VARIABLE */ - { 239, -4 }, /* (178) interval_opt ::= INTERVAL LP tmvar RP */ - { 239, -6 }, /* (179) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ - { 239, 0 }, /* (180) interval_opt ::= */ - { 240, 0 }, /* (181) session_option ::= */ - { 240, -7 }, /* (182) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ - { 241, 0 }, /* (183) fill_opt ::= */ - { 241, -6 }, /* (184) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ - { 241, -4 }, /* (185) fill_opt ::= FILL LP ID RP */ - { 242, -4 }, /* (186) sliding_opt ::= SLIDING LP tmvar RP */ - { 242, 0 }, /* (187) sliding_opt ::= */ - { 244, 0 }, /* (188) orderby_opt ::= */ - { 244, -3 }, /* (189) orderby_opt ::= ORDER BY sortlist */ - { 255, -4 }, /* (190) sortlist ::= sortlist COMMA item sortorder */ - { 255, -2 }, /* (191) sortlist ::= item sortorder */ - { 257, -2 }, /* (192) item ::= ids cpxName */ - { 258, -1 }, /* (193) sortorder ::= ASC */ - { 258, -1 }, /* (194) sortorder ::= DESC */ - { 258, 0 }, /* (195) sortorder ::= */ - { 243, 0 }, /* (196) groupby_opt ::= */ - { 243, -3 }, /* (197) groupby_opt ::= GROUP BY grouplist */ - { 259, -3 }, /* (198) grouplist ::= grouplist COMMA item */ - { 259, -1 }, /* (199) grouplist ::= item */ - { 245, 0 }, /* (200) having_opt ::= */ - { 245, -2 }, /* (201) having_opt ::= HAVING expr */ - { 247, 0 }, /* (202) limit_opt ::= */ - { 247, -2 }, /* (203) limit_opt ::= LIMIT signed */ - { 247, -4 }, /* (204) limit_opt ::= LIMIT signed OFFSET signed */ - { 247, -4 }, /* (205) limit_opt ::= LIMIT signed COMMA signed */ - { 246, 0 }, /* (206) slimit_opt ::= */ - { 246, -2 }, /* (207) slimit_opt ::= SLIMIT signed */ - { 246, -4 }, /* (208) slimit_opt ::= SLIMIT signed SOFFSET signed */ - { 246, -4 }, /* (209) slimit_opt ::= SLIMIT signed COMMA signed */ - { 238, 0 }, /* (210) where_opt ::= */ - { 238, -2 }, /* (211) where_opt ::= WHERE expr */ - { 251, -3 }, /* (212) expr ::= LP expr RP */ - { 251, -1 }, /* (213) expr ::= ID */ - { 251, -3 }, /* (214) expr ::= ID DOT ID */ - { 251, -3 }, /* (215) expr ::= 
ID DOT STAR */ - { 251, -1 }, /* (216) expr ::= INTEGER */ - { 251, -2 }, /* (217) expr ::= MINUS INTEGER */ - { 251, -2 }, /* (218) expr ::= PLUS INTEGER */ - { 251, -1 }, /* (219) expr ::= FLOAT */ - { 251, -2 }, /* (220) expr ::= MINUS FLOAT */ - { 251, -2 }, /* (221) expr ::= PLUS FLOAT */ - { 251, -1 }, /* (222) expr ::= STRING */ - { 251, -1 }, /* (223) expr ::= NOW */ - { 251, -1 }, /* (224) expr ::= VARIABLE */ - { 251, -1 }, /* (225) expr ::= BOOL */ - { 251, -4 }, /* (226) expr ::= ID LP exprlist RP */ - { 251, -4 }, /* (227) expr ::= ID LP STAR RP */ - { 251, -3 }, /* (228) expr ::= expr IS NULL */ - { 251, -4 }, /* (229) expr ::= expr IS NOT NULL */ - { 251, -3 }, /* (230) expr ::= expr LT expr */ - { 251, -3 }, /* (231) expr ::= expr GT expr */ - { 251, -3 }, /* (232) expr ::= expr LE expr */ - { 251, -3 }, /* (233) expr ::= expr GE expr */ - { 251, -3 }, /* (234) expr ::= expr NE expr */ - { 251, -3 }, /* (235) expr ::= expr EQ expr */ - { 251, -5 }, /* (236) expr ::= expr BETWEEN expr AND expr */ - { 251, -3 }, /* (237) expr ::= expr AND expr */ - { 251, -3 }, /* (238) expr ::= expr OR expr */ - { 251, -3 }, /* (239) expr ::= expr PLUS expr */ - { 251, -3 }, /* (240) expr ::= expr MINUS expr */ - { 251, -3 }, /* (241) expr ::= expr STAR expr */ - { 251, -3 }, /* (242) expr ::= expr SLASH expr */ - { 251, -3 }, /* (243) expr ::= expr REM expr */ - { 251, -3 }, /* (244) expr ::= expr LIKE expr */ - { 251, -5 }, /* (245) expr ::= expr IN LP exprlist RP */ - { 260, -3 }, /* (246) exprlist ::= exprlist COMMA expritem */ - { 260, -1 }, /* (247) exprlist ::= expritem */ - { 261, -1 }, /* (248) expritem ::= expr */ - { 261, 0 }, /* (249) expritem ::= */ - { 188, -3 }, /* (250) cmd ::= RESET QUERY CACHE */ - { 188, -7 }, /* (251) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ - { 188, -7 }, /* (252) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ - { 188, -7 }, /* (253) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ - { 188, -7 }, /* (254) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ - { 188, -8 }, /* (255) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ - { 188, -9 }, /* (256) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ - { 188, -7 }, /* (257) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ - { 188, -7 }, /* (258) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ - { 188, -7 }, /* (259) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ - { 188, -7 }, /* (260) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ - { 188, -8 }, /* (261) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ - { 188, -3 }, /* (262) cmd ::= KILL CONNECTION INTEGER */ - { 188, -5 }, /* (263) cmd ::= KILL STREAM INTEGER COLON INTEGER */ - { 188, -5 }, /* (264) cmd ::= KILL QUERY INTEGER COLON INTEGER */ + { 188, -1 }, /* (0) program ::= cmd */ + { 189, -2 }, /* (1) cmd ::= SHOW DATABASES */ + { 189, -2 }, /* (2) cmd ::= SHOW TOPICS */ + { 189, -2 }, /* (3) cmd ::= SHOW MNODES */ + { 189, -2 }, /* (4) cmd ::= SHOW DNODES */ + { 189, -2 }, /* (5) cmd ::= SHOW ACCOUNTS */ + { 189, -2 }, /* (6) cmd ::= SHOW USERS */ + { 189, -2 }, /* (7) cmd ::= SHOW MODULES */ + { 189, -2 }, /* (8) cmd ::= SHOW QUERIES */ + { 189, -2 }, /* (9) cmd ::= SHOW CONNECTIONS */ + { 189, -2 }, /* (10) cmd ::= SHOW STREAMS */ + { 189, -2 }, /* (11) cmd ::= SHOW VARIABLES */ + { 189, -2 }, /* (12) cmd ::= SHOW SCORES */ + { 189, -2 }, /* (13) cmd ::= SHOW GRANTS */ + { 189, -2 }, /* (14) cmd ::= SHOW VNODES */ + { 189, -3 }, /* (15) cmd ::= SHOW VNODES IPTOKEN */ + { 190, 0 }, /* 
(16) dbPrefix ::= */ + { 190, -2 }, /* (17) dbPrefix ::= ids DOT */ + { 192, 0 }, /* (18) cpxName ::= */ + { 192, -2 }, /* (19) cpxName ::= DOT ids */ + { 189, -5 }, /* (20) cmd ::= SHOW CREATE TABLE ids cpxName */ + { 189, -4 }, /* (21) cmd ::= SHOW CREATE DATABASE ids */ + { 189, -3 }, /* (22) cmd ::= SHOW dbPrefix TABLES */ + { 189, -5 }, /* (23) cmd ::= SHOW dbPrefix TABLES LIKE ids */ + { 189, -3 }, /* (24) cmd ::= SHOW dbPrefix STABLES */ + { 189, -5 }, /* (25) cmd ::= SHOW dbPrefix STABLES LIKE ids */ + { 189, -3 }, /* (26) cmd ::= SHOW dbPrefix VGROUPS */ + { 189, -4 }, /* (27) cmd ::= SHOW dbPrefix VGROUPS ids */ + { 189, -5 }, /* (28) cmd ::= DROP TABLE ifexists ids cpxName */ + { 189, -5 }, /* (29) cmd ::= DROP STABLE ifexists ids cpxName */ + { 189, -4 }, /* (30) cmd ::= DROP DATABASE ifexists ids */ + { 189, -4 }, /* (31) cmd ::= DROP TOPIC ifexists ids */ + { 189, -3 }, /* (32) cmd ::= DROP DNODE ids */ + { 189, -3 }, /* (33) cmd ::= DROP USER ids */ + { 189, -3 }, /* (34) cmd ::= DROP ACCOUNT ids */ + { 189, -2 }, /* (35) cmd ::= USE ids */ + { 189, -3 }, /* (36) cmd ::= DESCRIBE ids cpxName */ + { 189, -5 }, /* (37) cmd ::= ALTER USER ids PASS ids */ + { 189, -5 }, /* (38) cmd ::= ALTER USER ids PRIVILEGE ids */ + { 189, -4 }, /* (39) cmd ::= ALTER DNODE ids ids */ + { 189, -5 }, /* (40) cmd ::= ALTER DNODE ids ids ids */ + { 189, -3 }, /* (41) cmd ::= ALTER LOCAL ids */ + { 189, -4 }, /* (42) cmd ::= ALTER LOCAL ids ids */ + { 189, -4 }, /* (43) cmd ::= ALTER DATABASE ids alter_db_optr */ + { 189, -4 }, /* (44) cmd ::= ALTER TOPIC ids alter_topic_optr */ + { 189, -4 }, /* (45) cmd ::= ALTER ACCOUNT ids acct_optr */ + { 189, -6 }, /* (46) cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ + { 191, -1 }, /* (47) ids ::= ID */ + { 191, -1 }, /* (48) ids ::= STRING */ + { 193, -2 }, /* (49) ifexists ::= IF EXISTS */ + { 193, 0 }, /* (50) ifexists ::= */ + { 197, -3 }, /* (51) ifnotexists ::= IF NOT EXISTS */ + { 197, 0 }, /* (52) ifnotexists ::= */ + { 189, -3 }, /* (53) cmd ::= CREATE DNODE ids */ + { 189, -6 }, /* (54) cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ + { 189, -5 }, /* (55) cmd ::= CREATE DATABASE ifnotexists ids db_optr */ + { 189, -5 }, /* (56) cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ + { 189, -5 }, /* (57) cmd ::= CREATE USER ids PASS ids */ + { 200, 0 }, /* (58) pps ::= */ + { 200, -2 }, /* (59) pps ::= PPS INTEGER */ + { 201, 0 }, /* (60) tseries ::= */ + { 201, -2 }, /* (61) tseries ::= TSERIES INTEGER */ + { 202, 0 }, /* (62) dbs ::= */ + { 202, -2 }, /* (63) dbs ::= DBS INTEGER */ + { 203, 0 }, /* (64) streams ::= */ + { 203, -2 }, /* (65) streams ::= STREAMS INTEGER */ + { 204, 0 }, /* (66) storage ::= */ + { 204, -2 }, /* (67) storage ::= STORAGE INTEGER */ + { 205, 0 }, /* (68) qtime ::= */ + { 205, -2 }, /* (69) qtime ::= QTIME INTEGER */ + { 206, 0 }, /* (70) users ::= */ + { 206, -2 }, /* (71) users ::= USERS INTEGER */ + { 207, 0 }, /* (72) conns ::= */ + { 207, -2 }, /* (73) conns ::= CONNS INTEGER */ + { 208, 0 }, /* (74) state ::= */ + { 208, -2 }, /* (75) state ::= STATE ids */ + { 196, -9 }, /* (76) acct_optr ::= pps tseries storage streams qtime dbs users conns state */ + { 209, -2 }, /* (77) keep ::= KEEP tagitemlist */ + { 211, -2 }, /* (78) cache ::= CACHE INTEGER */ + { 212, -2 }, /* (79) replica ::= REPLICA INTEGER */ + { 213, -2 }, /* (80) quorum ::= QUORUM INTEGER */ + { 214, -2 }, /* (81) days ::= DAYS INTEGER */ + { 215, -2 }, /* (82) minrows ::= MINROWS INTEGER */ + { 216, -2 }, /* (83) maxrows ::= MAXROWS INTEGER */ + { 
217, -2 }, /* (84) blocks ::= BLOCKS INTEGER */ + { 218, -2 }, /* (85) ctime ::= CTIME INTEGER */ + { 219, -2 }, /* (86) wal ::= WAL INTEGER */ + { 220, -2 }, /* (87) fsync ::= FSYNC INTEGER */ + { 221, -2 }, /* (88) comp ::= COMP INTEGER */ + { 222, -2 }, /* (89) prec ::= PRECISION STRING */ + { 223, -2 }, /* (90) update ::= UPDATE INTEGER */ + { 224, -2 }, /* (91) cachelast ::= CACHELAST INTEGER */ + { 225, -2 }, /* (92) partitions ::= PARTITIONS INTEGER */ + { 198, 0 }, /* (93) db_optr ::= */ + { 198, -2 }, /* (94) db_optr ::= db_optr cache */ + { 198, -2 }, /* (95) db_optr ::= db_optr replica */ + { 198, -2 }, /* (96) db_optr ::= db_optr quorum */ + { 198, -2 }, /* (97) db_optr ::= db_optr days */ + { 198, -2 }, /* (98) db_optr ::= db_optr minrows */ + { 198, -2 }, /* (99) db_optr ::= db_optr maxrows */ + { 198, -2 }, /* (100) db_optr ::= db_optr blocks */ + { 198, -2 }, /* (101) db_optr ::= db_optr ctime */ + { 198, -2 }, /* (102) db_optr ::= db_optr wal */ + { 198, -2 }, /* (103) db_optr ::= db_optr fsync */ + { 198, -2 }, /* (104) db_optr ::= db_optr comp */ + { 198, -2 }, /* (105) db_optr ::= db_optr prec */ + { 198, -2 }, /* (106) db_optr ::= db_optr keep */ + { 198, -2 }, /* (107) db_optr ::= db_optr update */ + { 198, -2 }, /* (108) db_optr ::= db_optr cachelast */ + { 199, -1 }, /* (109) topic_optr ::= db_optr */ + { 199, -2 }, /* (110) topic_optr ::= topic_optr partitions */ + { 194, 0 }, /* (111) alter_db_optr ::= */ + { 194, -2 }, /* (112) alter_db_optr ::= alter_db_optr replica */ + { 194, -2 }, /* (113) alter_db_optr ::= alter_db_optr quorum */ + { 194, -2 }, /* (114) alter_db_optr ::= alter_db_optr keep */ + { 194, -2 }, /* (115) alter_db_optr ::= alter_db_optr blocks */ + { 194, -2 }, /* (116) alter_db_optr ::= alter_db_optr comp */ + { 194, -2 }, /* (117) alter_db_optr ::= alter_db_optr wal */ + { 194, -2 }, /* (118) alter_db_optr ::= alter_db_optr fsync */ + { 194, -2 }, /* (119) alter_db_optr ::= alter_db_optr update */ + { 194, -2 }, /* (120) alter_db_optr ::= alter_db_optr cachelast */ + { 195, -1 }, /* (121) alter_topic_optr ::= alter_db_optr */ + { 195, -2 }, /* (122) alter_topic_optr ::= alter_topic_optr partitions */ + { 226, -1 }, /* (123) typename ::= ids */ + { 226, -4 }, /* (124) typename ::= ids LP signed RP */ + { 226, -2 }, /* (125) typename ::= ids UNSIGNED */ + { 227, -1 }, /* (126) signed ::= INTEGER */ + { 227, -2 }, /* (127) signed ::= PLUS INTEGER */ + { 227, -2 }, /* (128) signed ::= MINUS INTEGER */ + { 189, -3 }, /* (129) cmd ::= CREATE TABLE create_table_args */ + { 189, -3 }, /* (130) cmd ::= CREATE TABLE create_stable_args */ + { 189, -3 }, /* (131) cmd ::= CREATE STABLE create_stable_args */ + { 189, -3 }, /* (132) cmd ::= CREATE TABLE create_table_list */ + { 230, -1 }, /* (133) create_table_list ::= create_from_stable */ + { 230, -2 }, /* (134) create_table_list ::= create_table_list create_from_stable */ + { 228, -6 }, /* (135) create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ + { 229, -10 }, /* (136) create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ + { 231, -10 }, /* (137) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ + { 231, -13 }, /* (138) create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ + { 233, -3 }, /* (139) tagNamelist ::= tagNamelist COMMA ids */ + { 233, -1 }, /* (140) tagNamelist ::= ids */ + { 228, -5 }, /* (141) create_table_args ::= ifnotexists ids cpxName AS select 
*/ + { 232, -3 }, /* (142) columnlist ::= columnlist COMMA column */ + { 232, -1 }, /* (143) columnlist ::= column */ + { 235, -2 }, /* (144) column ::= ids typename */ + { 210, -3 }, /* (145) tagitemlist ::= tagitemlist COMMA tagitem */ + { 210, -1 }, /* (146) tagitemlist ::= tagitem */ + { 236, -1 }, /* (147) tagitem ::= INTEGER */ + { 236, -1 }, /* (148) tagitem ::= FLOAT */ + { 236, -1 }, /* (149) tagitem ::= STRING */ + { 236, -1 }, /* (150) tagitem ::= BOOL */ + { 236, -1 }, /* (151) tagitem ::= NULL */ + { 236, -2 }, /* (152) tagitem ::= MINUS INTEGER */ + { 236, -2 }, /* (153) tagitem ::= MINUS FLOAT */ + { 236, -2 }, /* (154) tagitem ::= PLUS INTEGER */ + { 236, -2 }, /* (155) tagitem ::= PLUS FLOAT */ + { 234, -13 }, /* (156) select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ + { 234, -3 }, /* (157) select ::= LP select RP */ + { 249, -1 }, /* (158) union ::= select */ + { 249, -4 }, /* (159) union ::= union UNION ALL select */ + { 189, -1 }, /* (160) cmd ::= union */ + { 234, -2 }, /* (161) select ::= SELECT selcollist */ + { 250, -2 }, /* (162) sclp ::= selcollist COMMA */ + { 250, 0 }, /* (163) sclp ::= */ + { 237, -4 }, /* (164) selcollist ::= sclp distinct expr as */ + { 237, -2 }, /* (165) selcollist ::= sclp STAR */ + { 253, -2 }, /* (166) as ::= AS ids */ + { 253, -1 }, /* (167) as ::= ids */ + { 253, 0 }, /* (168) as ::= */ + { 251, -1 }, /* (169) distinct ::= DISTINCT */ + { 251, 0 }, /* (170) distinct ::= */ + { 238, -2 }, /* (171) from ::= FROM tablelist */ + { 238, -4 }, /* (172) from ::= FROM LP union RP */ + { 254, -2 }, /* (173) tablelist ::= ids cpxName */ + { 254, -3 }, /* (174) tablelist ::= ids cpxName ids */ + { 254, -4 }, /* (175) tablelist ::= tablelist COMMA ids cpxName */ + { 254, -5 }, /* (176) tablelist ::= tablelist COMMA ids cpxName ids */ + { 255, -1 }, /* (177) tmvar ::= VARIABLE */ + { 240, -4 }, /* (178) interval_opt ::= INTERVAL LP tmvar RP */ + { 240, -6 }, /* (179) interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ + { 240, 0 }, /* (180) interval_opt ::= */ + { 241, 0 }, /* (181) session_option ::= */ + { 241, -7 }, /* (182) session_option ::= SESSION LP ids cpxName COMMA tmvar RP */ + { 242, 0 }, /* (183) fill_opt ::= */ + { 242, -6 }, /* (184) fill_opt ::= FILL LP ID COMMA tagitemlist RP */ + { 242, -4 }, /* (185) fill_opt ::= FILL LP ID RP */ + { 243, -4 }, /* (186) sliding_opt ::= SLIDING LP tmvar RP */ + { 243, 0 }, /* (187) sliding_opt ::= */ + { 245, 0 }, /* (188) orderby_opt ::= */ + { 245, -3 }, /* (189) orderby_opt ::= ORDER BY sortlist */ + { 256, -4 }, /* (190) sortlist ::= sortlist COMMA item sortorder */ + { 256, -2 }, /* (191) sortlist ::= item sortorder */ + { 258, -2 }, /* (192) item ::= ids cpxName */ + { 259, -1 }, /* (193) sortorder ::= ASC */ + { 259, -1 }, /* (194) sortorder ::= DESC */ + { 259, 0 }, /* (195) sortorder ::= */ + { 244, 0 }, /* (196) groupby_opt ::= */ + { 244, -3 }, /* (197) groupby_opt ::= GROUP BY grouplist */ + { 260, -3 }, /* (198) grouplist ::= grouplist COMMA item */ + { 260, -1 }, /* (199) grouplist ::= item */ + { 246, 0 }, /* (200) having_opt ::= */ + { 246, -2 }, /* (201) having_opt ::= HAVING expr */ + { 248, 0 }, /* (202) limit_opt ::= */ + { 248, -2 }, /* (203) limit_opt ::= LIMIT signed */ + { 248, -4 }, /* (204) limit_opt ::= LIMIT signed OFFSET signed */ + { 248, -4 }, /* (205) limit_opt ::= LIMIT signed COMMA signed */ + { 247, 0 }, /* (206) slimit_opt ::= */ + { 247, -2 }, /* (207) 
slimit_opt ::= SLIMIT signed */ + { 247, -4 }, /* (208) slimit_opt ::= SLIMIT signed SOFFSET signed */ + { 247, -4 }, /* (209) slimit_opt ::= SLIMIT signed COMMA signed */ + { 239, 0 }, /* (210) where_opt ::= */ + { 239, -2 }, /* (211) where_opt ::= WHERE expr */ + { 252, -3 }, /* (212) expr ::= LP expr RP */ + { 252, -1 }, /* (213) expr ::= ID */ + { 252, -3 }, /* (214) expr ::= ID DOT ID */ + { 252, -3 }, /* (215) expr ::= ID DOT STAR */ + { 252, -1 }, /* (216) expr ::= INTEGER */ + { 252, -2 }, /* (217) expr ::= MINUS INTEGER */ + { 252, -2 }, /* (218) expr ::= PLUS INTEGER */ + { 252, -1 }, /* (219) expr ::= FLOAT */ + { 252, -2 }, /* (220) expr ::= MINUS FLOAT */ + { 252, -2 }, /* (221) expr ::= PLUS FLOAT */ + { 252, -1 }, /* (222) expr ::= STRING */ + { 252, -1 }, /* (223) expr ::= NOW */ + { 252, -1 }, /* (224) expr ::= VARIABLE */ + { 252, -1 }, /* (225) expr ::= BOOL */ + { 252, -1 }, /* (226) expr ::= NULL */ + { 252, -4 }, /* (227) expr ::= ID LP exprlist RP */ + { 252, -4 }, /* (228) expr ::= ID LP STAR RP */ + { 252, -3 }, /* (229) expr ::= expr IS NULL */ + { 252, -4 }, /* (230) expr ::= expr IS NOT NULL */ + { 252, -3 }, /* (231) expr ::= expr LT expr */ + { 252, -3 }, /* (232) expr ::= expr GT expr */ + { 252, -3 }, /* (233) expr ::= expr LE expr */ + { 252, -3 }, /* (234) expr ::= expr GE expr */ + { 252, -3 }, /* (235) expr ::= expr NE expr */ + { 252, -3 }, /* (236) expr ::= expr EQ expr */ + { 252, -5 }, /* (237) expr ::= expr BETWEEN expr AND expr */ + { 252, -3 }, /* (238) expr ::= expr AND expr */ + { 252, -3 }, /* (239) expr ::= expr OR expr */ + { 252, -3 }, /* (240) expr ::= expr PLUS expr */ + { 252, -3 }, /* (241) expr ::= expr MINUS expr */ + { 252, -3 }, /* (242) expr ::= expr STAR expr */ + { 252, -3 }, /* (243) expr ::= expr SLASH expr */ + { 252, -3 }, /* (244) expr ::= expr REM expr */ + { 252, -3 }, /* (245) expr ::= expr LIKE expr */ + { 252, -5 }, /* (246) expr ::= expr IN LP exprlist RP */ + { 261, -3 }, /* (247) exprlist ::= exprlist COMMA expritem */ + { 261, -1 }, /* (248) exprlist ::= expritem */ + { 262, -1 }, /* (249) expritem ::= expr */ + { 262, 0 }, /* (250) expritem ::= */ + { 189, -3 }, /* (251) cmd ::= RESET QUERY CACHE */ + { 189, -3 }, /* (252) cmd ::= SYNCDB ids REPLICA */ + { 189, -7 }, /* (253) cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + { 189, -7 }, /* (254) cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + { 189, -7 }, /* (255) cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + { 189, -7 }, /* (256) cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + { 189, -8 }, /* (257) cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + { 189, -9 }, /* (258) cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + { 189, -7 }, /* (259) cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + { 189, -7 }, /* (260) cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + { 189, -7 }, /* (261) cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + { 189, -7 }, /* (262) cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + { 189, -8 }, /* (263) cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + { 189, -3 }, /* (264) cmd ::= KILL CONNECTION INTEGER */ + { 189, -5 }, /* (265) cmd ::= KILL STREAM INTEGER COLON INTEGER */ + { 189, -5 }, /* (266) cmd ::= KILL QUERY INTEGER COLON INTEGER */ }; static void yy_accept(yyParser*); /* Forward Declaration */ @@ -2252,13 +2258,13 @@ static void yy_reduce( break; case 43: /* cmd ::= ALTER DATABASE ids alter_db_optr */ case 44: /* cmd ::= ALTER TOPIC ids alter_topic_optr */ 
yytestcase(yyruleno==44); -{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy94, &t);} +{ SStrToken t = {0}; setCreateDbInfo(pInfo, TSDB_SQL_ALTER_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &t);} break; case 45: /* cmd ::= ALTER ACCOUNT ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy419);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-1].minor.yy0, NULL, &yymsp[0].minor.yy351);} break; case 46: /* cmd ::= ALTER ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy419);} +{ setCreateAcctSql(pInfo, TSDB_SQL_ALTER_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy351);} break; case 47: /* ids ::= ID */ case 48: /* ids ::= STRING */ yytestcase(yyruleno==48); @@ -2280,11 +2286,11 @@ static void yy_reduce( { setDCLSqlElems(pInfo, TSDB_SQL_CREATE_DNODE, 1, &yymsp[0].minor.yy0);} break; case 54: /* cmd ::= CREATE ACCOUNT ids PASS ids acct_optr */ -{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy419);} +{ setCreateAcctSql(pInfo, TSDB_SQL_CREATE_ACCT, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy351);} break; case 55: /* cmd ::= CREATE DATABASE ifnotexists ids db_optr */ case 56: /* cmd ::= CREATE TOPIC ifnotexists ids topic_optr */ yytestcase(yyruleno==56); -{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy94, &yymsp[-2].minor.yy0);} +{ setCreateDbInfo(pInfo, TSDB_SQL_CREATE_DB, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy322, &yymsp[-2].minor.yy0);} break; case 57: /* cmd ::= CREATE USER ids PASS ids */ { setCreateUserSql(pInfo, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0);} @@ -2313,20 +2319,20 @@ static void yy_reduce( break; case 76: /* acct_optr ::= pps tseries storage streams qtime dbs users conns state */ { - yylhsminor.yy419.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; - yylhsminor.yy419.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; - yylhsminor.yy419.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; - yylhsminor.yy419.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; - yylhsminor.yy419.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; - yylhsminor.yy419.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy419.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; - yylhsminor.yy419.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; - yylhsminor.yy419.stat = yymsp[0].minor.yy0; + yylhsminor.yy351.maxUsers = (yymsp[-2].minor.yy0.n>0)?atoi(yymsp[-2].minor.yy0.z):-1; + yylhsminor.yy351.maxDbs = (yymsp[-3].minor.yy0.n>0)?atoi(yymsp[-3].minor.yy0.z):-1; + yylhsminor.yy351.maxTimeSeries = (yymsp[-7].minor.yy0.n>0)?atoi(yymsp[-7].minor.yy0.z):-1; + yylhsminor.yy351.maxStreams = (yymsp[-5].minor.yy0.n>0)?atoi(yymsp[-5].minor.yy0.z):-1; + yylhsminor.yy351.maxPointsPerSecond = (yymsp[-8].minor.yy0.n>0)?atoi(yymsp[-8].minor.yy0.z):-1; + yylhsminor.yy351.maxStorage = (yymsp[-6].minor.yy0.n>0)?strtoll(yymsp[-6].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy351.maxQueryTime = (yymsp[-4].minor.yy0.n>0)?strtoll(yymsp[-4].minor.yy0.z, NULL, 10):-1; + yylhsminor.yy351.maxConnections = (yymsp[-1].minor.yy0.n>0)?atoi(yymsp[-1].minor.yy0.z):-1; + 
yylhsminor.yy351.stat = yymsp[0].minor.yy0; } - yymsp[-8].minor.yy419 = yylhsminor.yy419; + yymsp[-8].minor.yy351 = yylhsminor.yy351; break; case 77: /* keep ::= KEEP tagitemlist */ -{ yymsp[-1].minor.yy429 = yymsp[0].minor.yy429; } +{ yymsp[-1].minor.yy159 = yymsp[0].minor.yy159; } break; case 78: /* cache ::= CACHE INTEGER */ case 79: /* replica ::= REPLICA INTEGER */ yytestcase(yyruleno==79); @@ -2346,234 +2352,234 @@ static void yy_reduce( { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } break; case 93: /* db_optr ::= */ -{setDefaultCreateDbOption(&yymsp[1].minor.yy94); yymsp[1].minor.yy94.dbType = TSDB_DB_TYPE_DEFAULT;} +{setDefaultCreateDbOption(&yymsp[1].minor.yy322); yymsp[1].minor.yy322.dbType = TSDB_DB_TYPE_DEFAULT;} break; case 94: /* db_optr ::= db_optr cache */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cacheBlockSize = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 95: /* db_optr ::= db_optr replica */ case 112: /* alter_db_optr ::= alter_db_optr replica */ yytestcase(yyruleno==112); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.replica = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 96: /* db_optr ::= db_optr quorum */ case 113: /* alter_db_optr ::= alter_db_optr quorum */ yytestcase(yyruleno==113); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.quorum = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 97: /* db_optr ::= db_optr days */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.daysPerFile = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 98: /* db_optr ::= db_optr minrows */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.minRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 99: /* db_optr ::= db_optr maxrows */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.maxRowsPerBlock = strtod(yymsp[0].minor.yy0.z, NULL); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 100: /* db_optr ::= db_optr blocks */ case 115: /* alter_db_optr ::= alter_db_optr blocks */ yytestcase(yyruleno==115); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.numOfBlocks = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; 
case 101: /* db_optr ::= db_optr ctime */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.commitTime = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 102: /* db_optr ::= db_optr wal */ case 117: /* alter_db_optr ::= alter_db_optr wal */ yytestcase(yyruleno==117); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.walLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 103: /* db_optr ::= db_optr fsync */ case 118: /* alter_db_optr ::= alter_db_optr fsync */ yytestcase(yyruleno==118); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.fsyncPeriod = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 104: /* db_optr ::= db_optr comp */ case 116: /* alter_db_optr ::= alter_db_optr comp */ yytestcase(yyruleno==116); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.compressionLevel = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 105: /* db_optr ::= db_optr prec */ -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.precision = yymsp[0].minor.yy0; } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.precision = yymsp[0].minor.yy0; } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 106: /* db_optr ::= db_optr keep */ case 114: /* alter_db_optr ::= alter_db_optr keep */ yytestcase(yyruleno==114); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.keep = yymsp[0].minor.yy429; } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.keep = yymsp[0].minor.yy159; } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 107: /* db_optr ::= db_optr update */ case 119: /* alter_db_optr ::= alter_db_optr update */ yytestcase(yyruleno==119); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.update = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 108: /* db_optr ::= db_optr cachelast */ case 120: /* alter_db_optr ::= alter_db_optr cachelast */ yytestcase(yyruleno==120); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.cachelast = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 109: /* topic_optr ::= db_optr */ case 121: /* alter_topic_optr ::= alter_db_optr */ yytestcase(yyruleno==121); -{ yylhsminor.yy94 = yymsp[0].minor.yy94; yylhsminor.yy94.dbType = TSDB_DB_TYPE_TOPIC; } - yymsp[0].minor.yy94 = 
yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[0].minor.yy322; yylhsminor.yy322.dbType = TSDB_DB_TYPE_TOPIC; } + yymsp[0].minor.yy322 = yylhsminor.yy322; break; case 110: /* topic_optr ::= topic_optr partitions */ case 122: /* alter_topic_optr ::= alter_topic_optr partitions */ yytestcase(yyruleno==122); -{ yylhsminor.yy94 = yymsp[-1].minor.yy94; yylhsminor.yy94.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[-1].minor.yy94 = yylhsminor.yy94; +{ yylhsminor.yy322 = yymsp[-1].minor.yy322; yylhsminor.yy322.partitions = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[-1].minor.yy322 = yylhsminor.yy322; break; case 111: /* alter_db_optr ::= */ -{ setDefaultCreateDbOption(&yymsp[1].minor.yy94); yymsp[1].minor.yy94.dbType = TSDB_DB_TYPE_DEFAULT;} +{ setDefaultCreateDbOption(&yymsp[1].minor.yy322); yymsp[1].minor.yy322.dbType = TSDB_DB_TYPE_DEFAULT;} break; case 123: /* typename ::= ids */ { yymsp[0].minor.yy0.type = 0; - tSetColumnType (&yylhsminor.yy451, &yymsp[0].minor.yy0); + tSetColumnType (&yylhsminor.yy407, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy451 = yylhsminor.yy451; + yymsp[0].minor.yy407 = yylhsminor.yy407; break; case 124: /* typename ::= ids LP signed RP */ { - if (yymsp[-1].minor.yy481 <= 0) { + if (yymsp[-1].minor.yy317 <= 0) { yymsp[-3].minor.yy0.type = 0; - tSetColumnType(&yylhsminor.yy451, &yymsp[-3].minor.yy0); + tSetColumnType(&yylhsminor.yy407, &yymsp[-3].minor.yy0); } else { - yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy481; // negative value of name length - tSetColumnType(&yylhsminor.yy451, &yymsp[-3].minor.yy0); + yymsp[-3].minor.yy0.type = -yymsp[-1].minor.yy317; // negative value of name length + tSetColumnType(&yylhsminor.yy407, &yymsp[-3].minor.yy0); } } - yymsp[-3].minor.yy451 = yylhsminor.yy451; + yymsp[-3].minor.yy407 = yylhsminor.yy407; break; case 125: /* typename ::= ids UNSIGNED */ { yymsp[-1].minor.yy0.type = 0; yymsp[-1].minor.yy0.n = ((yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z); - tSetColumnType (&yylhsminor.yy451, &yymsp[-1].minor.yy0); + tSetColumnType (&yylhsminor.yy407, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy451 = yylhsminor.yy451; + yymsp[-1].minor.yy407 = yylhsminor.yy407; break; case 126: /* signed ::= INTEGER */ -{ yylhsminor.yy481 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } - yymsp[0].minor.yy481 = yylhsminor.yy481; +{ yylhsminor.yy317 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } + yymsp[0].minor.yy317 = yylhsminor.yy317; break; case 127: /* signed ::= PLUS INTEGER */ -{ yymsp[-1].minor.yy481 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } +{ yymsp[-1].minor.yy317 = strtol(yymsp[0].minor.yy0.z, NULL, 10); } break; case 128: /* signed ::= MINUS INTEGER */ -{ yymsp[-1].minor.yy481 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} +{ yymsp[-1].minor.yy317 = -strtol(yymsp[0].minor.yy0.z, NULL, 10);} break; case 132: /* cmd ::= CREATE TABLE create_table_list */ -{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy194;} +{ pInfo->type = TSDB_SQL_CREATE_TABLE; pInfo->pCreateTableInfo = yymsp[0].minor.yy14;} break; case 133: /* create_table_list ::= create_from_stable */ { SCreateTableSql* pCreateTable = calloc(1, sizeof(SCreateTableSql)); pCreateTable->childTableInfo = taosArrayInit(4, sizeof(SCreatedTableInfo)); - taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy252); + taosArrayPush(pCreateTable->childTableInfo, &yymsp[0].minor.yy206); pCreateTable->type = TSQL_CREATE_TABLE_FROM_STABLE; - yylhsminor.yy194 = pCreateTable; + yylhsminor.yy14 = pCreateTable; } - 
yymsp[0].minor.yy194 = yylhsminor.yy194; + yymsp[0].minor.yy14 = yylhsminor.yy14; break; case 134: /* create_table_list ::= create_table_list create_from_stable */ { - taosArrayPush(yymsp[-1].minor.yy194->childTableInfo, &yymsp[0].minor.yy252); - yylhsminor.yy194 = yymsp[-1].minor.yy194; + taosArrayPush(yymsp[-1].minor.yy14->childTableInfo, &yymsp[0].minor.yy206); + yylhsminor.yy14 = yymsp[-1].minor.yy14; } - yymsp[-1].minor.yy194 = yylhsminor.yy194; + yymsp[-1].minor.yy14 = yylhsminor.yy14; break; case 135: /* create_table_args ::= ifnotexists ids cpxName LP columnlist RP */ { - yylhsminor.yy194 = tSetCreateTableInfo(yymsp[-1].minor.yy429, NULL, NULL, TSQL_CREATE_TABLE); - setSqlInfo(pInfo, yylhsminor.yy194, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy14 = tSetCreateTableInfo(yymsp[-1].minor.yy159, NULL, NULL, TSQL_CREATE_TABLE); + setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-4].minor.yy0, &yymsp[-5].minor.yy0); } - yymsp[-5].minor.yy194 = yylhsminor.yy194; + yymsp[-5].minor.yy14 = yylhsminor.yy14; break; case 136: /* create_stable_args ::= ifnotexists ids cpxName LP columnlist RP TAGS LP columnlist RP */ { - yylhsminor.yy194 = tSetCreateTableInfo(yymsp[-5].minor.yy429, yymsp[-1].minor.yy429, NULL, TSQL_CREATE_STABLE); - setSqlInfo(pInfo, yylhsminor.yy194, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy14 = tSetCreateTableInfo(yymsp[-5].minor.yy159, yymsp[-1].minor.yy159, NULL, TSQL_CREATE_STABLE); + setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy194 = yylhsminor.yy194; + yymsp[-9].minor.yy14 = yylhsminor.yy14; break; case 137: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName TAGS LP tagitemlist RP */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; - yylhsminor.yy252 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy429, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); + yylhsminor.yy206 = createNewChildTableInfo(&yymsp[-5].minor.yy0, NULL, yymsp[-1].minor.yy159, &yymsp[-8].minor.yy0, &yymsp[-9].minor.yy0); } - yymsp[-9].minor.yy252 = yylhsminor.yy252; + yymsp[-9].minor.yy206 = yylhsminor.yy206; break; case 138: /* create_from_stable ::= ifnotexists ids cpxName USING ids cpxName LP tagNamelist RP TAGS LP tagitemlist RP */ { yymsp[-8].minor.yy0.n += yymsp[-7].minor.yy0.n; yymsp[-11].minor.yy0.n += yymsp[-10].minor.yy0.n; - yylhsminor.yy252 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy429, yymsp[-1].minor.yy429, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); + yylhsminor.yy206 = createNewChildTableInfo(&yymsp[-8].minor.yy0, yymsp[-5].minor.yy159, yymsp[-1].minor.yy159, &yymsp[-11].minor.yy0, &yymsp[-12].minor.yy0); } - yymsp[-12].minor.yy252 = yylhsminor.yy252; + yymsp[-12].minor.yy206 = yylhsminor.yy206; break; case 139: /* tagNamelist ::= tagNamelist COMMA ids */ -{taosArrayPush(yymsp[-2].minor.yy429, &yymsp[0].minor.yy0); yylhsminor.yy429 = yymsp[-2].minor.yy429; } - yymsp[-2].minor.yy429 = yylhsminor.yy429; +{taosArrayPush(yymsp[-2].minor.yy159, &yymsp[0].minor.yy0); yylhsminor.yy159 = yymsp[-2].minor.yy159; } + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 140: /* tagNamelist ::= ids */ -{yylhsminor.yy429 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy429, &yymsp[0].minor.yy0);} - 
yymsp[0].minor.yy429 = yylhsminor.yy429; +{yylhsminor.yy159 = taosArrayInit(4, sizeof(SStrToken)); taosArrayPush(yylhsminor.yy159, &yymsp[0].minor.yy0);} + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 141: /* create_table_args ::= ifnotexists ids cpxName AS select */ { - yylhsminor.yy194 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy254, TSQL_CREATE_STREAM); - setSqlInfo(pInfo, yylhsminor.yy194, NULL, TSDB_SQL_CREATE_TABLE); + yylhsminor.yy14 = tSetCreateTableInfo(NULL, NULL, yymsp[0].minor.yy272, TSQL_CREATE_STREAM); + setSqlInfo(pInfo, yylhsminor.yy14, NULL, TSDB_SQL_CREATE_TABLE); yymsp[-3].minor.yy0.n += yymsp[-2].minor.yy0.n; setCreatedTableName(pInfo, &yymsp[-3].minor.yy0, &yymsp[-4].minor.yy0); } - yymsp[-4].minor.yy194 = yylhsminor.yy194; + yymsp[-4].minor.yy14 = yylhsminor.yy14; break; case 142: /* columnlist ::= columnlist COMMA column */ -{taosArrayPush(yymsp[-2].minor.yy429, &yymsp[0].minor.yy451); yylhsminor.yy429 = yymsp[-2].minor.yy429; } - yymsp[-2].minor.yy429 = yylhsminor.yy429; +{taosArrayPush(yymsp[-2].minor.yy159, &yymsp[0].minor.yy407); yylhsminor.yy159 = yymsp[-2].minor.yy159; } + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 143: /* columnlist ::= column */ -{yylhsminor.yy429 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy429, &yymsp[0].minor.yy451);} - yymsp[0].minor.yy429 = yylhsminor.yy429; +{yylhsminor.yy159 = taosArrayInit(4, sizeof(TAOS_FIELD)); taosArrayPush(yylhsminor.yy159, &yymsp[0].minor.yy407);} + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 144: /* column ::= ids typename */ { - tSetColumnInfo(&yylhsminor.yy451, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy451); + tSetColumnInfo(&yylhsminor.yy407, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy407); } - yymsp[-1].minor.yy451 = yylhsminor.yy451; + yymsp[-1].minor.yy407 = yylhsminor.yy407; break; case 145: /* tagitemlist ::= tagitemlist COMMA tagitem */ -{ yylhsminor.yy429 = tVariantListAppend(yymsp[-2].minor.yy429, &yymsp[0].minor.yy218, -1); } - yymsp[-2].minor.yy429 = yylhsminor.yy429; +{ yylhsminor.yy159 = tVariantListAppend(yymsp[-2].minor.yy159, &yymsp[0].minor.yy488, -1); } + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 146: /* tagitemlist ::= tagitem */ -{ yylhsminor.yy429 = tVariantListAppend(NULL, &yymsp[0].minor.yy218, -1); } - yymsp[0].minor.yy429 = yylhsminor.yy429; +{ yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[0].minor.yy488, -1); } + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 147: /* tagitem ::= INTEGER */ case 148: /* tagitem ::= FLOAT */ yytestcase(yyruleno==148); case 149: /* tagitem ::= STRING */ yytestcase(yyruleno==149); case 150: /* tagitem ::= BOOL */ yytestcase(yyruleno==150); -{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy218, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy218 = yylhsminor.yy218; +{ toTSDBType(yymsp[0].minor.yy0.type); tVariantCreate(&yylhsminor.yy488, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy488 = yylhsminor.yy488; break; case 151: /* tagitem ::= NULL */ -{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy218, &yymsp[0].minor.yy0); } - yymsp[0].minor.yy218 = yylhsminor.yy218; +{ yymsp[0].minor.yy0.type = 0; tVariantCreate(&yylhsminor.yy488, &yymsp[0].minor.yy0); } + yymsp[0].minor.yy488 = yylhsminor.yy488; break; case 152: /* tagitem ::= MINUS INTEGER */ case 153: /* tagitem ::= MINUS FLOAT */ yytestcase(yyruleno==153); @@ -2583,56 +2589,56 @@ static void yy_reduce( yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = yymsp[0].minor.yy0.type; 
toTSDBType(yymsp[-1].minor.yy0.type); - tVariantCreate(&yylhsminor.yy218, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy488, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy218 = yylhsminor.yy218; + yymsp[-1].minor.yy488 = yylhsminor.yy488; break; case 156: /* select ::= SELECT selcollist from where_opt interval_opt session_option fill_opt sliding_opt groupby_opt orderby_opt having_opt slimit_opt limit_opt */ { - yylhsminor.yy254 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy429, yymsp[-10].minor.yy70, yymsp[-9].minor.yy170, yymsp[-4].minor.yy429, yymsp[-3].minor.yy429, &yymsp[-8].minor.yy220, &yymsp[-7].minor.yy87, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy429, &yymsp[0].minor.yy18, &yymsp[-1].minor.yy18); + yylhsminor.yy272 = tSetQuerySqlNode(&yymsp[-12].minor.yy0, yymsp[-11].minor.yy159, yymsp[-10].minor.yy514, yymsp[-9].minor.yy118, yymsp[-4].minor.yy159, yymsp[-3].minor.yy159, &yymsp[-8].minor.yy184, &yymsp[-7].minor.yy249, &yymsp[-5].minor.yy0, yymsp[-6].minor.yy159, &yymsp[0].minor.yy440, &yymsp[-1].minor.yy440); } - yymsp[-12].minor.yy254 = yylhsminor.yy254; + yymsp[-12].minor.yy272 = yylhsminor.yy272; break; case 157: /* select ::= LP select RP */ -{yymsp[-2].minor.yy254 = yymsp[-1].minor.yy254;} +{yymsp[-2].minor.yy272 = yymsp[-1].minor.yy272;} break; case 158: /* union ::= select */ -{ yylhsminor.yy141 = setSubclause(NULL, yymsp[0].minor.yy254); } - yymsp[0].minor.yy141 = yylhsminor.yy141; +{ yylhsminor.yy391 = setSubclause(NULL, yymsp[0].minor.yy272); } + yymsp[0].minor.yy391 = yylhsminor.yy391; break; case 159: /* union ::= union UNION ALL select */ -{ yylhsminor.yy141 = appendSelectClause(yymsp[-3].minor.yy141, yymsp[0].minor.yy254); } - yymsp[-3].minor.yy141 = yylhsminor.yy141; +{ yylhsminor.yy391 = appendSelectClause(yymsp[-3].minor.yy391, yymsp[0].minor.yy272); } + yymsp[-3].minor.yy391 = yylhsminor.yy391; break; case 160: /* cmd ::= union */ -{ setSqlInfo(pInfo, yymsp[0].minor.yy141, NULL, TSDB_SQL_SELECT); } +{ setSqlInfo(pInfo, yymsp[0].minor.yy391, NULL, TSDB_SQL_SELECT); } break; case 161: /* select ::= SELECT selcollist */ { - yylhsminor.yy254 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy429, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); + yylhsminor.yy272 = tSetQuerySqlNode(&yymsp[-1].minor.yy0, yymsp[0].minor.yy159, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL); } - yymsp[-1].minor.yy254 = yylhsminor.yy254; + yymsp[-1].minor.yy272 = yylhsminor.yy272; break; case 162: /* sclp ::= selcollist COMMA */ -{yylhsminor.yy429 = yymsp[-1].minor.yy429;} - yymsp[-1].minor.yy429 = yylhsminor.yy429; +{yylhsminor.yy159 = yymsp[-1].minor.yy159;} + yymsp[-1].minor.yy159 = yylhsminor.yy159; break; case 163: /* sclp ::= */ case 188: /* orderby_opt ::= */ yytestcase(yyruleno==188); -{yymsp[1].minor.yy429 = 0;} +{yymsp[1].minor.yy159 = 0;} break; case 164: /* selcollist ::= sclp distinct expr as */ { - yylhsminor.yy429 = tSqlExprListAppend(yymsp[-3].minor.yy429, yymsp[-1].minor.yy170, yymsp[-2].minor.yy0.n? &yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); + yylhsminor.yy159 = tSqlExprListAppend(yymsp[-3].minor.yy159, yymsp[-1].minor.yy118, yymsp[-2].minor.yy0.n? 
&yymsp[-2].minor.yy0:0, yymsp[0].minor.yy0.n?&yymsp[0].minor.yy0:0); } - yymsp[-3].minor.yy429 = yylhsminor.yy429; + yymsp[-3].minor.yy159 = yylhsminor.yy159; break; case 165: /* selcollist ::= sclp STAR */ { tSqlExpr *pNode = tSqlExprCreateIdValue(NULL, TK_ALL); - yylhsminor.yy429 = tSqlExprListAppend(yymsp[-1].minor.yy429, pNode, 0, 0); + yylhsminor.yy159 = tSqlExprListAppend(yymsp[-1].minor.yy159, pNode, 0, 0); } - yymsp[-1].minor.yy429 = yylhsminor.yy429; + yymsp[-1].minor.yy159 = yylhsminor.yy159; break; case 166: /* as ::= AS ids */ { yymsp[-1].minor.yy0 = yymsp[0].minor.yy0; } @@ -2649,64 +2655,64 @@ static void yy_reduce( yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 171: /* from ::= FROM tablelist */ -{yymsp[-1].minor.yy70 = yymsp[0].minor.yy70;} +{yymsp[-1].minor.yy514 = yymsp[0].minor.yy514;} break; case 172: /* from ::= FROM LP union RP */ -{yymsp[-3].minor.yy70 = setSubquery(NULL, yymsp[-1].minor.yy141);} +{yymsp[-3].minor.yy514 = setSubquery(NULL, yymsp[-1].minor.yy391);} break; case 173: /* tablelist ::= ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy70 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy514 = setTableNameList(NULL, &yymsp[-1].minor.yy0, NULL); } - yymsp[-1].minor.yy70 = yylhsminor.yy70; + yymsp[-1].minor.yy514 = yylhsminor.yy514; break; case 174: /* tablelist ::= ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy70 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy514 = setTableNameList(NULL, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-2].minor.yy70 = yylhsminor.yy70; + yymsp[-2].minor.yy514 = yylhsminor.yy514; break; case 175: /* tablelist ::= tablelist COMMA ids cpxName */ { yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - yylhsminor.yy70 = setTableNameList(yymsp[-3].minor.yy70, &yymsp[-1].minor.yy0, NULL); + yylhsminor.yy514 = setTableNameList(yymsp[-3].minor.yy514, &yymsp[-1].minor.yy0, NULL); } - yymsp[-3].minor.yy70 = yylhsminor.yy70; + yymsp[-3].minor.yy514 = yylhsminor.yy514; break; case 176: /* tablelist ::= tablelist COMMA ids cpxName ids */ { yymsp[-2].minor.yy0.n += yymsp[-1].minor.yy0.n; - yylhsminor.yy70 = setTableNameList(yymsp[-4].minor.yy70, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); + yylhsminor.yy514 = setTableNameList(yymsp[-4].minor.yy514, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } - yymsp[-4].minor.yy70 = yylhsminor.yy70; + yymsp[-4].minor.yy514 = yylhsminor.yy514; break; case 177: /* tmvar ::= VARIABLE */ {yylhsminor.yy0 = yymsp[0].minor.yy0;} yymsp[0].minor.yy0 = yylhsminor.yy0; break; case 178: /* interval_opt ::= INTERVAL LP tmvar RP */ -{yymsp[-3].minor.yy220.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy220.offset.n = 0;} +{yymsp[-3].minor.yy184.interval = yymsp[-1].minor.yy0; yymsp[-3].minor.yy184.offset.n = 0;} break; case 179: /* interval_opt ::= INTERVAL LP tmvar COMMA tmvar RP */ -{yymsp[-5].minor.yy220.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy220.offset = yymsp[-1].minor.yy0;} +{yymsp[-5].minor.yy184.interval = yymsp[-3].minor.yy0; yymsp[-5].minor.yy184.offset = yymsp[-1].minor.yy0;} break; case 180: /* interval_opt ::= */ -{memset(&yymsp[1].minor.yy220, 0, sizeof(yymsp[1].minor.yy220));} +{memset(&yymsp[1].minor.yy184, 0, sizeof(yymsp[1].minor.yy184));} break; case 181: /* session_option ::= */ -{yymsp[1].minor.yy87.col.n = 0; yymsp[1].minor.yy87.gap.n = 0;} +{yymsp[1].minor.yy249.col.n = 0; yymsp[1].minor.yy249.gap.n = 0;} break; case 182: /* session_option ::= SESSION LP ids 
cpxName COMMA tmvar RP */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - yymsp[-6].minor.yy87.col = yymsp[-4].minor.yy0; - yymsp[-6].minor.yy87.gap = yymsp[-1].minor.yy0; + yymsp[-6].minor.yy249.col = yymsp[-4].minor.yy0; + yymsp[-6].minor.yy249.gap = yymsp[-1].minor.yy0; } break; case 183: /* fill_opt ::= */ -{ yymsp[1].minor.yy429 = 0; } +{ yymsp[1].minor.yy159 = 0; } break; case 184: /* fill_opt ::= FILL LP ID COMMA tagitemlist RP */ { @@ -2714,14 +2720,14 @@ static void yy_reduce( toTSDBType(yymsp[-3].minor.yy0.type); tVariantCreate(&A, &yymsp[-3].minor.yy0); - tVariantListInsert(yymsp[-1].minor.yy429, &A, -1, 0); - yymsp[-5].minor.yy429 = yymsp[-1].minor.yy429; + tVariantListInsert(yymsp[-1].minor.yy159, &A, -1, 0); + yymsp[-5].minor.yy159 = yymsp[-1].minor.yy159; } break; case 185: /* fill_opt ::= FILL LP ID RP */ { toTSDBType(yymsp[-1].minor.yy0.type); - yymsp[-3].minor.yy429 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); + yymsp[-3].minor.yy159 = tVariantListAppendToken(NULL, &yymsp[-1].minor.yy0, -1); } break; case 186: /* sliding_opt ::= SLIDING LP tmvar RP */ @@ -2731,238 +2737,245 @@ static void yy_reduce( {yymsp[1].minor.yy0.n = 0; yymsp[1].minor.yy0.z = NULL; yymsp[1].minor.yy0.type = 0; } break; case 189: /* orderby_opt ::= ORDER BY sortlist */ -{yymsp[-2].minor.yy429 = yymsp[0].minor.yy429;} +{yymsp[-2].minor.yy159 = yymsp[0].minor.yy159;} break; case 190: /* sortlist ::= sortlist COMMA item sortorder */ { - yylhsminor.yy429 = tVariantListAppend(yymsp[-3].minor.yy429, &yymsp[-1].minor.yy218, yymsp[0].minor.yy116); + yylhsminor.yy159 = tVariantListAppend(yymsp[-3].minor.yy159, &yymsp[-1].minor.yy488, yymsp[0].minor.yy20); } - yymsp[-3].minor.yy429 = yylhsminor.yy429; + yymsp[-3].minor.yy159 = yylhsminor.yy159; break; case 191: /* sortlist ::= item sortorder */ { - yylhsminor.yy429 = tVariantListAppend(NULL, &yymsp[-1].minor.yy218, yymsp[0].minor.yy116); + yylhsminor.yy159 = tVariantListAppend(NULL, &yymsp[-1].minor.yy488, yymsp[0].minor.yy20); } - yymsp[-1].minor.yy429 = yylhsminor.yy429; + yymsp[-1].minor.yy159 = yylhsminor.yy159; break; case 192: /* item ::= ids cpxName */ { toTSDBType(yymsp[-1].minor.yy0.type); yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; - tVariantCreate(&yylhsminor.yy218, &yymsp[-1].minor.yy0); + tVariantCreate(&yylhsminor.yy488, &yymsp[-1].minor.yy0); } - yymsp[-1].minor.yy218 = yylhsminor.yy218; + yymsp[-1].minor.yy488 = yylhsminor.yy488; break; case 193: /* sortorder ::= ASC */ -{ yymsp[0].minor.yy116 = TSDB_ORDER_ASC; } +{ yymsp[0].minor.yy20 = TSDB_ORDER_ASC; } break; case 194: /* sortorder ::= DESC */ -{ yymsp[0].minor.yy116 = TSDB_ORDER_DESC;} +{ yymsp[0].minor.yy20 = TSDB_ORDER_DESC;} break; case 195: /* sortorder ::= */ -{ yymsp[1].minor.yy116 = TSDB_ORDER_ASC; } +{ yymsp[1].minor.yy20 = TSDB_ORDER_ASC; } break; case 196: /* groupby_opt ::= */ -{ yymsp[1].minor.yy429 = 0;} +{ yymsp[1].minor.yy159 = 0;} break; case 197: /* groupby_opt ::= GROUP BY grouplist */ -{ yymsp[-2].minor.yy429 = yymsp[0].minor.yy429;} +{ yymsp[-2].minor.yy159 = yymsp[0].minor.yy159;} break; case 198: /* grouplist ::= grouplist COMMA item */ { - yylhsminor.yy429 = tVariantListAppend(yymsp[-2].minor.yy429, &yymsp[0].minor.yy218, -1); + yylhsminor.yy159 = tVariantListAppend(yymsp[-2].minor.yy159, &yymsp[0].minor.yy488, -1); } - yymsp[-2].minor.yy429 = yylhsminor.yy429; + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; case 199: /* grouplist ::= item */ { - yylhsminor.yy429 = tVariantListAppend(NULL, &yymsp[0].minor.yy218, -1); + yylhsminor.yy159 = 
tVariantListAppend(NULL, &yymsp[0].minor.yy488, -1); } - yymsp[0].minor.yy429 = yylhsminor.yy429; + yymsp[0].minor.yy159 = yylhsminor.yy159; break; case 200: /* having_opt ::= */ case 210: /* where_opt ::= */ yytestcase(yyruleno==210); - case 249: /* expritem ::= */ yytestcase(yyruleno==249); -{yymsp[1].minor.yy170 = 0;} + case 250: /* expritem ::= */ yytestcase(yyruleno==250); +{yymsp[1].minor.yy118 = 0;} break; case 201: /* having_opt ::= HAVING expr */ case 211: /* where_opt ::= WHERE expr */ yytestcase(yyruleno==211); -{yymsp[-1].minor.yy170 = yymsp[0].minor.yy170;} +{yymsp[-1].minor.yy118 = yymsp[0].minor.yy118;} break; case 202: /* limit_opt ::= */ case 206: /* slimit_opt ::= */ yytestcase(yyruleno==206); -{yymsp[1].minor.yy18.limit = -1; yymsp[1].minor.yy18.offset = 0;} +{yymsp[1].minor.yy440.limit = -1; yymsp[1].minor.yy440.offset = 0;} break; case 203: /* limit_opt ::= LIMIT signed */ case 207: /* slimit_opt ::= SLIMIT signed */ yytestcase(yyruleno==207); -{yymsp[-1].minor.yy18.limit = yymsp[0].minor.yy481; yymsp[-1].minor.yy18.offset = 0;} +{yymsp[-1].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-1].minor.yy440.offset = 0;} break; case 204: /* limit_opt ::= LIMIT signed OFFSET signed */ -{ yymsp[-3].minor.yy18.limit = yymsp[-2].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[0].minor.yy481;} +{ yymsp[-3].minor.yy440.limit = yymsp[-2].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[0].minor.yy317;} break; case 205: /* limit_opt ::= LIMIT signed COMMA signed */ -{ yymsp[-3].minor.yy18.limit = yymsp[0].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[-2].minor.yy481;} +{ yymsp[-3].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[-2].minor.yy317;} break; case 208: /* slimit_opt ::= SLIMIT signed SOFFSET signed */ -{yymsp[-3].minor.yy18.limit = yymsp[-2].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[0].minor.yy481;} +{yymsp[-3].minor.yy440.limit = yymsp[-2].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[0].minor.yy317;} break; case 209: /* slimit_opt ::= SLIMIT signed COMMA signed */ -{yymsp[-3].minor.yy18.limit = yymsp[0].minor.yy481; yymsp[-3].minor.yy18.offset = yymsp[-2].minor.yy481;} +{yymsp[-3].minor.yy440.limit = yymsp[0].minor.yy317; yymsp[-3].minor.yy440.offset = yymsp[-2].minor.yy317;} break; case 212: /* expr ::= LP expr RP */ -{yylhsminor.yy170 = yymsp[-1].minor.yy170; yylhsminor.yy170->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy170->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; +{yylhsminor.yy118 = yymsp[-1].minor.yy118; yylhsminor.yy118->token.z = yymsp[-2].minor.yy0.z; yylhsminor.yy118->token.n = (yymsp[0].minor.yy0.z - yymsp[-2].minor.yy0.z + 1);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; case 213: /* expr ::= ID */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_ID);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 214: /* expr ::= ID DOT ID */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ID);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; case 215: /* expr ::= ID DOT STAR */ -{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy170 = 
tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; +{ yymsp[-2].minor.yy0.n += (1+yymsp[0].minor.yy0.n); yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-2].minor.yy0, TK_ALL);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; case 216: /* expr ::= INTEGER */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_INTEGER);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 217: /* expr ::= MINUS INTEGER */ case 218: /* expr ::= PLUS INTEGER */ yytestcase(yyruleno==218); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} - yymsp[-1].minor.yy170 = yylhsminor.yy170; +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_INTEGER; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_INTEGER);} + yymsp[-1].minor.yy118 = yylhsminor.yy118; break; case 219: /* expr ::= FLOAT */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_FLOAT);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 220: /* expr ::= MINUS FLOAT */ case 221: /* expr ::= PLUS FLOAT */ yytestcase(yyruleno==221); -{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} - yymsp[-1].minor.yy170 = yylhsminor.yy170; +{ yymsp[-1].minor.yy0.n += yymsp[0].minor.yy0.n; yymsp[-1].minor.yy0.type = TK_FLOAT; yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[-1].minor.yy0, TK_FLOAT);} + yymsp[-1].minor.yy118 = yylhsminor.yy118; break; case 222: /* expr ::= STRING */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_STRING);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 223: /* expr ::= NOW */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NOW); } + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 224: /* expr ::= VARIABLE */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_VARIABLE);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; case 225: /* expr ::= BOOL */ -{ yylhsminor.yy170 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} - yymsp[0].minor.yy170 = yylhsminor.yy170; +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_BOOL);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 226: /* expr ::= ID LP exprlist RP */ -{ yylhsminor.yy170 = tSqlExprCreateFunction(yymsp[-1].minor.yy429, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } - yymsp[-3].minor.yy170 = yylhsminor.yy170; + case 226: /* expr ::= NULL */ +{ yylhsminor.yy118 = tSqlExprCreateIdValue(&yymsp[0].minor.yy0, TK_NULL);} + yymsp[0].minor.yy118 = yylhsminor.yy118; break; - case 227: /* expr ::= ID LP STAR RP */ -{ yylhsminor.yy170 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); 
} - yymsp[-3].minor.yy170 = yylhsminor.yy170; + case 227: /* expr ::= ID LP exprlist RP */ +{ yylhsminor.yy118 = tSqlExprCreateFunction(yymsp[-1].minor.yy159, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 228: /* expr ::= expr IS NULL */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, NULL, TK_ISNULL);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 228: /* expr ::= ID LP STAR RP */ +{ yylhsminor.yy118 = tSqlExprCreateFunction(NULL, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, yymsp[-3].minor.yy0.type); } + yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 229: /* expr ::= expr IS NOT NULL */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-3].minor.yy170, NULL, TK_NOTNULL);} - yymsp[-3].minor.yy170 = yylhsminor.yy170; + case 229: /* expr ::= expr IS NULL */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, NULL, TK_ISNULL);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 230: /* expr ::= expr LT expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_LT);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 230: /* expr ::= expr IS NOT NULL */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-3].minor.yy118, NULL, TK_NOTNULL);} + yymsp[-3].minor.yy118 = yylhsminor.yy118; break; - case 231: /* expr ::= expr GT expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_GT);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 231: /* expr ::= expr LT expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LT);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 232: /* expr ::= expr LE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_LE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 232: /* expr ::= expr GT expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_GT);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 233: /* expr ::= expr GE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_GE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 233: /* expr ::= expr LE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 234: /* expr ::= expr NE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_NE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 234: /* expr ::= expr GE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_GE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 235: /* expr ::= expr EQ expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_EQ);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 235: /* expr ::= expr NE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_NE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 236: /* expr ::= expr BETWEEN expr AND expr */ -{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy170); yylhsminor.yy170 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy170, yymsp[-2].minor.yy170, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy170, TK_LE), TK_AND);} - yymsp[-4].minor.yy170 = yylhsminor.yy170; + case 236: /* expr ::= expr EQ expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, 
TK_EQ);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 237: /* expr ::= expr AND expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_AND);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 237: /* expr ::= expr BETWEEN expr AND expr */ +{ tSqlExpr* X2 = tSqlExprClone(yymsp[-4].minor.yy118); yylhsminor.yy118 = tSqlExprCreate(tSqlExprCreate(yymsp[-4].minor.yy118, yymsp[-2].minor.yy118, TK_GE), tSqlExprCreate(X2, yymsp[0].minor.yy118, TK_LE), TK_AND);} + yymsp[-4].minor.yy118 = yylhsminor.yy118; break; - case 238: /* expr ::= expr OR expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_OR); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 238: /* expr ::= expr AND expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_AND);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 239: /* expr ::= expr PLUS expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_PLUS); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 239: /* expr ::= expr OR expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_OR); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 240: /* expr ::= expr MINUS expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_MINUS); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 240: /* expr ::= expr PLUS expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_PLUS); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 241: /* expr ::= expr STAR expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_STAR); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 241: /* expr ::= expr MINUS expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_MINUS); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 242: /* expr ::= expr SLASH expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_DIVIDE);} - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 242: /* expr ::= expr STAR expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_STAR); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 243: /* expr ::= expr REM expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_REM); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 243: /* expr ::= expr SLASH expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_DIVIDE);} + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 244: /* expr ::= expr LIKE expr */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-2].minor.yy170, yymsp[0].minor.yy170, TK_LIKE); } - yymsp[-2].minor.yy170 = yylhsminor.yy170; + case 244: /* expr ::= expr REM expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_REM); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 245: /* expr ::= expr IN LP exprlist RP */ -{yylhsminor.yy170 = tSqlExprCreate(yymsp[-4].minor.yy170, (tSqlExpr*)yymsp[-1].minor.yy429, TK_IN); } - yymsp[-4].minor.yy170 = yylhsminor.yy170; + case 245: /* expr ::= expr LIKE expr */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-2].minor.yy118, yymsp[0].minor.yy118, TK_LIKE); } + yymsp[-2].minor.yy118 = yylhsminor.yy118; break; - case 246: /* exprlist ::= exprlist COMMA expritem */ 
-{yylhsminor.yy429 = tSqlExprListAppend(yymsp[-2].minor.yy429,yymsp[0].minor.yy170,0, 0);} - yymsp[-2].minor.yy429 = yylhsminor.yy429; + case 246: /* expr ::= expr IN LP exprlist RP */ +{yylhsminor.yy118 = tSqlExprCreate(yymsp[-4].minor.yy118, (tSqlExpr*)yymsp[-1].minor.yy159, TK_IN); } + yymsp[-4].minor.yy118 = yylhsminor.yy118; break; - case 247: /* exprlist ::= expritem */ -{yylhsminor.yy429 = tSqlExprListAppend(0,yymsp[0].minor.yy170,0, 0);} - yymsp[0].minor.yy429 = yylhsminor.yy429; + case 247: /* exprlist ::= exprlist COMMA expritem */ +{yylhsminor.yy159 = tSqlExprListAppend(yymsp[-2].minor.yy159,yymsp[0].minor.yy118,0, 0);} + yymsp[-2].minor.yy159 = yylhsminor.yy159; break; - case 248: /* expritem ::= expr */ -{yylhsminor.yy170 = yymsp[0].minor.yy170;} - yymsp[0].minor.yy170 = yylhsminor.yy170; + case 248: /* exprlist ::= expritem */ +{yylhsminor.yy159 = tSqlExprListAppend(0,yymsp[0].minor.yy118,0, 0);} + yymsp[0].minor.yy159 = yylhsminor.yy159; break; - case 250: /* cmd ::= RESET QUERY CACHE */ + case 249: /* expritem ::= expr */ +{yylhsminor.yy118 = yymsp[0].minor.yy118;} + yymsp[0].minor.yy118 = yylhsminor.yy118; + break; + case 251: /* cmd ::= RESET QUERY CACHE */ { setDCLSqlElems(pInfo, TSDB_SQL_RESET_CACHE, 0);} break; - case 251: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ + case 252: /* cmd ::= SYNCDB ids REPLICA */ +{ setDCLSqlElems(pInfo, TSDB_SQL_SYNC_DB_REPLICA, 1, &yymsp[-1].minor.yy0);} + break; + case 253: /* cmd ::= ALTER TABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 252: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ + case 254: /* cmd ::= ALTER TABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2973,14 +2986,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 253: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ + case 255: /* cmd ::= ALTER TABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 254: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ + case 256: /* cmd ::= ALTER TABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -2991,7 +3004,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 255: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ + case 257: /* cmd ::= ALTER TABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3005,26 +3018,26 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 256: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ + case 258: /* cmd ::= ALTER TABLE ids cpxName SET TAG ids EQ tagitem */ { yymsp[-6].minor.yy0.n += yymsp[-5].minor.yy0.n; 
toTSDBType(yymsp[-2].minor.yy0.type); SArray* A = tVariantListAppendToken(NULL, &yymsp[-2].minor.yy0, -1); - A = tVariantListAppend(A, &yymsp[0].minor.yy218, -1); + A = tVariantListAppend(A, &yymsp[0].minor.yy488, -1); SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-6].minor.yy0, NULL, A, TSDB_ALTER_TABLE_UPDATE_TAG_VAL, -1); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 257: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ + case 259: /* cmd ::= ALTER STABLE ids cpxName ADD COLUMN columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 258: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ + case 260: /* cmd ::= ALTER STABLE ids cpxName DROP COLUMN ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3035,14 +3048,14 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 259: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ + case 261: /* cmd ::= ALTER STABLE ids cpxName ADD TAG columnlist */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; - SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy429, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); + SAlterTableInfo* pAlterTable = tSetAlterTableInfo(&yymsp[-4].minor.yy0, yymsp[0].minor.yy159, NULL, TSDB_ALTER_TABLE_ADD_TAG_COLUMN, TSDB_SUPER_TABLE); setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 260: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ + case 262: /* cmd ::= ALTER STABLE ids cpxName DROP TAG ids */ { yymsp[-4].minor.yy0.n += yymsp[-3].minor.yy0.n; @@ -3053,7 +3066,7 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 261: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ + case 263: /* cmd ::= ALTER STABLE ids cpxName CHANGE TAG ids ids */ { yymsp[-5].minor.yy0.n += yymsp[-4].minor.yy0.n; @@ -3067,13 +3080,13 @@ static void yy_reduce( setSqlInfo(pInfo, pAlterTable, NULL, TSDB_SQL_ALTER_TABLE); } break; - case 262: /* cmd ::= KILL CONNECTION INTEGER */ + case 264: /* cmd ::= KILL CONNECTION INTEGER */ {setKillSql(pInfo, TSDB_SQL_KILL_CONNECTION, &yymsp[0].minor.yy0);} break; - case 263: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ + case 265: /* cmd ::= KILL STREAM INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_STREAM, &yymsp[-2].minor.yy0);} break; - case 264: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ + case 266: /* cmd ::= KILL QUERY INTEGER COLON INTEGER */ {yymsp[-2].minor.yy0.n += (yymsp[-1].minor.yy0.n + yymsp[0].minor.yy0.n); setKillSql(pInfo, TSDB_SQL_KILL_QUERY, &yymsp[-2].minor.yy0);} break; default: diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c index cae227cbdb..133ae6d0ab 100644 --- a/src/rpc/src/rpcMain.c +++ b/src/rpc/src/rpcMain.c @@ -1017,6 +1017,13 @@ static SRpcConn *rpcProcessMsgHead(SRpcInfo *pRpc, SRecvInfo *pRecv, SRpcReqCont return pConn; } +static void doRpcReportBrokenLinkToServer(void *param, void *id) { + SRpcMsg *pRpcMsg = (SRpcMsg *)(param); + SRpcConn *pConn = 
(SRpcConn *)(pRpcMsg->handle); + SRpcInfo *pRpc = pConn->pRpc; + (*(pRpc->cfp))(pRpcMsg, NULL); + free(pRpcMsg); +} static void rpcReportBrokenLinkToServer(SRpcConn *pConn) { SRpcInfo *pRpc = pConn->pRpc; if (pConn->pReqMsg == NULL) return; @@ -1025,16 +1032,20 @@ static void rpcReportBrokenLinkToServer(SRpcConn *pConn) { rpcAddRef(pRpc); tDebug("%s, notify the server app, connection is gone", pConn->info); - SRpcMsg rpcMsg; - rpcMsg.pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server - rpcMsg.contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length - rpcMsg.ahandle = pConn->ahandle; - rpcMsg.handle = pConn; - rpcMsg.msgType = pConn->inType; - rpcMsg.code = TSDB_CODE_RPC_NETWORK_UNAVAIL; + SRpcMsg *rpcMsg = malloc(sizeof(SRpcMsg)); + rpcMsg->pCont = pConn->pReqMsg; // pReqMsg is re-used to store the APP context from server + rpcMsg->contLen = pConn->reqMsgLen; // reqMsgLen is re-used to store the APP context length + rpcMsg->ahandle = pConn->ahandle; + rpcMsg->handle = pConn; + rpcMsg->msgType = pConn->inType; + rpcMsg->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; pConn->pReqMsg = NULL; pConn->reqMsgLen = 0; - if (pRpc->cfp) (*(pRpc->cfp))(&rpcMsg, NULL); + if (pRpc->cfp) { + taosTmrStart(doRpcReportBrokenLinkToServer, 0, rpcMsg, pRpc->tmrCtrl); + } else { + free(rpcMsg); + } } static void rpcProcessBrokenLink(SRpcConn *pConn) { @@ -1051,7 +1062,7 @@ static void rpcProcessBrokenLink(SRpcConn *pConn) { pConn->pReqMsg = NULL; taosTmrStart(rpcProcessConnError, 0, pContext, pRpc->tmrCtrl); } - + if (pConn->inType) rpcReportBrokenLinkToServer(pConn); rpcReleaseConn(pConn); diff --git a/src/sync/inc/syncInt.h b/src/sync/inc/syncInt.h index 2b87938474..91613ae351 100644 --- a/src/sync/inc/syncInt.h +++ b/src/sync/inc/syncInt.h @@ -117,7 +117,6 @@ typedef struct SSyncNode { FStartSyncFile startSyncFileFp; FStopSyncFile stopSyncFileFp; FGetVersion getVersionFp; - FResetVersion resetVersionFp; FSendFile sendFileFp; FRecvFile recvFileFp; pthread_mutex_t mutex; diff --git a/src/sync/src/syncMain.c b/src/sync/src/syncMain.c index 264bbf6b92..d21743d40a 100644 --- a/src/sync/src/syncMain.c +++ b/src/sync/src/syncMain.c @@ -182,7 +182,6 @@ int64_t syncStart(const SSyncInfo *pInfo) { pNode->startSyncFileFp = pInfo->startSyncFileFp; pNode->stopSyncFileFp = pInfo->stopSyncFileFp; pNode->getVersionFp = pInfo->getVersionFp; - pNode->resetVersionFp = pInfo->resetVersionFp; pNode->sendFileFp = pInfo->sendFileFp; pNode->recvFileFp = pInfo->recvFileFp; @@ -410,7 +409,7 @@ void syncConfirmForward(int64_t rid, uint64_t version, int32_t code, bool force) syncReleaseNode(pNode); } -#if 0 +#if 1 void syncRecover(int64_t rid) { SSyncPeer *pPeer; diff --git a/src/sync/src/syncRestore.c b/src/sync/src/syncRestore.c index 22d0a27581..c0d66316cd 100644 --- a/src/sync/src/syncRestore.c +++ b/src/sync/src/syncRestore.c @@ -238,7 +238,6 @@ static int32_t syncRestoreDataStepByStep(SSyncPeer *pPeer) { (*pNode->stopSyncFileFp)(pNode->vgId, fversion); nodeVersion = fversion; - if (pNode->resetVersionFp) (*pNode->resetVersionFp)(pNode->vgId, fversion); sInfo("%s, start to restore wal, fver:%" PRIu64, pPeer->id, nodeVersion); uint64_t wver = 0; diff --git a/src/tsdb/src/tsdbCommit.c b/src/tsdb/src/tsdbCommit.c index 02dd2d1ca4..4351cce51c 100644 --- a/src/tsdb/src/tsdbCommit.c +++ b/src/tsdb/src/tsdbCommit.c @@ -17,7 +17,7 @@ #define TSDB_MAX_SUBBLOCKS 8 static FORCE_INLINE int TSDB_KEY_FID(TSKEY key, int32_t days, int8_t precision) { if (key < 0) { - return (int)(-((-key) / 
tsMsPerDay[precision] / days + 1)); + return (int)((key + 1) / tsMsPerDay[precision] / days - 1); } else { return (int)((key / tsMsPerDay[precision] / days)); } @@ -452,6 +452,14 @@ static int tsdbCommitToFile(SCommitH *pCommith, SDFileSet *pSet, int fid) { return -1; } + if (tsdbUpdateDFileSetHeader(&(pCommith->wSet)) < 0) { + tsdbError("vgId:%d failed to update FSET %d header since %s", REPO_ID(pRepo), fid, tstrerror(terrno)); + tsdbCloseCommitFile(pCommith, true); + // revert the file change + tsdbApplyDFileSetChange(TSDB_COMMIT_WRITE_FSET(pCommith), pSet); + return -1; + } + // Close commit file tsdbCloseCommitFile(pCommith, false); diff --git a/src/tsdb/src/tsdbFS.c b/src/tsdb/src/tsdbFS.c index a9e74cb229..c9f087a5cf 100644 --- a/src/tsdb/src/tsdbFS.c +++ b/src/tsdb/src/tsdbFS.c @@ -33,6 +33,7 @@ static int tsdbScanDataDir(STsdbRepo *pRepo); static bool tsdbIsTFileInFS(STsdbFS *pfs, const TFILE *pf); static int tsdbRestoreCurrent(STsdbRepo *pRepo); static int tsdbComparTFILE(const void *arg1, const void *arg2); +static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo); // ================== CURRENT file header info static int tsdbEncodeFSHeader(void **buf, SFSHeader *pHeader) { @@ -246,6 +247,8 @@ int tsdbOpenFS(STsdbRepo *pRepo) { tsdbError("vgId:%d failed to open FS since %s", REPO_ID(pRepo), tstrerror(terrno)); return -1; } + + tsdbScanAndTryFixDFilesHeader(pRepo); } else { if (tsdbRestoreCurrent(pRepo) < 0) { tsdbError("vgId:%d failed to restore current file since %s", REPO_ID(pRepo), tstrerror(terrno)); @@ -1201,4 +1204,41 @@ static int tsdbComparTFILE(const void *arg1, const void *arg2) { return 0; } } +} + +static void tsdbScanAndTryFixDFilesHeader(STsdbRepo *pRepo) { + STsdbFS * pfs = REPO_FS(pRepo); + SFSStatus *pStatus = pfs->cstatus; + SDFInfo info; + + for (size_t i = 0; i < taosArrayGetSize(pStatus->df); i++) { + SDFileSet fset; + tsdbInitDFileSetEx(&fset, (SDFileSet *)taosArrayGet(pStatus->df, i)); + + tsdbDebug("vgId:%d scan DFileSet %d header", REPO_ID(pRepo), fset.fid); + + if (tsdbOpenDFileSet(&fset, O_RDWR) < 0) { + tsdbError("vgId:%d failed to open DFileSet %d since %s, continue", REPO_ID(pRepo), fset.fid, tstrerror(terrno)); + continue; + } + + for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { + SDFile *pDFile = TSDB_DFILE_IN_SET(&fset, ftype); + + if ((tsdbLoadDFileHeader(pDFile, &info) < 0) || pDFile->info.size != info.size || + pDFile->info.magic != info.magic) { + if (tsdbUpdateDFileHeader(pDFile) < 0) { + tsdbError("vgId:%d failed to update DFile header of %s since %s, continue", REPO_ID(pRepo), + TSDB_FILE_FULL_NAME(pDFile), tstrerror(terrno)); + } else { + tsdbInfo("vgId:%d DFile header of %s is updated", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); + TSDB_FILE_FSYNC(pDFile); + } + } else { + tsdbDebug("vgId:%d DFile header of %s is correct", REPO_ID(pRepo), TSDB_FILE_FULL_NAME(pDFile)); + } + } + + tsdbCloseDFileSet(&fset); + } } \ No newline at end of file diff --git a/src/tsdb/src/tsdbFile.c b/src/tsdb/src/tsdbFile.c index ac33096aae..5db993e463 100644 --- a/src/tsdb/src/tsdbFile.c +++ b/src/tsdb/src/tsdbFile.c @@ -134,14 +134,14 @@ int tsdbCreateMFile(SMFile *pMFile, bool updateHeader) { return 0; } + pMFile->info.size += TSDB_FILE_HEAD_SIZE; + if (tsdbUpdateMFileHeader(pMFile) < 0) { tsdbCloseMFile(pMFile); tsdbRemoveMFile(pMFile); return -1; } - pMFile->info.size += TSDB_FILE_HEAD_SIZE; - return 0; } @@ -378,14 +378,14 @@ int tsdbCreateDFile(SDFile *pDFile, bool updateHeader) { return 0; } + pDFile->info.size += TSDB_FILE_HEAD_SIZE; + 
if (tsdbUpdateDFileHeader(pDFile) < 0) { tsdbCloseDFile(pDFile); tsdbRemoveDFile(pDFile); return -1; } - pDFile->info.size += TSDB_FILE_HEAD_SIZE; - return 0; } @@ -567,6 +567,7 @@ void tsdbInitDFileSet(SDFileSet *pSet, SDiskID did, int vid, int fid, uint32_t v } void tsdbInitDFileSetEx(SDFileSet *pSet, SDFileSet *pOSet) { + pSet->fid = pOSet->fid; for (TSDB_FILE_T ftype = 0; ftype < TSDB_FILE_MAX; ftype++) { tsdbInitDFileEx(TSDB_DFILE_IN_SET(pSet, ftype), TSDB_DFILE_IN_SET(pOSet, ftype)); } diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 643131185f..41b129939b 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -111,7 +111,7 @@ typedef struct STsdbQueryHandle { bool loadExternalRow; // load time window external data rows bool currentLoadExternalRows; // current load external rows int32_t loadType; // block load type - void* qinfo; // query info handle, for debug purpose + uint64_t qId; // query info handle, for debug purpose int32_t type; // query type: retrieve all data blocks, 2. retrieve only last row, 3. retrieve direct prev|next rows SDFileSet* pFileGroup; SFSIter fileIter; @@ -286,8 +286,8 @@ static SArray* createCheckInfoFromTableGroup(STsdbQueryHandle* pQueryHandle, STa } taosArrayPush(pTableCheckInfo, &info); - tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %p", pQueryHandle, info.tableId.uid, - info.tableId.tid, info.lastKey, pQueryHandle->qinfo); + tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %"PRIu64, pQueryHandle, info.tableId.uid, + info.tableId.tid, info.lastKey, pQueryHandle->qId); } } @@ -339,7 +339,7 @@ static SArray* createCheckInfoFromCheckInfo(STableCheckInfo* pCheckInfo, TSKEY s return pNew; } -static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pCond, void* qinfo, SMemRef* pMemRef) { +static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pCond, uint64_t qId, SMemRef* pMemRef) { STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle)); if (pQueryHandle == NULL) { goto out_of_memory; @@ -353,7 +353,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC pQueryHandle->cur.win = TSWINDOW_INITIALIZER; pQueryHandle->checkFiles = true; pQueryHandle->activeIndex = 0; // current active table index - pQueryHandle->qinfo = qinfo; + pQueryHandle->qId = qId; pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock; pQueryHandle->allocSize = 0; pQueryHandle->locateStart = false; @@ -406,7 +406,7 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC pQueryHandle->pDataCols = tdNewDataCols(pMeta->maxRowBytes, pMeta->maxCols, pQueryHandle->pTsdb->config.maxRowsPerFileBlock); if (pQueryHandle->pDataCols == NULL) { - tsdbError("%p failed to malloc buf for pDataCols, %p", pQueryHandle, pQueryHandle->qinfo); + tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto out_of_memory; } @@ -422,8 +422,8 @@ static STsdbQueryHandle* tsdbQueryTablesImpl(STsdbRepo* tsdb, STsdbQueryCond* pC return NULL; } -TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo, SMemRef* pRef) { - STsdbQueryHandle* pQueryHandle = tsdbQueryTablesImpl(tsdb, pCond, qinfo, pRef); +TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, uint64_t qId, SMemRef* pRef) { + STsdbQueryHandle* pQueryHandle = 
tsdbQueryTablesImpl(tsdb, pCond, qId, pRef); STsdbMeta* pMeta = tsdbGetMeta(tsdb); assert(pMeta != NULL); @@ -440,7 +440,7 @@ TsdbQueryHandleT* tsdbQueryTables(STsdbRepo* tsdb, STsdbQueryCond* pCond, STable tsdbMayTakeMemSnapshot(pQueryHandle, psTable); - tsdbDebug("%p total numOfTable:%" PRIzu " in query, %p", pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qinfo); + tsdbDebug("%p total numOfTable:%" PRIzu " in query, %"PRIu64, pQueryHandle, taosArrayGetSize(pQueryHandle->pTableCheckInfo), pQueryHandle->qId); return (TsdbQueryHandleT) pQueryHandle; } @@ -512,7 +512,7 @@ void tsdbResetQueryHandleForNewTable(TsdbQueryHandleT queryHandle, STsdbQueryCon pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next); } -TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pMemRef) { +TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pMemRef) { pCond->twindow = updateLastrowForEachGroup(groupList); // no qualified table @@ -520,7 +520,7 @@ TsdbQueryHandleT tsdbQueryLastRow(STsdbRepo *tsdb, STsdbQueryCond *pCond, STable return NULL; } - STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qinfo, pMemRef); + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, groupList, qId, pMemRef); int32_t code = checkForCachedLastRow(pQueryHandle, groupList); if (code != TSDB_CODE_SUCCESS) { // set the numOfTables to be 0 terrno = code; @@ -581,10 +581,10 @@ static STableGroupInfo* trimTableGroup(STimeWindow* window, STableGroupInfo* pGr return pNew; } -TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, void* qinfo, SMemRef* pRef) { +TsdbQueryHandleT tsdbQueryRowsInExternalWindow(STsdbRepo *tsdb, STsdbQueryCond* pCond, STableGroupInfo *groupList, uint64_t qId, SMemRef* pRef) { STableGroupInfo* pNew = trimTableGroup(&pCond->twindow, groupList); - STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qinfo, pRef); + STsdbQueryHandle *pQueryHandle = (STsdbQueryHandle*) tsdbQueryTables(tsdb, pCond, pNew, qId, pRef); pQueryHandle->loadExternalRow = true; pQueryHandle->currentLoadExternalRows = true; @@ -651,9 +651,9 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in mem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 - "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p", + "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pMem->keyFirst, pMem->keyLast, - pCheckInfo->lastKey, pMem->numOfRows, pHandle->qinfo); + pCheckInfo->lastKey, pMem->numOfRows, pHandle->qId); if (ASCENDING_TRAVERSE(order)) { assert(pCheckInfo->lastKey <= key); @@ -662,8 +662,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh } } else { - tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, - pHandle->qinfo); + tsdbDebug("%p uid:%"PRId64", tid:%d no data in mem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, + pHandle->qId); } if (!imemEmpty) { @@ -673,9 +673,9 @@ static bool 
initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh SDataRow row = (SDataRow)SL_GET_NODE_DATA(node); TSKEY key = dataRowKey(row); // first timestamp in buffer tsdbDebug("%p uid:%" PRId64 ", tid:%d check data in imem from skey:%" PRId64 ", order:%d, ts range in buf:%" PRId64 - "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %p", + "-%" PRId64 ", lastKey:%" PRId64 ", numOfRows:%"PRId64", %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, key, order, pIMem->keyFirst, pIMem->keyLast, - pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qinfo); + pCheckInfo->lastKey, pIMem->numOfRows, pHandle->qId); if (ASCENDING_TRAVERSE(order)) { assert(pCheckInfo->lastKey <= key); @@ -683,8 +683,8 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh assert(pCheckInfo->lastKey >= key); } } else { - tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %p", pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, - pHandle->qinfo); + tsdbDebug("%p uid:%"PRId64", tid:%d no data in imem, %"PRIu64, pHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, + pHandle->qId); } return true; @@ -811,8 +811,8 @@ static bool hasMoreDataInCache(STsdbQueryHandle* pHandle) { } pCheckInfo->lastKey = dataRowKey(row); // first timestamp in buffer - tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %p", pHandle, - pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qinfo); + tsdbDebug("%p uid:%" PRId64", tid:%d check data in buffer from skey:%" PRId64 ", order:%d, %"PRIu64, pHandle, + pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, pCheckInfo->lastKey, pHandle->order, pHandle->qId); // all data in mem are checked already. if ((pCheckInfo->lastKey > pHandle->window.ekey && ASCENDING_TRAVERSE(pHandle->order)) || @@ -917,7 +917,9 @@ static int32_t loadBlockInfo(STsdbQueryHandle * pQueryHandle, int32_t index, int pCheckInfo->compSize = compIndex->len; } - tsdbLoadBlockInfo(&(pQueryHandle->rhelper), (void*)(pCheckInfo->pCompInfo)); + if (tsdbLoadBlockInfo(&(pQueryHandle->rhelper), (void*)(pCheckInfo->pCompInfo)) < 0) { + return terrno; + } SBlockInfo* pCompInfo = pCheckInfo->pCompInfo; TSKEY s = TSKEY_INITIAL_VAL, e = TSKEY_INITIAL_VAL; @@ -984,21 +986,21 @@ static int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc STSchema *pSchema = tsdbGetTableSchema(pCheckInfo->pTableObj); int32_t code = tdInitDataCols(pQueryHandle->pDataCols, pSchema); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p failed to malloc buf for pDataCols, %p", pQueryHandle, pQueryHandle->qinfo); + tsdbError("%p failed to malloc buf for pDataCols, %"PRIu64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _error; } code = tdInitDataCols(pQueryHandle->rhelper.pDCols[0], pSchema); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %p", pQueryHandle, pQueryHandle->qinfo); + tsdbError("%p failed to malloc buf for rhelper.pDataCols[0], %"PRIu64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _error; } code = tdInitDataCols(pQueryHandle->rhelper.pDCols[1], pSchema); if (code != TSDB_CODE_SUCCESS) { - tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %p", pQueryHandle, pQueryHandle->qinfo); + tsdbError("%p failed to malloc buf for rhelper.pDataCols[1], %"PRIu64, pQueryHandle, pQueryHandle->qId); terrno = TSDB_CODE_TDB_OUT_OF_MEMORY; goto _error; } @@ -1034,15 +1036,15 @@ static 
int32_t doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SBlock* pBloc int64_t elapsedTime = (taosGetTimestampUs() - st); pQueryHandle->cost.blockLoadTime += elapsedTime; - tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %p", - pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qinfo); + tsdbDebug("%p load file block into buffer, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, elapsed time:%"PRId64 " us, %"PRIu64, + pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, elapsedTime, pQueryHandle->qId); return TSDB_CODE_SUCCESS; _error: pBlock->numOfRows = 0; - tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %p", - pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qinfo); + tsdbError("%p error occurs in loading file block, index:%d, brange:%"PRId64"-%"PRId64", rows:%d, %"PRIu64, + pQueryHandle, slotIndex, pBlock->keyFirst, pBlock->keyLast, pBlock->numOfRows, pQueryHandle->qId); return terrno; } @@ -1064,7 +1066,7 @@ static int32_t handleDataMergeIfNeeded(STsdbQueryHandle* pQueryHandle, SBlock* p assert(cur->pos >= 0 && cur->pos <= binfo.rows); TSKEY key = (row != NULL)? dataRowKey(row):TSKEY_INITIAL_VAL; - tsdbDebug("%p key in mem:%"PRId64", %p", pQueryHandle, key, pQueryHandle->qinfo); + tsdbDebug("%p key in mem:%"PRId64", %"PRIu64, pQueryHandle, key, pQueryHandle->qId); if ((ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key <= binfo.window.ekey)) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && (key != TSKEY_INITIAL_VAL && key >= binfo.window.skey))) { @@ -1545,9 +1547,9 @@ static void copyAllRemainRowsFromFileBlock(STsdbQueryHandle* pQueryHandle, STabl updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos); doCheckGeneratedBlockRange(pQueryHandle); - tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %p", + tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %"PRIu64, pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey, - cur->win.ekey, cur->rows, pQueryHandle->qinfo); + cur->win.ekey, cur->rows, pQueryHandle->qId); } int32_t getEndPosInDataBlock(STsdbQueryHandle* pQueryHandle, SDataBlockInfo* pBlockInfo) { @@ -1599,9 +1601,9 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* int32_t endPos = getEndPosInDataBlock(pQueryHandle, &blockInfo); tsdbDebug("%p uid:%" PRIu64",tid:%d start merge data block, file block range:%"PRIu64"-%"PRIu64" rows:%d, start:%d," - "end:%d, %p", + "end:%d, %"PRIu64, pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, blockInfo.window.skey, blockInfo.window.ekey, - blockInfo.rows, cur->pos, endPos, pQueryHandle->qinfo); + blockInfo.rows, cur->pos, endPos, pQueryHandle->qId); // compared with the data from in-memory buffer, to generate the correct timestamp array list int32_t numOfRows = 0; @@ -1741,9 +1743,9 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo* updateInfoAfterMerge(pQueryHandle, pCheckInfo, numOfRows, pos); doCheckGeneratedBlockRange(pQueryHandle); - tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" rows:%d, %p", + tsdbDebug("%p uid:%" PRIu64",tid:%d data block created, mixblock:%d, brange:%"PRIu64"-%"PRIu64" 
rows:%d, %"PRIu64, pQueryHandle, pCheckInfo->tableId.uid, pCheckInfo->tableId.tid, cur->mixBlock, cur->win.skey, - cur->win.ekey, cur->rows, pQueryHandle->qinfo); + cur->win.ekey, cur->rows, pQueryHandle->qId); } int32_t binarySearchForKey(char* pValue, int num, TSKEY key, int order) { @@ -1917,13 +1919,13 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO memcpy(pQueryHandle->pDataBlockInfo, sup.pDataBlockInfo[0], sizeof(STableBlockInfo) * numOfBlocks); cleanBlockOrderSupporter(&sup, numOfQualTables); - tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted %p ", pQueryHandle, cnt, - pQueryHandle->qinfo); + tsdbDebug("%p create data blocks info struct completed for 1 table, %d blocks not sorted %"PRIu64, pQueryHandle, cnt, + pQueryHandle->qId); return TSDB_CODE_SUCCESS; } - tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %p", pQueryHandle, cnt, - numOfQualTables, pQueryHandle->qinfo); + tsdbDebug("%p create data blocks info struct completed, %d blocks in %d tables %"PRIu64, pQueryHandle, cnt, + numOfQualTables, pQueryHandle->qId); assert(cnt <= numOfBlocks && numOfQualTables <= numOfTables); // the pTableQueryInfo[j]->numOfBlocks may be 0 sup.numOfTables = numOfQualTables; @@ -1959,7 +1961,7 @@ static int32_t createDataBlocksInfo(STsdbQueryHandle* pQueryHandle, int32_t numO * } */ - tsdbDebug("%p %d data blocks sort completed, %p", pQueryHandle, cnt, pQueryHandle->qinfo); + tsdbDebug("%p %d data blocks sort completed, %"PRIu64, pQueryHandle, cnt, pQueryHandle->qId); cleanBlockOrderSupporter(&sup, numOfTables); free(pTree); @@ -2017,8 +2019,8 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) { tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); - tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %p", pQueryHandle, - pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qinfo); + tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle, + pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId); pQueryHandle->pFileGroup = NULL; assert(pQueryHandle->numOfBlocks == 0); break; @@ -2041,8 +2043,8 @@ static int32_t getFirstFileDataBlock(STsdbQueryHandle* pQueryHandle, bool* exist break; } - tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks, numOfTables, - pQueryHandle->pFileGroup->fid, pQueryHandle->qinfo); + tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables, + pQueryHandle->pFileGroup->fid, pQueryHandle->qId); assert(numOfBlocks >= 0); if (numOfBlocks == 0) { @@ -2133,8 +2135,8 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist if ((ASCENDING_TRAVERSE(pQueryHandle->order) && win.skey > pQueryHandle->window.ekey) || (!ASCENDING_TRAVERSE(pQueryHandle->order) && win.ekey < pQueryHandle->window.ekey)) { tsdbUnLockFS(REPO_FS(pQueryHandle->pTsdb)); - tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %p", pQueryHandle, - pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qinfo); + tsdbDebug("%p remain files are not qualified for qrange:%" PRId64 "-%" PRId64 ", ignore, %"PRIu64, pQueryHandle, + 
pQueryHandle->window.skey, pQueryHandle->window.ekey, pQueryHandle->qId); pQueryHandle->pFileGroup = NULL; break; } @@ -2157,8 +2159,8 @@ int32_t tsdbGetFileBlocksDistInfo(TsdbQueryHandleT* queryHandle, STableBlockDist break; } - tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %p", pQueryHandle, numOfBlocks, numOfTables, - pQueryHandle->pFileGroup->fid, pQueryHandle->qinfo); + tsdbDebug("%p %d blocks found in file for %d table(s), fid:%d, %"PRIu64, pQueryHandle, numOfBlocks, numOfTables, + pQueryHandle->pFileGroup->fid, pQueryHandle->qId); if (numOfBlocks == 0) { continue; @@ -2205,8 +2207,8 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists if ((!cur->mixBlock) || cur->blockCompleted) { // all data blocks in current file has been checked already, try next file if exists } else { - tsdbDebug("%p continue in current data block, index:%d, pos:%d, %p", pQueryHandle, cur->slot, cur->pos, - pQueryHandle->qinfo); + tsdbDebug("%p continue in current data block, index:%d, pos:%d, %"PRIu64, pQueryHandle, cur->slot, cur->pos, + pQueryHandle->qId); int32_t code = handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo); *exists = (pQueryHandle->realNumOfRows > 0); @@ -2334,8 +2336,8 @@ static int tsdbReadRowsFromCache(STableCheckInfo* pCheckInfo, TSKEY maxKey, int } int64_t elapsedTime = taosGetTimestampUs() - st; - tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, %p", pQueryHandle, - elapsedTime, numOfRows, numOfCols, pQueryHandle->qinfo); + tsdbDebug("%p build data block from cache completed, elapsed time:%"PRId64" us, numOfRows:%d, numOfCols:%d, %"PRIu64, pQueryHandle, + elapsedTime, numOfRows, numOfCols, pQueryHandle->qId); return numOfRows; } @@ -2593,7 +2595,7 @@ static int32_t doGetExternalRow(STsdbQueryHandle* pQueryHandle, int16_t type, SM memcpy(&cond.colList[i], &pColInfoData->info, sizeof(SColumnInfo)); } - pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qinfo, pMemRef); + pSecQueryHandle = tsdbQueryTablesImpl(pQueryHandle->pTsdb, &cond, pQueryHandle->qId, pMemRef); tfree(cond.colList); // current table, only one table @@ -2832,7 +2834,9 @@ int32_t tsdbRetrieveDataBlockStatisInfo(TsdbQueryHandleT* pQueryHandle, SDataSta } int64_t stime = taosGetTimestampUs(); - tsdbLoadBlockStatis(&pHandle->rhelper, pBlockInfo->compBlock); + if (tsdbLoadBlockStatis(&pHandle->rhelper, pBlockInfo->compBlock) < 0) { + return terrno; + } int16_t* colIds = pHandle->defaultLoadColumn->pData; @@ -3393,8 +3397,8 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) { pQueryHandle->next = doFreeColumnInfoData(pQueryHandle->next); SIOCostSummary* pCost = &pQueryHandle->cost; - tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, %p", - pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qinfo); + tsdbDebug("%p :io-cost summary: statis-info:%"PRId64" us, datablock:%" PRId64" us, check data:%"PRId64" us, %"PRIu64, + pQueryHandle, pCost->statisInfoLoadTime, pCost->blockLoadTime, pCost->checkForNextTime, pQueryHandle->qId); tfree(pQueryHandle); } diff --git a/src/tsdb/src/tsdbSync.c b/src/tsdb/src/tsdbSync.c index cef561aebe..5a2756537e 100644 --- a/src/tsdb/src/tsdbSync.c +++ b/src/tsdb/src/tsdbSync.c @@ -194,8 +194,8 @@ static int32_t tsdbSyncRecvMeta(SSyncH *pSynch) { return 0; } - if (pLMFile == NULL || memcmp(&(pSynch->pmf->info), &(pLMFile->info), 
sizeof(SMFInfo)) != 0 || - TSDB_FILE_IS_BAD(pLMFile)) { + if (pLMFile == NULL || pSynch->pmf->info.size != pLMFile->info.size || + pSynch->pmf->info.magic != pLMFile->info.magic || TSDB_FILE_IS_BAD(pLMFile)) { // Local has no meta file or has a different meta file, need to copy from remote pSynch->mfChanged = true; @@ -536,7 +536,7 @@ static bool tsdbIsTowFSetSame(SDFileSet *pSet1, SDFileSet *pSet2) { SDFile *pDFile1 = TSDB_DFILE_IN_SET(pSet1, ftype); SDFile *pDFile2 = TSDB_DFILE_IN_SET(pSet2, ftype); - if (memcmp((void *)(TSDB_FILE_INFO(pDFile1)), (void *)(TSDB_FILE_INFO(pDFile2)), sizeof(SDFInfo)) != 0) { + if (pDFile1->info.size != pDFile2->info.size || pDFile1->info.magic != pDFile2->info.magic) { return false; } } diff --git a/src/util/src/tsocket.c b/src/util/src/tsocket.c index b33cdb8b80..77941cba82 100644 --- a/src/util/src/tsocket.c +++ b/src/util/src/tsocket.c @@ -22,6 +22,8 @@ #define SIGPIPE EPIPE #endif +#define TCP_CONN_TIMEOUT 3000 // conn timeout + int32_t taosGetFqdn(char *fqdn) { char hostname[1024]; hostname[1023] = '\0'; @@ -346,10 +348,47 @@ SOCKET taosOpenTcpClientSocket(uint32_t destIp, uint16_t destPort, uint32_t clie serverAddr.sin_addr.s_addr = destIp; serverAddr.sin_port = (uint16_t)htons((uint16_t)destPort); - ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); +#ifdef _TD_LINUX + taosSetNonblocking(sockFd, 1); + ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); + if (ret == -1) { + if (errno == EHOSTUNREACH) { + uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); + taosCloseSocket(sockFd); + return -1; + } else if (errno == EINPROGRESS || errno == EAGAIN || errno == EWOULDBLOCK) { + struct pollfd wfd[1]; + + wfd[0].fd = sockFd; + wfd[0].events = POLLOUT; + + int res = poll(wfd, 1, TCP_CONN_TIMEOUT); + if (res == -1 || res == 0) { + uError("failed to connect socket, ip:0x%x, port:%hu(poll error/conn timeout)", destIp, destPort); + taosCloseSocket(sockFd); // + return -1; + } + int optVal = -1, optLen = sizeof(int); + if ((0 != taosGetSockOpt(sockFd, SOL_SOCKET, SO_ERROR, &optVal, &optLen)) || (optVal != 0)) { + uError("failed to connect socket, ip:0x%x, port:%hu(connect host error)", destIp, destPort); + taosCloseSocket(sockFd); // + return -1; + } + ret = 0; + } else { // Other error + uError("failed to connect socket, ip:0x%x, port:%hu(target host cannot be reached)", destIp, destPort); + taosCloseSocket(sockFd); // + return -1; + } + } + taosSetNonblocking(sockFd, 0); + +#else + ret = connect(sockFd, (struct sockaddr *)&serverAddr, sizeof(serverAddr)); +#endif if (ret != 0) { - // uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); + uError("failed to connect socket, ip:0x%x, port:%hu(%s)", destIp, destPort, strerror(errno)); taosCloseSocket(sockFd); sockFd = -1; } else { diff --git a/src/vnode/inc/vnodeBackup.h b/src/vnode/inc/vnodeBackup.h new file mode 100644 index 0000000000..0a6b26546c --- /dev/null +++ b/src/vnode/inc/vnodeBackup.h @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
+ * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef TDENGINE_VNODE_BACKUP_H +#define TDENGINE_VNODE_BACKUP_H + +#ifdef __cplusplus +extern "C" { +#endif +#include "vnodeInt.h" + +int32_t vnodeInitBackup(); +void vnodeCleanupBackup(); +int32_t vnodeBackup(int32_t vgId); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/src/vnode/inc/vnodeMain.h b/src/vnode/inc/vnodeMain.h index 73591bc10d..91a5d632cd 100644 --- a/src/vnode/inc/vnodeMain.h +++ b/src/vnode/inc/vnodeMain.h @@ -25,6 +25,7 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg); int32_t vnodeDrop(int32_t vgId); int32_t vnodeOpen(int32_t vgId); int32_t vnodeAlter(void *pVnode, SCreateVnodeMsg *pVnodeCfg); +int32_t vnodeSync(int32_t vgId); int32_t vnodeClose(int32_t vgId); void vnodeCleanUp(SVnodeObj *pVnode); void vnodeDestroy(SVnodeObj *pVnode); @@ -33,4 +34,4 @@ void vnodeDestroy(SVnodeObj *pVnode); } #endif -#endif \ No newline at end of file +#endif diff --git a/src/vnode/inc/vnodeSync.h b/src/vnode/inc/vnodeSync.h index 75d7ffbabd..28fb63dd6a 100644 --- a/src/vnode/inc/vnodeSync.h +++ b/src/vnode/inc/vnodeSync.h @@ -30,7 +30,6 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion); void vnodeConfirmForard(int32_t vgId, void *wparam, int32_t code); int32_t vnodeWriteToCache(int32_t vgId, void *wparam, int32_t qtype, void *rparam); int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver); -int32_t vnodeResetVersion(int32_t vgId, uint64_t fver); void vnodeConfirmForward(void *pVnode, uint64_t version, int32_t code, bool force); diff --git a/src/vnode/src/vnodeBackup.c b/src/vnode/src/vnodeBackup.c new file mode 100644 index 0000000000..a0a975be2b --- /dev/null +++ b/src/vnode/src/vnodeBackup.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2019 TAOS Data, Inc. + * + * This program is free software: you can use, redistribute, and/or modify + * it under the terms of the GNU Affero General Public License, version 3 + * or later ("AGPL"), as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. + * + * You should have received a copy of the GNU Affero General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */ + +#define _DEFAULT_SOURCE +#include "os.h" +#include "taoserror.h" +#include "taosmsg.h" +#include "tutil.h" +#include "tqueue.h" +#include "tglobal.h" +#include "tfs.h" +#include "vnodeBackup.h" +#include "vnodeMain.h" + +typedef struct { + int32_t vgId; +} SVBackupMsg; + +typedef struct { + pthread_t thread; + int32_t workerId; +} SVBackupWorker; + +typedef struct { + int32_t num; + SVBackupWorker *worker; +} SVBackupWorkerPool; + +static SVBackupWorkerPool tsVBackupPool; +static taos_qset tsVBackupQset; +static taos_queue tsVBackupQueue; + +static void vnodeProcessBackupMsg(SVBackupMsg *pMsg) { + int32_t vgId = pMsg->vgId; + char newDir[TSDB_FILENAME_LEN] = {0}; + char stagingDir[TSDB_FILENAME_LEN] = {0}; + + sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId); + sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId); + + if (tsEnableVnodeBak) { + tfsRmdir(newDir); + tfsRename(stagingDir, newDir); + } else { + vInfo("vgId:%d, vnode backup not enabled", vgId); + + tfsRmdir(stagingDir); + } +} + +static void *vnodeBackupFunc(void *param) { + while (1) { + SVBackupMsg *pMsg = NULL; + if (taosReadQitemFromQset(tsVBackupQset, NULL, (void **)&pMsg, NULL) == 0) { + vDebug("qset:%p, vbackup got no message from qset, exiting", tsVBackupQset); + break; + } + + vTrace("vgId:%d, will be processed in vbackup queue", pMsg->vgId); + vnodeProcessBackupMsg(pMsg); + + vTrace("vgId:%d, disposed in vbackup worker", pMsg->vgId); + taosFreeQitem(pMsg); + } + + return NULL; +} + +static int32_t vnodeStartBackup() { + tsVBackupQueue = taosOpenQueue(); + if (tsVBackupQueue == NULL) return TSDB_CODE_DND_OUT_OF_MEMORY; + + taosAddIntoQset(tsVBackupQset, tsVBackupQueue, NULL); + + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + pWorker->workerId = i; + + pthread_attr_t thAttr; + pthread_attr_init(&thAttr); + pthread_attr_setdetachstate(&thAttr, PTHREAD_CREATE_JOINABLE); + + if (pthread_create(&pWorker->thread, &thAttr, vnodeBackupFunc, pWorker) != 0) { + vError("failed to create thread to process vbackup queue, reason:%s", strerror(errno)); + } + + pthread_attr_destroy(&thAttr); + + vDebug("vbackup:%d is launched, total:%d", pWorker->workerId, tsVBackupPool.num); + } + + vDebug("vbackup queue:%p is allocated", tsVBackupQueue); + + return TSDB_CODE_SUCCESS; +} + +static int32_t vnodeWriteIntoBackupWorker(int32_t vgId) { + SVBackupMsg *pMsg = taosAllocateQitem(sizeof(SVBackupMsg)); + if (pMsg == NULL) return TSDB_CODE_VND_OUT_OF_MEMORY; + + pMsg->vgId = vgId; + + int32_t code = taosWriteQitem(tsVBackupQueue, TAOS_QTYPE_RPC, pMsg); + if (code == 0) code = TSDB_CODE_DND_ACTION_IN_PROGRESS; + + return code; +} + +int32_t vnodeBackup(int32_t vgId) { + vTrace("vgId:%d, will backup", vgId); + return vnodeWriteIntoBackupWorker(vgId); +} + +int32_t vnodeInitBackup() { + tsVBackupQset = taosOpenQset(); + + tsVBackupPool.num = 1; + tsVBackupPool.worker = calloc(sizeof(SVBackupWorker), tsVBackupPool.num); + + if (tsVBackupPool.worker == NULL) return -1; + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + pWorker->workerId = i; + vDebug("vbackup:%d is created", i); + } + + vDebug("vbackup is initialized, num:%d qset:%p", tsVBackupPool.num, tsVBackupQset); + + return vnodeStartBackup(); +} + +void vnodeCleanupBackup() { + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + if (taosCheckPthreadValid(pWorker->thread)) { + 
taosQsetThreadResume(tsVBackupQset); + } + vDebug("vbackup:%d is closed", i); + } + + for (int32_t i = 0; i < tsVBackupPool.num; ++i) { + SVBackupWorker *pWorker = tsVBackupPool.worker + i; + vDebug("vbackup:%d start to join", i); + if (taosCheckPthreadValid(pWorker->thread)) { + pthread_join(pWorker->thread, NULL); + } + vDebug("vbackup:%d join success", i); + } + + vDebug("vbackup is closed, qset:%p", tsVBackupQset); + + taosCloseQset(tsVBackupQset); + tsVBackupQset = NULL; + + tfree(tsVBackupPool.worker); + + vDebug("vbackup queue:%p is freed", tsVBackupQueue); + taosCloseQueue(tsVBackupQueue); + tsVBackupQueue = NULL; +} diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c index ded39e67cc..69d94aff87 100644 --- a/src/vnode/src/vnodeMain.c +++ b/src/vnode/src/vnodeMain.c @@ -27,6 +27,7 @@ #include "vnodeVersion.h" #include "vnodeMgmt.h" #include "vnodeWorker.h" +#include "vnodeBackup.h" #include "vnodeMain.h" static int32_t vnodeProcessTsdbStatus(void *arg, int32_t status, int32_t eno); @@ -91,6 +92,23 @@ int32_t vnodeCreate(SCreateVnodeMsg *pVnodeCfg) { return code; } +int32_t vnodeSync(int32_t vgId) { + SVnodeObj *pVnode = vnodeAcquire(vgId); + if (pVnode == NULL) { + vDebug("vgId:%d, failed to sync, vnode not find", vgId); + return TSDB_CODE_VND_INVALID_VGROUP_ID; + } + + if (pVnode->role != TAOS_SYNC_ROLE_MASTER) { + vInfo("vgId:%d, vnode will sync, refCount:%d pVnode:%p", pVnode->vgId, pVnode->refCount, pVnode); + syncRecover(pVnode->sync); + } + + vnodeRelease(pVnode); + + return TSDB_CODE_SUCCESS; +} + int32_t vnodeDrop(int32_t vgId) { SVnodeObj *pVnode = vnodeAcquire(vgId); if (pVnode == NULL) { @@ -347,7 +365,6 @@ int32_t vnodeOpen(int32_t vgId) { syncInfo.startSyncFileFp = vnodeStartSyncFile; syncInfo.stopSyncFileFp = vnodeStopSyncFile; syncInfo.getVersionFp = vnodeGetVersion; - syncInfo.resetVersionFp = vnodeResetVersion; syncInfo.sendFileFp = tsdbSyncSend; syncInfo.recvFileFp = tsdbSyncRecv; syncInfo.pTsdb = pVnode->tsdb; @@ -431,18 +448,14 @@ void vnodeDestroy(SVnodeObj *pVnode) { if (pVnode->dropped) { char rootDir[TSDB_FILENAME_LEN] = {0}; - char newDir[TSDB_FILENAME_LEN] = {0}; + char stagingDir[TSDB_FILENAME_LEN] = {0}; sprintf(rootDir, "%s/vnode%d", "vnode", vgId); - sprintf(newDir, "%s/vnode%d", "vnode_bak", vgId); + sprintf(stagingDir, "%s/.staging/vnode%d", "vnode_bak", vgId); - if (0 == tsEnableVnodeBak) { - vInfo("vgId:%d, vnode backup not enabled", pVnode->vgId); - } else { - tfsRmdir(newDir); - tfsRename(rootDir, newDir); - } + tfsRename(rootDir, stagingDir); + + vnodeBackup(vgId); - tfsRmdir(rootDir); dnodeSendStatusMsgToMnode(); } diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index 71d9bc07f5..32f9532138 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -17,6 +17,7 @@ #include "os.h" #include "dnode.h" #include "vnodeStatus.h" +#include "vnodeBackup.h" #include "vnodeWorker.h" #include "vnodeRead.h" #include "vnodeWrite.h" @@ -29,6 +30,7 @@ static void vnodeCleanupHash(void); static void vnodeIncRef(void *ptNode); static SStep tsVnodeSteps[] = { + {"vnode-backup", vnodeInitBackup, vnodeCleanupBackup}, {"vnode-worker", vnodeInitMWorker, vnodeCleanupMWorker}, {"vnode-write", vnodeInitWrite, vnodeCleanupWrite}, {"vnode-read", vnodeInitRead, vnodeCleanupRead}, diff --git a/src/vnode/src/vnodeRead.c b/src/vnode/src/vnodeRead.c index 3618d86a05..0b3264816b 100644 --- a/src/vnode/src/vnodeRead.c +++ b/src/vnode/src/vnodeRead.c @@ -25,7 +25,7 @@ static int32_t 
(*vnodeProcessReadMsgFp[TSDB_MSG_TYPE_MAX])(SVnodeObj *pVnode, SV static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead); static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead); -static int32_t vnodeNotifyCurrentQhandle(void* handle, void* qhandle, int32_t vgId); +static int32_t vnodeNotifyCurrentQhandle(void* handle, uint64_t qId, void* qhandle, int32_t vgId); int32_t vnodeInitRead(void) { vnodeProcessReadMsgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessQueryMsg; @@ -167,7 +167,7 @@ static int32_t vnodePutItemIntoReadQueue(SVnodeObj *pVnode, void **qhandle, void * @param ahandle sqlObj address at client side * @return */ -static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle, bool *freeHandle, void *ahandle) { +static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, uint64_t qId, void **handle, bool *freeHandle, void *ahandle) { bool continueExec = false; int32_t code = TSDB_CODE_SUCCESS; @@ -183,7 +183,7 @@ static int32_t vnodeDumpQueryResult(SRspRet *pRet, void *pVnode, void **handle, } } else { *freeHandle = true; - vTrace("QInfo:%p exec completed, free handle:%d", *handle, *freeHandle); + vTrace("QInfo:%"PRIu64"-%p exec completed, free handle:%d", qId, *handle, *freeHandle); } } else { SRetrieveTableRsp *pRsp = (SRetrieveTableRsp *)rpcMallocCont(sizeof(SRetrieveTableRsp)); @@ -220,27 +220,6 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { vError("error rpc msg in query, %s", tstrerror(pRead->code)); } -// assert(pRead->code != TSDB_CODE_RPC_NETWORK_UNAVAIL); -// if (pRead->code == TSDB_CODE_RPC_NETWORK_UNAVAIL) { -// SCancelQueryMsg *pMsg = (SCancelQueryMsg *)pRead->pCont; -//// pMsg->free = htons(killQueryMsg->free); -// pMsg->qhandle = htobe64(pMsg->qhandle); -// -// vWarn("QInfo:%p connection %p broken, kill query", (void *)pMsg->qhandle, pRead->rpcHandle); -//// assert(pRead->contLen > 0 && pMsg->free == 1); -// -// void **qhandle = qAcquireQInfo(pVnode->qMgmt, (uint64_t)pMsg->qhandle); -// if (qhandle == NULL || *qhandle == NULL) { -// vWarn("QInfo:%p invalid qhandle, no matched query handle, conn:%p", (void *)pMsg->qhandle, pRead->rpcHandle); -// } else { -// assert(*qhandle == (void *)pMsg->qhandle); -// -// qKillQuery(*qhandle); -// qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, true); -// } -// -// return TSDB_CODE_TSC_QUERY_CANCELLED; -// } int32_t code = TSDB_CODE_SUCCESS; void ** handle = NULL; @@ -252,7 +231,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { SQueryTableRsp *pRsp = (SQueryTableRsp *)rpcMallocCont(sizeof(SQueryTableRsp)); pRsp->code = code; - pRsp->qid = 0; + pRsp->qId = 0; pRet->len = sizeof(SQueryTableRsp); pRet->rsp = pRsp; @@ -270,11 +249,11 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { return pRsp->code; } else { assert(*handle == pQInfo); - pRsp->qid = htobe64(qId); + pRsp->qId = htobe64(qId); } if (handle != NULL && - vnodeNotifyCurrentQhandle(pRead->rpcHandle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vnodeNotifyCurrentQhandle(pRead->rpcHandle, qId, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { vError("vgId:%d, QInfo:%"PRIu64 "-%p, query discarded since link is broken, %p", pVnode->vgId, qId, *handle, pRead->rpcHandle); pRsp->code = TSDB_CODE_RPC_NETWORK_UNAVAIL; @@ -297,16 +276,17 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } else { assert(pCont != NULL); void **qhandle = (void **)pRead->qhandle; + 
uint64_t qId = 0; vTrace("vgId:%d, QInfo:%p, dnode continues to exec query", pVnode->vgId, *qhandle); // In the retrieve blocking model, only 50% CPU will be used in query processing if (tsRetrieveBlockingModel) { - qTableQuery(*qhandle); // do execute query + qTableQuery(*qhandle, &qId); // do execute query qReleaseQInfo(pVnode->qMgmt, (void **)&qhandle, false); } else { bool freehandle = false; - bool buildRes = qTableQuery(*qhandle); // do execute query + bool buildRes = qTableQuery(*qhandle, &qId); // do execute query // build query rsp, the retrieve request has reached here already if (buildRes) { @@ -318,7 +298,7 @@ static int32_t vnodeProcessQueryMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { pRead->rpcHandle); // set the real rsp error code - pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qhandle, &freehandle, pRead->rpcHandle); + pRead->code = vnodeDumpQueryResult(&pRead->rspRet, pVnode, qId, qhandle, &freehandle, pRead->rpcHandle); // NOTE: set return code to be TSDB_CODE_QRY_HAS_RSP to notify dnode to return msg to client code = TSDB_CODE_QRY_HAS_RSP; @@ -348,32 +328,32 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { SRetrieveTableMsg *pRetrieve = pCont; pRetrieve->free = htons(pRetrieve->free); - pRetrieve->qid = htobe64(pRetrieve->qid); + pRetrieve->qId = htobe64(pRetrieve->qId); - vTrace("vgId:%d, qid:%" PRIu64 ", retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, pRetrieve->qid, + vTrace("vgId:%d, qId:%" PRIu64 ", retrieve msg is disposed, free:%d, conn:%p", pVnode->vgId, pRetrieve->qId, pRetrieve->free, pRead->rpcHandle); memset(pRet, 0, sizeof(SRspRet)); terrno = TSDB_CODE_SUCCESS; int32_t code = TSDB_CODE_SUCCESS; - void **handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qid); + void ** handle = qAcquireQInfo(pVnode->qMgmt, pRetrieve->qId); if (handle == NULL) { code = terrno; terrno = TSDB_CODE_SUCCESS; - } else if (!checkQIdEqual(*handle, pRetrieve->qid)) { + } else if (!checkQIdEqual(*handle, pRetrieve->qId)) { code = TSDB_CODE_QRY_INVALID_QHANDLE; } if (code != TSDB_CODE_SUCCESS) { - vError("vgId:%d, invalid handle in retrieving result, code:%s, QInfo:%" PRIu64, pVnode->vgId, tstrerror(code), pRetrieve->qid); + vError("vgId:%d, invalid qId in retrieving result, code:%s, QInfo:%" PRIu64, pVnode->vgId, tstrerror(code), pRetrieve->qId); vnodeBuildNoResultQueryRsp(pRet); return code; } // kill current query and free corresponding resources. 
if (pRetrieve->free == 1) { - vWarn("vgId:%d, QInfo:%"PRIu64 "-%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pRetrieve->qid, *handle); + vWarn("vgId:%d, QInfo:%"PRIu64 "-%p, retrieve msg received to kill query and free qhandle", pVnode->vgId, pRetrieve->qId, *handle); qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true); @@ -383,8 +363,8 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } // register the qhandle to connect to quit query immediate if connection is broken - if (vnodeNotifyCurrentQhandle(pRead->rpcHandle, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { - vError("vgId:%d, QInfo:%"PRIu64 "-%p, retrieve discarded since link is broken, %p", pVnode->vgId, pRetrieve->qid, *handle, pRead->rpcHandle); + if (vnodeNotifyCurrentQhandle(pRead->rpcHandle, pRetrieve->qId, *handle, pVnode->vgId) != TSDB_CODE_SUCCESS) { + vError("vgId:%d, QInfo:%"PRIu64 "-%p, retrieve discarded since link is broken, conn:%p", pVnode->vgId, pRetrieve->qId, *handle, pRead->rpcHandle); code = TSDB_CODE_RPC_NETWORK_UNAVAIL; qKillQuery(*handle); qReleaseQInfo(pVnode->qMgmt, (void **)&handle, true); @@ -413,7 +393,7 @@ static int32_t vnodeProcessFetchMsg(SVnodeObj *pVnode, SVReadMsg *pRead) { } // ahandle is the sqlObj pointer - code = vnodeDumpQueryResult(pRet, pVnode, handle, &freeHandle, pRead->rpcHandle); + code = vnodeDumpQueryResult(pRet, pVnode, pRetrieve->qId, handle, &freeHandle, pRead->rpcHandle); } // If qhandle is not added into vread queue, the query should be completed already or paused with error.
-int32_t vnodeNotifyCurrentQhandle(void *handle, void *qhandle, int32_t vgId) { +int32_t vnodeNotifyCurrentQhandle(void *handle, uint64_t qId, void *qhandle, int32_t vgId) { SRetrieveTableMsg *pMsg = rpcMallocCont(sizeof(SRetrieveTableMsg)); - pMsg->qhandle = htobe64((uint64_t)qhandle); + pMsg->qId = htobe64(qId); pMsg->header.vgId = htonl(vgId); pMsg->header.contLen = htonl(sizeof(SRetrieveTableMsg)); - vTrace("QInfo:%p register qhandle to connect:%p", qhandle, handle); + vTrace("QInfo:%"PRIu64"-%p register qhandle to connect:%p", qId, qhandle, handle); return rpcReportProgress(handle, (char *)pMsg, sizeof(SRetrieveTableMsg)); } diff --git a/src/vnode/src/vnodeSync.c b/src/vnode/src/vnodeSync.c index 929dd15926..aa4cf0fc15 100644 --- a/src/vnode/src/vnodeSync.c +++ b/src/vnode/src/vnodeSync.c @@ -107,8 +107,9 @@ void vnodeStopSyncFile(int32_t vgId, uint64_t fversion) { pVnode->fversion = fversion; pVnode->version = fversion; vnodeSaveVersion(pVnode); + walResetVersion(pVnode->wal, fversion); - vDebug("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); + vInfo("vgId:%d, datafile is synced, fver:%" PRIu64 " vver:%" PRIu64, vgId, fversion, fversion); vnodeSetReadyStatus(pVnode); vnodeRelease(pVnode); @@ -158,22 +159,6 @@ int32_t vnodeGetVersion(int32_t vgId, uint64_t *fver, uint64_t *wver) { return code; } -int32_t vnodeResetVersion(int32_t vgId, uint64_t fver) { - SVnodeObj *pVnode = vnodeAcquire(vgId); - if (pVnode == NULL) { - vError("vgId:%d, vnode not found while reset version", vgId); - return -1; - } - - pVnode->fversion = fver; - pVnode->version = fver; - walResetVersion(pVnode->wal, fver); - vInfo("vgId:%d, version reset to %" PRIu64, vgId, fver); - - vnodeRelease(pVnode); - return 0; -} - void vnodeConfirmForward(void *vparam, uint64_t version, int32_t code, bool force) { SVnodeObj *pVnode = vparam; syncConfirmForward(pVnode->sync, version, code, force); diff --git a/src/vnode/src/vnodeWrite.c b/src/vnode/src/vnodeWrite.c index e318978a11..92e1ba804b 100644 --- a/src/vnode/src/vnodeWrite.c +++ b/src/vnode/src/vnodeWrite.c @@ -96,6 +96,7 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara // write into WAL code = walWrite(pVnode->wal, pHead); if (code < 0) { + if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1); vError("vgId:%d, hver:%" PRIu64 " vver:%" PRIu64 " code:0x%x", pVnode->vgId, pHead->version, pVnode->version, code); return code; } @@ -104,7 +105,10 @@ int32_t vnodeProcessWrite(void *vparam, void *wparam, int32_t qtype, void *rpara // write data locally code = (*vnodeProcessWriteMsgFp[pHead->msgType])(pVnode, pHead->cont, pRspRet); - if (code < 0) return code; + if (code < 0) { + if (syncCode > 0) atomic_sub_fetch_32(&pWrite->processedCount, 1); + return code; + } return syncCode; } diff --git a/tests/Jenkinsfile b/tests/Jenkinsfile index 7cdcfb2e24..178a0446c3 100644 --- a/tests/Jenkinsfile +++ b/tests/Jenkinsfile @@ -1,9 +1,8 @@ def pre_test(){ - catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { - sh ''' - sudo rmtaos - ''' - } + + sh ''' + sudo rmtaos||echo 'no taosd installed' + ''' sh ''' cd ${WKC} git reset --hard @@ -56,14 +55,8 @@ pipeline { cd ${WKC}/tests ./test-all.sh b1 date''' - sh ''' - cd ${WKC}/tests - ./test-all.sh full jdbc - date''' - sh ''' - cd ${WKC}/tests - ./test-all.sh full unit - date''' + + } } @@ -83,9 +76,20 @@ pipeline { catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { sh ''' cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf 
/var/log/taos/* ./handle_crash_gen_val_log.sh ''' } + catchError(buildResult: 'SUCCESS', stageResult: 'FAILURE') { + sh ''' + cd ${WKC}/tests/pytest + rm -rf /var/lib/taos/* + rm -rf /var/log/taos/* + ./handle_taosd_val_log.sh + ''' + } + sh''' systemctl start taosd sleep 10 @@ -136,6 +140,10 @@ pipeline { ./test-all.sh b2 date ''' + sh ''' + cd ${WKC}/tests + ./test-all.sh full unit + date''' } } @@ -154,6 +162,10 @@ pipeline { ''' } sh ''' + cd ${WKC}/tests + ./test-all.sh full jdbc + date''' + sh ''' cd ${WKC}/tests/pytest ./valgrind-test.sh 2>&1 > mem-error-out.log ./handle_val_log.sh @@ -162,6 +174,11 @@ pipeline { cd ${WKC}/tests ./test-all.sh b3 date''' + sh ''' + date + cd ${WKC}/tests + ./test-all.sh full example + date''' } } diff --git a/tests/examples/JDBC/JDBCDemo/pom.xml b/tests/examples/JDBC/JDBCDemo/pom.xml index 167e7e37ae..66e866a2d3 100644 --- a/tests/examples/JDBC/JDBCDemo/pom.xml +++ b/tests/examples/JDBC/JDBCDemo/pom.xml @@ -13,7 +13,7 @@ com.taosdata.jdbc taos-jdbcdriver - 2.0.20 + 2.0.22 diff --git a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java index 07a21e79d1..da865b3ffd 100644 --- a/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java +++ b/tests/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JDBCDemo.java @@ -68,12 +68,12 @@ public class JDBCDemo { } private void insert() { - final String sql = "insert into test.weather (ts, temperature, humidity) values(now, 20.5, 34)"; + final String sql = "insert into " + dbName + "." + tbName + " (ts, temperature, humidity) values(now, 20.5, 34)"; exuete(sql); } private void select() { - final String sql = "select * from test.weather"; + final String sql = "select * from "+ dbName + "." 
+ tbName; executeQuery(sql); } diff --git a/tests/examples/JDBC/taosdemo/src/main/resources/insert.json b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json index a7bd87e6d3..35c7773175 100644 --- a/tests/examples/JDBC/taosdemo/src/main/resources/insert.json +++ b/tests/examples/JDBC/taosdemo/src/main/resources/insert.json @@ -38,7 +38,7 @@ "insert_rows": 100, "multi_thread_write_one_tbl": "no", "number_of_tbl_in_one_sql": 0, - "rows_per_tbl": 3, + "interlace_rows": 3, "max_sql_len": 1024, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/examples/c/asyncdemo.c b/tests/examples/c/asyncdemo.c index 16a14e9654..d711ce22c1 100644 --- a/tests/examples/c/asyncdemo.c +++ b/tests/examples/c/asyncdemo.c @@ -163,6 +163,16 @@ int main(int argc, char *argv[]) getchar(); + while(1) { + if (tablesProcessed < numOfTables) { + printf("wait for process finished\n"); + sleep(1); + continue; + } + + break; + } + taos_close(taos); free(tableList); diff --git a/tests/examples/go/taosdemo.go b/tests/examples/go/taosdemo.go index 2c3a7d09b6..003f5aeddc 100644 --- a/tests/examples/go/taosdemo.go +++ b/tests/examples/go/taosdemo.go @@ -16,45 +16,47 @@ package main import ( "database/sql" + "flag" "fmt" - _ "github.com/taosdata/driver-go/taosSql" + "math/rand" "os" - "sync" "runtime" "strconv" + "sync" "time" - "flag" - "math/rand" + + _ "github.com/taosdata/driver-go/taosSql" + //"golang.org/x/sys/unix" ) const ( - maxLocationSize = 32 - maxSqlBufSize = 65480 + maxLocationSize = 32 + //maxSqlBufSize = 65480 ) -var locations = [maxLocationSize]string { - "Beijing", "Shanghai", "Guangzhou", "Shenzhen", - "HangZhou", "Tianjin", "Wuhan", "Changsha", - "Nanjing", "Xian"} +var locations = [maxLocationSize]string{ + "Beijing", "Shanghai", "Guangzhou", "Shenzhen", + "HangZhou", "Tianjin", "Wuhan", "Changsha", + "Nanjing", "Xian"} type config struct { - hostName string - serverPort int - user string - password string - dbName string - supTblName string - tablePrefix string - numOftables int - numOfRecordsPerTable int - numOfRecordsPerReq int - numOfThreads int - startTimestamp string - startTs int64 + hostName string + serverPort int + user string + password string + dbName string + supTblName string + tablePrefix string + numOftables int + numOfRecordsPerTable int + numOfRecordsPerReq int + numOfThreads int + startTimestamp string + startTs int64 - keep int - days int + keep int + days int } var configPara config @@ -62,7 +64,7 @@ var taosDriverName = "taosSql" var url string func init() { - flag.StringVar(&configPara.hostName, "h", "127.0.0.1","The host to connect to TDengine server.") + flag.StringVar(&configPara.hostName, "h", "127.0.0.1", "The host to connect to TDengine server.") flag.IntVar(&configPara.serverPort, "p", 6030, "The TCP/IP port number to use for the connection to TDengine server.") flag.StringVar(&configPara.user, "u", "root", "The TDengine user name to use when connecting to the server.") flag.StringVar(&configPara.password, "P", "taosdata", "The password to use when connecting to the server.") @@ -80,14 +82,14 @@ func init() { configPara.supTblName = "meters" startTs, err := time.ParseInLocation("2006-01-02 15:04:05", configPara.startTimestamp, time.Local) - if err==nil { - configPara.startTs = startTs.UnixNano() / 1e6 + if err == nil { + configPara.startTs = startTs.UnixNano() / 1e6 } } func printAllArgs() { fmt.Printf("\n============= args parse result: =============\n") - fmt.Printf("hostName: %v\n", configPara.hostName) + fmt.Printf("hostName: %v\n", configPara.hostName) 
fmt.Printf("serverPort: %v\n", configPara.serverPort) fmt.Printf("usr: %v\n", configPara.user) fmt.Printf("password: %v\n", configPara.password) @@ -104,10 +106,10 @@ func printAllArgs() { func main() { printAllArgs() fmt.Printf("Please press enter key to continue....\n") - fmt.Scanln() + _, _ = fmt.Scanln() url = "root:taosdata@/tcp(" + configPara.hostName + ":" + strconv.Itoa(configPara.serverPort) + ")/" - //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) + //url = fmt.Sprintf("%s:%s@/tcp(%s:%d)/%s?interpolateParams=true", configPara.user, configPara.password, configPara.hostName, configPara.serverPort, configPara.dbName) // open connect to taos server //db, err := sql.Open(taosDriverName, url) //if err != nil { @@ -115,7 +117,7 @@ func main() { // os.Exit(1) //} //defer db.Close() - rand.Seed(time.Now().Unix()) + rand.Seed(time.Now().Unix()) createDatabase(configPara.dbName, configPara.supTblName) fmt.Printf("======== create database success! ========\n\n") @@ -138,7 +140,7 @@ func main() { func createDatabase(dbName string, supTblName string) { db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() @@ -165,27 +167,27 @@ func createDatabase(dbName string, supTblName string) { checkErr(err, sqlStr) } -func multiThreadCreateTable(threads int, ntables int, dbName string, tablePrefix string) { +func multiThreadCreateTable(threads int, nTables int, dbName string, tablePrefix string) { st := time.Now().UnixNano() - if (threads < 1) { - threads = 1; + if threads < 1 { + threads = 1 } - a := ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; + a := nTables / threads + if a < 1 { + threads = nTables + a = 1 } - b := ntables % threads; + b := nTables % threads - last := 0; + last := 0 endTblId := 0 wg := sync.WaitGroup{} for i := 0; i < threads; i++ { startTblId := last - if (i < b ) { + if i < b { endTblId = last + a } else { endTblId = last + a - 1 @@ -206,42 +208,43 @@ func createTable(dbName string, childTblPrefix string, startTblId int, endTblId db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() - for i := startTblId; i <= endTblId; i++ { - sqlStr := "create table if not exists " + dbName + "." + childTblPrefix + strconv.Itoa(i) + " using " + dbName + ".meters tags('" + locations[i%maxLocationSize] + "', " + strconv.Itoa(i) + ");" - //fmt.Printf("sqlStr: %v\n", sqlStr) - _, err = db.Exec(sqlStr) - checkErr(err, sqlStr) - } - wg.Done() - runtime.Goexit() + for i := startTblId; i <= endTblId; i++ { + sqlStr := "create table if not exists " + dbName + "." 
+ childTblPrefix + strconv.Itoa(i) + " using " + dbName + ".meters tags('" + locations[i%maxLocationSize] + "', " + strconv.Itoa(i) + ");" + //fmt.Printf("sqlStr: %v\n", sqlStr) + _, err = db.Exec(sqlStr) + checkErr(err, sqlStr) + } + wg.Done() + runtime.Goexit() } func generateRowData(ts int64) string { - voltage := rand.Int() % 1000 - current := 200 + rand.Float32() - phase := rand.Float32() - values := "( " + strconv.FormatInt(ts, 10) + ", " + strconv.FormatFloat(float64(current), 'f', 6, 64) + ", " + strconv.Itoa(voltage) + ", " + strconv.FormatFloat(float64(phase), 'f', 6, 64) + " ) " - return values + voltage := rand.Int() % 1000 + current := 200 + rand.Float32() + phase := rand.Float32() + values := "( " + strconv.FormatInt(ts, 10) + ", " + strconv.FormatFloat(float64(current), 'f', 6, 64) + ", " + strconv.Itoa(voltage) + ", " + strconv.FormatFloat(float64(phase), 'f', 6, 64) + " ) " + return values } + func insertData(dbName string, childTblPrefix string, startTblId int, endTblId int, wg *sync.WaitGroup) { //fmt.Printf("subThread[%d]: insert data to table from %d to %d \n", unix.Gettid(), startTblId, endTblId) // windows.GetCurrentThreadId() db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() - tmpTs := configPara.startTs; + tmpTs := configPara.startTs //rand.New(rand.NewSource(time.Now().UnixNano())) - for tID := startTblId; tID <= endTblId; tID++{ + for tID := startTblId; tID <= endTblId; tID++ { totalNum := 0 for { sqlStr := "insert into " + dbName + "." + childTblPrefix + strconv.Itoa(tID) + " values " @@ -249,13 +252,13 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i for { tmpTs += 1000 valuesOfRow := generateRowData(tmpTs) - currRowNum += 1 - totalNum += 1 + currRowNum += 1 + totalNum += 1 sqlStr = fmt.Sprintf("%s %s", sqlStr, valuesOfRow) - if (currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable) { - break + if currRowNum >= configPara.numOfRecordsPerReq || totalNum >= configPara.numOfRecordsPerTable { + break } } @@ -265,12 +268,12 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i count, err := res.RowsAffected() checkErr(err, "rows affected") - if (count != int64(currRowNum)) { - fmt.Printf("insert data, expect affected:%d, actual:%d\n", currRowNum, count) + if count != int64(currRowNum) { + fmt.Printf("insert data, expect affected:%d, actual:%d\n", currRowNum, count) os.Exit(1) } - if (totalNum >= configPara.numOfRecordsPerTable) { + if totalNum >= configPara.numOfRecordsPerTable { break } } @@ -279,44 +282,46 @@ func insertData(dbName string, childTblPrefix string, startTblId int, endTblId i wg.Done() runtime.Goexit() } -func multiThreadInsertData(threads int, ntables int, dbName string, tablePrefix string) { + +func multiThreadInsertData(threads int, nTables int, dbName string, tablePrefix string) { st := time.Now().UnixNano() - if (threads < 1) { - threads = 1; + if threads < 1 { + threads = 1 } - a := ntables / threads; - if (a < 1) { - threads = ntables; - a = 1; + a := nTables / threads + if a < 1 { + threads = nTables + a = 1 } - b := ntables % threads; + b := nTables % threads - last := 0; + last := 0 endTblId := 0 wg := sync.WaitGroup{} for i := 0; i < threads; i++ { startTblId := last - if (i < b ) { + if i < b { endTblId = last + a } else { endTblId = last + a - 1 } last = endTblId + 1 wg.Add(1) - go insertData(dbName, 
tablePrefix, startTblId , endTblId, &wg) + go insertData(dbName, tablePrefix, startTblId, endTblId, &wg) } wg.Wait() et := time.Now().UnixNano() fmt.Printf("insert data spent duration: %6.6fs\n", (float32(et-st))/1e9) } -func selectTest(dbName string, tbPrefix string, supTblName string){ + +func selectTest(dbName string, tbPrefix string, supTblName string) { db, err := sql.Open(taosDriverName, url) if err != nil { - fmt.Println("Open database error: %s\n", err) + fmt.Printf("Open database error: %s\n", err) os.Exit(1) } defer db.Close() @@ -332,12 +337,12 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ fmt.Printf("query sql: %s\n", sqlStr) for rows.Next() { var ( - ts string - current float32 - voltage int - phase float32 - location string - groupid int + ts string + current float32 + voltage int + phase float32 + location string + groupid int ) err := rows.Scan(&ts, ¤t, &voltage, &phase, &location, &groupid) if err != nil { @@ -352,7 +357,7 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ } // select sql 2 - sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa( rand.Int() % configPara.numOftables) + sqlStr = "select avg(voltage), min(voltage), max(voltage) from " + dbName + "." + tbPrefix + strconv.Itoa(rand.Int()%configPara.numOftables) rows, err = db.Query(sqlStr) checkErr(err, sqlStr) @@ -360,9 +365,9 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ fmt.Printf("\nquery sql: %s\n", sqlStr) for rows.Next() { var ( - voltageAvg float32 - voltageMin int - voltageMax int + voltageAvg float32 + voltageMin int + voltageMax int ) err := rows.Scan(&voltageAvg, &voltageMin, &voltageMax) if err != nil { @@ -385,10 +390,10 @@ func selectTest(dbName string, tbPrefix string, supTblName string){ fmt.Printf("\nquery sql: %s\n", sqlStr) for rows.Next() { var ( - lastTs string - lastCurrent float32 - lastVoltage int - lastPhase float32 + lastTs string + lastCurrent float32 + lastVoltage int + lastPhase float32 ) err := rows.Scan(&lastTs, &lastCurrent, &lastVoltage, &lastPhase) if err != nil { diff --git a/tests/perftest-scripts/perftest-query.sh b/tests/perftest-scripts/perftest-query.sh index e43a45ed5e..8b9fa1a546 100755 --- a/tests/perftest-scripts/perftest-query.sh +++ b/tests/perftest-scripts/perftest-query.sh @@ -50,7 +50,7 @@ function buildTDengine { echo "repo up-to-date" else echo "repo need to pull" - git pull > /dev/null + git pull > /dev/null 2>&1 LOCAL_COMMIT=`git rev-parse --short @` cd debug @@ -71,27 +71,14 @@ function runQueryPerfTest { python3 query/queryPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT python3 insert/insertFromCSVPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT - - yes | taosdemo -c /etc/taosperf/ -d taosdemo_insert_test -x > taosdemoperf.txt - - CREATETABLETIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'` - INSERTRECORDSTIME=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'` - REQUESTSPERSECOND=`grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $13}'` - delay=`grep 'delay' taosdemoperf.txt | awk '{print $4}'` - AVGDELAY=`echo ${delay:0:${#delay}-3}` - delay=`grep 'delay' taosdemoperf.txt | awk '{print $6}'` - MAXDELAY=`echo ${delay:0:${#delay}-3}` - delay=`grep 'delay' taosdemoperf.txt | awk '{print $8}'` - MINDELAY=`echo ${delay:0:${#delay}-2}` - python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT -t $CREATETABLETIME -i $INSERTRECORDSTIME -r $REQUESTSPERSECOND -avg $AVGDELAY -max $MAXDELAY -min 
$MINDELAY | tee -a $PERFORMANCE_TEST_REPORT - [ -f taosdemoperf.txt ] && rm taosdemoperf.txt + python3 tools/taosdemoPerformance.py -c $LOCAL_COMMIT | tee -a $PERFORMANCE_TEST_REPORT } function sendReport { echo "send report" - receiver="pxiao@taosdata.com" + receiver="develop@taosdata.com" mimebody="MIME-Version: 1.0\nContent-Type: text/html; charset=utf-8\n" cd $TDENGINE_DIR diff --git a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh index 0057a970ca..60c81cd82b 100755 --- a/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh +++ b/tests/pytest/cluster/clusterEnvSetup/buildClusterEnv.sh @@ -68,20 +68,14 @@ function prepareBuild { rm -rf $CURR_DIR/../../../../release/* fi - if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then - cd $CURR_DIR/../../../../packaging - echo $CURR_DIR - echo $IN_TDINTERNAL - echo "generating TDeninger packages" - if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then - pwd - ./release.sh -v cluster -n $VERSION >> /dev/null 2>&1 - else - pwd - ./release.sh -v edge -n $VERSION >> /dev/null 2>&1 - fi + cd $CURR_DIR/../../../../packaging - if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then + if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then + if [ ! -e $DOCKER_DIR/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz ]; then + + echo "generating TDengine enterprise packages" + ./release.sh -v cluster -n $VERSION >> /dev/null 2>&1 + if [ ! -e $CURR_DIR/../../../../release/TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz ]; then echo "no TDengine install package found" exit 1 @@ -91,7 +85,17 @@ function prepareBuild { echo "no arbitrator install package found" exit 1 fi - else + + cd $CURR_DIR/../../../../release + mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR + fi + else + if [ ! -e $DOCKER_DIR/TDengine-server-$VERSION-Linux-x64.tar.gz ] || [ ! -e $DOCKER_DIR/TDengine-arbitrator-$VERSION-Linux-x64.tar.gz ]; then + + echo "generating TDengine community packages" + ./release.sh -v edge -n $VERSION >> /dev/null 2>&1 + if [ !
-e $CURR_DIR/../../../../release/TDengine-server-$VERSION-Linux-x64.tar.gz ]; then echo "no TDengine install package found" exit 1 @@ -101,16 +105,11 @@ function prepareBuild { echo "no arbitrator install package found" exit 1 fi - fi - cd $CURR_DIR/../../../../release - if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then - mv TDengine-enterprise-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR - mv TDengine-enterprise-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR - else + cd $CURR_DIR/../../../../release mv TDengine-server-$VERSION-Linux-x64.tar.gz $DOCKER_DIR mv TDengine-arbitrator-$VERSION-Linux-x64.tar.gz $DOCKER_DIR - fi + fi fi rm -rf $DOCKER_DIR/*.yml diff --git a/tests/pytest/cluster/clusterEnvSetup/insert.json b/tests/pytest/cluster/clusterEnvSetup/insert.json index 56a64b7b85..4548ef74e6 100644 --- a/tests/pytest/cluster/clusterEnvSetup/insert.json +++ b/tests/pytest/cluster/clusterEnvSetup/insert.json @@ -39,7 +39,7 @@ "insert_rows": 100000, "multi_thread_write_one_tbl": "no", "number_of_tbl_in_one_sql": 1, - "rows_per_tbl": 100, + "interlace_rows": 100, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index 5037f2c399..bda52ffe6c 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -139,6 +139,18 @@ python3 ./test.py -f import_merge/importInsertThenImport.py python3 ./test.py -f import_merge/importCSV.py #======================p1-end=============== #======================p2-start=============== +# tools +python3 test.py -f tools/taosdumpTest.py + +python3 test.py -f tools/taosdemoTest.py +python3 test.py -f tools/taosdemoTestWithoutMetric.py +python3 test.py -f tools/taosdemoTestWithJson.py +python3 test.py -f tools/taosdemoTestLimitOffset.py +python3 test.py -f tools/taosdemoTestTblAlt.py +python3 test.py -f tools/taosdemoTestSampleData.py +python3 test.py -f tools/taosdemoTestInterlace.py +python3 test.py -f tools/taosdemoTestQuery.py + # update python3 ./test.py -f update/allow_update.py python3 ./test.py -f update/allow_update-0.py @@ -164,6 +176,11 @@ python3 ./test.py -f user/pass_len.py # stable python3 ./test.py -f stable/query_after_reset.py +# perfbenchmark +python3 ./test.py -f perfbenchmark/bug3433.py +python3 ./test.py -f perfbenchmark/bug3589.py + + #query python3 ./test.py -f query/filter.py python3 ./test.py -f query/filterCombo.py @@ -196,6 +213,11 @@ python3 ./test.py -f query/bug2119.py python3 ./test.py -f query/isNullTest.py python3 ./test.py -f query/queryWithTaosdKilled.py python3 ./test.py -f query/floatCompare.py +python3 ./test.py -f query/query1970YearsAf.py +python3 ./test.py -f query/bug3351.py +python3 ./test.py -f query/bug3375.py + + #stream python3 ./test.py -f stream/metric_1.py @@ -230,16 +252,6 @@ python3 client/twoClients.py python3 test.py -f query/queryInterval.py python3 test.py -f query/queryFillTest.py -# tools -python3 test.py -f tools/taosdemoTest.py -python3 test.py -f tools/taosdemoTestWithoutMetric.py -python3 test.py -f tools/taosdemoTestWithJson.py -python3 test.py -f tools/taosdemoTestLimitOffset.py -python3 test.py -f tools/taosdumpTest.py -python3 test.py -f tools/taosdemoTest2.py -python3 test.py -f tools/taosdemoTestSampleData.py -python3 test.py -f tools/taosdemoTestInterlace.py - # subscribe python3 test.py -f subscribe/singlemeter.py #python3 test.py -f subscribe/stability.py diff --git a/tests/pytest/handle_taosd_val_log.sh b/tests/pytest/handle_taosd_val_log.sh new file mode 100755 index 0000000000..6b7f669efe --- 
/dev/null +++ b/tests/pytest/handle_taosd_val_log.sh @@ -0,0 +1,68 @@ +#!/bin/bash +# Color setting +RED='\033[0;31m' +GREEN='\033[1;32m' +GREEN_DARK='\033[0;32m' +GREEN_UNDERLINE='\033[4;32m' +NC='\033[0m' +IN_TDINTERNAL="community" +TDIR=`pwd` +if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then + cd ../.. +else + cd ../../.. +fi +TOP_DIR=`pwd` +TAOSD_DIR=`find . -name "taosd"|grep -v community|grep debug|head -n1` +VALGRIND_OUT=taosd_valgrind.out +VALGRIND_ERR=taosd_valgrind.err +rm -rf /var/lib/taos/* +# nohup valgrind --tool=memcheck --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR & +nohup valgrind --leak-check=yes $TAOSD_DIR > $TDIR/$VALGRIND_OUT 2> $TDIR/$VALGRIND_ERR & +sleep 20 +cd - +./crash_gen.sh -p -t 10 -s 200 +ps -ef |grep valgrind|grep -v grep|awk '{print $2}'|xargs kill -term +while true +do + monitoring=` ps -ef|grep valgrind |grep -v grep| wc -l` + if [ $monitoring -eq 0 ] + then + echo "Manipulator is not running " + break + else + sleep 1 + fi +done + +grep 'start to execute\|ERROR SUMMARY' $VALGRIND_ERR | grep -v 'grep' | uniq | tee taosd_mem_err.log + +for memError in `grep 'ERROR SUMMARY' taosd_mem_err.log | awk '{print $4}'` +do +memError=(${memError//,/}) +if [ -n "$memError" ]; then + if [ "$memError" -gt 12 ]; then + echo -e "${RED} ## Memory errors number valgrind reports is $memError.\ + More than our threshold! ## ${NC}" + fi +fi +done + +grep 'start to execute\|definitely lost:' $VALGRIND_ERR|grep -v 'grep'|uniq|tee taosd-definitely-lost-out.log +for defiMemError in `grep 'definitely lost:' taosd-definitely-lost-out.log | awk '{print $7}'` +do +defiMemError=(${defiMemError//,/}) +if [ -n "$defiMemError" ]; then + if [ "$defiMemError" -gt 0 -a "$defiMemError" -lt 1013 ]; then + cat $VALGRIND_ERR + echo -e "${RED} ## Memory errors number valgrind reports \ + Definitely lost is $defiMemError. More than our threshold! ## ${NC}" + exit 8 + elif [ "$defiMemError" -gt 1013 ];then #add for azure + cat $VALGRIND_ERR + echo -e "${RED} ## Memory errors number valgrind reports \ + Definitely lost is $defiMemError. More than our threshold! 
## ${NC}" + exit 8 + fi +fi +done diff --git a/tests/pytest/insert/metadataUpdate.py b/tests/pytest/insert/metadataUpdate.py index 76b9a3ae8f..4c32b0e39a 100644 --- a/tests/pytest/insert/metadataUpdate.py +++ b/tests/pytest/insert/metadataUpdate.py @@ -25,6 +25,8 @@ class TDTestCase: tdSql.init(conn.cursor(), logSql) self.ts = 1538548685000 + self.tables = 10 + self.rows = 1000 def updateMetadata(self): self.host = "127.0.0.1" @@ -37,6 +39,29 @@ class TDTestCase: self.cursor.execute("alter table db.tb add column col2 int") print("alter table done") + def deleteTableAndRecreate(self): + self.host = "127.0.0.1" + self.user = "root" + self.password = "taosdata" + self.config = tdDnodes.getSimCfgPath() + + self.conn = taos.connect(host = self.host, user = self.user, password = self.password, config = self.config) + self.cursor = self.conn.cursor() + + self.cursor.execute("use test") + print("drop table stb") + self.cursor.execute("drop table stb") + + print("create table stb") + self.cursor.execute("create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20))") + print("insert data") + for i in range(self.tables): + city = "beijing" if i % 2 == 0 else "shanghai" + self.cursor.execute("create table tb%d using stb tags(%d, '%s')" % (i, i, city)) + for j in range(self.rows): + self.cursor.execute("insert into tb%d values(%d, %d)" % (i, self.ts + j, j * 100000)) + + def run(self): tdSql.prepare() @@ -59,6 +84,36 @@ class TDTestCase: tdSql.query("select * from tb") tdSql.checkRows(2) + # Add test case: https://jira.taosdata.com:18080/browse/TD-3474 + + print("==============step1") + tdSql.execute("create database test") + tdSql.execute("use test") + tdSql.execute("create table if not exists stb (ts timestamp, col1 int) tags(areaid int, city nchar(20))") + + for i in range(self.tables): + city = "beijing" if i % 2 == 0 else "shanghai" + tdSql.execute("create table tb%d using stb tags(%d, '%s')" % (i, i, city)) + for j in range(self.rows): + tdSql.execute("insert into tb%d values(%d, %d)" % (i, self.ts + j, j * 100000)) + + tdSql.query("select count(*) from stb") + tdSql.checkData(0, 0, 10000) + + tdSql.query("select count(*) from tb1") + tdSql.checkData(0, 0, 1000) + + p = Process(target=self.deleteTableAndRecreate, args=()) + p.start() + p.join() + p.terminate() + + tdSql.query("select count(*) from stb") + tdSql.checkData(0, 0, 10000) + + tdSql.query("select count(*) from tb1") + tdSql.checkData(0, 0, 1000) + def stop(self): tdSql.close() tdLog.success("%s successfully executed" % __file__) diff --git a/tests/pytest/perfbenchmark/bug3433.py b/tests/pytest/perfbenchmark/bug3433.py new file mode 100644 index 0000000000..80e9cce0dc --- /dev/null +++ b/tests/pytest/perfbenchmark/bug3433.py @@ -0,0 +1,245 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import json + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def __init__(self): + self.path = "" + + def init(self, conn, logSql): + tdLog.debug(f"start to execute {__file__}") + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def getcfgPath(self, path): + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug(f"binPath {binPath}") + binPath = os.path.realpath(binPath) + tdLog.debug(f"binPath real path {binPath}") + if path == "": + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + return self.path + + def getCfgDir(self): + self.getcfgPath(self.path) + self.cfgDir = f"{self.path}/sim/psim/cfg" + return self.cfgDir + + def creatcfg(self): + dbinfo = { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 3650, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + # set stable schema + stable1 = { + "name": "stb1", + "child_table_exists": "no", + "childtable_count": 1000, + "childtable_prefix": "t1", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 1, + "max_sql_len": 65480, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 20000, + "start_timestamp": "2020-12-31 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + + ], + "tags": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + ] + } + stable2 = { + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 1000, + "childtable_prefix": "t2", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 1, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 1, + "max_sql_len": 65480, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 20000, + "start_timestamp": 
"2020-12-31 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + + ], + "tags": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + ] + } + + # create different stables like stable1 and add to list super_tables + super_tables = [] + super_tables.append(stable1) + super_tables.append(stable2) + database = { + "dbinfo": dbinfo, + "super_tables": super_tables + } + + cfgdir = self.getCfgDir() + create_table = { + "filetype": "insert", + "cfgdir": cfgdir, + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + "thread_count_create_tbl": 4, + "result_file": "/tmp/insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "databases": [database] + } + return create_table + + def createinsertfile(self): + create_table = self.creatcfg() + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_create_table = f"/tmp/insert_{date}.json" + + with open(file_create_table, 'w') as f: + json.dump(create_table, f) + return file_create_table + + def inserttable(self, filepath): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info(f"taosd found in {buildPath}") + binPath = buildPath + "/build/bin/" + + create_table_cmd = f"{binPath}taosdemo -f {filepath} > /dev/null 2>&1" + _ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8") + + def droptmpfile(self): + drop_file_cmd = "rm -f /tmp/insert_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + + def run(self): + tdLog.printNoPrefix("==========step1:create database and insert records") + file_create_table = self.createinsertfile() + self.inserttable(file_create_table) + + tdLog.printNoPrefix("==========step2:check database and stable records") + tdSql.query("show databases") + tdSql.checkData(0, 2, 2000) + tdSql.execute("use db") + tdSql.query("show stables") + tdSql.checkData(0, 4, 1000) + tdSql.checkData(1, 4, 1000) + + self.droptmpfile() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/pytest/perfbenchmark/bug3589.py b/tests/pytest/perfbenchmark/bug3589.py new file mode 100644 index 0000000000..c54ef8595d --- /dev/null +++ b/tests/pytest/perfbenchmark/bug3589.py @@ -0,0 +1,156 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import json + +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def __init__(self): + self.path = "" + + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/debug/build/bin")] + break + return buildPath + + def getCfgDir(self): + return self.getBuildPath() + "/sim/psim/cfg" + + def querycfg(self): + cfgdir = self.getCfgDir() + querycfg={ + "filetype": "query", + "cfgdir": cfgdir, + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "yes", + "databases": "db", + "specified_table_query": { + "query_interval": 0, + "concurrent": 1, + "sqls": [ + { + "sql": "select * from t10, t11 where t10.ts=t11.ts" + } + ] + } + } + + return querycfg + + def querycfgfile(self): + querycfg = self.querycfg() + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + querycfg.get("specified_table_query").get("sqls")[0]["result"] = f"/tmp/query_{date}.log" + file_query_table = f"/tmp/query_{date}.json" + with open(file_query_table, "w") as f: + json.dump(querycfg, f) + + return [file_query_table, querycfg] + + def querytable(self, filepath): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info(f"taosd found in {buildPath}") + binPath = buildPath + "/debug/build/bin/" + + query_table_cmd = f"yes | {binPath}taosdemo -f {filepath}" + _ = subprocess.check_output(query_table_cmd, shell=True).decode("utf-8") + + def checkqueryresult(self, expectrows): + querycfg = self.querycfgfile()[1] + result_file = querycfg.get("specified_table_query").get("sqls")[0].get("result") + "-0" + if result_file: + check_cmd = f"wc -l {result_file}" + check_data_init = subprocess.check_output(check_cmd, shell=True).decode("utf-8") + check_data = int(check_data_init[0]) + if check_data == expectrows: + tdLog.info(f"queryResultRows:{check_data} == expect:{expectrows}") + else: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, check_data, expectrows) + tdLog.exit(f"{args[0]}({args[1]}) failed: result:{args[2]} != expect:{args[3]}") + + def droptmpfile(self): + drop_file_cmd = "rm -f /tmp/query_* " + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + drop_file_cmd = "rm -f querySystemInfo-*" + _ = subprocess.check_output(drop_file_cmd, shell=True).decode("utf-8") + + def run(self): + tdSql.prepare() + + tdLog.printNoPrefix("==========step1:create table && insert data") + tdSql.execute("alter database db keep 36500") + tdSql.execute( + "create table stb1 (ts timestamp, c1 int) TAGS(t1 int)" + ) + tdSql.execute("create table t10 using stb1 tags(1)") + tdSql.execute("create table t11 using stb1 
tags(2)") + + tdSql.execute("insert into t10 values (-865000000, 1)") + tdSql.execute("insert into t11 values (-865000000, 2)") + tdSql.execute("insert into t10 values ('1969-12-31 23:59:59.000', 2)") + tdSql.execute("insert into t11 values ('1969-12-31 23:59:59.000', 3)") + tdSql.execute("insert into t10 values ('1970-01-01 00:00:00.000', 3)") + tdSql.execute("insert into t11 values ('1970-01-01 00:00:00.000', 4)") + tdSql.execute("insert into t10 values (-15230000, 4)") + tdSql.execute("insert into t11 values (-15230000, 5)") + tdSql.execute("insert into t10 values (-15220000, 5)") + tdSql.execute("insert into t11 values (-15220000, 6)") + tdSql.execute("insert into t10 values (-15210000, 6)") + tdSql.execute("insert into t11 values (-15210000, 7)") + tdSql.execute("insert into t10 values (0, 7)") + tdSql.execute("insert into t11 values (0, 8)") + tdSql.execute("insert into t10 values ('2020-10-01 00:00:00.000', 8)") + tdSql.execute("insert into t11 values ('2020-10-01 00:00:00.000', 9)") + + tdLog.printNoPrefix("==========step2:query") + query_file = self.querycfgfile()[0] + self.querytable(query_file) + self.checkqueryresult(8) + + self.droptmpfile() + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/bug3351.py b/tests/pytest/query/bug3351.py new file mode 100644 index 0000000000..288d071a69 --- /dev/null +++ b/tests/pytest/query/bug3351.py @@ -0,0 +1,74 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + tdLog.printNoPrefix("==========step1:create table && insert data") + + tdSql.execute( + "create table stb1 (ts timestamp, c1 int) TAGS(t1 int)" + ) + tdSql.execute("create table t0 using stb1 tags(1)") + tdSql.execute("insert into t0 values (-865000000, 1)") + tdSql.execute("insert into t0 values (-864000000, 2)") + tdSql.execute("insert into t0 values (-863000000, 3)") + tdSql.execute("insert into t0 values (-15230000, 4)") + tdSql.execute("insert into t0 values (-15220000, 5)") + tdSql.execute("insert into t0 values (-15210000, 6)") + + tdLog.printNoPrefix("==========step2:query") + # bug1:when ts > -864000000, return 0 rows; + # bug2:when ts = -15220000, return 0 rows. 
+ tdSql.query('select * from t0 where ts < -864000000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts <= -864000000') + tdSql.checkRows(2) + tdSql.query('select * from t0 where ts = -864000000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts > -864000000') + tdSql.checkRows(4) + tdSql.query('select * from t0 where ts >= -864000000') + tdSql.checkRows(5) + tdSql.query('select * from t0 where ts < -15220000') + tdSql.checkRows(4) + tdSql.query('select * from t0 where ts <= -15220000') + tdSql.checkRows(5) + tdSql.query('select * from t0 where ts = -15220000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts > -15220000') + tdSql.checkRows(1) + tdSql.query('select * from t0 where ts >= -15220000') + tdSql.checkRows(2) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/query/bug3375.py b/tests/pytest/query/bug3375.py new file mode 100644 index 0000000000..25c3467c05 --- /dev/null +++ b/tests/pytest/query/bug3375.py @@ -0,0 +1,61 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + def run(self): + tdSql.prepare() + tdSql.execute("drop database if exists db") + tdSql.execute("create database if not exists db keep 36500") + tdSql.execute("use db") + + tdLog.printNoPrefix("==========step1:create table && insert data") + tdSql.execute( + "create table stb1 (ts timestamp, c11 int) TAGS(t11 int, t12 int )" + ) + tdSql.execute( + "create table stb2 (ts timestamp, c21 int) TAGS(t21 int, t22 int )" + ) + tdSql.execute("create table t10 using stb1 tags(1, 10)") + tdSql.execute("create table t20 using stb2 tags(1, 12)") + tdSql.execute("insert into t10 values (1600000000000, 1)") + tdSql.execute("insert into t10 values (1610000000000, 2)") + tdSql.execute("insert into t20 values (1600000000000, 3)") + tdSql.execute("insert into t20 values (1610000000000, 4)") + + tdLog.printNoPrefix("==========step2:query crash test") + tdSql.query("select stb1.c11, stb1.t11, stb1.t12 from stb2,stb1 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts") + tdSql.checkRows(2) + tdSql.query("select stb2.c21, stb2.t21, stb2.t21 from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts") + tdSql.checkRows(2) + tdSql.query("select top(stb2.c21,2) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts") + tdSql.checkRows(2) + tdSql.query("select last(stb2.c21) from stb1, stb2 where stb2.t21 = stb1.t11 and stb1.ts = stb2.ts") + tdSql.checkRows(1) + + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file diff --git 
a/tests/pytest/query/query1970YearsAf.py b/tests/pytest/query/query1970YearsAf.py new file mode 100644 index 0000000000..e66a79f5e6 --- /dev/null +++ b/tests/pytest/query/query1970YearsAf.py @@ -0,0 +1,258 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. +# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import taos +import sys +import os +import json +import subprocess +import datetime + + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.dnodes import TDDnode + +class TDTestCase: + + def __init__(self): + self.path = "" + + def init(self, conn, logSql): + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor()) + + def getcfgPath(self, path): + binPath = os.path.dirname(os.path.realpath(__file__)) + binPath = binPath + "/../../../debug/" + tdLog.debug(f"binPath {binPath}") + binPath = os.path.realpath(binPath) + tdLog.debug(f"binPath real path {binPath}") + + if path == "": + self.path = os.path.abspath(binPath + "../../") + else: + self.path = os.path.realpath(path) + return self.path + + def getCfgDir(self): + self.getcfgPath(self.path) + self.cfgDir = f"{self.path}/sim/psim/cfg" + return self.cfgDir + + def creatcfg(self): + dbinfo = { + "name": "db", + "drop": "yes", + "replica": 1, + "days": 10, + "cache": 16, + "blocks": 8, + "precision": "ms", + "keep": 36500, + "minRows": 100, + "maxRows": 4096, + "comp": 2, + "walLevel": 1, + "cachelast": 0, + "quorum": 1, + "fsync": 3000, + "update": 0 + } + + # set stable schema + stable1 = { + "name": "stb2", + "child_table_exists": "no", + "childtable_count": 10, + "childtable_prefix": "t", + "auto_create_table": "no", + "data_source": "rand", + "insert_mode": "taosc", + "insert_rows": 5000, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 1, + "max_sql_len": 65480, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 20000, + "start_timestamp": "1969-12-31 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + + ], + "tags": [ + {"type": "INT", "count": 2}, + {"type": "DOUBLE", "count": 2}, + {"type": "BIGINT", "count": 2}, + {"type": "FLOAT", "count": 2}, + {"type": "SMALLINT", "count": 2}, + {"type": "TINYINT", "count": 2}, + {"type": "BOOL", "count": 2}, + {"type": "NCHAR", "len": 3, "count": 1}, + {"type": "BINARY", "len": 8, "count": 1} + ] + } + + # create different stables like stable1 and add to list super_tables + super_tables = [] + super_tables.append(stable1) + database = { + "dbinfo": dbinfo, + "super_tables": super_tables + } + + cfgdir = self.getCfgDir() + create_table = { + "filetype": "insert", + "cfgdir": cfgdir, + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 4, + 
"thread_count_create_tbl": 4, + "result_file": "/tmp/insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 100, + "databases": [database] + } + return create_table + + def createinsertfile(self): + create_table = self.creatcfg() + date = datetime.datetime.now().strftime("%Y%m%d%H%M") + file_create_table = f"/tmp/insert_{date}.json" + + with open(file_create_table, 'w') as f: + json.dump(create_table, f) + return file_create_table + + def inserttable(self, filepath): + create_table_cmd = f"taosdemo -f {filepath} > /dev/null 2>&1" + _ = subprocess.check_output(create_table_cmd, shell=True).decode("utf-8") + + def sqlsquery(self): + # stable query + tdSql.query( + "select * from stb2 where stb2.ts < '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(43200) + + tdSql.query( + "select * from stb2 where stb2.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(6800) + + tdSql.query( + "select * from stb2 where stb2.ts > '1969-12-31 23:00:00.000' and stb2.ts <'1970-01-01 01:00:00.000' " + ) + tdSql.checkRows(3590) + + # child-tables query + tdSql.query( + "select * from t0 where t0.ts < '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(4320) + + tdSql.query( + "select * from t1 where t1.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(680) + + tdSql.query( + "select * from t9 where t9.ts > '1969-12-31 22:00:00.000' and t9.ts <'1970-01-01 02:00:00.000' " + ) + tdSql.checkRows(719) + + tdSql.query( + "select * from t0,t1 where t0.ts=t1.ts and t1.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(680) + + tdSql.query( + "select diff(col1) from t0 where t0.ts >= '1970-01-01 00:00:00.000' " + ) + tdSql.checkRows(679) + + tdSql.query( + "select t0,col1 from stb2 where stb2.ts < '1970-01-01 00:00:00.000' order by ts" + ) + tdSql.checkRows(43200) + + # query with timestamp in 'where ...' 
+ tdSql.query( + "select * from stb2 where stb2.ts > -28800000 " + ) + tdSql.checkRows(6790) + + tdSql.query( + "select * from stb2 where stb2.ts > -28800000 and stb2.ts < '1970-01-01 08:00:00.000' " + ) + tdSql.checkRows(6790) + + tdSql.query( + "select * from stb2 where stb2.ts < -28800000 and stb2.ts > '1969-12-31 22:00:00.000' " + ) + tdSql.checkRows(3590) + + def run(self): + s = 'reset query cache' + tdSql.execute(s) + s = 'create database if not exists db' + tdSql.execute(s) + s = 'use db' + tdSql.execute(s) + + tdLog.info("==========step1:create table stable and child table,then insert data automatically") + insertfile = self.createinsertfile() + self.inserttable(insertfile) + + tdLog.info("==========step2:query join") + self.sqlsquery() + + # after wal and sync, check again + tdSql.query("show dnodes") + index = tdSql.getData(0, 0) + tdDnodes.stop(index) + tdDnodes.start(index) + + tdLog.info("==========step3: query join again") + self.sqlsquery() + + # delete temporary file + rm_cmd = f"rm -f /tmp/insert* > /dev/null 2>&1" + _ = subprocess.check_output(rm_cmd, shell=True).decode("utf-8") + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) \ No newline at end of file diff --git a/tests/pytest/tools/insert-interlace.json b/tests/pytest/tools/insert-interlace.json index a2ff2c001c..d4767ad064 100644 --- a/tests/pytest/tools/insert-interlace.json +++ b/tests/pytest/tools/insert-interlace.json @@ -10,7 +10,7 @@ "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 5000, - "rows_per_tbl": 50, + "interlace_rows": 50, "num_of_records_per_req": 100, "max_sql_len": 1024000, "databases": [{ @@ -42,7 +42,7 @@ "insert_mode": "taosc", "insert_rows": 250, "multi_thread_write_one_tbl": "no", - "rows_per_tbl": 80, + "interlace_rows": 80, "max_sql_len": 1024000, "disorder_ratio": 0, "disorder_range": 1000, diff --git a/tests/pytest/tools/query.json b/tests/pytest/tools/query.json new file mode 100644 index 0000000000..d486423865 --- /dev/null +++ b/tests/pytest/tools/query.json @@ -0,0 +1,22 @@ +{ + "filetype": "query", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "confirm_parameter_prompt": "no", + "databases": "test", + "query_times": 1, + "super_table_query": { + "stblname": "meters", + "query_interval": 10, + "threads": 8, + "sqls": [ + { + "sql": "select last_row(ts) from xxxx", + "result": "" + } + ] + } +} diff --git a/tests/pytest/tools/taosdemoPerformance.py b/tests/pytest/tools/taosdemoPerformance.py index 28f451b6a0..1156cc2484 100644 --- a/tests/pytest/tools/taosdemoPerformance.py +++ b/tests/pytest/tools/taosdemoPerformance.py @@ -11,26 +11,16 @@ # -*- coding: utf-8 -*- -import sys import taos -import time -import datetime -import csv -import random import pandas as pd import argparse import os.path +import json class taosdemoPerformace: - def __init__(self, commitID, dbName, createTableTime, insertRecordsTime, recordsPerSecond, avgDelay, maxDelay, minDelay): + def __init__(self, commitID, dbName): self.commitID = commitID - self.dbName = dbName - self.createTableTime = createTableTime - self.insertRecordsTime = insertRecordsTime - self.recordsPerSecond = recordsPerSecond - self.avgDelay = avgDelay - self.maxDelay = maxDelay - self.minDelay = minDelay + self.dbName = dbName self.host = "127.0.0.1" self.user = "root" self.password = "taosdata" @@ -39,8 +29,97 @@ 
class taosdemoPerformace: self.host, self.user, self.password, - self.config) + self.config) + self.insertDB = "insertDB"; + def generateJson(self): + db = { + "name": "%s" % self.insertDB, + "drop": "yes", + "replica": 1 + } + + stb = { + "name": "meters", + "child_table_exists":"no", + "childtable_count": 10000, + "childtable_prefix": "stb_", + "auto_create_table": "no", + "data_source": "rand", + "batch_create_tbl_num": 10, + "insert_mode": "taosc", + "insert_rows": 100000, + "multi_thread_write_one_tbl": "no", + "number_of_tbl_in_one_sql": 0, + "rows_per_tbl": 100, + "max_sql_len": 1024000, + "disorder_ratio": 0, + "disorder_range": 1000, + "timestamp_step": 1, + "start_timestamp": "2020-10-01 00:00:00.000", + "sample_format": "csv", + "sample_file": "./sample.csv", + "tags_file": "", + "columns": [ + {"type": "INT", "count": 4} + ], + "tags": [ + {"type": "INT", "count":1}, + {"type": "BINARY", "len": 16} + ] + } + + stables = [] + stables.append(stb) + + db = { + "dbinfo": db, + "super_tables": stables + } + + insert_data = { + "filetype": "insert", + "cfgdir": "/etc/taosperf", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "thread_count": 10, + "thread_count_create_tbl": 10, + "result_file": "./insert_res.txt", + "confirm_parameter_prompt": "no", + "insert_interval": 0, + "num_of_records_per_req": 30000, + "databases": [db] + } + + insert_json_file = f"/tmp/insert.json" + + with open(insert_json_file, 'w') as f: + json.dump(insert_data, f) + return insert_json_file + + def getCMDOutput(self, cmd): + cmd = os.popen(cmd) + output = cmd.read() + cmd.close() + return output + + def insertData(self): + os.system("taosdemo -f %s > taosdemoperf.txt" % self.generateJson()) + self.createTableTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==1{print $2}'") + self.insertRecordsTime = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $2}'") + self.recordsPerSecond = self.getCMDOutput("grep 'Spent' taosdemoperf.txt | awk 'NR==2{print $16}'") + self.commitID = self.getCMDOutput("git rev-parse --short HEAD") + delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $4}'") + self.avgDelay = delay[:-4] + delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $6}'") + self.maxDelay = delay[:-4] + delay = self.getCMDOutput("grep 'delay' taosdemoperf.txt | awk '{print $8}'") + self.minDelay = delay[:-3] + + os.system("[ -f taosdemoperf.txt ] && rm taosdemoperf.txt") + def createTablesAndStoreData(self): cursor = self.conn.cursor() @@ -48,14 +127,15 @@ class taosdemoPerformace: cursor.execute("use %s" % self.dbName) cursor.execute("create table if not exists taosdemo_perf (ts timestamp, create_table_time float, insert_records_time float, records_per_second float, commit_id binary(50), avg_delay float, max_delay float, min_delay float)") print("==================== taosdemo performance ====================") - print("create tables time: %f" % self.createTableTime) - print("insert records time: %f" % self.insertRecordsTime) - print("records per second: %f" % self.recordsPerSecond) - print("avg delay: %f" % self.avgDelay) - print("max delay: %f" % self.maxDelay) - print("min delay: %f" % self.minDelay) - cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" % (self.createTableTime, self.insertRecordsTime, self.recordsPerSecond, self.commitID, self.avgDelay, self.maxDelay, self.minDelay)) - cursor.execute("drop database if exists taosdemo_insert_test") + print("create tables 
time: %f" % float(self.createTableTime)) + print("insert records time: %f" % float(self.insertRecordsTime)) + print("records per second: %f" % float(self.recordsPerSecond)) + print("avg delay: %f" % float(self.avgDelay)) + print("max delay: %f" % float(self.maxDelay)) + print("min delay: %f" % float(self.minDelay)) + cursor.execute("insert into taosdemo_perf values(now, %f, %f, %f, '%s', %f, %f, %f)" % + (float(self.createTableTime), float(self.insertRecordsTime), float(self.recordsPerSecond), self.commitID, float(self.avgDelay), float(self.maxDelay), float(self.minDelay))) + cursor.execute("drop database if exists %s" % self.insertDB) cursor.close() @@ -64,7 +144,7 @@ if __name__ == '__main__': parser.add_argument( '-c', '--commit-id', - action='store', + action='store', type=str, help='git commit id (default: null)') parser.add_argument( @@ -74,46 +154,9 @@ if __name__ == '__main__': default='perf', type=str, help='Database name to be created (default: perf)') - parser.add_argument( - '-t', - '--create-table', - action='store', - type=float, - help='create table time') - parser.add_argument( - '-i', - '--insert-records', - action='store', - type=float, - help='insert records time') - parser.add_argument( - '-r', - '---records-per-second', - action='store', - type=float, - help='records per request') - parser.add_argument( - '-avg', - '---avg-delay', - action='store', - type=float, - help='avg delay') - parser.add_argument( - '-max', - '---max-delay', - action='store', - type=float, - help='max delay') - parser.add_argument( - '-min', - '---min-delay', - action='store', - type=float, - help='min delay') - args = parser.parse_args() - perftest = taosdemoPerformace(args.commit_id, args.database_name, args.create_table, args.insert_records, args.records_per_second, - args.avg_delay, args.max_delay, args.min_delay) + perftest = taosdemoPerformace(args.commit_id, args.database_name) + perftest.insertData() perftest.createTablesAndStoreData() \ No newline at end of file diff --git a/tests/pytest/tools/taosdemoTestInterlace.py b/tests/pytest/tools/taosdemoTestInterlace.py index 953bfff90f..4c551f327a 100644 --- a/tests/pytest/tools/taosdemoTestInterlace.py +++ b/tests/pytest/tools/taosdemoTestInterlace.py @@ -1,4 +1,4 @@ -################################################################### +################################################################## # Copyright (c) 2016 by TAOS Technologies, Inc. # All rights reserved. # @@ -25,9 +25,6 @@ class TDTestCase: tdLog.debug("start to execute %s" % __file__) tdSql.init(conn.cursor(), logSql) - self.numberOfTables = 10000 - self.numberOfRecords = 100 - def getBuildPath(self): selfPath = os.path.dirname(os.path.realpath(__file__)) diff --git a/tests/pytest/tools/taosdemoTestQuery.py b/tests/pytest/tools/taosdemoTestQuery.py new file mode 100644 index 0000000000..bb2bb85052 --- /dev/null +++ b/tests/pytest/tools/taosdemoTestQuery.py @@ -0,0 +1,78 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +import sys +import os +import time +from util.log import * +from util.cases import * +from util.sql import * +from util.dnodes import * +import subprocess + + +class TDTestCase: + def init(self, conn, logSql): + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor(), logSql) + + self.numberOfTables = 1000 + self.numberOfRecords = 100 + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def run(self): + tdSql.prepare() + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + os.system("%staosdemo -y -t %d -n %d" % + (binPath, self.numberOfTables, self.numberOfRecords)) + print("Sleep 2 seconds..") + time.sleep(2) + os.system('%staosdemo -f tools/query.json ' % binPath) +# taosdemoCmd = '%staosdemo -f tools/query.json ' % binPath +# threads = subprocess.check_output( +# taosdemoCmd, shell=True).decode("utf-8") +# print("threads: %d" % int(threads)) + +# if (int(threads) != 8): +# caller = inspect.getframeinfo(inspect.stack()[0][0]) +# tdLog.exit( +# "%s(%d) failed: expected threads 8, actual %d" % +# (caller.filename, caller.lineno, int(threads))) +# + def stop(self): + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/pytest/tools/taosdemoTest2.py b/tests/pytest/tools/taosdemoTestTblAlt.py similarity index 57% rename from tests/pytest/tools/taosdemoTest2.py rename to tests/pytest/tools/taosdemoTestTblAlt.py index 92f6fa1265..bb367817cf 100644 --- a/tests/pytest/tools/taosdemoTest2.py +++ b/tests/pytest/tools/taosdemoTestTblAlt.py @@ -29,17 +29,54 @@ class TDTestCase: self.numberOfTables = 10 self.numberOfRecords = 1000000 + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + def insertDataAndAlterTable(self, threadID): + buildPath = self.getBuildPath() + if (buildPath == ""): + tdLog.exit("taosd not found!") + else: + tdLog.info("taosd found in %s" % buildPath) + binPath = buildPath + "/build/bin/" + if(threadID == 0): - os.system("taosdemo -y -t %d -n %d" % - (self.numberOfTables, self.numberOfRecords)) + os.system("%staosdemo -y -t %d -n %d" % + (binPath, self.numberOfTables, self.numberOfRecords)) if(threadID == 1): time.sleep(2) print("use test") - tdSql.execute("use test") + while True: + try: + 
tdSql.execute("use test") + break + except Exception as e: + tdLog.info("use database test failed") + time.sleep(1) + continue + # check if all the tables have heen created while True: - tdSql.query("show tables") + try: + tdSql.query("show tables") + except Exception as e: + tdLog.info("show tables test failed") + time.sleep(1) + continue + rows = tdSql.queryRows print("number of tables: %d" % rows) if(rows == self.numberOfTables): @@ -48,16 +85,23 @@ class TDTestCase: # check if there are any records in the last created table while True: print("query started") - tdSql.query("select * from test.t9") + try: + tdSql.query("select * from test.t9") + except Exception as e: + tdLog.info("select * test failed") + time.sleep(2) + continue + rows = tdSql.queryRows print("number of records: %d" % rows) if(rows > 0): break time.sleep(1) + print("alter table test.meters add column col10 int") tdSql.execute("alter table test.meters add column col10 int") - print("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") - tdSql.execute("insert into test.t0 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") + print("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") + tdSql.execute("insert into test.t9 values (now, 1, 2, 3, 4, 0.1, 0.01,'test', '测试', TRUE, 1610000000000, 0)") def run(self): tdSql.prepare() @@ -70,6 +114,8 @@ class TDTestCase: t1.join() t2.join() + time.sleep(3) + tdSql.query("select count(*) from test.meters") tdSql.checkData(0, 0, self.numberOfRecords * self.numberOfTables + 1) diff --git a/tests/script/general/db/topic1.sim b/tests/script/general/db/topic1.sim index e17a4996f5..42613405af 100644 --- a/tests/script/general/db/topic1.sim +++ b/tests/script/general/db/topic1.sim @@ -878,5 +878,7 @@ if $rows != 0 then endi sql drop topic t2 +sql_error drop topic abc +sql drop topic if exists abc; system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/general/parser/join_multitables.sim b/tests/script/general/parser/join_multitables.sim new file mode 100644 index 0000000000..acb8be10e7 --- /dev/null +++ b/tests/script/general/parser/join_multitables.sim @@ -0,0 +1,2326 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c walLevel -v 0 +system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 2 +system sh/exec.sh -n dnode1 -s start + +sleep 100 +sql connect +print ======================== dnode1 start + +$db = testdb + +sql create database $db +sql use $db + +sql create stable st0 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st1 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st2 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st3 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st4 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st5 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st6 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st7 (ts 
timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st8 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable st9 (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable sta (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); +sql create stable stb (ts timestamp, f1 int, f2 double, f3 binary(10)) tags(id1 int, id2 smallint, id3 double, id4 bool, id5 binary(5)); + +sql create table tb0_1 using st0 tags(0,1,2.0,true,'3'); +sql create table tb0_2 using st0 tags(1,2,3.0,false,'4'); +sql create table tb0_3 using st0 tags(2,3,4.0,true,'5'); +sql create table tb0_4 using st0 tags(3,4,5.0,false,'6'); +sql create table tb0_5 using st0 tags(4,5,6.0,true,'7'); + +sql create table tb1_1 using st1 tags(0,1,2.0,true,'3'); +sql create table tb1_2 using st1 tags(1,2,3.0,false,'4'); +sql create table tb1_3 using st1 tags(2,3,4.0,true,'5'); +sql create table tb1_4 using st1 tags(3,4,5.0,false,'6'); +sql create table tb1_5 using st1 tags(4,5,6.0,true,'7'); + +sql create table tb2_1 using st2 tags(0,1,2.0,true,'3'); +sql create table tb2_2 using st2 tags(1,2,3.0,false,'4'); +sql create table tb2_3 using st2 tags(2,3,4.0,true,'5'); +sql create table tb2_4 using st2 tags(3,4,5.0,false,'6'); +sql create table tb2_5 using st2 tags(4,5,6.0,true,'7'); + +sql create table tb3_1 using st3 tags(0,1,2.0,true,'3'); +sql create table tb3_2 using st3 tags(1,2,3.0,false,'4'); +sql create table tb3_3 using st3 tags(2,3,4.0,true,'5'); +sql create table tb3_4 using st3 tags(3,4,5.0,false,'6'); +sql create table tb3_5 using st3 tags(4,5,6.0,true,'7'); + +sql create table tb4_1 using st4 tags(0,1,2.0,true,'3'); +sql create table tb4_2 using st4 tags(1,2,3.0,false,'4'); +sql create table tb4_3 using st4 tags(2,3,4.0,true,'5'); +sql create table tb4_4 using st4 tags(3,4,5.0,false,'6'); +sql create table tb4_5 using st4 tags(4,5,6.0,true,'7'); + +sql create table tb5_1 using st5 tags(0,1,2.0,true,'3'); +sql create table tb5_2 using st5 tags(1,2,3.0,false,'4'); +sql create table tb5_3 using st5 tags(2,3,4.0,true,'5'); +sql create table tb5_4 using st5 tags(3,4,5.0,false,'6'); +sql create table tb5_5 using st5 tags(4,5,6.0,true,'7'); + +sql create table tb6_1 using st6 tags(0,1,2.0,true,'3'); +sql create table tb6_2 using st6 tags(1,2,3.0,false,'4'); +sql create table tb6_3 using st6 tags(2,3,4.0,true,'5'); +sql create table tb6_4 using st6 tags(3,4,5.0,false,'6'); +sql create table tb6_5 using st6 tags(4,5,6.0,true,'7'); + +sql create table tb7_1 using st7 tags(0,1,2.0,true,'3'); +sql create table tb7_2 using st7 tags(1,2,3.0,false,'4'); +sql create table tb7_3 using st7 tags(2,3,4.0,true,'5'); +sql create table tb7_4 using st7 tags(3,4,5.0,false,'6'); +sql create table tb7_5 using st7 tags(4,5,6.0,true,'7'); + +sql create table tb8_1 using st8 tags(0,1,2.0,true,'3'); +sql create table tb8_2 using st8 tags(1,2,3.0,false,'4'); +sql create table tb8_3 using st8 tags(2,3,4.0,true,'5'); +sql create table tb8_4 using st8 tags(3,4,5.0,false,'6'); +sql create table tb8_5 using st8 tags(4,5,6.0,true,'7'); + +sql create table tb9_1 using st9 tags(0,1,2.0,true,'3'); +sql create table tb9_2 using st9 tags(1,2,3.0,false,'4'); +sql create table tb9_3 using st9 tags(2,3,4.0,true,'5'); +sql create table tb9_4 using st9 tags(3,4,5.0,false,'6'); +sql 
create table tb9_5 using st9 tags(4,5,6.0,true,'7'); + +sql create table tba_1 using sta tags(0,1,2.0,true,'3'); +sql create table tba_2 using sta tags(0,1,2.0,true,'3'); +sql create table tba_3 using sta tags(0,1,2.0,true,'3'); +sql create table tba_4 using sta tags(0,1,2.0,true,'3'); +sql create table tba_5 using sta tags(0,1,2.0,true,'3'); + +sql create table tbb_1 using stb tags(0,1,2.0,true,'3'); +sql create table tbb_2 using stb tags(0,1,2.0,true,'3'); +sql create table tbb_3 using stb tags(0,1,2.0,true,'3'); +sql create table tbb_4 using stb tags(0,1,2.0,true,'3'); +sql create table tbb_5 using stb tags(0,1,2.0,true,'3'); + +sql insert into tb0_1 values('2021-03-01 01:00:00.000', 9901,9901.0,'01'); +sql insert into tb0_1 values('2021-03-02 01:00:00.000', 9901,9901.0,'01'); +sql insert into tb0_1 values('2021-03-03 01:00:00.000', 9901,9901.0,'01'); +sql insert into tb0_1 values('2021-03-04 01:00:00.000', 9901,9901.0,'01'); +sql insert into tb0_1 values('2021-03-05 01:00:00.000', 9901,9901.0,'01'); +sql insert into tb0_2 values('2021-03-01 02:00:00.000', 9902,9902.0,'02'); +sql insert into tb0_2 values('2021-03-02 02:00:00.000', 9902,9902.0,'02'); +sql insert into tb0_2 values('2021-03-03 02:00:00.000', 9902,9902.0,'02'); +sql insert into tb0_2 values('2021-03-04 02:00:00.000', 9902,9902.0,'02'); +sql insert into tb0_2 values('2021-03-05 02:00:00.000', 9902,9902.0,'02'); +sql insert into tb0_3 values('2021-03-01 03:00:00.000', 9903,9903.0,'03'); +sql insert into tb0_3 values('2021-03-02 03:00:00.000', 9903,9903.0,'03'); +sql insert into tb0_3 values('2021-03-03 03:00:00.000', 9903,9903.0,'03'); +sql insert into tb0_3 values('2021-03-04 03:00:00.000', 9903,9903.0,'03'); +sql insert into tb0_3 values('2021-03-05 03:00:00.000', 9903,9903.0,'03'); +sql insert into tb0_4 values('2021-03-01 04:00:00.000', 9904,9904.0,'04'); +sql insert into tb0_4 values('2021-03-02 04:00:00.000', 9904,9904.0,'04'); +sql insert into tb0_4 values('2021-03-03 04:00:00.000', 9904,9904.0,'04'); +sql insert into tb0_4 values('2021-03-04 04:00:00.000', 9904,9904.0,'04'); +sql insert into tb0_4 values('2021-03-05 04:00:00.000', 9904,9904.0,'04'); +sql insert into tb0_5 values('2021-03-01 05:00:00.000', 9905,9905.0,'05'); +sql insert into tb0_5 values('2021-03-02 05:00:00.000', 9905,9905.0,'05'); +sql insert into tb0_5 values('2021-03-03 05:00:00.000', 9905,9905.0,'05'); +sql insert into tb0_5 values('2021-03-04 05:00:00.000', 9905,9905.0,'05'); +sql insert into tb0_5 values('2021-03-05 05:00:00.000', 9905,9905.0,'05'); + +sql insert into tb1_1 values('2021-03-01 01:00:00.000', 9911,9911.0,'11'); +sql insert into tb1_1 values('2021-03-02 01:00:00.000', 9911,9911.0,'11'); +sql insert into tb1_1 values('2021-03-03 01:00:00.000', 9911,9911.0,'11'); +sql insert into tb1_1 values('2021-03-04 01:00:00.000', 9911,9911.0,'11'); +sql insert into tb1_1 values('2021-03-05 01:00:00.000', 9911,9911.0,'11'); +sql insert into tb1_2 values('2021-03-01 02:00:00.000', 9912,9912.0,'12'); +sql insert into tb1_2 values('2021-03-02 02:00:00.000', 9912,9912.0,'12'); +sql insert into tb1_2 values('2021-03-03 02:00:00.000', 9912,9912.0,'12'); +sql insert into tb1_2 values('2021-03-04 02:00:00.000', 9912,9912.0,'12'); +sql insert into tb1_2 values('2021-03-05 02:00:00.000', 9912,9912.0,'12'); +sql insert into tb1_3 values('2021-03-01 03:00:00.000', 9913,9913.0,'13'); +sql insert into tb1_3 values('2021-03-02 03:00:00.000', 9913,9913.0,'13'); +sql insert into tb1_3 values('2021-03-03 03:00:00.000', 9913,9913.0,'13'); +sql insert into tb1_3 
values('2021-03-04 03:00:00.000', 9913,9913.0,'13'); +sql insert into tb1_3 values('2021-03-05 03:00:00.000', 9913,9913.0,'13'); +sql insert into tb1_4 values('2021-03-01 04:00:00.000', 9914,9914.0,'14'); +sql insert into tb1_4 values('2021-03-02 04:00:00.000', 9914,9914.0,'14'); +sql insert into tb1_4 values('2021-03-03 04:00:00.000', 9914,9914.0,'14'); +sql insert into tb1_4 values('2021-03-04 04:00:00.000', 9914,9914.0,'14'); +sql insert into tb1_4 values('2021-03-05 04:00:00.000', 9914,9914.0,'14'); +sql insert into tb1_5 values('2021-03-01 05:00:00.000', 9915,9915.0,'15'); +sql insert into tb1_5 values('2021-03-02 05:00:00.000', 9915,9915.0,'15'); +sql insert into tb1_5 values('2021-03-03 05:00:00.000', 9915,9915.0,'15'); +sql insert into tb1_5 values('2021-03-04 05:00:00.000', 9915,9915.0,'15'); +sql insert into tb1_5 values('2021-03-05 05:00:00.000', 9915,9915.0,'15'); + +sql insert into tb2_1 values('2021-03-01 01:00:00.000', 9921,9921.0,'21'); +sql insert into tb2_1 values('2021-03-02 01:00:00.000', 9921,9921.0,'21'); +sql insert into tb2_1 values('2021-03-03 01:00:00.000', 9921,9921.0,'21'); +sql insert into tb2_1 values('2021-03-04 01:00:00.000', 9921,9921.0,'21'); +sql insert into tb2_1 values('2021-03-05 01:00:00.000', 9921,9921.0,'21'); +sql insert into tb2_2 values('2021-03-01 02:00:00.000', 9922,9922.0,'22'); +sql insert into tb2_2 values('2021-03-02 02:00:00.000', 9922,9922.0,'22'); +sql insert into tb2_2 values('2021-03-03 02:00:00.000', 9922,9922.0,'22'); +sql insert into tb2_2 values('2021-03-04 02:00:00.000', 9922,9922.0,'22'); +sql insert into tb2_2 values('2021-03-05 02:00:00.000', 9922,9922.0,'22'); +sql insert into tb2_3 values('2021-03-01 03:00:00.000', 9923,9923.0,'23'); +sql insert into tb2_3 values('2021-03-02 03:00:00.000', 9923,9923.0,'23'); +sql insert into tb2_3 values('2021-03-03 03:00:00.000', 9923,9923.0,'23'); +sql insert into tb2_3 values('2021-03-04 03:00:00.000', 9923,9923.0,'23'); +sql insert into tb2_3 values('2021-03-05 03:00:00.000', 9923,9923.0,'23'); +sql insert into tb2_4 values('2021-03-01 04:00:00.000', 9924,9924.0,'24'); +sql insert into tb2_4 values('2021-03-02 04:00:00.000', 9924,9924.0,'24'); +sql insert into tb2_4 values('2021-03-03 04:00:00.000', 9924,9924.0,'24'); +sql insert into tb2_4 values('2021-03-04 04:00:00.000', 9924,9924.0,'24'); +sql insert into tb2_4 values('2021-03-05 04:00:00.000', 9924,9924.0,'24'); +sql insert into tb2_5 values('2021-03-01 05:00:00.000', 9925,9925.0,'25'); +sql insert into tb2_5 values('2021-03-02 05:00:00.000', 9925,9925.0,'25'); +sql insert into tb2_5 values('2021-03-03 05:00:00.000', 9925,9925.0,'25'); +sql insert into tb2_5 values('2021-03-04 05:00:00.000', 9925,9925.0,'25'); +sql insert into tb2_5 values('2021-03-05 05:00:00.000', 9925,9925.0,'25'); + + +sql insert into tb3_1 values('2021-03-01 01:00:00.000', 9931,9931.0,'31'); +sql insert into tb3_1 values('2021-03-02 01:00:00.000', 9931,9931.0,'31'); +sql insert into tb3_1 values('2021-03-03 01:00:00.000', 9931,9931.0,'31'); +sql insert into tb3_1 values('2021-03-04 01:00:00.000', 9931,9931.0,'31'); +sql insert into tb3_1 values('2021-03-05 01:00:00.000', 9931,9931.0,'31'); +sql insert into tb3_2 values('2021-03-01 02:00:00.000', 9932,9932.0,'32'); +sql insert into tb3_2 values('2021-03-02 02:00:00.000', 9932,9932.0,'32'); +sql insert into tb3_2 values('2021-03-03 02:00:00.000', 9932,9932.0,'32'); +sql insert into tb3_2 values('2021-03-04 02:00:00.000', 9932,9932.0,'32'); +sql insert into tb3_2 values('2021-03-05 02:00:00.000', 9932,9932.0,'32'); 
+sql insert into tb3_3 values('2021-03-01 03:00:00.000', 9933,9933.0,'33'); +sql insert into tb3_3 values('2021-03-02 03:00:00.000', 9933,9933.0,'33'); +sql insert into tb3_3 values('2021-03-03 03:00:00.000', 9933,9933.0,'33'); +sql insert into tb3_3 values('2021-03-04 03:00:00.000', 9933,9933.0,'33'); +sql insert into tb3_3 values('2021-03-05 03:00:00.000', 9933,9933.0,'33'); +sql insert into tb3_4 values('2021-03-01 04:00:00.000', 9934,9934.0,'34'); +sql insert into tb3_4 values('2021-03-02 04:00:00.000', 9934,9934.0,'34'); +sql insert into tb3_4 values('2021-03-03 04:00:00.000', 9934,9934.0,'34'); +sql insert into tb3_4 values('2021-03-04 04:00:00.000', 9934,9934.0,'34'); +sql insert into tb3_4 values('2021-03-05 04:00:00.000', 9934,9934.0,'34'); +sql insert into tb3_5 values('2021-03-01 05:00:00.000', 9935,9935.0,'35'); +sql insert into tb3_5 values('2021-03-02 05:00:00.000', 9935,9935.0,'35'); +sql insert into tb3_5 values('2021-03-03 05:00:00.000', 9935,9935.0,'35'); +sql insert into tb3_5 values('2021-03-04 05:00:00.000', 9935,9935.0,'35'); +sql insert into tb3_5 values('2021-03-05 05:00:00.000', 9935,9935.0,'35'); + + +sql insert into tb4_1 values('2021-03-01 01:00:00.000', 9941,9941.0,'41'); +sql insert into tb4_1 values('2021-03-02 01:00:00.000', 9941,9941.0,'41'); +sql insert into tb4_1 values('2021-03-03 01:00:00.000', 9941,9941.0,'41'); +sql insert into tb4_1 values('2021-03-04 01:00:00.000', 9941,9941.0,'41'); +sql insert into tb4_1 values('2021-03-05 01:00:00.000', 9941,9941.0,'41'); +sql insert into tb4_2 values('2021-03-01 02:00:00.000', 9942,9942.0,'42'); +sql insert into tb4_2 values('2021-03-02 02:00:00.000', 9942,9942.0,'42'); +sql insert into tb4_2 values('2021-03-03 02:00:00.000', 9942,9942.0,'42'); +sql insert into tb4_2 values('2021-03-04 02:00:00.000', 9942,9942.0,'42'); +sql insert into tb4_2 values('2021-03-05 02:00:00.000', 9942,9942.0,'42'); +sql insert into tb4_3 values('2021-03-01 03:00:00.000', 9943,9943.0,'43'); +sql insert into tb4_3 values('2021-03-02 03:00:00.000', 9943,9943.0,'43'); +sql insert into tb4_3 values('2021-03-03 03:00:00.000', 9943,9943.0,'43'); +sql insert into tb4_3 values('2021-03-04 03:00:00.000', 9943,9943.0,'43'); +sql insert into tb4_3 values('2021-03-05 03:00:00.000', 9943,9943.0,'43'); +sql insert into tb4_4 values('2021-03-01 04:00:00.000', 9944,9944.0,'44'); +sql insert into tb4_4 values('2021-03-02 04:00:00.000', 9944,9944.0,'44'); +sql insert into tb4_4 values('2021-03-03 04:00:00.000', 9944,9944.0,'44'); +sql insert into tb4_4 values('2021-03-04 04:00:00.000', 9944,9944.0,'44'); +sql insert into tb4_4 values('2021-03-05 04:00:00.000', 9944,9944.0,'44'); +sql insert into tb4_5 values('2021-03-01 05:00:00.000', 9945,9945.0,'45'); +sql insert into tb4_5 values('2021-03-02 05:00:00.000', 9945,9945.0,'45'); +sql insert into tb4_5 values('2021-03-03 05:00:00.000', 9945,9945.0,'45'); +sql insert into tb4_5 values('2021-03-04 05:00:00.000', 9945,9945.0,'45'); +sql insert into tb4_5 values('2021-03-05 05:00:00.000', 9945,9945.0,'45'); + +sql insert into tb5_1 values('2021-03-01 01:00:00.000', 9951,9951.0,'51'); +sql insert into tb5_1 values('2021-03-02 01:00:00.000', 9951,9951.0,'51'); +sql insert into tb5_1 values('2021-03-03 01:00:00.000', 9951,9951.0,'51'); +sql insert into tb5_1 values('2021-03-04 01:00:00.000', 9951,9951.0,'51'); +sql insert into tb5_1 values('2021-03-05 01:00:00.000', 9951,9951.0,'51'); +sql insert into tb5_2 values('2021-03-01 02:00:00.000', 9952,9952.0,'52'); +sql insert into tb5_2 values('2021-03-02 
02:00:00.000', 9952,9952.0,'52'); +sql insert into tb5_2 values('2021-03-03 02:00:00.000', 9952,9952.0,'52'); +sql insert into tb5_2 values('2021-03-04 02:00:00.000', 9952,9952.0,'52'); +sql insert into tb5_2 values('2021-03-05 02:00:00.000', 9952,9952.0,'52'); +sql insert into tb5_3 values('2021-03-01 03:00:00.000', 9953,9953.0,'53'); +sql insert into tb5_3 values('2021-03-02 03:00:00.000', 9953,9953.0,'53'); +sql insert into tb5_3 values('2021-03-03 03:00:00.000', 9953,9953.0,'53'); +sql insert into tb5_3 values('2021-03-04 03:00:00.000', 9953,9953.0,'53'); +sql insert into tb5_3 values('2021-03-05 03:00:00.000', 9953,9953.0,'53'); +sql insert into tb5_4 values('2021-03-01 04:00:00.000', 9954,9954.0,'54'); +sql insert into tb5_4 values('2021-03-02 04:00:00.000', 9954,9954.0,'54'); +sql insert into tb5_4 values('2021-03-03 04:00:00.000', 9954,9954.0,'54'); +sql insert into tb5_4 values('2021-03-04 04:00:00.000', 9954,9954.0,'54'); +sql insert into tb5_4 values('2021-03-05 04:00:00.000', 9954,9954.0,'54'); +sql insert into tb5_5 values('2021-03-01 05:00:00.000', 9955,9955.0,'55'); +sql insert into tb5_5 values('2021-03-02 05:00:00.000', 9955,9955.0,'55'); +sql insert into tb5_5 values('2021-03-03 05:00:00.000', 9955,9955.0,'55'); +sql insert into tb5_5 values('2021-03-04 05:00:00.000', 9955,9955.0,'55'); +sql insert into tb5_5 values('2021-03-05 05:00:00.000', 9955,9955.0,'55'); + +sql insert into tb6_1 values('2021-03-01 01:00:00.000', 9961,9961.0,'61'); +sql insert into tb6_1 values('2021-03-02 01:00:00.000', 9961,9961.0,'61'); +sql insert into tb6_1 values('2021-03-03 01:00:00.000', 9961,9961.0,'61'); +sql insert into tb6_1 values('2021-03-04 01:00:00.000', 9961,9961.0,'61'); +sql insert into tb6_1 values('2021-03-05 01:00:00.000', 9961,9961.0,'61'); +sql insert into tb6_2 values('2021-03-01 02:00:00.000', 9962,9962.0,'62'); +sql insert into tb6_2 values('2021-03-02 02:00:00.000', 9962,9962.0,'62'); +sql insert into tb6_2 values('2021-03-03 02:00:00.000', 9962,9962.0,'62'); +sql insert into tb6_2 values('2021-03-04 02:00:00.000', 9962,9962.0,'62'); +sql insert into tb6_2 values('2021-03-05 02:00:00.000', 9962,9962.0,'62'); +sql insert into tb6_3 values('2021-03-01 03:00:00.000', 9963,9963.0,'63'); +sql insert into tb6_3 values('2021-03-02 03:00:00.000', 9963,9963.0,'63'); +sql insert into tb6_3 values('2021-03-03 03:00:00.000', 9963,9963.0,'63'); +sql insert into tb6_3 values('2021-03-04 03:00:00.000', 9963,9963.0,'63'); +sql insert into tb6_3 values('2021-03-05 03:00:00.000', 9963,9963.0,'63'); +sql insert into tb6_4 values('2021-03-01 04:00:00.000', 9964,9964.0,'64'); +sql insert into tb6_4 values('2021-03-02 04:00:00.000', 9964,9964.0,'64'); +sql insert into tb6_4 values('2021-03-03 04:00:00.000', 9964,9964.0,'64'); +sql insert into tb6_4 values('2021-03-04 04:00:00.000', 9964,9964.0,'64'); +sql insert into tb6_4 values('2021-03-05 04:00:00.000', 9964,9964.0,'64'); +sql insert into tb6_5 values('2021-03-01 05:00:00.000', 9965,9965.0,'65'); +sql insert into tb6_5 values('2021-03-02 05:00:00.000', 9965,9965.0,'65'); +sql insert into tb6_5 values('2021-03-03 05:00:00.000', 9965,9965.0,'65'); +sql insert into tb6_5 values('2021-03-04 05:00:00.000', 9965,9965.0,'65'); +sql insert into tb6_5 values('2021-03-05 05:00:00.000', 9965,9965.0,'65'); + +sql insert into tb7_1 values('2021-03-01 01:00:00.000', 9971,9971.0,'71'); +sql insert into tb7_1 values('2021-03-02 01:00:00.000', 9971,9971.0,'71'); +sql insert into tb7_1 values('2021-03-03 01:00:00.000', 9971,9971.0,'71'); +sql insert into 
tb7_1 values('2021-03-04 01:00:00.000', 9971,9971.0,'71'); +sql insert into tb7_1 values('2021-03-05 01:00:00.000', 9971,9971.0,'71'); +sql insert into tb7_2 values('2021-03-01 02:00:00.000', 9972,9972.0,'72'); +sql insert into tb7_2 values('2021-03-02 02:00:00.000', 9972,9972.0,'72'); +sql insert into tb7_2 values('2021-03-03 02:00:00.000', 9972,9972.0,'72'); +sql insert into tb7_2 values('2021-03-04 02:00:00.000', 9972,9972.0,'72'); +sql insert into tb7_2 values('2021-03-05 02:00:00.000', 9972,9972.0,'72'); +sql insert into tb7_3 values('2021-03-01 03:00:00.000', 9973,9973.0,'73'); +sql insert into tb7_3 values('2021-03-02 03:00:00.000', 9973,9973.0,'73'); +sql insert into tb7_3 values('2021-03-03 03:00:00.000', 9973,9973.0,'73'); +sql insert into tb7_3 values('2021-03-04 03:00:00.000', 9973,9973.0,'73'); +sql insert into tb7_3 values('2021-03-05 03:00:00.000', 9973,9973.0,'73'); +sql insert into tb7_4 values('2021-03-01 04:00:00.000', 9974,9974.0,'74'); +sql insert into tb7_4 values('2021-03-02 04:00:00.000', 9974,9974.0,'74'); +sql insert into tb7_4 values('2021-03-03 04:00:00.000', 9974,9974.0,'74'); +sql insert into tb7_4 values('2021-03-04 04:00:00.000', 9974,9974.0,'74'); +sql insert into tb7_4 values('2021-03-05 04:00:00.000', 9974,9974.0,'74'); +sql insert into tb7_5 values('2021-03-01 05:00:00.000', 9975,9975.0,'75'); +sql insert into tb7_5 values('2021-03-02 05:00:00.000', 9975,9975.0,'75'); +sql insert into tb7_5 values('2021-03-03 05:00:00.000', 9975,9975.0,'75'); +sql insert into tb7_5 values('2021-03-04 05:00:00.000', 9975,9975.0,'75'); +sql insert into tb7_5 values('2021-03-05 05:00:00.000', 9975,9975.0,'75'); + +sql insert into tb8_1 values('2021-03-01 01:00:00.000', 9981,9981.0,'81'); +sql insert into tb8_1 values('2021-03-02 01:00:00.000', 9981,9981.0,'81'); +sql insert into tb8_1 values('2021-03-03 01:00:00.000', 9981,9981.0,'81'); +sql insert into tb8_1 values('2021-03-04 01:00:00.000', 9981,9981.0,'81'); +sql insert into tb8_1 values('2021-03-05 01:00:00.000', 9981,9981.0,'81'); +sql insert into tb8_2 values('2021-03-01 02:00:00.000', 9982,9982.0,'82'); +sql insert into tb8_2 values('2021-03-02 02:00:00.000', 9982,9982.0,'82'); +sql insert into tb8_2 values('2021-03-03 02:00:00.000', 9982,9982.0,'82'); +sql insert into tb8_2 values('2021-03-04 02:00:00.000', 9982,9982.0,'82'); +sql insert into tb8_2 values('2021-03-05 02:00:00.000', 9982,9982.0,'82'); +sql insert into tb8_3 values('2021-03-01 03:00:00.000', 9983,9983.0,'83'); +sql insert into tb8_3 values('2021-03-02 03:00:00.000', 9983,9983.0,'83'); +sql insert into tb8_3 values('2021-03-03 03:00:00.000', 9983,9983.0,'83'); +sql insert into tb8_3 values('2021-03-04 03:00:00.000', 9983,9983.0,'83'); +sql insert into tb8_3 values('2021-03-05 03:00:00.000', 9983,9983.0,'83'); +sql insert into tb8_4 values('2021-03-01 04:00:00.000', 9984,9984.0,'84'); +sql insert into tb8_4 values('2021-03-02 04:00:00.000', 9984,9984.0,'84'); +sql insert into tb8_4 values('2021-03-03 04:00:00.000', 9984,9984.0,'84'); +sql insert into tb8_4 values('2021-03-04 04:00:00.000', 9984,9984.0,'84'); +sql insert into tb8_4 values('2021-03-05 04:00:00.000', 9984,9984.0,'84'); +sql insert into tb8_5 values('2021-03-01 05:00:00.000', 9985,9985.0,'85'); +sql insert into tb8_5 values('2021-03-02 05:00:00.000', 9985,9985.0,'85'); +sql insert into tb8_5 values('2021-03-03 05:00:00.000', 9985,9985.0,'85'); +sql insert into tb8_5 values('2021-03-04 05:00:00.000', 9985,9985.0,'85'); +sql insert into tb8_5 values('2021-03-05 05:00:00.000', 
9985,9985.0,'85'); + +sql insert into tb9_1 values('2021-03-01 01:00:00.000', 9991,9991.0,'91'); +sql insert into tb9_1 values('2021-03-02 01:00:00.000', 9991,9991.0,'91'); +sql insert into tb9_1 values('2021-03-03 01:00:00.000', 9991,9991.0,'91'); +sql insert into tb9_1 values('2021-03-04 01:00:00.000', 9991,9991.0,'91'); +sql insert into tb9_1 values('2021-03-05 01:00:00.000', 9991,9991.0,'91'); +sql insert into tb9_2 values('2021-03-01 02:00:00.000', 9992,9992.0,'92'); +sql insert into tb9_2 values('2021-03-02 02:00:00.000', 9992,9992.0,'92'); +sql insert into tb9_2 values('2021-03-03 02:00:00.000', 9992,9992.0,'92'); +sql insert into tb9_2 values('2021-03-04 02:00:00.000', 9992,9992.0,'92'); +sql insert into tb9_2 values('2021-03-05 02:00:00.000', 9992,9992.0,'92'); +sql insert into tb9_3 values('2021-03-01 03:00:00.000', 9993,9993.0,'93'); +sql insert into tb9_3 values('2021-03-02 03:00:00.000', 9993,9993.0,'93'); +sql insert into tb9_3 values('2021-03-03 03:00:00.000', 9993,9993.0,'93'); +sql insert into tb9_3 values('2021-03-04 03:00:00.000', 9993,9993.0,'93'); +sql insert into tb9_3 values('2021-03-05 03:00:00.000', 9993,9993.0,'93'); +sql insert into tb9_4 values('2021-03-01 04:00:00.000', 9994,9994.0,'94'); +sql insert into tb9_4 values('2021-03-02 04:00:00.000', 9994,9994.0,'94'); +sql insert into tb9_4 values('2021-03-03 04:00:00.000', 9994,9994.0,'94'); +sql insert into tb9_4 values('2021-03-04 04:00:00.000', 9994,9994.0,'94'); +sql insert into tb9_4 values('2021-03-05 04:00:00.000', 9994,9994.0,'94'); +sql insert into tb9_5 values('2021-03-01 05:00:00.000', 9995,9995.0,'95'); +sql insert into tb9_5 values('2021-03-02 05:00:00.000', 9995,9995.0,'95'); +sql insert into tb9_5 values('2021-03-03 05:00:00.000', 9995,9995.0,'95'); +sql insert into tb9_5 values('2021-03-04 05:00:00.000', 9995,9995.0,'95'); +sql insert into tb9_5 values('2021-03-05 05:00:00.000', 9995,9995.0,'95'); + +sql insert into tba_1 values('2021-03-01 01:00:00.000', 99101,99101.0,'a1'); +sql insert into tba_1 values('2021-03-02 01:00:00.000', 99101,99101.0,'a1'); +sql insert into tba_1 values('2021-03-03 01:00:00.000', 99101,99101.0,'a1'); +sql insert into tba_1 values('2021-03-04 01:00:00.000', 99101,99101.0,'a1'); +sql insert into tba_1 values('2021-03-05 01:00:00.000', 99101,99101.0,'a1'); +sql insert into tba_2 values('2021-03-01 02:00:00.000', 99102,99102.0,'a2'); +sql insert into tba_2 values('2021-03-02 02:00:00.000', 99102,99102.0,'a2'); +sql insert into tba_2 values('2021-03-03 02:00:00.000', 99102,99102.0,'a2'); +sql insert into tba_2 values('2021-03-04 02:00:00.000', 99102,99102.0,'a2'); +sql insert into tba_2 values('2021-03-05 02:00:00.000', 99102,99102.0,'a2'); +sql insert into tba_3 values('2021-03-01 03:00:00.000', 99103,99103.0,'a3'); +sql insert into tba_3 values('2021-03-02 03:00:00.000', 99103,99103.0,'a3'); +sql insert into tba_3 values('2021-03-03 03:00:00.000', 99103,99103.0,'a3'); +sql insert into tba_3 values('2021-03-04 03:00:00.000', 99103,99103.0,'a3'); +sql insert into tba_3 values('2021-03-05 03:00:00.000', 99103,99103.0,'a3'); +sql insert into tba_4 values('2021-03-01 04:00:00.000', 99104,99104.0,'a4'); +sql insert into tba_4 values('2021-03-02 04:00:00.000', 99104,99104.0,'a4'); +sql insert into tba_4 values('2021-03-03 04:00:00.000', 99104,99104.0,'a4'); +sql insert into tba_4 values('2021-03-04 04:00:00.000', 99104,99104.0,'a4'); +sql insert into tba_4 values('2021-03-05 04:00:00.000', 99104,99104.0,'a4'); +sql insert into tba_5 values('2021-03-01 05:00:00.000', 
99105,99105.0,'a5'); +sql insert into tba_5 values('2021-03-02 05:00:00.000', 99105,99105.0,'a5'); +sql insert into tba_5 values('2021-03-03 05:00:00.000', 99105,99105.0,'a5'); +sql insert into tba_5 values('2021-03-04 05:00:00.000', 99105,99105.0,'a5'); +sql insert into tba_5 values('2021-03-05 05:00:00.000', 99105,99105.0,'a5'); + +sql insert into tbb_1 values('2021-03-01 01:00:00.000', 99111,99111.0,'b1'); +sql insert into tbb_1 values('2021-03-02 01:00:00.000', 99111,99111.0,'b1'); +sql insert into tbb_1 values('2021-03-03 01:00:00.000', 99111,99111.0,'b1'); +sql insert into tbb_1 values('2021-03-04 01:00:00.000', 99111,99111.0,'b1'); +sql insert into tbb_1 values('2021-03-05 01:00:00.000', 99111,99111.0,'b1'); +sql insert into tbb_2 values('2021-03-01 02:00:00.000', 99112,99112.0,'b2'); +sql insert into tbb_2 values('2021-03-02 02:00:00.000', 99112,99112.0,'b2'); +sql insert into tbb_2 values('2021-03-03 02:00:00.000', 99112,99112.0,'b2'); +sql insert into tbb_2 values('2021-03-04 02:00:00.000', 99112,99112.0,'b2'); +sql insert into tbb_2 values('2021-03-05 02:00:00.000', 99112,99112.0,'b2'); +sql insert into tbb_3 values('2021-03-01 03:00:00.000', 99113,99113.0,'b3'); +sql insert into tbb_3 values('2021-03-02 03:00:00.000', 99113,99113.0,'b3'); +sql insert into tbb_3 values('2021-03-03 03:00:00.000', 99113,99113.0,'b3'); +sql insert into tbb_3 values('2021-03-04 03:00:00.000', 99113,99113.0,'b3'); +sql insert into tbb_3 values('2021-03-05 03:00:00.000', 99113,99113.0,'b3'); +sql insert into tbb_4 values('2021-03-01 04:00:00.000', 99114,99114.0,'b4'); +sql insert into tbb_4 values('2021-03-02 04:00:00.000', 99114,99114.0,'b4'); +sql insert into tbb_4 values('2021-03-03 04:00:00.000', 99114,99114.0,'b4'); +sql insert into tbb_4 values('2021-03-04 04:00:00.000', 99114,99114.0,'b4'); +sql insert into tbb_4 values('2021-03-05 04:00:00.000', 99114,99114.0,'b4'); +sql insert into tbb_5 values('2021-03-01 05:00:00.000', 99115,99115.0,'b5'); +sql insert into tbb_5 values('2021-03-02 05:00:00.000', 99115,99115.0,'b5'); +sql insert into tbb_5 values('2021-03-03 05:00:00.000', 99115,99115.0,'b5'); +sql insert into tbb_5 values('2021-03-04 05:00:00.000', 99115,99115.0,'b5'); +sql insert into tbb_5 values('2021-03-05 05:00:00.000', 99115,99115.0,'b5'); + + +sql select * from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi +if $data09 != @21-03-01 01:00:00.000@ then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi +if $data19 != @21-03-02 01:00:00.000@ then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9901 then + return -1 +endi +if $data22 != 9901.000000000 then + return -1 +endi +if $data23 != 01 then + 
return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi +if $data29 != @21-03-03 01:00:00.000@ then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9901 then + return -1 +endi +if $data32 != 9901.000000000 then + return -1 +endi +if $data33 != 01 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi +if $data39 != @21-03-04 01:00:00.000@ then + return -1 +endi + + + + +sql select * from st0, st1 where st0.ts=st1.ts and st0.id2=st1.id2; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi +if $data09 != @21-03-01 01:00:00.000@ then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi +if $data19 != @21-03-02 01:00:00.000@ then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9901 then + return -1 +endi +if $data22 != 9901.000000000 then + return -1 +endi +if $data23 != 01 then + return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi +if $data29 != @21-03-03 01:00:00.000@ then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9901 then + return -1 +endi +if $data32 != 9901.000000000 then + return -1 +endi +if $data33 != 01 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi +if $data39 != @21-03-04 01:00:00.000@ then + return -1 +endi + + + + +sql select * from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi +if $data09 != @21-03-01 01:00:00.000@ then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return 
-1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi +if $data19 != @21-03-02 01:00:00.000@ then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9901 then + return -1 +endi +if $data22 != 9901.000000000 then + return -1 +endi +if $data23 != 01 then + return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi +if $data29 != @21-03-03 01:00:00.000@ then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9901 then + return -1 +endi +if $data32 != 9901.000000000 then + return -1 +endi +if $data33 != 01 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi +if $data39 != @21-03-04 01:00:00.000@ then + return -1 +endi + + + + + +sql select * from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi +if $data09 != @21-03-01 01:00:00.000@ then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi +if $data19 != @21-03-02 01:00:00.000@ then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9901 then + return -1 +endi +if $data22 != 9901.000000000 then + return -1 +endi +if $data23 != 01 then + return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi +if $data29 != @21-03-03 01:00:00.000@ then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9901 then + return -1 +endi +if $data32 != 9901.000000000 then + return -1 +endi +if $data33 != 01 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi +if $data39 != @21-03-04 01:00:00.000@ then + return -1 +endi + + + + +sql select st0.* from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 
9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9901 then + return -1 +endi +if $data22 != 9901.000000000 then + return -1 +endi +if $data23 != 01 then + return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9901 then + return -1 +endi +if $data32 != 9901.000000000 then + return -1 +endi +if $data33 != 01 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi + + + + +sql select st0.* from st0, st1 where st0.ts=st1.ts and st1.id2=st0.id2; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9901 then + return -1 +endi +if $data22 != 9901.000000000 then + return -1 +endi +if $data23 != 01 then + return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9901 then + return -1 +endi +if $data32 != 9901.000000000 then + return -1 +endi +if $data33 != 01 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi + + + + + +sql select st0.* from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return 
-1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9901 then + return -1 +endi +if $data22 != 9901.000000000 then + return -1 +endi +if $data23 != 01 then + return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9901 then + return -1 +endi +if $data32 != 9901.000000000 then + return -1 +endi +if $data33 != 01 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi + + + + +sql select st1.* from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9911 then + return -1 +endi +if $data02 != 9911.000000000 then + return -1 +endi +if $data03 != 11 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + print $data07 + return -1 +endi +if $data08 != 3 then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9911 then + return -1 +endi +if $data12 != 9911.000000000 then + return -1 +endi +if $data13 != 11 then + return -1 +endi +if $data14 != 0 then + return -1 +endi +if $data15 != 1 then + return -1 +endi +if $data16 != 2.000000000 then + return -1 +endi +if $data17 != 1 then + return -1 +endi +if $data18 != 3 then + return -1 +endi + + + +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != 9911 then + return -1 +endi +if $data22 != 9911.000000000 then + return -1 +endi +if $data23 != 11 then + return -1 +endi +if $data24 != 0 then + return -1 +endi +if $data25 != 1 then + return -1 +endi +if $data26 != 2.000000000 then + return -1 +endi +if $data27 != 1 then + return -1 +endi +if $data28 != 3 then + return -1 +endi + + +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != 9911 then + return -1 +endi +if $data32 != 9911.000000000 then + return -1 +endi +if $data33 != 11 then + return -1 +endi +if $data34 != 0 then + return -1 +endi +if $data35 != 1 then + return -1 +endi +if $data36 != 2.000000000 then + return -1 +endi +if $data37 != 1 then + return -1 +endi +if $data38 != 3 then + return -1 +endi + + +sql select st0.f1,st1.f1 from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1; + +if $rows != 25 then + return -1 +endi + +if $data00 != 9901 then + return -1 +endi +if $data01 != 9911 then + 
return -1 +endi +if $data10 != 9901 then + return -1 +endi +if $data11 != 9911 then + return -1 +endi +if $data20 != 9901 then + return -1 +endi +if $data21 != 9911 then + return -1 +endi +if $data30 != 9901 then + return -1 +endi +if $data31 != 9911 then + return -1 +endi + + + + +sql select st0.ts,st1.ts from st0, st1 where st0.ts=st1.ts and st1.id2=st0.id2; + + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data20 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data21 != @21-03-03 01:00:00.000@ then + return -1 +endi +if $data30 != @21-03-04 01:00:00.000@ then + return -1 +endi +if $data31 != @21-03-04 01:00:00.000@ then + return -1 +endi + + + +sql select st1.ts,st0.ts,st0.id3,st1.id3,st0.f3,st1.f3 from st0, st1 where st0.id3=st1.id3 and st1.ts=st0.ts; + + +if $rows != 25 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data02 != 2.000000000 then + return -1 +endi +if $data03 != 2.000000000 then + return -1 +endi +if $data04 != 01 then + return -1 +endi +if $data05 != 11 then + return -1 +endi + +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data02 != 2.000000000 then + return -1 +endi +if $data03 != 2.000000000 then + return -1 +endi +if $data04 != 01 then + return -1 +endi +if $data05 != 11 then + return -1 +endi + + + +sql select st0.ts,st0.f2,st1.f3,st1.f2,st0.f3 from st0, st1 where st1.id5=st0.id5 and st0.ts=st1.ts; +if $rows != 25 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901.000000000 then + return -1 +endi +if $data02 != 11 then + return -1 +endi +if $data03 != 9911.000000000 then + return -1 +endi +if $data04 != 01 then + return -1 +endi +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901.000000000 then + return -1 +endi +if $data12 != 11 then + return -1 +endi +if $data13 != 9911.000000000 then + return -1 +endi +if $data14 != 01 then + return -1 +endi + + + + +sql select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(10a); +if $rows != 25 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data02 != 9901 then + return -1 +endi +if $data03 != 9901.000000000 then + return -1 +endi +if $data04 != 01 then + return -1 +endi +if $data05 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data06 != 9911 then + return -1 +endi +if $data07 != 9911.000000000 then + return -1 +endi +if $data08 != 11 then + return -1 +endi +if $data10 != @21-03-01 02:00:00.000@ then + return -1 +endi +if $data11 != @21-03-01 02:00:00.000@ then + return -1 +endi +if $data12 != 9902 then + return -1 +endi +if $data13 != 9902.000000000 then + return -1 +endi +if $data14 != 02 then + return -1 +endi +if $data15 != @21-03-01 02:00:00.000@ then + return -1 +endi +if $data16 != 9912 then + return -1 +endi +if $data17 != 9912.000000000 then + return -1 +endi +if $data18 != 12 then + return -1 +endi + + + +sql select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval(1d) sliding(1d); +if $rows != 5 then + return -1 +endi +if $data00 != @21-03-01 
00:00:00.000@ then + return -1 +endi +if $data01 != @21-03-01 05:00:00.000@ then + return -1 +endi +if $data02 != 9905 then + return -1 +endi +if $data03 != 9905.000000000 then + return -1 +endi +if $data04 != 05 then + return -1 +endi +if $data05 != @21-03-01 05:00:00.000@ then + return -1 +endi +if $data06 != 9915 then + return -1 +endi +if $data07 != 9915.000000000 then + return -1 +endi +if $data08 != 15 then + return -1 +endi +if $data10 != @21-03-02 00:00:00.000@ then + return -1 +endi +if $data11 != @21-03-02 05:00:00.000@ then + return -1 +endi +if $data12 != 9905 then + return -1 +endi +if $data13 != 9905.000000000 then + return -1 +endi +if $data14 != 05 then + return -1 +endi +if $data15 != @21-03-02 05:00:00.000@ then + return -1 +endi +if $data16 != 9915 then + return -1 +endi +if $data17 != 9915.000000000 then + return -1 +endi +if $data18 != 15 then + return -1 +endi + + + +sql select st0.*,st1.* from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1; +if $rows != 25 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + return -1 +endi +if $data08 != 3 then + return -1 +endi +if $data09 != @21-03-01 01:00:00.000@ then + return -1 +endi + + + + + +sql select st0.ts,* from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 order by st0.ts; + +if $rows != 25 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data02 != 9901 then + return -1 +endi +if $data03 != 9901.000000000 then + return -1 +endi +if $data04 != 01 then + return -1 +endi +if $data05 != 0 then + return -1 +endi +if $data06 != 1 then + return -1 +endi +if $data07 != 2.000000000 then + return -1 +endi +if $data08 != 1 then + return -1 +endi +if $data09 != 3 then + return -1 +endi + + + + + +sql select st0.*,st1.* from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1 order by st0.ts limit 5 offset 5 +if $rows != 5 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + print $data00 + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + return -1 +endi +if $data08 != 3 then + return -1 +endi +if $data09 != @21-03-01 01:00:00.000@ then + return -1 +endi + + + +sql select top(st1.f1, 5) from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1; +if $rows != 5 then + return -1 +endi +if $data00 != @21-03-01 05:00:00.000@ then + return -1 +endi +if $data01 != 9915 then + return -1 +endi +if $data10 != @21-03-02 05:00:00.000@ then + return -1 +endi +if $data11 != 9915 then + return -1 +endi +if $data20 != @21-03-03 05:00:00.000@ then + return -1 +endi +if $data21 != 9915 then + return -1 +endi +if $data30 != @21-03-04 05:00:00.000@ then + return -1 +endi +if $data31 != 9915 then + return -1 +endi +if $data40 != @21-03-05 05:00:00.000@ then + return -1 +endi +if $data41 != 9915 then + return -1 +endi + + + + + +sql select top(st0.f1,5) 
from st0, st1 where st1.id1=st0.id1 and st0.ts=st1.ts and st1.ts=st0.ts and st0.id1=st1.id1; +if $rows != 5 then + return -1 +endi +if $data00 != @21-03-01 05:00:00.000@ then + return -1 +endi +if $data01 != 9905 then + return -1 +endi +if $data10 != @21-03-02 05:00:00.000@ then + return -1 +endi +if $data11 != 9905 then + return -1 +endi +if $data20 != @21-03-03 05:00:00.000@ then + return -1 +endi +if $data21 != 9905 then + return -1 +endi +if $data30 != @21-03-04 05:00:00.000@ then + return -1 +endi +if $data31 != 9905 then + return -1 +endi +if $data40 != @21-03-05 05:00:00.000@ then + return -1 +endi +if $data41 != 9905 then + return -1 +endi + + +#sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st3.id1 and st3.ts=st2.ts and st2.id1=st1.id1 and st1.ts=st0.ts; +#sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st3.id1 and st3.ts=st2.ts and st2.id1=st1.id1 and st1.ts=st0.ts and st0.id1=st2.id1 and st1.ts=st2.ts; +#if $rows != 25 then +# print $rows +# return -1 +#endi +#if $data00 != @21-03-01 01:00:00.000@ then +# return -1 +#endi +#if $data01 != 9901 then +# return -1 +#endi +#if $data02 != 9901.000000000 then +# return -1 +#endi +#if $data03 != 01 then +# return -1 +#endi +#if $data04 != 0 then +# return -1 +#endi +#if $data05 != 1 then +# return -1 +#endi +#if $data06 != 2.000000000 then +# return -1 +#endi +#if $data07 != 1 then +# return -1 +#endi +#if $data08 != 3 then +# return -1 +#endi +#if $data09 != @21-03-01 01:00:00.000@ then +# return -1 +#endi + + + +#sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st1.id1 and st1.ts=st0.ts and st2.id1=st3.id1 and st3.ts=st2.ts; +#sql select st0.*,st1.*,st2.*,st3.* from st3,st2,st1,st0 where st0.id1=st1.id1 and st1.ts=st0.ts and st2.id1=st3.id1 and st3.ts=st2.ts and st0.id1=st2.id1 and st0.ts=st2.ts; +#if $rows != 25 then +# return -1 +#endi +#if $data00 != @21-03-01 01:00:00.000@ then +# return -1 +#endi +#if $data01 != 9901 then +# return -1 +#endi +#if $data02 != 9901.000000000 then +# return -1 +#endi +#if $data03 != 01 then +# return -1 +#endi +#if $data04 != 0 then +# return -1 +#endi +#if $data05 != 1 then +# return -1 +#endi +#if $data06 != 2.000000000 then +# return -1 +#endi +#if $data07 != 1 then +# return -1 +#endi +#if $data08 != 3 then +# return -1 +#endi +#if $data09 != @21-03-01 01:00:00.000@ then +# return -1 +#endi + + + +sql select st0.*,st1.*,st2.*,st3.*,st4.*,st5.*,st6.*,st7.*,st8.*,st9.* from st0,st1,st2,st3,st4,st5,st6,st7,st8,st9 where st0.ts=st2.ts and st0.ts=st4.ts and st0.ts=st6.ts and st0.ts=st8.ts and st1.ts=st3.ts and st3.ts=st5.ts and st5.ts=st7.ts and st7.ts=st9.ts and st0.ts=st1.ts and st0.id1=st2.id1 and st0.id1=st4.id1 and st0.id1=st6.id1 and st0.id1=st8.id1 and st1.id1=st3.id1 and st3.id1=st5.id1 and st5.id1=st7.id1 and st7.id1=st9.id1 and st0.id1=st1.id1; +if $rows != 25 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != 0 then + return -1 +endi +if $data05 != 1 then + return -1 +endi +if $data06 != 2.000000000 then + return -1 +endi +if $data07 != 1 then + return -1 +endi +if $data08 != 3 then + return -1 +endi +if $data09 != @21-03-01 01:00:00.000@ then + return -1 +endi + + + +sql select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts; +if $rows != 5 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if 
$data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data05 != 9911 then + return -1 +endi +if $data06 != 9911.000000000 then + return -1 +endi +if $data07 != 11 then + return -1 +endi +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data15 != 9911 then + return -1 +endi +if $data16 != 9911.000000000 then + return -1 +endi +if $data17 != 11 then + return -1 +endi + + + +sql select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts and tb0_1.id1=tb1_1.id1; +if $rows != 5 then + return -1 +endi +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data05 != 9911 then + return -1 +endi +if $data06 != 9911.000000000 then + return -1 +endi +if $data07 != 11 then + return -1 +endi +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data15 != 9911 then + return -1 +endi +if $data16 != 9911.000000000 then + return -1 +endi +if $data17 != 11 then + return -1 +endi + + +sql select tb0_1.*, tb1_2.*,tb2_3.*,tb3_4.*,tb4_5.* from tb0_1, tb1_2, tb2_3, tb3_4, tb4_5 where tb0_1.ts=tb1_2.ts and tb0_1.ts=tb2_3.ts and tb0_1.ts=tb3_4.ts and tb0_1.ts=tb4_5.ts; +if $rows != 0 then + return -1 +endi + + + +sql select tb0_1.*, tb1_1.*,tb2_1.*,tb3_1.*,tb4_1.* from tb0_1, tb1_1, tb2_1, tb3_1, tb4_1 where tb0_1.ts=tb1_1.ts and tb0_1.ts=tb2_1.ts and tb0_1.ts=tb3_1.ts and tb0_1.ts=tb4_1.ts; + +if $rows != 5 then + return -1 +endi + +if $data00 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data01 != 9901 then + return -1 +endi +if $data02 != 9901.000000000 then + return -1 +endi +if $data03 != 01 then + return -1 +endi +if $data04 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data05 != 9911 then + return -1 +endi +if $data06 != 9911.000000000 then + return -1 +endi +if $data07 != 11 then + return -1 +endi +if $data08 != @21-03-01 01:00:00.000@ then + return -1 +endi +if $data09 != 9921 then + return -1 +endi +if $data10 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data11 != 9901 then + return -1 +endi +if $data12 != 9901.000000000 then + return -1 +endi +if $data13 != 01 then + return -1 +endi +if $data14 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data15 != 9911 then + return -1 +endi +if $data16 != 9911.000000000 then + return -1 +endi +if $data17 != 11 then + return -1 +endi +if $data18 != @21-03-02 01:00:00.000@ then + return -1 +endi +if $data19 != 9921 then + return -1 +endi + + + +sql select tb0_5.*, tb1_5.*,tb2_5.*,tb3_5.*,tb4_5.*,tb5_5.*, tb6_5.*,tb7_5.*,tb8_5.*,tb9_5.* from tb0_5, tb1_5, tb2_5, tb3_5, tb4_5,tb5_5, tb6_5, tb7_5, tb8_5, tb9_5 where tb9_5.ts=tb8_5.ts and tb8_5.ts=tb7_5.ts and tb7_5.ts=tb6_5.ts and tb6_5.ts=tb5_5.ts and tb5_5.ts=tb4_5.ts and tb4_5.ts=tb3_5.ts and tb3_5.ts=tb2_5.ts and tb2_5.ts=tb1_5.ts and tb1_5.ts=tb0_5.ts; + +if $rows != 5 then + return -1 +endi + +if 
$data00 != @21-03-01 05:00:00.000@ then + return -1 +endi +if $data01 != 9905 then + return -1 +endi +if $data02 != 9905.000000000 then + return -1 +endi +if $data03 != 05 then + return -1 +endi +if $data04 != @21-03-01 05:00:00.000@ then + return -1 +endi +if $data05 != 9915 then + return -1 +endi +if $data06 != 9915.000000000 then + return -1 +endi +if $data07 != 15 then + return -1 +endi +if $data08 != @21-03-01 05:00:00.000@ then + return -1 +endi +if $data09 != 9925 then + return -1 +endi +if $data10 != @21-03-02 05:00:00.000@ then + return -1 +endi +if $data11 != 9905 then + return -1 +endi +if $data12 != 9905.000000000 then + return -1 +endi +if $data13 != 05 then + return -1 +endi +if $data14 != @21-03-02 05:00:00.000@ then + return -1 +endi +if $data15 != 9915 then + return -1 +endi +if $data16 != 9915.000000000 then + return -1 +endi +if $data17 != 15 then + return -1 +endi +if $data18 != @21-03-02 05:00:00.000@ then + return -1 +endi +if $data19 != 9925 then + return -1 +endi + + +sql_error select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.f1=tb1_1.f1; +sql_error select tb0_1.*, tb1_1.* from tb0_1, tb1_1 where tb0_1.ts=tb1_1.ts and tb0_1.id1=tb1_1.id2; +sql_error select tb0_5.*, tb1_5.*,tb2_5.*,tb3_5.*,tb4_5.*,tb5_5.*, tb6_5.*,tb7_5.*,tb8_5.*,tb9_5.*,tba_5.* from tb0_5, tb1_5, tb2_5, tb3_5, tb4_5,tb5_5, tb6_5, tb7_5, tb8_5, tb9_5, tba_5 where tb9_5.ts=tb8_5.ts and tb8_5.ts=tb7_5.ts and tb7_5.ts=tb6_5.ts and tb6_5.ts=tb5_5.ts and tb5_5.ts=tb4_5.ts and tb4_5.ts=tb3_5.ts and tb3_5.ts=tb2_5.ts and tb2_5.ts=tb1_5.ts and tb1_5.ts=tb0_5.ts and tb0_5.ts=tba_5.ts; + +sql_error select * from st0, st1 where st0.ts=st1.ts; +sql_error select * from st0, st1 where st0.id1=st1.id1; +sql_error select * from st0, st1 where st0.f1=st1.f1 and st0.id1=st1.id1; +sql_error select * from st0, st1, st2, st3 where st0.id1=st1.id1 and st2.id1=st3.id1 and st0.ts=st1.ts and st1.ts=st2.ts and st2.ts=st3.ts; +sql_error select * from st0, st1, st2 where st0.id1=st1.id1; +sql_error select * from st0, st1 where st0.id1=st1.id1 and st0.id2=st1.id3; +sql_error select * from st0, st1 where st0.id1=st1.id1 or st0.ts=st1.ts; +sql_error select * from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 or st0.id2=st1.id2; +sql_error select * from st0, st1, st2 where st0.ts=st1.ts and st0.id1=st1.id1; +sql_error select * from st0, st1 where st0.id1=st1.ts and st0.ts=st1.id1; +sql_error select * from st0, st1 where st0.id1=st1.id2 and st0.ts=st1.ts; +sql_error select * from st0, st1 where st1.id4=st0.id4 and st1.ts=st0.ts; +sql_error select * from st0, st1 where st0.id1=st1.id2 and st1.ts=st0.ts; +sql_error select * from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 interval 10a; +sql_error select last(*) from st0, st1 where st0.ts=st1.ts and st0.id1=st1.id1 group by f1; +sql_error select st0.*,st1.*,st2.*,st3.*,st4.*,st5.*,st6.*,st7.*,st8.*,st9.* from st0,st1,st2,st3,st4,st5,st6,st7,st8,st9 where st0.ts=st2.ts and st0.ts=st4.ts and st0.ts=st6.ts and st0.ts=st8.ts and st1.ts=st3.ts and st3.ts=st5.ts and st5.ts=st7.ts and st7.ts=st9.ts and st0.id1=st2.id1 and st0.id1=st4.id1 and st0.id1=st6.id1 and st0.id1=st8.id1 and st1.id1=st3.id1 and st3.id1=st5.id1 and st5.id1=st7.id1 and st7.id1=st9.id1; +sql_error select st0.*,st1.*,st2.*,st3.*,st4.*,st5.*,st6.*,st7.*,st8.*,st9.* from st0,st1,st2,st3,st4,st5,st6,st7,st8,st9,sta where st0.ts=st2.ts and st0.ts=st4.ts and st0.ts=st6.ts and st0.ts=st8.ts and st1.ts=st3.ts and st3.ts=st5.ts and st5.ts=st7.ts and st7.ts=st9.ts and st0.ts=st1.ts and st0.id1=st2.id1 and 
st0.id1=st4.id1 and st0.id1=st6.id1 and st0.id1=st8.id1 and st1.id1=st3.id1 and st3.id1=st5.id1 and st5.id1=st7.id1 and st7.id1=st9.id1 and st0.id1=st1.id1 and st0.id1=sta.id1 and st0.ts=sta.ts;
+
+
+
+
+
+
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/script/general/parser/topbot.sim b/tests/script/general/parser/topbot.sim
index e23bbf6724..5c61acb2ba 100644
--- a/tests/script/general/parser/topbot.sim
+++ b/tests/script/general/parser/topbot.sim
@@ -325,4 +325,56 @@ if $row != 0 then
   return -1
 endi
+print ===============================>td-3621
+sql create table ttm2(ts timestamp, k bool);
+sql insert into ttm2 values('2021-1-1 1:1:1', true)
+sql insert into ttm2 values('2021-1-1 1:1:2', NULL)
+sql insert into ttm2 values('2021-1-1 1:1:3', false)
+sql select * from ttm2 where k is not null
+if $row != 2 then
+  return -1
+endi
+
+if $data00 != @21-01-01 01:01:01.000@ then
+  print expect 21-01-01 01:01:01.000, actual $data00
+  return -1
+endi
+
+sql select * from ttm2 where k is null
+if $row != 1 then
+  return -1
+endi
+
+if $data00 != @21-01-01 01:01:02.000@ then
+  return -1
+endi
+
+sql select * from ttm2 where k=true
+if $row != 1 then
+  return -1
+endi
+
+if $data00 != @21-01-01 01:01:01.000@ then
+  return -1
+endi
+
+sql select * from ttm2 where k=false
+if $row != 1 then
+  return -1
+endi
+
+if $data00 != @21-01-01 01:01:03.000@ then
+  return -1
+endi
+
+sql select * from ttm2 where k<>false
+if $row != 1 then
+  return -1
+endi
+
+sql_error select * from ttm2 where k=null
+sql_error select * from ttm2 where k<>null
+sql_error select * from ttm2 where k like null
+sql_error select * from ttm2 where k ../../../sim/case.log 2>&1 && \
+      ./test.sh -f $case > case.log 2>&1 && \
       ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \
       ( grep -q 'script.*success.*m$' ../../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \
-      ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat ../../../sim/case.log )
+      ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log )
     else
       echo -n $case
       ./test.sh -f $case > ../../sim/case.log 2>&1 && \
       ( grep -q 'script.*'$case'.*failed.*, err.*lineNum' ../../sim/tsim/log/taoslog0.0 && echo -e "${RED} failed${NC}" | tee -a out.log || echo -e "${GREEN} success${NC}" | tee -a out.log )|| \
       ( grep -q 'script.*success.*m$' ../../sim/tsim/log/taoslog0.0 && echo -e "${GREEN} success${NC}" | tee -a out.log ) || \
-      ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat ../../sim/case.log )
+      ( echo -e "${RED} failed${NC}" | tee -a out.log && echo '=====================log=====================' && cat case.log )
     fi
     out_log=`tail -1 out.log `
     if [[ $out_log =~ 'failed' ]];then
+      rm case.log
       if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
         cp -r ../../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S"`
-        rm -rf ../../../sim/case.log
       else
         cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
-        rm -rf ../../sim/case.log
       fi
-      exit 8
+      dohavecore $2 1
+      if [[ $2 == 1 ]];then
+        exit 8
+      fi
     fi
     end_time=`date +%s`
     echo execution time of $case was `expr $end_time - $start_time`s. | tee -a out.log
-    dohavecore $2
+    dohavecore $2 1
   fi
 done
 rm -rf ../../../sim/case.log
@@ -136,7 +152,6 @@ function runPyCaseOneByOne {
       else
         $line > /dev/null 2>&1
       fi
-      dohavecore 0
     fi
   done < $1
 }
@@ -162,23 +177,26 @@ function runPyCaseOneByOnefq() {
      start_time=`date +%s`
      date +%F\ %T | tee -a pytest-out.log
      echo -n $case
-      $line > ../../sim/case.log 2>&1 && \
+      $line > case.log 2>&1 && \
       echo -e "${GREEN} success${NC}" | tee -a pytest-out.log || \
       echo -e "${RED} failed${NC}" | tee -a pytest-out.log
      end_time=`date +%s`
      out_log=`tail -1 pytest-out.log `
      if [[ $out_log =~ 'failed' ]];then
        cp -r ../../sim ~/sim_`date "+%Y_%m_%d_%H:%M:%S" `
-        echo '=====================log====================='
-        cat ../../sim/case.log
-        rm -rf ../../sim/case.log
-        exit 8
+        echo '=====================log===================== '
+        cat case.log
+        rm -rf case.log
+        dohavecore $2 2
+        if [[ $2 == 1 ]];then
+          exit 8
+        fi
      fi
      echo execution time of $case was `expr $end_time - $start_time`s. | tee -a pytest-out.log
    else
      $line > /dev/null 2>&1
    fi
-    dohavecore $2
+    dohavecore $2 2
   fi
 done
 rm -rf ../../sim/case.log
@@ -188,9 +206,10 @@
 totalFailed=0
 totalPyFailed=0
 totalJDBCFailed=0
 totalUnitFailed=0
+totalExampleFailed=0
 corepath=`grep -oP '.*(?=core_)' /proc/sys/kernel/core_pattern||grep -oP '.*(?=core-)' /proc/sys/kernel/core_pattern`
-if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
+if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then
   echo "### run TSIM test case ###"
   cd $tests_dir/script
@@ -205,15 +224,15 @@ if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
     echo "### run TSIM b1 test ###"
     runSimCaseOneByOnefq b1 0
     runSimCaseOneByOnefq b4 0
-    runSimCaseOneByOnefq b5 0
-    runSimCaseOneByOnefq b6 0
     runSimCaseOneByOnefq b7 0
   elif [ "$1" == "b2" ]; then
     echo "### run TSIM b2 test ###"
     runSimCaseOneByOnefq b2 0
+    runSimCaseOneByOnefq b5 0
   elif [ "$1" == "b3" ]; then
     echo "### run TSIM b3 test ###"
     runSimCaseOneByOnefq b3 0
+    runSimCaseOneByOnefq b6 0
   elif [ "$1" == "b1fq" ]; then
     echo "### run TSIM b1 test ###"
     runSimCaseOneByOnefq b1 1
@@ -259,7 +278,7 @@ if [ "$2" != "jdbc" ] && [ "$2" != "python" ] && [ "$2" != "unit" ]; then
   fi
 fi
-if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ]; then
+if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$2" != "example" ]; then
   echo "### run Python test case ###"
   cd $tests_dir
@@ -328,7 +347,7 @@ if [ "$2" != "sim" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ]; then
 fi
-if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$1" == "full" ]; then
+if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$2" != "example" ] && [ "$1" == "full" ]; then
   echo "### run JDBC test cases ###"
   cd $tests_dir
@@ -372,7 +391,7 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "unit" ] && [ "$1" ==
   dohavecore 1
 fi
-if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$1" == "full" ]; then
+if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != "example" ] && [ "$1" == "full" ]; then
   echo "### run Unit tests ###"
   stopTaosd
@@ -408,5 +427,80 @@ if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$1" ==
   dohavecore 1
 fi
+if [ "$2" != "sim" ] && [ "$2" != "python" ] && [ "$2" != "jdbc" ] && [ "$2" != "unit" ] && [ "$1" == "full" ]; then
+  echo "### run Example tests ###"
-exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed + $totalUnitFailed))
+  stopTaosd
+  cd $tests_dir
+
+  if [[ "$tests_dir" == *"$IN_TDINTERNAL"* ]]; then
+    cd ../../
+  else
+    cd ../
+  fi
+
+  pwd
+  cd debug/build/bin
+  rm -rf /var/lib/taos/*
+  nohup ./taosd -c /etc/taos/ > /dev/null 2>&1 &
+  echo "sleeping for 30 seconds"
+  #sleep 30
+
+  cd $tests_dir
+  echo "current dir: "
+  pwd
+  cd examples/c
+  echo "building applications"
+  make > /dev/null
+  totalExamplePass=0
+
+  echo "Running tests"
+  ./apitest > /dev/null 2>&1
+  if [ $? != "0" ]; then
+    echo "apitest failed"
+    totalExampleFailed=`expr $totalExampleFailed + 1`
+  else
+    echo "apitest pass"
+    totalExamplePass=`expr $totalExamplePass + 1`
+  fi
+
+  ./prepare 127.0.0.1 > /dev/null 2>&1
+  if [ $? != "0" ]; then
+    echo "prepare failed"
+    totalExampleFailed=`expr $totalExampleFailed + 1`
+  else
+    echo "prepare pass"
+    totalExamplePass=`expr $totalExamplePass + 1`
+  fi
+
+  ./subscribe -test > /dev/null 2>&1
+  if [ $? != "0" ]; then
+    echo "subscribe failed"
+    totalExampleFailed=`expr $totalExampleFailed + 1`
+  else
+    echo "subscribe pass"
+    totalExamplePass=`expr $totalExamplePass + 1`
+  fi
+
+  yes |./asyncdemo 127.0.0.1 test 1000 10 > /dev/null 2>&1
+  if [ $? != "0" ]; then
+    echo "asyncdemo failed"
+    totalExampleFailed=`expr $totalExampleFailed + 1`
+  else
+    echo "asyncdemo pass"
+    totalExamplePass=`expr $totalExamplePass + 1`
+  fi
+
+  if [ "$totalExamplePass" -gt "0" ]; then
+    echo -e "\n${GREEN} ### Total $totalExamplePass examples succeed! ### ${NC}"
+  fi
+
+  if [ "$totalExampleFailed" -ne "0" ]; then
+    echo -e "\n${RED} ### Total $totalExampleFailed examples failed! ### ${NC}"
+  fi
+
+  dohavecore 1
+fi
+
+
+exit $(($totalFailed + $totalPyFailed + $totalJDBCFailed + $totalUnitFailed + $totalExampleFailed))