diff --git a/.github/workflows/taoskeeper-ci.yml b/.github/workflows/taoskeeper-ci.yml
new file mode 100644
index 0000000000..38dfb6d43c
--- /dev/null
+++ b/.github/workflows/taoskeeper-ci.yml
@@ -0,0 +1,58 @@
+name: TaosKeeper CI
+
+on:
+  push:
+    paths:
+      - tools/keeper/**
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    name: Run unit tests
+
+    steps:
+      - name: Checkout the repository
+        uses: actions/checkout@v4
+
+      - name: Set up Go
+        uses: actions/setup-go@v5
+        with:
+          go-version: 1.18
+
+      - name: Install system dependencies
+        run: |
+          sudo apt update -y
+          sudo apt install -y build-essential cmake libgeos-dev
+
+      - name: Install TDengine
+        run: |
+          mkdir debug
+          cd debug
+          cmake .. -DBUILD_HTTP=false -DBUILD_JDBC=false -DBUILD_TOOLS=false -DBUILD_TEST=off -DBUILD_KEEPER=true
+          make -j 4
+          sudo make install
+          which taosd
+          which taosadapter
+          which taoskeeper
+
+      - name: Start taosd
+        run: |
+          cp /etc/taos/taos.cfg ./
+          echo "supportVnodes 256" >> taos.cfg
+          nohup sudo taosd -c taos.cfg &
+
+      - name: Start taosadapter
+        run: nohup sudo taosadapter &
+
+      - name: Run tests with coverage
+        working-directory: tools/keeper
+        run: |
+          go mod tidy
+          go test -v -coverpkg=./... -coverprofile=coverage.out ./...
+          go tool cover -func=coverage.out
+
+      - name: Clean up
+        if: always()
+        run: |
+          if pgrep taosd; then sudo pkill taosd; fi
+          if pgrep taosadapter; then sudo pkill taosadapter; fi
diff --git a/README-CN.md b/README-CN.md
index 06ac087859..1f785eb458 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -348,7 +348,7 @@ TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java
 # 成为社区贡献者
 
-点击 [这里](https://www.taosdata.com/cn/contributor/),了解如何成为 TDengine 的贡献者。
+点击 [这里](https://www.taosdata.com/contributor),了解如何成为 TDengine 的贡献者。
 
 # 加入技术交流群
diff --git a/cmake/cmake.define b/cmake/cmake.define
index 8b762011a4..3fa99a7c93 100644
--- a/cmake/cmake.define
+++ b/cmake/cmake.define
@@ -1,6 +1,7 @@
 cmake_minimum_required(VERSION 3.0)
 set(CMAKE_VERBOSE_MAKEFILE FALSE)
 set(TD_BUILD_TAOSA_INTERNAL FALSE)
+set(TD_BUILD_KEEPER_INTERNAL FALSE)
 # set output directory
 SET(LIBRARY_OUTPUT_PATH ${PROJECT_BINARY_DIR}/build/lib)
@@ -57,6 +58,19 @@ IF(TD_BUILD_HTTP)
   ADD_DEFINITIONS(-DHTTP_EMBEDDED)
 ENDIF()
 
+IF("${BUILD_KEEPER}" STREQUAL "")
+  SET(TD_BUILD_KEEPER FALSE)
+ELSEIF(${BUILD_KEEPER} MATCHES "false")
+  SET(TD_BUILD_KEEPER FALSE)
+ELSEIF(${BUILD_KEEPER} MATCHES "true")
+  SET(TD_BUILD_KEEPER TRUE)
+ELSEIF(${BUILD_KEEPER} MATCHES "internal")
+  SET(TD_BUILD_KEEPER FALSE)
+  SET(TD_BUILD_KEEPER_INTERNAL TRUE)
+ELSE()
+  SET(TD_BUILD_KEEPER FALSE)
+ENDIF()
+
 IF("${BUILD_TOOLS}" STREQUAL "")
   IF(TD_LINUX)
     IF(TD_ARM_32)
diff --git a/docs/en/04-get-started/01-docker.md b/docs/en/04-get-started/01-docker.md
index 882e2ef194..f361e5a10f 100644
--- a/docs/en/04-get-started/01-docker.md
+++ b/docs/en/04-get-started/01-docker.md
@@ -75,9 +75,9 @@ taos>
 ## TDengine Graphic User Interface
 
-From TDengine 3.3.0.0, there is a new componenet called `taos-explorer` added in the TDengine docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system. There are also some features only available in TDengine Enterprise Edition, please contact TDengine sales team in case you need these features.
+From TDengine 3.3.0.0, there is a new component called `taos-explorer` added in the TDengine docker image. You can use it to manage the databases, super tables, child tables, and data in your TDengine system.
There are also some features only available in TDengine Enterprise Edition; please contact the TDengine sales team if you need these features.
 
-To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com, and the port used on host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. When you use it the first time, you need to register with your enterprise email, then can logon using your user name and password in the TDengine database management system.
+To use taos-explorer in the container, you need to access the host port mapped from container port 6060. Assuming the host name is abc.com, and the port used on the host is 6060, you need to access `http://abc.com:6060`. taos-explorer uses port 6060 by default in the container. The default username and password to log in to the TDengine Database Management System are "root/taosdata".
 
 ## Test data insert performance
diff --git a/docs/en/14-reference/01-components/10-taosbenchmark b/docs/en/14-reference/01-components/10-taosbenchmark
index e4884b889c..45c5cd2fb8 100644
--- a/docs/en/14-reference/01-components/10-taosbenchmark
+++ b/docs/en/14-reference/01-components/10-taosbenchmark
@@ -364,6 +364,9 @@ The configuration parameters for specifying super table tag columns and data col
 - **min**: The minimum value of the column/label of the data type. The generated value will be greater than or equal to the minimum value.
 
 - **max**: The maximum value of the column/label of the data type. The generated value will be less than the maximum value.
+
+- **scalingFactor**: Floating-point precision enhancement factor, which takes effect only when the data type is float/double. It has a valid range of positive integers from 1 to 1,000,000. It is used to enhance the precision of generated floating-point numbers, particularly when the min or max values are small. This property enhances the precision after the decimal point by powers of 10: a scalingFactor of 10 indicates an enhancement of 1 decimal place of precision, 100 indicates an enhancement of 2 decimal places, and so on.
+
 - **fun**: This column of data is filled with functions. Currently, only the sin and cos functions are supported. The input parameter is the timestamp, which is converted to an angle value. The conversion formula is: angle x = input time column ts value % 360. At the same time, it supports coefficient adjustment and random fluctuation factor adjustment, presented in a fixed-format expression, such as fun="10\*sin(x)+100\*random(5)", where x represents the angle, ranging from 0 to 360 degrees, and the growth step size is consistent with the time column step size. 10 represents the coefficient of multiplication, 100 represents the coefficient of addition or subtraction, and 5 represents the fluctuation range within a random range of 5%. The currently supported data types are int, bigint, float, and double. Note: the expression format is fixed and its parts cannot be reordered.
 
 - **values**: The value field of the nchar/binary column/label, which will be chosen randomly from the values.
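+
+As a quick illustration (a sketch added here, not part of the original change), a hypothetical taosBenchmark column block combining these properties might look as follows; the property names are the ones documented above, while the concrete names and values are illustrative assumptions and the surrounding super table configuration is omitted:
+
+```json
+{
+  "columns": [
+    {"type": "float", "name": "current", "min": 8, "max": 12, "scalingFactor": 100},
+    {"type": "float", "name": "phase", "fun": "10*sin(x)+100*random(5)"},
+    {"type": "nchar", "name": "location", "len": 16, "values": ["SanFrancisco", "LosAngeles"]}
+  ]
+}
+```
+
+Per the descriptions above, `scalingFactor: 100` should yield `current` values in [8, 12) with two decimal places of precision, while `phase` follows the sine expression with a 5% random fluctuation.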
diff --git a/docs/en/14-reference/03-taos-sql/21-node.md b/docs/en/14-reference/03-taos-sql/21-node.md
index 2ebccb76f7..cdc4bdd020 100644
--- a/docs/en/14-reference/03-taos-sql/21-node.md
+++ b/docs/en/14-reference/03-taos-sql/21-node.md
@@ -27,11 +27,15 @@ The preceding SQL command shows all dnodes in the cluster with the ID, endpoint,
 ## Delete a DNODE
 
 ```sql
-DROP DNODE dnode_id
+DROP DNODE dnode_id [force] [unsafe]
```
 
 Note that deleting a dnode does not stop its process. You must stop the process after the dnode is deleted.
 
+Only an online dnode can be dropped directly. To drop an offline dnode, run the statement with the force option.
+
+If an offline dnode holds single-replica data, it can only be dropped with the unsafe option; in that case the data on it cannot be restored.
+
 ## Modify Dnode Configuration
 
 ```sql
diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java
index a399f3aa6a..647855dc48 100644
--- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java
+++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java
@@ -1,8 +1,9 @@
 package com.taos.example;
 
-import com.alibaba.fastjson.JSON;
+import com.fasterxml.jackson.core.JsonProcessingException;
 import com.taosdata.jdbc.TSDBDriver;
 import com.taosdata.jdbc.tmq.*;
+import com.taosdata.jdbc.utils.JsonUtil;
 
 import java.sql.*;
 import java.time.Duration;
@@ -60,7 +61,7 @@ public class ConsumerLoopFull {
         // ANCHOR_END: create_consumer
     }
 
-    public static void pollExample(TaosConsumer consumer) throws SQLException {
+    public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException {
         // ANCHOR: poll_data_code_piece
         List topics = Collections.singletonList("topic_meters");
         try {
@@ -73,7 +74,7 @@ public class ConsumerLoopFull {
             for (ConsumerRecord record : records) {
                 ResultBean bean = record.value();
                 // Add your data processing logic here
-                System.out.println("data: " + JSON.toJSONString(bean));
+                System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
             }
         }
     } catch (Exception ex) {
@@ -91,7 +92,7 @@ public class ConsumerLoopFull {
         // ANCHOR_END: poll_data_code_piece
     }
 
-    public static void seekExample(TaosConsumer consumer) throws SQLException {
+    public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException {
         // ANCHOR: consumer_seek
         List topics = Collections.singletonList("topic_meters");
         try {
@@ -99,7 +100,7 @@ public class ConsumerLoopFull {
             consumer.subscribe(topics);
             System.out.println("Subscribe topics successfully.");
             Set assignment = consumer.assignment();
-            System.out.println("Now assignment: " + JSON.toJSONString(assignment));
+            System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment));
 
             ConsumerRecords records = ConsumerRecords.emptyRecord();
             // make sure we have got some data
@@ -125,7 +126,7 @@ public class ConsumerLoopFull {
     }
 
-    public static void commitExample(TaosConsumer consumer) throws SQLException {
+    public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException {
         // ANCHOR: commit_code_piece
         List topics = Collections.singletonList("topic_meters");
         try {
@@ -135,7 +136,7 @@ public class ConsumerLoopFull {
             for (ConsumerRecord record : records) {
                 ResultBean bean = record.value();
                 // Add your data processing logic here
-                System.out.println("data: " + JSON.toJSONString(bean));
+                System.out.println("data: " + 
JsonUtil.getObjectMapper().writeValueAsString(bean)); } if (!records.isEmpty()) { // after processing the data, commit the offset manually diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java index a59bfc282f..378ef8ae6d 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java @@ -1,7 +1,7 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.Connection; import java.sql.DriverManager; @@ -31,7 +31,11 @@ public class ConsumerLoopImp { final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() { @Override public void process(ResultBean result) { - System.out.println("data: " + JSON.toJSONString(result)); + try{ + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result)); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java index 6db65f47f2..02db97a5a9 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java @@ -1,8 +1,9 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; +import com.fasterxml.jackson.core.JsonProcessingException; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.*; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.*; import java.time.Duration; @@ -60,7 +61,7 @@ public class WsConsumerLoopFull { // ANCHOR_END: create_consumer } - public static void pollExample(TaosConsumer consumer) throws SQLException { + public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: poll_data_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -73,7 +74,7 @@ public class WsConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } } } catch (Exception ex) { @@ -91,7 +92,7 @@ public class WsConsumerLoopFull { // ANCHOR_END: poll_data_code_piece } - public static void seekExample(TaosConsumer consumer) throws SQLException { + public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: consumer_seek List topics = Collections.singletonList("topic_meters"); try { @@ -99,7 +100,7 @@ public class WsConsumerLoopFull { consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment)); ConsumerRecords records = ConsumerRecords.emptyRecord(); // make sure we have got some data @@ -125,7 +126,7 @@ public class WsConsumerLoopFull { } - public static void commitExample(TaosConsumer consumer) throws SQLException { + public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: commit_code_piece List topics = 
Collections.singletonList("topic_meters");
         try {
@@ -135,7 +136,7 @@ public class WsConsumerLoopFull {
             for (ConsumerRecord record : records) {
                 ResultBean bean = record.value();
                 // Add your data processing logic here
-                System.out.println("data: " + JSON.toJSONString(bean));
+                System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean));
             }
             if (!records.isEmpty()) {
                 // after processing the data, commit the offset manually
diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java
index 70e29503f8..77c6a4fd1b 100644
--- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java
+++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java
@@ -1,7 +1,7 @@
 package com.taos.example;
 
-import com.alibaba.fastjson.JSON;
 import com.taosdata.jdbc.TSDBDriver;
+import com.taosdata.jdbc.utils.JsonUtil;
 
 import java.sql.Connection;
 import java.sql.DriverManager;
@@ -28,7 +28,11 @@ public abstract class WsConsumerLoopImp {
         final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() {
             @Override
             public void process(ResultBean result) {
-                System.out.println("data: " + JSON.toJSONString(result));
+                try{
+                    System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result));
+                } catch (Exception e) {
+                    throw new RuntimeException(e);
+                }
             }
         };
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
index 8678f65231..fa6ebf0858 100644
--- a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java
@@ -13,6 +13,9 @@ public class DataBaseMonitor {
     public DataBaseMonitor init() throws SQLException {
         if (conn == null) {
             String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+            if (jdbcURL == null || jdbcURL.isEmpty()) {
+                jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
+            }
             conn = DriverManager.getConnection(jdbcURL);
             stmt = conn.createStatement();
         }
diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
index dc820f161c..1497992f6b 100644
--- a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
+++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java
@@ -69,6 +69,9 @@ public class SQLWriter {
      */
     private static Connection getConnection() throws SQLException {
         String jdbcURL = System.getenv("TDENGINE_JDBC_URL");
+        if (jdbcURL == null || jdbcURL.isEmpty()) {
+            jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
+        }
         return DriverManager.getConnection(jdbcURL);
     }
diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
index e014a3b315..a92ddd116c 100644
--- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java
+++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java
@@ -17,6 +17,37 @@ public class TestAll {
                 stmt.execute("drop database if exists " + dbName);
             }
         }
+        waitTransaction();
+    }
+
+    public void dropTopic(String topicName) throws SQLException {
+        String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata";
+        try (Connection conn = DriverManager.getConnection(jdbcUrl)) {
+            try (Statement stmt = conn.createStatement()) {
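+                // Drop the topic before dropping its database: TDengine does not
+                // allow a database to be dropped while topics created on it still exist.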
stmt.execute("drop topic if exists " + topicName); + } + } + waitTransaction(); + } + + public void waitTransaction() throws SQLException { + + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + for (int i = 0; i < 10; i++) { + stmt.execute("show transactions"); + try (ResultSet resultSet = stmt.getResultSet()) { + if (resultSet.next()) { + int count = resultSet.getInt(1); + if (count == 0) { + break; + } + } + } + } + } + } } public void insertData() throws SQLException { @@ -104,14 +135,20 @@ public class TestAll { SubscribeDemo.main(args); } -// @Test -// public void testSubscribeJni() throws SQLException, InterruptedException { -// dropDB("power"); -// ConsumerLoopFull.main(args); -// } -// @Test -// public void testSubscribeWs() throws SQLException, InterruptedException { -// dropDB("power"); -// WsConsumerLoopFull.main(args); -// } + @Test + public void testSubscribeJni() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + ConsumerLoopFull.main(args); + dropTopic("topic_meters"); + dropDB("power"); + } + @Test + public void testSubscribeWs() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + WsConsumerLoopFull.main(args); + dropTopic("topic_meters"); + dropDB("power"); + } } diff --git a/docs/zh/01-index.md b/docs/zh/01-index.md index 32ea117fbb..1fda72024c 100644 --- a/docs/zh/01-index.md +++ b/docs/zh/01-index.md @@ -4,9 +4,9 @@ sidebar_label: 文档首页 slug: / --- -TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept) +TDengine 是一款[开源](https://www.taosdata.com/tdengine/open_source_time-series_database)、[高性能](https://www.taosdata.com/fast)、[云原生](https://www.taosdata.com/tdengine/cloud_native_time-series_database)的时序数据库Time Series Database, TSDB), 它专为物联网、车联网、工业互联网、金融、IT 运维等场景优化设计。同时它还带有内建的缓存、流式计算、数据订阅等系统功能,能大幅减少系统设计的复杂度,降低研发和运营成本,是一款极简的时序数据处理平台。本文档是 TDengine 的用户手册,主要是介绍 TDengine 的基本概念、安装、使用、功能、开发接口、运营维护、TDengine 内核设计等等,它主要是面向架构师、开发工程师与系统管理员的。如果你对时序数据的基本概念、价值以及其所能带来的业务价值尚不了解,请参考[时序数据基础](./concept)。 -TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论如何,请您仔细阅读[数据模型](./basic/model)一章。 +TDengine 充分利用了时序数据的特点,提出了“一个数据采集点一张表”与“超级表”的概念,设计了创新的存储引擎,让数据的写入、查询和存储效率都得到极大的提升。为正确理解并使用 TDengine,无论你在工作中是什么角色,请您仔细阅读[数据模型](./basic/model)一章。 如果你是开发工程师,请一定仔细阅读[开发指南](./develop)一章,该部分对数据库连接、建模、插入数据、查询、流式计算、缓存、数据订阅、用户自定义函数等功能都做了详细介绍,并配有各种编程语言的示例代码。大部分情况下,你只要复制粘贴示例代码,针对自己的应用稍作改动,就能跑起来。对 REST API、各种编程语言的连接器(Connector)想做更多详细了解的话,请看[连接器](./reference/connector)一章。 @@ -16,6 +16,8 @@ TDengine 采用 SQL 作为查询语言,大大降低学习成本、降低迁移 如果你是系统管理员,关心安装、升级、容错灾备、关心数据导入、导出、配置参数,如何监测 TDengine 是否健康运行,如何提升系统运行的性能,请仔细参考[运维指南](./operation)一章。 +如果你对数据库内核设计感兴趣,或是开源爱好者,建议仔细阅读[技术内幕](./tdinterna)一章。该章从分布式架构到存储引擎、查询引擎、数据订阅,再到流计算引擎都做了详细阐述。建议对照文档,查看TDengine在GitHub的源代码,对TDengine的设计和编码做深入了解,更欢迎加入开源社区,贡献代码。 + 最后,作为一个开源软件,欢迎大家的参与。如果发现文档有任何错误、描述不清晰的地方,请在每个页面的最下方,点击“编辑本文档”直接进行修改。 Together, we make a difference! 
diff --git a/docs/zh/04-get-started/01-docker.md b/docs/zh/04-get-started/01-docker.md
index cadde10e0c..4bd9322595 100644
--- a/docs/zh/04-get-started/01-docker.md
+++ b/docs/zh/04-get-started/01-docker.md
@@ -17,7 +17,7 @@ docker pull tdengine/tdengine:latest
 或者指定版本的容器镜像:
 
 ```shell
-docker pull tdengine/tdengine:3.0.1.4
+docker pull tdengine/tdengine:3.3.3.0
```
 
 然后只需执行下面的命令:
@@ -121,4 +121,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
 ```
 
-在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。
\ No newline at end of file
+在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
diff --git a/docs/zh/04-get-started/03-package.md b/docs/zh/04-get-started/03-package.md
index 2a1f594b4f..9724c0a1c9 100644
--- a/docs/zh/04-get-started/03-package.md
+++ b/docs/zh/04-get-started/03-package.md
@@ -14,7 +14,9 @@ TDengine 完整的软件包包括服务端(taosd)、应用驱动(taosc)
 为方便使用,标准的服务端安装包包含了 taosd、taosAdapter、taosc、taos、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 Lite 版本的安装包。
 
-在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包,也支持通过 `apt-get` 工具从线上进行安装。需要注意的是,RPM 和 Deb 包不含 `taosdump` 和 TDinsight 安装脚本,这些工具需要通过安装 taosTools 包获得。TDengine 也提供 Windows x64 平台和 macOS x64/m1 平台的安装包。
+在 Linux 系统上,TDengine 社区版提供 Deb 和 RPM 格式安装包,其中 Deb 支持 Debian/Ubuntu 及其衍生系统,RPM 支持 CentOS/RHEL/SUSE 及其衍生系统,用户可以根据自己的运行环境自行选择。同时我们也提供了 tar.gz 格式安装包,以及通过 `apt-get` 工具从线上安装的方式。
+
+此外,TDengine 也提供 macOS x64/m1 平台的 pkg 安装包。
 
 ## 运行环境要求
 在 Linux 系统中,运行环境最低要求如下:
@@ -317,4 +319,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
 ```
 
-在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。
\ No newline at end of file
+在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
\ No newline at end of file
diff --git a/docs/zh/04-get-started/_07-use.md b/docs/zh/04-get-started/_07-use.md
index d206ed4102..8c976e9b55 100644
--- a/docs/zh/04-get-started/_07-use.md
+++ b/docs/zh/04-get-started/_07-use.md
@@ -54,4 +54,4 @@ SELECT AVG(current), MAX(voltage), MIN(phase) FROM test.meters WHERE groupId = 1
 SELECT _wstart, AVG(current), MAX(voltage), MIN(phase) FROM test.d1001 INTERVAL(10s);
 ```
 
-在上面的查询中,使用系统提供的伪列_wstart 来给出每个窗口的开始时间。
\ No newline at end of file
+在上面的查询中,使用系统提供的伪列 _wstart 来给出每个窗口的开始时间。
\ No newline at end of file
diff --git a/docs/zh/06-advanced/02-cache.md b/docs/zh/06-advanced/02-cache.md
index ca1da30dbf..b83509c2dd 100644
--- a/docs/zh/06-advanced/02-cache.md
+++ b/docs/zh/06-advanced/02-cache.md
@@ -54,7 +54,7 @@ TDengine 利用这些日志文件实现故障前的状态恢复。在写入 WAL
 数据库参数 wal_level 和 wal_fsync_period 共同决定了 WAL 的保存行为。
 - wal_level:此参数控制 WAL 的保存级别。级别 1 表示仅将数据写入 WAL,但不立即执行 fsync 函数;级别 2 则表示在写入 WAL 的同时执行 fsync 函数。默认情况下,wal_level 设为 1。虽然执行 fsync 函数可以提高数据的持久性,但相应地也会降低写入性能。
-- wal_fsync_period:当 wal_level 设置为 2 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。
+- wal_fsync_period:当 wal_level 设置为 1 时,这个参数控制执行 fsync 的频率。设置为 0 表示每次写入后立即执行 fsync,这可以确保数据的安全性,但可能会牺牲一些性能。当设置为大于 0 的数值时,表示 fsync 周期,默认为 3000,范围是[1, 180000],单位毫秒。
 
 ```sql
 CREATE DATABASE POWER WAL_LEVEL 1 WAL_FSYNC_PERIOD 3000;
@@ -119,4 +119,4 @@ taos> select last_row(ts,current) from meters;
 Query OK, 1 row(s) in set (0.046682s)
 ```
 
-可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。
\ No newline at end of file
+可以看到查询的时延从 353/344ms 缩短到了 44ms,提升约 8 倍。
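+
+上述 last_row 查询的加速依赖于数据库开启了读缓存。作为一个示意性的补充示例(假设数据库名为 test,实际名称以部署为准),可以用如下 SQL 为已有数据库开启或调整读缓存:
+
+```sql
+-- CACHEMODEL 可选值:'none'、'last_row'、'last_value'、'both'
+ALTER DATABASE test CACHEMODEL 'both';
+-- 每个 vnode 用于读缓存的内存上限,单位为 MB
+ALTER DATABASE test CACHESIZE 16;
+```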
diff --git a/docs/zh/06-advanced/05-data-in/index.md b/docs/zh/06-advanced/05-data-in/index.md
index 7e5c467010..0dfa04db56 100644
--- a/docs/zh/06-advanced/05-data-in/index.md
+++ b/docs/zh/06-advanced/05-data-in/index.md
@@ -14,18 +14,26 @@ TDengine Enterprise 配备了一个强大的可视化数据管理工具—taosExplorer
 ## 支持的数据源
 
-目前 TDengine 支持的数据源如下:
+目前 TDengine 支持的数据源如下表:
 
-1. Aveva PI System:一个工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理
-2. Aveva Historian:一个工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据。
-3. OPC DA/UA:OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题。OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制;2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本。
-4. MQTT:Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。
-5. Kafka:由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。
-6. OpenTSDB:基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。
-7. CSV:Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。
-8. TDengine 2:泛指运行 TDengine 2.x 版本的 TDengine 实例。
-9. TDengine 3:泛指运行 TDengine 3.x 版本的 TDengine 实例。
-10. MySQL, PostgreSQL, Oracle 等关系型数据库。
+| 数据源 | 支持版本 | 描述 |
+| --- | --- | --- |
+| Aveva PI System | PI AF Server Version 2.10.9.593 或以上 | 工业数据管理和分析平台,前身为 OSIsoft PI System,它能够实时采集、整合、分析和可视化工业数据,助力企业实现智能化决策和精细化管理 |
+| Aveva Historian | AVEVA Historian 2020 RS SP1 | 工业大数据分析软件,前身为 Wonderware Historian,专为工业环境设计,用于存储、管理和分析来自各种工业设备、传感器的实时和历史数据 |
+| OPC DA | Matrikon OPC version: 1.7.2.7433 | OPC 是 Open Platform Communications 的缩写,是一种开放式、标准化的通信协议,用于不同厂商的自动化设备之间进行数据交换。它最初由微软公司开发,旨在解决工业控制领域中不同设备之间互操作性差的问题;OPC 协议最初于 1996 年发布,当时称为 OPC DA (Data Access),主要用于实时数据采集和控制。 |
+| OPC UA | Kepware KEPServerEX 6.5 | 2006 年,OPC 基金会发布了 OPC UA (Unified Architecture) 标准,它是一种基于服务的面向对象的协议,具有更高的灵活性和可扩展性,已成为 OPC 协议的主流版本 |
+| MQTT | emqx: 3.0.0 到 5.7.1<br/>hivemq: 4.0.0 到 4.31.0<br/>mosquitto: 1.4.4 到 2.0.18 | Message Queuing Telemetry Transport 的缩写,一种基于发布/订阅模式的轻量级通讯协议,专为低开销、低带宽占用的即时通讯设计,广泛适用于物联网、小型设备、移动应用等领域。 |
+| Kafka | 2.11 ~ 3.8.0 | 由 Apache 软件基金会开发的一个开源流处理平台,主要用于处理实时数据,并提供一个统一、高通量、低延迟的消息系统。它具备高速度、可伸缩性、持久性和分布式设计等特点,使得它能够在每秒处理数十万次的读写操作,支持上千个客户端,同时保持数据的可靠性和可用性。 |
+| InfluxDB | 1.7、1.8、2.0-2.7 | InfluxDB 是一种流行的开源时间序列数据库,它针对处理大量时间序列数据进行了优化。 |
+| OpenTSDB | 2.4.1 | 基于 HBase 的分布式、可伸缩的时序数据库。它主要用于存储、索引和提供从大规模集群(包括网络设备、操作系统、应用程序等)中收集的指标数据,使这些数据更易于访问和图形化展示。 |
+| MySQL | 5.6,5.7,8.0+ | MySQL 是最流行的关系型数据库管理系统之一,由于其体积小、速度快、总体拥有成本低,尤其是开放源码这一特点,一般中小型和大型网站的开发都选择 MySQL 作为网站数据库。 |
+| Oracle | 11G/12c/19c | Oracle 数据库系统是世界上流行的关系数据库管理系统,系统可移植性好、使用方便、功能强,适用于各类大、中、小微机环境。它是一种高效率的、可靠性好的、适应高吞吐量的数据库方案。 |
+| PostgreSQL | v15.0+ | PostgreSQL 是一个功能非常强大的、源代码开放的客户/服务器关系型数据库管理系统,有很多在大型商业 RDBMS 中所具有的特性,包括事务、子选择、触发器、视图、外键引用完整性和复杂锁定功能。 |
+| SQL Server | 2012/2022 | Microsoft SQL Server 是一种关系型数据库管理系统,由 Microsoft 公司开发,具有使用方便、可伸缩性好、与相关软件集成程度高等优点。 |
+| MongoDB | 3.6+ | MongoDB 是一个介于关系型数据库与非关系型数据库之间的产品,被广泛应用于内容管理系统、移动应用与物联网等众多领域。 |
+| CSV | - | Comma Separated Values 的缩写,是一种以逗号分隔的纯文本文件格式,通常用于电子表格或数据库软件。 |
+| TDengine 2.x | 2.4 或 2.6+ | TDengine 旧版本,已不再维护,推荐升级到 3.0 最新版本。 |
+| TDengine 3.x | 源端版本+ | 使用 TMQ 订阅 TDengine 指定数据库或超级表的数据。 |
 
 ## 数据提取、过滤和转换
diff --git a/docs/zh/06-advanced/06-data-analysis/01-arima.md b/docs/zh/06-advanced/06-data-analysis/01-arima.md
new file mode 100644
index 0000000000..1668da453c
--- /dev/null
+++ b/docs/zh/06-advanced/06-data-analysis/01-arima.md
@@ -0,0 +1,10 @@
+---
+title: "ARIMA"
+sidebar_label: "ARIMA"
+---
+
+本节讲述 ARIMA 算法的使用方法。
+
+## 功能概述
+
+……
\ No newline at end of file
diff --git a/docs/zh/06-advanced/06-data-analysis/index.md b/docs/zh/06-advanced/06-data-analysis/index.md
new file mode 100644
index 0000000000..a1ca53d7d0
--- /dev/null
+++ b/docs/zh/06-advanced/06-data-analysis/index.md
@@ -0,0 +1,13 @@
+---
+sidebar_label: 数据分析
+title: 数据分析功能
+---
+
+## 概述
+
+TDengine 提供数据分析功能的扩展组件,通过引入 ANode,TDengine 能够支持时间序列的机器学习分析。
+
+下图展示了数据分析的技术架构。
+
+![数据分析功能架构图](./pic/data-analysis.png)
+
diff --git a/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png b/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png
new file mode 100644
index 0000000000..1598238f2c
Binary files /dev/null and b/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png differ
diff --git a/docs/zh/08-operation/02-planning.md b/docs/zh/08-operation/02-planning.md
index 66da1df8bf..396fd0d3d5 100644
--- a/docs/zh/08-operation/02-planning.md
+++ b/docs/zh/08-operation/02-planning.md
@@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100)
 
 与 WebSocket 连接方式相比,RESTful 连接方式在内存占用上更大,除了缓冲区所需的内存以外,还需要考虑每个连接响应结果的内存开销。这种内存开销与响应结果的 JSON 数据大小密切相关,特别是在查询数据量很大时,会占用大量内存。
 
-由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议打开batchfetch=true 选项,以启用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险
+由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议使用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险。
 
 **注意**
 - 建议采用 RESTful/WebSocket 连接方式来访问 TDengine 集群,而不采用 taosc 原生连接方式。
 
@@ -146,11 +146,11 @@ TDengine 的多级存储功能在使用上还具备以下优点。
 
 下表列出了 TDengine 的一些接口或组件的常用端口,这些端口均可以通过配置文件中的参数进行修改。
 
-|接口或组件 | 端口 |
-|:---------------------------:|:---------:|
-|原生接口(taosc) | 6030 |
-|RESTful 接口 | 6041 |
-|WebSocket 接口 |6041 |
-|taosKeeper | 6043 |
-|taosX | 6050, 6055 |
-|taosExplorer | 6060 |
\ No newline at end of file
+| 接口或组件 | 端口 |
+| :---------------: | :--------: |
+| 原生接口(taosc) | 6030 |
+| RESTful 接口 | 6041 |
+| WebSocket 接口 | 6041 |
+| taosKeeper | 6043 |
+| taosX | 6050, 6055 |
+| taosExplorer | 6060 |
diff --git 
a/docs/zh/08-operation/03-deployment.md b/docs/zh/08-operation/03-deployment.md index 2e0c2a7989..e549e8613d 100644 --- a/docs/zh/08-operation/03-deployment.md +++ b/docs/zh/08-operation/03-deployment.md @@ -368,6 +368,18 @@ spec: labels: app: "tdengine" spec: + affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - weight: 100 + podAffinityTerm: + labelSelector: + matchExpressions: + - key: app + operator: In + values: + - tdengine + topologyKey: kubernetes.io/hostname containers: - name: "tdengine" image: "tdengine/tdengine:3.2.3.0" @@ -837,4 +849,4 @@ Helm 管理下,清理操作也变得简单: helm uninstall tdengine ``` -但 Helm 也不会自动移除 PVC,需要手动获取 PVC 然后删除掉。 \ No newline at end of file +但 Helm 也不会自动移除 PVC,需要手动获取 PVC 然后删除掉。 diff --git a/docs/zh/08-operation/12-multi.md b/docs/zh/08-operation/12-multi.md index a5608ad5fa..5489226ce1 100644 --- a/docs/zh/08-operation/12-multi.md +++ b/docs/zh/08-operation/12-multi.md @@ -76,7 +76,7 @@ dataDir /mnt/data6 2 0 |s3UploadDelaySec | data 文件持续多长时间不再变动后上传至 s3,单位:秒。最小值:1;最大值:2592000 (30天),默认值 60 秒 | |s3PageCacheSize |s3 page cache 缓存页数目,单位:页。最小值:4;最大值:1024*1024\*1024。 ,默认值 4096| |s3MigrateIntervalSec | 本地数据文件自动上传 S3 的触发周期,单位为秒。最小值:600;最大值:100000。默认值 3600 | -|s3MigrateEnabled | 是否自动进行 S3 迁移,默认值为 1,表示开启自动 S3 迁移,可配置为 0。 | +|s3MigrateEnabled | 是否自动进行 S3 迁移,默认值为 0,表示关闭自动 S3 迁移,可配置为 1。 | ### 检查配置参数可用性 @@ -108,9 +108,37 @@ s3migrate database ; | # | 参数 | 默认值 | 最小值 | 最大值 | 描述 | | :--- | :----------- | :----- | :----- | :------ | :----------------------------------------------------------- | -| 1 | s3_keeplocal | 3650 | 1 | 365000 | 数据在本地保留的天数,即 data 文件在本地磁盘保留多长时间后可以上传到 S3。默认单位:天,支持 m(分钟)、h(小时)和 d(天)三个单位 | -| 2 | s3_chunksize | 262144 | 131072 | 1048576 | 上传对象的大小阈值,与 TSDB_PAGESIZE 参数一样,不可修改,单位为 TSDB 页 | -| 3 | s3_compact | 0 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作。 | +| 1 | s3_keeplocal | 365 | 1 | 365000 | 数据在本地保留的天数,即 data 文件在本地磁盘保留多长时间后可以上传到 S3。默认单位:天,支持 m(分钟)、h(小时)和 d(天)三个单位 | +| 2 | s3_chunkpages | 131072 | 131072 | 1048576 | 上传对象的大小阈值,与 tsdb_pagesize 参数一样,不可修改,单位为 TSDB 页 | +| 3 | s3_compact | 1 | 0 | 1 | TSDB 文件组首次上传 S3 时,是否自动进行 compact 操作。 | + +### 对象存储读写次数估算 + +对象存储服务的使用成本与存储的数据量及请求次数相关,下面分别介绍数据的上传及下载过程。 + +#### 数据上传 + +当 TSDB 时序数据超过 `s3_keeplocal` 参数指定的时间,相关的数据文件会被切分成多个文件块,每个文件块的默认大小是 512M 字节 (`s3_chunkpages * tsdb_pagesize`)。除了最后一个文件块保留在本地文件系统外,其余的文件块会被上传到对象存储服务。 + +```math +上传次数 = 数据文件大小 / (s3_chunkpages * tsdb_pagesize) - 1 +``` + +在创建数据库时,可以通过 `s3_chunkpages` 参数调整每个文件块的大小,从而控制每个数据文件的上传次数。 + +其它类型的文件如 head, stt, sma 等,保留在本地文件系统,以加速预计算相关查询。 + +#### 数据下载 + +在查询操作中,如果需要访问对象存储中的数据,TSDB 不会下载整个数据文件,而是计算所需数据在文件中的位置,只下载相应的数据到 TSDB 页缓存中,然后将数据返回给查询执行引擎。后续查询首先检查页缓存,查看数据是否已被缓存。如果数据已缓存,则直接使用缓存中的数据,而无需重复从对象存储下载,从而有效降低从对象存储下载数据的次数。 + +相邻的多个数据页会作为一个数据块从对象存储下载一次,以减少从对象存储下载的次数。每个数据页的大小,在创建数据库时,通过 `tsdb_pagesize` 参数指定,默认 4K 字节。 + +```math +下载次数 = 查询需要的数据块数量 - 已缓存的数据块数量 +``` + +页缓存是内存缓存,节点重启后,再次查询需要重新下载数据。缓存采用 LRU (Least Recently Used) 策略,当缓存空间不足时,最近最少使用的数据将被淘汰。缓存的大小可以通过 `s3PageCacheSize` 参数进行调整,通常来说,缓存越大,下载次数越少。 ## Azure Blob 存储 本节介绍在 TDengine Enterprise 如何使用微软 Azure Blob 对象存储。本功能是上一小节‘对象存储’功能的扩展,需额外依赖 Flexify 服务提供的 S3 网关。通过适当的参数配置,可以把大部分较冷的时序数据存储到 Azure Blob 服务中。 diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index fbf086bf6b..f11321c84f 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -33,7 +33,7 @@ taosd 命令行参数如下 | secondEp | taosd 启动时,如果 firstEp 连接不上,尝试连接集群中第二个 dnode 的 endpoint,缺省值:无 | | fqdn | 启动 taosd 
后所监听的服务地址,缺省值:所在服务器上配置的第一个 hostname | | serverPort | 启动 taosd 后所监听的端口,缺省值:6030 | -| numOfRpcSessions | 允许一个客户端能创建的最大连接数,取值范围 100-100000,缺省值:30000 | +| numOfRpcSessions | 允许一个 dnode 能发起的最大连接数,取值范围 100-100000,缺省值:30000 | | timeToGetAvailableConn | 获得可用连接的最长等待时间,取值范围 10-50000000,单位为毫秒,缺省值:500000 | ### 监控相关 @@ -163,7 +163,7 @@ charset 的有效值是 UTF-8。 | 参数名称 | 参数说明 | | :----------------: | :---------------------------------------------: | -| numOfCommitThreads | 写入线程的最大数量,取值范围 0-1024,缺省值为 4 | +| numOfCommitThreads | 落盘线程的最大数量,取值范围 0-1024,缺省值为 4 | ### 日志相关 @@ -223,16 +223,16 @@ lossyColumns float|double | 参数名称 | 参数说明 | | :--------------: | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | -| enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值 为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
2、手动启动,就在 taosd 执行目录下。 | -| udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值 为 0 | -| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变 ;默认值 为 | -| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值 为20 | -| maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值: 3 | +| enableCoreFile | crash 时是否生成 core 文件;0: 不生成,1:生成;默认值为 1; 不同的启动方式,生成 core 文件的目录如下:1、systemctl start taosd 启动:生成的 core 在根目录下
2、手动启动,就在 taosd 执行目录下。 | +| udf | 是否启动 UDF 服务;0: 不启动,1:启动;默认值为 0 | +| ttlChangeOnWrite | ttl 到期时间是否伴随表的修改操作改变; 0: 不改变,1:改变;默认值为 0 | +| tmqMaxTopicNum | 订阅最多可建立的 topic 数量; 取值范围 1-10000;缺省值为20 | +| maxTsmaNum | 集群内可创建的TSMA个数;取值范围:0-3;缺省值为 3 | ## taosd 监控指标 -taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 +taosd 会将监控指标上报给 taosKeeper,这些监控指标会被 taosKeeper 写入监控数据库,默认是 `log` 库,可以在 taoskeeper 配置文件中修改。以下是这些监控指标的详细介绍。 ### taosd\_cluster\_basic 表 @@ -458,4 +458,4 @@ TDengine 的日志文件主要包括普通日志和慢日志两种类型。 3. 多个客户端的日志存储在相应日志路径下的同一个 taosSlowLog.yyyy.mm.dd 文件里。 4. 慢日志文件不自动删除,不压缩。 5. 使用和普通日志文件相同的三个参数 logDir, minimalLogDirGB, asyncLog。另外两个参数 numOfLogLines,logKeepDays 不适用于慢日志。 - \ No newline at end of file + diff --git a/docs/zh/14-reference/01-components/07-explorer.md b/docs/zh/14-reference/01-components/07-explorer.md index 499fb3697c..eab4aef15b 100644 --- a/docs/zh/14-reference/01-components/07-explorer.md +++ b/docs/zh/14-reference/01-components/07-explorer.md @@ -8,7 +8,7 @@ taosExplorer 是一个为用户提供 TDengine 实例的可视化管理交互工 ## 安装 -taosEexplorer 无需单独安装,从 TDengine 3.3.0.0 版本开始,它随着 TDengine 安装包一起发布,安装完成后,就可以看到 `taos-explorer` 服务。如果按照 GitHub 里步骤自己编译 TDengine 源代码生成的安装包不包含 taosExplorer。 +taosExplorer 无需单独安装,从 TDengine 3.3.0.0 版本开始,它随着 TDengine 安装包一起发布,安装完成后,就可以看到 `taos-explorer` 服务。如果按照 GitHub 里步骤自己编译 TDengine 源代码生成的安装包不包含 taosExplorer。 ## 配置 diff --git a/docs/zh/14-reference/02-tools/10-taosbenchmark.md b/docs/zh/14-reference/02-tools/10-taosbenchmark.md index 3c43c58915..303cfaa395 100644 --- a/docs/zh/14-reference/02-tools/10-taosbenchmark.md +++ b/docs/zh/14-reference/02-tools/10-taosbenchmark.md @@ -364,6 +364,8 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\) - **max** : 数据类型的 列/标签 的最大值。生成的值将小于最小值。 +- **scalingFactor** : 浮点数精度增强因子,仅当数据类型是float/double时生效,有效值范围为1至1000000的正整数。用于增强生成浮点数的精度,特别是在min或max值较小的情况下。此属性按10的幂次增强小数点后的精度:scalingFactor为10表示增强1位小数精度,100表示增强2位,依此类推。 + - **fun** : 此列数据以函数填充,目前只支持 sin 和 cos 两函数,输入参数为时间戳换算成角度值,换算公式: 角度 x = 输入的时间列ts值 % 360。同时支持系数调节,随机波动因子调节,以固定格式的表达式展现,如 fun=“10\*sin(x)+100\*random(5)” , x 表示角度,取值 0 ~ 360度,增长步长与时间列步长一致。10 表示乘的系数,100 表示加或减的系数,5 表示波动幅度在 5% 的随机范围内。目前支持的数据类型为 int, bigint, float, double 四种数据类型。注意:表达式为固定模式,不可前后颠倒。 - **values** : nchar/binary 列/标签的值域,将从值中随机选择。 diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md index cad9190bd9..9e4cc66eaf 100644 --- a/docs/zh/14-reference/03-taos-sql/03-table.md +++ b/docs/zh/14-reference/03-taos-sql/03-table.md @@ -87,7 +87,7 @@ CREATE TABLE [IF NOT EXISTS] USING [db_name.]stb_name (field1_name [, field2_nam **参数说明** -1. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。CSV 文件中应仅包含 table name 与 tag 值。如需插入数据,请参考数据写入章节。 +1. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。CSV 文件中应仅包含 table name 与 tag 值。如需插入数据,请参考'数据写入'章节。 2. 为指定的 stb_name 创建子表,该超级表必须已经存在。 3. field_name 列表顺序与 CSV 文件各列内容顺序一致。列表中不允许出现重复项,且必须包含 `tbname`,可包含零个或多个超级表中已定义的标签列。未包含在列表中的标签值将被设置为 NULL。 diff --git a/docs/zh/14-reference/03-taos-sql/05-insert.md b/docs/zh/14-reference/03-taos-sql/05-insert.md index b2c34f4c55..40f8e95006 100644 --- a/docs/zh/14-reference/03-taos-sql/05-insert.md +++ b/docs/zh/14-reference/03-taos-sql/05-insert.md @@ -1,7 +1,7 @@ --- sidebar_label: 数据写入 title: 数据写入 -description: 写入数据的详细语法 +description: 写入数据的详细语法 --- ## 写入语法 @@ -25,9 +25,9 @@ INSERT INTO tb_name [(field1_name, ...)] subquery ### 超级表语法 ```sql INSERT INTO - stb1_name [(field1_name, ...)] + stb1_name [(field1_name, ...)] VALUES (field1_value, ...) [(field1_value2, ...) ...] 
| FILE csv_file_path - [stb2_name [(field1_name, ...)] + [stb2_name [(field1_name, ...)] VALUES (field1_value, ...) [(field1_value2, ...) ...] | FILE csv_file_path ...]; ``` @@ -47,7 +47,7 @@ INSERT INTO 2. VALUES 语法表示了要插入的一行或多行数据。 -3. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。 +3. FILE 语法表示数据来自于 CSV 文件(英文逗号分隔、英文单引号括住每个值),CSV 文件无需表头。如仅需创建子表,请参考'表'章节。 4. `INSERT ... VALUES` 语句和 `INSERT ... FILE` 语句均可以在一条 INSERT 语句中同时向多个表插入数据。 @@ -154,12 +154,20 @@ INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/c INSERT INTO d21001 USING meters TAGS ('California.SanFrancisco', 2) FILE '/tmp/csvfile_21001.csv' d21002 USING meters (groupId) TAGS (2) FILE '/tmp/csvfile_21002.csv'; ``` -## 超级表语法 +## 向超级表插入数据并自动创建子表 -自动建表, 表名通过tbname列指定 +自动建表, 表名通过 tbname 列指定 ```sql -INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) - values('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) +INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) + VALUES ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('d31001', 'California.SanFrancisco', 2, '2021-07-13 14:06:35.779', 10.15, 217, 0.33) - ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) + ('d31002', NULL, 2, '2021-07-13 14:06:34.255', 10.15, 217, 0.33) +``` +## 通过 CSV 文件向超级表插入数据并自动创建子表 + +根据 csv 文件内容,为 超级表创建子表,并填充相应 column 与 tag + +```sql +INSERT INTO meters(tbname, location, groupId, ts, current, voltage, phase) + FILE '/tmp/csvfile_21002.csv' ``` diff --git a/docs/zh/14-reference/03-taos-sql/14-stream.md b/docs/zh/14-reference/03-taos-sql/14-stream.md index cd5c76a4ad..6baad76e23 100644 --- a/docs/zh/14-reference/03-taos-sql/14-stream.md +++ b/docs/zh/14-reference/03-taos-sql/14-stream.md @@ -212,11 +212,11 @@ TDengine 对于修改数据提供两种处理方式,由 IGNORE UPDATE 选项 ```sql [field1_name,...] ``` -用来指定stb_name的列与subquery输出结果的对应关系。如果stb_name的列与subquery输出结果的位置、数量全部匹配,则不需要显示指定对应关系。如果stb_name的列与subquery输出结果的数据类型不匹配,会把subquery输出结果的类型转换成对应的stb_name的列的类型。 +在本页文档顶部的 [field1_name,...] 是用来指定 stb_name 的列与 subquery 输出结果的对应关系的。如果 stb_name 的列与 subquery 输出结果的位置、数量全部匹配,则不需要显示指定对应关系。如果 stb_name 的列与 subquery 输出结果的数据类型不匹配,会把 subquery 输出结果的类型转换成对应的 stb_name 的列的类型。 对于已经存在的超级表,检查列的schema信息 -1. 检查列的schema信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于4096byte时才报错,其余场景都能进行类型转换。 -2. 检查列的个数是否相同,如果不同,需要显示的指定超级表与subquery的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。 +1. 检查列的 schema 信息是否匹配,对于不匹配的,则自动进行类型转换,当前只有数据长度大于 4096byte 时才报错,其余场景都能进行类型转换。 +2. 
检查列的个数是否相同,如果不同,需要显式地指定超级表与 subquery 的列的对应关系,否则报错;如果相同,可以指定对应关系,也可以不指定,不指定则按位置顺序对应。
 
 ## 自定义TAG
 
diff --git a/docs/zh/14-reference/03-taos-sql/21-node.md b/docs/zh/14-reference/03-taos-sql/21-node.md
index 967cb51127..e3a672790c 100644
--- a/docs/zh/14-reference/03-taos-sql/21-node.md
+++ b/docs/zh/14-reference/03-taos-sql/21-node.md
@@ -27,11 +27,15 @@ SHOW DNODES;
 ## 删除数据节点
 
 ```sql
-DROP DNODE dnode_id
+DROP DNODE dnode_id [force] [unsafe]
```
 
 注意删除 dnode 不等于停止相应的进程。实际中推荐先将一个 dnode 删除之后再停止其所对应的进程。
 
+只有在线节点可以被直接删除。如果要删除离线节点,需要执行强制删除操作,即指定 force 选项。
+
+当节点上存在单副本数据,并且节点处于离线状态时,如果要强制删除该节点,需要执行非安全删除,即指定 unsafe 选项,并且数据不可再恢复。
+
 ## 修改数据节点配置
 
 ```sql
diff --git a/docs/zh/14-reference/05-connector/50-odbc.mdx b/docs/zh/14-reference/05-connector/50-odbc.mdx
index 8518d2ffd7..63837fd282 100644
--- a/docs/zh/14-reference/05-connector/50-odbc.mdx
+++ b/docs/zh/14-reference/05-connector/50-odbc.mdx
@@ -3,11 +3,14 @@ sidebar_label: ODBC
 title: TDengine ODBC
 ---
 
-TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库的数据表/视图。
+TDengine ODBC 是为 TDengine 实现的 ODBC 驱动程序,支持 Windows 系统的应用(如 [PowerBI](https://powerbi.microsoft.com/zh-cn/) 等)以及用户自定义开发的应用程序,通过 ODBC 标准接口访问本地、远程和云服务的 TDengine 数据库。
 
-TDengine ODBC 提供基于 WebSocket(推荐)和 原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。
+TDengine ODBC 提供基于 WebSocket(推荐)和原生连接两种方式连接 TDengine 数据库,使用时可以为 TDengine 数据源设置不同的连接方式。访问云服务时必须使用 WebSocket 连接方式。
 
-注意:TDengine ODBC 支持 32/64 位 Windows 系统,调用 TDengine ODBC 需要通过相应位数的 ODBC 驱动管理器进行。在 32 位 Windows 系统或者 64 位 Windows 系统的 32 位应用程序中,仅支持使用 WebSocket 连接方式访问 TDengine 数据库。
+TDengine ODBC 提供 64 位和 32 位两种驱动程序。但 32 位仅企业版支持,且仅支持 WebSocket 连接方式。
+**注意:**
+- 驱动管理器:确保使用与应用程序架构匹配的 ODBC 驱动管理器。32 位应用程序需要使用 32 位 ODBC 驱动管理器,64 位应用程序需要使用 64 位 ODBC 驱动管理器。
+- 数据源名称(DSN):32 位和 64 位 ODBC 驱动管理器都可以看到所有 DSN,用户 DSN 标签页下的 DSN 如果名字相同会共用,因此需要在 DSN 名称上加以区分。
 
 想更多了解 TDengine 时序数据库的使用,可访问 [TDengine官方文档](https://docs.taosdata.com/intro/)。
 
@@ -24,17 +27,17 @@
 
 ### 数据源连接类型与区别
 
-TDengine ODBC 支持两种连接 TDengine 数据库方式:Websocket 连接与 Native 连接,其区别如下:
+TDengine ODBC 支持两种连接 TDengine 数据库方式:WebSocket 连接与 Native 连接,其区别如下:
 
-1. 访问云服务仅支持使用 Websocket 连接方式。
+1. 访问云服务仅支持使用 WebSocket 连接方式。
 
 2. 32 位应用程序仅支持使用 WebSocket 连接方式。
 
-3. Websocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。
+3. WebSocket 连接的兼容性更好,一般不需要随着 TDengine 数据库服务端升级而升级客户端的库。
 
 4. Native 连接通常性能更好一点,但是必须与 TDengine 数据库服务端的版本保持一致。
 
-5. 对于一般用户,建议使用 **Websocket** 连接方式,性能与 Native 差别不大,兼容性更好。
+5. 对于一般用户,建议使用 **WebSocket** 连接方式,性能与 Native 差别不大,兼容性更好。
 
 ### WebSocket 连接
 
 4. 点击完成,进入 TDengine ODBC 数据源配置页面,填写如下必要信息
 
-   ![ODBC websocket connection config](./assets/odbc-ws-config-zh.webp)
+   ![ODBC WebSocket connection config](./assets/odbc-ws-config-zh.webp)
 
     4.1 【DSN】:Data Source Name 必填,为新添加的 ODBC 数据源命名
 
-    4.2【连接类型】 : 必选,选择连接类型,这里选择 【Websocket】
+    4.2【连接类型】 : 必选,选择连接类型,这里选择 【WebSocket】
 
     4.3【URL】必填,ODBC 数据源 URL,示例: `http://localhost:6041`, 云服务的 url 示例: `https://gw.cloud.taosdata.com?token=your_token`
@@ -111,7 +114,7 @@ WebSocket 连接方式除此之外还支持 Windows X64系统上运行的 32 位应用程序。
 
 | taos_odbc 版本 | 主要变化 | TDengine 版本 |
 | :----------- | :-------------------------------------------------------------------------------------------------- | :---------------- |
-| v1.1.0 | 1. 支持视图功能;<br/>2. 支持 VARBINARY/GEOMETRY 数据类型; | 3.3.3.0 及更高版本 |
+| v1.1.0 | 1. 支持视图功能;<br/>2. 支持 VARBINARY/GEOMETRY 数据类型;<br/>3. 支持 ODBC 32 位 WebSocket 连接方式(仅企业版支持);<br/>4. 支持 ODBC 数据源配置对话框设置对工业软件 KingSCADA、Kepware 等的兼容性适配选项(仅企业版支持); | 3.3.3.0 及更高版本 |
 | v1.0.2 | 支持 CP1252 字符编码; | 3.2.3.0 及更高版本 |
 | v1.0.1 | 1. 支持 DSN 设置 BI 模式,在 BI 模式下 TDengine 数据库不返回系统数据库和超级表子表信息;<br/>2. 重构字符集转换模块,提升读写性能;<br/>3. ODBC 数据源配置对话框中修改默认连接方式为“WebSocket”;<br/>4. ODBC 数据源配置对话框增加“测试连接”控件;<br/>5. ODBC 数据源配置支持中文/英文界面; | - |
 | v1.0.0.0 | 发布初始版本,支持与 TDengine 数据库交互以读写数据,具体请参考“API 参考”一节 | 3.2.2.0 及更高版本 |
diff --git a/docs/zh/14-reference/07-supported.md b/docs/zh/14-reference/07-supported.md
index 10ca237653..5115bd8188 100644
--- a/docs/zh/14-reference/07-supported.md
+++ b/docs/zh/14-reference/07-supported.md
@@ -6,15 +6,28 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表"
 ## TDengine 服务端支持的平台列表
 
-| | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 以上** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **macOS** |
-| ------------ | ---------------------------- | ----------------- | ---------------- | ------------------ | ------------ | ----------------- | ---------------- | --------- |
-| X64 | ●/E | ●/E | ● | ● | ●/E | ●/E | ●/E | ● |
-| 树莓派 ARM64 | | | ● | | | | | |
-| 华为云 ARM64 | | | | ● | | | | |
-| M1 | | | | | | | | ● |
+| | **版本** | **X64 64bit** | **ARM64** |
+| ----------------------|----------------| ------------- | --------- |
+| **CentOS** | **7.9 以上** | ● | ● |
+| **Ubuntu** | **18 以上** | ● | ● |
+| **RedHat** | **RHEL 7 以上** | ● | ● |
+| **Debian** | **6.0 以上** | ● | ● |
+| **FreeBSD** | **12 以上** | ● | ● |
+| **OpenSUSE** | **全部版本** | ● | ● |
+| **SUSE Linux** | **11 以上** | ● | ● |
+| **Fedora** | **21 以上** | ● | ● |
+| **Windows Server** | **2016 以上** | ●/E | |
+| **Windows** | **10/11** | ●/E | |
+| **银河麒麟** | **V10 以上** | ●/E | ●/E |
+| **中标麒麟** | **V7.0 以上** | ●/E | ●/E |
+| **统信 UOS** | **V20 以上** | ●/E | |
+| **凝思磐石** | **V8.0 以上** | ●/E | |
+| **华为欧拉 openEuler** | **V20.03 以上** | ●/E | |
+| **龙蜥 Anolis OS** | **V8.6 以上** | ●/E | |
+| **macOS** | **11.0 以上** | | ● |
 
 注:1) ● 表示经过官方测试验证, ○ 表示非官方测试验证,E 表示仅企业版支持。
 
-   2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。
+   2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/CentOS Stream/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。
 
 ## TDengine 客户端和连接器支持的平台列表
 
 对照矩阵如下:
 
-| **CPU** | **X64 64bit** | **X64 64bit** | **ARM64** | **X64 64bit** | **ARM64** |
-| ----------- | ------------- | ------------- | --------- | ------------- | --------- |
-| **OS** | **Linux** | **Win64** | **Linux** | **macOS** | **macOS** |
-| **C/C++** | ● | ● | ● | ● | ● |
-| **JDBC** | ● | ● | ● | ○ | ○ |
-| **Python** | ● | ● | ● | ● | ● |
-| **Go** | ● | ● | ● | ● | ● |
-| **NodeJs** | ● | ● | ● | ○ | ○ |
-| **C#** | ● | ● | ○ | ○ | ○ |
-| **Rust** | ● | ● | ○ | ● | ● |
-| **RESTful** | ● | ● | ● | ● | ● |
+| **CPU** | **X64 64bit** | **X64 64bit** | **X64 64bit** | **ARM64** | **ARM64** |
+| ----------- | ------------- | ------------- | ------------- | --------- | --------- |
+| **OS** | **Linux** | **Win64** | **macOS** | **Linux** | **macOS** |
+| **C/C++** | ● | ● | ● | ● | ● |
+| **JDBC** | ● | ● | ● | ● | ● |
+| **Python** | ● | ● | ● | ● | ● |
+| **Go** | ● | ● | ● | ● | ● |
+| **NodeJs** | ● | ● | ● | ● | ● |
+| **C#** | ● | ● | ○ | ● | ○ |
+| **Rust** | ● | ● | ● | ○ | ● |
+| **RESTful** | ● | ● | ● | ● | ● |
 
 注:● 表示官方测试验证通过,○ 表示非官方测试验证通过,-- 表示未经验证。
 
diff --git a/docs/zh/26-tdinternal/01-arch.md b/docs/zh/26-tdinternal/01-arch.md
index 04e47797a8..8aa69e45d5 100644
--- a/docs/zh/26-tdinternal/01-arch.md
+++ b/docs/zh/26-tdinternal/01-arch.md
@@ -178,7 +178,7 @@ TDengine 集群可以容纳单个、多个甚至几千个数据节点。应用
 
 TDengine 存储的数据包括采集的时序数据以及库、表相关的元数据、标签数据等,这些数据具体分为三部分:
 
-- 时序数据:TDengine 的核心存储对象,存放于 vnode 里,由 data、head 和 last 
三个文件组成,数据量大,查询量取决于应用场景。允许乱序写入,但暂时不支持删除操作,并且仅在 update 参数设置为 1 时允许更新操作。通过采用一个采集点一张表的模型,一个时间段的数据是连续存储,对单张表的写入是简单的追加操作,一次读,可以读到多条记录,这样保证对单个采集点的插入和查询操作,性能达到最优。 +- 时序数据:时序数据是 TDengine 的核心存储对象,它们被存储在 vnode 中。时序数据由 data、head、sma 和 stt 4 类文件组成,这些文件共同构成了时序数据的完整存储结构。由于时序数据的特点是数据量大且查询需求取决于具体应用场景,因此 TDengine 采用了“一个数据采集点一张表”的模型来优化存储和查询性能。在这种模型下,一个时间段内的数据是连续存储的,对单张表的写入是简单的追加操作,一次读取可以获取多条记录。这种设计确保了单个数据采集点的写入和查询操作都能达到最优性能。 - 数据表元数据:包含标签信息和 Table Schema 信息,存放于 vnode 里的 meta 文件,支持增删改查四个标准操作。数据量很大,有 N 张表,就有 N 条记录,因此采用 LRU 存储,支持标签数据的索引。TDengine 支持多核多线程并发查询。只要计算内存足够,元数据全内存存储,千万级别规模的标签数据过滤结果能毫秒级返回。在内存资源不足的情况下,仍然可以支持数千万张表的快速查询。 - 数据库元数据:存放于 mnode 里,包含系统节点、用户、DB、STable Schema 等信息,支持增删改查四个标准操作。这部分数据的量不大,可以全内存保存,而且由于客户端有缓存,查询量也不大。因此目前的设计虽是集中式存储管理,但不会构成性能瓶颈。 @@ -321,4 +321,4 @@ TDengine 采用了一种数据驱动的策略来实现缓存数据的持久化 此外,TDengine 还提供了数据分级存储的功能,允许用户将不同时间段的数据存储在不同存储设备的目录中,以此实现将“热”数据和“冷”数据分开存储。这样做可以充分利用各种存储资源,同时节约成本。例如,对于最新采集且需要频繁访问的数据,由于其读取性能要求较高,用户可以配置将这些数据存储在高性能的固态硬盘上。而对于超过一定期限、查询需求较低的数据,则可以将其存储在成本相对较低的机械硬盘上。 -为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 \ No newline at end of file +为了进一步降低存储成本,TDengine 还支持将时序数据存储在对象存储系统中。通过其创新性的设计,在大多数情况下,从对象存储系统中查询时序数据的性能接近本地硬盘的一半,而在某些场景下,性能甚至可以与本地硬盘相媲美。同时,TDengine 还允许用户对存储在对象存储中的时序数据执行删除和更新操作。 diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 01808d4f2f..739d257d86 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -4104,18 +4104,16 @@ void tDeleteMqMetaRsp(SMqMetaRsp* pRsp); #define MQ_DATA_RSP_VERSION 100 typedef struct { - struct { - SMqRspHead head; - STqOffsetVal rspOffset; - STqOffsetVal reqOffset; - int32_t blockNum; - int8_t withTbName; - int8_t withSchema; - SArray* blockDataLen; - SArray* blockData; - SArray* blockTbName; - SArray* blockSchema; - }; + SMqRspHead head; + STqOffsetVal rspOffset; + STqOffsetVal reqOffset; + int32_t blockNum; + int8_t withTbName; + int8_t withSchema; + SArray* blockDataLen; + SArray* blockData; + SArray* blockTbName; + SArray* blockSchema; union{ struct{ diff --git a/include/common/ttokendef.h b/include/common/ttokendef.h index b86830869c..3b2a0a0094 100644 --- a/include/common/ttokendef.h +++ b/include/common/ttokendef.h @@ -16,394 +16,394 @@ #ifndef _TD_COMMON_TOKEN_H_ #define _TD_COMMON_TOKEN_H_ -#define TK_OR 1 -#define TK_AND 2 -#define TK_UNION 3 -#define TK_ALL 4 -#define TK_MINUS 5 -#define TK_EXCEPT 6 -#define TK_INTERSECT 7 -#define TK_NK_BITAND 8 -#define TK_NK_BITOR 9 -#define TK_NK_LSHIFT 10 -#define TK_NK_RSHIFT 11 -#define TK_NK_PLUS 12 -#define TK_NK_MINUS 13 -#define TK_NK_STAR 14 -#define TK_NK_SLASH 15 -#define TK_NK_REM 16 -#define TK_NK_CONCAT 17 -#define TK_CREATE 18 -#define TK_ACCOUNT 19 -#define TK_NK_ID 20 -#define TK_PASS 21 -#define TK_NK_STRING 22 -#define TK_ALTER 23 -#define TK_PPS 24 -#define TK_TSERIES 25 -#define TK_STORAGE 26 -#define TK_STREAMS 27 -#define TK_QTIME 28 -#define TK_DBS 29 -#define TK_USERS 30 -#define TK_CONNS 31 -#define TK_STATE 32 -#define TK_NK_COMMA 33 -#define TK_HOST 34 -#define TK_IS_IMPORT 35 -#define TK_NK_INTEGER 36 -#define TK_CREATEDB 37 -#define TK_USER 38 -#define TK_ENABLE 39 -#define TK_SYSINFO 40 -#define TK_ADD 41 -#define TK_DROP 42 -#define TK_GRANT 43 -#define TK_ON 44 -#define TK_TO 45 -#define TK_REVOKE 46 -#define TK_FROM 47 -#define TK_SUBSCRIBE 48 -#define TK_READ 49 -#define TK_WRITE 50 -#define TK_NK_DOT 51 -#define TK_WITH 52 -#define TK_ENCRYPT_KEY 53 -#define TK_ANODE 54 -#define TK_UPDATE 55 -#define TK_ANODES 56 -#define TK_DNODE 57 
-#define TK_PORT 58 -#define TK_DNODES 59 -#define TK_RESTORE 60 -#define TK_NK_IPTOKEN 61 -#define TK_FORCE 62 -#define TK_UNSAFE 63 -#define TK_CLUSTER 64 -#define TK_LOCAL 65 -#define TK_QNODE 66 -#define TK_BNODE 67 -#define TK_SNODE 68 -#define TK_MNODE 69 -#define TK_VNODE 70 -#define TK_DATABASE 71 -#define TK_USE 72 -#define TK_FLUSH 73 -#define TK_TRIM 74 -#define TK_S3MIGRATE 75 -#define TK_COMPACT 76 -#define TK_IF 77 -#define TK_NOT 78 -#define TK_EXISTS 79 -#define TK_BUFFER 80 -#define TK_CACHEMODEL 81 -#define TK_CACHESIZE 82 -#define TK_COMP 83 -#define TK_DURATION 84 -#define TK_NK_VARIABLE 85 -#define TK_MAXROWS 86 -#define TK_MINROWS 87 -#define TK_KEEP 88 -#define TK_PAGES 89 -#define TK_PAGESIZE 90 -#define TK_TSDB_PAGESIZE 91 -#define TK_PRECISION 92 -#define TK_REPLICA 93 -#define TK_VGROUPS 94 -#define TK_SINGLE_STABLE 95 -#define TK_RETENTIONS 96 -#define TK_SCHEMALESS 97 -#define TK_WAL_LEVEL 98 -#define TK_WAL_FSYNC_PERIOD 99 -#define TK_WAL_RETENTION_PERIOD 100 -#define TK_WAL_RETENTION_SIZE 101 -#define TK_WAL_ROLL_PERIOD 102 -#define TK_WAL_SEGMENT_SIZE 103 -#define TK_STT_TRIGGER 104 -#define TK_TABLE_PREFIX 105 -#define TK_TABLE_SUFFIX 106 -#define TK_S3_CHUNKSIZE 107 -#define TK_S3_KEEPLOCAL 108 -#define TK_S3_COMPACT 109 -#define TK_KEEP_TIME_OFFSET 110 -#define TK_ENCRYPT_ALGORITHM 111 -#define TK_NK_COLON 112 -#define TK_BWLIMIT 113 -#define TK_START 114 -#define TK_TIMESTAMP 115 -#define TK_END 116 -#define TK_TABLE 117 -#define TK_NK_LP 118 -#define TK_NK_RP 119 -#define TK_USING 120 -#define TK_FILE 121 -#define TK_STABLE 122 -#define TK_COLUMN 123 -#define TK_MODIFY 124 -#define TK_RENAME 125 -#define TK_TAG 126 -#define TK_SET 127 -#define TK_NK_EQ 128 -#define TK_TAGS 129 -#define TK_BOOL 130 -#define TK_TINYINT 131 -#define TK_SMALLINT 132 -#define TK_INT 133 -#define TK_INTEGER 134 -#define TK_BIGINT 135 -#define TK_FLOAT 136 -#define TK_DOUBLE 137 -#define TK_BINARY 138 -#define TK_NCHAR 139 -#define TK_UNSIGNED 140 -#define TK_JSON 141 -#define TK_VARCHAR 142 -#define TK_MEDIUMBLOB 143 -#define TK_BLOB 144 -#define TK_VARBINARY 145 -#define TK_GEOMETRY 146 -#define TK_DECIMAL 147 -#define TK_COMMENT 148 -#define TK_MAX_DELAY 149 -#define TK_WATERMARK 150 -#define TK_ROLLUP 151 -#define TK_TTL 152 -#define TK_SMA 153 -#define TK_DELETE_MARK 154 -#define TK_FIRST 155 -#define TK_LAST 156 -#define TK_SHOW 157 -#define TK_FULL 158 -#define TK_PRIVILEGES 159 -#define TK_DATABASES 160 -#define TK_TABLES 161 -#define TK_STABLES 162 -#define TK_MNODES 163 -#define TK_QNODES 164 -#define TK_ARBGROUPS 165 -#define TK_FUNCTIONS 166 -#define TK_INDEXES 167 -#define TK_ACCOUNTS 168 -#define TK_APPS 169 -#define TK_CONNECTIONS 170 -#define TK_LICENCES 171 -#define TK_GRANTS 172 -#define TK_LOGS 173 -#define TK_MACHINES 174 -#define TK_ENCRYPTIONS 175 -#define TK_QUERIES 176 -#define TK_SCORES 177 -#define TK_TOPICS 178 -#define TK_VARIABLES 179 -#define TK_BNODES 180 -#define TK_SNODES 181 -#define TK_TRANSACTIONS 182 -#define TK_DISTRIBUTED 183 -#define TK_CONSUMERS 184 -#define TK_SUBSCRIPTIONS 185 -#define TK_VNODES 186 -#define TK_ALIVE 187 -#define TK_VIEWS 188 -#define TK_VIEW 189 -#define TK_COMPACTS 190 -#define TK_NORMAL 191 -#define TK_CHILD 192 -#define TK_LIKE 193 -#define TK_TBNAME 194 -#define TK_QTAGS 195 -#define TK_AS 196 -#define TK_SYSTEM 197 -#define TK_TSMA 198 -#define TK_INTERVAL 199 -#define TK_RECURSIVE 200 -#define TK_TSMAS 201 -#define TK_FUNCTION 202 -#define TK_INDEX 203 -#define TK_COUNT 204 -#define TK_LAST_ROW 205 -#define 
TK_META 206 -#define TK_ONLY 207 -#define TK_TOPIC 208 -#define TK_CONSUMER 209 -#define TK_GROUP 210 -#define TK_DESC 211 -#define TK_DESCRIBE 212 -#define TK_RESET 213 -#define TK_QUERY 214 -#define TK_CACHE 215 -#define TK_EXPLAIN 216 -#define TK_ANALYZE 217 -#define TK_VERBOSE 218 -#define TK_NK_BOOL 219 -#define TK_RATIO 220 -#define TK_NK_FLOAT 221 -#define TK_OUTPUTTYPE 222 -#define TK_AGGREGATE 223 -#define TK_BUFSIZE 224 -#define TK_LANGUAGE 225 -#define TK_REPLACE 226 -#define TK_STREAM 227 -#define TK_INTO 228 -#define TK_PAUSE 229 -#define TK_RESUME 230 -#define TK_PRIMARY 231 -#define TK_KEY 232 -#define TK_TRIGGER 233 -#define TK_AT_ONCE 234 -#define TK_WINDOW_CLOSE 235 -#define TK_IGNORE 236 -#define TK_EXPIRED 237 -#define TK_FILL_HISTORY 238 -#define TK_SUBTABLE 239 -#define TK_UNTREATED 240 -#define TK_KILL 241 -#define TK_CONNECTION 242 -#define TK_TRANSACTION 243 -#define TK_BALANCE 244 -#define TK_VGROUP 245 -#define TK_LEADER 246 -#define TK_MERGE 247 -#define TK_REDISTRIBUTE 248 -#define TK_SPLIT 249 -#define TK_DELETE 250 -#define TK_INSERT 251 -#define TK_NK_BIN 252 -#define TK_NK_HEX 253 -#define TK_NULL 254 -#define TK_NK_QUESTION 255 -#define TK_NK_ALIAS 256 -#define TK_NK_ARROW 257 -#define TK_ROWTS 258 -#define TK_QSTART 259 -#define TK_QEND 260 -#define TK_QDURATION 261 -#define TK_WSTART 262 -#define TK_WEND 263 -#define TK_WDURATION 264 -#define TK_IROWTS 265 -#define TK_ISFILLED 266 -#define TK_FLOW 267 -#define TK_FHIGH 268 -#define TK_FROWTS 269 -#define TK_CAST 270 -#define TK_POSITION 271 -#define TK_IN 272 -#define TK_FOR 273 -#define TK_NOW 274 -#define TK_TODAY 275 -#define TK_RAND 276 -#define TK_SUBSTR 277 -#define TK_SUBSTRING 278 -#define TK_BOTH 279 -#define TK_TRAILING 280 -#define TK_LEADING 281 -#define TK_TIMEZONE 282 -#define TK_CLIENT_VERSION 283 -#define TK_SERVER_VERSION 284 -#define TK_SERVER_STATUS 285 -#define TK_CURRENT_USER 286 -#define TK_PI 287 -#define TK_CASE 288 -#define TK_WHEN 289 -#define TK_THEN 290 -#define TK_ELSE 291 -#define TK_BETWEEN 292 -#define TK_IS 293 -#define TK_NK_LT 294 -#define TK_NK_GT 295 -#define TK_NK_LE 296 -#define TK_NK_GE 297 -#define TK_NK_NE 298 -#define TK_MATCH 299 -#define TK_NMATCH 300 -#define TK_CONTAINS 301 -#define TK_JOIN 302 -#define TK_INNER 303 -#define TK_LEFT 304 -#define TK_RIGHT 305 -#define TK_OUTER 306 -#define TK_SEMI 307 -#define TK_ANTI 308 -#define TK_ASOF 309 -#define TK_WINDOW 310 -#define TK_WINDOW_OFFSET 311 -#define TK_JLIMIT 312 -#define TK_SELECT 313 -#define TK_NK_HINT 314 -#define TK_DISTINCT 315 -#define TK_WHERE 316 -#define TK_PARTITION 317 -#define TK_BY 318 -#define TK_SESSION 319 -#define TK_STATE_WINDOW 320 -#define TK_EVENT_WINDOW 321 -#define TK_COUNT_WINDOW 322 -#define TK_ANOMALY_WINDOW 323 -#define TK_SLIDING 324 -#define TK_FILL 325 -#define TK_VALUE 326 -#define TK_VALUE_F 327 -#define TK_NONE 328 -#define TK_PREV 329 -#define TK_NULL_F 330 -#define TK_LINEAR 331 -#define TK_NEXT 332 -#define TK_HAVING 333 -#define TK_RANGE 334 -#define TK_EVERY 335 -#define TK_ORDER 336 -#define TK_SLIMIT 337 -#define TK_SOFFSET 338 -#define TK_LIMIT 339 -#define TK_OFFSET 340 -#define TK_ASC 341 -#define TK_NULLS 342 -#define TK_ABORT 343 -#define TK_AFTER 344 -#define TK_ATTACH 345 -#define TK_BEFORE 346 -#define TK_BEGIN 347 -#define TK_BITAND 348 -#define TK_BITNOT 349 -#define TK_BITOR 350 -#define TK_BLOCKS 351 -#define TK_CHANGE 352 -#define TK_COMMA 353 -#define TK_CONCAT 354 -#define TK_CONFLICT 355 -#define TK_COPY 356 -#define TK_DEFERRED 357 -#define 
TK_DELIMITERS 358 -#define TK_DETACH 359 -#define TK_DIVIDE 360 -#define TK_DOT 361 -#define TK_EACH 362 -#define TK_FAIL 363 -#define TK_GLOB 364 -#define TK_ID 365 -#define TK_IMMEDIATE 366 -#define TK_IMPORT 367 -#define TK_INITIALLY 368 -#define TK_INSTEAD 369 -#define TK_ISNULL 370 -#define TK_MODULES 371 -#define TK_NK_BITNOT 372 -#define TK_NK_SEMI 373 -#define TK_NOTNULL 374 -#define TK_OF 375 -#define TK_PLUS 376 -#define TK_PRIVILEGE 377 -#define TK_RAISE 378 -#define TK_RESTRICT 379 -#define TK_ROW 380 -#define TK_STAR 381 -#define TK_STATEMENT 382 -#define TK_STRICT 383 -#define TK_STRING 384 -#define TK_TIMES 385 -#define TK_VALUES 386 -#define TK_VARIABLE 387 -#define TK_WAL 388 +#define TK_OR 1 +#define TK_AND 2 +#define TK_UNION 3 +#define TK_ALL 4 +#define TK_MINUS 5 +#define TK_EXCEPT 6 +#define TK_INTERSECT 7 +#define TK_NK_BITAND 8 +#define TK_NK_BITOR 9 +#define TK_NK_LSHIFT 10 +#define TK_NK_RSHIFT 11 +#define TK_NK_PLUS 12 +#define TK_NK_MINUS 13 +#define TK_NK_STAR 14 +#define TK_NK_SLASH 15 +#define TK_NK_REM 16 +#define TK_NK_CONCAT 17 +#define TK_CREATE 18 +#define TK_ACCOUNT 19 +#define TK_NK_ID 20 +#define TK_PASS 21 +#define TK_NK_STRING 22 +#define TK_ALTER 23 +#define TK_PPS 24 +#define TK_TSERIES 25 +#define TK_STORAGE 26 +#define TK_STREAMS 27 +#define TK_QTIME 28 +#define TK_DBS 29 +#define TK_USERS 30 +#define TK_CONNS 31 +#define TK_STATE 32 +#define TK_NK_COMMA 33 +#define TK_HOST 34 +#define TK_IS_IMPORT 35 +#define TK_NK_INTEGER 36 +#define TK_CREATEDB 37 +#define TK_USER 38 +#define TK_ENABLE 39 +#define TK_SYSINFO 40 +#define TK_ADD 41 +#define TK_DROP 42 +#define TK_GRANT 43 +#define TK_ON 44 +#define TK_TO 45 +#define TK_REVOKE 46 +#define TK_FROM 47 +#define TK_SUBSCRIBE 48 +#define TK_READ 49 +#define TK_WRITE 50 +#define TK_NK_DOT 51 +#define TK_WITH 52 +#define TK_ENCRYPT_KEY 53 +#define TK_ANODE 54 +#define TK_UPDATE 55 +#define TK_ANODES 56 +#define TK_DNODE 57 +#define TK_PORT 58 +#define TK_DNODES 59 +#define TK_RESTORE 60 +#define TK_NK_IPTOKEN 61 +#define TK_FORCE 62 +#define TK_UNSAFE 63 +#define TK_CLUSTER 64 +#define TK_LOCAL 65 +#define TK_QNODE 66 +#define TK_BNODE 67 +#define TK_SNODE 68 +#define TK_MNODE 69 +#define TK_VNODE 70 +#define TK_DATABASE 71 +#define TK_USE 72 +#define TK_FLUSH 73 +#define TK_TRIM 74 +#define TK_S3MIGRATE 75 +#define TK_COMPACT 76 +#define TK_IF 77 +#define TK_NOT 78 +#define TK_EXISTS 79 +#define TK_BUFFER 80 +#define TK_CACHEMODEL 81 +#define TK_CACHESIZE 82 +#define TK_COMP 83 +#define TK_DURATION 84 +#define TK_NK_VARIABLE 85 +#define TK_MAXROWS 86 +#define TK_MINROWS 87 +#define TK_KEEP 88 +#define TK_PAGES 89 +#define TK_PAGESIZE 90 +#define TK_TSDB_PAGESIZE 91 +#define TK_PRECISION 92 +#define TK_REPLICA 93 +#define TK_VGROUPS 94 +#define TK_SINGLE_STABLE 95 +#define TK_RETENTIONS 96 +#define TK_SCHEMALESS 97 +#define TK_WAL_LEVEL 98 +#define TK_WAL_FSYNC_PERIOD 99 +#define TK_WAL_RETENTION_PERIOD 100 +#define TK_WAL_RETENTION_SIZE 101 +#define TK_WAL_ROLL_PERIOD 102 +#define TK_WAL_SEGMENT_SIZE 103 +#define TK_STT_TRIGGER 104 +#define TK_TABLE_PREFIX 105 +#define TK_TABLE_SUFFIX 106 +#define TK_S3_CHUNKPAGES 107 +#define TK_S3_KEEPLOCAL 108 +#define TK_S3_COMPACT 109 +#define TK_KEEP_TIME_OFFSET 110 +#define TK_ENCRYPT_ALGORITHM 111 +#define TK_NK_COLON 112 +#define TK_BWLIMIT 113 +#define TK_START 114 +#define TK_TIMESTAMP 115 +#define TK_END 116 +#define TK_TABLE 117 +#define TK_NK_LP 118 +#define TK_NK_RP 119 +#define TK_USING 120 +#define TK_FILE 121 +#define TK_STABLE 122 +#define TK_COLUMN 
123 +#define TK_MODIFY 124 +#define TK_RENAME 125 +#define TK_TAG 126 +#define TK_SET 127 +#define TK_NK_EQ 128 +#define TK_TAGS 129 +#define TK_BOOL 130 +#define TK_TINYINT 131 +#define TK_SMALLINT 132 +#define TK_INT 133 +#define TK_INTEGER 134 +#define TK_BIGINT 135 +#define TK_FLOAT 136 +#define TK_DOUBLE 137 +#define TK_BINARY 138 +#define TK_NCHAR 139 +#define TK_UNSIGNED 140 +#define TK_JSON 141 +#define TK_VARCHAR 142 +#define TK_MEDIUMBLOB 143 +#define TK_BLOB 144 +#define TK_VARBINARY 145 +#define TK_GEOMETRY 146 +#define TK_DECIMAL 147 +#define TK_COMMENT 148 +#define TK_MAX_DELAY 149 +#define TK_WATERMARK 150 +#define TK_ROLLUP 151 +#define TK_TTL 152 +#define TK_SMA 153 +#define TK_DELETE_MARK 154 +#define TK_FIRST 155 +#define TK_LAST 156 +#define TK_SHOW 157 +#define TK_FULL 158 +#define TK_PRIVILEGES 159 +#define TK_DATABASES 160 +#define TK_TABLES 161 +#define TK_STABLES 162 +#define TK_MNODES 163 +#define TK_QNODES 164 +#define TK_ARBGROUPS 165 +#define TK_FUNCTIONS 166 +#define TK_INDEXES 167 +#define TK_ACCOUNTS 168 +#define TK_APPS 169 +#define TK_CONNECTIONS 170 +#define TK_LICENCES 171 +#define TK_GRANTS 172 +#define TK_LOGS 173 +#define TK_MACHINES 174 +#define TK_ENCRYPTIONS 175 +#define TK_QUERIES 176 +#define TK_SCORES 177 +#define TK_TOPICS 178 +#define TK_VARIABLES 179 +#define TK_BNODES 180 +#define TK_SNODES 181 +#define TK_TRANSACTIONS 182 +#define TK_DISTRIBUTED 183 +#define TK_CONSUMERS 184 +#define TK_SUBSCRIPTIONS 185 +#define TK_VNODES 186 +#define TK_ALIVE 187 +#define TK_VIEWS 188 +#define TK_VIEW 189 +#define TK_COMPACTS 190 +#define TK_NORMAL 191 +#define TK_CHILD 192 +#define TK_LIKE 193 +#define TK_TBNAME 194 +#define TK_QTAGS 195 +#define TK_AS 196 +#define TK_SYSTEM 197 +#define TK_TSMA 198 +#define TK_INTERVAL 199 +#define TK_RECURSIVE 200 +#define TK_TSMAS 201 +#define TK_FUNCTION 202 +#define TK_INDEX 203 +#define TK_COUNT 204 +#define TK_LAST_ROW 205 +#define TK_META 206 +#define TK_ONLY 207 +#define TK_TOPIC 208 +#define TK_CONSUMER 209 +#define TK_GROUP 210 +#define TK_DESC 211 +#define TK_DESCRIBE 212 +#define TK_RESET 213 +#define TK_QUERY 214 +#define TK_CACHE 215 +#define TK_EXPLAIN 216 +#define TK_ANALYZE 217 +#define TK_VERBOSE 218 +#define TK_NK_BOOL 219 +#define TK_RATIO 220 +#define TK_NK_FLOAT 221 +#define TK_OUTPUTTYPE 222 +#define TK_AGGREGATE 223 +#define TK_BUFSIZE 224 +#define TK_LANGUAGE 225 +#define TK_REPLACE 226 +#define TK_STREAM 227 +#define TK_INTO 228 +#define TK_PAUSE 229 +#define TK_RESUME 230 +#define TK_PRIMARY 231 +#define TK_KEY 232 +#define TK_TRIGGER 233 +#define TK_AT_ONCE 234 +#define TK_WINDOW_CLOSE 235 +#define TK_IGNORE 236 +#define TK_EXPIRED 237 +#define TK_FILL_HISTORY 238 +#define TK_SUBTABLE 239 +#define TK_UNTREATED 240 +#define TK_KILL 241 +#define TK_CONNECTION 242 +#define TK_TRANSACTION 243 +#define TK_BALANCE 244 +#define TK_VGROUP 245 +#define TK_LEADER 246 +#define TK_MERGE 247 +#define TK_REDISTRIBUTE 248 +#define TK_SPLIT 249 +#define TK_DELETE 250 +#define TK_INSERT 251 +#define TK_NK_BIN 252 +#define TK_NK_HEX 253 +#define TK_NULL 254 +#define TK_NK_QUESTION 255 +#define TK_NK_ALIAS 256 +#define TK_NK_ARROW 257 +#define TK_ROWTS 258 +#define TK_QSTART 259 +#define TK_QEND 260 +#define TK_QDURATION 261 +#define TK_WSTART 262 +#define TK_WEND 263 +#define TK_WDURATION 264 +#define TK_IROWTS 265 +#define TK_ISFILLED 266 +#define TK_FLOW 267 +#define TK_FHIGH 268 +#define TK_FROWTS 269 +#define TK_CAST 270 +#define TK_POSITION 271 +#define TK_IN 272 +#define TK_FOR 273 +#define TK_NOW 274 
+#define TK_TODAY 275 +#define TK_RAND 276 +#define TK_SUBSTR 277 +#define TK_SUBSTRING 278 +#define TK_BOTH 279 +#define TK_TRAILING 280 +#define TK_LEADING 281 +#define TK_TIMEZONE 282 +#define TK_CLIENT_VERSION 283 +#define TK_SERVER_VERSION 284 +#define TK_SERVER_STATUS 285 +#define TK_CURRENT_USER 286 +#define TK_PI 287 +#define TK_CASE 288 +#define TK_WHEN 289 +#define TK_THEN 290 +#define TK_ELSE 291 +#define TK_BETWEEN 292 +#define TK_IS 293 +#define TK_NK_LT 294 +#define TK_NK_GT 295 +#define TK_NK_LE 296 +#define TK_NK_GE 297 +#define TK_NK_NE 298 +#define TK_MATCH 299 +#define TK_NMATCH 300 +#define TK_CONTAINS 301 +#define TK_JOIN 302 +#define TK_INNER 303 +#define TK_LEFT 304 +#define TK_RIGHT 305 +#define TK_OUTER 306 +#define TK_SEMI 307 +#define TK_ANTI 308 +#define TK_ASOF 309 +#define TK_WINDOW 310 +#define TK_WINDOW_OFFSET 311 +#define TK_JLIMIT 312 +#define TK_SELECT 313 +#define TK_NK_HINT 314 +#define TK_DISTINCT 315 +#define TK_WHERE 316 +#define TK_PARTITION 317 +#define TK_BY 318 +#define TK_SESSION 319 +#define TK_STATE_WINDOW 320 +#define TK_EVENT_WINDOW 321 +#define TK_COUNT_WINDOW 322 +#define TK_ANOMALY_WINDOW 323 +#define TK_SLIDING 324 +#define TK_FILL 325 +#define TK_VALUE 326 +#define TK_VALUE_F 327 +#define TK_NONE 328 +#define TK_PREV 329 +#define TK_NULL_F 330 +#define TK_LINEAR 331 +#define TK_NEXT 332 +#define TK_HAVING 333 +#define TK_RANGE 334 +#define TK_EVERY 335 +#define TK_ORDER 336 +#define TK_SLIMIT 337 +#define TK_SOFFSET 338 +#define TK_LIMIT 339 +#define TK_OFFSET 340 +#define TK_ASC 341 +#define TK_NULLS 342 +#define TK_ABORT 343 +#define TK_AFTER 344 +#define TK_ATTACH 345 +#define TK_BEFORE 346 +#define TK_BEGIN 347 +#define TK_BITAND 348 +#define TK_BITNOT 349 +#define TK_BITOR 350 +#define TK_BLOCKS 351 +#define TK_CHANGE 352 +#define TK_COMMA 353 +#define TK_CONCAT 354 +#define TK_CONFLICT 355 +#define TK_COPY 356 +#define TK_DEFERRED 357 +#define TK_DELIMITERS 358 +#define TK_DETACH 359 +#define TK_DIVIDE 360 +#define TK_DOT 361 +#define TK_EACH 362 +#define TK_FAIL 363 +#define TK_GLOB 364 +#define TK_ID 365 +#define TK_IMMEDIATE 366 +#define TK_IMPORT 367 +#define TK_INITIALLY 368 +#define TK_INSTEAD 369 +#define TK_ISNULL 370 +#define TK_MODULES 371 +#define TK_NK_BITNOT 372 +#define TK_NK_SEMI 373 +#define TK_NOTNULL 374 +#define TK_OF 375 +#define TK_PLUS 376 +#define TK_PRIVILEGE 377 +#define TK_RAISE 378 +#define TK_RESTRICT 379 +#define TK_ROW 380 +#define TK_STAR 381 +#define TK_STATEMENT 382 +#define TK_STRICT 383 +#define TK_STRING 384 +#define TK_TIMES 385 +#define TK_VALUES 386 +#define TK_VARIABLE 387 +#define TK_WAL 388 #define TK_NK_SPACE 600 #define TK_NK_COMMENT 601
diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index ae26d5f2ae..1c04da08be 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -151,8 +151,9 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId, * @param tversion * @return */ -int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion, int32_t idx, bool* tbGet); +int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName, + int32_t tableNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx, + bool* tbGet); /** * The main task execution function, including query on both table and multiple tables,
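The two extra parameters added to qGetQueryTableSchemaVersion let the callee bound its string copies instead of assuming the caller's buffers are large enough. A minimal caller sketch in C; the buffer capacities and the `tinfo` handle below are illustrative, not taken from this patch:

```c
/* Hedged sketch: pass each destination buffer together with its capacity
 * so the callee can truncate safely. Sizes are placeholders. */
char    dbName[64]     = {0};
char    tableName[192] = {0};
int32_t sversion = 0, tversion = 0;
bool    tbGet = false;

int32_t code = qGetQueryTableSchemaVersion(tinfo, dbName, (int32_t)sizeof(dbName),
                                           tableName, (int32_t)sizeof(tableName),
                                           &sversion, &tversion, /*idx*/ 0, &tbGet);
```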
@@ -209,7 +210,7 @@ SMqBatchMetaRsp* qStreamExtractMetaMsg(qTaskInfo_t tinfo); const SSchemaWrapper* qExtractSchemaFromTask(qTaskInfo_t tinfo); -const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); +const char* qExtractTbnameFromTask(qTaskInfo_t tinfo); void* qExtractReaderFromStreamScanner(void* scanner);
diff --git a/include/libs/parser/parser.h index 7271da8ff6..832e4f8863 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -176,8 +176,8 @@ int32_t smlBindData(SQuery* handle, bool dataFormat, SArray* tags, SArray* colsS STableMeta* pTableMeta, char* tableName, const char* sTableName, int32_t sTableNameLen, int32_t ttl, char* msgBuf, int32_t msgBufLen); int32_t smlBuildOutput(SQuery* handle, SHashObj* pVgHash); -int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* fields, - int numFields, bool needChangeLength, char* errstr, int32_t errstrLen); +int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* fields, + int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw); int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray); int32_t serializeVgroupsCreateTableBatch(SHashObj* pVgroupHashmap, SArray** pOut);
diff --git a/include/util/tdef.h index e15ec0b499..b924b377da 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -450,7 +450,7 @@ typedef enum ELogicConditionType { #define TSDB_MIN_S3_CHUNK_SIZE (128 * 1024) #define TSDB_MAX_S3_CHUNK_SIZE (1024 * 1024) -#define TSDB_DEFAULT_S3_CHUNK_SIZE (256 * 1024) +#define TSDB_DEFAULT_S3_CHUNK_SIZE (128 * 1024) #define TSDB_MIN_S3_KEEP_LOCAL (1 * 1440) // unit minute #define TSDB_MAX_S3_KEEP_LOCAL (365000 * 1440) #define TSDB_DEFAULT_S3_KEEP_LOCAL (365 * 1440)
diff --git a/include/util/tlog.h index 09ebb35e8f..a6c87593d1 100644 --- a/include/util/tlog.h +++ b/include/util/tlog.h @@ -70,7 +70,7 @@ extern int32_t tdbDebugFlag; extern int32_t sndDebugFlag; extern int32_t simDebugFlag; -extern int32_t tqClientDebug; +extern int32_t tqClientDebugFlag; int32_t taosInitLog(const char *logName, int32_t maxFiles, bool tsc); void taosCloseLog();
diff --git a/packaging/tools/make_install.bat index 0b2a55b89c..04d342ea06 100644 --- a/packaging/tools/make_install.bat +++ b/packaging/tools/make_install.bat @@ -12,9 +12,18 @@ if exist C:\\TDengine\\data\\dnode\\dnodeCfg.json ( rem // stop and delete service mshta vbscript:createobject("shell.application").shellexecute("%~s0",":stop_delete","","runas",1)(window.close) -echo This might take a few moment to accomplish deleting service taosd/taosadapter ... + +if exist %binary_dir%\\build\\bin\\taosadapter.exe ( + echo This might take a few moments to delete service taosd/taosadapter ... +) + +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + echo This might take a few moments to delete service taosd/taoskeeper ...
+) + call :check_svc taosd call :check_svc taosadapter +call :check_svc taoskeeper set source_dir=%2 set source_dir=%source_dir:/=\\% @@ -46,6 +55,11 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml ( copy %binary_dir%\\test\\cfg\\taosadapter.toml %target_dir%\\cfg\\taosadapter.toml > nul ) ) +if exist %binary_dir%\\test\\cfg\\taoskeeper.toml ( + if not exist %target_dir%\\cfg\\taoskeeper.toml ( + copy %binary_dir%\\test\\cfg\\taoskeeper.toml %target_dir%\\cfg\\taoskeeper.toml > nul + ) +) copy %source_dir%\\include\\client\\taos.h %target_dir%\\include > nul copy %source_dir%\\include\\util\\taoserror.h %target_dir%\\include > nul copy %source_dir%\\include\\libs\\function\\taosudf.h %target_dir%\\include > nul @@ -98,12 +112,15 @@ if %Enterprise% == TRUE ( copy %binary_dir%\\build\\bin\\*explorer.exe %target_dir% > nul ) ) - + copy %binary_dir%\\build\\bin\\taosd.exe %target_dir% > nul copy %binary_dir%\\build\\bin\\udfd.exe %target_dir% > nul if exist %binary_dir%\\build\\bin\\taosadapter.exe ( copy %binary_dir%\\build\\bin\\taosadapter.exe %target_dir% > nul ) +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + copy %binary_dir%\\build\\bin\\taoskeeper.exe %target_dir% > nul +) mshta vbscript:createobject("shell.application").shellexecute("%~s0",":hasAdmin","","runas",1)(window.close) @@ -116,6 +133,10 @@ if exist %binary_dir%\\build\\bin\\taosadapter.exe ( echo To start/stop taosAdapter with administrator privileges: %ESC%[92msc start/stop taosadapter %ESC%[0m ) +if exist %binary_dir%\\build\\bin\\taoskeeper.exe ( + echo To start/stop taosKeeper with administrator privileges: %ESC%[92msc start/stop taoskeeper %ESC%[0m +) + goto :eof :hasAdmin @@ -123,6 +144,7 @@ goto :eof call :stop_delete call :check_svc taosd call :check_svc taosadapter +call :check_svc taoskeeper if exist c:\\windows\\sysnative ( echo x86 @@ -141,6 +163,7 @@ if exist c:\\windows\\sysnative ( rem // create services sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND +sc create "taoskeeper" binPath= "C:\\TDengine\\taoskeeper.exe" start= DEMAND set "env=HKLM\System\CurrentControlSet\Control\Session Manager\Environment" for /f "tokens=2*" %%I in ('reg query "%env%" /v Path ^| findstr /i "\"') do ( @@ -181,6 +204,8 @@ sc stop taosd sc delete taosd sc stop taosadapter sc delete taosadapter +sc stop taoskeeper +sc delete taoskeeper exit /B 0 :check_svc diff --git a/packaging/tools/make_install.sh b/packaging/tools/make_install.sh index 13447bd5e4..1b8fa2fb70 100755 --- a/packaging/tools/make_install.sh +++ b/packaging/tools/make_install.sh @@ -129,6 +129,13 @@ function kill_taosadapter() { fi } +function kill_taoskeeper() { + pid=$(ps -ef | grep "taoskeeper" | grep -v "grep" | awk '{print $2}') + if [ -n "$pid" ]; then + ${csudo}kill -9 $pid || : + fi +} + function kill_taosd() { pid=$(ps -ef | grep -w ${serverName} | grep -v "grep" | awk '{print $2}') if [ -n "$pid" ]; then @@ -155,6 +162,7 @@ function install_bin() { ${csudo}rm -f ${bin_link_dir}/${clientName} || : ${csudo}rm -f ${bin_link_dir}/${serverName} || : ${csudo}rm -f ${bin_link_dir}/taosadapter || : + ${csudo}rm -f ${bin_link_dir}/taoskeeper || : ${csudo}rm -f ${bin_link_dir}/udfd || : ${csudo}rm -f ${bin_link_dir}/taosdemo || : ${csudo}rm -f ${bin_link_dir}/taosdump || : @@ -169,6 +177,7 @@ function install_bin() { [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark 
${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : ${csudo}cp -r ${binary_dir}/build/bin/${serverName} ${install_main_dir}/bin || : @@ -183,6 +192,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : + [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump ${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdemo ] && ${csudo}ln -s ${install_main_dir}/bin/taosdemo ${bin_link_dir}/taosdemo > /dev/null 2>&1 || : @@ -197,6 +207,7 @@ function install_bin() { [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : [ -f ${binary_dir}/build/bin/taosdump ] && ${csudo}cp -r ${binary_dir}/build/bin/taosdump ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosadapter ] && ${csudo}cp -r ${binary_dir}/build/bin/taosadapter ${install_main_dir}/bin || : + [ -f ${binary_dir}/build/bin/taoskeeper ] && ${csudo}cp -r ${binary_dir}/build/bin/taoskeeper ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/udfd ] && ${csudo}cp -r ${binary_dir}/build/bin/udfd ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/taosx ] && ${csudo}cp -r ${binary_dir}/build/bin/taosx ${install_main_dir}/bin || : [ -f ${binary_dir}/build/bin/*explorer ] && ${csudo}cp -r ${binary_dir}/build/bin/*explorer ${install_main_dir}/bin || : @@ -208,6 +219,7 @@ function install_bin() { [ -x ${install_main_dir}/bin/${clientName} ] && ${csudo}ln -s ${install_main_dir}/bin/${clientName} ${bin_link_dir}/${clientName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/${serverName} ] && ${csudo}ln -s ${install_main_dir}/bin/${serverName} ${bin_link_dir}/${serverName} > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosadapter ] && ${csudo}ln -s ${install_main_dir}/bin/taosadapter ${bin_link_dir}/taosadapter > /dev/null 2>&1 || : + [ -x ${install_main_dir}/bin/taoskeeper ] && ${csudo}ln -s ${install_main_dir}/bin/taoskeeper ${bin_link_dir}/taoskeeper > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/udfd ] && ${csudo}ln -s ${install_main_dir}/bin/udfd ${bin_link_dir}/udfd > /dev/null 2>&1 || : [ -x ${install_main_dir}/bin/taosdump ] && ${csudo}ln -s ${install_main_dir}/bin/taosdump 
${bin_link_dir}/taosdump > /dev/null 2>&1 || : [ -f ${install_main_dir}/bin/taosBenchmark ] && ${csudo}ln -sf ${install_main_dir}/bin/taosBenchmark ${install_main_dir}/bin/taosdemo > /dev/null 2>&1 || : @@ -407,6 +419,29 @@ function install_taosadapter_config() { fi } +function install_taoskeeper_config() { + if [ ! -f "${cfg_install_dir}/taoskeeper.toml" ]; then + ${csudo}mkdir -p ${cfg_install_dir} || : + [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_install_dir} && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || : + [ -f ${cfg_install_dir}/taoskeeper.toml ] && + ${csudo}chmod 644 ${cfg_install_dir}/taoskeeper.toml || : + [ -f ${binary_dir}/test/cfg/taoskeeper.toml ] && + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \ + ${cfg_install_dir}/taoskeeper.toml.${verNumber} || : + [ -f ${cfg_install_dir}/taoskeeper.toml ] && + ${csudo}ln -s ${cfg_install_dir}/taoskeeper.toml \ + ${install_main_dir}/cfg/taoskeeper.toml > /dev/null 2>&1 || : + else + if [ -f "${binary_dir}/test/cfg/taoskeeper.toml" ]; then + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml \ + ${cfg_install_dir}/taoskeeper.toml.${verNumber} || : + ${csudo}cp -f ${binary_dir}/test/cfg/taoskeeper.toml ${cfg_dir} || : + fi + fi +} + function install_log() { ${csudo}rm -rf ${log_dir} || : ${csudo}mkdir -p ${log_dir} && ${csudo}chmod 777 ${log_dir} @@ -526,6 +561,15 @@ function install_taosadapter_service() { fi } +function install_taoskeeper_service() { + if ((${service_mod} == 0)); then + [ -f ${binary_dir}/test/cfg/taoskeeper.service ] && + ${csudo}cp ${binary_dir}/test/cfg/taoskeeper.service \ + ${service_config_dir}/ || : + ${csudo}systemctl daemon-reload + fi +} + function install_service_on_launchctl() { ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosd.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosd.plist /Library/LaunchDaemons/com.taosdata.taosd.plist @@ -534,6 +578,10 @@ function install_service_on_launchctl() { ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : ${csudo}cp ${script_dir}/com.taosdata.taosadapter.plist /Library/LaunchDaemons/com.taosdata.taosadapter.plist ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taosadapter.plist > /dev/null 2>&1 || : + + ${csudo}launchctl unload -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || : + ${csudo}cp ${script_dir}/com.taosdata.taoskeeper.plist /Library/LaunchDaemons/com.taosdata.taoskeeper.plist + ${csudo}launchctl load -w /Library/LaunchDaemons/com.taosdata.taoskeeper.plist > /dev/null 2>&1 || : } function install_service() { @@ -549,6 +597,7 @@ function install_service() { install_service_on_launchctl fi } + function install_app() { if [ "$osType" = "Darwin" ]; then ${csudo}rm -rf /Applications/TDengine.app && @@ -573,6 +622,7 @@ function update_TDengine() { elif ((${service_mod} == 1)); then ${csudo}service ${serverName} stop || : else + kill_taoskeeper kill_taosadapter kill_taosd fi @@ -591,9 +641,11 @@ function update_TDengine() { install_service install_taosadapter_service + install_taoskeeper_service install_config install_taosadapter_config + install_taoskeeper_config echo echo -e "\033[44;32;1m${productName} is updated successfully!${NC}" @@ -602,22 +654,33 @@ function update_TDengine() { echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" [ -f ${configDir}/taosadapter.toml ] && [ -f 
${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}service taosadapter start${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}" else if [ "$osType" != "Darwin" ]; then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" fi fi @@ -643,9 +706,11 @@ function install_TDengine() { install_service install_taosadapter_service + install_taoskeeper_service install_config install_taosadapter_config + install_taoskeeper_config # Ask if to start the service echo @@ -654,22 +719,33 @@ function install_TDengine() { echo -e "${GREEN_DARK}To configure ${productName} ${NC}: edit ${configDir}/${configFile}" [ -f ${configDir}/taosadapter.toml ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To configure Adapter ${NC}: edit ${configDir}/taosadapter.toml" + [ -f ${configDir}/taoskeeper.toml ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To configure Keeper ${NC}: edit ${configDir}/taoskeeper.toml" if ((${service_mod} == 0)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}systemctl start ${serverName}${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: ${csudo}systemctl start taosadapter ${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}systemctl start taoskeeper ${NC}" elif ((${service_mod} == 1)); then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${csudo}service ${serverName} start${NC}" [ -f ${service_config_dir}/taosadapter.service ] && [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start 
Adapter ${NC}: ${csudo}service taosadapter start${NC}" + [ -f ${service_config_dir}/taoskeeper.service ] && [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: ${csudo}service taoskeeper start${NC}" else if [ "$osType" != "Darwin" ]; then echo -e "${GREEN_DARK}To start ${productName} ${NC}: ${serverName}${NC}" [ -f ${installDir}/bin/taosadapter ] && \ echo -e "${GREEN_DARK}To start Adapter ${NC}: taosadapter &${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: taoskeeper &${NC}" else echo -e "${GREEN_DARK}To start service ${NC}: sudo launchctl start com.tdengine.taosd${NC}" - echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taosadapter ] && \ + echo -e "${GREEN_DARK}To start Adapter ${NC}: sudo launchctl start com.tdengine.taosadapter${NC}" + [ -f ${installDir}/bin/taoskeeper ] && \ + echo -e "${GREEN_DARK}To start Keeper ${NC}: sudo launchctl start com.tdengine.taoskeeper${NC}" fi fi diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index acba8117c6..80403986aa 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -52,10 +52,8 @@ #define TMQ_META_VERSION "1.0" -static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); - +static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen); static tb_uid_t processSuid(tb_uid_t suid, char* db) { return suid + MurmurHash3_32(db, strlen(db)); } - static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* schemaTag, char* name, int64_t id, int8_t t, SColCmprWrapper* pColCmprRow, cJSON** pJson) { int32_t code = TSDB_CODE_SUCCESS; @@ -457,7 +455,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { cJSON* tvalue = NULL; if (IS_VAR_DATA_TYPE(pTagVal->type)) { - char* buf = NULL; + char* buf = NULL; int64_t bufSize = 0; if (pTagVal->type == TSDB_DATA_TYPE_VARBINARY) { bufSize = pTagVal->nData * 2 + 2 + 3; @@ -890,9 +888,6 @@ end: } static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVCreateStbReq req = {0}; SDecoder coder; SMCreateStbReq pReq = {0}; @@ -1003,9 +998,6 @@ end: } static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVDropStbReq req = {0}; SDecoder coder = {0}; SMDropStbReq pReq = {0}; @@ -1115,9 +1107,6 @@ static void destroyCreateTbReqBatch(void* data) { } static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVCreateTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1304,9 +1293,6 @@ static void destroyDropTbReqBatch(void* data) { } static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVDropTbBatchReq req = {0}; SDecoder coder = {0}; int32_t code = TSDB_CODE_SUCCESS; @@ -1419,9 +1405,6 @@ end: } static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { - if (taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SDeleteRes req = {0}; SDecoder coder = {0}; char sql[256] = {0}; @@ -1457,9 +1440,6 @@ end: } static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { - if 
(taos == NULL || meta == NULL) { - return TSDB_CODE_INVALID_PARA; - } SVAlterTbReq req = {0}; SDecoder dcoder = {0}; int32_t code = TSDB_CODE_SUCCESS;
@@ -1622,7 +1602,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat RAW_NULL_CHECK(pVgHash); RAW_RETURN_CHECK( taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData))); - RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0)); + RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, fields, numFields, false, NULL, 0, false)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); launchQueryImpl(pRequest, pQuery, true, NULL);
@@ -1682,7 +1662,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha RAW_NULL_CHECK(pVgHash); RAW_RETURN_CHECK( taosHashPut(pVgHash, (const char*)&vgData.vgId, sizeof(vgData.vgId), (char*)&vgData, sizeof(vgData))); - RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0)); + RAW_RETURN_CHECK(rawBlockBindData(pQuery, pTableMeta, pData, NULL, NULL, 0, false, NULL, 0, false)); RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); launchQueryImpl(pRequest, pQuery, true, NULL);
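Together with the deletions earlier in this file, the hunks above drop per-helper argument checks (taosCreateStb, taosDropStb, taosCreateTable, taosDropTable, taosDeleteData, taosAlterTable): those static functions are only reachable through writeRawImpl, and the patch instead validates once at the public entry point, as the strengthened tmq_write_raw check later in this file shows. A sketch of the pattern; dispatchByType is a hypothetical stand-in for the writeRawImpl dispatcher:

```c
/* Validate once at the API boundary; the static helpers behind it may
 * then assume non-NULL inputs. Illustrative, not the literal code. */
int32_t write_raw_entry(TAOS* taos, tmq_raw_data raw) {
  if (taos == NULL || raw.raw == NULL || raw.raw_len <= 0) {
    return TSDB_CODE_INVALID_PARA;  /* single authoritative check */
  }
  return dispatchByType(taos, raw.raw, raw.raw_len, raw.raw_type);
}
```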
@@ -1708,116 +1688,6 @@ static void* getRawDataFromRes(void* pRetrieve) { return rawData; } -static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { - if (taos == NULL || data == NULL) { - SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); - return TSDB_CODE_INVALID_PARA; - } - int32_t code = TSDB_CODE_SUCCESS; - SHashObj* pVgHash = NULL; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; - SDecoder decoder = {0}; - STableMeta* pTableMeta = NULL; - - SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); - - uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); - pRequest->syncQuery = true; - rspObj.resIter = -1; - rspObj.resType = RES_TYPE__TMQ; - - int8_t dataVersion = *(int8_t*)data; - if (dataVersion >= MQ_DATA_RSP_VERSION) { - data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); - dataLen -= sizeof(int8_t) + sizeof(int32_t); - } - tDecoderInit(&decoder, data, dataLen); - code = tDecodeMqDataRsp(&decoder, &rspObj.dataRsp); - if (code != 0) { - SET_ERROR_MSG("decode mq data rsp failed"); - code = TSDB_CODE_INVALID_MSG; - goto end; - } - - if (!pRequest->pDb) { - code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; - goto end; - } - - struct SCatalog* pCatalog = NULL; - RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); - - SRequestConnInfo conn = {0}; - conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; - conn.requestId = pRequest->requestId; - conn.requestObjRefId = pRequest->self; - conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); - - RAW_RETURN_CHECK(smlInitHandle(&pQuery)); - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pVgHash); - while (++rspObj.resIter < rspObj.dataRsp.blockNum) { - void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); - RAW_NULL_CHECK(pRetrieve); - if (!rspObj.dataRsp.withSchema) { - goto end; - } - - const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); - RAW_NULL_CHECK(tbName); - - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); - tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); - - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); - - SVgroupInfo vg = {0}; - RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); - - void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId)); - if (hData == NULL) { - RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); - } - - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); - RAW_NULL_CHECK(pSW); - TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD)); - RAW_NULL_CHECK(fields); - for (int i = 0; i < pSW->nCols; i++) { - fields[i].type = pSW->pSchema[i].type; - fields[i].bytes = pSW->pSchema[i].bytes; - tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name)); - } - void* rawData = getRawDataFromRes(pRetrieve); - char err[ERR_MSG_LEN] = {0}; - code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, fields, pSW->nCols, true, err, ERR_MSG_LEN); - taosMemoryFree(fields); - taosMemoryFreeClear(pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - SET_ERROR_MSG("table:%s, err:%s", tbName, err); - goto end; - } - } - - RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); - - launchQueryImpl(pRequest, pQuery, true, NULL); - code = pRequest->code; - -end: - uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); - tDeleteMqDataRsp(&rspObj.dataRsp); - tDecoderClear(&decoder); - qDestroyQuery(pQuery); - destroyRequest(pRequest); - taosHashCleanup(pVgHash); - taosMemoryFreeClear(pTableMeta); - return code; -} - static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { // find schema data info int32_t code = 0;
@@ -1855,152 +1725,368 @@ end: return code; } -static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { - if (taos == NULL || data == NULL) { - SET_ERROR_MSG("taos:%p or data:%p is NULL", taos, data); - return TSDB_CODE_INVALID_PARA; + } +typedef enum { + WRITE_RAW_INIT_START = 0, + WRITE_RAW_INIT_OK, + WRITE_RAW_INIT_FAIL, +} WRITE_RAW_INIT_STATUS; + +static SHashObj* writeRawCache = NULL; +static int8_t initFlag = 0; +static int8_t initedFlag = WRITE_RAW_INIT_START; + +typedef struct { + SHashObj* pVgHash; + SHashObj* pNameHash; + SHashObj* pMetaHash; +} rawCacheInfo; + +typedef struct { + SVgroupInfo vgInfo; + int64_t uid; + int64_t suid; +} tbInfo; + +static void tmqFreeMeta(void* data) { + STableMeta* pTableMeta = *(STableMeta**)data; + taosMemoryFree(pTableMeta); +} + +static void freeRawCache(void* data) { + rawCacheInfo* pRawCache = (rawCacheInfo*)data; + taosHashCleanup(pRawCache->pMetaHash); + taosHashCleanup(pRawCache->pNameHash); + taosHashCleanup(pRawCache->pVgHash); +} + +static int32_t initRawCacheHash() { + if (writeRawCache == NULL) { + writeRawCache = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_ENTRY_LOCK); + if (writeRawCache == NULL) { + return terrno; + } + taosHashSetFreeFp(writeRawCache, freeRawCache); } - int32_t code = TSDB_CODE_SUCCESS; - SHashObj* pVgHash = NULL; - SQuery* pQuery = NULL; - SMqRspObj rspObj = {0}; - SDecoder decoder = {0}; - STableMeta* pTableMeta = NULL; - SHashObj* pCreateTbHash = NULL; + return 0; +} - SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest));
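The scaffolding above introduces a per-connection cache keyed by the TAOS* handle: inside each rawCacheInfo, pVgHash maps vgroup id to SVgroupInfo, pNameHash maps child-table name to tbInfo, and pMetaHash maps suid to a cached STableMeta (freed through tmqFreeMeta). A condensed C sketch of the lookup order that processCacheMeta below implements; error handling elided and flow simplified:

```c
/* Warm/cold sketch: one catalog round-trip per table on first sight,
 * hash lookups afterwards. Simplified from processCacheMeta. */
tbInfo* ti = (tbInfo*)taosHashGet(pNameHash, tbName, strlen(tbName));
if (ti == NULL) {
  /* cold path: catalogGetTableHashVgroup + catalogGetTableMeta, then
   * populate pNameHash, pMetaHash and pVgHash for later blocks */
} else {
  STableMeta** ppMeta = (STableMeta**)taosHashGet(pMetaHash, &ti->suid, LONG_BYTES);
  /* warm path: reuse *ppMeta unless needRefreshMeta() detects a schema
   * change or a failed attempt raised the retry counter */
}
```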
+static bool needRefreshMeta(void* rawData, STableMeta* pTableMeta, SSchemaWrapper* pSW) { + char* p = (char*)rawData; + // | version | total length | total rows | blankFill | total columns | flag seg | block group id | column schema | each + // column length | + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(int32_t); + p += sizeof(uint64_t); + int8_t* fields = p; - uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); - pRequest->syncQuery = true; - rspObj.resIter = -1; - rspObj.resType = RES_TYPE__TMQ_METADATA; + if (pSW->nCols != pTableMeta->tableInfo.numOfColumns) { + return true; + } + for (int i = 0; i < pSW->nCols; i++) { + int j = 0; + for (; j < pTableMeta->tableInfo.numOfColumns; j++) { + SSchema* pColSchema = &pTableMeta->schema[j]; + char* fieldName = pSW->pSchema[i].name; - int8_t dataVersion = *(int8_t*)data; - if (dataVersion >= MQ_DATA_RSP_VERSION) { - data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); - dataLen -= sizeof(int8_t) + sizeof(int32_t); + if (strcmp(pColSchema->name, fieldName) == 0) { + if (*fields != pColSchema->type || *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { + return true; + } + break; + } + } + fields += sizeof(int8_t) + sizeof(int32_t); + + if (j == pTableMeta->tableInfo.numOfColumns) return true; + } + return false; +}
+ +static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** pMetaHash, void* key) { + int32_t code = 0; + void* cacheInfo = taosHashGet(writeRawCache, &key, POINTER_BYTES); + if (cacheInfo == NULL) { + *pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pVgHash); + *pNameHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pNameHash); + *pMetaHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK); + RAW_NULL_CHECK(*pMetaHash); + taosHashSetFreeFp(*pMetaHash, tmqFreeMeta); + rawCacheInfo info = {*pVgHash, *pNameHash, *pMetaHash}; + RAW_RETURN_CHECK(taosHashPut(writeRawCache, &key, POINTER_BYTES, &info, sizeof(rawCacheInfo))); + } else { + rawCacheInfo* info = (rawCacheInfo*)cacheInfo; + *pVgHash = info->pVgHash; + *pNameHash = info->pNameHash; + *pMetaHash = info->pMetaHash; } - tDecoderInit(&decoder, data, dataLen); - code = tDecodeSTaosxRsp(&decoder, &rspObj.dataRsp); - if (code != 0) { - SET_ERROR_MSG("decode mq taosx data rsp failed"); - code = TSDB_CODE_INVALID_MSG; - goto end; - } + return 0; +end: + taosHashCleanup(*pMetaHash); + taosHashCleanup(*pNameHash); + taosHashCleanup(*pVgHash); + return code; +}
- if (!pRequest->pDb) { +static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pCatalog, SRequestConnInfo* conn) { + int32_t code = 0; + RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, pRequest)); + (*pRequest)->syncQuery = true; + if (!(*pRequest)->pDb) { code = TSDB_CODE_PAR_DB_NOT_SPECIFIED; goto end; } - struct SCatalog* pCatalog = NULL; - RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); + RAW_RETURN_CHECK(catalogGetHandle((*pRequest)->pTscObj->pAppInfo->clusterId, pCatalog)); + conn->pTrans = (*pRequest)->pTscObj->pAppInfo->pTransporter; + conn->requestId = (*pRequest)->requestId; + conn->requestObjRefId = (*pRequest)->self; + conn->mgmtEps = getEpSet_s(&(*pRequest)->pTscObj->pAppInfo->mgmtEp); +end: + return code; +}
+ +typedef int32_t _raw_decode_func_(SDecoder* pDecoder, SMqDataRsp* pRsp); +static int32_t decodeRawData(SDecoder* decoder, void* data, int32_t dataLen, _raw_decode_func_
func, + SMqRspObj* rspObj) { + int8_t dataVersion = *(int8_t*)data; + if (dataVersion >= MQ_DATA_RSP_VERSION) { + data = POINTER_SHIFT(data, sizeof(int8_t) + sizeof(int32_t)); + dataLen -= sizeof(int8_t) + sizeof(int32_t); + } + + rspObj->resIter = -1; + tDecoderInit(decoder, data, dataLen); + int32_t code = func(decoder, &rspObj->dataRsp); + if (code != 0) { + SET_ERROR_MSG("decode mq taosx data rsp failed"); + } + return code; +} + +static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj* pMetaHash, + SVCreateTbReq* pCreateReqDst, SCatalog* pCatalog, SRequestConnInfo* conn, SName* pName, + STableMeta** pMeta, SSchemaWrapper* pSW, void* rawData, int32_t retry) { + int32_t code = 0; + STableMeta* pTableMeta = NULL; + tbInfo* tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); + if (tmpInfo == NULL || retry > 0) { + tbInfo info = {0}; + + RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, conn, pName, &info.vgInfo)); + if (pCreateReqDst && tmpInfo == NULL) { // change stable name to get meta + tstrncpy(pName->tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN); + } + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); + info.uid = pTableMeta->uid; + if (pTableMeta->tableType == TSDB_CHILD_TABLE) { + info.suid = pTableMeta->suid; + } else { + info.suid = pTableMeta->uid; + } + code = taosHashPut(pMetaHash, &info.suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); + if (code != 0) { + taosMemoryFree(pTableMeta); + goto end; + } + if (pCreateReqDst) { + pTableMeta->vgId = info.vgInfo.vgId; + pTableMeta->uid = pCreateReqDst->uid; + pCreateReqDst->ctb.suid = pTableMeta->suid; + } + + RAW_RETURN_CHECK(taosHashPut(pNameHash, pName->tname, strlen(pName->tname), &info, sizeof(tbInfo))); + tmpInfo = (tbInfo*)taosHashGet(pNameHash, pName->tname, strlen(pName->tname)); + RAW_RETURN_CHECK( + taosHashPut(pVgHash, &info.vgInfo.vgId, sizeof(info.vgInfo.vgId), &info.vgInfo, sizeof(SVgroupInfo))); + } + + if (pTableMeta == NULL || retry > 0) { + STableMeta** pTableMetaTmp = (STableMeta**)taosHashGet(pMetaHash, &tmpInfo->suid, LONG_BYTES); + if (pTableMetaTmp == NULL || retry > 0 || needRefreshMeta(rawData, *pTableMetaTmp, pSW)) { + RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, conn, pName, &pTableMeta)); + code = taosHashPut(pMetaHash, &tmpInfo->suid, LONG_BYTES, &pTableMeta, POINTER_BYTES); + if (code != 0) { + taosMemoryFree(pTableMeta); + goto end; + } + + } else { + pTableMeta = *pTableMetaTmp; + pTableMeta->uid = tmpInfo->uid; + pTableMeta->vgId = tmpInfo->vgInfo.vgId; + } + } + *pMeta = pTableMeta; + +end: + return code; +} + +static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; + + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; SRequestConnInfo conn = {0}; - conn.pTrans = pRequest->pTscObj->pAppInfo->pTransporter; - conn.requestId = pRequest->requestId; - conn.requestObjRefId = pRequest->self; - conn.mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp); + RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); + RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeMqDataRsp, &rspObj)); + + SHashObj* pVgHash = NULL; + SHashObj* pNameHash = NULL; + SHashObj* pMetaHash = NULL; + RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); + int retry = 
0; + while (1) { + RAW_RETURN_CHECK(smlInitHandle(&pQuery)); + uDebug(LOG_ID_TAG " write raw data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); + while (++rspObj.resIter < rspObj.dataRsp.blockNum) { + if (!rspObj.dataRsp.withSchema) { + goto end; + } + + const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); + RAW_NULL_CHECK(tbName); + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); + void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); + RAW_NULL_CHECK(pRetrieve); + void* rawData = getRawDataFromRes(pRetrieve); + RAW_NULL_CHECK(rawData); + + uDebug(LOG_ID_TAG " write raw data block tbname:%s", LOG_ID_VALUE, tbName); + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); + tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); + + STableMeta* pTableMeta = NULL; + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, NULL, pCatalog, &conn, &pName, &pTableMeta, pSW, + rawData, retry)); + char err[ERR_MSG_LEN] = {0}; + code = rawBlockBindData(pQuery, pTableMeta, rawData, NULL, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + if (code != TSDB_CODE_SUCCESS) { + SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); + goto end; + } + } + RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; + + if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) { + uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code)); + qDestroyQuery(pQuery); + pQuery = NULL; + rspObj.resIter = -1; + continue; + } + break; + } + +end: + uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + tDeleteMqDataRsp(&rspObj.dataRsp); + tDecoderClear(&decoder); + qDestroyQuery(pQuery); + destroyRequest(pRequest); + return code; +}
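Both raw-write paths now share the retry skeleton visible above: when launchQueryImpl reports a client-recoverable error (typically a stale vgroup mapping or schema after a split or alter), the SQuery is rebuilt and retry > 0 pushes processCacheMeta down its refresh path, bounded at three attempts. The control flow, reduced to its shape; buildAndLaunch is a hypothetical condensation of the bind-and-launch steps:

```c
int32_t retry = 0;
while (1) {
  int32_t code = buildAndLaunch(pRequest, &rspObj, retry);  /* bind blocks, launchQueryImpl */
  if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) {
    rspObj.resIter = -1;  /* rewind the block iterator before re-binding */
    continue;             /* retry > 0 forces a metadata refetch */
  }
  break;                  /* success, or an error retries cannot fix */
}
```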
+ +static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) { + int32_t code = TSDB_CODE_SUCCESS; + SQuery* pQuery = NULL; + SMqRspObj rspObj = {0}; + SDecoder decoder = {0}; + SHashObj* pCreateTbHash = NULL; + + SRequestObj* pRequest = NULL; + SCatalog* pCatalog = NULL; + SRequestConnInfo conn = {0}; + + RAW_RETURN_CHECK(buildRawRequest(taos, &pRequest, &pCatalog, &conn)); + uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); + RAW_RETURN_CHECK(decodeRawData(&decoder, data, dataLen, tDecodeSTaosxRsp, &rspObj)); - RAW_RETURN_CHECK(smlInitHandle(&pQuery)); - pVgHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); - RAW_NULL_CHECK(pVgHash); pCreateTbHash = taosHashInit(16, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); RAW_NULL_CHECK(pCreateTbHash); RAW_RETURN_CHECK(buildCreateTbMap(&rspObj.dataRsp, pCreateTbHash)); + SHashObj* pVgHash = NULL; + SHashObj* pNameHash = NULL; + SHashObj* pMetaHash = NULL; + RAW_RETURN_CHECK(getRawCache(&pVgHash, &pNameHash, &pMetaHash, taos)); + int retry = 0; + while (1) { + RAW_RETURN_CHECK(smlInitHandle(&pQuery)); + uDebug(LOG_ID_TAG " write raw meta data block num:%d", LOG_ID_VALUE, rspObj.dataRsp.blockNum); + while (++rspObj.resIter < rspObj.dataRsp.blockNum) { + if (!rspObj.dataRsp.withSchema) { + goto end; + } - const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); - if (!tbName) { - SET_ERROR_MSG("block tbname is null"); - code = terrno; - goto end; - } + const char* tbName = (const char*)taosArrayGetP(rspObj.dataRsp.blockTbName, rspObj.resIter); + RAW_NULL_CHECK(tbName); + SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); + RAW_NULL_CHECK(pSW); + void* pRetrieve = taosArrayGetP(rspObj.dataRsp.blockData, rspObj.resIter); + RAW_NULL_CHECK(pRetrieve); + void* rawData = getRawDataFromRes(pRetrieve); + RAW_NULL_CHECK(rawData); - uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName); - SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; - tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); - tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); + uDebug(LOG_ID_TAG " write raw metadata block tbname:%s", LOG_ID_VALUE, tbName); + SName pName = {TSDB_TABLE_NAME_T, pRequest->pTscObj->acctId, {0}, {0}}; + tstrncpy(pName.dbname, pRequest->pDb, TSDB_DB_NAME_LEN); + tstrncpy(pName.tname, tbName, TSDB_TABLE_NAME_LEN); - // find schema data info - SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, tbName, strlen(tbName)); - SVgroupInfo vg = {0}; - RAW_RETURN_CHECK(catalogGetTableHashVgroup(pCatalog, &conn, &pName, &vg)); - if (pCreateReqDst) { // change stable name to get meta - tstrncpy(pName.tname, pCreateReqDst->ctb.stbName, TSDB_TABLE_NAME_LEN); + // find schema data info + SVCreateTbReq* pCreateReqDst = (SVCreateTbReq*)taosHashGet(pCreateTbHash, pName.tname, strlen(pName.tname)); + STableMeta* pTableMeta = NULL; + RAW_RETURN_CHECK(processCacheMeta(pVgHash, pNameHash, pMetaHash, pCreateReqDst, pCatalog, &conn, &pName, + &pTableMeta, pSW, rawData, retry)); + char err[ERR_MSG_LEN] = {0}; + code = + rawBlockBindData(pQuery, pTableMeta, rawData, pCreateReqDst, pSW, pSW->nCols, true, err, ERR_MSG_LEN, true); + if (code != TSDB_CODE_SUCCESS) { + SET_ERROR_MSG("table:%s, err:%s", pName.tname, err); + goto end; + } } - RAW_RETURN_CHECK(catalogGetTableMeta(pCatalog, &conn, &pName, &pTableMeta)); + RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); + launchQueryImpl(pRequest, pQuery, true, NULL); + code = pRequest->code; - if (pCreateReqDst) { - pTableMeta->vgId = vg.vgId; - pTableMeta->uid = pCreateReqDst->uid; - pCreateReqDst->ctb.suid = pTableMeta->suid; - } - void* hData = taosHashGet(pVgHash, &vg.vgId, sizeof(vg.vgId)); - if (hData == NULL) { - RAW_RETURN_CHECK(taosHashPut(pVgHash, (const char*)&vg.vgId, sizeof(vg.vgId), (char*)&vg, sizeof(vg))); - } - - SSchemaWrapper* pSW = (SSchemaWrapper*)taosArrayGetP(rspObj.dataRsp.blockSchema, rspObj.resIter); - RAW_NULL_CHECK(pSW); - TAOS_FIELD* fields = taosMemoryCalloc(pSW->nCols, sizeof(TAOS_FIELD)); - if (fields == NULL) { - SET_ERROR_MSG("calloc fields failed"); - code = terrno; - goto end; - } - for (int i = 0; i < pSW->nCols; i++) { - fields[i].type = pSW->pSchema[i].type; - fields[i].bytes = pSW->pSchema[i].bytes; - tstrncpy(fields[i].name, pSW->pSchema[i].name, tListLen(pSW->pSchema[i].name)); - } - void* rawData = getRawDataFromRes(pRetrieve); - char err[ERR_MSG_LEN] = {0}; - SVCreateTbReq* pCreateReqTmp = NULL; - if (pCreateReqDst) { - RAW_RETURN_CHECK(cloneSVreateTbReq(pCreateReqDst, &pCreateReqTmp)); - } - code = rawBlockBindData(pQuery, pTableMeta, rawData, &pCreateReqTmp,
fields, pSW->nCols, true, err, ERR_MSG_LEN); - if (pCreateReqTmp != NULL) { - tdDestroySVCreateTbReq(pCreateReqTmp); - taosMemoryFree(pCreateReqTmp); - } - taosMemoryFree(fields); - taosMemoryFreeClear(pTableMeta); - if (code != TSDB_CODE_SUCCESS) { - SET_ERROR_MSG("table:%s, err:%s", tbName, err); - goto end; + if (NEED_CLIENT_HANDLE_ERROR(code) && retry++ < 3) { + uInfo("write raw retry:%d/3 end code:%d, msg:%s", retry, code, tstrerror(code)); + qDestroyQuery(pQuery); + pQuery = NULL; + rspObj.resIter = -1; + continue; } + break; } - RAW_RETURN_CHECK(smlBuildOutput(pQuery, pVgHash)); - - launchQueryImpl(pRequest, pQuery, true, NULL); - code = pRequest->code; - end: uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); + tDeleteSTaosxRsp(&rspObj.dataRsp); void* pIter = taosHashIterate(pCreateTbHash, NULL); while (pIter) { tDestroySVCreateTbReq(pIter, TSDB_MSG_FLG_DECODE); pIter = taosHashIterate(pCreateTbHash, pIter); } taosHashCleanup(pCreateTbHash); - tDeleteSTaosxRsp(&rspObj.dataRsp); tDecoderClear(&decoder); qDestroyQuery(pQuery); destroyRequest(pRequest); - taosHashCleanup(pVgHash); - taosMemoryFreeClear(pTableMeta); return code; } @@ -2076,18 +2162,18 @@ char* tmq_get_json_meta(TAOS_RES* res) { return NULL; } - char* string = NULL; + char* string = NULL; SMqRspObj* rspObj = (SMqRspObj*)res; if (TD_RES_TMQ_METADATA(res)) { processAutoCreateTable(&rspObj->dataRsp, &string); } else if (TD_RES_TMQ_BATCH_META(res)) { processBatchMetaToJson(&rspObj->batchMetaRsp, &string); } else if (TD_RES_TMQ_META(res)) { - cJSON* pJson = NULL; + cJSON* pJson = NULL; processSimpleMeta(&rspObj->metaRsp, &pJson); string = cJSON_PrintUnformatted(pJson); cJSON_Delete(pJson); - } else{ + } else { uError("tmq_get_json_meta res:%d, invalid type", *(int8_t*)res); } @@ -2098,7 +2184,7 @@ char* tmq_get_json_meta(TAOS_RES* res) { void tmq_free_json_meta(char* jsonMeta) { taosMemoryFreeClear(jsonMeta); } static int32_t getOffSetLen(const SMqDataRsp* pRsp) { - SEncoder coder = {0}; + SEncoder coder = {0}; tEncoderInit(&coder, NULL, 0); if (tEncodeSTqOffsetVal(&coder, &pRsp->reqOffset) < 0) return -1; if (tEncodeSTqOffsetVal(&coder, &pRsp->rspOffset) < 0) return -1; @@ -2108,45 +2194,45 @@ static int32_t getOffSetLen(const SMqDataRsp* pRsp) { } typedef int32_t __encode_func__(SEncoder* pEncoder, const SMqDataRsp* pRsp); -static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { - int32_t len = 0; - int32_t code = 0; - SEncoder encoder = {0}; - void* buf = NULL; - tEncodeSize(encodeFunc, rspObj, len, code); - if (code < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; +static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, tmq_raw_data* raw) { + int32_t len = 0; + int32_t code = 0; + SEncoder encoder = {0}; + void* buf = NULL; + tEncodeSize(encodeFunc, rspObj, len, code); + if (code < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - len += sizeof(int8_t) + sizeof(int32_t); - buf = taosMemoryCalloc(1, len); - if (buf == NULL) { - code = terrno; - goto FAILED; + len += sizeof(int8_t) + sizeof(int32_t); + buf = taosMemoryCalloc(1, len); + if (buf == NULL) { + code = terrno; + goto FAILED; } - tEncoderInit(&encoder, buf, len); - if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + tEncoderInit(&encoder, buf, len); + if (tEncodeI8(&encoder, MQ_DATA_RSP_VERSION) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - int32_t offsetLen = getOffSetLen(rspObj); - if 
(offsetLen <= 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + int32_t offsetLen = getOffSetLen(rspObj); + if (offsetLen <= 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (tEncodeI32(&encoder, offsetLen) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (tEncodeI32(&encoder, offsetLen) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - if (encodeFunc(&encoder, rspObj) < 0) { - code = TSDB_CODE_INVALID_MSG; - goto FAILED; + if (encodeFunc(&encoder, rspObj) < 0) { + code = TSDB_CODE_INVALID_MSG; + goto FAILED; } - tEncoderClear(&encoder); + tEncoderClear(&encoder); - raw->raw = buf; - raw->raw_len = len; - return code; + raw->raw = buf; + raw->raw_len = len; + return code; FAILED: tEncoderClear(&encoder); taosMemoryFree(buf);
@@ -2164,7 +2250,7 @@ int32_t tmq_get_raw(TAOS_RES* res, tmq_raw_data* raw) { raw->raw_type = rspObj->metaRsp.resMsgType; uDebug("tmq get raw type meta:%p", raw); } else if (TD_RES_TMQ(res)) { - int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw); + int32_t code = encodeMqDataRsp(tEncodeMqDataRsp, &rspObj->dataRsp, raw); if (code != 0) { uError("tmq get raw type error:%d", terrno); return code;
@@ -2199,7 +2285,31 @@ void tmq_free_raw(tmq_raw_data raw) { (void)memset(terrMsg, 0, ERR_MSG_LEN); } +static int32_t writeRawInit() { + while (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_START) { + int8_t old = atomic_val_compare_exchange_8(&initFlag, 0, 1); + if (old == 0) { + int32_t code = initRawCacheHash(); + if (code != 0) { + uError("tmq writeRawImpl init error:%d", code); + atomic_store_8(&initedFlag, WRITE_RAW_INIT_FAIL); + return code; + } + atomic_store_8(&initedFlag, WRITE_RAW_INIT_OK); + } + } + + if (atomic_load_8(&initedFlag) == WRITE_RAW_INIT_FAIL) { + return TSDB_CODE_INTERNAL_ERROR; + } + return 0; +} + static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) { + if (writeRawInit() != 0) { + return TSDB_CODE_INTERNAL_ERROR; + } + if (type == TDMT_VND_CREATE_STB) { return taosCreateStb(taos, buf, len); } else if (type == TDMT_VND_ALTER_STB) {
@@ -2214,10 +2324,10 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) return taosDropTable(taos, buf, len); } else if (type == TDMT_VND_DELETE) { return taosDeleteData(taos, buf, len); - } else if (type == RES_TYPE__TMQ) { - return tmqWriteRawDataImpl(taos, buf, len); } else if (type == RES_TYPE__TMQ_METADATA) { return tmqWriteRawMetaDataImpl(taos, buf, len); + } else if (type == RES_TYPE__TMQ) { + return tmqWriteRawDataImpl(taos, buf, len); } else if (type == RES_TYPE__TMQ_BATCH_META) { return tmqWriteBatchMetaDataImpl(taos, buf, len); }
@@ -2225,7 +2335,8 @@ static int32_t writeRawImpl(TAOS* taos, void* buf, uint32_t len, uint16_t type) int32_t tmq_write_raw(TAOS* taos, tmq_raw_data raw) { - if (!taos) { + if (taos == NULL || raw.raw == NULL || raw.raw_len <= 0) { + SET_ERROR_MSG("taos:%p or data:%p is NULL or raw_len <= 0", taos, raw.raw); return TSDB_CODE_INVALID_PARA; }
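writeRawInit() above elects exactly one initializer with a compare-and-swap on initFlag while other threads spin until initedFlag leaves the START state, so the cache hash is built at most once per process. The same idea in portable C11 atomics, offered as an illustrative rendering rather than TDengine's own atomic wrappers:

```c
#include <stdatomic.h>

static atomic_int claimed;  /* 0 = unclaimed, 1 = an initializer was elected */
static atomic_int state;    /* 0 = start, 1 = ok, 2 = fail */

int ensureInit(int (*doInit)(void)) {
  while (atomic_load(&state) == 0) {  /* spin until someone resolves it */
    int expected = 0;
    if (atomic_compare_exchange_strong(&claimed, &expected, 1)) {
      atomic_store(&state, doInit() == 0 ? 1 : 2);  /* sole initializer */
    }
  }
  return atomic_load(&state) == 1 ? 0 : -1;
}
```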
do { if (cDebugFlag & DEBUG_ERROR || tqClientDebug) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqWarnC(...) do { if (cDebugFlag & DEBUG_WARN || tqClientDebug) { taosPrintLog("TQ WARN ", DEBUG_WARN, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebug) { taosPrintLog("TQ ", DEBUG_INFO, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebug) { taosPrintLog("TQ ", DEBUG_DEBUG, tqDebugFlag, __VA_ARGS__); }} while(0) -#define tqTraceC(...) do { if (cDebugFlag & DEBUG_TRACE || tqClientDebug) { taosPrintLog("TQ ", DEBUG_TRACE, tqDebugFlag, __VA_ARGS__); }} while(0) +#define tqErrorC(...) do { if (cDebugFlag & DEBUG_ERROR || tqClientDebugFlag & DEBUG_ERROR) { taosPrintLog("TQ ERROR ", DEBUG_ERROR, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) +#define tqInfoC(...) do { if (cDebugFlag & DEBUG_INFO || tqClientDebugFlag & DEBUG_INFO) { taosPrintLog("TQ ", DEBUG_INFO, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) +#define tqDebugC(...) do { if (cDebugFlag & DEBUG_DEBUG || tqClientDebugFlag & DEBUG_DEBUG) { taosPrintLog("TQ ", DEBUG_DEBUG, tqClientDebugFlag|cDebugFlag, __VA_ARGS__); }} while(0) #define EMPTY_BLOCK_POLL_IDLE_DURATION 10 #define DEFAULT_AUTO_COMMIT_INTERVAL 5000 @@ -831,8 +828,8 @@ static int32_t innerCommitAll(tmq_t* tmq, SMqCommitCbParamSet* pParamSet){ } code = innerCommit(tmq, pTopic->topicName, &pVg->offsetInfo.endOffset, pVg, pParamSet); - if (code != 0){ - tqDebugC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d", + if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){ + tqErrorC("consumer:0x%" PRIx64 " topic:%s vgId:%d, no commit, code:%s, current offset version:%" PRId64 ", ordinal:%d/%d", tmq->consumerId, pTopic->topicName, pVg->vgId, tstrerror(code), pVg->offsetInfo.endOffset.version, j + 1, numOfVgroups); } } @@ -857,7 +854,7 @@ static void asyncCommitAllOffsets(tmq_t* tmq, tmq_commit_cb* pCommitFp, void* us return; } code = innerCommitAll(tmq, pParamSet); - if (code != 0){ + if (code != 0 && code != TSDB_CODE_TMQ_SAME_COMMITTED_VALUE){ tqErrorC("consumer:0x%" PRIx64 " innerCommitAll failed, code:%s", tmq->consumerId, tstrerror(code)); } @@ -957,7 +954,8 @@ int32_t tmqHbCb(void* param, SDataBuf* pMsg, int32_t code) { } } - tqClientDebug = rsp.debugFlag; + tqClientDebugFlag = rsp.debugFlag; + tDestroySMqHbRsp(&rsp); END: @@ -978,6 +976,7 @@ void tmqSendHbReq(void* param, void* tmrId) { req.consumerId = tmq->consumerId; req.epoch = tmq->epoch; req.pollFlag = atomic_load_8(&tmq->pollFlag); + tqDebugC("consumer:0x%" PRIx64 " send heartbeat, pollFlag:%d", tmq->consumerId, req.pollFlag); req.topics = taosArrayInit(taosArrayGetSize(tmq->clientTopics), sizeof(TopicOffsetRows)); if (req.topics == NULL) { goto END; @@ -1063,7 +1062,7 @@ END: tDestroySMqHbReq(&req); if (tmrId != NULL) { bool ret = taosTmrReset(tmqSendHbReq, tmq->heartBeatIntervalMs, param, tmqMgmt.timer, &tmq->hbLiveTimer); - tqDebugC("reset timer fo tmq hb:%d", ret); + tqDebugC("consumer:0x%" PRIx64 " reset timer for tmq heartbeat:%d, pollFlag:%d", tmq->consumerId, ret, tmq->pollFlag); } int32_t ret = taosReleaseRef(tmqMgmt.rsetId, refId); if (ret != 0){ @@ -1269,7 +1268,9 @@ static int32_t askEpCb(void* param, SDataBuf* pMsg, int32_t code) { } if (code != TSDB_CODE_SUCCESS) { - tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, 
tstrerror(code)); + if (code != TSDB_CODE_MND_CONSUMER_NOT_READY){ + tqErrorC("consumer:0x%" PRIx64 ", get topic endpoint error, code:%s", tmq->consumerId, tstrerror(code)); + } goto END; } @@ -1422,7 +1423,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) { tqDebugC("consumer:0x%" PRIx64 " retrieve ep from mnode in 1s", pTmq->consumerId); bool ret = taosTmrReset(tmqAssignAskEpTask, DEFAULT_ASKEP_INTERVAL, (void*)(pTmq->refId), tmqMgmt.timer, &pTmq->epTimer); - tqDebugC("reset timer fo tmq ask ep:%d", ret); + tqDebugC("reset timer for tmq ask ep:%d", ret); } else if (*pTaskType == TMQ_DELAYED_TASK__COMMIT) { tmq_commit_cb* pCallbackFn = (pTmq->commitCb != NULL) ? pTmq->commitCb : defaultCommitCbFn; asyncCommitAllOffsets(pTmq, pCallbackFn, pTmq->commitCbUserParam); @@ -1430,7 +1431,7 @@ void tmqHandleAllDelayedTask(tmq_t* pTmq) { pTmq->autoCommitInterval / 1000.0); bool ret = taosTmrReset(tmqAssignDelayedCommitTask, pTmq->autoCommitInterval, (void*)(pTmq->refId), tmqMgmt.timer, &pTmq->commitTimer); - tqDebugC("reset timer fo commit:%d", ret); + tqDebugC("reset timer for commit:%d", ret); } else { tqErrorC("consumer:0x%" PRIx64 " invalid task type:%d", pTmq->consumerId, *pTaskType); } diff --git a/source/common/src/systable.c b/source/common/src/systable.c index eef38bf18e..02efb40f9f 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -118,7 +118,7 @@ static const SSysDbTableSchema userDBSchema[] = { {.name = "table_suffix", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT, .sysInfo = true}, {.name = "tsdb_pagesize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "keep_time_offset", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = false}, - {.name = "s3_chunksize", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, + {.name = "s3_chunkpages", .bytes = 4, .type = TSDB_DATA_TYPE_INT, .sysInfo = true}, {.name = "s3_keeplocal", .bytes = 10 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true}, {.name = "s3_compact", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, {.name = "with_arbitrator", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT, .sysInfo = true}, diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 43fe88171d..7ea6b01bf3 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -548,7 +548,7 @@ static int32_t taosAddServerLogCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "sDebugFlag", sDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tsdbDebugFlag", tsdbDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqDebugFlag", tqDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); - TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebug", tqClientDebug, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "tqClientDebugFlag", tqClientDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "fsDebugFlag", fsDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "udfDebugFlag", udfDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "smaDebugFlag", smaDebugFlag, 0, 255, CFG_SCOPE_SERVER, CFG_DYN_SERVER)); @@ -2000,7 +2000,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"tdbDebugFlag", &tdbDebugFlag}, {"tmrDebugFlag", &tmrDebugFlag}, {"uDebugFlag", &uDebugFlag}, {"smaDebugFlag", &smaDebugFlag}, {"rpcDebugFlag", 
&rpcDebugFlag}, {"qDebugFlag", &qDebugFlag}, {"metaDebugFlag", &metaDebugFlag}, {"stDebugFlag", &stDebugFlag}, {"sndDebugFlag", &sndDebugFlag}, - {"tqClientDebug", &tqClientDebug}, + {"tqClientDebugFlag", &tqClientDebugFlag}, }; static OptionNameAndVar options[] = {{"audit", &tsEnableAudit}, diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 63fcf900bf..9c8544fcd4 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -10983,6 +10983,7 @@ _exit: int32_t tEncodeMqDataRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_RETURN(tEncodeMqDataRspCommon(pEncoder, pRsp)); TAOS_CHECK_RETURN(tEncodeI64(pEncoder, pRsp->sleepTime)); + return 0; } @@ -11094,6 +11095,7 @@ int32_t tEncodeSTaosxRsp(SEncoder *pEncoder, const SMqDataRsp *pRsp) { TAOS_CHECK_EXIT(tEncodeBinary(pEncoder, createTableReq, createTableLen)); } } + _exit: return code; } diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h index 0e1a4bc98e..989adf84ac 100644 --- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h +++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h @@ -36,12 +36,13 @@ typedef struct SVnodeMgmt { SSingleWorker mgmtWorker; SSingleWorker mgmtMultiWorker; SHashObj *hash; + SHashObj *closedHash; TdThreadRwlock lock; SVnodesStat state; STfs *pTfs; TdThread thread; bool stop; - TdThreadMutex createLock; + TdThreadMutex fileLock; } SVnodeMgmt; typedef struct { @@ -94,7 +95,7 @@ SVnodeObj *vmAcquireVnode(SVnodeMgmt *pMgmt, int32_t vgId); SVnodeObj *vmAcquireVnodeImpl(SVnodeMgmt *pMgmt, int32_t vgId, bool strict); void vmReleaseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode); int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl); -void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal); +void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed); // vmHandle.c SArray *vmGetMsgHandles(); @@ -111,6 +112,7 @@ int32_t vmProcessArbHeartBeatReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg); int32_t vmGetVnodeListFromFile(SVnodeMgmt *pMgmt, SWrapperCfg **ppCfgs, int32_t *numOfVnodes); int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt); int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); +int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes); // vmWorker.c int32_t vmStartWorker(SVnodeMgmt *pMgmt); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c index 5fabd4cdde..d07fe9c5ac 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmFile.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmFile.c @@ -19,6 +19,54 @@ #define MAX_CONTENT_LEN 2 * 1024 * 1024 +int32_t vmGetAllVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) { + (void)taosThreadRwlockRdlock(&pMgmt->lock); + + int32_t num = 0; + int32_t size = taosHashGetSize(pMgmt->hash); + int32_t closedSize = taosHashGetSize(pMgmt->closedHash); + size += closedSize; + SVnodeObj **pVnodes = taosMemoryCalloc(size, sizeof(SVnodeObj *)); + if (pVnodes == NULL) { + (void)taosThreadRwlockUnlock(&pMgmt->lock); + return terrno; + } + + void *pIter = taosHashIterate(pMgmt->hash, NULL); + while (pIter) { + SVnodeObj **ppVnode = pIter; + SVnodeObj *pVnode = *ppVnode; + if (pVnode && num < size) { + int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1); + // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount); + pVnodes[num++] = (*ppVnode); + pIter = taosHashIterate(pMgmt->hash, pIter); + } 
else {
+      taosHashCancelIterate(pMgmt->hash, pIter);
+      pIter = NULL;
+    }
+  }
+
+  pIter = taosHashIterate(pMgmt->closedHash, NULL);
+  while (pIter) {
+    SVnodeObj **ppVnode = pIter;
+    SVnodeObj  *pVnode = *ppVnode;
+    if (pVnode && num < size) {
+      int32_t refCount = atomic_add_fetch_32(&pVnode->refCount, 1);
+      // dTrace("vgId:%d, acquire vnode list, ref:%d", pVnode->vgId, refCount);
+      pVnodes[num++] = (*ppVnode);
+      pIter = taosHashIterate(pMgmt->closedHash, pIter);
+    } else {
+      taosHashCancelIterate(pMgmt->closedHash, pIter);
+      pIter = NULL;
+    }
+  }
+
+  (void)taosThreadRwlockUnlock(&pMgmt->lock);
+  *numOfVnodes = num;
+  *ppVnodes = pVnodes;
+
+  return 0;
+}
+
 int32_t vmGetVnodeListFromHash(SVnodeMgmt *pMgmt, int32_t *numOfVnodes, SVnodeObj ***ppVnodes) {
   (void)taosThreadRwlockRdlock(&pMgmt->lock);

@@ -203,6 +251,8 @@ int32_t vmWriteVnodeListToFile(SVnodeMgmt *pMgmt) {
   SVnodeObj **ppVnodes = NULL;
   char        file[PATH_MAX] = {0};
   char        realfile[PATH_MAX] = {0};
+  int32_t     lino = 0;
+  int32_t     ret = -1;

   int32_t nBytes = snprintf(file, sizeof(file), "%s%svnodes_tmp.json", pMgmt->path, TD_DIRSEP);
   if (nBytes <= 0 || nBytes >= sizeof(file)) {
@@ -215,8 +265,7 @@
   }

   int32_t numOfVnodes = 0;
-  code = vmGetVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes);
-  if (code) goto _OVER;
+  TAOS_CHECK_GOTO(vmGetAllVnodeListFromHash(pMgmt, &numOfVnodes, &ppVnodes), &lino, _OVER);

   // terrno = TSDB_CODE_OUT_OF_MEMORY;
   pJson = tjsonCreateObject();
@@ -224,39 +273,56 @@
     code = terrno;
     goto _OVER;
   }
-  if ((code = vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes)) != 0) goto _OVER;
+  TAOS_CHECK_GOTO(vmEncodeVnodeList(pJson, ppVnodes, numOfVnodes), &lino, _OVER);

   buffer = tjsonToString(pJson);
   if (buffer == NULL) {
     code = TSDB_CODE_INVALID_JSON_FORMAT;
+    lino = __LINE__;
+    goto _OVER;
+  }
+
+  code = taosThreadMutexLock(&pMgmt->fileLock);
+  if (code != 0) {
+    lino = __LINE__;
     goto _OVER;
   }

   pFile = taosOpenFile(file, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_WRITE_THROUGH);
   if (pFile == NULL) {
     code = terrno;
-    goto _OVER;
+    lino = __LINE__;
+    goto _OVER1;
   }

   int32_t len = strlen(buffer);
   if (taosWriteFile(pFile, buffer, len) <= 0) {
     code = terrno;
-    goto _OVER;
+    lino = __LINE__;
+    goto _OVER1;
   }

   if (taosFsyncFile(pFile) < 0) {
     code = TAOS_SYSTEM_ERROR(errno);
-    goto _OVER;
+    lino = __LINE__;
+    goto _OVER1;
   }

   code = taosCloseFile(&pFile);
   if (code != 0) {
     code = TAOS_SYSTEM_ERROR(errno);
-    goto _OVER;
+    lino = __LINE__;
+    goto _OVER1;
   }

-  TAOS_CHECK_GOTO(taosRenameFile(file, realfile), NULL, _OVER);
+  TAOS_CHECK_GOTO(taosRenameFile(file, realfile), &lino, _OVER1);

   dInfo("succeed to write vnodes file:%s, vnodes:%d", realfile, numOfVnodes);

+_OVER1:
+  ret = taosThreadMutexUnlock(&pMgmt->fileLock);
+  if (ret != 0) {
+    dError("failed to unlock since %s", tstrerror(ret));
+  }
+
 _OVER:
   if (pJson != NULL) tjsonDelete(pJson);
   if (buffer != NULL) taosMemoryFree(buffer);
@@ -272,7 +338,8 @@ _OVER:
   }

   if (code != 0) {
-    dError("failed to write vnodes file:%s since %s, vnodes:%d", realfile, tstrerror(code), numOfVnodes);
+    dError("failed to write vnodes file:%s at line:%d since %s, vnodes:%d", realfile, lino, tstrerror(code),
+           numOfVnodes);
   }
   return code;
 }
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 7e950ef1be..8a8dcc74a5 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -415,27 +415,30 @@ int32_t vmProcessCreateVnodeReq(SVnodeMgmt 
*pMgmt, SRpcMsg *pMsg) { goto _OVER; } - code = taosThreadMutexLock(&pMgmt->createLock); - if (code != 0) { - dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(code)); - goto _OVER; - } code = vmWriteVnodeListToFile(pMgmt); if (code != 0) { code = terrno != 0 ? terrno : code; - int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); - if (ret != 0) { - dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); - } goto _OVER; } - int32_t ret = taosThreadMutexUnlock(&pMgmt->createLock); - if (ret != 0) { - dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(ret)); - } _OVER: if (code != 0) { + int32_t r = 0; + r = taosThreadRwlockWrlock(&pMgmt->lock); + if (r != 0) { + dError("vgId:%d, failed to lock since %s", req.vgId, tstrerror(r)); + } + if (r == 0) { + dInfo("vgId:%d, remove from hash", req.vgId); + r = taosHashRemove(pMgmt->hash, &req.vgId, sizeof(int32_t)); + if (r != 0) { + dError("vgId:%d, failed to remove vnode since %s", req.vgId, tstrerror(r)); + } + } + r = taosThreadRwlockUnlock(&pMgmt->lock); + if (r != 0) { + dError("vgId:%d, failed to unlock since %s", req.vgId, tstrerror(r)); + } vnodeClose(pImpl); vnodeDestroy(0, path, pMgmt->pTfs, 0); } else { @@ -535,7 +538,7 @@ int32_t vmProcessAlterVnodeTypeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path)); bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl); - vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal); + vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true); int32_t diskPrimary = wrapperCfg.diskPrimary; char path[TSDB_FILENAME_LEN] = {0}; @@ -683,7 +686,7 @@ int32_t vmProcessAlterHashRangeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { } dInfo("vgId:%d, close vnode", srcVgId); - vmCloseVnode(pMgmt, pVnode, true); + vmCloseVnode(pMgmt, pVnode, true, false); int32_t diskPrimary = wrapperCfg.diskPrimary; char srcPath[TSDB_FILENAME_LEN] = {0}; @@ -792,7 +795,7 @@ int32_t vmProcessAlterVnodeReplicaReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { tstrncpy(wrapperCfg.path, pVnode->path, sizeof(wrapperCfg.path)); bool commitAndRemoveWal = vnodeShouldRemoveWal(pVnode->pImpl); - vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal); + vmCloseVnode(pMgmt, pVnode, commitAndRemoveWal, true); int32_t diskPrimary = wrapperCfg.diskPrimary; char path[TSDB_FILENAME_LEN] = {0}; @@ -860,7 +863,7 @@ int32_t vmProcessDropVnodeReq(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return code; } - vmCloseVnode(pMgmt, pVnode, false); + vmCloseVnode(pMgmt, pVnode, false, false); if (vmWriteVnodeListToFile(pMgmt) != 0) { dError("vgId:%d, failed to write vnode list since %s", vgId, terrstr()); } diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 20618dbdf3..a285043382 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -166,16 +166,34 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) { (void)taosThreadRwlockWrlock(&pMgmt->lock); SVnodeObj *pOld = NULL; int32_t r = taosHashGetDup(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from hash", pVnode->vgId); + } if (pOld) { vmFreeVnodeObj(&pOld); } int32_t code = taosHashPut(pMgmt->hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnodeObj *)); + + pOld = NULL; + r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld); + if (r != 0) { + dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId); + } + if 
(pOld) {
+    vmFreeVnodeObj(&pOld);
+  }
+
+  dInfo("vgId:%d, remove from closedHash", pVnode->vgId);
+  r = taosHashRemove(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t));
+  if (r != 0) {
+    dError("vgId:%d, failed to remove vnode from closedHash", pVnode->vgId);
+  }
   (void)taosThreadRwlockUnlock(&pMgmt->lock);

   return code;
 }

-void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal) {
+void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal, bool keepClosed) {
   char path[TSDB_FILENAME_LEN] = {0};
   bool atExit = true;

@@ -185,7 +203,40 @@ void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode, bool commitAndRemoveWal)
   (void)taosThreadRwlockWrlock(&pMgmt->lock);
   int32_t r = taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
+  if (r != 0) {
+    dError("vgId:%d, failed to remove vnode from hash", pVnode->vgId);
+  }
+  if (keepClosed) {
+    SVnodeObj *pClosedVnode = taosMemoryCalloc(1, sizeof(SVnodeObj));
+    if (pClosedVnode == NULL) {
+      dError("vgId:%d, failed to alloc closed vnode since %s", pVnode->vgId, terrstr());
+      (void)taosThreadRwlockUnlock(&pMgmt->lock);
+      return;
+    }
+
+    pClosedVnode->vgId = pVnode->vgId;
+    pClosedVnode->dropped = pVnode->dropped;
+    pClosedVnode->vgVersion = pVnode->vgVersion;
+    pClosedVnode->diskPrimary = pVnode->diskPrimary;
+    pClosedVnode->toVgId = pVnode->toVgId;
+
+    SVnodeObj *pOld = NULL;
+    r = taosHashGetDup(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), (void *)&pOld);
+    if (r != 0) {
+      dError("vgId:%d, failed to get vnode from closedHash", pVnode->vgId);
+    }
+    if (pOld) {
+      vmFreeVnodeObj(&pOld);
+    }
+    dInfo("vgId:%d, put vnode to closedHash", pVnode->vgId);
+    r = taosHashPut(pMgmt->closedHash, &pVnode->vgId, sizeof(int32_t), &pClosedVnode, sizeof(SVnodeObj *));
+    if (r != 0) {
+      dError("vgId:%d, failed to put vnode to closedHash", pVnode->vgId);
+    }
+  }
   (void)taosThreadRwlockUnlock(&pMgmt->lock);
+
   vmReleaseVnode(pMgmt, pVnode);

   if (pVnode->failed) {
@@ -362,9 +413,15 @@ static void *vmOpenVnodeInThread(void *param) {
 static int32_t vmOpenVnodes(SVnodeMgmt *pMgmt) {
   pMgmt->hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
   if (pMgmt->hash == NULL) {
-    terrno = TSDB_CODE_OUT_OF_MEMORY;
     dError("failed to init vnode hash since %s", terrstr());
-    return -1;
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+
+  pMgmt->closedHash =
+      taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK);
+  if (pMgmt->closedHash == NULL) {
+    dError("failed to init vnode closed hash since %s", terrstr());
+    return TSDB_CODE_OUT_OF_MEMORY;
   }

   SWrapperCfg *pCfgs = NULL;
@@ -459,7 +516,7 @@ static void *vmCloseVnodeInThread(void *param) {
             pMgmt->state.openVnodes, pMgmt->state.totalVnodes);
     tmsgReportStartup("vnode-close", stepDesc);

-    vmCloseVnode(pMgmt, pVnode, false);
+    vmCloseVnode(pMgmt, pVnode, false, false);
   }

   dInfo("thread:%d, numOfVnodes:%d is closed", pThread->threadIndex, pThread->vnodeNum);
@@ -537,6 +594,18 @@ static void vmCloseVnodes(SVnodeMgmt *pMgmt) {
     pMgmt->hash = NULL;
   }

+  void *pIter = taosHashIterate(pMgmt->closedHash, NULL);
+  while (pIter) {
+    SVnodeObj **ppVnode = pIter;
+    vmFreeVnodeObj(ppVnode);
+    pIter = taosHashIterate(pMgmt->closedHash, pIter);
+  }
+
+  if (pMgmt->closedHash != NULL) {
+    taosHashCleanup(pMgmt->closedHash);
+    pMgmt->closedHash = NULL;
+  }
+
   dInfo("total vnodes:%d are all closed", numOfVnodes);
 }

@@ -545,7 +614,7 @@ static void vmCleanup(SVnodeMgmt *pMgmt) { 
vmStopWorker(pMgmt); vnodeCleanup(); (void)taosThreadRwlockDestroy(&pMgmt->lock); - (void)taosThreadMutexDestroy(&pMgmt->createLock); + (void)taosThreadMutexDestroy(&pMgmt->fileLock); taosMemoryFree(pMgmt); } @@ -637,7 +706,7 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { goto _OVER; } - code = taosThreadMutexInit(&pMgmt->createLock, NULL); + code = taosThreadMutexInit(&pMgmt->fileLock, NULL); if (code != 0) { code = TAOS_SYSTEM_ERROR(errno); goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index 97bf661bc3..1dd21900e3 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -15,13 +15,10 @@ #define _DEFAULT_SOURCE #include "mndArbGroup.h" -#include "audit.h" #include "mndDb.h" #include "mndDnode.h" -#include "mndPrivilege.h" #include "mndShow.h" #include "mndTrans.h" -#include "mndUser.h" #include "mndVgroup.h" #define ARBGROUP_VER_NUMBER 1 @@ -245,11 +242,11 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p } for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) { - (void)memcpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pOld->members[i].state.token, pNew->members[i].state.token, TSDB_ARB_TOKEN_SIZE); } pOld->isSync = pNew->isSync; pOld->assignedLeader.dnodeId = pNew->assignedLeader.dnodeId; - (void)memcpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pOld->assignedLeader.token, pNew->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); pOld->assignedLeader.acked = pNew->assignedLeader.acked; pOld->version++; @@ -834,12 +831,12 @@ static int32_t mndProcessArbUpdateGroupBatchReq(SRpcMsg *pReq) { newGroup.dbUid = pUpdateGroup->dbUid; for (int i = 0; i < TSDB_ARB_GROUP_MEMBER_NUM; i++) { newGroup.members[i].info.dnodeId = pUpdateGroup->members[i].dnodeId; - (void)memcpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(newGroup.members[i].state.token, pUpdateGroup->members[i].token, TSDB_ARB_TOKEN_SIZE); } newGroup.isSync = pUpdateGroup->isSync; newGroup.assignedLeader.dnodeId = pUpdateGroup->assignedLeader.dnodeId; - (void)memcpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(newGroup.assignedLeader.token, pUpdateGroup->assignedLeader.token, TSDB_ARB_TOKEN_SIZE); newGroup.assignedLeader.acked = pUpdateGroup->assignedLeader.acked; newGroup.version = pUpdateGroup->version; @@ -897,7 +894,7 @@ static void mndArbGroupSetAssignedLeader(SArbGroup *pGroup, int32_t index) { SArbGroupMember *pMember = &pGroup->members[index]; pGroup->assignedLeader.dnodeId = pMember->info.dnodeId; - (void)strncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pGroup->assignedLeader.token, pMember->state.token, TSDB_ARB_TOKEN_SIZE); pGroup->assignedLeader.acked = false; } @@ -979,7 +976,7 @@ bool mndUpdateArbGroupByHeartBeat(SArbGroup *pGroup, SVArbHbRspMember *pRspMembe // update token mndArbGroupDupObj(pGroup, pNewGroup); - (void)memcpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE); + tstrncpy(pNewGroup->members[index].state.token, pRspMember->memberToken, TSDB_ARB_TOKEN_SIZE); pNewGroup->isSync = false; bool resetAssigned = false; diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 9f7c163eec..0538a5fe83 100644 --- 
a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -239,12 +239,13 @@ static int32_t mndProcessMqHbReq(SRpcMsg *pMsg) { MND_TMQ_RETURN_CHECK(mndAcquireConsumer(pMnode, consumerId, &pConsumer)); MND_TMQ_RETURN_CHECK(checkPrivilege(pMnode, pConsumer, &rsp, pMsg->info.conn.user)); atomic_store_32(&pConsumer->hbStatus, 0); + mDebug("consumer:0x%" PRIx64 " receive hb pollFlag:%d %d", consumerId, req.pollFlag, pConsumer->pollStatus); if (req.pollFlag == 1){ atomic_store_32(&pConsumer->pollStatus, 0); } storeOffsetRows(pMnode, &req, pConsumer); - rsp.debugFlag = tqClientDebug; + rsp.debugFlag = tqClientDebugFlag; code = buildMqHbRsp(pMsg, &rsp); END: diff --git a/source/dnode/mnode/sdb/src/sdbFile.c b/source/dnode/mnode/sdb/src/sdbFile.c index 227ff15da9..474b22cca0 100644 --- a/source/dnode/mnode/sdb/src/sdbFile.c +++ b/source/dnode/mnode/sdb/src/sdbFile.c @@ -400,8 +400,8 @@ static int32_t sdbReadFileImp(SSdb *pSdb) { pSdb->commitTerm = pSdb->applyTerm; pSdb->commitConfig = pSdb->applyConfig; memcpy(pSdb->tableVer, tableVer, sizeof(tableVer)); - mInfo("read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, pSdb->commitIndex, - pSdb->commitTerm, pSdb->commitConfig); + mInfo("vgId:1, trans:0, read sdb file:%s success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64, file, + pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig); _OVER: if ((ret = taosCloseFile(&pFile)) != 0) { @@ -573,7 +573,8 @@ static int32_t sdbWriteFileImp(SSdb *pSdb, int32_t skip_type) { pSdb->commitIndex = pSdb->applyIndex; pSdb->commitTerm = pSdb->applyTerm; pSdb->commitConfig = pSdb->applyConfig; - mInfo("write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 " file:%s", + mInfo("vgId:1, trans:0, write sdb file success, commit index:%" PRId64 " term:%" PRId64 " config:%" PRId64 + " file:%s", pSdb->commitIndex, pSdb->commitTerm, pSdb->commitConfig, curfile); } @@ -610,8 +611,8 @@ int32_t sdbWriteFile(SSdb *pSdb, int32_t delta) { if (code != 0) { mError("failed to write sdb file since %s", tstrerror(code)); } else { - mInfo("write sdb file success, apply index:%" PRId64 " term:%" PRId64 " config:%" PRId64, pSdb->applyIndex, - pSdb->applyTerm, pSdb->applyConfig); + mInfo("vgId:1, trans:0, write sdb file success, apply index:%" PRId64 ", term:%" PRId64 ", config:%" PRId64, + pSdb->applyIndex, pSdb->applyTerm, pSdb->applyConfig); } (void)taosThreadMutexUnlock(&pSdb->filelock); return code; diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h index 204311aa98..610ba43673 100644 --- a/source/dnode/vnode/inc/vnode.h +++ b/source/dnode/vnode/inc/vnode.h @@ -243,7 +243,7 @@ int32_t extractMsgFromWal(SWalReader *pReader, void **pItem, int64_t maxVer, con int32_t tqReaderSetSubmitMsg(STqReader *pReader, void *msgStr, int32_t msgLen, int64_t ver); bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids); int32_t tqRetrieveDataBlock(STqReader *pReader, SSDataBlock **pRes, const char *idstr); -int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet); +int32_t tqRetrieveTaosxBlock(STqReader *pReader, SArray *blocks, SArray *schemas, SSubmitTbData **pSubmitTbDataRet, int64_t *createTime); int32_t tqGetStreamExecInfo(SVnode *pVnode, int64_t streamId, int64_t *pDelay, bool *fhFinished); // sma diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index fc98d6578b..54efa0314b 100644 --- 
a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -163,7 +163,7 @@ int32_t metaDropTables(SMeta* pMeta, SArray* tbUids); int metaTtlFindExpired(SMeta* pMeta, int64_t timePointMs, SArray* tbUids, int32_t ttlDropMaxCount); int metaAlterTable(SMeta* pMeta, int64_t version, SVAlterTbReq* pReq, STableMetaRsp* pMetaRsp); int metaUpdateChangeTimeWithLock(SMeta* pMeta, tb_uid_t uid, int64_t changeTimeMs); -SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); +SSchemaWrapper* metaGetTableSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime); int32_t metaGetTbTSchemaNotNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema); int32_t metaGetTbTSchemaMaybeNull(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock, STSchema** ppTSchema); STSchema* metaGetTbTSchema(SMeta* pMeta, tb_uid_t uid, int32_t sver, int lock); diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 484c5c0a16..e2ba8d9ccb 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -371,7 +371,7 @@ int32_t metaTbCursorPrev(SMTbCursor *pTbCur, ETableType jumpTableType) { return 0; } -SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) { +SSchemaWrapper *metaGetTableSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock, int64_t *createTime) { void *pData = NULL; int nData = 0; int64_t version; @@ -407,6 +407,9 @@ _query: } } else if (me.type == TSDB_CHILD_TABLE) { uid = me.ctbEntry.suid; + if (createTime != NULL){ + *createTime = me.ctbEntry.btime; + } tDecoderClear(&dc); goto _query; } else { @@ -617,7 +620,7 @@ STSchema *metaGetTbTSchema(SMeta *pMeta, tb_uid_t uid, int32_t sver, int lock) { STSchema *pTSchema = NULL; SSchemaWrapper *pSW = NULL; - pSW = metaGetTableSchema(pMeta, uid, sver, lock); + pSW = metaGetTableSchema(pMeta, uid, sver, lock, NULL); if (!pSW) return NULL; pTSchema = tBuildTSchema(pSW->pSchema, pSW->nCols, pSW->version); diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index 0936d8f092..b2826ec45a 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -552,7 +552,7 @@ int32_t setForSnapShot(SSnapContext* ctx, int64_t uid) { void taosXSetTablePrimaryKey(SSnapContext* ctx, int64_t uid) { bool ret = false; - SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1); + SSchemaWrapper* schema = metaGetTableSchema(ctx->pMeta, uid, -1, 1, NULL); if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) { ret = true; } diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index f2f85773b5..fedcc0e82d 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -263,7 +263,7 @@ bool tqGetTablePrimaryKey(STqReader* pReader) { return pReader->hasPrimaryKey; } void tqSetTablePrimaryKey(STqReader* pReader, int64_t uid) { bool ret = false; - SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1); + SSchemaWrapper* schema = metaGetTableSchema(pReader->pVnodeMeta, uid, -1, 1, NULL); if (schema && schema->nCols >= 2 && schema->pSchema[1].flags & COL_IS_KEY) { ret = true; } @@ -669,7 +669,7 @@ int32_t tqRetrieveDataBlock(STqReader* pReader, SSDataBlock** pRes, const char* (pReader->cachedSchemaVer != sversion)) { tDeleteSchemaWrapper(pReader->pSchemaWrapper); - 
pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1); + pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, NULL); if (pReader->pSchemaWrapper == NULL) { tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", uid:%" PRId64 "version %d, possibly dropped table", @@ -961,10 +961,8 @@ END: return code; } -int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet) { - tqDebug("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); - SSDataBlock* block = NULL; - +int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas, SSubmitTbData** pSubmitTbDataRet, int64_t *createTime) { + tqTrace("tq reader retrieve data block %p, %d", pReader->msg.msgStr, pReader->nextBlk); SSubmitTbData* pSubmitTbData = taosArrayGet(pReader->submit.aSubmitTbData, pReader->nextBlk); if (pSubmitTbData == NULL) { return terrno; @@ -980,7 +978,7 @@ int32_t tqRetrieveTaosxBlock(STqReader* pReader, SArray* blocks, SArray* schemas pReader->lastBlkUid = uid; tDeleteSchemaWrapper(pReader->pSchemaWrapper); - pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1); + pReader->pSchemaWrapper = metaGetTableSchema(pReader->pVnodeMeta, uid, sversion, 1, createTime); if (pReader->pSchemaWrapper == NULL) { tqWarn("vgId:%d, cannot found schema wrapper for table: suid:%" PRId64 ", version %d, possibly dropped table", pReader->pWalReader->pWal->cfg.vgId, uid, pReader->cachedSchemaVer); diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index dbc1b16cf5..0e7e27fcff 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -210,36 +210,21 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat if (pDataBlock != NULL && pDataBlock->info.rows > 0) { if (pRsp->withTbName) { - if (pOffset->type == TMQ_OFFSET__LOG) { - int64_t uid = pExec->pTqReader->lastBlkUid; - if (tqAddTbNameToRsp(pTq, uid, pRsp, 1) < 0) { - tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); - continue; - } - } else { - char* tbName = taosStrdup(qExtractTbnameFromTask(task)); - if (tbName == NULL) { - tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId); - return terrno; - } - if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){ - tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); - continue; - } + char* tbName = taosStrdup(qExtractTbnameFromTask(task)); + if (tbName == NULL) { + tqError("vgId:%d, failed to add tbname to rsp msg, null", pTq->pVnode->config.vgId); + return terrno; + } + if (taosArrayPush(pRsp->blockTbName, &tbName) == NULL){ + tqError("vgId:%d, failed to add tbname to rsp msg", pTq->pVnode->config.vgId); + continue; } } if (pRsp->withSchema) { - if (pOffset->type == TMQ_OFFSET__LOG) { - if (tqAddBlockSchemaToRsp(pExec, pRsp) != 0){ - tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); - continue; - } - } else { - SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); - if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){ - tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); - continue; - } + SSchemaWrapper* pSW = tCloneSSchemaWrapper(qExtractSchemaFromTask(task)); + if(taosArrayPush(pRsp->blockSchema, &pSW) == NULL){ + tqError("vgId:%d, failed to add schema to rsp msg", pTq->pVnode->config.vgId); 
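+        // pSW was cloned above with tCloneSSchemaWrapper; release it before
+        // skipping this block so the failed push does not leak the copy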
+        tDeleteSchemaWrapper(pSW);
+        continue;
+      }
     }

@@ -249,12 +234,9 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat
         continue;
       }
       pRsp->blockNum++;
-      if (pOffset->type == TMQ_OFFSET__LOG) {
-        continue;
-      } else {
-        rowCnt += pDataBlock->info.rows;
-        if (rowCnt <= tmqRowSize) continue;
-      }
+      rowCnt += pDataBlock->info.rows;
+      if (rowCnt <= tmqRowSize) continue;
+    }

     // get meta
@@ -296,6 +278,54 @@ int32_t tqScanTaosx(STQ* pTq, const STqHandle* pHandle, SMqDataRsp* pRsp, SMqBat
   return code;
 }

+static int32_t buildCreateTbInfo(SMqDataRsp* pRsp, SVCreateTbReq* pCreateTbReq){
+  int32_t code = 0;
+  void*   createReq = NULL;
+  if (pRsp->createTableNum == 0) {
+    pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
+    if (pRsp->createTableLen == NULL) {
+      code = terrno;
+      goto END;
+    }
+    pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
+    if (pRsp->createTableReq == NULL) {
+      code = terrno;
+      goto END;
+    }
+  }
+
+  uint32_t len = 0;
+  tEncodeSize(tEncodeSVCreateTbReq, pCreateTbReq, len, code);
+  if (TSDB_CODE_SUCCESS != code) {
+    goto END;
+  }
+  createReq = taosMemoryCalloc(1, len);
+  if (createReq == NULL){
+    code = terrno;
+    goto END;
+  }
+  SEncoder encoder = {0};
+  tEncoderInit(&encoder, createReq, len);
+  code = tEncodeSVCreateTbReq(&encoder, pCreateTbReq);
+  tEncoderClear(&encoder);
+  if (code < 0) {
+    goto END;
+  }
+  if (taosArrayPush(pRsp->createTableLen, &len) == NULL){
+    code = terrno;
+    goto END;
+  }
+  if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){
+    code = terrno;
+    goto END;
+  }
+  pRsp->createTableNum++;
+
+  return 0;
+END:
+  taosMemoryFree(createReq);
+  return code;
+}

 static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int32_t* totalRows, int8_t sourceExcluded){
   int32_t code = 0;
@@ -315,7 +345,8 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int
   }

   SSubmitTbData* pSubmitTbDataRet = NULL;
-  code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet);
+  int64_t createTime = INT64_MAX;
+  code = tqRetrieveTaosxBlock(pReader, pBlocks, pSchemas, &pSubmitTbDataRet, &createTime);
   if (code != 0) {
     tqError("vgId:%d, failed to retrieve block", pTq->pVnode->config.vgId);
     goto END;
@@ -333,46 +364,13 @@ static void tqProcessSubData(STQ* pTq, STqHandle* pHandle, SMqDataRsp* pRsp, int
     }
   }
   if (pHandle->fetchMeta != WITH_DATA && pSubmitTbDataRet->pCreateTbReq != NULL) {
-    if (pRsp->createTableNum == 0) {
-      pRsp->createTableLen = taosArrayInit(0, sizeof(int32_t));
-      if (pRsp->createTableLen == NULL) {
-        code = terrno;
-        goto END;
-      }
-      pRsp->createTableReq = taosArrayInit(0, sizeof(void*));
-      if (pRsp->createTableReq == NULL) {
-        code = terrno;
+    if (pSubmitTbDataRet->ctimeMs - createTime <= 1000) {  // send createTbReq only when the table was just auto-created, to avoid resending it for tables that already exist
+      code = buildCreateTbInfo(pRsp, pSubmitTbDataRet->pCreateTbReq);
+      if (code != 0){
+        tqError("vgId:%d, failed to build create table info", pTq->pVnode->config.vgId);
         goto END;
       }
     }
-
-    uint32_t len = 0;
-    tEncodeSize(tEncodeSVCreateTbReq, pSubmitTbDataRet->pCreateTbReq, len, code);
-    if (TSDB_CODE_SUCCESS != code) {
-      goto END;
-    }
-    void* createReq = taosMemoryCalloc(1, len);
-    if (createReq == NULL){
-      code = terrno;
-      goto END;
-    }
-    SEncoder encoder = {0};
-    tEncoderInit(&encoder, createReq, len);
-    code = tEncodeSVCreateTbReq(&encoder, pSubmitTbDataRet->pCreateTbReq);
-    tEncoderClear(&encoder);
-    if (code < 0) {
-      taosMemoryFree(createReq);
-      goto END;
-    }
-    if (taosArrayPush(pRsp->createTableLen, &len) == NULL){
-      taosMemoryFree(createReq);
- 
goto END; - } - if (taosArrayPush(pRsp->createTableReq, &createReq) == NULL){ - taosMemoryFree(createReq); - goto END; - } - pRsp->createTableNum++; } if (pHandle->fetchMeta == ONLY_META && pSubmitTbDataRet->pCreateTbReq == NULL) { goto END; diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index b4866b8c65..3f11937463 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -51,7 +51,8 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { pRsp->blockTbName = taosArrayInit(0, sizeof(void*)); pRsp->blockSchema = taosArrayInit(0, sizeof(void*)); - if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) { + if (pRsp->blockData == NULL || pRsp->blockDataLen == NULL || + pRsp->blockTbName == NULL || pRsp->blockSchema == NULL) { if (pRsp->blockData != NULL) { taosArrayDestroy(pRsp->blockData); pRsp->blockData = NULL; @@ -71,6 +72,7 @@ static int32_t tqInitTaosxRsp(SMqDataRsp* pRsp, STqOffsetVal pOffset) { taosArrayDestroy(pRsp->blockSchema); pRsp->blockSchema = NULL; } + return terrno; } diff --git a/source/dnode/vnode/src/tsdb/tsdbCache.c b/source/dnode/vnode/src/tsdb/tsdbCache.c index 5583e464ed..cbb4f9e873 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCache.c +++ b/source/dnode/vnode/src/tsdb/tsdbCache.c @@ -723,34 +723,32 @@ static int32_t tsdbCacheDropTableColumn(STsdb *pTsdb, int64_t uid, int16_t cid, rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; { SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[0] != NULL) { + code = tsdbCacheDeserialize(values_list[0], values_list_sizes[0], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } + if (NULL != pLastCol) { + rocksdb_writebatch_delete(wb, keys_list[0], klen); + } + taosMemoryFreeClear(pLastCol); } - if (NULL != pLastCol) { - rocksdb_writebatch_delete(wb, keys_list[0], klen); - } - taosMemoryFreeClear(pLastCol); pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[1] != NULL) { + code = tsdbCacheDeserialize(values_list[1], values_list_sizes[1], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } + if (NULL != pLastCol) { + rocksdb_writebatch_delete(wb, keys_list[1], klen); + } + taosMemoryFreeClear(pLastCol); } - if (NULL != pLastCol) { - rocksdb_writebatch_delete(wb, keys_list[1], klen); - } - taosMemoryFreeClear(pLastCol); rocksdb_free(values_list[0]); 
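+  // note: the values_list buffers were handed out by RocksDB (likely by the
+  // preceding multi-get read), so they must be released with rocksdb_free()
+  // rather than taosMemoryFree()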
rocksdb_free(values_list[1]); @@ -1218,14 +1216,13 @@ static int32_t tsdbCacheUpdate(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, SArray SColVal *pColVal = &updCtx->colVal; SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } /* if (code) { @@ -1692,14 +1689,13 @@ static int32_t tsdbCacheLoadFromRocks(STsdb *pTsdb, tb_uid_t uid, SArray *pLastA continue; } - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } SLastCol *pToFree = pLastCol; SIdxKey *idxKey = &((SIdxKey *)TARRAY_DATA(remainCols))[j]; @@ -1959,14 +1955,13 @@ int32_t tsdbCacheDel(STsdb *pTsdb, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKE rocksdb_writebatch_t *wb = pTsdb->rCache.writebatch; for (int i = 0; i < numKeys; ++i) { SLastCol *pLastCol = NULL; - code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); - if (code == TSDB_CODE_INVALID_PARA) { - tsdbTrace("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - } else if (code != TSDB_CODE_SUCCESS) { - tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, - tstrerror(code)); - goto _exit; + if (values_list[i] != NULL) { + code = tsdbCacheDeserialize(values_list[i], values_list_sizes[i], &pLastCol); + if (code != TSDB_CODE_SUCCESS) { + tsdbError("vgId:%d, %s deserialize failed at line %d since %s", TD_VID(pTsdb->pVnode), __func__, __LINE__, + tstrerror(code)); + goto _exit; + } } SIdxKey *idxKey = taosArrayGet(remainCols, i); SLastKey *pLastKey = &idxKey->key; diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 7c6a2e7313..0929953e1c 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -702,7 +702,7 @@ int32_t vnodeGetCtbNum(SVnode *pVnode, int64_t suid, int64_t *num) { } int32_t vnodeGetStbColumnNum(SVnode *pVnode, tb_uid_t suid, int *num) { - SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0); + SSchemaWrapper *pSW = metaGetTableSchema(pVnode->pMeta, suid, -1, 0, NULL); if (pSW) { *num = pSW->nCols; tDeleteSchemaWrapper(pSW); diff --git a/source/libs/command/src/command.c 
b/source/libs/command/src/command.c index b2417a8597..716296345f 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -50,7 +50,7 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe (*pRsp)->numOfCols = htonl(numOfCols); int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, numOfCols); - if(len < 0) { + if (len < 0) { taosMemoryFree(*pRsp); return terrno; } @@ -292,7 +292,7 @@ static int32_t buildRetension(SArray* pRetension, char** ppRetentions) { } const int lMaxLen = 128; - char* p1 = taosMemoryCalloc(1, lMaxLen); + char* p1 = taosMemoryCalloc(1, lMaxLen); if (NULL == p1) { return terrno; } @@ -346,20 +346,20 @@ static const char* encryptAlgorithmStr(int8_t encryptAlgorithm) { } int32_t formatDurationOrKeep(char* buffer, int64_t bufSize, int32_t timeInMinutes) { - if (buffer == NULL || bufSize <= 0) { - return 0; - } - int32_t len = 0; - if (timeInMinutes % 1440 == 0) { - int32_t days = timeInMinutes / 1440; - len = tsnprintf(buffer, bufSize, "%dd", days); - } else if (timeInMinutes % 60 == 0) { - int32_t hours = timeInMinutes / 60; - len = tsnprintf(buffer, bufSize, "%dh", hours); - } else { - len = tsnprintf(buffer, bufSize, "%dm", timeInMinutes); - } - return len; + if (buffer == NULL || bufSize <= 0) { + return 0; + } + int32_t len = 0; + if (timeInMinutes % 1440 == 0) { + int32_t days = timeInMinutes / 1440; + len = tsnprintf(buffer, bufSize, "%dd", days); + } else if (timeInMinutes % 60 == 0) { + int32_t hours = timeInMinutes / 60; + len = tsnprintf(buffer, bufSize, "%dh", hours); + } else { + len = tsnprintf(buffer, bufSize, "%dm", timeInMinutes); + } + return len; } static int32_t setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, char* dbFName, SDbCfgInfo* pCfg) { @@ -410,27 +410,27 @@ static int32_t setCreateDBResultIntoDataBlock(SSDataBlock* pBlock, char* dbName, int32_t lenKeep2 = formatDurationOrKeep(keep2Str, sizeof(keep2Str), pCfg->daysToKeep2); if (IS_SYS_DBNAME(dbName)) { - len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, "CREATE DATABASE `%s`", dbName); - } else { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %s " - "WAL_FSYNC_PERIOD %d MAXROWS %d MINROWS %d STT_TRIGGER %d KEEP %s,%s,%s PAGES %d PAGESIZE %d " - "PRECISION '%s' REPLICA %d " - "WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d TABLE_PREFIX %d TABLE_SUFFIX %d TSDB_PAGESIZE %d " - "WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64 - " KEEP_TIME_OFFSET %d ENCRYPT_ALGORITHM '%s' S3_CHUNKSIZE %d S3_KEEPLOCAL %dm S3_COMPACT %d", - dbName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, - durationStr, - pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, - keep0Str, keep1Str, keep2Str, - pCfg->pages, pCfg->pageSize, prec, - pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups, 1 == pCfg->numOfStables, hashPrefix, - pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod, pCfg->walRetentionSize, - pCfg->keepTimeOffset, encryptAlgorithmStr(pCfg->encryptAlgorithm), pCfg->s3ChunkSize, - pCfg->s3KeepLocal, pCfg->s3Compact); + "CREATE DATABASE `%s`", dbName); + } else { + len += + tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, + "CREATE DATABASE `%s` BUFFER %d CACHESIZE %d CACHEMODEL '%s' COMP %d DURATION %s " + "WAL_FSYNC_PERIOD %d MAXROWS 
%d MINROWS %d STT_TRIGGER %d KEEP %s,%s,%s PAGES %d PAGESIZE %d " + "PRECISION '%s' REPLICA %d " + "WAL_LEVEL %d VGROUPS %d SINGLE_STABLE %d TABLE_PREFIX %d TABLE_SUFFIX %d TSDB_PAGESIZE %d " + "WAL_RETENTION_PERIOD %d WAL_RETENTION_SIZE %" PRId64 + " KEEP_TIME_OFFSET %d ENCRYPT_ALGORITHM '%s' S3_CHUNKPAGES %d S3_KEEPLOCAL %dm S3_COMPACT %d", + dbName, pCfg->buffer, pCfg->cacheSize, cacheModelStr(pCfg->cacheLast), pCfg->compression, durationStr, + pCfg->walFsyncPeriod, pCfg->maxRows, pCfg->minRows, pCfg->sstTrigger, keep0Str, keep1Str, keep2Str, + pCfg->pages, pCfg->pageSize, prec, pCfg->replications, pCfg->walLevel, pCfg->numOfVgroups, + 1 == pCfg->numOfStables, hashPrefix, pCfg->hashSuffix, pCfg->tsdbPageSize, pCfg->walRetentionPeriod, + pCfg->walRetentionSize, pCfg->keepTimeOffset, encryptAlgorithmStr(pCfg->encryptAlgorithm), + pCfg->s3ChunkSize, pCfg->s3KeepLocal, pCfg->s3Compact); if (pRetentions) { - len += tsnprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, " RETENTIONS %s", pRetentions); + len += tsnprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_DB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, + " RETENTIONS %s", pRetentions); } } @@ -510,30 +510,30 @@ void appendColumnFields(char* buf, int32_t* len, STableCfg* pCfg) { #define LTYPE_LEN (32 + 60) // 60 byte for compress info char type[LTYPE_LEN]; snprintf(type, LTYPE_LEN, "%s", tDataTypes[pSchema->type].name); - int typeLen = strlen(type); + int typeLen = strlen(type); if (TSDB_DATA_TYPE_VARCHAR == pSchema->type || TSDB_DATA_TYPE_VARBINARY == pSchema->type || TSDB_DATA_TYPE_GEOMETRY == pSchema->type) { typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE)); } else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) { typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, "(%d)", - (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); + (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } if (useCompress(pCfg->tableType) && pCfg->pSchemaExt) { typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, " ENCODE \'%s\'", - columnEncodeStr(COMPRESS_L1_TYPE_U32(pCfg->pSchemaExt[i].compress))); + columnEncodeStr(COMPRESS_L1_TYPE_U32(pCfg->pSchemaExt[i].compress))); typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, " COMPRESS \'%s\'", - columnCompressStr(COMPRESS_L2_TYPE_U32(pCfg->pSchemaExt[i].compress))); + columnCompressStr(COMPRESS_L2_TYPE_U32(pCfg->pSchemaExt[i].compress))); typeLen += tsnprintf(type + typeLen, LTYPE_LEN - typeLen, " LEVEL \'%s\'", - columnLevelStr(COMPRESS_L2_TYPE_LEVEL_U32(pCfg->pSchemaExt[i].compress))); + columnLevelStr(COMPRESS_L2_TYPE_LEVEL_U32(pCfg->pSchemaExt[i].compress))); } if (!(pSchema->flags & COL_IS_KEY)) { - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", - ((i > 0) ? ", " : ""), pSchema->name, type); + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), + "%s`%s` %s", ((i > 0) ? ", " : ""), pSchema->name, type); } else { char* pk = "PRIMARY KEY"; - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s %s", - ((i > 0) ? ", " : ""), pSchema->name, type, pk); + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), + "%s`%s` %s %s", ((i > 0) ? 
", " : ""), pSchema->name, type, pk); } } } @@ -545,14 +545,15 @@ void appendTagFields(char* buf, int32_t* len, STableCfg* pCfg) { snprintf(type, sizeof(type), "%s", tDataTypes[pSchema->type].name); if (TSDB_DATA_TYPE_VARCHAR == pSchema->type || TSDB_DATA_TYPE_VARBINARY == pSchema->type || TSDB_DATA_TYPE_GEOMETRY == pSchema->type) { - snprintf(type + strlen(type), sizeof(type) - strlen(type), "(%d)", (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE)); + snprintf(type + strlen(type), sizeof(type) - strlen(type), "(%d)", + (int32_t)(pSchema->bytes - VARSTR_HEADER_SIZE)); } else if (TSDB_DATA_TYPE_NCHAR == pSchema->type) { snprintf(type + strlen(type), sizeof(type) - strlen(type), "(%d)", (int32_t)((pSchema->bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } - *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "%s`%s` %s", - ((i > 0) ? ", " : ""), pSchema->name, type); + *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), + "%s`%s` %s", ((i > 0) ? ", " : ""), pSchema->name, type); } } @@ -560,7 +561,7 @@ void appendTagNameFields(char* buf, int32_t* len, STableCfg* pCfg) { for (int32_t i = 0; i < pCfg->numOfTags; ++i) { SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i; *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "%s`%s`", ((i > 0) ? ", " : ""), pSchema->name); + "%s`%s`", ((i > 0) ? ", " : ""), pSchema->name); } } @@ -582,7 +583,7 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) { return terrno; } *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "%s", pJson); + "%s", pJson); taosMemoryFree(pJson); return TSDB_CODE_SUCCESS; @@ -596,12 +597,12 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) { SSchema* pSchema = pCfg->pSchemas + pCfg->numOfColumns + i; if (i > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - ", "); + ", "); } if (j >= valueNum) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "NULL"); + "NULL"); continue; } @@ -624,14 +625,15 @@ int32_t appendTagValues(char* buf, int32_t* len, STableCfg* pCfg) { code = dataConverToStr(buf + VARSTR_HEADER_SIZE + *len, leftSize, type, pTagVal->pData, pTagVal->nData, &tlen); TAOS_CHECK_ERRNO(code); } else { - code = dataConverToStr(buf + VARSTR_HEADER_SIZE + *len, leftSize, type, &pTagVal->i64, tDataTypes[type].bytes, &tlen); + code = dataConverToStr(buf + VARSTR_HEADER_SIZE + *len, leftSize, type, &pTagVal->i64, tDataTypes[type].bytes, + &tlen); TAOS_CHECK_ERRNO(code); } *len += tlen; j++; } else { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "NULL"); + "NULL"); } } _exit: @@ -643,38 +645,38 @@ _exit: void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* pCfg) { if (pCfg->commentLen > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " COMMENT '%s'", pCfg->pComment); + " COMMENT '%s'", pCfg->pComment); } else if (0 == pCfg->commentLen) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " COMMENT ''"); + " COMMENT ''"); } if (NULL != pDbCfg->pRetensions && 
pCfg->watermark1 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " WATERMARK %" PRId64 "a", pCfg->watermark1); + " WATERMARK %" PRId64 "a", pCfg->watermark1); if (pCfg->watermark2 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - ", %" PRId64 "a", pCfg->watermark2); + ", %" PRId64 "a", pCfg->watermark2); } } if (NULL != pDbCfg->pRetensions && pCfg->delay1 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " MAX_DELAY %" PRId64 "a", pCfg->delay1); + " MAX_DELAY %" PRId64 "a", pCfg->delay1); if (pCfg->delay2 > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - ", %" PRId64 "a", pCfg->delay2); + ", %" PRId64 "a", pCfg->delay2); } } int32_t funcNum = taosArrayGetSize(pCfg->pFuncs); if (NULL != pDbCfg->pRetensions && funcNum > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " ROLLUP("); + " ROLLUP("); for (int32_t i = 0; i < funcNum; ++i) { char* pFunc = taosArrayGet(pCfg->pFuncs, i); *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - "%s%s", ((i > 0) ? ", " : ""), pFunc); + "%s%s", ((i > 0) ? ", " : ""), pFunc); } *len += snprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), ")"); @@ -682,7 +684,7 @@ void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* if (pCfg->ttl > 0) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " TTL %d", pCfg->ttl); + " TTL %d", pCfg->ttl); } if (TSDB_SUPER_TABLE == pCfg->tableType || TSDB_NORMAL_TABLE == pCfg->tableType) { @@ -696,18 +698,18 @@ void appendTableOptions(char* buf, int32_t* len, SDbCfgInfo* pDbCfg, STableCfg* if (nSma < pCfg->numOfColumns && nSma > 0) { bool smaOn = false; *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), - " SMA("); + " SMA("); for (int32_t i = 0; i < pCfg->numOfColumns; ++i) { if (IS_BSMA_ON(pCfg->pSchemas + i)) { if (smaOn) { *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, - SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), ",`%s`", - (pCfg->pSchemas + i)->name); + SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), ",`%s`", + (pCfg->pSchemas + i)->name); } else { smaOn = true; *len += tsnprintf(buf + VARSTR_HEADER_SIZE + *len, - SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "`%s`", - (pCfg->pSchemas + i)->name); + SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + *len), "`%s`", + (pCfg->pSchemas + i)->name); } } } @@ -736,20 +738,20 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p if (TSDB_SUPER_TABLE == pCfg->tableType) { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_TB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE STABLE `%s` (", tbName); + "CREATE STABLE `%s` (", tbName); appendColumnFields(buf2, &len, pCfg); len += tsnprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), - ") TAGS ("); + ") TAGS ("); appendTagFields(buf2, &len, pCfg); len += snprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), 
")"); appendTableOptions(buf2, &len, pDbCfg, pCfg); } else if (TSDB_CHILD_TABLE == pCfg->tableType) { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_TB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE TABLE `%s` USING `%s` (", tbName, pCfg->stbName); + "CREATE TABLE `%s` USING `%s` (", tbName, pCfg->stbName); appendTagNameFields(buf2, &len, pCfg); len += tsnprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), - ") TAGS ("); + ") TAGS ("); code = appendTagValues(buf2, &len, pCfg); TAOS_CHECK_ERRNO(code); len += @@ -757,7 +759,7 @@ static int32_t setCreateTBResultIntoDataBlock(SSDataBlock* pBlock, SDbCfgInfo* p appendTableOptions(buf2, &len, pDbCfg, pCfg); } else { len += tsnprintf(buf2 + VARSTR_HEADER_SIZE, SHOW_CREATE_TB_RESULT_FIELD2_LEN - VARSTR_HEADER_SIZE, - "CREATE TABLE `%s` (", tbName); + "CREATE TABLE `%s` (", tbName); appendColumnFields(buf2, &len, pCfg); len += snprintf(buf2 + VARSTR_HEADER_SIZE + len, SHOW_CREATE_TB_RESULT_FIELD2_LEN - (VARSTR_HEADER_SIZE + len), ")"); @@ -793,7 +795,7 @@ static int32_t setCreateViewResultIntoDataBlock(SSDataBlock* pBlock, SShowCreate } SViewMeta* pMeta = pStmt->pViewMeta; - if(NULL == pMeta) { + if (NULL == pMeta) { qError("exception: view meta is null"); return TSDB_CODE_APP_ERROR; } diff --git a/source/libs/executor/src/anomalywindowoperator.c b/source/libs/executor/src/anomalywindowoperator.c index 7f3430b837..d03e527c2b 100644 --- a/source/libs/executor/src/anomalywindowoperator.c +++ b/source/libs/executor/src/anomalywindowoperator.c @@ -86,7 +86,7 @@ int32_t createAnomalywindowOperatorInfo(SOperatorInfo* downstream, SPhysiNode* p pOperator->exprSupp.hasWindowOrGroup = true; pInfo->tsSlotId = ((SColumnNode*)pAnomalyNode->window.pTspk)->slotId; - strncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt)); + tstrncpy(pInfo->anomalyOpt, pAnomalyNode->anomalyOpt, sizeof(pInfo->anomalyOpt)); if (pAnomalyNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; diff --git a/source/libs/executor/src/exchangeoperator.c b/source/libs/executor/src/exchangeoperator.c index 60442c34ee..042fcf0120 100644 --- a/source/libs/executor/src/exchangeoperator.c +++ b/source/libs/executor/src/exchangeoperator.c @@ -320,7 +320,7 @@ static int32_t initDataSource(int32_t numOfSources, SExchangeInfo* pInfo, const if (!pInfo->pTaskId) { return terrno; } - strncpy(pInfo->pTaskId, id, len); + tstrncpy(pInfo->pTaskId, id, len); for (int32_t i = 0; i < numOfSources; ++i) { SSourceDataInfo dataInfo = {0}; dataInfo.status = EX_SOURCE_DATA_NOT_READY; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 27dd687f40..73dc72fc09 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -545,8 +545,9 @@ int32_t qUpdateTableListForStreamScanner(qTaskInfo_t tinfo, const SArray* tableI return code; } -int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* tableName, int32_t* sversion, - int32_t* tversion, int32_t idx, bool* tbGet) { +int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, int32_t dbNameBuffLen, char* tableName, + int32_t tbaleNameBuffLen, int32_t* sversion, int32_t* tversion, int32_t idx, + bool* tbGet) { *tbGet = false; if (tinfo == NULL || dbName == NULL || tableName == NULL) { @@ -567,12 +568,12 @@ int32_t qGetQueryTableSchemaVersion(qTaskInfo_t tinfo, char* dbName, char* table *sversion = pSchemaInfo->sw->version; *tversion = pSchemaInfo->tversion; if 
if (pSchemaInfo->dbname) { - strcpy(dbName, pSchemaInfo->dbname); + tstrncpy(dbName, pSchemaInfo->dbname, dbNameBuffLen); } else { dbName[0] = 0; } if (pSchemaInfo->tablename) { - strcpy(tableName, pSchemaInfo->tablename); + tstrncpy(tableName, pSchemaInfo->tablename, tableNameBuffLen); } else { tableName[0] = 0; } @@ -1494,6 +1495,7 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT cleanupQueryTableDataCond(&pTaskInfo->streamInfo.tableCond); tstrncpy(pTaskInfo->streamInfo.tbName, mtInfo.tbName, TSDB_TABLE_NAME_LEN); +// pTaskInfo->streamInfo.suid = mtInfo.suid == 0 ? mtInfo.uid : mtInfo.suid; tDeleteSchemaWrapper(pTaskInfo->streamInfo.schema); pTaskInfo->streamInfo.schema = mtInfo.schema; diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index bae9926f63..95846087d0 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -6345,7 +6345,7 @@ int32_t fillTableCountScanDataBlock(STableCountScanSupp* pSupp, char* dbName, ch QUERY_CHECK_NULL(colInfoData, code, lino, _end, terrno); if (strlen(stbName) != 0) { char varStbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE] = {0}; - strncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN); + tstrncpy(varDataVal(varStbName), stbName, TSDB_TABLE_NAME_LEN); varDataSetLen(varStbName, strlen(stbName)); code = colDataSetVal(colInfoData, 0, varStbName, false); QUERY_CHECK_CODE(code, lino, _end); diff --git a/source/libs/executor/src/sysscanoperator.c b/source/libs/executor/src/sysscanoperator.c index 051a06ba5c..8aad415f70 100644 --- a/source/libs/executor/src/sysscanoperator.c +++ b/source/libs/executor/src/sysscanoperator.c @@ -425,7 +425,7 @@ static bool sysTableIsOperatorCondOnOneTable(SNode* pCond, char* condTable) { SValueNode* pValue = (SValueNode*)node->pRight; if (pValue->node.resType.type == TSDB_DATA_TYPE_NCHAR || pValue->node.resType.type == TSDB_DATA_TYPE_VARCHAR) { char* value = nodesGetValueFromNode(pValue); - strncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); + tstrncpy(condTable, varDataVal(value), TSDB_TABLE_NAME_LEN); return true; } } @@ -914,41 +914,41 @@ _end: } } -int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int32_t* len) { +int32_t convertTagDataToStr(char* str, int32_t strBuffLen, int type, void* buf, int32_t bufSize, int32_t* len) { int32_t n = 0; switch (type) { case TSDB_DATA_TYPE_NULL: - n = sprintf(str, "null"); + n = tsnprintf(str, strBuffLen, "null"); break;
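The same sprintf-to-tsnprintf sweep continues below, including the spots that build type strings such as VARCHAR(64) against a shrinking remaining-capacity counter (tagStrBufflen / colStrBufflen). A minimal sketch of that budget-tracking pattern, using plain snprintf plus a hypothetical append_width helper, and assuming tsnprintf never reports more bytes than it actually wrote:

#include <stdio.h>

/* hypothetical helper mirroring the clamped return assumed of tsnprintf */
static int append_width(char* dst, int cap, int width) {
  if (cap <= 0) return 0; /* budget exhausted: write nothing */
  int n = snprintf(dst, (size_t)cap, "(%d)", width);
  return (n < 0) ? 0 : ((n >= cap) ? cap - 1 : n); /* clamp on truncation */
}

int main(void) {
  char typeStr[32];
  int cap = (int)sizeof(typeStr);
  int len = snprintf(typeStr, (size_t)cap, "%s", "VARCHAR"); /* type name first */
  cap -= len;                                                /* shrink the budget */
  if (cap > 0) len += append_width(typeStr + len, cap, 64);  /* then the width */
  printf("%s len=%d\n", typeStr, len); /* VARCHAR(64) len=11 */
  return 0;
}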
"true" : "false"); break; case TSDB_DATA_TYPE_TINYINT: - n = sprintf(str, "%d", *(int8_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int8_t*)buf); break; case TSDB_DATA_TYPE_SMALLINT: - n = sprintf(str, "%d", *(int16_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int16_t*)buf); break; case TSDB_DATA_TYPE_INT: - n = sprintf(str, "%d", *(int32_t*)buf); + n = tsnprintf(str, strBuffLen, "%d", *(int32_t*)buf); break; case TSDB_DATA_TYPE_BIGINT: case TSDB_DATA_TYPE_TIMESTAMP: - n = sprintf(str, "%" PRId64, *(int64_t*)buf); + n = tsnprintf(str, strBuffLen, "%" PRId64, *(int64_t*)buf); break; case TSDB_DATA_TYPE_FLOAT: - n = sprintf(str, "%.5f", GET_FLOAT_VAL(buf)); + n = tsnprintf(str, strBuffLen, "%.5f", GET_FLOAT_VAL(buf)); break; case TSDB_DATA_TYPE_DOUBLE: - n = sprintf(str, "%.9f", GET_DOUBLE_VAL(buf)); + n = tsnprintf(str, strBuffLen, "%.9f", GET_DOUBLE_VAL(buf)); break; case TSDB_DATA_TYPE_BINARY: @@ -973,19 +973,19 @@ int32_t convertTagDataToStr(char* str, int type, void* buf, int32_t bufSize, int n = length; break; case TSDB_DATA_TYPE_UTINYINT: - n = sprintf(str, "%u", *(uint8_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint8_t*)buf); break; case TSDB_DATA_TYPE_USMALLINT: - n = sprintf(str, "%u", *(uint16_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint16_t*)buf); break; case TSDB_DATA_TYPE_UINT: - n = sprintf(str, "%u", *(uint32_t*)buf); + n = tsnprintf(str, strBuffLen, "%u", *(uint32_t*)buf); break; case TSDB_DATA_TYPE_UBIGINT: - n = sprintf(str, "%" PRIu64, *(uint64_t*)buf); + n = tsnprintf(str, strBuffLen, "%" PRIu64, *(uint64_t*)buf); break; default: @@ -1065,14 +1065,21 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, int8_t tagType = (*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); + int32_t tagStrBufflen = 32; char tagTypeStr[VARSTR_HEADER_SIZE + 32]; - int tagTypeLen = sprintf(varDataVal(tagTypeStr), "%s", tDataTypes[tagType].name); + int tagTypeLen = tsnprintf(varDataVal(tagTypeStr), tagStrBufflen, "%s", tDataTypes[tagType].name); + tagStrBufflen -= tagTypeLen; + if (tagStrBufflen <= 0) { + code = TSDB_CODE_INVALID_PARA; + QUERY_CHECK_CODE(code, lino, _end); + } + if (tagType == TSDB_DATA_TYPE_NCHAR) { - tagTypeLen += sprintf( - varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + tagTypeLen += tsnprintf( + varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)", (int32_t)(((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } else if (IS_VAR_DATA_TYPE(tagType)) { - tagTypeLen += sprintf(varDataVal(tagTypeStr) + tagTypeLen, "(%d)", + tagTypeLen += tsnprintf(varDataVal(tagTypeStr) + tagTypeLen, tagStrBufflen, "(%d)", (int32_t)((*smrSuperTable).me.stbEntry.schemaTag.pSchema[i].bytes - VARSTR_HEADER_SIZE)); } varDataSetLen(tagTypeStr, tagTypeLen); @@ -1127,7 +1134,7 @@ static int32_t sysTableUserTagsFillOneTableTags(const SSysTableScanInfo* pInfo, QUERY_CHECK_NULL(tagVarChar, code, lino, _end, terrno); int32_t len = -1; if (tagLen > 0) - convertTagDataToStr(varDataVal(tagVarChar), tagType, tagData, tagLen, &len); + convertTagDataToStr(varDataVal(tagVarChar), bufSize + 1 - VARSTR_HEADER_SIZE, tagType, tagData, tagLen, &len); else len = 0; varDataSetLen(tagVarChar, len); @@ -1197,13 +1204,19 @@ static int32_t sysTableUserColsFillOneTableCols(const SSysTableScanInfo* pInfo, int8_t colType = schemaRow->pSchema[i].type; pColInfoData = taosArrayGet(dataBlock->pDataBlock, 
4); QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); + int32_t colStrBufflen = 32; char colTypeStr[VARSTR_HEADER_SIZE + 32]; - int colTypeLen = sprintf(varDataVal(colTypeStr), "%s", tDataTypes[colType].name); + int colTypeLen = tsnprintf(varDataVal(colTypeStr), colStrBufflen, "%s", tDataTypes[colType].name); + colStrBufflen -= colTypeLen; + if (colStrBufflen <= 0) { + code = TSDB_CODE_INVALID_PARA; + QUERY_CHECK_CODE(code, lino, _end); + } if (colType == TSDB_DATA_TYPE_VARCHAR) { - colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)", (int32_t)(schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE)); } else if (colType == TSDB_DATA_TYPE_NCHAR) { - colTypeLen += sprintf(varDataVal(colTypeStr) + colTypeLen, "(%d)", + colTypeLen += tsnprintf(varDataVal(colTypeStr) + colTypeLen, colStrBufflen, "(%d)", (int32_t)((schemaRow->pSchema[i].bytes - VARSTR_HEADER_SIZE) / TSDB_NCHAR_SIZE)); } varDataSetLen(colTypeStr, colTypeLen); @@ -2019,8 +2032,7 @@ static EDealRes getDBNameFromConditionWalker(SNode* pNode, void* pContext) { SValueNode* node = (SValueNode*)pNode; char* dbName = nodesGetValueFromNode(node); - strncpy(pContext, varDataVal(dbName), varDataLen(dbName)); - *((char*)pContext + varDataLen(dbName)) = 0; + tstrncpy((char*)pContext, varDataVal(dbName), TSDB_DB_NAME_LEN); return DEAL_RES_END; // stop walk } default: @@ -2056,11 +2068,11 @@ static int32_t doSysTableScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) getDBNameFromCondition(pInfo->pCondition, dbName); if (strncasecmp(name, TSDB_INS_TABLE_COMPACTS, TSDB_TABLE_FNAME_LEN) != 0 && strncasecmp(name, TSDB_INS_TABLE_COMPACT_DETAILS, TSDB_TABLE_FNAME_LEN) != 0) { - sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName)); } } else if (strncasecmp(name, TSDB_INS_TABLE_COLS, TSDB_TABLE_FNAME_LEN) == 0) { getDBNameFromCondition(pInfo->pCondition, dbName); - if (dbName[0]) sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); + if (dbName[0]) TAOS_UNUSED(tsnprintf(pInfo->req.db, sizeof(pInfo->req.db), "%d.%s", pInfo->accountId, dbName)); (void)sysTableIsCondOnOneTable(pInfo->pCondition, pInfo->req.filterTb); } diff --git a/source/libs/executor/test/executorTests.cpp b/source/libs/executor/test/executorTests.cpp index ff33732b23..87887d2b2f 100644 --- a/source/libs/executor/test/executorTests.cpp +++ b/source/libs/executor/test/executorTests.cpp @@ -115,7 +115,7 @@ SSDataBlock* getDummyBlock(SOperatorInfo* pOperator) { int32_t code = colDataSetVal(pColInfo, i, reinterpret_cast(&v), false); ASSERT(code == 0); - // sprintf(buf, "this is %d row", i); + // tsnprintf(buf, "this is %d row", i); // STR_TO_VARSTR(b1, buf); // // SColumnInfoData* pColInfo2 = static_cast(TARRAY_GET_ELEM(pBlock->pDataBlock, 1)); @@ -179,7 +179,7 @@ SSDataBlock* get2ColsDummyBlock(SOperatorInfo* pOperator) { code = colDataSetVal(pColInfo1, i, reinterpret_cast(&v), false); ASSERT(code == 0); - // sprintf(buf, "this is %d row", i); + // tsnprintf(buf, "this is %d row", i); // STR_TO_VARSTR(b1, buf); // // SColumnInfoData* pColInfo2 = static_cast(TARRAY_GET_ELEM(pBlock->pDataBlock, 1)); diff --git a/source/libs/executor/test/lhashTests.cpp b/source/libs/executor/test/lhashTests.cpp index daf59c6058..89e1cd2b07 100644 --- a/source/libs/executor/test/lhashTests.cpp +++ b/source/libs/executor/test/lhashTests.cpp @@ -26,7 +26,7 @@ TEST(testCase, linear_hash_Tests) { 
taosSeedRand(taosGetTimestampSec()); - strcpy(tsTempDir, "/tmp/"); + tstrncpy((char*)tsTempDir, "/tmp/", sizeof(tsTempDir)); _hash_fn_t fn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); diff --git a/source/libs/monitor/src/monFramework.c b/source/libs/monitor/src/monFramework.c index a2d03bbd6a..0dbf6e091a 100644 --- a/source/libs/monitor/src/monFramework.c +++ b/source/libs/monitor/src/monFramework.c @@ -183,7 +183,7 @@ void monGenClusterInfoTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, metric_names[i], strlen(metric_names[i])) != 0) { - uError("failed to remove metric %s", metric_names[i]); + uTrace("failed to remove metric %s", metric_names[i]); } } @@ -652,7 +652,7 @@ void monGenMnodeRoleTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, mnodes_role_gauges[i], strlen(mnodes_role_gauges[i])) != 0) { - uError("failed to remove metric %s", mnodes_role_gauges[i]); + uTrace("failed to remove metric %s", mnodes_role_gauges[i]); } } @@ -725,7 +725,7 @@ void monGenVnodeRoleTable(SMonInfo *pMonitor){ } if (taosHashRemove(tsMonitor.metrics, vnodes_role_gauges[i], strlen(vnodes_role_gauges[i])) != 0) { - uError("failed to remove metric %s", vnodes_role_gauges[i]); + uTrace("failed to remove metric %s", vnodes_role_gauges[i]); } } diff --git a/source/libs/monitor/test/monTest.cpp b/source/libs/monitor/test/monTest.cpp index 2660cff216..a788a5a341 100644 --- a/source/libs/monitor/test/monTest.cpp +++ b/source/libs/monitor/test/monTest.cpp @@ -26,7 +26,10 @@ class MonitorTest : public ::testing::Test { monInit(&cfg); } - static void TearDownTestSuite() { monCleanup(); } + static void TearDownTestSuite() { + monCleanup(); + taosMsleep(100); + } public: void SetUp() override {} diff --git a/source/libs/parser/inc/parAst.h b/source/libs/parser/inc/parAst.h index 28e867965f..597ee5f5d2 100644 --- a/source/libs/parser/inc/parAst.h +++ b/source/libs/parser/inc/parAst.h @@ -64,7 +64,7 @@ typedef enum EDatabaseOptionType { DB_OPTION_STT_TRIGGER, DB_OPTION_TABLE_PREFIX, DB_OPTION_TABLE_SUFFIX, - DB_OPTION_S3_CHUNKSIZE, + DB_OPTION_S3_CHUNKPAGES, DB_OPTION_S3_KEEPLOCAL, DB_OPTION_S3_COMPACT, DB_OPTION_KEEP_TIME_OFFSET, @@ -244,7 +244,7 @@ SNode* createShowTableDistributedStmt(SAstCreateContext* pCxt, SNode* pRealTable SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pLikePattern); SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint); SNode* createShowTableTagsStmt(SAstCreateContext* pCxt, SNode* pTbName, SNode* pDbName, SNodeList* pTags); -SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo, +SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo, int8_t createdb, int8_t is_import); SNode* addCreateUserStmtWhiteList(SAstCreateContext* pCxt, SNode* pStmt, SNodeList* pIpRangesNodeList); SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, void* pAlterInfo); diff --git a/source/libs/parser/inc/sql.y b/source/libs/parser/inc/sql.y index 99f301445a..067a8e3ccc 100644 --- a/source/libs/parser/inc/sql.y +++ b/source/libs/parser/inc/sql.y @@ -280,7 +280,7 @@ db_options(A) ::= db_options(B) WAL_SEGMENT_SIZE NK_INTEGER(C). db_options(A) ::= db_options(B) STT_TRIGGER NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STT_TRIGGER, &C); } db_options(A) ::= db_options(B) TABLE_PREFIX signed(C). 
{ A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_PREFIX, C); } db_options(A) ::= db_options(B) TABLE_SUFFIX signed(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_TABLE_SUFFIX, C); } -db_options(A) ::= db_options(B) S3_CHUNKSIZE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_CHUNKSIZE, &C); } +db_options(A) ::= db_options(B) S3_CHUNKPAGES NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_CHUNKPAGES, &C); } db_options(A) ::= db_options(B) S3_KEEPLOCAL NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_KEEPLOCAL, &C); } db_options(A) ::= db_options(B) S3_KEEPLOCAL NK_VARIABLE(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_KEEPLOCAL, &C); } db_options(A) ::= db_options(B) S3_COMPACT NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_S3_COMPACT, &C); } diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index e031ee0fe1..afc5ed6463 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -43,11 +43,11 @@ } \ } while (0) -#define CHECK_NAME(p) \ - do { \ - if (!p) { \ - goto _err; \ - } \ +#define CHECK_NAME(p) \ + do { \ + if (!p) { \ + goto _err; \ + } \ } while (0) #define COPY_STRING_FORM_ID_TOKEN(buf, pToken) strncpy(buf, (pToken)->z, TMIN((pToken)->n, sizeof(buf) - 1)) @@ -333,7 +333,7 @@ SNode* releaseRawExprNode(SAstCreateContext* pCxt, SNode* pNode) { // Len of pRawExpr->p could be larger than len of aliasName[TSDB_COL_NAME_LEN]. // If aliasName is truncated, hash value of aliasName could be the same. uint64_t hashVal = MurmurHash3_64(pRawExpr->p, pRawExpr->n); - sprintf(pExpr->aliasName, "%"PRIu64, hashVal); + sprintf(pExpr->aliasName, "%" PRIu64, hashVal); strncpy(pExpr->userAlias, pRawExpr->p, len); pExpr->userAlias[len] = '\0'; } @@ -405,7 +405,7 @@ SNode* createValueNode(SAstCreateContext* pCxt, int32_t dataType, const SToken* pCxt->errCode = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&val); CHECK_MAKE_NODE(val); val->literal = taosStrndup(pLiteral->z, pLiteral->n); - if(!val->literal) { + if (!val->literal) { pCxt->errCode = terrno; nodesDestroyNode((SNode*)val); return NULL; @@ -586,8 +586,8 @@ SNodeList* createHintNodeList(SAstCreateContext* pCxt, const SToken* pLiteral) { if (NULL == pLiteral || pLiteral->n <= 5) { return NULL; } - SNodeList* pHintList = NULL; - char* hint = taosStrndup(pLiteral->z + 3, pLiteral->n - 5); + SNodeList* pHintList = NULL; + char* hint = taosStrndup(pLiteral->z + 3, pLiteral->n - 5); if (!hint) return NULL; int32_t i = 0; bool quit = false; @@ -971,7 +971,7 @@ _err: } SNode* createBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight) { - SNode* pNew = NULL, *pGE = NULL, *pLE = NULL; + SNode *pNew = NULL, *pGE = NULL, *pLE = NULL; CHECK_PARSER_STATUS(pCxt); pCxt->errCode = nodesCloneNode(pExpr, &pNew); CHECK_PARSER_STATUS(pCxt); @@ -993,7 +993,7 @@ _err: } SNode* createNotBetweenAnd(SAstCreateContext* pCxt, SNode* pExpr, SNode* pLeft, SNode* pRight) { - SNode* pNew = NULL, *pLT = NULL, *pGT = NULL; + SNode *pNew = NULL, *pLT = NULL, *pGT = NULL; CHECK_PARSER_STATUS(pCxt); pCxt->errCode = nodesCloneNode(pExpr, &pNew); CHECK_PARSER_STATUS(pCxt); @@ -1959,7 +1959,7 @@ static SNode* setDatabaseOptionImpl(SAstCreateContext* pCxt, SNode* pOptions, ED nodesDestroyNode((SNode*)pNode); break; } - case DB_OPTION_S3_CHUNKSIZE: + case DB_OPTION_S3_CHUNKPAGES: pDbOptions->s3ChunkSize = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); break; case DB_OPTION_S3_KEEPLOCAL: { @@ -1974,14 +1974,13 @@ static SNode* 
setDatabaseOptionImpl(SAstCreateContext* pCxt, SNode* pOptions, ED case DB_OPTION_S3_COMPACT: pDbOptions->s3Compact = taosStr2Int8(((SToken*)pVal)->z, NULL, 10); break; - case DB_OPTION_KEEP_TIME_OFFSET: { + case DB_OPTION_KEEP_TIME_OFFSET: pDbOptions->keepTimeOffset = taosStr2Int32(((SToken*)pVal)->z, NULL, 10); break; - case DB_OPTION_ENCRYPT_ALGORITHM: - COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal); - pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; - break; - } + case DB_OPTION_ENCRYPT_ALGORITHM: + COPY_STRING_FORM_STR_TOKEN(pDbOptions->encryptAlgorithmStr, (SToken*)pVal); + pDbOptions->encryptAlgorithm = TSDB_DEFAULT_ENCRYPT_ALGO; + break; default: break; } @@ -2210,7 +2209,7 @@ _err: SNode* setColumnOptions(SAstCreateContext* pCxt, SNode* pOptions, const SToken* pVal1, void* pVal2) { CHECK_PARSER_STATUS(pCxt); - char optionType[TSDB_CL_OPTION_LEN]; + char optionType[TSDB_CL_OPTION_LEN]; memset(optionType, 0, TSDB_CL_OPTION_LEN); strncpy(optionType, pVal1->z, TMIN(pVal1->n, TSDB_CL_OPTION_LEN)); @@ -2807,7 +2806,7 @@ static int32_t getIpV4RangeFromWhitelistItem(char* ipRange, SIpV4Range* pIpRange int32_t code = TSDB_CODE_SUCCESS; char* ipCopy = taosStrdup(ipRange); if (!ipCopy) return terrno; - char* slash = strchr(ipCopy, '/'); + char* slash = strchr(ipCopy, '/'); if (slash) { *slash = '\0'; struct in_addr addr; diff --git a/source/libs/parser/src/parInsertUtil.c b/source/libs/parser/src/parInsertUtil.c index 4f9b46176c..8adf32d2dd 100644 --- a/source/libs/parser/src/parInsertUtil.c +++ b/source/libs/parser/src/parInsertUtil.c @@ -886,17 +886,32 @@ static bool findFileds(SSchema* pSchema, TAOS_FIELD* fields, int numFields) { return false; } -int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq** pCreateTb, TAOS_FIELD* tFields, - int numFields, bool needChangeLength, char* errstr, int32_t errstrLen) { +int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreateTbReq* pCreateTb, void* tFields, + int numFields, bool needChangeLength, char* errstr, int32_t errstrLen, bool raw) { + int ret = 0; if(data == NULL) { uError("rawBlockBindData, data is NULL"); return TSDB_CODE_APP_ERROR; } void* tmp = taosHashGet(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, sizeof(pTableMeta->uid)); + SVCreateTbReq *pCreateReqTmp = NULL; + if (tmp == NULL && pCreateTb != NULL){ + ret = cloneSVreateTbReq(pCreateTb, &pCreateReqTmp); + if (ret != TSDB_CODE_SUCCESS){ + uError("cloneSVreateTbReq error"); + goto end; + } + } + STableDataCxt* pTableCxt = NULL; - int ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, - sizeof(pTableMeta->uid), pTableMeta, pCreateTb, &pTableCxt, true, false); + ret = insGetTableDataCxt(((SVnodeModifyOpStmt*)(query->pRoot))->pTableBlockHashObj, &pTableMeta->uid, + sizeof(pTableMeta->uid), pTableMeta, &pCreateReqTmp, &pTableCxt, true, false); + if (pCreateReqTmp != NULL) { + tdDestroySVCreateTbReq(pCreateReqTmp); + taosMemoryFree(pCreateReqTmp); + } + if (ret != TSDB_CODE_SUCCESS) { uError("insGetTableDataCxt error"); goto end; @@ -948,12 +963,17 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate ret = TSDB_CODE_INVALID_PARA; goto end; } - if (tFields != NULL && numFields > boundInfo->numOfBound) { - if (errstr != NULL) - snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound); +// if (tFields != NULL && numFields > 
boundInfo->numOfBound) { +// if (errstr != NULL) snprintf(errstr, errstrLen, "numFields:%d bigger than num of bound cols:%d", numFields, boundInfo->numOfBound); +// ret = TSDB_CODE_INVALID_PARA; +// goto end; +// } + if (tFields == NULL && numOfCols != boundInfo->numOfBound) { + if (errstr != NULL) snprintf(errstr, errstrLen, "numOfCols:%d not equal to num of bound cols:%d", numOfCols, boundInfo->numOfBound); ret = TSDB_CODE_INVALID_PARA; goto end; } + if (tFields == NULL) { for (int j = 0; j < boundInfo->numOfBound; j++) { SSchema* pColSchema = &pSchema[j]; @@ -991,7 +1011,13 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate for (int i = 0; i < numFields; i++) { for (int j = 0; j < boundInfo->numOfBound; j++) { SSchema* pColSchema = &pSchema[j]; - if (strcmp(pColSchema->name, tFields[i].name) == 0) { + char* fieldName = NULL; + if (raw) { + fieldName = ((SSchemaWrapper*)tFields)->pSchema[i].name; + } else { + fieldName = ((TAOS_FIELD*)tFields)[i].name; + } + if (strcmp(pColSchema->name, fieldName) == 0) { if (*fields != pColSchema->type && *(int32_t*)(fields + sizeof(int8_t)) != pColSchema->bytes) { if (errstr != NULL) snprintf(errstr, errstrLen, @@ -1011,6 +1037,11 @@ int rawBlockBindData(SQuery* query, STableMeta* pTableMeta, void* data, SVCreate pStart += numOfRows * sizeof(int32_t); } else { pStart += BitmapLen(numOfRows); +// for(int k = 0; k < numOfRows; k++) { +// if(!colDataIsNull_f(offset, k) && pColSchema->type == TSDB_DATA_TYPE_INT){ +// printf("colName:%s,val:%d", fieldName, *(int32_t*)(pStart + k * sizeof(int32_t))); +// } +// } } char* pData = pStart; diff --git a/source/libs/parser/src/parTokenizer.c b/source/libs/parser/src/parTokenizer.c index 63121ec044..9b2815d2df 100644 --- a/source/libs/parser/src/parTokenizer.c +++ b/source/libs/parser/src/parTokenizer.c @@ -340,7 +340,7 @@ static SKeyword keywordTable[] = { {"_FROWTS", TK_FROWTS}, {"ALIVE", TK_ALIVE}, {"VARBINARY", TK_VARBINARY}, - {"S3_CHUNKSIZE", TK_S3_CHUNKSIZE}, + {"S3_CHUNKPAGES", TK_S3_CHUNKPAGES}, {"S3_KEEPLOCAL", TK_S3_KEEPLOCAL}, {"S3_COMPACT", TK_S3_COMPACT}, {"S3MIGRATE", TK_S3MIGRATE}, @@ -370,7 +370,7 @@ static int32_t doInitKeywordsTable(void) { keywordHashTable = taosHashInit(numOfEntries, MurmurHash3_32, true, false); for (int32_t i = 0; i < numOfEntries; i++) { keywordTable[i].len = (uint8_t)strlen(keywordTable[i].name); - void* ptr = &keywordTable[i]; + void* ptr = &keywordTable[i]; int32_t code = taosHashPut(keywordHashTable, keywordTable[i].name, keywordTable[i].len, (void*)&ptr, POINTER_BYTES); if (TSDB_CODE_SUCCESS != code) { taosHashCleanup(keywordHashTable); @@ -698,7 +698,7 @@ uint32_t tGetToken(const char* z, uint32_t* tokenId) { } } if (hasNonAsciiChars) { - *tokenId = TK_NK_ALIAS; // must be alias + *tokenId = TK_NK_ALIAS; // must be alias return i; } if (IS_TRUE_STR(z, i) || IS_FALSE_STR(z, i)) { @@ -713,10 +713,10 @@ uint32_t tGetToken(const char* z, uint32_t* tokenId) { break; } bool hasNonAsciiChars = false; - for (i = 1; ; i++) { + for (i = 1;; i++) { if ((z[i] & 0x80) != 0) { hasNonAsciiChars = true; - } else if (isIdChar[(uint8_t)z[i]]){ + } else if (isIdChar[(uint8_t)z[i]]) { } else { break; } @@ -834,9 +834,7 @@ SToken tStrGetToken(const char* str, int32_t* i, bool isPrevOptr, bool* pIgnoreC bool taosIsKeyWordToken(const char* z, int32_t len) { return (tKeywordCode((char*)z, len) != TK_NK_ID); } -int32_t taosInitKeywordsTable() { - return doInitKeywordsTable(); -} +int32_t taosInitKeywordsTable() { return doInitKeywordsTable(); } void 
taosCleanupKeywordsTable() { void* m = keywordHashTable; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 5636c4bdad..a8ed6d977e 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -34,19 +34,19 @@ #define SYSTABLE_SHOW_TYPE_OFFSET QUERY_NODE_SHOW_DNODES_STMT -#define CHECK_RES_OUT_OF_MEM(p) \ - do { \ - int32_t code = (p); \ - if (TSDB_CODE_SUCCESS != code) { \ - return code; \ - } \ +#define CHECK_RES_OUT_OF_MEM(p) \ + do { \ + int32_t code = (p); \ + if (TSDB_CODE_SUCCESS != code) { \ + return code; \ + } \ } while (0) -#define CHECK_POINTER_OUT_OF_MEM(p) \ - do { \ - if (NULL == (p)) { \ - return code; \ - } \ +#define CHECK_POINTER_OUT_OF_MEM(p) \ + do { \ + if (NULL == (p)) { \ + return code; \ + } \ } while (0) typedef struct SRewriteTbNameContext { @@ -458,7 +458,7 @@ static int32_t collectUseDatabase(const SName* pName, SHashObj* pDbs) { } static int32_t collectUseTable(const SName* pName, SHashObj* pTable) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -709,7 +709,7 @@ static int32_t getDBVgInfoImpl(STranslateContext* pCxt, const SName* pName, SArr } static int32_t getDBVgInfo(STranslateContext* pCxt, const char* pDbName, SArray** pVgInfo) { - SName name; + SName name; int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); if (TSDB_CODE_SUCCESS != code) return code; char dbFname[TSDB_DB_FNAME_LEN] = {0}; @@ -725,7 +725,7 @@ static int32_t getTableHashVgroupImpl(STranslateContext* pCxt, const SName* pNam } if (TSDB_CODE_SUCCESS == code) { if (pParCxt->async) { - if(pCxt->withOpt) { + if (pCxt->withOpt) { code = getDbTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo); } else { code = getTableVgroupFromCache(pCxt->pMetaCache, pName, pInfo); @@ -777,7 +777,7 @@ static int32_t getDBCfg(STranslateContext* pCxt, const char* pDbName, SDbCfgInfo SParseContext* pParCxt = pCxt->pParseCxt; SName name; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pDbName, strlen(pDbName)); if (TSDB_CODE_SUCCESS != code) return code; char dbFname[TSDB_DB_FNAME_LEN] = {0}; (void)tNameGetFullDbName(&name, dbFname); @@ -1019,8 +1019,7 @@ static uint8_t getPrecisionFromCurrStmt(SNode* pCurrStmt, uint8_t defaultVal) { if (isDeleteStmt(pCurrStmt)) { return ((SDeleteStmt*)pCurrStmt)->precision; } - if (pCurrStmt && nodeType(pCurrStmt) == QUERY_NODE_CREATE_TSMA_STMT) - return ((SCreateTSMAStmt*)pCurrStmt)->precision; + if (pCurrStmt && nodeType(pCurrStmt) == QUERY_NODE_CREATE_TSMA_STMT) return ((SCreateTSMAStmt*)pCurrStmt)->precision; return defaultVal; } @@ -1168,7 +1167,7 @@ static bool isBlockTimeLineAlignedQuery(SNode* pStmt) { return false; } -int32_t buildPartitionListFromOrderList(SNodeList* pOrderList, int32_t nodesNum, SNodeList**ppOut) { +int32_t buildPartitionListFromOrderList(SNodeList* pOrderList, int32_t nodesNum, SNodeList** ppOut) { *ppOut = NULL; SNodeList* pPartitionList = NULL; SNode* pNode = NULL; @@ -1194,8 +1193,7 @@ int32_t buildPartitionListFromOrderList(SNodeList* pOrderList, int32_t nodesNum, break; } } - if(TSDB_CODE_SUCCESS == code) - *ppOut = pPartitionList; + if (TSDB_CODE_SUCCESS == code) *ppOut = pPartitionList; return code; } @@ -1229,7 +1227,8 @@ static int32_t isTimeLineAlignedQuery(SNode* pStmt, bool* pRes) { 
} } } - if (TSDB_CODE_SUCCESS == code && QUERY_NODE_SET_OPERATOR == nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { + if (TSDB_CODE_SUCCESS == code && + QUERY_NODE_SET_OPERATOR == nodeType(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { SSetOperator* pSub = (SSetOperator*)((STempTableNode*)pSelect->pFromTable)->pSubquery; if (pSelect->pPartitionByList && pSub->timeLineFromOrderBy && pSub->pOrderByList->length > 1) { SNodeList* pPartitionList = NULL; @@ -1397,12 +1396,16 @@ static void setColumnPrimTs(STranslateContext* pCxt, SColumnNode* pCol, const ST } } -static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, bool igTags, SNodeList* pList, bool skipProjRef) { +static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* pTable, bool igTags, SNodeList* pList, + bool skipProjRef) { int32_t code = 0; if (QUERY_NODE_REAL_TABLE == nodeType(pTable)) { const STableMeta* pMeta = ((SRealTableNode*)pTable)->pMeta; int32_t nums = pMeta->tableInfo.numOfColumns + - (igTags ? 0 : ((TSDB_SUPER_TABLE == pMeta->tableType || ((SRealTableNode*)pTable)->stbRewrite) ? pMeta->tableInfo.numOfTags : 0)); + (igTags ? 0 + : ((TSDB_SUPER_TABLE == pMeta->tableType || ((SRealTableNode*)pTable)->stbRewrite) + ? pMeta->tableInfo.numOfTags + : 0)); for (int32_t i = 0; i < nums; ++i) { if (invisibleColumn(pCxt->pParseCxt->enableSysInfo, pMeta->tableType, pMeta->schema[i].flags)) { pCxt->pParseCxt->hasInvisibleCol = true; @@ -1433,7 +1436,8 @@ static int32_t createColumnsByTable(STranslateContext* pCxt, const STableNode* p code = setColumnInfoByExpr(pTempTable, (SExprNode*)pNode, (SColumnNode**)&pCell->pNode); } if (TSDB_CODE_SUCCESS == code) { - if (!skipProjRef) pCol->projRefIdx = ((SExprNode*)pNode)->projIdx; // only set proj ref when select * from (select ...) + if (!skipProjRef) + pCol->projRefIdx = ((SExprNode*)pNode)->projIdx; // only set proj ref when select * from (select ...) } else { break; } @@ -1603,26 +1607,7 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } } if (*pFound) { - if (QUERY_NODE_FUNCTION == nodeType(pFoundNode) && (SQL_CLAUSE_GROUP_BY == pCxt->currClause || SQL_CLAUSE_PARTITION_BY == pCxt->currClause)) { - pCxt->errCode = getFuncInfo(pCxt, (SFunctionNode*)pFoundNode); - if (TSDB_CODE_SUCCESS == pCxt->errCode) { - if (fmIsVectorFunc(((SFunctionNode*)pFoundNode)->funcId)) { - pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION, (*pCol)->colName); - return DEAL_RES_ERROR; - } else if (fmIsPseudoColumnFunc(((SFunctionNode*)pFoundNode)->funcId)) { - if ('\0' != (*pCol)->tableAlias[0]) { - return translateColumnWithPrefix(pCxt, pCol); - } else { - return translateColumnWithoutPrefix(pCxt, pCol); - } - } else { - /* Do nothing and replace old node with found node. 
*/ - } - } else { - return DEAL_RES_ERROR; - } - } - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pFoundNode, &pNew); if (NULL == pNew) { pCxt->errCode = code; @@ -1630,14 +1615,6 @@ static EDealRes translateColumnUseAlias(STranslateContext* pCxt, SColumnNode** p } nodesDestroyNode(*(SNode**)pCol); *(SNode**)pCol = (SNode*)pNew; - if (QUERY_NODE_COLUMN == nodeType(pFoundNode)) { - pCxt->errCode = TSDB_CODE_SUCCESS; - if ('\0' != (*pCol)->tableAlias[0]) { - return translateColumnWithPrefix(pCxt, pCol); - } else { - return translateColumnWithoutPrefix(pCxt, pCol); - } - } } return DEAL_RES_CONTINUE; } @@ -1687,7 +1664,7 @@ static int32_t biMakeTbnameProjectAstNode(char* funcName, char* tableAlias, SNod } if (TSDB_CODE_SUCCESS == code) { snprintf(tbNameFunc->node.userAlias, sizeof(tbNameFunc->node.userAlias), (tableAlias) ? "%s.tbname" : "%stbname", - (tableAlias) ? tableAlias : ""); + (tableAlias) ? tableAlias : ""); strncpy(tbNameFunc->node.aliasName, tbNameFunc->functionName, TSDB_COL_NAME_LEN); if (funcName == NULL) { *pOutNode = (SNode*)tbNameFunc; @@ -1705,13 +1682,13 @@ static int32_t biMakeTbnameProjectAstNode(char* funcName, char* tableAlias, SNod if (TSDB_CODE_SUCCESS == code) { if (tsKeepColumnName) { snprintf(multiResFunc->node.userAlias, sizeof(tbNameFunc->node.userAlias), - (tableAlias) ? "%s.tbname" : "%stbname", (tableAlias) ? tableAlias : ""); + (tableAlias) ? "%s.tbname" : "%stbname", (tableAlias) ? tableAlias : ""); strcpy(multiResFunc->node.aliasName, tbNameFunc->functionName); } else { snprintf(multiResFunc->node.userAlias, sizeof(multiResFunc->node.userAlias), - tableAlias ? "%s(%s.tbname)" : "%s(%stbname)", funcName, tableAlias ? tableAlias : ""); + tableAlias ? "%s(%s.tbname)" : "%s(%stbname)", funcName, tableAlias ? 
tableAlias : ""); biMakeAliasNameInMD5(multiResFunc->node.userAlias, strlen(multiResFunc->node.userAlias), - multiResFunc->node.aliasName); + multiResFunc->node.aliasName); } *pOutNode = (SNode*)multiResFunc; } else { @@ -1726,7 +1703,7 @@ static int32_t biMakeTbnameProjectAstNode(char* funcName, char* tableAlias, SNod static int32_t biRewriteSelectFuncParamStar(STranslateContext* pCxt, SSelectStmt* pSelect, SNode* pNode, SListCell* pSelectListCell) { SNodeList* pTbnameNodeList = NULL; - int32_t code = nodesMakeList(&pTbnameNodeList); + int32_t code = nodesMakeList(&pTbnameNodeList); if (!pTbnameNodeList) return code; SFunctionNode* pFunc = (SFunctionNode*)pNode; @@ -1744,8 +1721,7 @@ static int32_t biRewriteSelectFuncParamStar(STranslateContext* pCxt, SSelectStmt ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(pFunc->functionName, NULL, &pTbnameNode); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { @@ -1761,8 +1737,7 @@ static int32_t biRewriteSelectFuncParamStar(STranslateContext* pCxt, SSelectStmt ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(pFunc->functionName, pTableAlias, &pTbnameNode); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { nodesListInsertListAfterPos(pSelect->pProjectionList, pSelectListCell, pTbnameNodeList); @@ -1794,8 +1769,7 @@ int32_t biRewriteSelectStar(STranslateContext* pCxt, SSelectStmt* pSelect) { ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(NULL, NULL, &pTbnameNode); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { @@ -1810,8 +1784,7 @@ int32_t biRewriteSelectStar(STranslateContext* pCxt, SSelectStmt* pSelect) { ((SRealTableNode*)pTable)->pMeta != NULL && ((SRealTableNode*)pTable)->pMeta->tableType == TSDB_SUPER_TABLE) { SNode* pTbnameNode = NULL; code = biMakeTbnameProjectAstNode(NULL, pTableAlias, &pTbnameNode); - if (TSDB_CODE_SUCCESS ==code) - code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pTbnameNodeList, pTbnameNode); } if (TSDB_CODE_SUCCESS == code && LIST_LENGTH(pTbnameNodeList) > 0) { nodesListInsertListAfterPos(pSelect->pProjectionList, cell, pTbnameNodeList); @@ -1877,17 +1850,11 @@ int32_t biCheckCreateTableTbnameCol(STranslateContext* pCxt, SCreateTableStmt* p } static bool clauseSupportAlias(ESqlClause clause) { - return SQL_CLAUSE_GROUP_BY == clause || - SQL_CLAUSE_PARTITION_BY == clause || - SQL_CLAUSE_ORDER_BY == clause; + return SQL_CLAUSE_GROUP_BY == clause || SQL_CLAUSE_PARTITION_BY == clause || SQL_CLAUSE_ORDER_BY == clause; } -static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { - if (NULL == pCxt->pCurrStmt || - (isSelectStmt(pCxt->pCurrStmt) && NULL == 
((SSelectStmt*)pCxt->pCurrStmt)->pFromTable)) { - return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName); - } - +static EDealRes translateColumnInGroupByClause(STranslateContext* pCxt, SColumnNode** pCol, bool *translateAsAlias) { + *translateAsAlias = false; // count(*)/first(*)/last(*) and so on if (0 == strcmp((*pCol)->colName, "*")) { return DEAL_RES_CONTINUE; @@ -1908,8 +1875,44 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { res = translateColumnWithPrefix(pCxt, pCol); } else { bool found = false; - if ((pCxt->currClause == SQL_CLAUSE_ORDER_BY) && - !(*pCol)->node.asParam) { + res = translateColumnWithoutPrefix(pCxt, pCol); + if (!(*pCol)->node.asParam && + res != DEAL_RES_CONTINUE && + res != DEAL_RES_END && pCxt->errCode != TSDB_CODE_PAR_AMBIGUOUS_COLUMN) { + res = translateColumnUseAlias(pCxt, pCol, &found); + *translateAsAlias = true; + } + } + return res; +} + +static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { + if (NULL == pCxt->pCurrStmt || + (isSelectStmt(pCxt->pCurrStmt) && NULL == ((SSelectStmt*)pCxt->pCurrStmt)->pFromTable)) { + return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_COLUMN, (*pCol)->colName); + } + + // count(*)/first(*)/last(*) and so on + if (0 == strcmp((*pCol)->colName, "*")) { + return DEAL_RES_CONTINUE; + } + + if (pCxt->pParseCxt->biMode) { + SNode** ppNode = (SNode**)pCol; + bool ret; + pCxt->errCode = biRewriteToTbnameFunc(pCxt, ppNode, &ret); + if (TSDB_CODE_SUCCESS != pCxt->errCode) return DEAL_RES_ERROR; + if (ret) { + return translateFunction(pCxt, (SFunctionNode**)ppNode); + } + } + + EDealRes res = DEAL_RES_CONTINUE; + if ('\0' != (*pCol)->tableAlias[0]) { + res = translateColumnWithPrefix(pCxt, pCol); + } else { + bool found = false; + if ((pCxt->currClause == SQL_CLAUSE_ORDER_BY) && !(*pCol)->node.asParam) { res = translateColumnUseAlias(pCxt, pCol, &found); } if (DEAL_RES_ERROR != res && !found) { @@ -1919,9 +1922,7 @@ static EDealRes translateColumn(STranslateContext* pCxt, SColumnNode** pCol) { res = translateColumnWithoutPrefix(pCxt, pCol); } } - if (clauseSupportAlias(pCxt->currClause) && - !(*pCol)->node.asParam && - res != DEAL_RES_CONTINUE && + if (clauseSupportAlias(pCxt->currClause) && !(*pCol)->node.asParam && res != DEAL_RES_CONTINUE && res != DEAL_RES_END) { res = translateColumnUseAlias(pCxt, pCol, &found); } @@ -2478,8 +2479,8 @@ static int32_t translateIndefiniteRowsFunc(STranslateContext* pCxt, SFunctionNod return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } if (pSelect->hasIndefiniteRowsFunc && - (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc)) && - (pSelect->lastProcessByRowFuncId == -1 || !fmIsProcessByRowFunc(pFunc->funcId))) { + (FUNC_RETURN_ROWS_INDEFINITE == pSelect->returnRows || pSelect->returnRows != fmGetFuncReturnRows(pFunc)) && + (pSelect->lastProcessByRowFuncId == -1 || !fmIsProcessByRowFunc(pFunc->funcId))) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC); } if (pSelect->lastProcessByRowFuncId != -1 && pSelect->lastProcessByRowFuncId != pFunc->funcId) { @@ -2651,14 +2652,14 @@ static int32_t translateTimelineFunc(STranslateContext* pCxt, SFunctionNode* pFu "%s function must be used in select statements", pFunc->functionName); } SSelectStmt* pSelect = (SSelectStmt*)pCxt->pCurrStmt; - bool isTimelineAlignedQuery = false; + bool isTimelineAlignedQuery = false; if ((NULL != pSelect->pFromTable && 
QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery))) { int32_t code = isTimeLineAlignedQuery(pCxt->pCurrStmt, &isTimelineAlignedQuery); if (TSDB_CODE_SUCCESS != code) return code; if (!isTimelineAlignedQuery) return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s function requires valid time series input", pFunc->functionName); + "%s function requires valid time series input", pFunc->functionName); } if (NULL != pSelect->pFromTable && QUERY_NODE_JOIN_TABLE == nodeType(pSelect->pFromTable) && (TIME_LINE_GLOBAL != pSelect->timeLineCurMode && TIME_LINE_MULTI != pSelect->timeLineCurMode)) { @@ -2750,9 +2751,10 @@ static int32_t translateRepeatScanFunc(STranslateContext* pCxt, SFunctionNode* p } if (NULL != pSelect->pWindow) { - if (QUERY_NODE_EVENT_WINDOW == nodeType(pSelect->pWindow) || QUERY_NODE_COUNT_WINDOW == nodeType(pSelect->pWindow)) { - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, - "%s function is not supported in count/event window", pFunc->functionName); + if (QUERY_NODE_EVENT_WINDOW == nodeType(pSelect->pWindow) || + QUERY_NODE_COUNT_WINDOW == nodeType(pSelect->pWindow)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_NOT_ALLOWED_FUNC, + "%s function is not supported in count/event window", pFunc->functionName); } } return TSDB_CODE_SUCCESS; @@ -2861,7 +2863,7 @@ static void setFuncClassification(STranslateContext* pCxt, SFunctionNode* pFunc) static int32_t rewriteFuncToValue(STranslateContext* pCxt, char** pLiteral, SNode** pNode) { SValueNode* pVal = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -2970,7 +2972,7 @@ static int32_t rewriteSystemInfoFunc(STranslateContext* pCxt, SNode** pNode) { static int32_t replacePsedudoColumnFuncWithColumn(STranslateContext* pCxt, SNode** ppNode) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -3222,8 +3224,7 @@ static EDealRes translateFunction(STranslateContext* pCxt, SFunctionNode** pFunc pCxt->errCode = getFuncInfo(pCxt, *pFunc); if (TSDB_CODE_SUCCESS == pCxt->errCode) { - if ((SQL_CLAUSE_GROUP_BY == pCxt->currClause || - SQL_CLAUSE_PARTITION_BY == pCxt->currClause) && + if ((SQL_CLAUSE_GROUP_BY == pCxt->currClause || SQL_CLAUSE_PARTITION_BY == pCxt->currClause) && fmIsVectorFunc((*pFunc)->funcId)) { pCxt->errCode = TSDB_CODE_PAR_ILLEGAL_USE_AGG_FUNCTION; } @@ -3246,7 +3247,7 @@ static EDealRes translateLogicCond(STranslateContext* pCxt, SLogicConditionNode* static int32_t createCastFunc(STranslateContext* pCxt, SNode* pExpr, SDataType dt, SNode** pCast) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -3281,7 +3282,7 @@ static bool isCondition(const SNode* pNode) { static int32_t rewriteIsTrue(SNode* pSrc, SNode** pIsTrue) { SOperatorNode* pOp = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOp); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOp); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -3293,9 +3294,9 @@ static int32_t rewriteIsTrue(SNode* pSrc, SNode** pIsTrue) 
{ return TSDB_CODE_SUCCESS; } -extern int8_t gDisplyTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX]; +extern int8_t gDisplyTypes[TSDB_DATA_TYPE_MAX][TSDB_DATA_TYPE_MAX]; static int32_t selectCommonType(SDataType* commonType, const SDataType* newType) { - if (commonType->type < TSDB_DATA_TYPE_NULL || commonType->type >= TSDB_DATA_TYPE_MAX || + if (commonType->type < TSDB_DATA_TYPE_NULL || commonType->type >= TSDB_DATA_TYPE_MAX || newType->type < TSDB_DATA_TYPE_NULL || newType->type >= TSDB_DATA_TYPE_MAX) { return TSDB_CODE_INVALID_PARA; } @@ -3350,7 +3351,7 @@ static EDealRes translateCaseWhen(STranslateContext* pCxt, SCaseWhenNode* pCaseW } allNullThen = false; pCxt->errCode = selectCommonType(&pCaseWhen->node.resType, &pThenExpr->resType); - if(TSDB_CODE_SUCCESS != pCxt->errCode){ + if (TSDB_CODE_SUCCESS != pCxt->errCode) { return DEAL_RES_ERROR; } } @@ -3358,7 +3359,7 @@ static EDealRes translateCaseWhen(STranslateContext* pCxt, SCaseWhenNode* pCaseW SExprNode* pElseExpr = (SExprNode*)pCaseWhen->pElse; if (NULL != pElseExpr) { pCxt->errCode = selectCommonType(&pCaseWhen->node.resType, &pElseExpr->resType); - if(TSDB_CODE_SUCCESS != pCxt->errCode) { + if (TSDB_CODE_SUCCESS != pCxt->errCode) { return DEAL_RES_ERROR; } } @@ -3459,7 +3460,7 @@ static int32_t getGroupByErrorCode(STranslateContext* pCxt) { static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { pCxt->errCode = code; return DEAL_RES_ERROR; @@ -3482,7 +3483,7 @@ static EDealRes rewriteColToSelectValFunc(STranslateContext* pCxt, SNode** pNode static EDealRes rewriteExprToGroupKeyFunc(STranslateContext* pCxt, SNode** pNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { pCxt->errCode = code; return DEAL_RES_ERROR; @@ -3866,7 +3867,7 @@ static EDealRes doCheckAggColCoexist(SNode** pNode, void* pContext) { static EDealRes doCheckGetAggColCoexist(SNode** pNode, void* pContext) { CheckAggColCoexistCxt* pCxt = (CheckAggColCoexistCxt*)pContext; - int32_t code = 0; + int32_t code = 0; if (isVectorFunc(*pNode)) { return DEAL_RES_IGNORE_CHILD; } @@ -3893,7 +3894,7 @@ static int32_t resetSelectFuncNumWithoutDup(SSelectStmt* pSelect) { pSelect->selectFuncNum = 0; pSelect->lastProcessByRowFuncId = -1; SNodeList* pNodeList = NULL; - int32_t code = nodesMakeList(&pNodeList); + int32_t code = nodesMakeList(&pNodeList); if (TSDB_CODE_SUCCESS != code) return code; code = nodesCollectSelectFuncs(pSelect, SQL_CLAUSE_FROM, NULL, fmIsSelectFunc, pNodeList); if (TSDB_CODE_SUCCESS != code) { @@ -4278,8 +4279,8 @@ static int32_t setTableTsmas(STranslateContext* pCxt, SName* pName, SRealTableNo SVgroupInfo vgInfo = {0}; bool exists = false; toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, "", &tsmaTargetTbName); - int32_t len = tsnprintf(buf, TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN, "%s.%s_%s", pTsma->dbFName, pTsma->name, - pRealTable->table.tableName); + int32_t len = tsnprintf(buf, TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN, "%s.%s_%s", pTsma->dbFName, + pTsma->name, pRealTable->table.tableName); len = taosCreateMD5Hash(buf, len); strncpy(tsmaTargetTbName.tname, buf, MD5_OUTPUT_LEN); code = collectUseTable(&tsmaTargetTbName, pCxt->pTargetTables); @@ -4364,7 
+4365,7 @@ static EDealRes doTranslateTbName(SNode** pNode, void* pContext) { if (FUNCTION_TYPE_TBNAME == pFunc->funcType) { SRewriteTbNameContext* pCxt = (SRewriteTbNameContext*)pContext; SValueNode* pVal = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if (TSDB_CODE_SUCCESS != code) { pCxt->errCode = code; return DEAL_RES_ERROR; @@ -4957,8 +4958,7 @@ int32_t translateTable(STranslateContext* pCxt, SNode** pTable, SNode* pJoinPare } code = translateAudit(pCxt, pRealTable, &name); #endif - if (TSDB_CODE_SUCCESS == code) - code = setTableVgroupList(pCxt, &name, pRealTable); + if (TSDB_CODE_SUCCESS == code) code = setTableVgroupList(pCxt, &name, pRealTable); if (TSDB_CODE_SUCCESS == code) { code = setTableIndex(pCxt, &name, pRealTable); } @@ -5063,7 +5063,7 @@ static int32_t createAllColumns(STranslateContext* pCxt, bool igTags, SNodeList* static int32_t createMultiResFunc(SFunctionNode* pSrcFunc, SExprNode* pExpr, SNode** ppNodeOut) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -5153,12 +5153,12 @@ static int32_t createMultiResFuncsParas(STranslateContext* pCxt, SNodeList* pSrc static int32_t createMultiResFuncs(SFunctionNode* pSrcFunc, SNodeList* pExprs, SNodeList** pOutput) { SNodeList* pFuncs = NULL; - int32_t code = nodesMakeList(&pFuncs); + int32_t code = nodesMakeList(&pFuncs); if (NULL == pFuncs) { return code; } - SNode* pExpr = NULL; + SNode* pExpr = NULL; FOREACH(pExpr, pExprs) { SNode* pNode = NULL; code = createMultiResFunc(pSrcFunc, (SExprNode*)pExpr, &pNode); @@ -5205,7 +5205,7 @@ static int32_t createTags(STranslateContext* pCxt, SNodeList** pOutput) { SSchema* pTagsSchema = getTableTagSchema(pMeta); for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_OUT_OF_MEMORY); } @@ -5291,7 +5291,7 @@ static int32_t getPositionValue(const SValueNode* pVal) { } static int32_t translateClausePosition(STranslateContext* pCxt, SNodeList* pProjectionList, SNodeList* pClauseList, - bool* pOther) { + bool* pOther) { *pOther = false; SNode* pNode = NULL; WHERE_EACH(pNode, pClauseList) { @@ -5350,8 +5350,8 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) { } static EDealRes needFillImpl(SNode* pNode, void* pContext) { - if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType - && FUNCTION_TYPE_GROUP_CONST_VALUE != ((SFunctionNode*)pNode)->funcType) { + if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType && + FUNCTION_TYPE_GROUP_CONST_VALUE != ((SFunctionNode*)pNode)->funcType) { *(bool*)pContext = true; return DEAL_RES_END; } @@ -5435,8 +5435,8 @@ static int32_t checkProjectAlias(STranslateContext* pCxt, SNodeList* pProjection SHashObj* pUserAliasSet = taosHashInit(LIST_LENGTH(pProjectionList), taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK); if (!pUserAliasSet) return terrno; - SNode* pProject = NULL; - int32_t code = TSDB_CODE_SUCCESS; + SNode* pProject = NULL; + int32_t code = TSDB_CODE_SUCCESS; FOREACH(pProject, 
pProjectionList) { SExprNode* pExpr = (SExprNode*)pProject; if (NULL != taosHashGet(pUserAliasSet, pExpr->userAlias, strlen(pExpr->userAlias))) { @@ -5458,11 +5458,9 @@ static int32_t translateProjectionList(STranslateContext* pCxt, SSelectStmt* pSe if (!pSelect->isSubquery) { return rewriteProjectAlias(pSelect->pProjectionList); } else { - SNode* pNode; + SNode* pNode; int32_t projIdx = 1; - FOREACH(pNode, pSelect->pProjectionList) { - ((SExprNode*)pNode)->projIdx = projIdx++; - } + FOREACH(pNode, pSelect->pProjectionList) { ((SExprNode*)pNode)->projIdx = projIdx++; } return TSDB_CODE_SUCCESS; } } @@ -5472,12 +5470,13 @@ typedef struct SReplaceGroupByAliasCxt { SNodeList* pProjectionList; } SReplaceGroupByAliasCxt; -static EDealRes replaceGroupByAliasImpl(SNode** pNode, void* pContext) { +static EDealRes translateGroupPartitionByImpl(SNode** pNode, void* pContext) { SReplaceGroupByAliasCxt* pCxt = pContext; SNodeList* pProjectionList = pCxt->pProjectionList; SNode* pProject = NULL; + int32_t code = TSDB_CODE_SUCCESS; + STranslateContext* pTransCxt = pCxt->pTranslateCxt; if (QUERY_NODE_VALUE == nodeType(*pNode)) { - STranslateContext* pTransCxt = pCxt->pTranslateCxt; SValueNode* pVal = (SValueNode*) *pNode; if (DEAL_RES_ERROR == translateValue(pTransCxt, pVal)) { return DEAL_RES_CONTINUE; @@ -5488,43 +5487,59 @@ static EDealRes replaceGroupByAliasImpl(SNode** pNode, void* pContext) { int32_t pos = getPositionValue(pVal); if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) { SNode* pNew = NULL; - int32_t code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), (SNode**)&pNew); + code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), (SNode**)&pNew); if (TSDB_CODE_SUCCESS != code) { pCxt->pTranslateCxt->errCode = code; return DEAL_RES_ERROR; } nodesDestroyNode(*pNode); *pNode = pNew; - return DEAL_RES_CONTINUE; - } else { + } + code = translateExpr(pTransCxt, pNode); + if (TSDB_CODE_SUCCESS != code) { + pTransCxt->errCode = code; + return DEAL_RES_ERROR; + } + return DEAL_RES_CONTINUE; + } else if (QUERY_NODE_COLUMN == nodeType(*pNode)) { + bool asAlias = false; + EDealRes res = translateColumnInGroupByClause(pTransCxt, (SColumnNode**)pNode, &asAlias); + if (DEAL_RES_ERROR == res) { + return DEAL_RES_ERROR; + } + pTransCxt->errCode = TSDB_CODE_SUCCESS; + if (nodeType(*pNode) == QUERY_NODE_COLUMN && !asAlias) { return DEAL_RES_CONTINUE; } - } else if (QUERY_NODE_COLUMN == nodeType(*pNode)) { - STranslateContext* pTransCxt = pCxt->pTranslateCxt; - return translateColumn(pTransCxt, (SColumnNode**)pNode); + code = translateExpr(pTransCxt, pNode); + if (TSDB_CODE_SUCCESS != code) { + pTransCxt->errCode = code; + return DEAL_RES_ERROR; + } + return DEAL_RES_CONTINUE; } - - return DEAL_RES_CONTINUE; + return doTranslateExpr(pNode, pTransCxt); } -static int32_t replaceGroupByAlias(STranslateContext* pCxt, SSelectStmt* pSelect) { +static int32_t translateGroupByList(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pGroupByList) { return TSDB_CODE_SUCCESS; } SReplaceGroupByAliasCxt cxt = { .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; - nodesRewriteExprsPostOrder(pSelect->pGroupByList, replaceGroupByAliasImpl, &cxt); + nodesRewriteExprsPostOrder(pSelect->pGroupByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; } -static int32_t replacePartitionByAlias(STranslateContext* pCxt, SSelectStmt* pSelect) { +static int32_t translatePartitionByList(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == 
pSelect->pPartitionByList) { return TSDB_CODE_SUCCESS; } + SReplaceGroupByAliasCxt cxt = { .pTranslateCxt = pCxt, .pProjectionList = pSelect->pProjectionList}; - nodesRewriteExprsPostOrder(pSelect->pPartitionByList, replaceGroupByAliasImpl, &cxt); + nodesRewriteExprsPostOrder(pSelect->pPartitionByList, translateGroupPartitionByImpl, &cxt); return pCxt->errCode; } @@ -5588,11 +5603,8 @@ static int32_t translateGroupBy(STranslateContext* pCxt, SSelectStmt* pSelect) { NODES_DESTORY_LIST(pSelect->pGroupByList); return TSDB_CODE_SUCCESS; } - code = replaceGroupByAlias(pCxt, pSelect); - } - if (TSDB_CODE_SUCCESS == code) { pSelect->timeLineResMode = TIME_LINE_NONE; - code = translateExprList(pCxt, pSelect->pGroupByList); + code = translateGroupByList(pCxt, pSelect); } return code; } @@ -5613,7 +5625,7 @@ static int32_t getQueryTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWi return TSDB_CODE_SUCCESS; } - SNode* pCond = NULL; + SNode* pCond = NULL; int32_t code = nodesCloneNode(pWhere, &pCond); if (NULL == pCond) { return code; @@ -5787,14 +5799,14 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_OFFSET_TOO_BIG); } if (!fixed) { - double offsetMonth = 0, intervalMonth = 0; + double offsetMonth = 0, intervalMonth = 0; int32_t code = getMonthsFromTimeVal(pOffset->datum.i, precision, pOffset->unit, &offsetMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } code = getMonthsFromTimeVal(pInter->datum.i, precision, pInter->unit, &intervalMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } if (offsetMonth > intervalMonth) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_OFFSET_TOO_BIG); @@ -5819,14 +5831,14 @@ static int32_t checkIntervalWindow(STranslateContext* pCxt, SIntervalWindowNode* return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_TOO_SMALL); } if (valInter) { - double slidingMonth = 0, intervalMonth = 0; + double slidingMonth = 0, intervalMonth = 0; int32_t code = getMonthsFromTimeVal(pSliding->datum.i, precision, pSliding->unit, &slidingMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } code = getMonthsFromTimeVal(pInter->datum.i, precision, pInter->unit, &intervalMonth); if (TSDB_CODE_SUCCESS != code) { - return code; + return code; } if (slidingMonth > intervalMonth) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTER_SLIDING_TOO_BIG); @@ -6055,7 +6067,7 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) { int32_t code = 0; if (QUERY_NODE_INTERVAL_WINDOW != nodeType(pSelect->pWindow)) { if (NULL != pSelect->pFromTable && QUERY_NODE_TEMP_TABLE == nodeType(pSelect->pFromTable) && - !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { + !isGlobalTimeLineQuery(((STempTableNode*)pSelect->pFromTable)->pSubquery)) { bool isTimelineAlignedQuery = false; code = isTimeLineAlignedQuery(pCxt->pCurrStmt, &isTimelineAlignedQuery); if (TSDB_CODE_SUCCESS != code) return code; @@ -6094,7 +6106,7 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t createDefaultFillNode(STranslateContext* pCxt, SNode** pOutput) { SFillNode* pFill = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FILL, (SNode**)&pFill); + int32_t code = nodesMakeNode(QUERY_NODE_FILL, (SNode**)&pFill); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -6117,7 +6129,7 @@ static int32_t createDefaultFillNode(STranslateContext* pCxt, 
SNode** pOutput) { static int32_t createDefaultEveryNode(STranslateContext* pCxt, SNode** pOutput) { SValueNode* pEvery = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pEvery); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pEvery); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -6287,10 +6299,7 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelec (QUERY_NODE_FUNCTION == nodeType(pPar) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPar)->funcType))) { pSelect->timeLineResMode = TIME_LINE_MULTI; } - code = replacePartitionByAlias(pCxt, pSelect); - if (TSDB_CODE_SUCCESS == code) { - code = translateExprList(pCxt, pSelect->pPartitionByList); - } + code = translatePartitionByList(pCxt, pSelect); } if (TSDB_CODE_SUCCESS == code) { code = translateExprList(pCxt, pSelect->pTags); @@ -6308,7 +6317,7 @@ typedef struct SEqCondTbNameTableInfo { //[tableAlias.]tbname = tbNamVal static int32_t isOperatorEqTbnameCond(STranslateContext* pCxt, SOperatorNode* pOperator, char** ppTableAlias, - SArray** ppTabNames, bool* pRet) { + SArray** ppTabNames, bool* pRet) { if (pOperator->opType != OP_TYPE_EQUAL) { *pRet = false; return TSDB_CODE_SUCCESS; @@ -6359,7 +6368,7 @@ static int32_t isOperatorEqTbnameCond(STranslateContext* pCxt, SOperatorNode* pO //[tableAlias.]tbname in (value1, value2, ...) static int32_t isOperatorTbnameInCond(STranslateContext* pCxt, SOperatorNode* pOperator, char** ppTableAlias, - SArray** ppTbNames, bool* pRet) { + SArray** ppTbNames, bool* pRet) { if (pOperator->opType != OP_TYPE_IN) return false; if (nodeType(pOperator->pLeft) != QUERY_NODE_FUNCTION || ((SFunctionNode*)(pOperator->pLeft))->funcType != FUNCTION_TYPE_TBNAME || @@ -6383,8 +6392,8 @@ static int32_t isOperatorTbnameInCond(STranslateContext* pCxt, SOperatorNode* pO SNodeListNode* pValueListNode = (SNodeListNode*)pOperator->pRight; *ppTbNames = taosArrayInit(LIST_LENGTH(pValueListNode->pNodeList), sizeof(void*)); if (!*ppTbNames) return terrno; - SNodeList* pValueNodeList = pValueListNode->pNodeList; - SNode* pValNode = NULL; + SNodeList* pValueNodeList = pValueListNode->pNodeList; + SNode* pValNode = NULL; FOREACH(pValNode, pValueNodeList) { if (nodeType(pValNode) != QUERY_NODE_VALUE) { *pRet = false; @@ -6400,7 +6409,8 @@ static int32_t isOperatorTbnameInCond(STranslateContext* pCxt, SOperatorNode* pO return TSDB_CODE_SUCCESS; } -static int32_t findEqCondTbNameInOperatorNode(STranslateContext* pCxt, SNode* pWhere, SEqCondTbNameTableInfo* pInfo, bool* pRet) { +static int32_t findEqCondTbNameInOperatorNode(STranslateContext* pCxt, SNode* pWhere, SEqCondTbNameTableInfo* pInfo, + bool* pRet) { int32_t code = TSDB_CODE_SUCCESS; char* pTableAlias = NULL; bool eqTbnameCond = false, tbnameInCond = false; @@ -6467,7 +6477,7 @@ static int32_t findEqualCondTbnameInLogicCondAnd(STranslateContext* pCxt, SNode* static int32_t unionEqualCondTbnamesOfSameTable(SArray* aTableTbnames, SEqCondTbNameTableInfo* pInfo) { int32_t code = TSDB_CODE_SUCCESS; - bool bFoundTable = false; + bool bFoundTable = false; for (int i = 0; i < taosArrayGetSize(aTableTbnames); ++i) { SEqCondTbNameTableInfo* info = taosArrayGet(aTableTbnames, i); if (info->pRealTable == pInfo->pRealTable) { @@ -6547,7 +6557,7 @@ static int32_t findEqualCondTbname(STranslateContext* pCxt, SNode* pWhere, SArra } static void findVgroupsFromEqualTbname(STranslateContext* pCxt, SArray* aTbnames, const char* dbName, - int32_t numOfVgroups, SVgroupsInfo* vgsInfo) { + int32_t numOfVgroups, SVgroupsInfo* vgsInfo) { 
int32_t nVgroups = 0; int32_t nTbls = taosArrayGetSize(aTbnames); @@ -6584,10 +6594,10 @@ static void findVgroupsFromEqualTbname(STranslateContext* pCxt, SArray* aTbnames } static int32_t replaceToChildTableQuery(STranslateContext* pCxt, SEqCondTbNameTableInfo* pInfo) { - SName snameTb = {0}; - int32_t code = 0; + SName snameTb = {0}; + int32_t code = 0; SRealTableNode* pRealTable = pInfo->pRealTable; - char* tbName = taosArrayGetP(pInfo->aTbnames, 0); + char* tbName = taosArrayGetP(pInfo->aTbnames, 0); toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, tbName, &snameTb); STableMeta* pMeta = NULL; @@ -6604,14 +6614,14 @@ static int32_t replaceToChildTableQuery(STranslateContext* pCxt, SEqCondTbNameTa pRealTable->stbRewrite = true; if (pRealTable->pTsmas) { - // if select from a child table, fetch it's corresponding tsma target child table infos + // if select from a child table, fetch its corresponding tsma target child table infos char buf[TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN + 1]; for (int32_t i = 0; i < pRealTable->pTsmas->size; ++i) { STableTSMAInfo* pTsma = taosArrayGetP(pRealTable->pTsmas, i); SName tsmaTargetTbName = {0}; toName(pCxt->pParseCxt->acctId, pRealTable->table.dbName, "", &tsmaTargetTbName); int32_t len = tsnprintf(buf, TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN, "%s.%s_%s", pTsma->dbFName, pTsma->name, - pRealTable->table.tableName); + pRealTable->table.tableName); len = taosCreateMD5Hash(buf, len); strncpy(tsmaTargetTbName.tname, buf, MD5_OUTPUT_LEN); STsmaTargetTbInfo ctbInfo = {0}; @@ -6639,16 +6649,16 @@ _return: } static int32_t setEqualTbnameTableVgroups(STranslateContext* pCxt, SSelectStmt* pSelect, SArray* aTables) { - int32_t code = TSDB_CODE_SUCCESS; - int32_t aTableNum = taosArrayGetSize(aTables); - int32_t nTbls = 0; - bool stableQuery = false; + int32_t code = TSDB_CODE_SUCCESS; + int32_t aTableNum = taosArrayGetSize(aTables); + int32_t nTbls = 0; + bool stableQuery = false; SEqCondTbNameTableInfo* pInfo = NULL; qDebug("start to update stable vg for tbname optimize, aTableNum:%d", aTableNum); for (int i = 0; i < aTableNum; ++i) { pInfo = taosArrayGet(aTables, i); - int32_t numOfVgs = pInfo->pRealTable->pVgroupList->numOfVgroups; + int32_t numOfVgs = pInfo->pRealTable->pVgroupList->numOfVgroups; nTbls = taosArrayGetSize(pInfo->aTbnames); SVgroupsInfo* vgsInfo = taosMemoryMalloc(sizeof(SVgroupsInfo) + nTbls * sizeof(SVgroupInfo)); @@ -6714,7 +6724,8 @@ static int32_t setEqualTbnameTableVgroups(STranslateContext* pCxt, SSelectStmt* } } - qDebug("before ctbname optimize, code:%d, aTableNum:%d, nTbls:%d, stableQuery:%d", code, aTableNum, nTbls, stableQuery); + qDebug("before ctbname optimize, code:%d, aTableNum:%d, nTbls:%d, stableQuery:%d", code, aTableNum, nTbls, + stableQuery); if (TSDB_CODE_SUCCESS == code && 1 == aTableNum && 1 == nTbls && stableQuery && NULL == pInfo->pRealTable->pTsmas) { code = replaceToChildTableQuery(pCxt, pInfo); @@ -6770,13 +6781,13 @@ static int32_t checkLimit(STranslateContext* pCxt, SSelectStmt* pSelect) { static int32_t createPrimaryKeyColByTable(STranslateContext* pCxt, STableNode* pTable, SNode** pPrimaryKey) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return code; } pCol->colId = PRIMARYKEY_TIMESTAMP_COL_ID; strcpy(pCol->colName, ROWTS_PSEUDO_COLUMN_NAME); - bool found = false; + bool found = false; code = findAndSetColumn(pCxt, &pCol, pTable, &found, true); if
(TSDB_CODE_SUCCESS != code || !found) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_VALID_PRIM_TS_REQUIRED); @@ -6808,8 +6819,8 @@ static EDealRes collectTableAlias(SNode* pNode, void* pContext) { *(SSHashObj**)pContext = pHash; } - if (TSDB_CODE_SUCCESS != tSimpleHashPut(*(SSHashObj**)pContext, pCol->tableAlias, strlen(pCol->tableAlias), pCol->tableAlias, - sizeof(pCol->tableAlias))) { + if (TSDB_CODE_SUCCESS != tSimpleHashPut(*(SSHashObj**)pContext, pCol->tableAlias, strlen(pCol->tableAlias), + pCol->tableAlias, sizeof(pCol->tableAlias))) { return DEAL_RES_ERROR; } @@ -6869,13 +6880,13 @@ static int32_t appendTsForImplicitTsFunc(STranslateContext* pCxt, SSelectStmt* p static int32_t createPkColByTable(STranslateContext* pCxt, SRealTableNode* pTable, SNode** pPk) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return code; } pCol->colId = pTable->pMeta->schema[1].colId; strcpy(pCol->colName, pTable->pMeta->schema[1].name); - bool found = false; + bool found = false; code = findAndSetColumn(pCxt, &pCol, (STableNode*)pTable, &found, true); if (TSDB_CODE_SUCCESS != code || !found) { return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INTERNAL_ERROR); @@ -6943,7 +6954,7 @@ static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) { (QUERY_NODE_COLUMN == nodeType(pProject) && !nodesEqualNode(*pNode, pProject)))) { continue; } - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pProject, &pNew); if (NULL == pNew) { pCxt->pTranslateCxt->errCode = code; @@ -6966,7 +6977,7 @@ static EDealRes replaceOrderByAliasImpl(SNode** pNode, void* pContext) { } int32_t pos = getPositionValue(pVal); if (0 < pos && pos <= LIST_LENGTH(pProjectionList)) { - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(nodesListGetNode(pProjectionList, pos - 1), &pNew); if (NULL == pNew) { pCxt->pTranslateCxt->errCode = code; @@ -7075,8 +7086,7 @@ static int32_t translateSelectFrom(STranslateContext* pCxt, SSelectStmt* pSelect } if (TSDB_CODE_SUCCESS == code) { code = resetSelectFuncNumWithoutDup(pSelect); - if (TSDB_CODE_SUCCESS == code) - code = checkAggColCoexist(pCxt, pSelect); + if (TSDB_CODE_SUCCESS == code) code = checkAggColCoexist(pCxt, pSelect); } /* if (TSDB_CODE_SUCCESS == code) { @@ -7137,7 +7147,7 @@ static int32_t translateSelect(STranslateContext* pCxt, SSelectStmt* pSelect) { static SNode* createSetOperProject(const char* pTableAlias, SNode* pNode) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (TSDB_CODE_SUCCESS != code) { return NULL; } @@ -7350,7 +7360,7 @@ static int32_t translateInsertQuery(STranslateContext* pCxt, SInsertStmt* pInser static int32_t addOrderByPrimaryKeyToQueryImpl(STranslateContext* pCxt, SNode* pPrimaryKeyExpr, SNodeList** pOrderByList) { SOrderByExprNode* pOrderByExpr = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR, (SNode**)&pOrderByExpr); + int32_t code = nodesMakeNode(QUERY_NODE_ORDER_BY_EXPR, (SNode**)&pOrderByExpr); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -7493,7 +7503,7 @@ static int32_t buildCreateDbRetentions(const SNodeList* pRetentions, SCreateDbRe } static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pStmt, SCreateDbReq* pReq) { - SName name = {0}; + SName name = {0}; int32_t code = 
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, pReq->db); @@ -8005,8 +8015,8 @@ static int32_t checkDatabaseOptions(STranslateContext* pCxt, const char* pDbName code = checkOptionsDependency(pCxt, pDbName, pOptions); } if (TSDB_CODE_SUCCESS == code) { - code = - checkDbRangeOption(pCxt, "s3_chunksize", pOptions->s3ChunkSize, TSDB_MIN_S3_CHUNK_SIZE, TSDB_MAX_S3_CHUNK_SIZE); + code = checkDbRangeOption(pCxt, "s3_chunkpages", pOptions->s3ChunkSize, TSDB_MIN_S3_CHUNK_SIZE, + TSDB_MAX_S3_CHUNK_SIZE); } if (TSDB_CODE_SUCCESS == code) { code = checkDbRangeOption(pCxt, "s3_compact", pOptions->s3Compact, TSDB_MIN_S3_COMPACT, TSDB_MAX_S3_COMPACT); @@ -8026,7 +8036,7 @@ static int32_t checkCreateDatabase(STranslateContext* pCxt, SCreateDatabaseStmt* CMD_TYPE* pCmdReq = genericCmd; \ char* cmdSql = taosMemoryMalloc(sqlLen); \ if (cmdSql == NULL) { \ - return terrno; \ + return terrno; \ } \ memcpy(cmdSql, sql, sqlLen); \ pCmdReq->sqlLen = sqlLen; \ @@ -8210,7 +8220,7 @@ static int32_t translateCreateDatabase(STranslateContext* pCxt, SCreateDatabaseS static int32_t translateDropDatabase(STranslateContext* pCxt, SDropDatabaseStmt* pStmt) { SDropDbReq dropReq = {0}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, dropReq.db); dropReq.ignoreNotExists = pStmt->ignoreNotExists; @@ -8292,7 +8302,7 @@ static int32_t translateAlterDatabase(STranslateContext* pCxt, SAlterDatabaseStm static int32_t translateTrimDatabase(STranslateContext* pCxt, STrimDatabaseStmt* pStmt) { STrimDbReq req = {.maxSpeed = pStmt->maxSpeed}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, req.db); return buildCmdMsg(pCxt, TDMT_MND_TRIM_DB, (FSerializeFunc)tSerializeSTrimDbReq, &req); @@ -8329,7 +8339,7 @@ static int32_t columnDefNodeToField(SNodeList* pList, SArray** pArray, bool calB if (!pArray) return terrno; int32_t code = TSDB_CODE_SUCCESS; - SNode* pNode; + SNode* pNode; FOREACH(pNode, pList) { SColumnDefNode* pCol = (SColumnDefNode*)pNode; SFieldWithOptions field = {.type = pCol->dataType.type, .bytes = calcTypeBytes(pCol->dataType)}; @@ -8801,7 +8811,7 @@ typedef struct SSampleAstInfo { static int32_t buildTableForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) { SRealTableNode* pTable = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pTable); + int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pTable); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -8815,7 +8825,7 @@ static int32_t buildTableForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) { static int32_t addWstartToSampleProjects(SNodeList* pProjectionList) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8826,7 +8836,7 @@ static int32_t addWstartToSampleProjects(SNodeList* pProjectionList) { static int32_t addWendToSampleProjects(SNodeList* pProjectionList) { 
SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8837,7 +8847,7 @@ static int32_t addWdurationToSampleProjects(SNodeList* pProjectionList) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8874,7 +8884,7 @@ static int32_t buildProjectsForSampleAst(SSampleAstInfo* pInfo, SNodeList** pLis static int32_t buildIntervalForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) { SIntervalWindowNode* pInterval = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_INTERVAL_WINDOW, (SNode**)&pInterval); + int32_t code = nodesMakeNode(QUERY_NODE_INTERVAL_WINDOW, (SNode**)&pInterval); if (NULL == pInterval) { return code; } @@ -8896,7 +8906,7 @@ static int32_t buildIntervalForSampleAst(SSampleAstInfo* pInfo, SNode** pOutput) static int32_t buildSampleAst(STranslateContext* pCxt, SSampleAstInfo* pInfo, char** pAst, int32_t* pLen, char** pExpr, int32_t* pExprLen, int32_t* pProjectionTotalLen) { SSelectStmt* pSelect = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); + int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); if (NULL == pSelect) { return code; } @@ -8937,7 +8947,7 @@ static void clearSampleAstInfo(SSampleAstInfo* pInfo) { static int32_t makeIntervalVal(SRetention* pRetension, int8_t precision, SNode** ppNode) { SValueNode* pVal = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if (NULL == pVal) { return code; } @@ -8964,7 +8974,7 @@ static int32_t makeIntervalVal(SRetention* pRetension, int8_t precision, SNode** static int32_t createColumnFromDef(SColumnDefNode* pDef, SNode** ppCol) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (NULL == pCol) { return code; } @@ -8975,7 +8985,7 @@ static int32_t createColumnFromDef(SColumnDefNode* pDef, SNode** ppCol) { static int32_t createRollupFunc(SNode* pSrcFunc, SColumnDefNode* pColDef, SNode** ppRollupFunc) { SFunctionNode* pFunc = NULL; - int32_t code = nodesCloneNode(pSrcFunc, (SNode**)&pFunc); + int32_t code = nodesCloneNode(pSrcFunc, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -8995,7 +9005,7 @@ static int32_t createRollupFunc(SNode* pSrcFunc, SColumnDefNode* pColDef, SNode* static int32_t createRollupFuncs(SCreateTableStmt* pStmt, SNodeList** ppList) { SNodeList* pFuncs = NULL; - int32_t code = nodesMakeList(&pFuncs); + int32_t code = nodesMakeList(&pFuncs); if (NULL == pFuncs) { return code; } @@ -9024,7 +9034,7 @@ static int32_t createRollupFuncs(SCreateTableStmt* pStmt, SNodeList** ppList) { } *ppList = pFuncs; - return code;; + return code; } static int32_t createRollupTableMeta(SCreateTableStmt* pStmt, int8_t precision, STableMeta** ppTbMeta) { @@ -9056,7 +9067,7 @@ static int32_t createRollupTableMeta(SCreateTableStmt* pStmt, int8_t precision, static int32_t createTbnameFunction(SFunctionNode** ppFunc) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) {
return code; } @@ -9153,8 +9164,7 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm // columnDefNodeToField(pStmt->pCols, &pReq->pColumns, true); // columnDefNodeToField(pStmt->pTags, &pReq->pTags, true); code = columnDefNodeToField(pStmt->pCols, &pReq->pColumns, true); - if (TSDB_CODE_SUCCESS == code) - code = tagDefNodeToField(pStmt->pTags, &pReq->pTags, true); + if (TSDB_CODE_SUCCESS == code) code = tagDefNodeToField(pStmt->pTags, &pReq->pTags, true); if (TSDB_CODE_SUCCESS == code) { pReq->numOfColumns = LIST_LENGTH(pStmt->pCols); pReq->numOfTags = LIST_LENGTH(pStmt->pTags); @@ -9174,8 +9184,7 @@ static int32_t buildCreateStbReq(STranslateContext* pCxt, SCreateTableStmt* pStm toName(pCxt->pParseCxt->acctId, pStmt->dbName, pStmt->tableName, &tableName); code = tNameExtractFullName(&tableName, pReq->name); } - if (TSDB_CODE_SUCCESS == code) - code = collectUseTable(&tableName, pCxt->pTables); + if (TSDB_CODE_SUCCESS == code) code = collectUseTable(&tableName, pCxt->pTables); if (TSDB_CODE_SUCCESS == code) { code = collectUseTable(&tableName, pCxt->pTargetTables); } @@ -9527,12 +9536,13 @@ static int32_t translateAlterSuperTable(STranslateContext* pCxt, SAlterTableStmt static int32_t translateUseDatabase(STranslateContext* pCxt, SUseDatabaseStmt* pStmt) { SUseDbReq usedbReq = {0}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS == code) { code = tNameExtractFullName(&name, usedbReq.db); } if (TSDB_CODE_SUCCESS == code) - code = getDBVgVersion(pCxt, usedbReq.db, &usedbReq.vgVersion, &usedbReq.dbId, &usedbReq.numOfTable, &usedbReq.stateTs); + code = + getDBVgVersion(pCxt, usedbReq.db, &usedbReq.vgVersion, &usedbReq.dbId, &usedbReq.numOfTable, &usedbReq.stateTs); if (TSDB_CODE_SUCCESS == code) { code = buildCmdMsg(pCxt, TDMT_MND_USE_DB, (FSerializeFunc)tSerializeSUseDbReq, &usedbReq); } @@ -9801,9 +9811,9 @@ static int32_t buildCreateSmaReq(STranslateContext* pCxt, SCreateIndexStmt* pStm pReq->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; pReq->offset = (NULL != pStmt->pOptions->pOffset ? ((SValueNode*)pStmt->pOptions->pOffset)->datum.i : 0); pReq->sliding = - (NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pReq->interval); + (NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->datum.i : pReq->interval); pReq->slidingUnit = - (NULL != pStmt->pOptions->pSliding ? ((SValueNode*)pStmt->pOptions->pSliding)->unit : pReq->intervalUnit); + (NULL != pStmt->pOptions->pSliding ? 
((SValueNode*)pStmt->pOptions->pSliding)->unit : pReq->intervalUnit); } if (TSDB_CODE_SUCCESS == code && NULL != pStmt->pOptions->pStreamOptions) { @@ -10112,8 +10122,7 @@ static int32_t buildCreateTopicReq(STranslateContext* pCxt, SCreateTopicStmt* pS } else if ('\0' != pStmt->subDbName[0]) { pReq->subType = TOPIC_SUB_TYPE__DB; code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->subDbName, strlen(pStmt->subDbName)); - if (TSDB_CODE_SUCCESS == code) - (void)tNameGetFullDbName(&name, pReq->subDbName); + if (TSDB_CODE_SUCCESS == code) (void)tNameGetFullDbName(&name, pReq->subDbName); } else { pReq->subType = TOPIC_SUB_TYPE__COLUMN; char* dbName = ((SRealTableNode*)(((SSelectStmt*)pStmt->pQuery)->pFromTable))->table.dbName; @@ -10202,7 +10211,7 @@ static int32_t checkCollectTopicTags(STranslateContext* pCxt, SCreateTopicStmt* // for (int32_t i = 0; i < pMeta->tableInfo.numOfColumns; ++i) { SSchema* column = &pMeta->schema[0]; SColumnNode* col = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); if (NULL == col) { return code; } @@ -10239,7 +10248,7 @@ static int32_t buildQueryForTableTopic(STranslateContext* pCxt, SCreateTopicStmt return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_SYNTAX_ERROR, "Only supertable table can be used"); } - SNodeList* pProjection = NULL; + SNodeList* pProjection = NULL; SRealTableNode* realTable = NULL; code = checkCollectTopicTags(pCxt, pStmt, pMeta, &pProjection); if (TSDB_CODE_SUCCESS == code) { @@ -10518,7 +10527,7 @@ static int32_t setColumnDefNodePrimaryKey(SColumnDefNode* pNode, bool isPk) { if (!pNode->pOptions) { code = nodesMakeNode(QUERY_NODE_COLUMN_OPTIONS, &pNode->pOptions); } - if (TSDB_CODE_SUCCESS ==code) ((SColumnOptions*)pNode->pOptions)->bPrimaryKey = isPk; + if (TSDB_CODE_SUCCESS == code) ((SColumnOptions*)pNode->pOptions)->bPrimaryKey = isPk; return code; } @@ -10530,7 +10539,7 @@ static int32_t addWstartTsToCreateStreamQueryImpl(STranslateContext* pCxt, SSele return TSDB_CODE_SUCCESS; } SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -10589,7 +10598,7 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream SNode* pPart = NULL; FOREACH(pPart, pSelect->pPartitionByList) { if (0 == strcmp(getTagNameForCreateStreamTag(pTag), ((SExprNode*)pPart)->userAlias)) { - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pPart, &pNew); if (TSDB_CODE_SUCCESS != code) return code; if (TSDB_CODE_SUCCESS != (code = nodesListMakeStrictAppend(&pSelect->pTags, pNew))) { @@ -10608,7 +10617,7 @@ static int32_t addTagsToCreateStreamQuery(STranslateContext* pCxt, SCreateStream static int32_t createNullValue(SNode** ppNode) { SValueNode* pValue = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); if (NULL == pValue) { return code; } @@ -10625,8 +10634,7 @@ static int32_t addNullTagsForExistTable(STranslateContext* pCxt, STableMeta* pMe for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < numOfTags; ++i) { SNode* pNull = NULL; code = createNullValue(&pNull); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&pSelect->pTags, pNull); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&pSelect->pTags, pNull); } return code; } @@ -10643,7 
+10651,7 @@ static EDealRes rewriteSubtable(SNode** pNode, void* pContext) { SNode* pPart = NULL; FOREACH(pPart, pCxt->pPartitionList) { if (0 == strcmp(((SColumnNode*)*pNode)->colName, ((SExprNode*)pPart)->userAlias)) { - SNode* pNew = NULL; + SNode* pNew = NULL; int32_t code = nodesCloneNode(pPart, &pNew); if (NULL == pNew) { pCxt->pCxt->errCode = code; @@ -10686,8 +10694,7 @@ static int32_t addNullTagsForCreateTable(STranslateContext* pCxt, SCreateStreamS for (int32_t i = 0; TSDB_CODE_SUCCESS == code && i < LIST_LENGTH(pStmt->pTags); ++i) { SNode* pNull = NULL; code = createNullValue(&pNull); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&((SSelectStmt*)pStmt->pQuery)->pTags, pNull); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&((SSelectStmt*)pStmt->pQuery)->pTags, pNull); } return code; } @@ -10700,9 +10707,9 @@ static int32_t addNullTagsToCreateStreamQuery(STranslateContext* pCxt, STableMet } static int32_t addColDefNodeByProj(SNodeList** ppCols, const SNode* pProject, int8_t flags) { - const SExprNode* pExpr = (const SExprNode*)pProject; - SColumnDefNode* pColDef = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); + const SExprNode* pExpr = (const SExprNode*)pProject; + SColumnDefNode* pColDef = NULL; + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); if (TSDB_CODE_SUCCESS != code) return code; strcpy(pColDef->colName, pExpr->userAlias); pColDef->dataType = pExpr->resType; @@ -10872,10 +10879,11 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm if (NULL != pStmt->pOptions->pDelay) { SValueNode* pVal = (SValueNode*)pStmt->pOptions->pDelay; - int64_t minDelay = 0; - char* str = "5s"; - if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && TSDB_CODE_SUCCESS == - parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) { + int64_t minDelay = 0; + char* str = "5s"; + if (DEAL_RES_ERROR != translateValue(pCxt, pVal) && + TSDB_CODE_SUCCESS == + parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) { if (pVal->datum.i < minDelay) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "stream max delay must be bigger than 5 session"); @@ -10907,7 +10915,7 @@ static int32_t adjustDataTypeOfProjections(STranslateContext* pCxt, const STable REPLACE_NODE(pFunc); } SColumnDefNode* pColDef = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN_DEF, (SNode**)&pColDef); if (TSDB_CODE_SUCCESS != code) return code; strcpy(pColDef->colName, pSchema->name); pColDef->dataType = dt; @@ -10942,7 +10950,7 @@ static int32_t projColPosCompar(const void* l, const void* r) { static void projColPosDelete(void* p) { nodesDestroyNode(((SProjColPos*)p)->pProj); } static int32_t addProjToProjColPos(STranslateContext* pCxt, const SSchema* pSchema, SNode* pProj, SArray* pProjColPos) { - SNode* pNewProj = NULL; + SNode* pNewProj = NULL; int32_t code = nodesCloneNode(pProj, &pNewProj); if (NULL == pNewProj) { return code; @@ -11170,8 +11178,7 @@ static int32_t adjustOrderOfTags(STranslateContext* pCxt, SNodeList* pTags, cons } SNode* pNull = NULL; code = createNullValue(&pNull); - if (TSDB_CODE_SUCCESS == code) - code = nodesListStrictAppend(pNewTagExprs, pNull); + if (TSDB_CODE_SUCCESS == code) code = nodesListStrictAppend(pNewTagExprs, pNull); } } @@ -11275,7 +11282,7 @@ static int32_t 
translateStreamTargetTable(STranslateContext* pCxt, SCreateStream static int32_t createLastTsSelectStmt(char* pDb, const char* pTable, const char* pkColName, SNode** pQuery) { SColumnNode* col = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&col); if (NULL == col) { return code; } @@ -11623,7 +11630,7 @@ static int32_t createStreamReqVersionInfo(SSDataBlock* pBlock, SArray** pArray, for (int32_t i = 0; i < pBlock->info.rows; ++i) { SVgroupVer v = {.vgId = *(int32_t*)colDataGetData(pCol1, i), .ver = *(int64_t*)colDataGetData(pCol2, i)}; - if((taosArrayPush(*pArray, &v)) == NULL) { + if ((taosArrayPush(*pArray, &v)) == NULL) { taosArrayDestroy(*pArray); return terrno; } @@ -11678,7 +11685,7 @@ int32_t translatePostCreateStream(SParseContext* pParseCxt, SQuery* pQuery, SSDa static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pStmt) { SMDropStreamReq dropReq = {0}; SName name; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, dropReq.name); dropReq.igNotExists = pStmt->ignoreNotExists; @@ -11690,7 +11697,7 @@ static int32_t translateDropStream(STranslateContext* pCxt, SDropStreamStmt* pSt static int32_t translatePauseStream(STranslateContext* pCxt, SPauseStreamStmt* pStmt) { SMPauseStreamReq req = {0}; SName name; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, req.name); req.igNotExists = pStmt->ignoreNotExists; @@ -11700,7 +11707,7 @@ static int32_t translatePauseStream(STranslateContext* pCxt, SPauseStreamStmt* p static int32_t translateResumeStream(STranslateContext* pCxt, SResumeStreamStmt* pStmt) { SMResumeStreamReq req = {0}; SName name; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->streamName, strlen(pStmt->streamName)); if (TSDB_CODE_SUCCESS != code) return code; (void)tNameGetFullDbName(&name, req.name); req.igNotExists = pStmt->ignoreNotExists; @@ -11775,7 +11782,7 @@ static int32_t translateDropView(STranslateContext* pCxt, SDropViewStmt* pStmt) SCMDropViewReq dropReq = {0}; SName name = {0}; - int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); + int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); if (TSDB_CODE_SUCCESS == code) { (void)tNameGetFullDbName(&name, dropReq.dbFName); strncpy(dropReq.name, pStmt->viewName, sizeof(dropReq.name) - 1); @@ -11871,7 +11878,7 @@ static int32_t translateDropFunction(STranslateContext* pCxt, SDropFunctionStmt* static int32_t createRealTableForGrantTable(SGrantStmt* pStmt, SRealTableNode** pTable) { SRealTableNode* pRealTable = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pRealTable); + int32_t code = nodesMakeNode(QUERY_NODE_REAL_TABLE, (SNode**)&pRealTable); if (NULL == pRealTable) { return code; } @@ -12074,11 +12081,10 @@ static int32_t translateShowCreateDatabase(STranslateContext* pCxt, 
SShowCreateD return terrno; } - SName name; + SName name; int32_t code = tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName)); (void)tNameGetFullDbName(&name, pStmt->dbFName); - if (TSDB_CODE_SUCCESS == code) - return getDBCfg(pCxt, pStmt->dbName, (SDbCfgInfo*)pStmt->pCfg); + if (TSDB_CODE_SUCCESS == code) return getDBCfg(pCxt, pStmt->dbName, (SDbCfgInfo*)pStmt->pCfg); return code; } @@ -12108,7 +12114,7 @@ static int32_t translateShowCreateView(STranslateContext* pCxt, SShowCreateViewS static int32_t createColumnNodeWithName(const char* name, SNode** ppCol) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (!pCol) return code; tstrncpy(pCol->colName, name, TSDB_COL_NAME_LEN); tstrncpy(pCol->node.aliasName, name, TSDB_COL_NAME_LEN); @@ -12170,7 +12176,7 @@ static int32_t buildTSMAAstStreamSubTable(SCreateTSMAStmt* pStmt, SMCreateSmaReq SFunctionNode* pConcatFunc = NULL; code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pConcatFunc); if (TSDB_CODE_SUCCESS != code) goto _end; - SValueNode* pVal = NULL; + SValueNode* pVal = NULL; code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pVal); if (TSDB_CODE_SUCCESS != code) goto _end; @@ -12218,8 +12224,7 @@ static int32_t buildTSMAAst(STranslateContext* pCxt, SCreateTSMAStmt* pStmt, SMC info.pDbName = pStmt->dbName; info.pTableName = tbName; code = nodesCloneList(pStmt->pOptions->pFuncs, &info.pFuncs); - if (TSDB_CODE_SUCCESS == code) - code = nodesCloneNode(pStmt->pOptions->pInterval, &info.pInterval); + if (TSDB_CODE_SUCCESS == code) code = nodesCloneNode(pStmt->pOptions->pInterval, &info.pInterval); SFunctionNode* pTbnameFunc = NULL; if (TSDB_CODE_SUCCESS == code) { @@ -12244,10 +12249,9 @@ static int32_t buildTSMAAst(STranslateContext* pCxt, SCreateTSMAStmt* pStmt, SMC } code = nodesListAppend(info.pPartitionByList, pTagCol); if (TSDB_CODE_SUCCESS == code) { - SNode*pNew = NULL; + SNode* pNew = NULL; code = nodesCloneNode(pTagCol, &pNew); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&info.pTags, pNew); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&info.pTags, pNew); } } @@ -12394,7 +12398,7 @@ static int32_t buildCreateTSMAReq(STranslateContext* pCxt, SCreateTSMAStmt* pStm pReq->interval = ((SValueNode*)pStmt->pOptions->pInterval)->datum.i; pReq->intervalUnit = ((SValueNode*)pStmt->pOptions->pInterval)->unit; -#define TSMA_MIN_INTERVAL_MS 1000 * 60 // 1m +#define TSMA_MIN_INTERVAL_MS 1000 * 60 // 1m #define TSMA_MAX_INTERVAL_MS (60UL * 60UL * 1000UL * 24UL * 365UL) // 1y if (!IS_CALENDAR_TIME_DURATION(pReq->intervalUnit)) { @@ -12405,8 +12409,7 @@ static int32_t buildCreateTSMAReq(STranslateContext* pCxt, SCreateTSMAStmt* pStm } else { if (pReq->intervalUnit == TIME_UNIT_MONTH && (pReq->interval < 1 || pReq->interval > 12)) return TSDB_CODE_TSMA_INVALID_INTERVAL; - if (pReq->intervalUnit == TIME_UNIT_YEAR && (pReq->interval != 1)) - return TSDB_CODE_TSMA_INVALID_INTERVAL; + if (pReq->intervalUnit == TIME_UNIT_YEAR && (pReq->interval != 1)) return TSDB_CODE_TSMA_INVALID_INTERVAL; } STableMeta* pTableMeta = NULL; @@ -12422,7 +12425,7 @@ static int32_t buildCreateTSMAReq(STranslateContext* pCxt, SCreateTSMAStmt* pStm if (TSDB_CODE_SUCCESS == code) { SValueNode* pInterval = (SValueNode*)pStmt->pOptions->pInterval; if (checkRecursiveTsmaInterval(pRecursiveTsma->interval, pRecursiveTsma->unit, pInterval->datum.i, - pInterval->unit, pDbInfo.precision, true)) { + 
pInterval->unit, pDbInfo.precision, true)) { } else { code = TSDB_CODE_TSMA_INVALID_RECURSIVE_INTERVAL; } @@ -13044,7 +13047,7 @@ int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pS static int32_t createStarCol(SNode** ppNode) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (NULL == pCol) { return code; } @@ -13055,7 +13058,7 @@ static int32_t createStarCol(SNode** ppNode) { static int32_t createProjectCol(const char* pProjCol, SNode** ppNode) { SColumnNode* pCol = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); + int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)&pCol); if (NULL == pCol) { return code; } @@ -13098,7 +13101,7 @@ static int32_t createProjectCols(int32_t ncols, const char* const pCols[], SNode static int32_t createSimpleSelectStmtImpl(const char* pDb, const char* pTable, SNodeList* pProjectionList, SSelectStmt** pStmt) { SSelectStmt* pSelect = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); + int32_t code = nodesMakeNode(QUERY_NODE_SELECT_STMT, (SNode**)&pSelect); if (NULL == pSelect) { return code; } @@ -13153,7 +13156,7 @@ static int32_t createOperatorNode(EOperatorType opType, const char* pColName, co } SOperatorNode* pOper = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); if (NULL == pOper) { return code; } @@ -13178,7 +13181,7 @@ static int32_t createOperatorNode(EOperatorType opType, const char* pColName, co static int32_t createParOperatorNode(EOperatorType opType, const char* pLeftCol, const char* pRightCol, SNode** ppResOp) { SOperatorNode* pOper = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); if (TSDB_CODE_SUCCESS != code) return code; pOper->opType = opType; @@ -13201,7 +13204,7 @@ static int32_t createParOperatorNode(EOperatorType opType, const char* pLeftCol, static int32_t createIsOperatorNode(EOperatorType opType, const char* pColName, SNode** pOp) { SOperatorNode* pOper = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); + int32_t code = nodesMakeNode(QUERY_NODE_OPERATOR, (SNode**)&pOper); if (NULL == pOper) { return code; } @@ -13323,7 +13326,7 @@ static int32_t addShowUserDatabasesCond(SSelectStmt* pSelect) { SNode* pNameCond1 = NULL; SNode* pNameCond2 = NULL; SNode* pNameCond = NULL; - SValueNode* pValNode1 = NULL, *pValNode2 = NULL; + SValueNode *pValNode1 = NULL, *pValNode2 = NULL; code = nodesMakeValueNodeFromString(TSDB_INFORMATION_SCHEMA_DB, &pValNode1); if (TSDB_CODE_SUCCESS == code) { @@ -13337,11 +13340,9 @@ static int32_t addShowUserDatabasesCond(SSelectStmt* pSelect) { } nodesDestroyNode((SNode*)pValNode2); nodesDestroyNode((SNode*)pValNode1); - if (TSDB_CODE_SUCCESS == code) - code = createLogicCondNode(&pNameCond1, &pNameCond2, &pNameCond, LOGIC_COND_TYPE_AND); + if (TSDB_CODE_SUCCESS == code) code = createLogicCondNode(&pNameCond1, &pNameCond2, &pNameCond, LOGIC_COND_TYPE_AND); - if (TSDB_CODE_SUCCESS == code) - code = insertCondIntoSelectStmt(pSelect, &pNameCond); + if (TSDB_CODE_SUCCESS == code) code = insertCondIntoSelectStmt(pSelect, &pNameCond); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pNameCond1); @@ -13355,8 +13356,8 @@ static int32_t addShowSystemDatabasesCond(SSelectStmt* pSelect) { int32_t code = 
TSDB_CODE_SUCCESS; SNode* pNameCond1 = NULL; SNode* pNameCond2 = NULL; - SValueNode* pValNode1 = NULL, * pValNode2 = NULL; - SNode* pNameCond = NULL; + SValueNode *pValNode1 = NULL, *pValNode2 = NULL; + SNode* pNameCond = NULL; code = nodesMakeValueNodeFromString(TSDB_INFORMATION_SCHEMA_DB, &pValNode1); if (TSDB_CODE_SUCCESS == code) { code = nodesMakeValueNodeFromString(TSDB_PERFORMANCE_SCHEMA_DB, &pValNode2); @@ -13373,8 +13374,7 @@ static int32_t addShowSystemDatabasesCond(SSelectStmt* pSelect) { code = createLogicCondNode(&pNameCond1, &pNameCond2, &pNameCond, LOGIC_COND_TYPE_OR); } - if (TSDB_CODE_SUCCESS == code) - code = insertCondIntoSelectStmt(pSelect, &pNameCond); + if (TSDB_CODE_SUCCESS == code) code = insertCondIntoSelectStmt(pSelect, &pNameCond); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pNameCond1); @@ -13390,8 +13390,7 @@ static int32_t addShowNormalTablesCond(SSelectStmt* pSelect) { SValueNode* pValNode1 = NULL; code = nodesMakeValueNodeFromString("NORMAL_TABLE", &pValNode1); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); nodesDestroyNode((SNode*)pValNode1); @@ -13406,8 +13405,7 @@ static int32_t addShowChildTablesCond(SSelectStmt* pSelect) { SValueNode* pValNode1 = NULL; code = nodesMakeValueNodeFromString("CHILD_TABLE", &pValNode1); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "type", (SNode*)pValNode1, &pTypeCond); nodesDestroyNode((SNode*)pValNode1); @@ -13506,7 +13504,8 @@ static int32_t checkShowTags(STranslateContext* pCxt, const SShowStmt* pShow) { int32_t code = 0; SName name = {0}; STableMeta* pTableMeta = NULL; - toName(pCxt->pParseCxt->acctId, ((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, &name); + toName(pCxt->pParseCxt->acctId, ((SValueNode*)pShow->pDbName)->literal, ((SValueNode*)pShow->pTbName)->literal, + &name); code = getTargetMeta(pCxt, &name, &pTableMeta, true); if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_GET_META_ERROR, tstrerror(code)); @@ -13533,7 +13532,7 @@ static int32_t rewriteShowTags(STranslateContext* pCxt, SQuery* pQuery) { static int32_t createTagsFunction(SFunctionNode** ppNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -13548,7 +13547,7 @@ static int32_t createShowTableTagsProjections(SNodeList** pProjections, SNodeLis return TSDB_CODE_SUCCESS; } SFunctionNode* pTbNameFunc = NULL; - int32_t code = createTbnameFunction(&pTbNameFunc); + int32_t code = createTbnameFunction(&pTbNameFunc); if (TSDB_CODE_SUCCESS == code) { code = nodesListMakeStrictAppend(pProjections, (SNode*)pTbNameFunc); } @@ -13630,7 +13629,7 @@ static int32_t rewriteShowVnodes(STranslateContext* pCxt, SQuery* pQuery) { static int32_t createBlockDistInfoFunc(SFunctionNode** ppNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -13643,7 +13642,7 @@ static int32_t createBlockDistInfoFunc(SFunctionNode** ppNode) { static int32_t 
createBlockDistFunc(SFunctionNode** ppNode) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (NULL == pFunc) { return code; } @@ -13670,8 +13669,7 @@ static int32_t rewriteShowTableDist(STranslateContext* pCxt, SQuery* pQuery) { NODES_DESTORY_LIST(pStmt->pProjectionList); SFunctionNode* pFuncNew = NULL; code = createBlockDistFunc(&pFuncNew); - if (TSDB_CODE_SUCCESS == code) - code = nodesListMakeStrictAppend(&pStmt->pProjectionList, (SNode*)pFuncNew); + if (TSDB_CODE_SUCCESS == code) code = nodesListMakeStrictAppend(&pStmt->pProjectionList, (SNode*)pFuncNew); } if (TSDB_CODE_SUCCESS == code) { pCxt->showRewrite = true; @@ -13725,7 +13723,7 @@ static int32_t buildNormalTableBatchReq(int32_t acctId, const SCreateTableStmt* } SNode* pCol; col_id_t index = 0; - int32_t code = tInitDefaultSColCmprWrapperByCols(&req.colCmpr, req.ntb.schemaRow.nCols); + int32_t code = tInitDefaultSColCmprWrapperByCols(&req.colCmpr, req.ntb.schemaRow.nCols); if (TSDB_CODE_SUCCESS != code) { tdDestroySVCreateTbReq(&req); return code; @@ -13823,7 +13821,7 @@ static void destroyCreateTbReqBatch(void* data) { int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray) { SVnodeModifyOpStmt* pNewStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pNewStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pNewStmt); if (pNewStmt == NULL) { return code; } @@ -13952,7 +13950,7 @@ static int32_t addCreateTbReqIntoVgroup(SHashObj* pVgroupHashmap, const char* db } static int32_t createCastFuncForTag(STranslateContext* pCxt, SNode* pNode, SDataType dt, SNode** pCast) { - SNode* pExpr = NULL; + SNode* pExpr = NULL; int32_t code = nodesCloneNode(pNode, (SNode**)&pExpr); if (NULL == pExpr) { return code; @@ -14636,7 +14634,7 @@ static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery) static int32_t rewriteCreateTableFromFile(STranslateContext* pCxt, SQuery* pQuery) { SVnodeModifyOpStmt* pModifyStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pModifyStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pModifyStmt); if (pModifyStmt == NULL) { return code; } @@ -15535,7 +15533,7 @@ static int32_t rewriteShowCompactDetailsStmt(STranslateContext* pCxt, SQuery* pQ static int32_t createParWhenThenNode(SNode* pWhen, SNode* pThen, SNode** ppResWhenThen) { SWhenThenNode* pWThen = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_WHEN_THEN, (SNode**)&pWThen); + int32_t code = nodesMakeNode(QUERY_NODE_WHEN_THEN, (SNode**)&pWThen); if (TSDB_CODE_SUCCESS != code) return code; pWThen->pWhen = pWhen; @@ -15547,7 +15545,7 @@ static int32_t createParWhenThenNode(SNode* pWhen, SNode* pThen, SNode** ppResWh static int32_t createParCaseWhenNode(SNode* pCase, SNodeList* pWhenThenList, SNode* pElse, const char* pAias, SNode** ppResCaseWhen) { SCaseWhenNode* pCaseWhen = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_CASE_WHEN, (SNode**)&pCaseWhen); + int32_t code = nodesMakeNode(QUERY_NODE_CASE_WHEN, (SNode**)&pCaseWhen); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15566,7 +15564,7 @@ static int32_t createParCaseWhenNode(SNode* pCase, SNodeList* pWhenThenList, SNo static int32_t createParFunctionNode(const char* pFunName, const char* pAias, SNodeList* pParameterList, SNode** ppResFunc) { SFunctionNode* pFunc = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, 
(SNode**)&pFunc); + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)&pFunc); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15580,7 +15578,7 @@ static int32_t createParFunctionNode(const char* pFunName, const char* pAias, SN static int32_t createParListNode(SNode* pItem, SNodeList** ppResList) { SNodeList* pList = NULL; - int32_t code = nodesMakeList(&pList); + int32_t code = nodesMakeList(&pList); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15591,7 +15589,7 @@ static int32_t createParListNode(SNode* pItem, SNodeList** ppResList) { static int32_t createParTempTableNode(SSelectStmt* pSubquery, SNode** ppResTempTable) { STempTableNode* pTempTable = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_TEMP_TABLE, (SNode**)&pTempTable); + int32_t code = nodesMakeNode(QUERY_NODE_TEMP_TABLE, (SNode**)&pTempTable); if (TSDB_CODE_SUCCESS != code) { return code; } @@ -15623,12 +15621,9 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { SNode* pCond3 = NULL; SNode* pCond4 = NULL; code = createOperatorNode(OP_TYPE_EQUAL, "v1_status", (SNode*)pValNode, &pCond1); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "v2_status", (SNode*)pValNode, &pCond2); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "v3_status", (SNode*)pValNode, &pCond3); - if (TSDB_CODE_SUCCESS == code) - code = createOperatorNode(OP_TYPE_EQUAL, "v4_status", (SNode*)pValNode, &pCond4); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "v2_status", (SNode*)pValNode, &pCond2); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "v3_status", (SNode*)pValNode, &pCond3); + if (TSDB_CODE_SUCCESS == code) code = createOperatorNode(OP_TYPE_EQUAL, "v4_status", (SNode*)pValNode, &pCond4); nodesDestroyNode((SNode*)pValNode); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pCond1); @@ -15643,10 +15638,8 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { SNode* pTemp2 = NULL; SNode* pFullCond = NULL; code = createLogicCondNode(&pCond1, &pCond2, &pTemp1, LOGIC_COND_TYPE_OR); - if (TSDB_CODE_SUCCESS == code) - code = createLogicCondNode(&pTemp1, &pCond3, &pTemp2, LOGIC_COND_TYPE_OR); - if (TSDB_CODE_SUCCESS == code) - code = createLogicCondNode(&pTemp2, &pCond4, &pFullCond, LOGIC_COND_TYPE_OR); + if (TSDB_CODE_SUCCESS == code) code = createLogicCondNode(&pTemp1, &pCond3, &pTemp2, LOGIC_COND_TYPE_OR); + if (TSDB_CODE_SUCCESS == code) code = createLogicCondNode(&pTemp2, &pCond4, &pFullCond, LOGIC_COND_TYPE_OR); if (TSDB_CODE_SUCCESS != code) { nodesDestroyNode(pCond1); nodesDestroyNode(pCond2); @@ -15969,8 +15962,8 @@ static int32_t rewriteShowAliveStmt(STranslateContext* pCxt, SQuery* pQuery) { } // pSubSelect, pWhenThenlist need to free - // case when leader_col = count_col and leader_col > 0 then 1 when leader_col < count_col and leader_col > 0 then 2 else - // 0 end as status + // case when leader_col = count_col and leader_col > 0 then 1 when leader_col < count_col and leader_col > 0 then 2 + // else 0 end as status pElse = NULL; code = nodesMakeValueNodeFromInt32(0, &pElse); if (TSDB_CODE_SUCCESS != code) { diff --git a/source/libs/parser/src/sql.c b/source/libs/parser/src/sql.c index 5849977a3a..9ddf50f7aa 100644 --- a/source/libs/parser/src/sql.c +++ b/source/libs/parser/src/sql.c @@ -1,3 +1,5 @@ +/* This file is automatically generated by Lemon from input grammar +** source file "sql.y". 
*/ /* ** 2000-05-29 ** @@ -22,9 +24,8 @@ ** The following is the concatenation of all %include directives from the ** input grammar file: */ -#include -#include /************ Begin %include sections from the grammar ************************/ +#line 11 "sql.y" #include #include @@ -41,12 +42,401 @@ #include "parAst.h" #define YYSTACKDEPTH 0 +#line 46 "sql.c" /**************** End of %include directives **********************************/ -/* These constants specify the various numeric values for terminal symbols -** in a format understandable to "makeheaders". This section is blank unless -** "lemon" is run with the "-m" command-line option. -***************** Begin makeheaders token definitions *************************/ -/**************** End makeheaders token definitions ***************************/ +/* These constants specify the various numeric values for terminal symbols. +***************** Begin token definitions *************************************/ +#ifndef TK_OR +#define TK_OR 1 +#define TK_AND 2 +#define TK_UNION 3 +#define TK_ALL 4 +#define TK_MINUS 5 +#define TK_EXCEPT 6 +#define TK_INTERSECT 7 +#define TK_NK_BITAND 8 +#define TK_NK_BITOR 9 +#define TK_NK_LSHIFT 10 +#define TK_NK_RSHIFT 11 +#define TK_NK_PLUS 12 +#define TK_NK_MINUS 13 +#define TK_NK_STAR 14 +#define TK_NK_SLASH 15 +#define TK_NK_REM 16 +#define TK_NK_CONCAT 17 +#define TK_CREATE 18 +#define TK_ACCOUNT 19 +#define TK_NK_ID 20 +#define TK_PASS 21 +#define TK_NK_STRING 22 +#define TK_ALTER 23 +#define TK_PPS 24 +#define TK_TSERIES 25 +#define TK_STORAGE 26 +#define TK_STREAMS 27 +#define TK_QTIME 28 +#define TK_DBS 29 +#define TK_USERS 30 +#define TK_CONNS 31 +#define TK_STATE 32 +#define TK_NK_COMMA 33 +#define TK_HOST 34 +#define TK_IS_IMPORT 35 +#define TK_NK_INTEGER 36 +#define TK_CREATEDB 37 +#define TK_USER 38 +#define TK_ENABLE 39 +#define TK_SYSINFO 40 +#define TK_ADD 41 +#define TK_DROP 42 +#define TK_GRANT 43 +#define TK_ON 44 +#define TK_TO 45 +#define TK_REVOKE 46 +#define TK_FROM 47 +#define TK_SUBSCRIBE 48 +#define TK_READ 49 +#define TK_WRITE 50 +#define TK_NK_DOT 51 +#define TK_WITH 52 +#define TK_ENCRYPT_KEY 53 +#define TK_ANODE 54 +#define TK_UPDATE 55 +#define TK_ANODES 56 +#define TK_DNODE 57 +#define TK_PORT 58 +#define TK_DNODES 59 +#define TK_RESTORE 60 +#define TK_NK_IPTOKEN 61 +#define TK_FORCE 62 +#define TK_UNSAFE 63 +#define TK_CLUSTER 64 +#define TK_LOCAL 65 +#define TK_QNODE 66 +#define TK_BNODE 67 +#define TK_SNODE 68 +#define TK_MNODE 69 +#define TK_VNODE 70 +#define TK_DATABASE 71 +#define TK_USE 72 +#define TK_FLUSH 73 +#define TK_TRIM 74 +#define TK_S3MIGRATE 75 +#define TK_COMPACT 76 +#define TK_IF 77 +#define TK_NOT 78 +#define TK_EXISTS 79 +#define TK_BUFFER 80 +#define TK_CACHEMODEL 81 +#define TK_CACHESIZE 82 +#define TK_COMP 83 +#define TK_DURATION 84 +#define TK_NK_VARIABLE 85 +#define TK_MAXROWS 86 +#define TK_MINROWS 87 +#define TK_KEEP 88 +#define TK_PAGES 89 +#define TK_PAGESIZE 90 +#define TK_TSDB_PAGESIZE 91 +#define TK_PRECISION 92 +#define TK_REPLICA 93 +#define TK_VGROUPS 94 +#define TK_SINGLE_STABLE 95 +#define TK_RETENTIONS 96 +#define TK_SCHEMALESS 97 +#define TK_WAL_LEVEL 98 +#define TK_WAL_FSYNC_PERIOD 99 +#define TK_WAL_RETENTION_PERIOD 100 +#define TK_WAL_RETENTION_SIZE 101 +#define TK_WAL_ROLL_PERIOD 102 +#define TK_WAL_SEGMENT_SIZE 103 +#define TK_STT_TRIGGER 104 +#define TK_TABLE_PREFIX 105 +#define TK_TABLE_SUFFIX 106 +#define TK_S3_CHUNKPAGES 107 +#define TK_S3_KEEPLOCAL 108 +#define TK_S3_COMPACT 109 +#define TK_KEEP_TIME_OFFSET 110 +#define 
TK_ENCRYPT_ALGORITHM 111 +#define TK_NK_COLON 112 +#define TK_BWLIMIT 113 +#define TK_START 114 +#define TK_TIMESTAMP 115 +#define TK_END 116 +#define TK_TABLE 117 +#define TK_NK_LP 118 +#define TK_NK_RP 119 +#define TK_USING 120 +#define TK_FILE 121 +#define TK_STABLE 122 +#define TK_COLUMN 123 +#define TK_MODIFY 124 +#define TK_RENAME 125 +#define TK_TAG 126 +#define TK_SET 127 +#define TK_NK_EQ 128 +#define TK_TAGS 129 +#define TK_BOOL 130 +#define TK_TINYINT 131 +#define TK_SMALLINT 132 +#define TK_INT 133 +#define TK_INTEGER 134 +#define TK_BIGINT 135 +#define TK_FLOAT 136 +#define TK_DOUBLE 137 +#define TK_BINARY 138 +#define TK_NCHAR 139 +#define TK_UNSIGNED 140 +#define TK_JSON 141 +#define TK_VARCHAR 142 +#define TK_MEDIUMBLOB 143 +#define TK_BLOB 144 +#define TK_VARBINARY 145 +#define TK_GEOMETRY 146 +#define TK_DECIMAL 147 +#define TK_COMMENT 148 +#define TK_MAX_DELAY 149 +#define TK_WATERMARK 150 +#define TK_ROLLUP 151 +#define TK_TTL 152 +#define TK_SMA 153 +#define TK_DELETE_MARK 154 +#define TK_FIRST 155 +#define TK_LAST 156 +#define TK_SHOW 157 +#define TK_FULL 158 +#define TK_PRIVILEGES 159 +#define TK_DATABASES 160 +#define TK_TABLES 161 +#define TK_STABLES 162 +#define TK_MNODES 163 +#define TK_QNODES 164 +#define TK_ARBGROUPS 165 +#define TK_FUNCTIONS 166 +#define TK_INDEXES 167 +#define TK_ACCOUNTS 168 +#define TK_APPS 169 +#define TK_CONNECTIONS 170 +#define TK_LICENCES 171 +#define TK_GRANTS 172 +#define TK_LOGS 173 +#define TK_MACHINES 174 +#define TK_ENCRYPTIONS 175 +#define TK_QUERIES 176 +#define TK_SCORES 177 +#define TK_TOPICS 178 +#define TK_VARIABLES 179 +#define TK_BNODES 180 +#define TK_SNODES 181 +#define TK_TRANSACTIONS 182 +#define TK_DISTRIBUTED 183 +#define TK_CONSUMERS 184 +#define TK_SUBSCRIPTIONS 185 +#define TK_VNODES 186 +#define TK_ALIVE 187 +#define TK_VIEWS 188 +#define TK_VIEW 189 +#define TK_COMPACTS 190 +#define TK_NORMAL 191 +#define TK_CHILD 192 +#define TK_LIKE 193 +#define TK_TBNAME 194 +#define TK_QTAGS 195 +#define TK_AS 196 +#define TK_SYSTEM 197 +#define TK_TSMA 198 +#define TK_INTERVAL 199 +#define TK_RECURSIVE 200 +#define TK_TSMAS 201 +#define TK_FUNCTION 202 +#define TK_INDEX 203 +#define TK_COUNT 204 +#define TK_LAST_ROW 205 +#define TK_META 206 +#define TK_ONLY 207 +#define TK_TOPIC 208 +#define TK_CONSUMER 209 +#define TK_GROUP 210 +#define TK_DESC 211 +#define TK_DESCRIBE 212 +#define TK_RESET 213 +#define TK_QUERY 214 +#define TK_CACHE 215 +#define TK_EXPLAIN 216 +#define TK_ANALYZE 217 +#define TK_VERBOSE 218 +#define TK_NK_BOOL 219 +#define TK_RATIO 220 +#define TK_NK_FLOAT 221 +#define TK_OUTPUTTYPE 222 +#define TK_AGGREGATE 223 +#define TK_BUFSIZE 224 +#define TK_LANGUAGE 225 +#define TK_REPLACE 226 +#define TK_STREAM 227 +#define TK_INTO 228 +#define TK_PAUSE 229 +#define TK_RESUME 230 +#define TK_PRIMARY 231 +#define TK_KEY 232 +#define TK_TRIGGER 233 +#define TK_AT_ONCE 234 +#define TK_WINDOW_CLOSE 235 +#define TK_IGNORE 236 +#define TK_EXPIRED 237 +#define TK_FILL_HISTORY 238 +#define TK_SUBTABLE 239 +#define TK_UNTREATED 240 +#define TK_KILL 241 +#define TK_CONNECTION 242 +#define TK_TRANSACTION 243 +#define TK_BALANCE 244 +#define TK_VGROUP 245 +#define TK_LEADER 246 +#define TK_MERGE 247 +#define TK_REDISTRIBUTE 248 +#define TK_SPLIT 249 +#define TK_DELETE 250 +#define TK_INSERT 251 +#define TK_NK_BIN 252 +#define TK_NK_HEX 253 +#define TK_NULL 254 +#define TK_NK_QUESTION 255 +#define TK_NK_ALIAS 256 +#define TK_NK_ARROW 257 +#define TK_ROWTS 258 +#define TK_QSTART 259 +#define TK_QEND 260 +#define TK_QDURATION 
261 +#define TK_WSTART 262 +#define TK_WEND 263 +#define TK_WDURATION 264 +#define TK_IROWTS 265 +#define TK_ISFILLED 266 +#define TK_FLOW 267 +#define TK_FHIGH 268 +#define TK_FROWTS 269 +#define TK_CAST 270 +#define TK_POSITION 271 +#define TK_IN 272 +#define TK_FOR 273 +#define TK_NOW 274 +#define TK_TODAY 275 +#define TK_RAND 276 +#define TK_SUBSTR 277 +#define TK_SUBSTRING 278 +#define TK_BOTH 279 +#define TK_TRAILING 280 +#define TK_LEADING 281 +#define TK_TIMEZONE 282 +#define TK_CLIENT_VERSION 283 +#define TK_SERVER_VERSION 284 +#define TK_SERVER_STATUS 285 +#define TK_CURRENT_USER 286 +#define TK_PI 287 +#define TK_CASE 288 +#define TK_WHEN 289 +#define TK_THEN 290 +#define TK_ELSE 291 +#define TK_BETWEEN 292 +#define TK_IS 293 +#define TK_NK_LT 294 +#define TK_NK_GT 295 +#define TK_NK_LE 296 +#define TK_NK_GE 297 +#define TK_NK_NE 298 +#define TK_MATCH 299 +#define TK_NMATCH 300 +#define TK_CONTAINS 301 +#define TK_JOIN 302 +#define TK_INNER 303 +#define TK_LEFT 304 +#define TK_RIGHT 305 +#define TK_OUTER 306 +#define TK_SEMI 307 +#define TK_ANTI 308 +#define TK_ASOF 309 +#define TK_WINDOW 310 +#define TK_WINDOW_OFFSET 311 +#define TK_JLIMIT 312 +#define TK_SELECT 313 +#define TK_NK_HINT 314 +#define TK_DISTINCT 315 +#define TK_WHERE 316 +#define TK_PARTITION 317 +#define TK_BY 318 +#define TK_SESSION 319 +#define TK_STATE_WINDOW 320 +#define TK_EVENT_WINDOW 321 +#define TK_COUNT_WINDOW 322 +#define TK_ANOMALY_WINDOW 323 +#define TK_SLIDING 324 +#define TK_FILL 325 +#define TK_VALUE 326 +#define TK_VALUE_F 327 +#define TK_NONE 328 +#define TK_PREV 329 +#define TK_NULL_F 330 +#define TK_LINEAR 331 +#define TK_NEXT 332 +#define TK_HAVING 333 +#define TK_RANGE 334 +#define TK_EVERY 335 +#define TK_ORDER 336 +#define TK_SLIMIT 337 +#define TK_SOFFSET 338 +#define TK_LIMIT 339 +#define TK_OFFSET 340 +#define TK_ASC 341 +#define TK_NULLS 342 +#define TK_ABORT 343 +#define TK_AFTER 344 +#define TK_ATTACH 345 +#define TK_BEFORE 346 +#define TK_BEGIN 347 +#define TK_BITAND 348 +#define TK_BITNOT 349 +#define TK_BITOR 350 +#define TK_BLOCKS 351 +#define TK_CHANGE 352 +#define TK_COMMA 353 +#define TK_CONCAT 354 +#define TK_CONFLICT 355 +#define TK_COPY 356 +#define TK_DEFERRED 357 +#define TK_DELIMITERS 358 +#define TK_DETACH 359 +#define TK_DIVIDE 360 +#define TK_DOT 361 +#define TK_EACH 362 +#define TK_FAIL 363 +#define TK_GLOB 364 +#define TK_ID 365 +#define TK_IMMEDIATE 366 +#define TK_IMPORT 367 +#define TK_INITIALLY 368 +#define TK_INSTEAD 369 +#define TK_ISNULL 370 +#define TK_MODULES 371 +#define TK_NK_BITNOT 372 +#define TK_NK_SEMI 373 +#define TK_NOTNULL 374 +#define TK_OF 375 +#define TK_PLUS 376 +#define TK_PRIVILEGE 377 +#define TK_RAISE 378 +#define TK_RESTRICT 379 +#define TK_ROW 380 +#define TK_STAR 381 +#define TK_STATEMENT 382 +#define TK_STRICT 383 +#define TK_STRING 384 +#define TK_TIMES 385 +#define TK_VALUES 386 +#define TK_VARIABLE 387 +#define TK_WAL 388 +#endif +/**************** End token definitions ***************************************/ /* The next sections is a series of control #defines. ** various aspects of the generated parser. 
@@ -1546,7 +1936,7 @@ static const YYCODETYPE yyFallback[] = { 0, /* STT_TRIGGER => nothing */ 0, /* TABLE_PREFIX => nothing */ 0, /* TABLE_SUFFIX => nothing */ - 0, /* S3_CHUNKSIZE => nothing */ + 0, /* S3_CHUNKPAGES => nothing */ 0, /* S3_KEEPLOCAL => nothing */ 0, /* S3_COMPACT => nothing */ 0, /* KEEP_TIME_OFFSET => nothing */ @@ -1881,6 +2271,7 @@ typedef struct yyParser yyParser; #ifndef NDEBUG #include <stdio.h> +#include <assert.h> static FILE *yyTraceFILE = 0; static char *yyTracePrompt = 0; #endif /* NDEBUG */ @@ -2022,7 +2413,7 @@ static const char *const yyTokenName[] = { /* 104 */ "STT_TRIGGER", /* 105 */ "TABLE_PREFIX", /* 106 */ "TABLE_SUFFIX", - /* 107 */ "S3_CHUNKSIZE", + /* 107 */ "S3_CHUNKPAGES", /* 108 */ "S3_KEEPLOCAL", /* 109 */ "S3_COMPACT", /* 110 */ "KEEP_TIME_OFFSET", @@ -2634,7 +3025,7 @@ static const char *const yyRuleName[] = { /* 136 */ "db_options ::= db_options STT_TRIGGER NK_INTEGER", /* 137 */ "db_options ::= db_options TABLE_PREFIX signed", /* 138 */ "db_options ::= db_options TABLE_SUFFIX signed", - /* 139 */ "db_options ::= db_options S3_CHUNKSIZE NK_INTEGER", + /* 139 */ "db_options ::= db_options S3_CHUNKPAGES NK_INTEGER", /* 140 */ "db_options ::= db_options S3_KEEPLOCAL NK_INTEGER", /* 141 */ "db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE", /* 142 */ "db_options ::= db_options S3_COMPACT NK_INTEGER", @@ -3493,7 +3884,9 @@ static void yy_destructor( case 568: /* query_simple_or_subquery */ case 570: /* sort_specification */ { +#line 7 "sql.y" nodesDestroyNode((yypminor->yy974)); +#line 3889 "sql.c" } break; case 390: /* account_options */ @@ -3503,7 +3896,9 @@ static void yy_destructor( case 478: /* with_meta */ case 487: /* bufsize_opt */ { +#line 54 "sql.y" +#line 3901 "sql.c" } break; case 394: /* ip_range_list */ @@ -3543,14 +3938,18 @@ static void yy_destructor( case 564: /* order_by_clause_opt */ case 569: /* sort_specification_list */ { +#line 85 "sql.y" nodesDestroyList((yypminor->yy946)); +#line 3943 "sql.c" } break; case 397: /* is_import_opt */ case 398: /* is_createdb_opt */ case 400: /* sysinfo_opt */ { +#line 99 "sql.y" +#line 3952 "sql.c" } break; case 399: /* user_name */ @@ -3575,19 +3974,25 @@ static void yy_destructor( case 517: /* noarg_func */ case 535: /* alias_opt */ { +#line 1095 "sql.y" +#line 3979 "sql.c" } break; case 401: /* privileges */ case 404: /* priv_type_list */ case 405: /* priv_type */ { +#line 131 "sql.y" +#line 3988 "sql.c" } break; case 402: /* priv_level */ { +#line 148 "sql.y" +#line 3995 "sql.c" } break; case 411: /* force_opt */ @@ -3602,71 +4007,97 @@ static void yy_destructor( case 546: /* set_quantifier_opt */ case 547: /* tag_mode_opt */ { +#line 186 "sql.y" +#line 4012 "sql.c" } break; case 424: /* alter_db_option */ case 452: /* alter_table_option */ { +#line 294 "sql.y" +#line 4020 "sql.c" } break; case 438: /* type_name */ case 449: /* type_name_default_len */ { +#line 436 "sql.y" +#line 4028 "sql.c" } break; case 457: /* db_kind_opt */ case 463: /* table_kind */ { +#line 617 "sql.y" +#line 4036 "sql.c" } break; case 458: /* table_kind_db_name_cond_opt */ { +#line 582 "sql.y" +#line 4043 "sql.c" } break; case 467: /* tsma_func_list */ { +#line 636 "sql.y" nodesDestroyNode((yypminor->yy974)); +#line 4050 "sql.c" } break; case 514: /* trim_specification_type */ { +#line 1241 "sql.y" +#line 4057 "sql.c" } break; case 525: /* compare_op */ case 526: /* in_op */ { +#line 1328 "sql.y" +#line 4065 "sql.c" } break; case 538: /* join_type */ { +#line 1409 "sql.y" +#line 4072 "sql.c" } break; case 539: /* join_subtype */ {
+#line 1417 "sql.y" +#line 4079 "sql.c" } break; case 560: /* fill_mode */ { +#line 1537 "sql.y" +#line 4086 "sql.c" } break; case 571: /* ordering_specification_opt */ { +#line 1622 "sql.y" +#line 4093 "sql.c" } break; case 572: /* null_ordering_opt */ { +#line 1628 "sql.y" +#line 4100 "sql.c" } break; /********* End destructor definitions *****************************************/ @@ -3833,7 +4264,7 @@ static YYACTIONTYPE yy_find_shift_action( #endif /* YYWILDCARD */ return yy_default[stateno]; }else{ - assert( i>=0 && i=0 && i<(int)(sizeof(yy_action)/sizeof(yy_action[0])) ); return yy_action[i]; } }while(1); @@ -4094,7 +4525,7 @@ static const YYCODETYPE yyRuleInfoLhs[] = { 414, /* (136) db_options ::= db_options STT_TRIGGER NK_INTEGER */ 414, /* (137) db_options ::= db_options TABLE_PREFIX signed */ 414, /* (138) db_options ::= db_options TABLE_SUFFIX signed */ - 414, /* (139) db_options ::= db_options S3_CHUNKSIZE NK_INTEGER */ + 414, /* (139) db_options ::= db_options S3_CHUNKPAGES NK_INTEGER */ 414, /* (140) db_options ::= db_options S3_KEEPLOCAL NK_INTEGER */ 414, /* (141) db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE */ 414, /* (142) db_options ::= db_options S3_COMPACT NK_INTEGER */ @@ -4883,7 +5314,7 @@ static const signed char yyRuleInfoNRhs[] = { -3, /* (136) db_options ::= db_options STT_TRIGGER NK_INTEGER */ -3, /* (137) db_options ::= db_options TABLE_PREFIX signed */ -3, /* (138) db_options ::= db_options TABLE_SUFFIX signed */ - -3, /* (139) db_options ::= db_options S3_CHUNKSIZE NK_INTEGER */ + -3, /* (139) db_options ::= db_options S3_CHUNKPAGES NK_INTEGER */ -3, /* (140) db_options ::= db_options S3_KEEPLOCAL NK_INTEGER */ -3, /* (141) db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE */ -3, /* (142) db_options ::= db_options S3_COMPACT NK_INTEGER */ @@ -5557,8 +5988,9 @@ static YYACTIONTYPE yy_reduce( (void)yyLookahead; (void)yyLookaheadToken; yymsp = yypParser->yytos; + assert( yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ); #ifndef NDEBUG - if( yyTraceFILE && yyruleno<(int)(sizeof(yyRuleName)/sizeof(yyRuleName[0])) ){ + if( yyTraceFILE ){ yysize = yyRuleInfoNRhs[yyruleno]; if( yysize ){ fprintf(yyTraceFILE, "%sReduce %d [%s]%s, pop back to state %d.\n", @@ -5618,15 +6050,21 @@ static YYACTIONTYPE yy_reduce( /********** Begin reduce actions **********************************************/ YYMINORTYPE yylhsminor; case 0: /* cmd ::= CREATE ACCOUNT NK_ID PASS NK_STRING account_options */ +#line 50 "sql.y" { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } +#line 6055 "sql.c" yy_destructor(yypParser,390,&yymsp[0].minor); break; case 1: /* cmd ::= ALTER ACCOUNT NK_ID alter_account_options */ +#line 51 "sql.y" { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } +#line 6061 "sql.c" yy_destructor(yypParser,391,&yymsp[0].minor); break; case 2: /* account_options ::= */ +#line 55 "sql.y" { } +#line 6067 "sql.c" break; case 3: /* account_options ::= account_options PPS literal */ case 4: /* account_options ::= account_options TSERIES literal */ yytestcase(yyruleno==4); @@ -5638,18 +6076,24 @@ static YYACTIONTYPE yy_reduce( case 10: /* account_options ::= account_options CONNS literal */ yytestcase(yyruleno==10); case 11: /* account_options ::= account_options STATE literal */ yytestcase(yyruleno==11); { yy_destructor(yypParser,390,&yymsp[-2].minor); +#line 56 "sql.y" { } +#line 6081 "sql.c" yy_destructor(yypParser,392,&yymsp[0].minor); } break; case 12: /* alter_account_options ::= 
alter_account_option */ { yy_destructor(yypParser,393,&yymsp[0].minor); +#line 68 "sql.y" { } +#line 6089 "sql.c" } break; case 13: /* alter_account_options ::= alter_account_options alter_account_option */ { yy_destructor(yypParser,391,&yymsp[-1].minor); +#line 69 "sql.y" { } +#line 6096 "sql.c" yy_destructor(yypParser,393,&yymsp[0].minor); } break; @@ -5663,19 +6107,27 @@ static YYACTIONTYPE yy_reduce( case 21: /* alter_account_option ::= USERS literal */ yytestcase(yyruleno==21); case 22: /* alter_account_option ::= CONNS literal */ yytestcase(yyruleno==22); case 23: /* alter_account_option ::= STATE literal */ yytestcase(yyruleno==23); +#line 73 "sql.y" { } +#line 6112 "sql.c" yy_destructor(yypParser,392,&yymsp[0].minor); break; case 24: /* ip_range_list ::= NK_STRING */ +#line 86 "sql.y" { yylhsminor.yy946 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } +#line 6118 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 25: /* ip_range_list ::= ip_range_list NK_COMMA NK_STRING */ +#line 87 "sql.y" { yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } +#line 6124 "sql.c" yymsp[-2].minor.yy946 = yylhsminor.yy946; break; case 26: /* white_list ::= HOST ip_range_list */ +#line 91 "sql.y" { yymsp[-1].minor.yy946 = yymsp[0].minor.yy946; } +#line 6130 "sql.c" break; case 27: /* white_list_opt ::= */ case 212: /* specific_cols_opt ::= */ yytestcase(yyruleno==212); @@ -5686,98 +6138,150 @@ static YYACTIONTYPE yy_reduce( case 707: /* partition_by_clause_opt ::= */ yytestcase(yyruleno==707); case 739: /* group_by_clause_opt ::= */ yytestcase(yyruleno==739); case 759: /* order_by_clause_opt ::= */ yytestcase(yyruleno==759); +#line 95 "sql.y" { yymsp[1].minor.yy946 = NULL; } +#line 6143 "sql.c" break; case 28: /* white_list_opt ::= white_list */ case 251: /* tags_def_opt ::= tags_def */ yytestcase(yyruleno==251); case 424: /* tag_def_or_ref_opt ::= tags_def */ yytestcase(yyruleno==424); case 615: /* star_func_para_list ::= other_para_list */ yytestcase(yyruleno==615); +#line 96 "sql.y" { yylhsminor.yy946 = yymsp[0].minor.yy946; } +#line 6151 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 29: /* is_import_opt ::= */ case 31: /* is_createdb_opt ::= */ yytestcase(yyruleno==31); +#line 100 "sql.y" { yymsp[1].minor.yy815 = 0; } +#line 6158 "sql.c" break; case 30: /* is_import_opt ::= IS_IMPORT NK_INTEGER */ case 32: /* is_createdb_opt ::= CREATEDB NK_INTEGER */ yytestcase(yyruleno==32); case 42: /* sysinfo_opt ::= SYSINFO NK_INTEGER */ yytestcase(yyruleno==42); +#line 101 "sql.y" { yymsp[-1].minor.yy815 = taosStr2Int8(yymsp[0].minor.yy0.z, NULL, 10); } +#line 6165 "sql.c" break; case 33: /* cmd ::= CREATE USER user_name PASS NK_STRING sysinfo_opt is_createdb_opt is_import_opt white_list_opt */ +#line 109 "sql.y" { pCxt->pRootNode = createCreateUserStmt(pCxt, &yymsp[-6].minor.yy557, &yymsp[-4].minor.yy0, yymsp[-3].minor.yy815, yymsp[-1].minor.yy815, yymsp[-2].minor.yy815); pCxt->pRootNode = addCreateUserStmtWhiteList(pCxt, pCxt->pRootNode, yymsp[0].minor.yy946); } +#line 6173 "sql.c" break; case 34: /* cmd ::= ALTER USER user_name PASS NK_STRING */ +#line 113 "sql.y" { pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_PASSWD, &yymsp[0].minor.yy0); } +#line 6178 "sql.c" break; case 35: /* cmd ::= ALTER USER user_name ENABLE NK_INTEGER */ +#line 114 "sql.y" { pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, 
TSDB_ALTER_USER_ENABLE, &yymsp[0].minor.yy0); } +#line 6183 "sql.c" break; case 36: /* cmd ::= ALTER USER user_name SYSINFO NK_INTEGER */ +#line 115 "sql.y" { pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_SYSINFO, &yymsp[0].minor.yy0); } +#line 6188 "sql.c" break; case 37: /* cmd ::= ALTER USER user_name CREATEDB NK_INTEGER */ +#line 116 "sql.y" { pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_CREATEDB, &yymsp[0].minor.yy0); } +#line 6193 "sql.c" break; case 38: /* cmd ::= ALTER USER user_name ADD white_list */ +#line 117 "sql.y" { pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_ADD_WHITE_LIST, yymsp[0].minor.yy946); } +#line 6198 "sql.c" break; case 39: /* cmd ::= ALTER USER user_name DROP white_list */ +#line 118 "sql.y" { pCxt->pRootNode = createAlterUserStmt(pCxt, &yymsp[-2].minor.yy557, TSDB_ALTER_USER_DROP_WHITE_LIST, yymsp[0].minor.yy946); } +#line 6203 "sql.c" break; case 40: /* cmd ::= DROP USER user_name */ +#line 119 "sql.y" { pCxt->pRootNode = createDropUserStmt(pCxt, &yymsp[0].minor.yy557); } +#line 6208 "sql.c" break; case 41: /* sysinfo_opt ::= */ +#line 123 "sql.y" { yymsp[1].minor.yy815 = 1; } +#line 6213 "sql.c" break; case 43: /* cmd ::= GRANT privileges ON priv_level with_clause_opt TO user_name */ +#line 127 "sql.y" { pCxt->pRootNode = createGrantStmt(pCxt, yymsp[-5].minor.yy483, &yymsp[-3].minor.yy723, &yymsp[0].minor.yy557, yymsp[-2].minor.yy974); } +#line 6218 "sql.c" break; case 44: /* cmd ::= REVOKE privileges ON priv_level with_clause_opt FROM user_name */ +#line 128 "sql.y" { pCxt->pRootNode = createRevokeStmt(pCxt, yymsp[-5].minor.yy483, &yymsp[-3].minor.yy723, &yymsp[0].minor.yy557, yymsp[-2].minor.yy974); } +#line 6223 "sql.c" break; case 45: /* privileges ::= ALL */ +#line 132 "sql.y" { yymsp[0].minor.yy483 = PRIVILEGE_TYPE_ALL; } +#line 6228 "sql.c" break; case 46: /* privileges ::= priv_type_list */ case 48: /* priv_type_list ::= priv_type */ yytestcase(yyruleno==48); +#line 133 "sql.y" { yylhsminor.yy483 = yymsp[0].minor.yy483; } +#line 6234 "sql.c" yymsp[0].minor.yy483 = yylhsminor.yy483; break; case 47: /* privileges ::= SUBSCRIBE */ +#line 134 "sql.y" { yymsp[0].minor.yy483 = PRIVILEGE_TYPE_SUBSCRIBE; } +#line 6240 "sql.c" break; case 49: /* priv_type_list ::= priv_type_list NK_COMMA priv_type */ +#line 139 "sql.y" { yylhsminor.yy483 = yymsp[-2].minor.yy483 | yymsp[0].minor.yy483; } +#line 6245 "sql.c" yymsp[-2].minor.yy483 = yylhsminor.yy483; break; case 50: /* priv_type ::= READ */ +#line 143 "sql.y" { yymsp[0].minor.yy483 = PRIVILEGE_TYPE_READ; } +#line 6251 "sql.c" break; case 51: /* priv_type ::= WRITE */ +#line 144 "sql.y" { yymsp[0].minor.yy483 = PRIVILEGE_TYPE_WRITE; } +#line 6256 "sql.c" break; case 52: /* priv_type ::= ALTER */ +#line 145 "sql.y" { yymsp[0].minor.yy483 = PRIVILEGE_TYPE_ALTER; } +#line 6261 "sql.c" break; case 53: /* priv_level ::= NK_STAR NK_DOT NK_STAR */ +#line 149 "sql.y" { yylhsminor.yy723.first = yymsp[-2].minor.yy0; yylhsminor.yy723.second = yymsp[0].minor.yy0; } +#line 6266 "sql.c" yymsp[-2].minor.yy723 = yylhsminor.yy723; break; case 54: /* priv_level ::= db_name NK_DOT NK_STAR */ +#line 150 "sql.y" { yylhsminor.yy723.first = yymsp[-2].minor.yy557; yylhsminor.yy723.second = yymsp[0].minor.yy0; } +#line 6272 "sql.c" yymsp[-2].minor.yy723 = yylhsminor.yy723; break; case 55: /* priv_level ::= db_name NK_DOT table_name */ +#line 151 "sql.y" { yylhsminor.yy723.first = yymsp[-2].minor.yy557; yylhsminor.yy723.second 
= yymsp[0].minor.yy557; } +#line 6278 "sql.c" yymsp[-2].minor.yy723 = yylhsminor.yy723; break; case 56: /* priv_level ::= topic_name */ +#line 152 "sql.y" { yylhsminor.yy723.first = yymsp[0].minor.yy557; yylhsminor.yy723.second = nil_token; } +#line 6284 "sql.c" yymsp[0].minor.yy723 = yylhsminor.yy723; break; case 57: /* with_clause_opt ::= */ @@ -5799,62 +6303,98 @@ static YYACTIONTYPE yy_reduce( case 748: /* every_opt ::= */ yytestcase(yyruleno==748); case 761: /* slimit_clause_opt ::= */ yytestcase(yyruleno==761); case 765: /* limit_clause_opt ::= */ yytestcase(yyruleno==765); +#line 154 "sql.y" { yymsp[1].minor.yy974 = NULL; } +#line 6308 "sql.c" break; case 58: /* with_clause_opt ::= WITH search_condition */ case 656: /* from_clause_opt ::= FROM table_reference_list */ yytestcase(yyruleno==656); case 683: /* join_on_clause_opt ::= ON search_condition */ yytestcase(yyruleno==683); case 706: /* where_clause_opt ::= WHERE search_condition */ yytestcase(yyruleno==706); case 744: /* having_clause_opt ::= HAVING search_condition */ yytestcase(yyruleno==744); +#line 155 "sql.y" { yymsp[-1].minor.yy974 = yymsp[0].minor.yy974; } +#line 6317 "sql.c" break; case 59: /* cmd ::= CREATE ENCRYPT_KEY NK_STRING */ +#line 158 "sql.y" { pCxt->pRootNode = createEncryptKeyStmt(pCxt, &yymsp[0].minor.yy0); } +#line 6322 "sql.c" break; case 60: /* cmd ::= CREATE ANODE NK_STRING */ +#line 161 "sql.y" { pCxt->pRootNode = createCreateAnodeStmt(pCxt, &yymsp[0].minor.yy0); } +#line 6327 "sql.c" break; case 61: /* cmd ::= UPDATE ANODE NK_INTEGER */ +#line 162 "sql.y" { pCxt->pRootNode = createUpdateAnodeStmt(pCxt, &yymsp[0].minor.yy0, false); } +#line 6332 "sql.c" break; case 62: /* cmd ::= UPDATE ALL ANODES */ +#line 163 "sql.y" { pCxt->pRootNode = createUpdateAnodeStmt(pCxt, NULL, true); } +#line 6337 "sql.c" break; case 63: /* cmd ::= DROP ANODE NK_INTEGER */ +#line 164 "sql.y" { pCxt->pRootNode = createDropAnodeStmt(pCxt, &yymsp[0].minor.yy0); } +#line 6342 "sql.c" break; case 64: /* cmd ::= CREATE DNODE dnode_endpoint */ +#line 167 "sql.y" { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[0].minor.yy557, NULL); } +#line 6347 "sql.c" break; case 65: /* cmd ::= CREATE DNODE dnode_endpoint PORT NK_INTEGER */ +#line 168 "sql.y" { pCxt->pRootNode = createCreateDnodeStmt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0); } +#line 6352 "sql.c" break; case 66: /* cmd ::= DROP DNODE NK_INTEGER force_opt */ +#line 169 "sql.y" { pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy569, false); } +#line 6357 "sql.c" break; case 67: /* cmd ::= DROP DNODE dnode_endpoint force_opt */ +#line 170 "sql.y" { pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy569, false); } +#line 6362 "sql.c" break; case 68: /* cmd ::= DROP DNODE NK_INTEGER unsafe_opt */ +#line 171 "sql.y" { pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy0, false, yymsp[0].minor.yy569); } +#line 6367 "sql.c" break; case 69: /* cmd ::= DROP DNODE dnode_endpoint unsafe_opt */ +#line 172 "sql.y" { pCxt->pRootNode = createDropDnodeStmt(pCxt, &yymsp[-1].minor.yy557, false, yymsp[0].minor.yy569); } +#line 6372 "sql.c" break; case 70: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING */ +#line 173 "sql.y" { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, NULL); } +#line 6377 "sql.c" break; case 71: /* cmd ::= ALTER DNODE NK_INTEGER NK_STRING NK_STRING */ +#line 174 "sql.y" { pCxt->pRootNode = createAlterDnodeStmt(pCxt, &yymsp[-2].minor.yy0, 
&yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 6382 "sql.c" break; case 72: /* cmd ::= ALTER ALL DNODES NK_STRING */ +#line 175 "sql.y" { pCxt->pRootNode = createAlterDnodeStmt(pCxt, NULL, &yymsp[0].minor.yy0, NULL); } +#line 6387 "sql.c" break; case 73: /* cmd ::= ALTER ALL DNODES NK_STRING NK_STRING */ +#line 176 "sql.y" { pCxt->pRootNode = createAlterDnodeStmt(pCxt, NULL, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 6392 "sql.c" break; case 74: /* cmd ::= RESTORE DNODE NK_INTEGER */ +#line 177 "sql.y" { pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_DNODE_STMT, &yymsp[0].minor.yy0); } +#line 6397 "sql.c" break; case 75: /* dnode_endpoint ::= NK_STRING */ case 76: /* dnode_endpoint ::= NK_ID */ yytestcase(yyruleno==76); @@ -5893,7 +6433,9 @@ static YYACTIONTYPE yy_reduce( case 611: /* star_func ::= FIRST */ yytestcase(yyruleno==611); case 612: /* star_func ::= LAST */ yytestcase(yyruleno==612); case 613: /* star_func ::= LAST_ROW */ yytestcase(yyruleno==613); +#line 181 "sql.y" { yylhsminor.yy557 = yymsp[0].minor.yy0; } +#line 6438 "sql.c" yymsp[0].minor.yy557 = yylhsminor.yy557; break; case 78: /* force_opt ::= */ @@ -5906,7 +6448,9 @@ static YYACTIONTYPE yy_reduce( case 437: /* ignore_opt ::= */ yytestcase(yyruleno==437); case 693: /* tag_mode_opt ::= */ yytestcase(yyruleno==693); case 695: /* set_quantifier_opt ::= */ yytestcase(yyruleno==695); +#line 187 "sql.y" { yymsp[1].minor.yy569 = false; } +#line 6453 "sql.c" break; case 79: /* force_opt ::= FORCE */ case 80: /* unsafe_opt ::= UNSAFE */ yytestcase(yyruleno==80); @@ -5915,318 +6459,486 @@ static YYACTIONTYPE yy_reduce( case 401: /* agg_func_opt ::= AGGREGATE */ yytestcase(yyruleno==401); case 694: /* tag_mode_opt ::= TAGS */ yytestcase(yyruleno==694); case 696: /* set_quantifier_opt ::= DISTINCT */ yytestcase(yyruleno==696); +#line 188 "sql.y" { yymsp[0].minor.yy569 = true; } +#line 6464 "sql.c" break; case 81: /* cmd ::= ALTER CLUSTER NK_STRING */ +#line 195 "sql.y" { pCxt->pRootNode = createAlterClusterStmt(pCxt, &yymsp[0].minor.yy0, NULL); } +#line 6469 "sql.c" break; case 82: /* cmd ::= ALTER CLUSTER NK_STRING NK_STRING */ +#line 196 "sql.y" { pCxt->pRootNode = createAlterClusterStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 6474 "sql.c" break; case 83: /* cmd ::= ALTER LOCAL NK_STRING */ +#line 199 "sql.y" { pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[0].minor.yy0, NULL); } +#line 6479 "sql.c" break; case 84: /* cmd ::= ALTER LOCAL NK_STRING NK_STRING */ +#line 200 "sql.y" { pCxt->pRootNode = createAlterLocalStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 6484 "sql.c" break; case 85: /* cmd ::= CREATE QNODE ON DNODE NK_INTEGER */ +#line 203 "sql.y" { pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_QNODE_STMT, &yymsp[0].minor.yy0); } +#line 6489 "sql.c" break; case 86: /* cmd ::= DROP QNODE ON DNODE NK_INTEGER */ +#line 204 "sql.y" { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_QNODE_STMT, &yymsp[0].minor.yy0); } +#line 6494 "sql.c" break; case 87: /* cmd ::= RESTORE QNODE ON DNODE NK_INTEGER */ +#line 205 "sql.y" { pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_QNODE_STMT, &yymsp[0].minor.yy0); } +#line 6499 "sql.c" break; case 88: /* cmd ::= CREATE BNODE ON DNODE NK_INTEGER */ +#line 208 "sql.y" { pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_BNODE_STMT, &yymsp[0].minor.yy0); } +#line 6504 "sql.c" break; case 89: /* cmd ::= DROP BNODE ON DNODE 
NK_INTEGER */ +#line 209 "sql.y" { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_BNODE_STMT, &yymsp[0].minor.yy0); } +#line 6509 "sql.c" break; case 90: /* cmd ::= CREATE SNODE ON DNODE NK_INTEGER */ +#line 212 "sql.y" { pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_SNODE_STMT, &yymsp[0].minor.yy0); } +#line 6514 "sql.c" break; case 91: /* cmd ::= DROP SNODE ON DNODE NK_INTEGER */ +#line 213 "sql.y" { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_SNODE_STMT, &yymsp[0].minor.yy0); } +#line 6519 "sql.c" break; case 92: /* cmd ::= CREATE MNODE ON DNODE NK_INTEGER */ +#line 216 "sql.y" { pCxt->pRootNode = createCreateComponentNodeStmt(pCxt, QUERY_NODE_CREATE_MNODE_STMT, &yymsp[0].minor.yy0); } +#line 6524 "sql.c" break; case 93: /* cmd ::= DROP MNODE ON DNODE NK_INTEGER */ +#line 217 "sql.y" { pCxt->pRootNode = createDropComponentNodeStmt(pCxt, QUERY_NODE_DROP_MNODE_STMT, &yymsp[0].minor.yy0); } +#line 6529 "sql.c" break; case 94: /* cmd ::= RESTORE MNODE ON DNODE NK_INTEGER */ +#line 218 "sql.y" { pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_MNODE_STMT, &yymsp[0].minor.yy0); } +#line 6534 "sql.c" break; case 95: /* cmd ::= RESTORE VNODE ON DNODE NK_INTEGER */ +#line 221 "sql.y" { pCxt->pRootNode = createRestoreComponentNodeStmt(pCxt, QUERY_NODE_RESTORE_VNODE_STMT, &yymsp[0].minor.yy0); } +#line 6539 "sql.c" break; case 96: /* cmd ::= CREATE DATABASE not_exists_opt db_name db_options */ +#line 224 "sql.y" { pCxt->pRootNode = createCreateDatabaseStmt(pCxt, yymsp[-2].minor.yy569, &yymsp[-1].minor.yy557, yymsp[0].minor.yy974); } +#line 6544 "sql.c" break; case 97: /* cmd ::= DROP DATABASE exists_opt db_name */ +#line 225 "sql.y" { pCxt->pRootNode = createDropDatabaseStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } +#line 6549 "sql.c" break; case 98: /* cmd ::= USE db_name */ +#line 226 "sql.y" { pCxt->pRootNode = createUseDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } +#line 6554 "sql.c" break; case 99: /* cmd ::= ALTER DATABASE db_name alter_db_options */ +#line 227 "sql.y" { pCxt->pRootNode = createAlterDatabaseStmt(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy974); } +#line 6559 "sql.c" break; case 100: /* cmd ::= FLUSH DATABASE db_name */ +#line 228 "sql.y" { pCxt->pRootNode = createFlushDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } +#line 6564 "sql.c" break; case 101: /* cmd ::= TRIM DATABASE db_name speed_opt */ +#line 229 "sql.y" { pCxt->pRootNode = createTrimDatabaseStmt(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy904); } +#line 6569 "sql.c" break; case 102: /* cmd ::= S3MIGRATE DATABASE db_name */ +#line 230 "sql.y" { pCxt->pRootNode = createS3MigrateDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } +#line 6574 "sql.c" break; case 103: /* cmd ::= COMPACT DATABASE db_name start_opt end_opt */ +#line 231 "sql.y" { pCxt->pRootNode = createCompactStmt(pCxt, &yymsp[-2].minor.yy557, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 6579 "sql.c" break; case 104: /* not_exists_opt ::= IF NOT EXISTS */ +#line 235 "sql.y" { yymsp[-2].minor.yy569 = true; } +#line 6584 "sql.c" break; case 106: /* exists_opt ::= IF EXISTS */ case 407: /* or_replace_opt ::= OR REPLACE */ yytestcase(yyruleno==407); case 438: /* ignore_opt ::= IGNORE UNTREATED */ yytestcase(yyruleno==438); +#line 240 "sql.y" { yymsp[-1].minor.yy569 = true; } +#line 6591 "sql.c" break; case 108: /* db_options ::= */ +#line 243 "sql.y" { yymsp[1].minor.yy974 = createDefaultDatabaseOptions(pCxt); } +#line 6596 "sql.c" break; case 
109: /* db_options ::= db_options BUFFER NK_INTEGER */ +#line 244 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_BUFFER, &yymsp[0].minor.yy0); } +#line 6601 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 110: /* db_options ::= db_options CACHEMODEL NK_STRING */ +#line 245 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_CACHEMODEL, &yymsp[0].minor.yy0); } +#line 6607 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 111: /* db_options ::= db_options CACHESIZE NK_INTEGER */ +#line 246 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_CACHESIZE, &yymsp[0].minor.yy0); } +#line 6613 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 112: /* db_options ::= db_options COMP NK_INTEGER */ +#line 247 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_COMP, &yymsp[0].minor.yy0); } +#line 6619 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 113: /* db_options ::= db_options DURATION NK_INTEGER */ case 114: /* db_options ::= db_options DURATION NK_VARIABLE */ yytestcase(yyruleno==114); +#line 248 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_DAYS, &yymsp[0].minor.yy0); } +#line 6626 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 115: /* db_options ::= db_options MAXROWS NK_INTEGER */ +#line 250 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_MAXROWS, &yymsp[0].minor.yy0); } +#line 6632 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 116: /* db_options ::= db_options MINROWS NK_INTEGER */ +#line 251 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_MINROWS, &yymsp[0].minor.yy0); } +#line 6638 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 117: /* db_options ::= db_options KEEP integer_list */ case 118: /* db_options ::= db_options KEEP variable_list */ yytestcase(yyruleno==118); +#line 252 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_KEEP, yymsp[0].minor.yy946); } +#line 6645 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 119: /* db_options ::= db_options PAGES NK_INTEGER */ +#line 254 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_PAGES, &yymsp[0].minor.yy0); } +#line 6651 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 120: /* db_options ::= db_options PAGESIZE NK_INTEGER */ +#line 255 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_PAGESIZE, &yymsp[0].minor.yy0); } +#line 6657 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 121: /* db_options ::= db_options TSDB_PAGESIZE NK_INTEGER */ +#line 256 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_TSDB_PAGESIZE, &yymsp[0].minor.yy0); } +#line 6663 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 122: /* db_options ::= db_options PRECISION NK_STRING */ +#line 257 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_PRECISION, &yymsp[0].minor.yy0); } +#line 6669 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 123: /* db_options ::= db_options REPLICA NK_INTEGER */ +#line 258 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_REPLICA, &yymsp[0].minor.yy0); } +#line 6675 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; 
break; case 124: /* db_options ::= db_options VGROUPS NK_INTEGER */ +#line 260 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_VGROUPS, &yymsp[0].minor.yy0); } +#line 6681 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 125: /* db_options ::= db_options SINGLE_STABLE NK_INTEGER */ +#line 261 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_SINGLE_STABLE, &yymsp[0].minor.yy0); } +#line 6687 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 126: /* db_options ::= db_options RETENTIONS retention_list */ +#line 262 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_RETENTIONS, yymsp[0].minor.yy946); } +#line 6693 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 127: /* db_options ::= db_options SCHEMALESS NK_INTEGER */ +#line 263 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_SCHEMALESS, &yymsp[0].minor.yy0); } +#line 6699 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 128: /* db_options ::= db_options WAL_LEVEL NK_INTEGER */ +#line 264 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL, &yymsp[0].minor.yy0); } +#line 6705 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 129: /* db_options ::= db_options WAL_FSYNC_PERIOD NK_INTEGER */ +#line 265 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_FSYNC, &yymsp[0].minor.yy0); } +#line 6711 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 130: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_INTEGER */ +#line 266 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_RETENTION_PERIOD, &yymsp[0].minor.yy0); } +#line 6717 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 131: /* db_options ::= db_options WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ +#line 267 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-3].minor.yy974, DB_OPTION_WAL_RETENTION_PERIOD, &t); } +#line 6727 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 132: /* db_options ::= db_options WAL_RETENTION_SIZE NK_INTEGER */ +#line 272 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_RETENTION_SIZE, &yymsp[0].minor.yy0); } +#line 6733 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 133: /* db_options ::= db_options WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ +#line 273 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-3].minor.yy974, DB_OPTION_WAL_RETENTION_SIZE, &t); } +#line 6743 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 134: /* db_options ::= db_options WAL_ROLL_PERIOD NK_INTEGER */ +#line 278 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_ROLL_PERIOD, &yymsp[0].minor.yy0); } +#line 6749 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 135: /* db_options ::= db_options WAL_SEGMENT_SIZE NK_INTEGER */ +#line 279 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_WAL_SEGMENT_SIZE, &yymsp[0].minor.yy0); } +#line 6755 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 136: /* db_options ::= db_options STT_TRIGGER NK_INTEGER 
*/ +#line 280 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_STT_TRIGGER, &yymsp[0].minor.yy0); } +#line 6761 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 137: /* db_options ::= db_options TABLE_PREFIX signed */ +#line 281 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_TABLE_PREFIX, yymsp[0].minor.yy974); } +#line 6767 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 138: /* db_options ::= db_options TABLE_SUFFIX signed */ +#line 282 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_TABLE_SUFFIX, yymsp[0].minor.yy974); } +#line 6773 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; - case 139: /* db_options ::= db_options S3_CHUNKSIZE NK_INTEGER */ -{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_S3_CHUNKSIZE, &yymsp[0].minor.yy0); } + case 139: /* db_options ::= db_options S3_CHUNKPAGES NK_INTEGER */ +#line 283 "sql.y" +{ yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_S3_CHUNKPAGES, &yymsp[0].minor.yy0); } +#line 6779 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 140: /* db_options ::= db_options S3_KEEPLOCAL NK_INTEGER */ case 141: /* db_options ::= db_options S3_KEEPLOCAL NK_VARIABLE */ yytestcase(yyruleno==141); +#line 284 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_S3_KEEPLOCAL, &yymsp[0].minor.yy0); } +#line 6786 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 142: /* db_options ::= db_options S3_COMPACT NK_INTEGER */ +#line 286 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_S3_COMPACT, &yymsp[0].minor.yy0); } +#line 6792 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 143: /* db_options ::= db_options KEEP_TIME_OFFSET NK_INTEGER */ +#line 287 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_KEEP_TIME_OFFSET, &yymsp[0].minor.yy0); } +#line 6798 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 144: /* db_options ::= db_options ENCRYPT_ALGORITHM NK_STRING */ +#line 288 "sql.y" { yylhsminor.yy974 = setDatabaseOption(pCxt, yymsp[-2].minor.yy974, DB_OPTION_ENCRYPT_ALGORITHM, &yymsp[0].minor.yy0); } +#line 6804 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 145: /* alter_db_options ::= alter_db_option */ +#line 290 "sql.y" { yylhsminor.yy974 = createAlterDatabaseOptions(pCxt); yylhsminor.yy974 = setAlterDatabaseOption(pCxt, yylhsminor.yy974, &yymsp[0].minor.yy683); } +#line 6810 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 146: /* alter_db_options ::= alter_db_options alter_db_option */ +#line 291 "sql.y" { yylhsminor.yy974 = setAlterDatabaseOption(pCxt, yymsp[-1].minor.yy974, &yymsp[0].minor.yy683); } +#line 6816 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 147: /* alter_db_option ::= BUFFER NK_INTEGER */ +#line 295 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_BUFFER; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6822 "sql.c" break; case 148: /* alter_db_option ::= CACHEMODEL NK_STRING */ +#line 296 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_CACHEMODEL; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6827 "sql.c" break; case 149: /* alter_db_option ::= CACHESIZE NK_INTEGER */ +#line 297 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_CACHESIZE; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6832 "sql.c" break; case 150: /* 
alter_db_option ::= WAL_FSYNC_PERIOD NK_INTEGER */ +#line 298 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_FSYNC; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6837 "sql.c" break; case 151: /* alter_db_option ::= KEEP integer_list */ case 152: /* alter_db_option ::= KEEP variable_list */ yytestcase(yyruleno==152); +#line 299 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_KEEP; yymsp[-1].minor.yy683.pList = yymsp[0].minor.yy946; } +#line 6843 "sql.c" break; case 153: /* alter_db_option ::= PAGES NK_INTEGER */ +#line 301 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_PAGES; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6848 "sql.c" break; case 154: /* alter_db_option ::= REPLICA NK_INTEGER */ +#line 302 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_REPLICA; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6853 "sql.c" break; case 155: /* alter_db_option ::= WAL_LEVEL NK_INTEGER */ +#line 304 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_WAL; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6858 "sql.c" break; case 156: /* alter_db_option ::= STT_TRIGGER NK_INTEGER */ +#line 305 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_STT_TRIGGER; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6863 "sql.c" break; case 157: /* alter_db_option ::= MINROWS NK_INTEGER */ +#line 306 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_MINROWS; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6868 "sql.c" break; case 158: /* alter_db_option ::= WAL_RETENTION_PERIOD NK_INTEGER */ +#line 307 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_WAL_RETENTION_PERIOD; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6873 "sql.c" break; case 159: /* alter_db_option ::= WAL_RETENTION_PERIOD NK_MINUS NK_INTEGER */ +#line 308 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yymsp[-2].minor.yy683.type = DB_OPTION_WAL_RETENTION_PERIOD; yymsp[-2].minor.yy683.val = t; } +#line 6882 "sql.c" break; case 160: /* alter_db_option ::= WAL_RETENTION_SIZE NK_INTEGER */ +#line 313 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_WAL_RETENTION_SIZE; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6887 "sql.c" break; case 161: /* alter_db_option ::= WAL_RETENTION_SIZE NK_MINUS NK_INTEGER */ +#line 314 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yymsp[-2].minor.yy683.type = DB_OPTION_WAL_RETENTION_SIZE; yymsp[-2].minor.yy683.val = t; } +#line 6896 "sql.c" break; case 162: /* alter_db_option ::= S3_KEEPLOCAL NK_INTEGER */ case 163: /* alter_db_option ::= S3_KEEPLOCAL NK_VARIABLE */ yytestcase(yyruleno==163); +#line 319 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_S3_KEEPLOCAL; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6902 "sql.c" break; case 164: /* alter_db_option ::= S3_COMPACT NK_INTEGER */ +#line 321 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_S3_COMPACT, yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6907 "sql.c" break; case 165: /* alter_db_option ::= KEEP_TIME_OFFSET NK_INTEGER */ +#line 322 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_KEEP_TIME_OFFSET; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6912 "sql.c" break; case 166: /* alter_db_option ::= ENCRYPT_ALGORITHM NK_STRING */ +#line 323 "sql.y" { yymsp[-1].minor.yy683.type = DB_OPTION_ENCRYPT_ALGORITHM; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 6917 "sql.c" break; case 167: /* 
integer_list ::= NK_INTEGER */ +#line 327 "sql.y" { yylhsminor.yy946 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } +#line 6922 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 168: /* integer_list ::= integer_list NK_COMMA NK_INTEGER */ case 452: /* dnode_list ::= dnode_list DNODE NK_INTEGER */ yytestcase(yyruleno==452); +#line 328 "sql.y" { yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } +#line 6929 "sql.c" yymsp[-2].minor.yy946 = yylhsminor.yy946; break; case 169: /* variable_list ::= NK_VARIABLE */ +#line 332 "sql.y" { yylhsminor.yy946 = createNodeList(pCxt, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } +#line 6935 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 170: /* variable_list ::= variable_list NK_COMMA NK_VARIABLE */ +#line 333 "sql.y" { yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } +#line 6941 "sql.c" yymsp[-2].minor.yy946 = yylhsminor.yy946; break; case 171: /* retention_list ::= retention */ @@ -6246,7 +6958,9 @@ static YYACTIONTYPE yy_reduce( case 698: /* select_list ::= select_item */ yytestcase(yyruleno==698); case 709: /* partition_list ::= partition_item */ yytestcase(yyruleno==709); case 772: /* sort_specification_list ::= sort_specification */ yytestcase(yyruleno==772); +#line 337 "sql.y" { yylhsminor.yy946 = createNodeList(pCxt, yymsp[0].minor.yy974); } +#line 6963 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 172: /* retention_list ::= retention_list NK_COMMA retention */ @@ -6264,762 +6978,1198 @@ static YYACTIONTYPE yy_reduce( case 699: /* select_list ::= select_list NK_COMMA select_item */ yytestcase(yyruleno==699); case 710: /* partition_list ::= partition_list NK_COMMA partition_item */ yytestcase(yyruleno==710); case 773: /* sort_specification_list ::= sort_specification_list NK_COMMA sort_specification */ yytestcase(yyruleno==773); +#line 338 "sql.y" { yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, yymsp[0].minor.yy974); } +#line 6983 "sql.c" yymsp[-2].minor.yy946 = yylhsminor.yy946; break; case 173: /* retention ::= NK_VARIABLE NK_COLON NK_VARIABLE */ case 174: /* retention ::= NK_MINUS NK_COLON NK_VARIABLE */ yytestcase(yyruleno==174); +#line 340 "sql.y" { yylhsminor.yy974 = createNodeListNodeEx(pCxt, createDurationValueNode(pCxt, &yymsp[-2].minor.yy0), createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } +#line 6990 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 175: /* speed_opt ::= */ case 402: /* bufsize_opt ::= */ yytestcase(yyruleno==402); +#line 345 "sql.y" { yymsp[1].minor.yy904 = 0; } +#line 6997 "sql.c" break; case 176: /* speed_opt ::= BWLIMIT NK_INTEGER */ case 403: /* bufsize_opt ::= BUFSIZE NK_INTEGER */ yytestcase(yyruleno==403); +#line 346 "sql.y" { yymsp[-1].minor.yy904 = taosStr2Int32(yymsp[0].minor.yy0.z, NULL, 10); } +#line 7003 "sql.c" break; case 178: /* start_opt ::= START WITH NK_INTEGER */ case 182: /* end_opt ::= END WITH NK_INTEGER */ yytestcase(yyruleno==182); +#line 349 "sql.y" { yymsp[-2].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0); } +#line 7009 "sql.c" break; case 179: /* start_opt ::= START WITH NK_STRING */ case 183: /* end_opt ::= END WITH NK_STRING */ yytestcase(yyruleno==183); +#line 350 "sql.y" { yymsp[-2].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } +#line 7015 
"sql.c" break; case 180: /* start_opt ::= START WITH TIMESTAMP NK_STRING */ case 184: /* end_opt ::= END WITH TIMESTAMP NK_STRING */ yytestcase(yyruleno==184); +#line 351 "sql.y" { yymsp[-3].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } +#line 7021 "sql.c" break; case 185: /* cmd ::= CREATE TABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def_opt table_options */ case 188: /* cmd ::= CREATE STABLE not_exists_opt full_table_name NK_LP column_def_list NK_RP tags_def table_options */ yytestcase(yyruleno==188); +#line 360 "sql.y" { pCxt->pRootNode = createCreateTableStmt(pCxt, yymsp[-6].minor.yy569, yymsp[-5].minor.yy974, yymsp[-3].minor.yy946, yymsp[-1].minor.yy946, yymsp[0].minor.yy974); } +#line 7027 "sql.c" break; case 186: /* cmd ::= CREATE TABLE multi_create_clause */ +#line 361 "sql.y" { pCxt->pRootNode = createCreateMultiTableStmt(pCxt, yymsp[0].minor.yy946); } +#line 7032 "sql.c" break; case 187: /* cmd ::= CREATE TABLE not_exists_opt USING full_table_name NK_LP tag_list_opt NK_RP FILE NK_STRING */ +#line 363 "sql.y" { pCxt->pRootNode = createCreateSubTableFromFileClause(pCxt, yymsp[-7].minor.yy569, yymsp[-5].minor.yy974, yymsp[-3].minor.yy946, &yymsp[0].minor.yy0); } +#line 7037 "sql.c" break; case 189: /* cmd ::= DROP TABLE with_opt multi_drop_clause */ +#line 366 "sql.y" { pCxt->pRootNode = createDropTableStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy946); } +#line 7042 "sql.c" break; case 190: /* cmd ::= DROP STABLE with_opt exists_opt full_table_name */ +#line 367 "sql.y" { pCxt->pRootNode = createDropSuperTableStmt(pCxt, yymsp[-2].minor.yy569, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } +#line 7047 "sql.c" break; case 191: /* cmd ::= ALTER TABLE alter_table_clause */ case 454: /* cmd ::= query_or_subquery */ yytestcase(yyruleno==454); case 455: /* cmd ::= insert_query */ yytestcase(yyruleno==455); +#line 369 "sql.y" { pCxt->pRootNode = yymsp[0].minor.yy974; } +#line 7054 "sql.c" break; case 192: /* cmd ::= ALTER STABLE alter_table_clause */ +#line 370 "sql.y" { pCxt->pRootNode = setAlterSuperTableType(yymsp[0].minor.yy974); } +#line 7059 "sql.c" break; case 193: /* alter_table_clause ::= full_table_name alter_table_options */ +#line 372 "sql.y" { yylhsminor.yy974 = createAlterTableModifyOptions(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 7064 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 194: /* alter_table_clause ::= full_table_name ADD COLUMN column_name type_name column_options */ +#line 374 "sql.y" { yylhsminor.yy974 = createAlterTableAddModifyColOptions2(pCxt, yymsp[-5].minor.yy974, TSDB_ALTER_TABLE_ADD_COLUMN, &yymsp[-2].minor.yy557, yymsp[-1].minor.yy424, yymsp[0].minor.yy974); } +#line 7070 "sql.c" yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 195: /* alter_table_clause ::= full_table_name DROP COLUMN column_name */ +#line 375 "sql.y" { yylhsminor.yy974 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy974, TSDB_ALTER_TABLE_DROP_COLUMN, &yymsp[0].minor.yy557); } +#line 7076 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 196: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name type_name */ +#line 377 "sql.y" { yylhsminor.yy974 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424); } +#line 7082 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 197: /* alter_table_clause ::= full_table_name MODIFY COLUMN column_name column_options */ 
+#line 379 "sql.y" { yylhsminor.yy974 = createAlterTableAddModifyColOptions(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_COLUMN_COMPRESS, &yymsp[-1].minor.yy557, yymsp[0].minor.yy974); } +#line 7088 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 198: /* alter_table_clause ::= full_table_name RENAME COLUMN column_name column_name */ +#line 381 "sql.y" { yylhsminor.yy974 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } +#line 7094 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 199: /* alter_table_clause ::= full_table_name ADD TAG column_name type_name */ +#line 383 "sql.y" { yylhsminor.yy974 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_ADD_TAG, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424); } +#line 7100 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 200: /* alter_table_clause ::= full_table_name DROP TAG column_name */ +#line 384 "sql.y" { yylhsminor.yy974 = createAlterTableDropCol(pCxt, yymsp[-3].minor.yy974, TSDB_ALTER_TABLE_DROP_TAG, &yymsp[0].minor.yy557); } +#line 7106 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 201: /* alter_table_clause ::= full_table_name MODIFY TAG column_name type_name */ +#line 386 "sql.y" { yylhsminor.yy974 = createAlterTableAddModifyCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_TAG_BYTES, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424); } +#line 7112 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 202: /* alter_table_clause ::= full_table_name RENAME TAG column_name column_name */ +#line 388 "sql.y" { yylhsminor.yy974 = createAlterTableRenameCol(pCxt, yymsp[-4].minor.yy974, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } +#line 7118 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 203: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ tags_literal */ +#line 390 "sql.y" { yylhsminor.yy974 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy974, &yymsp[-2].minor.yy557, yymsp[0].minor.yy974); } +#line 7124 "sql.c" yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 205: /* multi_create_clause ::= multi_create_clause create_subtable_clause */ case 623: /* when_then_list ::= when_then_list when_then_expr */ yytestcase(yyruleno==623); +#line 395 "sql.y" { yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-1].minor.yy946, yymsp[0].minor.yy974); } +#line 7131 "sql.c" yymsp[-1].minor.yy946 = yylhsminor.yy946; break; case 206: /* create_subtable_clause ::= not_exists_opt full_table_name USING full_table_name specific_cols_opt TAGS NK_LP tags_literal_list NK_RP table_options */ +#line 399 "sql.y" { yylhsminor.yy974 = createCreateSubTableClause(pCxt, yymsp[-9].minor.yy569, yymsp[-8].minor.yy974, yymsp[-6].minor.yy974, yymsp[-5].minor.yy946, yymsp[-2].minor.yy946, yymsp[0].minor.yy974); } +#line 7137 "sql.c" yymsp[-9].minor.yy974 = yylhsminor.yy974; break; case 209: /* drop_table_clause ::= exists_opt full_table_name */ +#line 406 "sql.y" { yylhsminor.yy974 = createDropTableClause(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } +#line 7143 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 213: /* specific_cols_opt ::= NK_LP col_name_list NK_RP */ case 417: /* col_list_opt ::= NK_LP column_stream_def_list NK_RP */ yytestcase(yyruleno==417); +#line 416 "sql.y" { yymsp[-2].minor.yy946 = yymsp[-1].minor.yy946; } +#line 7150 "sql.c" break; case 214: /* full_table_name ::= table_name 
*/ case 358: /* full_tsma_name ::= tsma_name */ yytestcase(yyruleno==358); +#line 418 "sql.y" { yylhsminor.yy974 = createRealTableNode(pCxt, NULL, &yymsp[0].minor.yy557, NULL); } +#line 7156 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 215: /* full_table_name ::= db_name NK_DOT table_name */ case 359: /* full_tsma_name ::= db_name NK_DOT tsma_name */ yytestcase(yyruleno==359); +#line 419 "sql.y" { yylhsminor.yy974 = createRealTableNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557, NULL); } +#line 7163 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 218: /* tag_def ::= column_name type_name */ +#line 425 "sql.y" { yylhsminor.yy974 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy557, yymsp[0].minor.yy424, NULL); } +#line 7169 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 221: /* column_def ::= column_name type_name column_options */ +#line 433 "sql.y" { yylhsminor.yy974 = createColumnDefNode(pCxt, &yymsp[-2].minor.yy557, yymsp[-1].minor.yy424, yymsp[0].minor.yy974); } +#line 7175 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 222: /* type_name ::= BOOL */ +#line 437 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_BOOL); } +#line 7181 "sql.c" break; case 223: /* type_name ::= TINYINT */ +#line 438 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_TINYINT); } +#line 7186 "sql.c" break; case 224: /* type_name ::= SMALLINT */ +#line 439 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_SMALLINT); } +#line 7191 "sql.c" break; case 225: /* type_name ::= INT */ case 226: /* type_name ::= INTEGER */ yytestcase(yyruleno==226); +#line 440 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_INT); } +#line 7197 "sql.c" break; case 227: /* type_name ::= BIGINT */ +#line 442 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_BIGINT); } +#line 7202 "sql.c" break; case 228: /* type_name ::= FLOAT */ +#line 443 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_FLOAT); } +#line 7207 "sql.c" break; case 229: /* type_name ::= DOUBLE */ +#line 444 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_DOUBLE); } +#line 7212 "sql.c" break; case 230: /* type_name ::= BINARY NK_LP NK_INTEGER NK_RP */ +#line 445 "sql.y" { yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, &yymsp[-1].minor.yy0); } +#line 7217 "sql.c" break; case 231: /* type_name ::= TIMESTAMP */ +#line 446 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_TIMESTAMP); } +#line 7222 "sql.c" break; case 232: /* type_name ::= NCHAR NK_LP NK_INTEGER NK_RP */ +#line 447 "sql.y" { yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, &yymsp[-1].minor.yy0); } +#line 7227 "sql.c" break; case 233: /* type_name ::= TINYINT UNSIGNED */ +#line 448 "sql.y" { yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_UTINYINT); } +#line 7232 "sql.c" break; case 234: /* type_name ::= SMALLINT UNSIGNED */ +#line 449 "sql.y" { yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_USMALLINT); } +#line 7237 "sql.c" break; case 235: /* type_name ::= INT UNSIGNED */ +#line 450 "sql.y" { yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_UINT); } +#line 7242 "sql.c" break; case 236: /* type_name ::= BIGINT UNSIGNED */ +#line 451 "sql.y" { yymsp[-1].minor.yy424 = createDataType(TSDB_DATA_TYPE_UBIGINT); } +#line 7247 "sql.c" break; case 237: /* type_name ::= JSON */ +#line 452 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_JSON); } +#line 7252 "sql.c" break; case 238: 
/* type_name ::= VARCHAR NK_LP NK_INTEGER NK_RP */ +#line 453 "sql.y" { yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, &yymsp[-1].minor.yy0); } +#line 7257 "sql.c" break; case 239: /* type_name ::= MEDIUMBLOB */ +#line 454 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_MEDIUMBLOB); } +#line 7262 "sql.c" break; case 240: /* type_name ::= BLOB */ +#line 455 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_BLOB); } +#line 7267 "sql.c" break; case 241: /* type_name ::= VARBINARY NK_LP NK_INTEGER NK_RP */ +#line 456 "sql.y" { yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, &yymsp[-1].minor.yy0); } +#line 7272 "sql.c" break; case 242: /* type_name ::= GEOMETRY NK_LP NK_INTEGER NK_RP */ +#line 457 "sql.y" { yymsp[-3].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_GEOMETRY, &yymsp[-1].minor.yy0); } +#line 7277 "sql.c" break; case 243: /* type_name ::= DECIMAL */ +#line 458 "sql.y" { yymsp[0].minor.yy424 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +#line 7282 "sql.c" break; case 244: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_RP */ +#line 459 "sql.y" { yymsp[-3].minor.yy424 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +#line 7287 "sql.c" break; case 245: /* type_name ::= DECIMAL NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ +#line 460 "sql.y" { yymsp[-5].minor.yy424 = createDataType(TSDB_DATA_TYPE_DECIMAL); } +#line 7292 "sql.c" break; case 246: /* type_name_default_len ::= BINARY */ +#line 464 "sql.y" { yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_BINARY, NULL); } +#line 7297 "sql.c" break; case 247: /* type_name_default_len ::= NCHAR */ +#line 465 "sql.y" { yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_NCHAR, NULL); } +#line 7302 "sql.c" break; case 248: /* type_name_default_len ::= VARCHAR */ +#line 466 "sql.y" { yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_VARCHAR, NULL); } +#line 7307 "sql.c" break; case 249: /* type_name_default_len ::= VARBINARY */ +#line 467 "sql.y" { yymsp[0].minor.yy424 = createVarLenDataType(TSDB_DATA_TYPE_VARBINARY, NULL); } +#line 7312 "sql.c" break; case 252: /* tags_def ::= TAGS NK_LP tag_def_list NK_RP */ case 425: /* tag_def_or_ref_opt ::= TAGS NK_LP column_stream_def_list NK_RP */ yytestcase(yyruleno==425); +#line 476 "sql.y" { yymsp[-3].minor.yy946 = yymsp[-1].minor.yy946; } +#line 7318 "sql.c" break; case 253: /* table_options ::= */ +#line 478 "sql.y" { yymsp[1].minor.yy974 = createDefaultTableOptions(pCxt); } +#line 7323 "sql.c" break; case 254: /* table_options ::= table_options COMMENT NK_STRING */ +#line 479 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_COMMENT, &yymsp[0].minor.yy0); } +#line 7328 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 255: /* table_options ::= table_options MAX_DELAY duration_list */ +#line 480 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_MAXDELAY, yymsp[0].minor.yy946); } +#line 7334 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 256: /* table_options ::= table_options WATERMARK duration_list */ +#line 481 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_WATERMARK, yymsp[0].minor.yy946); } +#line 7340 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 257: /* table_options ::= table_options ROLLUP NK_LP rollup_func_list NK_RP */ +#line 482 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-4].minor.yy974, TABLE_OPTION_ROLLUP, yymsp[-1].minor.yy946); } 
+#line 7346 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 258: /* table_options ::= table_options TTL NK_INTEGER */ +#line 483 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_TTL, &yymsp[0].minor.yy0); } +#line 7352 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 259: /* table_options ::= table_options SMA NK_LP col_name_list NK_RP */ +#line 484 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-4].minor.yy974, TABLE_OPTION_SMA, yymsp[-1].minor.yy946); } +#line 7358 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 260: /* table_options ::= table_options DELETE_MARK duration_list */ +#line 485 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-2].minor.yy974, TABLE_OPTION_DELETE_MARK, yymsp[0].minor.yy946); } +#line 7364 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 261: /* alter_table_options ::= alter_table_option */ +#line 487 "sql.y" { yylhsminor.yy974 = createAlterTableOptions(pCxt); yylhsminor.yy974 = setTableOption(pCxt, yylhsminor.yy974, yymsp[0].minor.yy683.type, &yymsp[0].minor.yy683.val); } +#line 7370 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 262: /* alter_table_options ::= alter_table_options alter_table_option */ +#line 488 "sql.y" { yylhsminor.yy974 = setTableOption(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy683.type, &yymsp[0].minor.yy683.val); } +#line 7376 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 263: /* alter_table_option ::= COMMENT NK_STRING */ +#line 492 "sql.y" { yymsp[-1].minor.yy683.type = TABLE_OPTION_COMMENT; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 7382 "sql.c" break; case 264: /* alter_table_option ::= TTL NK_INTEGER */ +#line 493 "sql.y" { yymsp[-1].minor.yy683.type = TABLE_OPTION_TTL; yymsp[-1].minor.yy683.val = yymsp[0].minor.yy0; } +#line 7387 "sql.c" break; case 265: /* duration_list ::= duration_literal */ case 554: /* expression_list ::= expr_or_subquery */ yytestcase(yyruleno==554); +#line 497 "sql.y" { yylhsminor.yy946 = createNodeList(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } +#line 7393 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 266: /* duration_list ::= duration_list NK_COMMA duration_literal */ case 555: /* expression_list ::= expression_list NK_COMMA expr_or_subquery */ yytestcase(yyruleno==555); +#line 498 "sql.y" { yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } +#line 7400 "sql.c" yymsp[-2].minor.yy946 = yylhsminor.yy946; break; case 269: /* rollup_func_name ::= function_name */ +#line 505 "sql.y" { yylhsminor.yy974 = createFunctionNode(pCxt, &yymsp[0].minor.yy557, NULL); } +#line 7406 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 270: /* rollup_func_name ::= FIRST */ case 271: /* rollup_func_name ::= LAST */ yytestcase(yyruleno==271); case 347: /* tag_item ::= QTAGS */ yytestcase(yyruleno==347); +#line 506 "sql.y" { yylhsminor.yy974 = createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL); } +#line 7414 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 274: /* col_name ::= column_name */ case 348: /* tag_item ::= column_name */ yytestcase(yyruleno==348); +#line 514 "sql.y" { yylhsminor.yy974 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy557); } +#line 7421 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 275: /* cmd ::= SHOW DNODES */ +#line 517 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DNODES_STMT); } +#line 7427 "sql.c" 
break; case 276: /* cmd ::= SHOW USERS */ +#line 518 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USERS_STMT); } +#line 7432 "sql.c" break; case 277: /* cmd ::= SHOW USERS FULL */ +#line 519 "sql.y" { pCxt->pRootNode = createShowStmtWithFull(pCxt, QUERY_NODE_SHOW_USERS_FULL_STMT); } +#line 7437 "sql.c" break; case 278: /* cmd ::= SHOW USER PRIVILEGES */ +#line 520 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_USER_PRIVILEGES_STMT); } +#line 7442 "sql.c" break; case 279: /* cmd ::= SHOW db_kind_opt DATABASES */ +#line 521 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_DATABASES_STMT); (void)setShowKind(pCxt, pCxt->pRootNode, yymsp[-1].minor.yy741); } +#line 7450 "sql.c" break; case 280: /* cmd ::= SHOW table_kind_db_name_cond_opt TABLES like_pattern_opt */ +#line 525 "sql.y" { pCxt->pRootNode = createShowTablesStmt(pCxt, yymsp[-2].minor.yy595, yymsp[0].minor.yy974, OP_TYPE_LIKE); } +#line 7457 "sql.c" break; case 281: /* cmd ::= SHOW db_name_cond_opt STABLES like_pattern_opt */ +#line 528 "sql.y" { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_STABLES_STMT, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, OP_TYPE_LIKE); } +#line 7462 "sql.c" break; case 282: /* cmd ::= SHOW db_name_cond_opt VGROUPS */ +#line 529 "sql.y" { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VGROUPS_STMT, yymsp[-1].minor.yy974, NULL, OP_TYPE_LIKE); } +#line 7467 "sql.c" break; case 283: /* cmd ::= SHOW MNODES */ +#line 530 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_MNODES_STMT); } +#line 7472 "sql.c" break; case 284: /* cmd ::= SHOW QNODES */ +#line 532 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QNODES_STMT); } +#line 7477 "sql.c" break; case 285: /* cmd ::= SHOW ANODES */ +#line 533 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ANODES_STMT); } +#line 7482 "sql.c" break; case 286: /* cmd ::= SHOW ANODES FULL */ +#line 534 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ANODES_FULL_STMT); } +#line 7487 "sql.c" break; case 287: /* cmd ::= SHOW ARBGROUPS */ +#line 535 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ARBGROUPS_STMT); } +#line 7492 "sql.c" break; case 288: /* cmd ::= SHOW FUNCTIONS */ +#line 536 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_FUNCTIONS_STMT); } +#line 7497 "sql.c" break; case 289: /* cmd ::= SHOW INDEXES FROM table_name_cond from_db_opt */ +#line 537 "sql.y" { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, yymsp[0].minor.yy974, yymsp[-1].minor.yy974, OP_TYPE_EQUAL); } +#line 7502 "sql.c" break; case 290: /* cmd ::= SHOW INDEXES FROM db_name NK_DOT table_name */ +#line 538 "sql.y" { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_INDEXES_STMT, createIdentifierValueNode(pCxt, &yymsp[-2].minor.yy557), createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557), OP_TYPE_EQUAL); } +#line 7507 "sql.c" break; case 291: /* cmd ::= SHOW STREAMS */ +#line 539 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_STREAMS_STMT); } +#line 7512 "sql.c" break; case 292: /* cmd ::= SHOW ACCOUNTS */ +#line 540 "sql.y" { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_EXPRIE_STATEMENT); } +#line 7517 "sql.c" break; case 293: /* cmd ::= SHOW APPS */ +#line 541 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_APPS_STMT); } +#line 7522 "sql.c" break; case 294: /* cmd ::= SHOW CONNECTIONS */ +#line 542 "sql.y" { pCxt->pRootNode 
= createShowStmt(pCxt, QUERY_NODE_SHOW_CONNECTIONS_STMT); } +#line 7527 "sql.c" break; case 295: /* cmd ::= SHOW LICENCES */ case 296: /* cmd ::= SHOW GRANTS */ yytestcase(yyruleno==296); +#line 543 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LICENCES_STMT); } +#line 7533 "sql.c" break; case 297: /* cmd ::= SHOW GRANTS FULL */ +#line 545 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_GRANTS_FULL_STMT); } +#line 7538 "sql.c" break; case 298: /* cmd ::= SHOW GRANTS LOGS */ +#line 546 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_GRANTS_LOGS_STMT); } +#line 7543 "sql.c" break; case 299: /* cmd ::= SHOW CLUSTER MACHINES */ +#line 547 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_MACHINES_STMT); } +#line 7548 "sql.c" break; case 300: /* cmd ::= SHOW CREATE DATABASE db_name */ +#line 548 "sql.y" { pCxt->pRootNode = createShowCreateDatabaseStmt(pCxt, &yymsp[0].minor.yy557); } +#line 7553 "sql.c" break; case 301: /* cmd ::= SHOW CREATE TABLE full_table_name */ +#line 549 "sql.y" { pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_TABLE_STMT, yymsp[0].minor.yy974); } +#line 7558 "sql.c" break; case 302: /* cmd ::= SHOW CREATE STABLE full_table_name */ +#line 550 "sql.y" { pCxt->pRootNode = createShowCreateTableStmt(pCxt, QUERY_NODE_SHOW_CREATE_STABLE_STMT, yymsp[0].minor.yy974); } +#line 7564 "sql.c" break; case 303: /* cmd ::= SHOW ENCRYPTIONS */ +#line 552 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_ENCRYPTIONS_STMT); } +#line 7569 "sql.c" break; case 304: /* cmd ::= SHOW QUERIES */ +#line 553 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_QUERIES_STMT); } +#line 7574 "sql.c" break; case 305: /* cmd ::= SHOW SCORES */ +#line 554 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT); } +#line 7579 "sql.c" break; case 306: /* cmd ::= SHOW TOPICS */ +#line 555 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT); } +#line 7584 "sql.c" break; case 307: /* cmd ::= SHOW VARIABLES */ case 308: /* cmd ::= SHOW CLUSTER VARIABLES */ yytestcase(yyruleno==308); +#line 556 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLES_STMT); } +#line 7590 "sql.c" break; case 309: /* cmd ::= SHOW LOCAL VARIABLES */ +#line 558 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT); } +#line 7595 "sql.c" break; case 310: /* cmd ::= SHOW DNODE NK_INTEGER VARIABLES like_pattern_opt */ +#line 559 "sql.y" { pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[-2].minor.yy0), yymsp[0].minor.yy974); } +#line 7600 "sql.c" break; case 311: /* cmd ::= SHOW BNODES */ +#line 560 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT); } +#line 7605 "sql.c" break; case 312: /* cmd ::= SHOW SNODES */ +#line 561 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT); } +#line 7610 "sql.c" break; case 313: /* cmd ::= SHOW CLUSTER */ +#line 562 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT); } +#line 7615 "sql.c" break; case 314: /* cmd ::= SHOW TRANSACTIONS */ +#line 563 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TRANSACTIONS_STMT); } +#line 7620 "sql.c" break; case 315: /* cmd ::= SHOW TABLE DISTRIBUTED full_table_name */ +#line 564 "sql.y" { pCxt->pRootNode = createShowTableDistributedStmt(pCxt, yymsp[0].minor.yy974); } +#line 7625 "sql.c" 
break; case 316: /* cmd ::= SHOW CONSUMERS */ +#line 565 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CONSUMERS_STMT); } +#line 7630 "sql.c" break; case 317: /* cmd ::= SHOW SUBSCRIPTIONS */ +#line 566 "sql.y" { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SUBSCRIPTIONS_STMT); } +#line 7635 "sql.c" break; case 318: /* cmd ::= SHOW TAGS FROM table_name_cond from_db_opt */ +#line 567 "sql.y" { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, yymsp[0].minor.yy974, yymsp[-1].minor.yy974, OP_TYPE_EQUAL); } +#line 7640 "sql.c" break; case 319: /* cmd ::= SHOW TAGS FROM db_name NK_DOT table_name */ +#line 568 "sql.y" { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_TAGS_STMT, createIdentifierValueNode(pCxt, &yymsp[-2].minor.yy557), createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557), OP_TYPE_EQUAL); } +#line 7645 "sql.c" break; case 320: /* cmd ::= SHOW TABLE TAGS tag_list_opt FROM table_name_cond from_db_opt */ +#line 569 "sql.y" { pCxt->pRootNode = createShowTableTagsStmt(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy974, yymsp[-3].minor.yy946); } +#line 7650 "sql.c" break; case 321: /* cmd ::= SHOW TABLE TAGS tag_list_opt FROM db_name NK_DOT table_name */ +#line 570 "sql.y" { pCxt->pRootNode = createShowTableTagsStmt(pCxt, createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557), createIdentifierValueNode(pCxt, &yymsp[-2].minor.yy557), yymsp[-4].minor.yy946); } +#line 7655 "sql.c" break; case 322: /* cmd ::= SHOW VNODES ON DNODE NK_INTEGER */ +#line 571 "sql.y" { pCxt->pRootNode = createShowVnodesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0), NULL); } +#line 7660 "sql.c" break; case 323: /* cmd ::= SHOW VNODES */ +#line 572 "sql.y" { pCxt->pRootNode = createShowVnodesStmt(pCxt, NULL, NULL); } +#line 7665 "sql.c" break; case 324: /* cmd ::= SHOW db_name_cond_opt ALIVE */ +#line 574 "sql.y" { pCxt->pRootNode = createShowAliveStmt(pCxt, yymsp[-1].minor.yy974, QUERY_NODE_SHOW_DB_ALIVE_STMT); } +#line 7670 "sql.c" break; case 325: /* cmd ::= SHOW CLUSTER ALIVE */ +#line 575 "sql.y" { pCxt->pRootNode = createShowAliveStmt(pCxt, NULL, QUERY_NODE_SHOW_CLUSTER_ALIVE_STMT); } +#line 7675 "sql.c" break; case 326: /* cmd ::= SHOW db_name_cond_opt VIEWS like_pattern_opt */ +#line 576 "sql.y" { pCxt->pRootNode = createShowStmtWithCond(pCxt, QUERY_NODE_SHOW_VIEWS_STMT, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, OP_TYPE_LIKE); } +#line 7680 "sql.c" break; case 327: /* cmd ::= SHOW CREATE VIEW full_table_name */ +#line 577 "sql.y" { pCxt->pRootNode = createShowCreateViewStmt(pCxt, QUERY_NODE_SHOW_CREATE_VIEW_STMT, yymsp[0].minor.yy974); } +#line 7685 "sql.c" break; case 328: /* cmd ::= SHOW COMPACTS */ +#line 578 "sql.y" { pCxt->pRootNode = createShowCompactsStmt(pCxt, QUERY_NODE_SHOW_COMPACTS_STMT); } +#line 7690 "sql.c" break; case 329: /* cmd ::= SHOW COMPACT NK_INTEGER */ +#line 579 "sql.y" { pCxt->pRootNode = createShowCompactDetailsStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } +#line 7695 "sql.c" break; case 330: /* table_kind_db_name_cond_opt ::= */ +#line 583 "sql.y" { yymsp[1].minor.yy595.kind = SHOW_KIND_ALL; yymsp[1].minor.yy595.dbName = nil_token; } +#line 7700 "sql.c" break; case 331: /* table_kind_db_name_cond_opt ::= table_kind */ +#line 584 "sql.y" { yylhsminor.yy595.kind = yymsp[0].minor.yy741; yylhsminor.yy595.dbName = nil_token; } +#line 7705 "sql.c" yymsp[0].minor.yy595 = yylhsminor.yy595; break; case 332: /* table_kind_db_name_cond_opt ::= db_name 
NK_DOT */ +#line 585 "sql.y" { yylhsminor.yy595.kind = SHOW_KIND_ALL; yylhsminor.yy595.dbName = yymsp[-1].minor.yy557; } +#line 7711 "sql.c" yymsp[-1].minor.yy595 = yylhsminor.yy595; break; case 333: /* table_kind_db_name_cond_opt ::= table_kind db_name NK_DOT */ +#line 586 "sql.y" { yylhsminor.yy595.kind = yymsp[-2].minor.yy741; yylhsminor.yy595.dbName = yymsp[-1].minor.yy557; } +#line 7717 "sql.c" yymsp[-2].minor.yy595 = yylhsminor.yy595; break; case 334: /* table_kind ::= NORMAL */ +#line 590 "sql.y" { yymsp[0].minor.yy741 = SHOW_KIND_TABLES_NORMAL; } +#line 7723 "sql.c" break; case 335: /* table_kind ::= CHILD */ +#line 591 "sql.y" { yymsp[0].minor.yy741 = SHOW_KIND_TABLES_CHILD; } +#line 7728 "sql.c" break; case 336: /* db_name_cond_opt ::= */ case 341: /* from_db_opt ::= */ yytestcase(yyruleno==341); +#line 593 "sql.y" { yymsp[1].minor.yy974 = createDefaultDatabaseCondValue(pCxt); } +#line 7734 "sql.c" break; case 337: /* db_name_cond_opt ::= db_name NK_DOT */ +#line 594 "sql.y" { yylhsminor.yy974 = createIdentifierValueNode(pCxt, &yymsp[-1].minor.yy557); } +#line 7739 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 339: /* like_pattern_opt ::= LIKE NK_STRING */ +#line 597 "sql.y" { yymsp[-1].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } +#line 7745 "sql.c" break; case 340: /* table_name_cond ::= table_name */ +#line 599 "sql.y" { yylhsminor.yy974 = createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557); } +#line 7750 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 342: /* from_db_opt ::= FROM db_name */ +#line 602 "sql.y" { yymsp[-1].minor.yy974 = createIdentifierValueNode(pCxt, &yymsp[0].minor.yy557); } +#line 7756 "sql.c" break; case 346: /* tag_item ::= TBNAME */ +#line 610 "sql.y" { yylhsminor.yy974 = setProjectionAlias(pCxt, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL), &yymsp[0].minor.yy0); } +#line 7761 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 349: /* tag_item ::= column_name column_alias */ +#line 613 "sql.y" { yylhsminor.yy974 = setProjectionAlias(pCxt, createColumnNode(pCxt, NULL, &yymsp[-1].minor.yy557), &yymsp[0].minor.yy557); } +#line 7767 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 350: /* tag_item ::= column_name AS column_alias */ +#line 614 "sql.y" { yylhsminor.yy974 = setProjectionAlias(pCxt, createColumnNode(pCxt, NULL, &yymsp[-2].minor.yy557), &yymsp[0].minor.yy557); } +#line 7773 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 351: /* db_kind_opt ::= */ +#line 618 "sql.y" { yymsp[1].minor.yy741 = SHOW_KIND_ALL; } +#line 7779 "sql.c" break; case 352: /* db_kind_opt ::= USER */ +#line 619 "sql.y" { yymsp[0].minor.yy741 = SHOW_KIND_DATABASES_USER; } +#line 7784 "sql.c" break; case 353: /* db_kind_opt ::= SYSTEM */ +#line 620 "sql.y" { yymsp[0].minor.yy741 = SHOW_KIND_DATABASES_SYSTEM; } +#line 7789 "sql.c" break; case 354: /* cmd ::= CREATE TSMA not_exists_opt tsma_name ON full_table_name tsma_func_list INTERVAL NK_LP duration_literal NK_RP */ +#line 626 "sql.y" { pCxt->pRootNode = createCreateTSMAStmt(pCxt, yymsp[-8].minor.yy569, &yymsp[-7].minor.yy557, yymsp[-4].minor.yy974, yymsp[-5].minor.yy974, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 7794 "sql.c" break; case 355: /* cmd ::= CREATE RECURSIVE TSMA not_exists_opt tsma_name ON full_table_name INTERVAL NK_LP duration_literal NK_RP */ +#line 628 "sql.y" { pCxt->pRootNode = createCreateTSMAStmt(pCxt, yymsp[-7].minor.yy569, &yymsp[-6].minor.yy557, NULL, yymsp[-4].minor.yy974, 
releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 7799 "sql.c" break; case 356: /* cmd ::= DROP TSMA exists_opt full_tsma_name */ +#line 629 "sql.y" { pCxt->pRootNode = createDropTSMAStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } +#line 7804 "sql.c" break; case 357: /* cmd ::= SHOW db_name_cond_opt TSMAS */ +#line 630 "sql.y" { pCxt->pRootNode = createShowTSMASStmt(pCxt, yymsp[-1].minor.yy974); } +#line 7809 "sql.c" break; case 360: /* tsma_func_list ::= FUNCTION NK_LP func_list NK_RP */ +#line 637 "sql.y" { yymsp[-3].minor.yy974 = createTSMAOptions(pCxt, yymsp[-1].minor.yy946); } +#line 7814 "sql.c" break; case 361: /* cmd ::= CREATE SMA INDEX not_exists_opt col_name ON full_table_name index_options */ +#line 641 "sql.y" { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_SMA, yymsp[-4].minor.yy569, yymsp[-3].minor.yy974, yymsp[-1].minor.yy974, NULL, yymsp[0].minor.yy974); } +#line 7819 "sql.c" break; case 362: /* cmd ::= CREATE INDEX not_exists_opt col_name ON full_table_name NK_LP col_name_list NK_RP */ +#line 643 "sql.y" { pCxt->pRootNode = createCreateIndexStmt(pCxt, INDEX_TYPE_NORMAL, yymsp[-6].minor.yy569, yymsp[-5].minor.yy974, yymsp[-3].minor.yy974, yymsp[-1].minor.yy946, NULL); } +#line 7824 "sql.c" break; case 363: /* cmd ::= DROP INDEX exists_opt full_index_name */ +#line 644 "sql.y" { pCxt->pRootNode = createDropIndexStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } +#line 7829 "sql.c" break; case 364: /* full_index_name ::= index_name */ +#line 646 "sql.y" { yylhsminor.yy974 = createRealTableNodeForIndexName(pCxt, NULL, &yymsp[0].minor.yy557); } +#line 7834 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 365: /* full_index_name ::= db_name NK_DOT index_name */ +#line 647 "sql.y" { yylhsminor.yy974 = createRealTableNodeForIndexName(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557); } +#line 7840 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 366: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_RP sliding_opt sma_stream_opt */ +#line 650 "sql.y" { yymsp[-9].minor.yy974 = createIndexOption(pCxt, yymsp[-7].minor.yy946, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), NULL, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 7846 "sql.c" break; case 367: /* index_options ::= FUNCTION NK_LP func_list NK_RP INTERVAL NK_LP duration_literal NK_COMMA duration_literal NK_RP sliding_opt sma_stream_opt */ +#line 653 "sql.y" { yymsp[-11].minor.yy974 = createIndexOption(pCxt, yymsp[-9].minor.yy946, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 7851 "sql.c" break; case 370: /* func ::= sma_func_name NK_LP expression_list NK_RP */ +#line 660 "sql.y" { yylhsminor.yy974 = createFunctionNode(pCxt, &yymsp[-3].minor.yy557, yymsp[-1].minor.yy946); } +#line 7856 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 371: /* sma_func_name ::= function_name */ case 666: /* alias_opt ::= table_alias */ yytestcase(yyruleno==666); +#line 664 "sql.y" { yylhsminor.yy557 = yymsp[0].minor.yy557; } +#line 7863 "sql.c" yymsp[0].minor.yy557 = yylhsminor.yy557; break; case 376: /* sma_stream_opt ::= */ case 426: /* stream_options ::= */ yytestcase(yyruleno==426); +#line 670 "sql.y" { yymsp[1].minor.yy974 = createStreamOptions(pCxt); } +#line 7870 "sql.c" break; case 377: /* sma_stream_opt ::= sma_stream_opt WATERMARK duration_literal */ +#line 671 "sql.y" { 
((SStreamOptions*)yymsp[-2].minor.yy974)->pWatermark = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = yymsp[-2].minor.yy974; } +#line 7875 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 378: /* sma_stream_opt ::= sma_stream_opt MAX_DELAY duration_literal */ +#line 672 "sql.y" { ((SStreamOptions*)yymsp[-2].minor.yy974)->pDelay = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = yymsp[-2].minor.yy974; } +#line 7881 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 379: /* sma_stream_opt ::= sma_stream_opt DELETE_MARK duration_literal */ +#line 673 "sql.y" { ((SStreamOptions*)yymsp[-2].minor.yy974)->pDeleteMark = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = yymsp[-2].minor.yy974; } +#line 7887 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 380: /* with_meta ::= AS */ +#line 678 "sql.y" { yymsp[0].minor.yy904 = 0; } +#line 7893 "sql.c" break; case 381: /* with_meta ::= WITH META AS */ +#line 679 "sql.y" { yymsp[-2].minor.yy904 = 1; } +#line 7898 "sql.c" break; case 382: /* with_meta ::= ONLY META AS */ +#line 680 "sql.y" { yymsp[-2].minor.yy904 = 2; } +#line 7903 "sql.c" break; case 383: /* cmd ::= CREATE TOPIC not_exists_opt topic_name AS query_or_subquery */ +#line 682 "sql.y" { pCxt->pRootNode = createCreateTopicStmtUseQuery(pCxt, yymsp[-3].minor.yy569, &yymsp[-2].minor.yy557, yymsp[0].minor.yy974); } +#line 7908 "sql.c" break; case 384: /* cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta DATABASE db_name */ +#line 684 "sql.y" { pCxt->pRootNode = createCreateTopicStmtUseDb(pCxt, yymsp[-4].minor.yy569, &yymsp[-3].minor.yy557, &yymsp[0].minor.yy557, yymsp[-2].minor.yy904); } +#line 7913 "sql.c" break; case 385: /* cmd ::= CREATE TOPIC not_exists_opt topic_name with_meta STABLE full_table_name where_clause_opt */ +#line 686 "sql.y" { pCxt->pRootNode = createCreateTopicStmtUseTable(pCxt, yymsp[-5].minor.yy569, &yymsp[-4].minor.yy557, yymsp[-1].minor.yy974, yymsp[-3].minor.yy904, yymsp[0].minor.yy974); } +#line 7918 "sql.c" break; case 386: /* cmd ::= DROP TOPIC exists_opt topic_name */ +#line 688 "sql.y" { pCxt->pRootNode = createDropTopicStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } +#line 7923 "sql.c" break; case 387: /* cmd ::= DROP CONSUMER GROUP exists_opt cgroup_name ON topic_name */ +#line 689 "sql.y" { pCxt->pRootNode = createDropCGroupStmt(pCxt, yymsp[-3].minor.yy569, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557); } +#line 7928 "sql.c" break; case 388: /* cmd ::= DESC full_table_name */ case 389: /* cmd ::= DESCRIBE full_table_name */ yytestcase(yyruleno==389); +#line 692 "sql.y" { pCxt->pRootNode = createDescribeStmt(pCxt, yymsp[0].minor.yy974); } +#line 7934 "sql.c" break; case 390: /* cmd ::= RESET QUERY CACHE */ +#line 696 "sql.y" { pCxt->pRootNode = createResetQueryCacheStmt(pCxt); } +#line 7939 "sql.c" break; case 391: /* cmd ::= EXPLAIN analyze_opt explain_options query_or_subquery */ case 392: /* cmd ::= EXPLAIN analyze_opt explain_options insert_query */ yytestcase(yyruleno==392); +#line 699 "sql.y" { pCxt->pRootNode = createExplainStmt(pCxt, yymsp[-2].minor.yy569, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 7945 "sql.c" break; case 395: /* explain_options ::= */ +#line 707 "sql.y" { yymsp[1].minor.yy974 = createDefaultExplainOptions(pCxt); } +#line 7950 "sql.c" break; case 396: /* explain_options ::= explain_options VERBOSE NK_BOOL */ +#line 708 "sql.y" { yylhsminor.yy974 = setExplainVerbose(pCxt, yymsp[-2].minor.yy974, &yymsp[0].minor.yy0); } 
+#line 7955 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 397: /* explain_options ::= explain_options RATIO NK_FLOAT */ +#line 709 "sql.y" { yylhsminor.yy974 = setExplainRatio(pCxt, yymsp[-2].minor.yy974, &yymsp[0].minor.yy0); } +#line 7961 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 398: /* cmd ::= CREATE or_replace_opt agg_func_opt FUNCTION not_exists_opt function_name AS NK_STRING OUTPUTTYPE type_name bufsize_opt language_opt */ +#line 714 "sql.y" { pCxt->pRootNode = createCreateFunctionStmt(pCxt, yymsp[-7].minor.yy569, yymsp[-9].minor.yy569, &yymsp[-6].minor.yy557, &yymsp[-4].minor.yy0, yymsp[-2].minor.yy424, yymsp[-1].minor.yy904, &yymsp[0].minor.yy557, yymsp[-10].minor.yy569); } +#line 7967 "sql.c" break; case 399: /* cmd ::= DROP FUNCTION exists_opt function_name */ +#line 715 "sql.y" { pCxt->pRootNode = createDropFunctionStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } +#line 7972 "sql.c" break; case 404: /* language_opt ::= */ case 449: /* on_vgroup_id ::= */ yytestcase(yyruleno==449); +#line 729 "sql.y" { yymsp[1].minor.yy557 = nil_token; } +#line 7978 "sql.c" break; case 405: /* language_opt ::= LANGUAGE NK_STRING */ case 450: /* on_vgroup_id ::= ON NK_INTEGER */ yytestcase(yyruleno==450); +#line 730 "sql.y" { yymsp[-1].minor.yy557 = yymsp[0].minor.yy0; } +#line 7984 "sql.c" break; case 408: /* cmd ::= CREATE or_replace_opt VIEW full_view_name AS query_or_subquery */ +#line 739 "sql.y" { pCxt->pRootNode = createCreateViewStmt(pCxt, yymsp[-4].minor.yy569, yymsp[-2].minor.yy974, &yymsp[-1].minor.yy0, yymsp[0].minor.yy974); } +#line 7989 "sql.c" break; case 409: /* cmd ::= DROP VIEW exists_opt full_view_name */ +#line 740 "sql.y" { pCxt->pRootNode = createDropViewStmt(pCxt, yymsp[-1].minor.yy569, yymsp[0].minor.yy974); } +#line 7994 "sql.c" break; case 410: /* full_view_name ::= view_name */ +#line 742 "sql.y" { yylhsminor.yy974 = createViewNode(pCxt, NULL, &yymsp[0].minor.yy557); } +#line 7999 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 411: /* full_view_name ::= db_name NK_DOT view_name */ +#line 743 "sql.y" { yylhsminor.yy974 = createViewNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557); } +#line 8005 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 412: /* cmd ::= CREATE STREAM not_exists_opt stream_name stream_options INTO full_table_name col_list_opt tag_def_or_ref_opt subtable_opt AS query_or_subquery */ +#line 748 "sql.y" { pCxt->pRootNode = createCreateStreamStmt(pCxt, yymsp[-9].minor.yy569, &yymsp[-8].minor.yy557, yymsp[-5].minor.yy974, yymsp[-7].minor.yy974, yymsp[-3].minor.yy946, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, yymsp[-4].minor.yy946); } +#line 8011 "sql.c" break; case 413: /* cmd ::= DROP STREAM exists_opt stream_name */ +#line 749 "sql.y" { pCxt->pRootNode = createDropStreamStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } +#line 8016 "sql.c" break; case 414: /* cmd ::= PAUSE STREAM exists_opt stream_name */ +#line 750 "sql.y" { pCxt->pRootNode = createPauseStreamStmt(pCxt, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } +#line 8021 "sql.c" break; case 415: /* cmd ::= RESUME STREAM exists_opt ignore_opt stream_name */ +#line 751 "sql.y" { pCxt->pRootNode = createResumeStreamStmt(pCxt, yymsp[-2].minor.yy569, yymsp[-1].minor.yy569, &yymsp[0].minor.yy557); } +#line 8026 "sql.c" break; case 420: /* column_stream_def ::= column_name stream_col_options */ +#line 764 "sql.y" { yylhsminor.yy974 = createColumnDefNode(pCxt, &yymsp[-1].minor.yy557, 
createDataType(TSDB_DATA_TYPE_NULL), yymsp[0].minor.yy974); } +#line 8031 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 421: /* stream_col_options ::= */ case 781: /* column_options ::= */ yytestcase(yyruleno==781); +#line 765 "sql.y" { yymsp[1].minor.yy974 = createDefaultColumnOptions(pCxt); } +#line 8038 "sql.c" break; case 422: /* stream_col_options ::= stream_col_options PRIMARY KEY */ case 782: /* column_options ::= column_options PRIMARY KEY */ yytestcase(yyruleno==782); +#line 766 "sql.y" { yylhsminor.yy974 = setColumnOptionsPK(pCxt, yymsp[-2].minor.yy974); } +#line 8044 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 427: /* stream_options ::= stream_options TRIGGER AT_ONCE */ case 428: /* stream_options ::= stream_options TRIGGER WINDOW_CLOSE */ yytestcase(yyruleno==428); +#line 776 "sql.y" { yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_TRIGGER_TYPE_SET, &yymsp[0].minor.yy0, NULL); } +#line 8051 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 429: /* stream_options ::= stream_options TRIGGER MAX_DELAY duration_literal */ +#line 778 "sql.y" { yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-3].minor.yy974, SOPT_TRIGGER_TYPE_SET, &yymsp[-1].minor.yy0, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } +#line 8057 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 430: /* stream_options ::= stream_options WATERMARK duration_literal */ +#line 779 "sql.y" { yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_WATERMARK_SET, NULL, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } +#line 8063 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 431: /* stream_options ::= stream_options IGNORE EXPIRED NK_INTEGER */ +#line 780 "sql.y" { yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-3].minor.yy974, SOPT_IGNORE_EXPIRED_SET, &yymsp[0].minor.yy0, NULL); } +#line 8069 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 432: /* stream_options ::= stream_options FILL_HISTORY NK_INTEGER */ +#line 781 "sql.y" { yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_FILL_HISTORY_SET, &yymsp[0].minor.yy0, NULL); } +#line 8075 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 433: /* stream_options ::= stream_options DELETE_MARK duration_literal */ +#line 782 "sql.y" { yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-2].minor.yy974, SOPT_DELETE_MARK_SET, NULL, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } +#line 8081 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 434: /* stream_options ::= stream_options IGNORE UPDATE NK_INTEGER */ +#line 783 "sql.y" { yylhsminor.yy974 = setStreamOptions(pCxt, yymsp[-3].minor.yy974, SOPT_IGNORE_UPDATE_SET, &yymsp[0].minor.yy0, NULL); } +#line 8087 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 436: /* subtable_opt ::= SUBTABLE NK_LP expression NK_RP */ case 725: /* sliding_opt ::= SLIDING NK_LP interval_sliding_duration_literal NK_RP */ yytestcase(yyruleno==725); case 749: /* every_opt ::= EVERY NK_LP duration_literal NK_RP */ yytestcase(yyruleno==749); +#line 786 "sql.y" { yymsp[-3].minor.yy974 = releaseRawExprNode(pCxt, yymsp[-1].minor.yy974); } +#line 8095 "sql.c" break; case 439: /* cmd ::= KILL CONNECTION NK_INTEGER */ +#line 794 "sql.y" { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_CONNECTION_STMT, &yymsp[0].minor.yy0); } +#line 8100 "sql.c" break; case 440: /* cmd ::= KILL QUERY NK_STRING */ +#line 795 "sql.y" { pCxt->pRootNode = createKillQueryStmt(pCxt, 
&yymsp[0].minor.yy0); } +#line 8105 "sql.c" break; case 441: /* cmd ::= KILL TRANSACTION NK_INTEGER */ +#line 796 "sql.y" { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_TRANSACTION_STMT, &yymsp[0].minor.yy0); } +#line 8110 "sql.c" break; case 442: /* cmd ::= KILL COMPACT NK_INTEGER */ +#line 797 "sql.y" { pCxt->pRootNode = createKillStmt(pCxt, QUERY_NODE_KILL_COMPACT_STMT, &yymsp[0].minor.yy0); } +#line 8115 "sql.c" break; case 443: /* cmd ::= BALANCE VGROUP */ +#line 800 "sql.y" { pCxt->pRootNode = createBalanceVgroupStmt(pCxt); } +#line 8120 "sql.c" break; case 444: /* cmd ::= BALANCE VGROUP LEADER on_vgroup_id */ +#line 801 "sql.y" { pCxt->pRootNode = createBalanceVgroupLeaderStmt(pCxt, &yymsp[0].minor.yy557); } +#line 8125 "sql.c" break; case 445: /* cmd ::= BALANCE VGROUP LEADER DATABASE db_name */ +#line 802 "sql.y" { pCxt->pRootNode = createBalanceVgroupLeaderDBNameStmt(pCxt, &yymsp[0].minor.yy557); } +#line 8130 "sql.c" break; case 446: /* cmd ::= MERGE VGROUP NK_INTEGER NK_INTEGER */ +#line 803 "sql.y" { pCxt->pRootNode = createMergeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 8135 "sql.c" break; case 447: /* cmd ::= REDISTRIBUTE VGROUP NK_INTEGER dnode_list */ +#line 804 "sql.y" { pCxt->pRootNode = createRedistributeVgroupStmt(pCxt, &yymsp[-1].minor.yy0, yymsp[0].minor.yy946); } +#line 8140 "sql.c" break; case 448: /* cmd ::= SPLIT VGROUP NK_INTEGER */ +#line 805 "sql.y" { pCxt->pRootNode = createSplitVgroupStmt(pCxt, &yymsp[0].minor.yy0); } +#line 8145 "sql.c" break; case 451: /* dnode_list ::= DNODE NK_INTEGER */ +#line 814 "sql.y" { yymsp[-1].minor.yy946 = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &yymsp[0].minor.yy0)); } +#line 8150 "sql.c" break; case 453: /* cmd ::= DELETE FROM full_table_name where_clause_opt */ +#line 821 "sql.y" { pCxt->pRootNode = createDeleteStmt(pCxt, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 8155 "sql.c" break; case 456: /* insert_query ::= INSERT INTO full_table_name NK_LP col_name_list NK_RP query_or_subquery */ +#line 830 "sql.y" { yymsp[-6].minor.yy974 = createInsertStmt(pCxt, yymsp[-4].minor.yy974, yymsp[-2].minor.yy946, yymsp[0].minor.yy974); } +#line 8160 "sql.c" break; case 457: /* insert_query ::= INSERT INTO full_table_name query_or_subquery */ +#line 831 "sql.y" { yymsp[-3].minor.yy974 = createInsertStmt(pCxt, yymsp[-1].minor.yy974, NULL, yymsp[0].minor.yy974); } +#line 8165 "sql.c" break; case 458: /* tags_literal ::= NK_INTEGER */ case 470: /* tags_literal ::= NK_BIN */ yytestcase(yyruleno==470); case 479: /* tags_literal ::= NK_HEX */ yytestcase(yyruleno==479); +#line 834 "sql.y" { yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0, NULL); } +#line 8172 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 459: /* tags_literal ::= NK_INTEGER NK_PLUS duration_literal */ @@ -7030,12 +8180,14 @@ yymsp[0].minor.yy974); } case 481: /* tags_literal ::= NK_HEX NK_MINUS duration_literal */ yytestcase(yyruleno==481); case 489: /* tags_literal ::= NK_STRING NK_PLUS duration_literal */ yytestcase(yyruleno==489); case 490: /* tags_literal ::= NK_STRING NK_MINUS duration_literal */ yytestcase(yyruleno==490); +#line 835 "sql.y" { SToken l = yymsp[-2].minor.yy0; SToken r = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); l.n = (r.z + r.n) - l.z; yylhsminor.yy974 = createRawValueNodeExt(pCxt, TSDB_DATA_TYPE_BINARY, &l, NULL, yymsp[0].minor.yy974); } +#line 8190 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 461: /* 
tags_literal ::= NK_PLUS NK_INTEGER */ @@ -7044,11 +8196,13 @@ yymsp[0].minor.yy974); } case 476: /* tags_literal ::= NK_MINUS NK_BIN */ yytestcase(yyruleno==476); case 482: /* tags_literal ::= NK_PLUS NK_HEX */ yytestcase(yyruleno==482); case 485: /* tags_literal ::= NK_MINUS NK_HEX */ yytestcase(yyruleno==485); +#line 847 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &t, NULL); } +#line 8205 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 462: /* tags_literal ::= NK_PLUS NK_INTEGER NK_PLUS duration_literal */ @@ -7063,71 +8217,97 @@ yymsp[0].minor.yy974); } case 484: /* tags_literal ::= NK_PLUS NK_HEX NK_MINUS duration_literal */ yytestcase(yyruleno==484); case 486: /* tags_literal ::= NK_MINUS NK_HEX NK_PLUS duration_literal */ yytestcase(yyruleno==486); case 487: /* tags_literal ::= NK_MINUS NK_HEX NK_MINUS duration_literal */ yytestcase(yyruleno==487); +#line 852 "sql.y" { SToken l = yymsp[-3].minor.yy0; SToken r = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); l.n = (r.z + r.n) - l.z; yylhsminor.yy974 = createRawValueNodeExt(pCxt, TSDB_DATA_TYPE_BINARY, &l, NULL, yymsp[0].minor.yy974); } +#line 8227 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 467: /* tags_literal ::= NK_FLOAT */ +#line 881 "sql.y" { yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0, NULL); } +#line 8233 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 468: /* tags_literal ::= NK_PLUS NK_FLOAT */ case 469: /* tags_literal ::= NK_MINUS NK_FLOAT */ yytestcase(yyruleno==469); +#line 882 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t, NULL); } +#line 8244 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 488: /* tags_literal ::= NK_STRING */ +#line 988 "sql.y" { yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0, NULL); } +#line 8250 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 491: /* tags_literal ::= NK_BOOL */ +#line 1001 "sql.y" { yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0, NULL); } +#line 8256 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 492: /* tags_literal ::= NULL */ +#line 1002 "sql.y" { yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0, NULL); } +#line 8262 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 493: /* tags_literal ::= literal_func */ +#line 1004 "sql.y" { yylhsminor.yy974 = createRawValueNode(pCxt, TSDB_DATA_TYPE_BINARY, NULL, yymsp[0].minor.yy974); } +#line 8268 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 494: /* tags_literal ::= literal_func NK_PLUS duration_literal */ case 495: /* tags_literal ::= literal_func NK_MINUS duration_literal */ yytestcase(yyruleno==495); +#line 1005 "sql.y" { SToken l = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken r = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); l.n = (r.z + r.n) - l.z; yylhsminor.yy974 = createRawValueNodeExt(pCxt, TSDB_DATA_TYPE_BINARY, &l, yymsp[-2].minor.yy974, yymsp[0].minor.yy974); } +#line 8280 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 498: /* literal ::= NK_INTEGER */ +#line 1024 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, 
createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0)); } +#line 8286 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 499: /* literal ::= NK_FLOAT */ +#line 1025 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0)); } +#line 8292 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 500: /* literal ::= NK_STRING */ +#line 1026 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0)); } +#line 8298 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 501: /* literal ::= NK_BOOL */ +#line 1027 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0)); } +#line 8304 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 502: /* literal ::= TIMESTAMP NK_STRING */ +#line 1028 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0)); } +#line 8310 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 503: /* literal ::= duration_literal */ @@ -7151,64 +8331,90 @@ yymsp[0].minor.yy974); } case 752: /* query_simple ::= union_query_expression */ yytestcase(yyruleno==752); case 755: /* query_simple_or_subquery ::= query_simple */ yytestcase(yyruleno==755); case 757: /* query_or_subquery ::= query_expression */ yytestcase(yyruleno==757); +#line 1029 "sql.y" { yylhsminor.yy974 = yymsp[0].minor.yy974; } +#line 8336 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 504: /* literal ::= NULL */ +#line 1030 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0)); } +#line 8342 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 505: /* literal ::= NK_QUESTION */ +#line 1031 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0)); } +#line 8348 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 506: /* duration_literal ::= NK_VARIABLE */ case 726: /* interval_sliding_duration_literal ::= NK_VARIABLE */ yytestcase(yyruleno==726); case 727: /* interval_sliding_duration_literal ::= NK_STRING */ yytestcase(yyruleno==727); case 728: /* interval_sliding_duration_literal ::= NK_INTEGER */ yytestcase(yyruleno==728); +#line 1033 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createDurationValueNode(pCxt, &yymsp[0].minor.yy0)); } +#line 8357 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 507: /* signed ::= NK_INTEGER */ +#line 1035 "sql.y" { yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } +#line 8363 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 508: /* signed ::= NK_PLUS NK_INTEGER */ +#line 1036 "sql.y" { yymsp[-1].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_UBIGINT, &yymsp[0].minor.yy0); } +#line 8369 "sql.c" break; case 509: /* signed ::= NK_MINUS NK_INTEGER */ +#line 1037 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &t); } +#line 8378 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 510: /* signed ::= NK_FLOAT */ +#line 1042 "sql.y" { yylhsminor.yy974 = 
createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } +#line 8384 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 511: /* signed ::= NK_PLUS NK_FLOAT */ +#line 1043 "sql.y" { yymsp[-1].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &yymsp[0].minor.yy0); } +#line 8390 "sql.c" break; case 512: /* signed ::= NK_MINUS NK_FLOAT */ +#line 1044 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_DOUBLE, &t); } +#line 8399 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 514: /* signed_literal ::= NK_STRING */ +#line 1051 "sql.y" { yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0); } +#line 8405 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 515: /* signed_literal ::= NK_BOOL */ +#line 1052 "sql.y" { yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_BOOL, &yymsp[0].minor.yy0); } +#line 8411 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 516: /* signed_literal ::= TIMESTAMP NK_STRING */ +#line 1053 "sql.y" { yymsp[-1].minor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_TIMESTAMP, &yymsp[0].minor.yy0); } +#line 8417 "sql.c" break; case 517: /* signed_literal ::= duration_literal */ case 519: /* signed_literal ::= literal_func */ yytestcase(yyruleno==519); @@ -7218,118 +8424,156 @@ yymsp[0].minor.yy974); } case 756: /* query_simple_or_subquery ::= subquery */ yytestcase(yyruleno==756); case 758: /* query_or_subquery ::= subquery */ yytestcase(yyruleno==758); case 771: /* search_condition ::= common_expression */ yytestcase(yyruleno==771); +#line 1054 "sql.y" { yylhsminor.yy974 = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); } +#line 8429 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 518: /* signed_literal ::= NULL */ +#line 1055 "sql.y" { yylhsminor.yy974 = createValueNode(pCxt, TSDB_DATA_TYPE_NULL, &yymsp[0].minor.yy0); } +#line 8435 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 520: /* signed_literal ::= NK_QUESTION */ +#line 1057 "sql.y" { yylhsminor.yy974 = createPlaceholderValueNode(pCxt, &yymsp[0].minor.yy0); } +#line 8441 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 539: /* expression ::= pseudo_column */ +#line 1123 "sql.y" { yylhsminor.yy974 = yymsp[0].minor.yy974; (void)setRawExprNodeIsPseudoColumn(pCxt, yylhsminor.yy974, true); } +#line 8447 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 543: /* expression ::= NK_LP expression NK_RP */ case 652: /* boolean_primary ::= NK_LP boolean_value_expression NK_RP */ yytestcase(yyruleno==652); case 770: /* subquery ::= NK_LP subquery NK_RP */ yytestcase(yyruleno==770); +#line 1127 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 8455 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 544: /* expression ::= NK_PLUS expr_or_subquery */ +#line 1128 "sql.y" { SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } +#line 8464 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 545: /* expression ::= NK_MINUS expr_or_subquery */ +#line 1132 "sql.y" { SToken t = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &t, 
createOperatorNode(pCxt, OP_TYPE_MINUS, releaseRawExprNode(pCxt, yymsp[0].minor.yy974), NULL)); } +#line 8473 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 546: /* expression ::= expr_or_subquery NK_PLUS expr_or_subquery */ +#line 1136 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_ADD, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8483 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 547: /* expression ::= expr_or_subquery NK_MINUS expr_or_subquery */ +#line 1141 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_SUB, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8493 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 548: /* expression ::= expr_or_subquery NK_STAR expr_or_subquery */ +#line 1146 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_MULTI, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8503 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 549: /* expression ::= expr_or_subquery NK_SLASH expr_or_subquery */ +#line 1151 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_DIV, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8513 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 550: /* expression ::= expr_or_subquery NK_REM expr_or_subquery */ +#line 1156 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_REM, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8523 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 551: /* expression ::= column_reference NK_ARROW NK_STRING */ +#line 1161 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_JSON_GET_VALUE, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[0].minor.yy0))); } +#line 8532 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 552: /* expression ::= expr_or_subquery NK_BITAND expr_or_subquery */ +#line 1165 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8542 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; 
case 553: /* expression ::= expr_or_subquery NK_BITOR expr_or_subquery */ +#line 1170 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, OP_TYPE_BIT_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8552 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 556: /* column_reference ::= column_name */ +#line 1181 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy557, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy557)); } +#line 8558 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 557: /* column_reference ::= table_name NK_DOT column_name */ +#line 1182 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557, createColumnNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy557)); } +#line 8564 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 558: /* column_reference ::= NK_ALIAS */ +#line 1183 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } +#line 8570 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 559: /* column_reference ::= table_name NK_DOT NK_ALIAS */ +#line 1184 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0, createColumnNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0)); } +#line 8576 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 560: /* pseudo_column ::= ROWTS */ @@ -7348,290 +8592,427 @@ yymsp[0].minor.yy974); } case 574: /* pseudo_column ::= FROWTS */ yytestcase(yyruleno==574); case 591: /* literal_func ::= NOW */ yytestcase(yyruleno==591); case 592: /* literal_func ::= TODAY */ yytestcase(yyruleno==592); +#line 1186 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, NULL)); } +#line 8597 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 562: /* pseudo_column ::= table_name NK_DOT TBNAME */ +#line 1188 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[0].minor.yy0, createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BINARY, &yymsp[-2].minor.yy557)))); } +#line 8603 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 575: /* function_expression ::= function_name NK_LP expression_list NK_RP */ case 576: /* function_expression ::= star_func NK_LP star_func_para_list NK_RP */ yytestcase(yyruleno==576); case 584: /* function_expression ::= substr_func NK_LP expression_list NK_RP */ yytestcase(yyruleno==584); +#line 1202 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy557, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy557, yymsp[-1].minor.yy946)); } +#line 8611 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 577: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name NK_RP */ case 578: /* function_expression ::= CAST NK_LP expr_or_subquery AS type_name_default_len NK_RP */ yytestcase(yyruleno==578); +#line 1205 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createCastFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-1].minor.yy424)); } +#line 8618 "sql.c" 
yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 579: /* function_expression ::= POSITION NK_LP expr_or_subquery IN expr_or_subquery NK_RP */ +#line 1209 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createPositionFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974))); } +#line 8624 "sql.c" yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 580: /* function_expression ::= TRIM NK_LP expr_or_subquery NK_RP */ +#line 1211 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), TRIM_TYPE_BOTH)); } +#line 8630 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 581: /* function_expression ::= TRIM NK_LP trim_specification_type FROM expr_or_subquery NK_RP */ +#line 1213 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), yymsp[-3].minor.yy300)); } +#line 8636 "sql.c" yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 582: /* function_expression ::= TRIM NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ +#line 1215 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), TRIM_TYPE_BOTH)); } +#line 8642 "sql.c" yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 583: /* function_expression ::= TRIM NK_LP trim_specification_type expr_or_subquery FROM expr_or_subquery NK_RP */ +#line 1217 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-6].minor.yy0, &yymsp[0].minor.yy0, createTrimFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), yymsp[-4].minor.yy300)); } +#line 8648 "sql.c" yymsp[-6].minor.yy974 = yylhsminor.yy974; break; case 585: /* function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery NK_RP */ +#line 1221 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-5].minor.yy557, &yymsp[0].minor.yy0, createSubstrFunctionNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974))); } +#line 8654 "sql.c" yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 586: /* function_expression ::= substr_func NK_LP expr_or_subquery FROM expr_or_subquery FOR expr_or_subquery NK_RP */ +#line 1223 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-7].minor.yy557, &yymsp[0].minor.yy0, createSubstrFunctionNodeExt(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974))); } +#line 8660 "sql.c" yymsp[-7].minor.yy974 = yylhsminor.yy974; break; case 587: /* function_expression ::= REPLACE NK_LP expression_list NK_RP */ case 594: /* rand_func ::= RAND NK_LP expression_list NK_RP */ yytestcase(yyruleno==594); +#line 1224 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-3].minor.yy0, yymsp[-1].minor.yy946)); } +#line 8667 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 590: /* literal_func ::= noarg_func NK_LP NK_RP */ +#line 1228 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, 
&yymsp[-2].minor.yy557, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy557, NULL)); } +#line 8673 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 593: /* rand_func ::= RAND NK_LP NK_RP */ +#line 1232 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createFunctionNode(pCxt, &yymsp[-2].minor.yy0, NULL)); } +#line 8679 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 597: /* trim_specification_type ::= BOTH */ +#line 1242 "sql.y" { yymsp[0].minor.yy300 = TRIM_TYPE_BOTH; } +#line 8685 "sql.c" break; case 598: /* trim_specification_type ::= TRAILING */ +#line 1243 "sql.y" { yymsp[0].minor.yy300 = TRIM_TYPE_TRAILING; } +#line 8690 "sql.c" break; case 599: /* trim_specification_type ::= LEADING */ +#line 1244 "sql.y" { yymsp[0].minor.yy300 = TRIM_TYPE_LEADING; } +#line 8695 "sql.c" break; case 614: /* star_func_para_list ::= NK_STAR */ +#line 1268 "sql.y" { yylhsminor.yy946 = createNodeList(pCxt, createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0)); } +#line 8700 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 619: /* star_func_para ::= table_name NK_DOT NK_STAR */ case 704: /* select_item ::= table_name NK_DOT NK_STAR */ yytestcase(yyruleno==704); +#line 1277 "sql.y" { yylhsminor.yy974 = createColumnNode(pCxt, &yymsp[-2].minor.yy557, &yymsp[0].minor.yy0); } +#line 8707 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 620: /* case_when_expression ::= CASE when_then_list case_when_else_opt END */ +#line 1280 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-3].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, NULL, yymsp[-2].minor.yy946, yymsp[-1].minor.yy974)); } +#line 8713 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 621: /* case_when_expression ::= CASE common_expression when_then_list case_when_else_opt END */ +#line 1282 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-4].minor.yy0, &yymsp[0].minor.yy0, createCaseWhenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-2].minor.yy946, yymsp[-1].minor.yy974)); } +#line 8719 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 624: /* when_then_expr ::= WHEN common_expression THEN common_expression */ +#line 1289 "sql.y" { yymsp[-3].minor.yy974 = createWhenThenNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974)); } +#line 8725 "sql.c" break; case 626: /* case_when_else_opt ::= ELSE common_expression */ +#line 1292 "sql.y" { yymsp[-1].minor.yy974 = releaseRawExprNode(pCxt, yymsp[0].minor.yy974); } +#line 8730 "sql.c" break; case 627: /* predicate ::= expr_or_subquery compare_op expr_or_subquery */ case 632: /* predicate ::= expr_or_subquery in_op in_predicate_value */ yytestcase(yyruleno==632); +#line 1295 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createOperatorNode(pCxt, yymsp[-1].minor.yy140, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8740 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 628: /* predicate ::= expr_or_subquery BETWEEN expr_or_subquery AND expr_or_subquery */ +#line 1302 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-4].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, 
&e, createBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-4].minor.yy974), releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8750 "sql.c" yymsp[-4].minor.yy974 = yylhsminor.yy974; break; case 629: /* predicate ::= expr_or_subquery NOT BETWEEN expr_or_subquery AND expr_or_subquery */ +#line 1308 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-5].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createNotBetweenAnd(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8760 "sql.c" yymsp[-5].minor.yy974 = yylhsminor.yy974; break; case 630: /* predicate ::= expr_or_subquery IS NULL */ +#line 1313 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NULL, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), NULL)); } +#line 8769 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 631: /* predicate ::= expr_or_subquery IS NOT NULL */ +#line 1317 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-3].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &yymsp[0].minor.yy0, createOperatorNode(pCxt, OP_TYPE_IS_NOT_NULL, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), NULL)); } +#line 8778 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 633: /* compare_op ::= NK_LT */ +#line 1329 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_LOWER_THAN; } +#line 8784 "sql.c" break; case 634: /* compare_op ::= NK_GT */ +#line 1330 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_GREATER_THAN; } +#line 8789 "sql.c" break; case 635: /* compare_op ::= NK_LE */ +#line 1331 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_LOWER_EQUAL; } +#line 8794 "sql.c" break; case 636: /* compare_op ::= NK_GE */ +#line 1332 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_GREATER_EQUAL; } +#line 8799 "sql.c" break; case 637: /* compare_op ::= NK_NE */ +#line 1333 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_NOT_EQUAL; } +#line 8804 "sql.c" break; case 638: /* compare_op ::= NK_EQ */ +#line 1334 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_EQUAL; } +#line 8809 "sql.c" break; case 639: /* compare_op ::= LIKE */ +#line 1335 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_LIKE; } +#line 8814 "sql.c" break; case 640: /* compare_op ::= NOT LIKE */ +#line 1336 "sql.y" { yymsp[-1].minor.yy140 = OP_TYPE_NOT_LIKE; } +#line 8819 "sql.c" break; case 641: /* compare_op ::= MATCH */ +#line 1337 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_MATCH; } +#line 8824 "sql.c" break; case 642: /* compare_op ::= NMATCH */ +#line 1338 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_NMATCH; } +#line 8829 "sql.c" break; case 643: /* compare_op ::= CONTAINS */ +#line 1339 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_JSON_CONTAINS; } +#line 8834 "sql.c" break; case 644: /* in_op ::= IN */ +#line 1343 "sql.y" { yymsp[0].minor.yy140 = OP_TYPE_IN; } +#line 8839 "sql.c" break; case 645: /* in_op ::= NOT IN */ +#line 1344 "sql.y" { yymsp[-1].minor.yy140 = OP_TYPE_NOT_IN; } +#line 8844 "sql.c" break; case 646: /* in_predicate_value ::= NK_LP literal_list NK_RP */ +#line 1346 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, createNodeListNode(pCxt, yymsp[-1].minor.yy946)); } +#line 8849 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 648: /* 
boolean_value_expression ::= NOT boolean_primary */ +#line 1350 "sql.y" { SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-1].minor.yy0, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_NOT, releaseRawExprNode(pCxt, yymsp[0].minor.yy974), NULL)); } +#line 8858 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 649: /* boolean_value_expression ::= boolean_value_expression OR boolean_value_expression */ +#line 1355 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_OR, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8868 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 650: /* boolean_value_expression ::= boolean_value_expression AND boolean_value_expression */ +#line 1361 "sql.y" { SToken s = getTokenFromRawExprNode(pCxt, yymsp[-2].minor.yy974); SToken e = getTokenFromRawExprNode(pCxt, yymsp[0].minor.yy974); yylhsminor.yy974 = createRawExprNodeExt(pCxt, &s, &e, createLogicConditionNode(pCxt, LOGIC_COND_TYPE_AND, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 8878 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 658: /* table_reference_list ::= table_reference_list NK_COMMA table_reference */ +#line 1379 "sql.y" { yylhsminor.yy974 = createJoinTableNode(pCxt, JOIN_TYPE_INNER, JOIN_STYPE_NONE, yymsp[-2].minor.yy974, yymsp[0].minor.yy974, NULL); } +#line 8884 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 661: /* table_primary ::= table_name alias_opt */ +#line 1385 "sql.y" { yylhsminor.yy974 = createRealTableNode(pCxt, NULL, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } +#line 8890 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 662: /* table_primary ::= db_name NK_DOT table_name alias_opt */ +#line 1386 "sql.y" { yylhsminor.yy974 = createRealTableNode(pCxt, &yymsp[-3].minor.yy557, &yymsp[-1].minor.yy557, &yymsp[0].minor.yy557); } +#line 8896 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 663: /* table_primary ::= subquery alias_opt */ +#line 1387 "sql.y" { yylhsminor.yy974 = createTempTableNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), &yymsp[0].minor.yy557); } +#line 8902 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 665: /* alias_opt ::= */ +#line 1392 "sql.y" { yymsp[1].minor.yy557 = nil_token; } +#line 8908 "sql.c" break; case 667: /* alias_opt ::= AS table_alias */ +#line 1394 "sql.y" { yymsp[-1].minor.yy557 = yymsp[0].minor.yy557; } +#line 8913 "sql.c" break; case 668: /* parenthesized_joined_table ::= NK_LP joined_table NK_RP */ case 669: /* parenthesized_joined_table ::= NK_LP parenthesized_joined_table NK_RP */ yytestcase(yyruleno==669); +#line 1396 "sql.y" { yymsp[-2].minor.yy974 = yymsp[-1].minor.yy974; } +#line 8919 "sql.c" break; case 670: /* joined_table ::= table_reference join_type join_subtype JOIN table_reference join_on_clause_opt window_offset_clause_opt jlimit_clause_opt */ +#line 1402 "sql.y" { yylhsminor.yy974 = createJoinTableNode(pCxt, yymsp[-6].minor.yy792, yymsp[-5].minor.yy744, yymsp[-7].minor.yy974, yymsp[-3].minor.yy974, yymsp[-2].minor.yy974); yylhsminor.yy974 = addWindowOffsetClause(pCxt, yylhsminor.yy974, yymsp[-1].minor.yy974); yylhsminor.yy974 = addJLimitClause(pCxt, 
yylhsminor.yy974, yymsp[0].minor.yy974); } +#line 8928 "sql.c" yymsp[-7].minor.yy974 = yylhsminor.yy974; break; case 671: /* join_type ::= */ +#line 1410 "sql.y" { yymsp[1].minor.yy792 = JOIN_TYPE_INNER; } +#line 8934 "sql.c" break; case 672: /* join_type ::= INNER */ +#line 1411 "sql.y" { yymsp[0].minor.yy792 = JOIN_TYPE_INNER; } +#line 8939 "sql.c" break; case 673: /* join_type ::= LEFT */ +#line 1412 "sql.y" { yymsp[0].minor.yy792 = JOIN_TYPE_LEFT; } +#line 8944 "sql.c" break; case 674: /* join_type ::= RIGHT */ +#line 1413 "sql.y" { yymsp[0].minor.yy792 = JOIN_TYPE_RIGHT; } +#line 8949 "sql.c" break; case 675: /* join_type ::= FULL */ +#line 1414 "sql.y" { yymsp[0].minor.yy792 = JOIN_TYPE_FULL; } +#line 8954 "sql.c" break; case 676: /* join_subtype ::= */ +#line 1418 "sql.y" { yymsp[1].minor.yy744 = JOIN_STYPE_NONE; } +#line 8959 "sql.c" break; case 677: /* join_subtype ::= OUTER */ +#line 1419 "sql.y" { yymsp[0].minor.yy744 = JOIN_STYPE_OUTER; } +#line 8964 "sql.c" break; case 678: /* join_subtype ::= SEMI */ +#line 1420 "sql.y" { yymsp[0].minor.yy744 = JOIN_STYPE_SEMI; } +#line 8969 "sql.c" break; case 679: /* join_subtype ::= ANTI */ +#line 1421 "sql.y" { yymsp[0].minor.yy744 = JOIN_STYPE_ANTI; } +#line 8974 "sql.c" break; case 680: /* join_subtype ::= ASOF */ +#line 1422 "sql.y" { yymsp[0].minor.yy744 = JOIN_STYPE_ASOF; } +#line 8979 "sql.c" break; case 681: /* join_subtype ::= WINDOW */ +#line 1423 "sql.y" { yymsp[0].minor.yy744 = JOIN_STYPE_WIN; } +#line 8984 "sql.c" break; case 685: /* window_offset_clause_opt ::= WINDOW_OFFSET NK_LP window_offset_literal NK_COMMA window_offset_literal NK_RP */ +#line 1430 "sql.y" { yymsp[-5].minor.yy974 = createWindowOffsetNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 8989 "sql.c" break; case 686: /* window_offset_literal ::= NK_VARIABLE */ +#line 1432 "sql.y" { yylhsminor.yy974 = createRawExprNode(pCxt, &yymsp[0].minor.yy0, createTimeOffsetValueNode(pCxt, &yymsp[0].minor.yy0)); } +#line 8994 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 687: /* window_offset_literal ::= NK_MINUS NK_VARIABLE */ +#line 1433 "sql.y" { SToken t = yymsp[-1].minor.yy0; t.n = (yymsp[0].minor.yy0.z + yymsp[0].minor.yy0.n) - yymsp[-1].minor.yy0.z; yylhsminor.yy974 = createRawExprNode(pCxt, &t, createTimeOffsetValueNode(pCxt, &t)); } +#line 9004 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 689: /* jlimit_clause_opt ::= JLIMIT NK_INTEGER */ case 762: /* slimit_clause_opt ::= SLIMIT NK_INTEGER */ yytestcase(yyruleno==762); case 766: /* limit_clause_opt ::= LIMIT NK_INTEGER */ yytestcase(yyruleno==766); +#line 1440 "sql.y" { yymsp[-1].minor.yy974 = createLimitNode(pCxt, &yymsp[0].minor.yy0, NULL); } +#line 9012 "sql.c" break; case 690: /* query_specification ::= SELECT hint_list set_quantifier_opt tag_mode_opt select_list from_clause_opt where_clause_opt partition_by_clause_opt range_opt every_opt fill_opt twindow_clause_opt group_by_clause_opt having_clause_opt */ +#line 1446 "sql.y" { yymsp[-13].minor.yy974 = createSelectStmt(pCxt, yymsp[-11].minor.yy569, yymsp[-9].minor.yy946, yymsp[-8].minor.yy974, yymsp[-12].minor.yy946); yymsp[-13].minor.yy974 = setSelectStmtTagMode(pCxt, yymsp[-13].minor.yy974, yymsp[-10].minor.yy569); @@ -7644,156 +9025,243 @@ yymsp[0].minor.yy974); } yymsp[-13].minor.yy974 = addEveryClause(pCxt, yymsp[-13].minor.yy974, yymsp[-4].minor.yy974); yymsp[-13].minor.yy974 = addFillClause(pCxt, yymsp[-13].minor.yy974, yymsp[-3].minor.yy974); } +#line 
9028 "sql.c" break; case 691: /* hint_list ::= */ +#line 1461 "sql.y" { yymsp[1].minor.yy946 = createHintNodeList(pCxt, NULL); } +#line 9033 "sql.c" break; case 692: /* hint_list ::= NK_HINT */ +#line 1462 "sql.y" { yylhsminor.yy946 = createHintNodeList(pCxt, &yymsp[0].minor.yy0); } +#line 9038 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 697: /* set_quantifier_opt ::= ALL */ +#line 1473 "sql.y" { yymsp[0].minor.yy569 = false; } +#line 9044 "sql.c" break; case 700: /* select_item ::= NK_STAR */ +#line 1480 "sql.y" { yylhsminor.yy974 = createColumnNode(pCxt, NULL, &yymsp[0].minor.yy0); } +#line 9049 "sql.c" yymsp[0].minor.yy974 = yylhsminor.yy974; break; case 702: /* select_item ::= common_expression column_alias */ case 712: /* partition_item ::= expr_or_subquery column_alias */ yytestcase(yyruleno==712); +#line 1482 "sql.y" { yylhsminor.yy974 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), &yymsp[0].minor.yy557); } +#line 9056 "sql.c" yymsp[-1].minor.yy974 = yylhsminor.yy974; break; case 703: /* select_item ::= common_expression AS column_alias */ case 713: /* partition_item ::= expr_or_subquery AS column_alias */ yytestcase(yyruleno==713); +#line 1483 "sql.y" { yylhsminor.yy974 = setProjectionAlias(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), &yymsp[0].minor.yy557); } +#line 9063 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 708: /* partition_by_clause_opt ::= PARTITION BY partition_list */ case 740: /* group_by_clause_opt ::= GROUP BY group_by_list */ yytestcase(yyruleno==740); case 760: /* order_by_clause_opt ::= ORDER BY sort_specification_list */ yytestcase(yyruleno==760); +#line 1492 "sql.y" { yymsp[-2].minor.yy946 = yymsp[0].minor.yy946; } +#line 9071 "sql.c" break; case 715: /* twindow_clause_opt ::= SESSION NK_LP column_reference NK_COMMA interval_sliding_duration_literal NK_RP */ +#line 1505 "sql.y" { yymsp[-5].minor.yy974 = createSessionWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 9076 "sql.c" break; case 716: /* twindow_clause_opt ::= STATE_WINDOW NK_LP expr_or_subquery NK_RP */ +#line 1506 "sql.y" { yymsp[-3].minor.yy974 = createStateWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 9081 "sql.c" break; case 717: /* twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_RP sliding_opt fill_opt */ +#line 1508 "sql.y" { yymsp[-5].minor.yy974 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), NULL, yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 9086 "sql.c" break; case 718: /* twindow_clause_opt ::= INTERVAL NK_LP interval_sliding_duration_literal NK_COMMA interval_sliding_duration_literal NK_RP sliding_opt fill_opt */ +#line 1512 "sql.y" { yymsp[-7].minor.yy974 = createIntervalWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-5].minor.yy974), releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), yymsp[-1].minor.yy974, yymsp[0].minor.yy974); } +#line 9091 "sql.c" break; case 719: /* twindow_clause_opt ::= EVENT_WINDOW START WITH search_condition END WITH search_condition */ +#line 1514 "sql.y" { yymsp[-6].minor.yy974 = createEventWindowNode(pCxt, yymsp[-3].minor.yy974, yymsp[0].minor.yy974); } +#line 9096 "sql.c" break; case 720: /* twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_RP */ +#line 1516 "sql.y" { yymsp[-3].minor.yy974 = createCountWindowNode(pCxt, &yymsp[-1].minor.yy0, &yymsp[-1].minor.yy0); } +#line 9101 "sql.c" break; case 721: /* 
twindow_clause_opt ::= COUNT_WINDOW NK_LP NK_INTEGER NK_COMMA NK_INTEGER NK_RP */ +#line 1518 "sql.y" { yymsp[-5].minor.yy974 = createCountWindowNode(pCxt, &yymsp[-3].minor.yy0, &yymsp[-1].minor.yy0); } +#line 9106 "sql.c" break; case 722: /* twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_RP */ +#line 1520 "sql.y" { yymsp[-3].minor.yy974 = createAnomalyWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974), NULL); } +#line 9111 "sql.c" break; case 723: /* twindow_clause_opt ::= ANOMALY_WINDOW NK_LP expr_or_subquery NK_COMMA NK_STRING NK_RP */ +#line 1522 "sql.y" { yymsp[-5].minor.yy974 = createAnomalyWindowNode(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), &yymsp[-1].minor.yy0); } +#line 9116 "sql.c" break; case 730: /* fill_opt ::= FILL NK_LP fill_mode NK_RP */ +#line 1532 "sql.y" { yymsp[-3].minor.yy974 = createFillNode(pCxt, yymsp[-1].minor.yy102, NULL); } +#line 9121 "sql.c" break; case 731: /* fill_opt ::= FILL NK_LP VALUE NK_COMMA expression_list NK_RP */ +#line 1533 "sql.y" { yymsp[-5].minor.yy974 = createFillNode(pCxt, FILL_MODE_VALUE, createNodeListNode(pCxt, yymsp[-1].minor.yy946)); } +#line 9126 "sql.c" break; case 732: /* fill_opt ::= FILL NK_LP VALUE_F NK_COMMA expression_list NK_RP */ +#line 1534 "sql.y" { yymsp[-5].minor.yy974 = createFillNode(pCxt, FILL_MODE_VALUE_F, createNodeListNode(pCxt, yymsp[-1].minor.yy946)); } +#line 9131 "sql.c" break; case 733: /* fill_mode ::= NONE */ +#line 1538 "sql.y" { yymsp[0].minor.yy102 = FILL_MODE_NONE; } +#line 9136 "sql.c" break; case 734: /* fill_mode ::= PREV */ +#line 1539 "sql.y" { yymsp[0].minor.yy102 = FILL_MODE_PREV; } +#line 9141 "sql.c" break; case 735: /* fill_mode ::= NULL */ +#line 1540 "sql.y" { yymsp[0].minor.yy102 = FILL_MODE_NULL; } +#line 9146 "sql.c" break; case 736: /* fill_mode ::= NULL_F */ +#line 1541 "sql.y" { yymsp[0].minor.yy102 = FILL_MODE_NULL_F; } +#line 9151 "sql.c" break; case 737: /* fill_mode ::= LINEAR */ +#line 1542 "sql.y" { yymsp[0].minor.yy102 = FILL_MODE_LINEAR; } +#line 9156 "sql.c" break; case 738: /* fill_mode ::= NEXT */ +#line 1543 "sql.y" { yymsp[0].minor.yy102 = FILL_MODE_NEXT; } +#line 9161 "sql.c" break; case 741: /* group_by_list ::= expr_or_subquery */ +#line 1552 "sql.y" { yylhsminor.yy946 = createNodeList(pCxt, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 9166 "sql.c" yymsp[0].minor.yy946 = yylhsminor.yy946; break; case 742: /* group_by_list ::= group_by_list NK_COMMA expr_or_subquery */ +#line 1553 "sql.y" { yylhsminor.yy946 = addNodeToList(pCxt, yymsp[-2].minor.yy946, createGroupingSetNode(pCxt, releaseRawExprNode(pCxt, yymsp[0].minor.yy974))); } +#line 9172 "sql.c" yymsp[-2].minor.yy946 = yylhsminor.yy946; break; case 746: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_COMMA expr_or_subquery NK_RP */ +#line 1560 "sql.y" { yymsp[-5].minor.yy974 = createInterpTimeRange(pCxt, releaseRawExprNode(pCxt, yymsp[-3].minor.yy974), releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 9178 "sql.c" break; case 747: /* range_opt ::= RANGE NK_LP expr_or_subquery NK_RP */ +#line 1562 "sql.y" { yymsp[-3].minor.yy974 = createInterpTimePoint(pCxt, releaseRawExprNode(pCxt, yymsp[-1].minor.yy974)); } +#line 9183 "sql.c" break; case 750: /* query_expression ::= query_simple order_by_clause_opt slimit_clause_opt limit_clause_opt */ +#line 1569 "sql.y" { yylhsminor.yy974 = addOrderByClause(pCxt, yymsp[-3].minor.yy974, yymsp[-2].minor.yy946); yylhsminor.yy974 = addSlimitClause(pCxt, yylhsminor.yy974, yymsp[-1].minor.yy974); 
yylhsminor.yy974 = addLimitClause(pCxt, yylhsminor.yy974, yymsp[0].minor.yy974); } +#line 9192 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 753: /* union_query_expression ::= query_simple_or_subquery UNION ALL query_simple_or_subquery */ +#line 1579 "sql.y" { yylhsminor.yy974 = createSetOperator(pCxt, SET_OP_TYPE_UNION_ALL, yymsp[-3].minor.yy974, yymsp[0].minor.yy974); } +#line 9198 "sql.c" yymsp[-3].minor.yy974 = yylhsminor.yy974; break; case 754: /* union_query_expression ::= query_simple_or_subquery UNION query_simple_or_subquery */ +#line 1581 "sql.y" { yylhsminor.yy974 = createSetOperator(pCxt, SET_OP_TYPE_UNION, yymsp[-2].minor.yy974, yymsp[0].minor.yy974); } +#line 9204 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 763: /* slimit_clause_opt ::= SLIMIT NK_INTEGER SOFFSET NK_INTEGER */ case 767: /* limit_clause_opt ::= LIMIT NK_INTEGER OFFSET NK_INTEGER */ yytestcase(yyruleno==767); +#line 1596 "sql.y" { yymsp[-3].minor.yy974 = createLimitNode(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0); } +#line 9211 "sql.c" break; case 764: /* slimit_clause_opt ::= SLIMIT NK_INTEGER NK_COMMA NK_INTEGER */ case 768: /* limit_clause_opt ::= LIMIT NK_INTEGER NK_COMMA NK_INTEGER */ yytestcase(yyruleno==768); +#line 1597 "sql.y" { yymsp[-3].minor.yy974 = createLimitNode(pCxt, &yymsp[0].minor.yy0, &yymsp[-2].minor.yy0); } +#line 9217 "sql.c" break; case 769: /* subquery ::= NK_LP query_expression NK_RP */ +#line 1605 "sql.y" { yylhsminor.yy974 = createRawExprNodeExt(pCxt, &yymsp[-2].minor.yy0, &yymsp[0].minor.yy0, yymsp[-1].minor.yy974); } +#line 9222 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 774: /* sort_specification ::= expr_or_subquery ordering_specification_opt null_ordering_opt */ +#line 1619 "sql.y" { yylhsminor.yy974 = createOrderByExprNode(pCxt, releaseRawExprNode(pCxt, yymsp[-2].minor.yy974), yymsp[-1].minor.yy410, yymsp[0].minor.yy307); } +#line 9228 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; case 775: /* ordering_specification_opt ::= */ +#line 1623 "sql.y" { yymsp[1].minor.yy410 = ORDER_ASC; } +#line 9234 "sql.c" break; case 776: /* ordering_specification_opt ::= ASC */ +#line 1624 "sql.y" { yymsp[0].minor.yy410 = ORDER_ASC; } +#line 9239 "sql.c" break; case 777: /* ordering_specification_opt ::= DESC */ +#line 1625 "sql.y" { yymsp[0].minor.yy410 = ORDER_DESC; } +#line 9244 "sql.c" break; case 778: /* null_ordering_opt ::= */ +#line 1629 "sql.y" { yymsp[1].minor.yy307 = NULL_ORDER_DEFAULT; } +#line 9249 "sql.c" break; case 779: /* null_ordering_opt ::= NULLS FIRST */ +#line 1630 "sql.y" { yymsp[-1].minor.yy307 = NULL_ORDER_FIRST; } +#line 9254 "sql.c" break; case 780: /* null_ordering_opt ::= NULLS LAST */ +#line 1631 "sql.y" { yymsp[-1].minor.yy307 = NULL_ORDER_LAST; } +#line 9259 "sql.c" break; case 783: /* column_options ::= column_options NK_ID NK_STRING */ +#line 1639 "sql.y" { yylhsminor.yy974 = setColumnOptions(pCxt, yymsp[-2].minor.yy974, &yymsp[-1].minor.yy0, &yymsp[0].minor.yy0); } +#line 9264 "sql.c" yymsp[-2].minor.yy974 = yylhsminor.yy974; break; default: @@ -7856,6 +9324,7 @@ static void yy_syntax_error( ParseCTX_FETCH #define TOKEN yyminor /************ Begin %syntax_error code ****************************************/ +#line 29 "sql.y" if (TSDB_CODE_SUCCESS == pCxt->errCode) { if(TOKEN.z) { @@ -7866,6 +9335,7 @@ static void yy_syntax_error( } else if (TSDB_CODE_PAR_DB_NOT_SPECIFIED == pCxt->errCode && TK_NK_FLOAT == TOKEN.type) { pCxt->errCode = generateSyntaxErrMsg(&pCxt->msgBuf, 
TSDB_CODE_PAR_SYNTAX_ERROR, TOKEN.z); } +#line 9338 "sql.c" /************ End %syntax_error code ******************************************/ ParseARG_STORE /* Suppress warning about unused %extra_argument variable */ ParseCTX_STORE diff --git a/source/libs/qworker/src/qwUtil.c b/source/libs/qworker/src/qwUtil.c index bebb9b288a..ef07a42629 100644 --- a/source/libs/qworker/src/qwUtil.c +++ b/source/libs/qworker/src/qwUtil.c @@ -533,7 +533,7 @@ int32_t qwSaveTbVersionInfo(qTaskInfo_t pTaskInfo, SQWTaskCtx *ctx) { while (true) { tbGet = false; - code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, tbName, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet); + code = qGetQueryTableSchemaVersion(pTaskInfo, dbFName, TSDB_DB_FNAME_LEN, tbName, TSDB_TABLE_NAME_LEN, &tbInfo.sversion, &tbInfo.tversion, i, &tbGet); if (TSDB_CODE_SUCCESS != code || !tbGet) { break; } diff --git a/source/libs/scalar/src/filter.c b/source/libs/scalar/src/filter.c index 03bc2b544b..d8622d93ee 100644 --- a/source/libs/scalar/src/filter.c +++ b/source/libs/scalar/src/filter.c @@ -4133,7 +4133,7 @@ int32_t fltSclBuildDatumFromValueNode(SFltSclDatum *datum, SValueNode *valNode) } case TSDB_DATA_TYPE_BOOL: { datum->kind = FLT_SCL_DATUM_KIND_INT64; - datum->i = (valNode->datum.b) ? 0 : 1; + datum->i = (valNode->datum.b) ? 1 : 0; break; } case TSDB_DATA_TYPE_TINYINT: @@ -4541,6 +4541,7 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) { if (info->scalarMode) { SArray *colRanges = info->sclCtx.fltSclRange; + SOperatorNode *optNode = (SOperatorNode *) pNode; if (taosArrayGetSize(colRanges) == 1) { SFltSclColumnRange *colRange = taosArrayGet(colRanges, 0); if (NULL == colRange) { @@ -4560,7 +4561,8 @@ int32_t filterGetTimeRange(SNode *pNode, STimeWindow *win, bool *isStrict) { FLT_ERR_JRET(fltSclGetTimeStampDatum(endPt, &end)); win->skey = start.i; win->ekey = end.i; - *isStrict = true; + if(optNode->opType == OP_TYPE_IN) *isStrict = false; + else *isStrict = true; goto _return; } else if (taosArrayGetSize(points) == 0) { *win = TSWINDOW_DESC_INITIALIZER; @@ -5023,6 +5025,34 @@ int32_t fltSclBuildRangePoints(SFltSclOperator *oper, SArray *points) { } break; } + case OP_TYPE_IN: { + SNodeListNode *listNode = (SNodeListNode *)oper->valNode; + SListCell *cell = listNode->pNodeList->pHead; + SFltSclDatum minDatum = {.kind = FLT_SCL_DATUM_KIND_INT64, .i = INT64_MAX, .type = oper->colNode->node.resType}; + SFltSclDatum maxDatum = {.kind = FLT_SCL_DATUM_KIND_INT64, .i = INT64_MIN, .type = oper->colNode->node.resType}; + for (int32_t i = 0; i < listNode->pNodeList->length; ++i) { + SValueNode *valueNode = (SValueNode *)cell->pNode; + SFltSclDatum valDatum; + FLT_ERR_RET(fltSclBuildDatumFromValueNode(&valDatum, valueNode)); + if(valueNode->node.resType.type == TSDB_DATA_TYPE_FLOAT || valueNode->node.resType.type == TSDB_DATA_TYPE_DOUBLE) { + minDatum.i = TMIN(minDatum.i, valDatum.d); + maxDatum.i = TMAX(maxDatum.i, valDatum.d); + } else { + minDatum.i = TMIN(minDatum.i, valDatum.i); + maxDatum.i = TMAX(maxDatum.i, valDatum.i); + } + cell = cell->pNext; + } + SFltSclPoint startPt = {.start = true, .excl = false, .val = minDatum}; + SFltSclPoint endPt = {.start = false, .excl = false, .val = maxDatum}; + if (NULL == taosArrayPush(points, &startPt)) { + FLT_ERR_RET(terrno); + } + if (NULL == taosArrayPush(points, &endPt)) { + FLT_ERR_RET(terrno); + } + break; + } default: { qError("not supported operator type : %d when build range points", oper->type); break; @@ -5075,11 +5105,13 @@ static bool 
fltSclIsCollectableNode(SNode *pNode) { if (!(pOper->opType == OP_TYPE_GREATER_THAN || pOper->opType == OP_TYPE_GREATER_EQUAL || pOper->opType == OP_TYPE_LOWER_THAN || pOper->opType == OP_TYPE_LOWER_EQUAL || - pOper->opType == OP_TYPE_NOT_EQUAL || pOper->opType == OP_TYPE_EQUAL)) { + pOper->opType == OP_TYPE_NOT_EQUAL || pOper->opType == OP_TYPE_EQUAL || + pOper->opType == OP_TYPE_IN)) { return false; } - if (!(nodeType(pOper->pLeft) == QUERY_NODE_COLUMN && nodeType(pOper->pRight) == QUERY_NODE_VALUE)) { + if (!((nodeType(pOper->pLeft) == QUERY_NODE_COLUMN && nodeType(pOper->pRight) == QUERY_NODE_VALUE) || + (nodeType(pOper->pLeft) == QUERY_NODE_COLUMN && nodeType(pOper->pRight) == QUERY_NODE_NODE_LIST))) { return false; } return true; diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 33cbf64905..b82d06b6c7 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -3364,7 +3364,7 @@ int32_t streamStateClear_rocksdb(SStreamState* pState) { return 0; } void streamStateCurNext_rocksdb(SStreamStateCur* pCur) { - if (pCur) { + if (pCur && pCur->iter && rocksdb_iter_valid(pCur->iter)) { rocksdb_iter_next(pCur->iter); } } diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index cfe476540c..0e2ff48fa5 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -120,7 +120,7 @@ SStreamState* streamStateOpen(const char* path, void* pTask, int64_t streamId, i SStreamTask* pStreamTask = pTask; pState->streamId = streamId; pState->taskId = taskId; - sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId); + TAOS_UNUSED(tsnprintf(pState->pTdbState->idstr, sizeof(pState->pTdbState->idstr), "0x%" PRIx64 "-0x%x", pState->streamId, pState->taskId)); code = streamTaskSetDb(pStreamTask->pMeta, pTask, pState->pTdbState->idstr); QUERY_CHECK_CODE(code, lino, _end); diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index 424845e4f2..c630010598 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -777,7 +777,7 @@ _end: int32_t forceRemoveCheckpoint(SStreamFileState* pFileState, int64_t checkpointId) { char keyBuf[128] = {0}; - sprintf(keyBuf, "%s:%" PRId64 "", TASK_KEY, checkpointId); + TAOS_UNUSED(tsnprintf(keyBuf, sizeof(keyBuf), "%s:%" PRId64 "", TASK_KEY, checkpointId)); return streamDefaultDel_rocksdb(pFileState->pFileStore, keyBuf); } @@ -799,14 +799,14 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { } memcpy(buf, val, len); buf[len] = 0; - maxCheckPointId = atol((char*)buf); + maxCheckPointId = taosStr2Int64((char*)buf, NULL, 10); taosMemoryFree(val); } for (int64_t i = maxCheckPointId; i > 0; i--) { char buf[128] = {0}; void* val = 0; int32_t len = 0; - sprintf(buf, "%s:%" PRId64 "", TASK_KEY, i); + TAOS_UNUSED(tsnprintf(buf, sizeof(buf), "%s:%" PRId64 "", TASK_KEY, i)); code = streamDefaultGet_rocksdb(pFileState->pFileStore, buf, &val, &len); if (code != 0) { return TSDB_CODE_FAILED; @@ -816,7 +816,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { taosMemoryFree(val); TSKEY ts; - ts = atol((char*)buf); + ts = taosStr2Int64((char*)buf, NULL, 10); if (ts < mark) { // statekey winkey.ts < mark int32_t tmpRes = forceRemoveCheckpoint(pFileState, i); diff --git a/source/libs/sync/src/syncAppendEntries.c 
b/source/libs/sync/src/syncAppendEntries.c index e3f94c1c9a..0345880874 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -95,6 +95,8 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { bool accepted = false; SSyncRaftEntry* pEntry = NULL; bool resetElect = false; + const STraceId* trace = &pRpcMsg->info.traceId; + char tbuf[40] = {0}; // if already drop replica, do not process if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { @@ -150,10 +152,10 @@ int32_t syncNodeOnAppendEntries(SSyncNode* ths, const SRpcMsg* pRpcMsg) { goto _IGNORE; } - sTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64 - ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64, - pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, - pEntry->term); + sGTrace("vgId:%d, recv append entries msg. index:%" PRId64 ", term:%" PRId64 ", preLogIndex:%" PRId64 + ", prevLogTerm:%" PRId64 " commitIndex:%" PRId64 " entryterm:%" PRId64, + pMsg->vgId, pMsg->prevLogIndex + 1, pMsg->term, pMsg->prevLogIndex, pMsg->prevLogTerm, pMsg->commitIndex, + pEntry->term); if (ths->fsmState == SYNC_FSM_STATE_INCOMPLETE) { pReply->fsmState = ths->fsmState; @@ -179,6 +181,11 @@ _SEND_RESPONSE: sTrace("vgId:%d, update commit return index %" PRId64 "", ths->vgId, returnIndex); } + TRACE_SET_MSGID(&(rpcRsp.info.traceId), tGenIdPI64()); + trace = &(rpcRsp.info.traceId); + sGTrace("vgId:%d, send append reply matchIndex:%" PRId64 " term:%" PRId64 " lastSendIndex:%" PRId64 + " to dest: 0x%016" PRIx64, + ths->vgId, pReply->matchIndex, pReply->term, pReply->lastSendIndex, pReply->destId.addr); // ack, i.e. send response TAOS_CHECK_RETURN(syncNodeSendMsgById(&pReply->destId, ths, &rpcRsp)); diff --git a/source/libs/sync/src/syncAppendEntriesReply.c b/source/libs/sync/src/syncAppendEntriesReply.c index 005cf4337d..a7f36be9e9 100644 --- a/source/libs/sync/src/syncAppendEntriesReply.c +++ b/source/libs/sync/src/syncAppendEntriesReply.c @@ -43,6 +43,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { int32_t code = 0; SyncAppendEntriesReply* pMsg = (SyncAppendEntriesReply*)pRpcMsg->pCont; int32_t ret = 0; + const STraceId* trace = &pRpcMsg->info.traceId; + char tbuf[40] = {0}; // if already drop replica, do not process if (!syncNodeInRaftGroup(ths, &(pMsg->srcId))) { @@ -63,8 +65,8 @@ int32_t syncNodeOnAppendEntriesReply(SSyncNode* ths, const SRpcMsg* pRpcMsg) { return TSDB_CODE_SYN_WRONG_TERM; } - sTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", - pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); + sGTrace("vgId:%d, received append entries reply. srcId:0x%016" PRIx64 ", term:%" PRId64 ", matchIndex:%" PRId64 "", + pMsg->vgId, pMsg->srcId.addr, pMsg->term, pMsg->matchIndex); if (pMsg->success) { SyncIndex oldMatchIndex = syncIndexMgrGetIndex(ths->pMatchIndex, &(pMsg->srcId)); diff --git a/source/libs/sync/src/syncPipeline.c b/source/libs/sync/src/syncPipeline.c index 9f6acf6d83..efb71b5714 100644 --- a/source/libs/sync/src/syncPipeline.c +++ b/source/libs/sync/src/syncPipeline.c @@ -1026,6 +1026,14 @@ int32_t syncLogReplRecover(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncAppendEn int32_t code = 0; if (pMgr->restored != false) return TSDB_CODE_SYN_INTERNAL_ERROR; + sTrace("vgId:%d, begin to recover sync log repl. 
peer: dnode:%d (%" PRIx64 "), repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 ") restore:%d, buffer: [%" PRId64 ", %" PRId64 ", %" PRId64 ", %" PRId64 + "), msg: {lastSendIndex:%" PRId64 ", matchIndex:%" PRId64 ", fsmState:%d, success:%d, lastMatchTerm:%" PRId64 + "}", + pNode->vgId, DID(&destId), destId.addr, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored, + pBuf->startIndex, pBuf->commitIndex, pBuf->matchIndex, pBuf->endIndex, pMsg->lastSendIndex, pMsg->matchIndex, + pMsg->fsmState, pMsg->success, pMsg->lastMatchTerm); + if (pMgr->endIndex == 0) { if (pMgr->startIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR; if (pMgr->matchIndex != 0) return TSDB_CODE_SYN_INTERNAL_ERROR; @@ -1171,6 +1179,11 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde int64_t nowMs = taosGetMonoTimestampMs(); int32_t code = 0; + sTrace("vgId:%d, begin to probe peer:%" PRIx64 " with msg of index:%" PRId64 ". repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 "), restored:%d", + pNode->vgId, pNode->replicasId[pMgr->peerId].addr, index, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, + pMgr->restored); + if (pMgr->endIndex > pMgr->startIndex && nowMs < pMgr->states[pMgr->startIndex % pMgr->size].timeMs + retryMaxWaitMs) { return 0; @@ -1206,6 +1219,10 @@ int32_t syncLogReplProbe(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex inde int32_t syncLogReplAttempt(SSyncLogReplMgr* pMgr, SSyncNode* pNode) { if (!pMgr->restored) return TSDB_CODE_SYN_INTERNAL_ERROR; + sTrace("vgId:%d, begin to attempt replicate log entries from end to match. repl-mgr:[%" PRId64 ", %" PRId64 + ", %" PRId64 "), restore:%d", + pNode->vgId, pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex, pMgr->restored); + SRaftId* pDestId = &pNode->replicasId[pMgr->peerId]; int32_t batchSize = TMAX(1, pMgr->size >> (4 + pMgr->retryBackoff)); int32_t code = 0; @@ -1527,11 +1544,12 @@ int32_t syncLogReplSendTo(SSyncLogReplMgr* pMgr, SSyncNode* pNode, SyncIndex ind goto _err; } + TRACE_SET_MSGID(&(msgOut.info.traceId), tGenIdPI64()); + STraceId* trace = &(msgOut.info.traceId); + sGTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64, + pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr); TAOS_CHECK_GOTO(syncNodeSendAppendEntries(pNode, pDestId, &msgOut), &lino, _err); - sTrace("vgId:%d, replicate one msg index:%" PRId64 " term:%" PRId64 " prevterm:%" PRId64 " to dest: 0x%016" PRIx64, - pNode->vgId, pEntry->index, pEntry->term, prevLogTerm, pDestId->addr); - if (!inBuf) { syncEntryDestroy(pEntry); pEntry = NULL; diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 212a75c2ae..efb258d952 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -152,8 +152,8 @@ static void syncLogReplStates2Str(SSyncNode* pSyncNode, char* buf, int32_t bufLe for (int32_t i = 0; i < pSyncNode->replicaNum; i++) { SSyncLogReplMgr* pMgr = pSyncNode->logReplMgrs[i]; if (pMgr == NULL) break; - len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 " %" PRId64 ", %" PRId64 "]", i, pMgr->restored, - pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); + len += tsnprintf(buf + len, bufLen - len, "%d:%d [%" PRId64 ", %" PRId64 ", %" PRId64 "]", i, pMgr->restored, + pMgr->startIndex, pMgr->matchIndex, pMgr->endIndex); if (i + 1 < pSyncNode->replicaNum) { len += tsnprintf(buf + len, bufLen - len, "%s", ", "); } diff --git a/source/libs/transport/inc/transComm.h 
b/source/libs/transport/inc/transComm.h index 3a4f11ac81..d835d12c79 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -278,19 +278,19 @@ bool transAsyncPoolIsEmpty(SAsyncPool* pool); } \ } while (0) -#define ASYNC_CHECK_HANDLE(idMgt, id, exh1) \ - do { \ - if (id > 0) { \ - SExHandle* exh2 = transAcquireExHandle(idMgt, id); \ - if (exh2 == NULL || exh1 != exh2 || (exh2 != NULL && exh2->refId != id)) { \ - tError("handle not match, exh1:%p, exh2:%p, refId:%"PRId64"", exh1, exh2, id); \ - code = TSDB_CODE_INVALID_MSG; \ - goto _return1; \ - } \ - } else { \ - tError("invalid handle to release"); \ - goto _return2; \ - } \ +#define ASYNC_CHECK_HANDLE(idMgt, id, exh1) \ + do { \ + if (id > 0) { \ + SExHandle* exh2 = transAcquireExHandle(idMgt, id); \ + if (exh2 == NULL || exh1 != exh2 || (exh2 != NULL && exh2->refId != id)) { \ + tDebug("handle not match, exh1:%p, exh2:%p, refId:%" PRId64 "", exh1, exh2, id); \ + code = TSDB_CODE_INVALID_MSG; \ + goto _return1; \ + } \ + } else { \ + tDebug("invalid handle to release"); \ + goto _return2; \ + } \ } while (0) int32_t transInitBuffer(SConnBuffer* buf); diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index c3e214b5e3..5ade768d0c 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -3090,7 +3090,7 @@ int32_t transReleaseCliHandle(void* handle) { static int32_t transInitMsg(void* pInstRef, const SEpSet* pEpSet, STransMsg* pReq, STransCtx* ctx, SCliReq** pCliMsg) { int32_t code = 0; - TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); + if (pReq->info.traceId.msgId == 0) TRACE_SET_MSGID(&pReq->info.traceId, tGenIdPI64()); SCliReq* pCliReq = NULL; SReqCtx* pCtx = taosMemoryCalloc(1, sizeof(SReqCtx)); diff --git a/source/util/src/tlog.c b/source/util/src/tlog.c index 3ca148a625..76d0139521 100644 --- a/source/util/src/tlog.c +++ b/source/util/src/tlog.c @@ -21,10 +21,12 @@ #include "tjson.h" #include "tutil.h" -#define LOG_MAX_LINE_SIZE (10024) -#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) -#define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024) -#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128) +#define LOG_MAX_LINE_SIZE (10024) +#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3) +#define LOG_MAX_STACK_LINE_SIZE (512) +#define LOG_MAX_STACK_LINE_BUFFER_SIZE (LOG_MAX_STACK_LINE_SIZE + 3) +#define LOG_MAX_LINE_DUMP_SIZE (1024 * 1024) +#define LOG_MAX_LINE_DUMP_BUFFER_SIZE (LOG_MAX_LINE_DUMP_SIZE + 128) #define LOG_FILE_DAY_LEN 64 @@ -126,7 +128,7 @@ int32_t idxDebugFlag = 131; int32_t sndDebugFlag = 131; int32_t simDebugFlag = 131; -int32_t tqClientDebug = 0; +int32_t tqClientDebugFlag = 131; int64_t dbgEmptyW = 0; int64_t dbgWN = 0; @@ -669,16 +671,40 @@ static inline void taosPrintLogImp(ELogLevel level, int32_t dflag, const char *b } } -void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) 
{
-  if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return;
+/*
+  try taosPrintLogImplUseStackBuffer first to avoid heap allocation; fall back to
+  taosPrintLogImplUseHeapBuffer for lines that overflow the stack buffer
-  char buffer[LOG_MAX_LINE_BUFFER_SIZE];
+*/
+static int8_t taosPrintLogImplUseStackBuffer(const char *flags, int32_t level, int32_t dflag, const char *format,
+                                             va_list args) {
+  char    buffer[LOG_MAX_STACK_LINE_BUFFER_SIZE];
   int32_t len = taosBuildLogHead(buffer, flags);
-  va_list argpointer;
-  va_start(argpointer, format);
-  int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len, format, argpointer);
-  va_end(argpointer);
+  int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_STACK_LINE_BUFFER_SIZE - len - 1, format, args);
+  if (writeLen > LOG_MAX_STACK_LINE_SIZE) {
+    return 1;
+  }
+
+  buffer[writeLen++] = '\n';
+  buffer[writeLen] = 0;
+
+  taosPrintLogImp(level, dflag, buffer, writeLen);
+
+  if (tsLogFp && level <= DEBUG_INFO) {
+    buffer[writeLen - 1] = 0;
+    (*tsLogFp)(taosGetTimestampMs(), level, buffer + len);
+  }
+  return 0;
+}
+static int8_t taosPrintLogImplUseHeapBuffer(const char *flags, int32_t level, int32_t dflag, const char *format,
+                                            va_list args) {
+  char *buffer = taosMemoryCalloc(1, LOG_MAX_LINE_BUFFER_SIZE + 1);
+  if (buffer == NULL) {
+    return 1;
+  }
+  int32_t len = taosBuildLogHead(buffer, flags);
+
+  int32_t writeLen = len + vsnprintf(buffer + len, LOG_MAX_LINE_BUFFER_SIZE - len - 1, format, args);
   if (writeLen > LOG_MAX_LINE_SIZE) writeLen = LOG_MAX_LINE_SIZE;
   buffer[writeLen++] = '\n';
@@ -690,6 +716,22 @@ void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *f
     buffer[writeLen - 1] = 0;
     (*tsLogFp)(taosGetTimestampMs(), level, buffer + len);
   }
+  taosMemoryFree(buffer);
+  return 0;
+}
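+// taosPrintLog below formats into the small stack buffer first; only lines longer
+// than LOG_MAX_STACK_LINE_SIZE pay for the heap-buffer fallback above.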
+void taosPrintLog(const char *flags, int32_t level, int32_t dflag, const char *format, ...) {
+  if (!(dflag & DEBUG_FILE) && !(dflag & DEBUG_SCREEN)) return;
+
+  va_list argpointer, argpointer_copy;
+  va_start(argpointer, format);
+  va_copy(argpointer_copy, argpointer);
+
+  if (taosPrintLogImplUseStackBuffer(flags, level, dflag, format, argpointer) != 0) {
+    TAOS_UNUSED(taosPrintLogImplUseHeapBuffer(flags, level, dflag, format, argpointer_copy));
+  }
+  va_end(argpointer_copy);
+  va_end(argpointer);
 }
 void taosPrintLongString(const char *flags, int32_t level, int32_t dflag, const char *format, ...) {
diff --git a/source/util/test/CMakeLists.txt b/source/util/test/CMakeLists.txt index 4966a629d8..3732c2af59 100644 --- a/source/util/test/CMakeLists.txt +++ b/source/util/test/CMakeLists.txt @@ -126,6 +126,13 @@
 add_test(
     COMMAND regexTest
 )
+add_executable(logTest "log.cpp")
+target_link_libraries(logTest os util common gtest_main)
+add_test(
+    NAME logTest
+    COMMAND logTest
+)
+
 add_executable(decompressTest "decompressTest.cpp")
 target_link_libraries(decompressTest os util common gtest_main)
 add_test(
diff --git a/source/util/test/log.cpp b/source/util/test/log.cpp new file mode 100644 index 0000000000..ba32d2d639 --- /dev/null +++ b/source/util/test/log.cpp @@ -0,0 +1,46 @@
+#include <gtest/gtest.h>
+#include <cstring>
+#include <iostream>
+#include <string>
+#include <os.h>
+#include <tlog.h>
+
+using namespace std;
+
+TEST(log, check_log_refactor) {
+  const char   *logDir = "/tmp";
+  const char   *defaultLogFileNamePrefix = "taoslog";
+  const int32_t maxLogFileNum = 10000;
+  tsAsyncLog = 0;
+  // idxDebugFlag = 143;
+  strcpy(tsLogDir, (char *)logDir);
+  taosInitLog(tsLogDir, 10, false);
+  tsAsyncLog = 0;
+  uDebugFlag = 143;
+
+  std::string str;
+  str.push_back('a');
+
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uError("write to file %s", str.c_str());
+  }
+  str.clear();
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uDebug("write to file %s", str.c_str());
+  }
+
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uInfo("write to file %s", str.c_str());
+  }
+  str.clear();
+
+  for (int i = 0; i < 10000; i += 2) {
+    str.push_back('a');
+    uTrace("write to file %s", str.c_str());
+  }
+  taosCloseLog();
+}
diff --git a/tests/army/frame/common.py b/tests/army/frame/common.py index b816095817..a82bf4c94f 100644 --- a/tests/army/frame/common.py +++ b/tests/army/frame/common.py @@ -803,11 +803,14 @@ class TDCom:
         else:
             tdLog.exit(f"getOneRow out of range: row_index={location} row_count={self.query_row}")
 
-    def killProcessor(self, processorName):
+    def kill_signal_process(self, signal=15, processor_name: str = "taosd"):
         if (platform.system().lower() == 'windows'):
-            os.system("TASKKILL /F /IM %s.exe"%processorName)
+            os.system(f"TASKKILL /F /IM {processor_name}.exe")
         else:
-            os.system("unset LD_PRELOAD; pkill %s " % processorName)
+            command = f"unset LD_PRELOAD; sudo pkill -f -{signal} '{processor_name}'"
+            tdLog.debug(f"command: {command}")
+            os.system(command)
+
     def gen_tag_col_str(self, gen_type, data_type, count):
         """
diff --git a/tests/army/storage/s3/s3Basic.json b/tests/army/storage/s3/s3Basic.json index ee341b2096..2b911a989f 100644 --- a/tests/army/storage/s3/s3Basic.json +++ b/tests/army/storage/s3/s3Basic.json @@ -20,7 +20,7 @@
     "replica": 1,
     "duration":"10d",
     "s3_keeplocal":"30d",
-    "s3_chunksize":"131072",
+    "s3_chunkpages":"131072",
     "tsdb_pagesize":"1",
     "s3_compact":"1",
     "wal_retention_size":"1",
diff --git a/tests/army/storage/s3/s3Basic.py b/tests/army/storage/s3/s3Basic.py index bc55fe6f5c..273a6129e1 100644 --- a/tests/army/storage/s3/s3Basic.py +++ b/tests/army/storage/s3/s3Basic.py @@ -168,13 +168,13 @@ class TDTestCase(TBase):
         if keepLocal is not None:
             kw1 = f"s3_keeplocal {keepLocal}"
         if chunkSize is not None:
-            kw2 = f"s3_chunksize {chunkSize}"
+            kw2 = f"s3_chunkpages {chunkSize}"
         if compact is not None:
             kw3 = f"s3_compact {compact}"
         sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}"
         tdSql.execute(sql, show=True)
-        #sql = f"select 
name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" sql = f"select * from information_schema.ins_databases where name='db1';" tdSql.query(sql) # 29 30 31 -> chunksize keeplocal compact @@ -194,9 +194,9 @@ class TDTestCase(TBase): f"create database db2 s3_keeplocal -1", f"create database db2 s3_keeplocal 0", f"create database db2 s3_keeplocal 365001", - f"create database db2 s3_chunksize -1", - f"create database db2 s3_chunksize 0", - f"create database db2 s3_chunksize 900000000", + f"create database db2 s3_chunkpages -1", + f"create database db2 s3_chunkpages 0", + f"create database db2 s3_chunkpages 900000000", f"create database db2 s3_compact -1", f"create database db2 s3_compact 100", f"create database db2 duration 1d s3_keeplocal 1d" diff --git a/tests/army/storage/s3/s3Basic1.json b/tests/army/storage/s3/s3Basic1.json index 02be308443..087f89edec 100644 --- a/tests/army/storage/s3/s3Basic1.json +++ b/tests/army/storage/s3/s3Basic1.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", diff --git a/tests/army/storage/s3/s3azure.py b/tests/army/storage/s3/s3azure.py index 43857cb7ca..e0226b0aa4 100644 --- a/tests/army/storage/s3/s3azure.py +++ b/tests/army/storage/s3/s3azure.py @@ -202,13 +202,13 @@ class TDTestCase(TBase): if keepLocal is not None: kw1 = f"s3_keeplocal {keepLocal}" if chunkSize is not None: - kw2 = f"s3_chunksize {chunkSize}" + kw2 = f"s3_chunkpages {chunkSize}" if compact is not None: kw3 = f"s3_compact {compact}" sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}" tdSql.execute(sql, show=True) - # sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';" + # sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" sql = f"select * from information_schema.ins_databases where name='db1';" tdSql.query(sql) # 29 30 31 -> chunksize keeplocal compact @@ -228,9 +228,9 @@ class TDTestCase(TBase): f"create database db2 s3_keeplocal -1", f"create database db2 s3_keeplocal 0", f"create database db2 s3_keeplocal 365001", - f"create database db2 s3_chunksize -1", - f"create database db2 s3_chunksize 0", - f"create database db2 s3_chunksize 900000000", + f"create database db2 s3_chunkpages -1", + f"create database db2 s3_chunkpages 0", + f"create database db2 s3_chunkpages 900000000", f"create database db2 s3_compact -1", f"create database db2 s3_compact 100", f"create database db2 duration 1d s3_keeplocal 1d" diff --git a/tests/army/tmq/drop_lost_comsumers.py b/tests/army/tmq/drop_lost_comsumers.py new file mode 100644 index 0000000000..a5e8140c4a --- /dev/null +++ b/tests/army/tmq/drop_lost_comsumers.py @@ -0,0 +1,337 @@ +import time +import os +import threading +import datetime +from taos.tmq import Consumer +from taos.error import TmqError + +from frame.log import tdLog +from frame.cases import tdCases +from frame.sql import tdSql +from frame.caseBase import * +from frame import etool +from frame.common import tdCom + + +class TaosConsumer: + # TODO: Move this class to tq.py and remove it from here + def __init__(self): + self.sub_once = True + self.once_consumer_rows = 0 + self.sub_log = False + self.safe_counter = ThreadSafeCounter() + + def log_info(self, message): + if self.sub_log: + tdLog.info(message) + + #TODO merge sub_consumer and sub_consumer_once + 
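# NOTE: sub_consumer below polls until the process is killed from outside, while
+    # sub_consumer_once counts its rows and then idles until stop_event is set.
+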
+    def sub_consumer(self, consumer, group_id, topic_name):
+        group_id = int(group_id)
+        if group_id < 100:
+            try:
+                consumer.subscribe([topic_name])
+            except TmqError:
+                tdLog.exit("subscribe error")
+        nrows = 0
+        while True:
+            start = datetime.datetime.now()
+            tdLog.info(f"time:{start},consumer:{group_id}, start to consume")
+            message = consumer.poll(timeout=10.0)
+
+            if message:
+                message_offset = message.offset()
+                # topic = message.topic()
+                # database = message.database()
+
+                for block in message:
+                    addrows = block.nrows()
+                    nrows += block.nrows()
+                    ncols = block.ncols()
+                    # values = block.fetchall
+                end = datetime.datetime.now()
+                elapsed_time = end - start
+                tdLog.info(
+                    f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},"
+                    f"consumer_nrows:{nrows},consumer_addrows:{addrows},"
+                    f"consumer_ncols:{ncols},offset:{message_offset}"
+                )
+            consumer.commit()
+            tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}")
+            # consumer.unsubscribe()
+            # consumer.close()
+
+    def set_conf(
+        self,
+        td_connect_ip="localhost",
+        group_id=1,
+        client_id="test_consumer_py",
+        enable_auto_commit="false",
+        auto_commit_interval_ms="1000",
+        auto_offset_reset="earliest",
+        msg_with_table_name="true",
+        session_timeout_ms=10000,
+        max_poll_interval_ms=180000,
+        experimental_snapshot_enable="false",
+    ):
+        conf = {
+            # auth options
+            # consume options
+            "td.connect.ip": f"{td_connect_ip}",
+            "group.id": f"{group_id}",
+            "client.id": f"{client_id}",
+            "enable.auto.commit": f"{enable_auto_commit}",
+            "auto.commit.interval.ms": f"{auto_commit_interval_ms}",
+            "auto.offset.reset": f"{auto_offset_reset}",
+            "msg.with.table.name": f"{msg_with_table_name}",
+            "session.timeout.ms": f"{session_timeout_ms}",
+            "max.poll.interval.ms": f"{max_poll_interval_ms}",
+            "experimental.snapshot.enable": f"{experimental_snapshot_enable}",
+        }
+        return conf
+
+    def sub_consumer_once(self, consumer, group_id, topic_name, stop_event):
+        group_id = int(group_id)
+        if group_id < 100:
+            consumer.subscribe([topic_name])
+        nrows = 0
+        consumer_nrows = 0
+        count = 0
+        while not stop_event.is_set():
+            start = datetime.datetime.now()
+            # self.log_info(
+            #     f"time:{start},consumer:{group_id}, start to consume,consumer_nrows:{consumer_nrows}"
+            # )
+            message = None
+            if consumer_nrows < self.once_consumer_rows:
+                message = consumer.poll(timeout=1.0)
+            elif consumer_nrows >= self.once_consumer_rows:
+                if count == 0:
+                    # if the loop broke here, the consumer would be closed; keep looping
+                    # (without polling) to keep the consumer alive until stop_event is set
+                    tdLog.info("stop consumer when consumer all rows")
+                    count += 1
+                    # tdLog.info("stop consumer when consumer all rows")
+                else:
+                    continue
+            if message:
+                message_offset = message.offset()
+                # topic = message.topic()
+                # database = message.database()
+                for block in message:
+                    addrows = block.nrows()
+                    nrows += block.nrows()
+                    self.safe_counter.rows(block.nrows())
+                    ncols = block.ncols()
+                    # values = block.fetchall
+                end = datetime.datetime.now()
+                elapsed_time = end - start
+
+                # self.log_info(
+                #     f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{message_offset}"
+                # )
+                self.log_info(
+                    f"consumer:{group_id},consumer_nrows:{nrows},counter.counter:{self.safe_counter.counter},counter.get():{self.safe_counter.get()}"
+                )
+
+            # consumer.commit()
+            consumer_nrows = nrows
+
+        tdLog.info("Consumer subscription thread is stopping.")
+
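+    # taosc_consumer is the per-thread entry point: it builds one Consumer from
+    # conf and runs either the bounded loop (sub_consumer_once) or the endless
+    # one (sub_consumer).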
+    def taosc_consumer(self, conf: dict, topic_name: str, stop_event: threading.Event):
+        try:
+            tdLog.info(conf)
+            tdLog.info("start to config consumer")
+            consumer = Consumer(conf)
+            tdLog.info("start to subscribe")
+            group_id = int(conf["group.id"])
+            tdLog.info(f"{consumer},{group_id}")
+            if self.sub_once:
+                self.sub_consumer_once(consumer, group_id, topic_name, stop_event)
+            else:
+                self.sub_consumer(consumer, group_id, topic_name)
+            # only consume once
+        except Exception as e:
+            tdLog.exit(f"{e}")
+
+        # consumer.close()
+
+
+class ThreadSafeCounter:
+    def __init__(self):
+        self.counter = 0
+        self.lock = threading.Lock()
+
+    def rows(self, rows):
+        with self.lock:
+            self.counter += rows
+
+    def get(self):
+        with self.lock:
+            return self.counter
+
+
+class TDTestCase:
+    # updatecfgDict = {'debugFlag': 135, 'asynclog': 0}
+    def __init__(self):
+        # db parameter
+        self.table_number = 1000
+        self.rows_per_table = 1000
+        # consumer parameter
+        self.consumer_groups_num = 2
+        self.session_timeout_ms = 180000
+        self.max_poll_interval_ms = 180000
+        # case consumer parameter
+        self.consumer_rows_per_thread = self.table_number * self.rows_per_table
+        self.consumer_all_rows = (
+            self.consumer_rows_per_thread * self.consumer_groups_num
+        )
+        self.topic_name = "select_d1"
+
+    def init(self, conn, logSql, replicaVar=1):
+        self.replicaVar = int(replicaVar)
+        tdLog.debug(f"start to execute {__file__}")
+        tdSql.init(conn.cursor(), logSql)
+        self.consumer_instance = TaosConsumer()
+        # tdSql.init(conn.cursor(), logSql)  # output sql.txt file
+
+    def caseDescription(self):
+        """
+        drop_lost_consumers:
+        1. verify that the boundary and valid values of session_timeout_ms take effect
+        2. verify that the boundary and valid values of max_poll_interval_ms take effect
+        3. verify that a consumer is closed when session_timeout_ms or max_poll_interval_ms expires
+        """
+        return
+
+    def check_consumer(self, count, rows, stop_event=None):
+        time.sleep(count)
+        try:
+            tdLog.info(
+                f"wait timeout count:{count} and check whether the consumer is closed"
+            )
+            for _ in range(2):
+                tdSql.query("show consumers")
+                answer_rows = tdSql.getRows()
+                if answer_rows == rows:
+                    break
+                time.sleep(1)
+                tdLog.info(
+                    f"wait for {count} seconds to check that consumers number is {answer_rows}"
+                )
+            if answer_rows != rows:
+                if stop_event:
+                    stop_event.set()
+                tdLog.exit(f"consumer number is {answer_rows} but not expected {rows}")
+        except Exception as e:
+            tdLog.exit(f"{e},check consumer error")
+
+    def drop_session_timeout_consumers(self):
+        tdSql.execute(f"drop topic if exists {self.topic_name};")
+        tdSql.execute("use db_sub")
+        tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;")
+
+        # start consumer and config some parameters
+        os.system(
+            f"nohup python3 ./tmq/per_consumer.py -c {self.consumer_groups_num} -s {self.session_timeout_ms} -p {self.max_poll_interval_ms} -t {self.topic_name} > consumer.log &"
+        )
+        # wait 5s for consuming data
+        time.sleep(5)
+        # kill consumer to simulate session_timeout_ms
+        tdLog.info("kill per_consumer.py")
+        tdCom.kill_signal_process(
+            signal=9, processor_name=r"python3\s*./tmq/per_consumer.py"
+        )
+        self.check_consumer(int(self.session_timeout_ms / 1000), 0)
+        tdSql.execute(f"drop topic if exists {self.topic_name};")
+        os.system("rm -rf consumer.log")
+
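+    # drop_max_poll_timeout_consumers drives consumers in-process: every thread
+    # consumes its full share of rows, then stops polling while keeping the
+    # Consumer open, so the server must drop it once max_poll_interval_ms expires.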
+    def drop_max_poll_timeout_consumers(self):
+        tdSql.execute(f"drop topic if exists {self.topic_name};")
+        tdSql.execute("use db_sub")
+        tdSql.execute(f"create topic {self.topic_name} as select * from db_sub.meters;")
+
+        threads = []
+        self.safe_counter = ThreadSafeCounter()
+        self.consumer_instance.safe_counter = self.safe_counter
+        stop_event = threading.Event()
+        self.consumer_instance.once_consumer_rows = self.consumer_rows_per_thread
+        tdLog.info(f"consumer_rows:{self.consumer_instance.once_consumer_rows}")
+        self.consumer_instance.sub_once = True
+        for group_id in range(self.consumer_groups_num):
+            conf = self.consumer_instance.set_conf(
+                group_id=group_id,
+                session_timeout_ms=self.session_timeout_ms,
+                max_poll_interval_ms=self.max_poll_interval_ms,
+            )
+            threads.append(
+                threading.Thread(
+                    target=self.consumer_instance.taosc_consumer,
+                    args=(conf, self.topic_name, stop_event),
+                )
+            )
+        for tr in threads:
+            tr.start()
+
+        while True:
+            if self.safe_counter.counter < self.consumer_all_rows:
+                # control print log frequency
+                time.sleep(1)
+                tdLog.info(
+                    f"consumer_all_rows:{self.consumer_all_rows},counter.get():{self.safe_counter.counter}"
+                )
+            elif self.safe_counter.counter == self.consumer_all_rows:
+                # the extra 5s leaves room for the heartbeat check
+                self.check_consumer(int(self.max_poll_interval_ms / 1000) + 5, 0, stop_event)
+                stop_event.set()
+                break
+
+            time.sleep(1)
+        tdSql.execute(f"drop topic if exists {self.topic_name};")
+
+    def case_session_timeout(self):
+        """
+        TEST CASE: verify that the boundary and valid values of session_timeout_ms take effect
+        """
+
+        tdLog.info("start to test session_timeout_ms=12s")
+        # test session_timeout_ms=12s
+        self.session_timeout_ms = 12000
+        self.max_poll_interval_ms = 180000
+        # self.set_session_timeout = int(self.session_timeout_ms / 1000)
+        self.drop_session_timeout_consumers()
+        tdLog.info("test of session_timeout_ms=12s is done")
+
+    def case_max_poll_timeout(self):
+        """
+        TEST CASE: verify that the boundary and valid values of max_poll_interval_ms take effect
+        """
+        tdLog.info("start to test max_poll_interval_ms=20s")
+        # test max_poll_interval_ms=20s
+        self.session_timeout_ms = 300000
+        self.max_poll_interval_ms = 20000
+        self.drop_max_poll_timeout_consumers()
+        tdLog.info("test of max_poll_interval_ms=20s is done")
+
+    def run(self):
+        """
+        Run the test cases for session timeout and max poll timeout.
+        """
+        vgroups = 4
+        etool.benchMark(
+            command=f"-d db_sub -t {self.table_number} -n {self.rows_per_table} -v {vgroups} -a {self.replicaVar} -y"
+        )
+        # test cases start here
+        self.topic_name = "select_d1"
+        # self.case_session_timeout()
+        self.case_max_poll_timeout()
+
+    def stop(self):
+        """
+        Close the taos connection and log the success message.
+        """
+        tdSql.close()
+        tdLog.success(f"{__file__} successfully executed")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/army/tmq/per_consumer.py b/tests/army/tmq/per_consumer.py new file mode 100644 index 0000000000..b8f409d710 --- /dev/null +++ b/tests/army/tmq/per_consumer.py @@ -0,0 +1,182 @@
+import os
+import taos
+import sys
+from datetime import datetime
+sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+from frame.log import tdLog
+import threading
+from taos.tmq import Consumer
+import click
+
+# TODO
+# 1. use the shared tmq common class instead of these local helpers;
+#    drop_lost_comsumers.py carries the same functions.
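+# per_consumer.py is started as a separate process by drop_lost_comsumers.py;
+# the click options select the consumer-group count, session.timeout.ms,
+# max.poll.interval.ms and the topic to subscribe to.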
+
+@click.command()
+@click.option(
+    "-c",
+    "--consumer-groups-num",
+    "consumer_group_num",
+    default=1,
+    help="Number of consumer groups.",
+)
+@click.option(
+    "-s",
+    "--session-timeout-ms",
+    "session_timeout_ms",
+    default=60000,
+    help="session timeout (ms)",
+)
+@click.option(
+    "-p",
+    "--max-poll-interval-ms",
+    "max_poll_interval_ms",
+    default=180000,
+    help="max poll interval (ms)",
+)
+@click.option(
+    "-t",
+    "--topic-name",
+    "topic_name",
+    default="select_d1",
+    help="topic name",
+)
+def test_timeout_sub(consumer_group_num, session_timeout_ms, max_poll_interval_ms, topic_name):
+    threads = []
+    tdLog.info(f"consumer_group_num:{consumer_group_num}, session_timeout_ms:{session_timeout_ms}, max_poll_interval_ms:{max_poll_interval_ms}")
+    for id in range(consumer_group_num):
+        conf = set_conf(
+            group_id=id,
+            session_timeout_ms=session_timeout_ms,
+            max_poll_interval_ms=max_poll_interval_ms,
+        )
+        tdLog.info(f"conf:{conf}")
+        threads.append(threading.Thread(target=taosc_consumer, args=(conf, topic_name)))
+    for tr in threads:
+        tr.start()
+    for tr in threads:
+        tr.join()
+
+
+def sub_consumer(consumer, group_id, topic_name):
+    group_id = int(group_id)
+    if group_id < 100:
+        try:
+            consumer.subscribe([topic_name])
+        except Exception as e:
+            tdLog.info(f"subscribe error: {e}")
+            exit(1)
+
+    nrows = 0
+    while True:
+        start = datetime.now()
+        tdLog.info(f"time:{start},consumer:{group_id}, start to consume")
+        message = consumer.poll(timeout=10.0)
+
+        if message:
+            id = message.offset()
+            topic = message.topic()
+            database = message.database()
+
+            for block in message:
+                addrows = block.nrows()
+                nrows += block.nrows()
+                ncols = block.ncols()
+                values = block.fetchall()
+            end = datetime.now()
+            elapsed_time = end - start
+            tdLog.info(
+                f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}"
+            )
+            consumer.commit()
+            tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}")
+        # consumer.unsubscribe()
+        # consumer.close()
+
+
+def sub_consumer_once(consumer, group_id, topic_name):
+    group_id = int(group_id)
+    if group_id < 100:
+        consumer.subscribe([topic_name])
+    nrows = 0
+    consumer_nrows = 0
+    while True:
+        start = datetime.now()
+        tdLog.info(f"time:{start},consumer:{group_id}, start to consume")
+        tdLog.info(f"consumer_nrows:{consumer_nrows}")
+        if consumer_nrows < 1000000:
+            message = consumer.poll(timeout=10.0)
+        else:
+            tdLog.info("stop consuming: all rows have been consumed")
+
+        if message:
+            id = message.offset()
+            topic = message.topic()
+            database = message.database()
+
+            for block in message:
+                addrows = block.nrows()
+                nrows += block.nrows()
+                ncols = block.ncols()
+                values = block.fetchall()
+            end = datetime.now()
+            elapsed_time = end - start
+            # tdLog.info(f"time:{end},consumer:{group_id}, elapsed time:{elapsed_time},consumer_nrows:{nrows},consumer_addrows:{addrows}, consumer_ncols:{ncols},offset:{id}")
+            consumer.commit()
+        # tdLog.info(f"consumer:{group_id},consumer_nrows:{nrows}")
+        consumer_nrows = nrows
+        # consumer.unsubscribe()
+        # consumer.close()
+        # break
+
+
+def set_conf(
+    td_connect_ip="localhost",
+    group_id=1,
+    client_id="test_consumer_py",
+    enable_auto_commit="false",
auto_commit_interval_ms="1000", + auto_offset_reset="earliest", + msg_with_table_name="true", + session_timeout_ms=10000, + max_poll_interval_ms=20000, + experimental_snapshot_enable="false", +): + conf = { + # auth options + # consume options + "td.connect.ip": f"{td_connect_ip}", + "group.id": f"{group_id}", + "client.id": f"{client_id}", + "enable.auto.commit": f"{enable_auto_commit}", + "auto.commit.interval.ms": f"{auto_commit_interval_ms}", + "auto.offset.reset": f"{auto_offset_reset}", + "msg.with.table.name": f"{msg_with_table_name}", + "session.timeout.ms": f"{session_timeout_ms}", + "max.poll.interval.ms": f"{max_poll_interval_ms}", + "experimental.snapshot.enable": f"{experimental_snapshot_enable}", + } + return conf + + +def taosc_consumer(conf,topic_name): + consumer = Consumer(conf) + group_id = int(conf["group.id"]) + tdLog.info(f"{consumer},{group_id}") + try: + sub_consumer_once(consumer, group_id, topic_name) + except Exception as e: + tdLog.info(str(e)) + + +if __name__ == "__main__": + test_timeout_sub() diff --git a/tests/develop-test/2-query/show_create_db.py b/tests/develop-test/2-query/show_create_db.py index b77e744df2..9589b6dc6f 100644 --- a/tests/develop-test/2-query/show_create_db.py +++ b/tests/develop-test/2-query/show_create_db.py @@ -42,17 +42,17 @@ class TDTestCase: tdSql.query('show create database scd;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd') - tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 525600m S3_COMPACT 1") + tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd2;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd2') - tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 525600m S3_COMPACT 1") + tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd4') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd4') - tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 
'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 525600m S3_COMPACT 1") + tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") self.restartTaosd(1, dbname='scd') @@ -60,16 +60,16 @@ class TDTestCase: tdSql.query('show create database scd;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd') - tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 525600m S3_COMPACT 1") + tdSql.checkData(0, 1, "CREATE DATABASE `scd` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 2 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd2;') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd2') - tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 525600m S3_COMPACT 1") + tdSql.checkData(0, 1, "CREATE DATABASE `scd2` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 3 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.query('show create database scd4') tdSql.checkRows(1) tdSql.checkData(0, 0, 'scd4') - tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKSIZE 262144 S3_KEEPLOCAL 525600m S3_COMPACT 1") 
+ tdSql.checkData(0, 1, "CREATE DATABASE `scd4` BUFFER 256 CACHESIZE 1 CACHEMODEL 'none' COMP 2 DURATION 10d WAL_FSYNC_PERIOD 3000 MAXROWS 4096 MINROWS 100 STT_TRIGGER 13 KEEP 3650d,3650d,3650d PAGES 256 PAGESIZE 4 PRECISION 'ms' REPLICA 1 WAL_LEVEL 1 VGROUPS 2 SINGLE_STABLE 0 TABLE_PREFIX 0 TABLE_SUFFIX 0 TSDB_PAGESIZE 4 WAL_RETENTION_PERIOD 3600 WAL_RETENTION_SIZE 0 KEEP_TIME_OFFSET 0 ENCRYPT_ALGORITHM 'none' S3_CHUNKPAGES 131072 S3_KEEPLOCAL 525600m S3_COMPACT 1") tdSql.execute('drop database scd') diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 09216add82..817d9f049a 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -47,7 +47,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f query/window/base.py ,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py - +,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py # # system test # diff --git a/tests/script/tsim/scalar/in.sim b/tests/script/tsim/scalar/in.sim index 75e1face88..a2164675f0 100644 --- a/tests/script/tsim/scalar/in.sim +++ b/tests/script/tsim/scalar/in.sim @@ -35,6 +35,14 @@ if $rows != 3 then return -1 endi +sql explain verbose true select * from tb1 where tts in ('2022-07-10 16:31:01', '2022-07-10 16:31:03', 1657441865000); +if $rows != 3 then + return -1 +endi +if $data20 != @ Time Range: [-9223372036854775808, 9223372036854775807]@ then + return -1 +endi + sql select * from tb1 where fbool in (0, 3); if $rows != 5 then return -1 @@ -80,4 +88,45 @@ if $rows != 0 then return -1 endi +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [1657441840000, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, true); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [1, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, false); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [0, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, 1.02); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [1, 1657441980000]@ then + return -1 +endi + +sql explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, -1.02); +if $rows != 4 then + return -1 +endi +if $data20 != @ Time Range: [-1, 1657441980000]@ then + return -1 +endi + +sql_error explain verbose true select * from tb1 where fts in ('2022-07-10 16:31:00', '2022-07-10 16:33:00', 1657441840000, 'abc'); system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py index cd7c5bd26e..d4e5f89aa3 100644 --- a/tests/system-test/2-query/db.py +++ b/tests/system-test/2-query/db.py @@ -57,7 +57,7 @@ class TDTestCase: tdSql.checkData(0, 2, 0) tdSql.query("show dnode 1 variables like '%debugFlag'") - tdSql.checkRows(24) + tdSql.checkRows(25) tdSql.query("show dnode 1 variables like '____debugFlag'") tdSql.checkRows(2) diff --git a/tests/system-test/2-query/group_partition.py b/tests/system-test/2-query/group_partition.py index 
384df02e8d..7ee528841c 100644 --- a/tests/system-test/2-query/group_partition.py +++ b/tests/system-test/2-query/group_partition.py @@ -420,7 +420,23 @@ class TDTestCase: tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 where t2 = 1") tdSql.error(f"select t2, count(*) from {self.dbname}.{self.stable} group by t2 interval(1d)") - + def test_TS5567(self): + tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t group by const_col") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by const_col") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by const_col") + tdSql.checkRows(10) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by const_col") + tdSql.checkRows(10) + tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t group by c_c") + tdSql.checkRows(50) + tdSql.query(f"select const_col as c_c from (select 1 as const_col from {self.dbname}.{self.stable}) t partition by c_c") + tdSql.checkRows(50) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) group by 1") + tdSql.checkRows(10) + tdSql.query(f"select const_col from (select 1 as const_col, count(c1) from {self.dbname}.{self.stable} t group by c1) partition by 1") + tdSql.checkRows(10) def run(self): tdSql.prepare() self.prepare_db() @@ -453,6 +469,7 @@ class TDTestCase: self.test_window(nonempty_tb_num) self.test_event_window(nonempty_tb_num) + self.test_TS5567() ## test old version before changed # self.test_groupby('group', 0, 0) diff --git a/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py index 40879d5c66..938dcfcc9e 100644 --- a/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py +++ b/tests/system-test/7-tmq/tmqVnodeTransform-stb-removewal.py @@ -17,6 +17,8 @@ sys.path.append("./7-tmq") from tmqCommon import * class TDTestCase: + + updatecfgDict = {'sDebugFlag':143} def __init__(self): self.vgroups = 1 self.ctbNum = 10 diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py index 4e90aefe7c..5047ada1d1 100644 --- a/tests/system-test/7-tmq/tmq_taosx.py +++ b/tests/system-test/7-tmq/tmq_taosx.py @@ -131,14 +131,14 @@ class TDTestCase: tdSql.checkData(0, 2, 1) tdSql.query("select * from ct3 order by c1 desc") - tdSql.checkRows(2) + tdSql.checkRows(5) tdSql.checkData(0, 1, 51) tdSql.checkData(0, 4, 940) tdSql.checkData(1, 1, 23) tdSql.checkData(1, 4, None) tdSql.query("select * from st1 order by ts") - tdSql.checkRows(8) + tdSql.checkRows(14) tdSql.checkData(0, 1, 1) tdSql.checkData(1, 1, 3) tdSql.checkData(4, 1, 4) @@ -180,7 +180,7 @@ class TDTestCase: tdSql.checkData(6, 8, None) tdSql.query("select * from ct1") - tdSql.checkRows(4) + tdSql.checkRows(7) tdSql.query("select * from ct2") tdSql.checkRows(0) diff --git a/tests/system-test/7-tmq/ts-4674.py b/tests/system-test/7-tmq/ts-4674.py index 0b3dc1b077..79379aaaed 100644 --- a/tests/system-test/7-tmq/ts-4674.py +++ b/tests/system-test/7-tmq/ts-4674.py @@ -35,10 +35,11 @@ class TDTestCase: def balance_vnode(self): leader_before = self.get_leader() - tdSql.query("balance vgroup leader") + while True: leader_after = -1 
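+            # keep issuing "balance vgroup leader" until get_leader() reports a change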
tdLog.debug("balancing vgroup leader") + tdSql.execute("balance vgroup leader") while True: tdLog.debug("get new vgroup leader") leader_after = self.get_leader() @@ -51,6 +52,7 @@ class TDTestCase: break else : time.sleep(1) + tdLog.debug("leader not changed") def consume_TS_4674_Test(self): diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt index 87630b773b..5e93be695d 100644 --- a/tools/CMakeLists.txt +++ b/tools/CMakeLists.txt @@ -218,3 +218,75 @@ ELSE() ) ENDIF() ENDIF() + +IF(TD_BUILD_KEEPER) + MESSAGE("") + MESSAGE("${Green} build taoskeeper, current platform is ${PLATFORM_ARCH_STR} ${ColourReset}") + + EXECUTE_PROCESS( + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/keeper + COMMAND git rev-parse HEAD + OUTPUT_VARIABLE taoskeeper_commit_sha1 + ) + + IF("${taoskeeper_commit_sha1}" STREQUAL "") + SET(taoskeeper_commit_sha1 "unknown") + ELSE() + STRING(STRIP "${taoskeeper_commit_sha1}" taoskeeper_commit_sha1) + ENDIF() + + SET(taos_version ${TD_VER_NUMBER}) + MESSAGE("${Green} taoskeeper will use ${taos_version} and commit ${taoskeeper_commit_sha1} as version ${ColourReset}") + MESSAGE(" current source dir is ${CMAKE_CURRENT_SOURCE_DIR}") + + IF(TD_WINDOWS) + MESSAGE("Building taoskeeper on Windows") + INCLUDE(ExternalProject) + ExternalProject_Add(taoskeeper + PREFIX "taoskeeper" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper + BUILD_ALWAYS off + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + COMMAND cmake -E echo "Comparessing taoskeeper.exe" + COMMAND cmake -E time upx taoskeeper.exe + COMMAND cmake -E echo "Copy taoskeeper.exe" + COMMAND cmake -E copy taoskeeper.exe ${CMAKE_BINARY_DIR}/build/bin/taoskeeper.exe + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.toml" + COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + ) + ELSE() + IF(TD_DARWIN) + MESSAGE("Building taoskeeper on macOS") + ELSE() + MESSAGE("Building taoskeeper on Linux") + ENDIF() + + INCLUDE(ExternalProject) + ExternalProject_Add(taoskeeper + PREFIX "taoskeeper" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/keeper + BUILD_ALWAYS off + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND cmake -E echo "taoskeeper no need cmake to config" + PATCH_COMMAND + COMMAND git clean -f -d + BUILD_COMMAND + COMMAND go build -a -ldflags "-X 'github.com/taosdata/taoskeeper/version.Version=${taos_version}' -X 'github.com/taosdata/taoskeeper/version.CommitID=${taoskeeper_commit_sha1}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}'" + INSTALL_COMMAND + COMMAND cmake -E echo "Copy taoskeeper" + COMMAND cmake -E copy taoskeeper ${CMAKE_BINARY_DIR}/build/bin + COMMAND cmake -E make_directory ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.toml" + COMMAND cmake -E copy ./config/taoskeeper.toml ${CMAKE_BINARY_DIR}/test/cfg/ + COMMAND cmake -E echo "Copy taoskeeper.service" + COMMAND cmake -E copy ./taoskeeper.service ${CMAKE_BINARY_DIR}/test/cfg/ + ) + ENDIF() +ENDIF() diff --git a/tools/keeper/.dockerignore b/tools/keeper/.dockerignore new file mode 100644 index 0000000000..cff5a58d80 --- 
/dev/null +++ b/tools/keeper/.dockerignore @@ -0,0 +1 @@ +!taoskeeper diff --git a/tools/keeper/.gitignore b/tools/keeper/.gitignore new file mode 100644 index 0000000000..2cba3f06c8 --- /dev/null +++ b/tools/keeper/.gitignore @@ -0,0 +1,22 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool, specifically when used with LiteIDE +*.out +*.html +*.data + +# Dependency directories (remove the comment below to include it) +vendor +/debug/ +/.idea/ +/taoskeeper +/test_data +/.vscode diff --git a/tools/keeper/CHANGELOG.md b/tools/keeper/CHANGELOG.md new file mode 100644 index 0000000000..6775343b0f --- /dev/null +++ b/tools/keeper/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Conventional Changelog](https://www.conventionalcommits.org/en/v1.0.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## Footnote + +This changelog is automatically generated. diff --git a/tools/keeper/Dockerfile b/tools/keeper/Dockerfile new file mode 100644 index 0000000000..c38bdc1acb --- /dev/null +++ b/tools/keeper/Dockerfile @@ -0,0 +1,16 @@ +FROM golang:1.18.6-alpine as builder +LABEL maintainer = "Linhe Huo " + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +RUN chmod u+rw /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] diff --git a/tools/keeper/DockerfileCloud b/tools/keeper/DockerfileCloud new file mode 100644 index 0000000000..11137f61c2 --- /dev/null +++ b/tools/keeper/DockerfileCloud @@ -0,0 +1,24 @@ +FROM golang:1.18.6-alpine as builder +LABEL maintainer = "TDengine" + +ARG latestv +ARG gitinfo +ARG buildinfo + +RUN apk --no-cache add upx && \ + rm -rf /var/lib/apt/lists/* + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct + +RUN echo "$latestv $gitinfo $buildinfo" +RUN go mod tidy && go build -ldflags="-s -w -X 'github.com/taosdata/taoskeeper/version.Version=${latestv}' -X 'github.com/taosdata/taoskeeper/version.Gitinfo=${gitinfo}' -X 'github.com/taosdata/taoskeeper/version.BuildInfo=${buildinfo}'" -o taoskeeper . 
&& upx -9 taoskeeper +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +RUN chmod u+rw /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] diff --git a/tools/keeper/README-CN.md b/tools/keeper/README-CN.md new file mode 100644 index 0000000000..770e9513c1 --- /dev/null +++ b/tools/keeper/README-CN.md @@ -0,0 +1,267 @@ +# TaosKeeper + +taosKeeper 是 TDengine 各项监控指标的导出工具,通过简单的几项配置即可获取 TDengine 的运行状态。并且 taosKeeper 企业版支持多种收集器,可以方便进行监控数据的展示。 + +taosKeeper 使用 TDengine RESTful 接口,所以不需要安装 TDengine 客户端即可使用。 + +## 构建 + +### 获取源码 + +从 GitHub 克隆源码: + +```sh +git clone https://github.com/taosdata/TDengine +cd TDengine/tools/keeper +``` + +### 编译 + +taosKeeper 使用 `GO` 语言编写,在构建前需要配置好 `GO` 语言开发环境。 + +```sh +go mod tidy +go build +``` + +## 安装 + +如果是自行构建的项目,仅需要拷贝 `taoskeeper` 文件到你的 `PATH` 中。 + +```sh +sudo install taoskeeper /usr/bin/ +``` + +## 启动 + +在启动前,应该做好如下配置: +在 `/etc/taos/taoskeeper.toml` 配置 TDengine 连接参数以及监控指标前缀等其他信息。 + +```toml +# gin 框架是否启用 debug +debug = false + +# 服务监听端口, 默认为 6043 +port = 6043 + +# 日志级别,包含 panic、error、info、debug、trace等 +loglevel = "info" + +# 程序中使用协程池的大小 +gopoolsize = 50000 + +# 查询 TDengine 监控数据轮询间隔 +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" + +# 需要被监控的 taosAdapter +[taosAdapter] +address = ["127.0.0.1:6041"] + +[metrics] +# 监控指标前缀 +prefix = "taos" + +# 存放监控数据的数据库 +database = "log" + +# 指定需要监控的普通表 +tables = [] + +[environment] +# 是否在容器中运行,影响 taosKeeper 自身的监控数据 +incgroup = false +``` + +现在可以启动服务,输入: + +```sh +taoskeeper +``` + +如果你使用 `systemd`,复制 `taoskeeper.service` 到 `/lib/systemd/system/`,并启动服务。 + +```sh +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +``` + +让 taosKeeper 随系统开机自启动。 + +```sh +sudo systemctl enable taoskeeper +``` + +如果使用 `systemd`,你可以使用如下命令完成安装。 + +```sh +go mod tidy +go build +sudo install taoskeeper /usr/bin/ +sudo cp taoskeeper.service /lib/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl start taoskeeper +sudo systemctl enable taoskeeper +``` + +## Docker + +如下介绍了如何在 docker 中构建 taosKeeper: + +在构建前请配置好 `./config/taoskeeper.toml` 中合适的参数,并编辑 Dockerfile ,示例如下。 + +```dockerfile +FROM golang:1.18.6-alpine as builder + +WORKDIR /usr/src/taoskeeper +COPY ./ /usr/src/taoskeeper/ +ENV GO111MODULE=on \ + GOPROXY=https://goproxy.cn,direct +RUN go mod tidy && go build + +FROM alpine:3 +RUN mkdir -p /etc/taos +COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/ +COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +如果已经有 taosKeeper 可执行文件,在配置好 `taoskeeper.toml` 后你可以使用如下方式构建: + +```dockerfile +FROM ubuntu:18.04 +RUN mkdir -p /etc/taos +COPY ./taoskeeper /usr/bin/ +COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml +EXPOSE 6043 +CMD ["taoskeeper"] +``` + +## 使用(**企业版**) + +### Prometheus (by scrape) + +taosKeeper 可以像 `node-exporter` 一样向 Prometheus 提供监控指标。\ +在 `/etc/prometheus/prometheus.yml` 添加配置: + +```yml +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "taoskeeper" + static_configs: + - targets: ["taoskeeper:6043"] +``` + +现在使用 PromQL 查询即可以显示结果,比如要查看指定主机(通过 FQDN 正则匹配表达式筛选)硬盘使用百分比: + +```promql +taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"} +``` + +你可以使用 `docker-compose` 测试完整的链路。 +`docker-compose.yml`示例: + +```yml +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine + environment: + TAOS_FQDN: tdengine + volumes: + - 
taosdata:/var/lib/taos
+  taoskeeper:
+    build: ./
+    depends_on:
+      - tdengine
+    environment:
+      TDENGINE_HOST: tdengine
+      TDENGINE_PORT: 6041
+    volumes:
+      - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml
+    ports:
+      - 6043:6043
+  prometheus:
+    image: prom/prometheus
+    volumes:
+      - ./prometheus/:/etc/prometheus/
+    ports:
+      - 9090:9090
+volumes:
+  taosdata:
+```
+
+启动:
+
+```sh
+docker-compose up -d
+```
+
+现在通过访问 来查询结果。访问 [simple dashboard](https://grafana.com/grafana/dashboards/15164) 来查看 TaosKeeper + Prometheus + Grafana 监控 TDengine 的快速启动实例。
+
+### Telegraf
+
+如果使用 telegraf 来收集各个指标,仅需要在配置中增加:
+
+```toml
+[[inputs.prometheus]]
+## An array of urls to scrape metrics from.
+urls = ["http://taoskeeper:6043/metrics"]
+```
+
+可以通过 `docker-compose` 来测试:
+
+```sh
+docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper
+```
+
+由于可以在 `telegraf.conf` 设置日志为标准输出:
+
+```toml
+[[outputs.file]]
+files = ["stdout"]
+```
+
+所以你可以通过 `docker-compose logs` 在标准输出中追踪 TDengine 各项指标。
+
+```sh
+docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf
+```
+
+### Zabbix
+
+1. 导入 zabbix 模板文件 `zbx_taos_keeper_templates.xml`。
+2. 使用 `TDengine` 模板来创建主机,修改宏 `{$TAOSKEEPER_HOST}` 和 `{$COLLECTION_INTERVAL}`。
+3. 等待并查看自动创建的条目。
+
+### 常见问题
+
+* 启动报错,显示 connection refused
+
+  **解析**:taosKeeper 依赖 RESTful 接口查询数据,请检查 taosAdapter 是否正常运行,或 taoskeeper.toml 中 taosAdapter 地址是否正确。
+
+* taosKeeper 监控不同 TDengine 显示的检测指标数目不一致?
+
+  **解析**:如果 TDengine 中未创建某项指标,taosKeeper 不能获取对应的检测结果。
+
+* 不能接收到 TDengine 的监控日志。
+
+  **解析**:修改 `/etc/taos/taos.cfg` 文件并增加如下参数:
+
+  ```cfg
+  monitor 1            // 启用 monitor
+  monitorInterval 30   // 发送间隔(s)
+  monitorFqdn localhost // 接收消息的 FQDN,默认为空
+  monitorPort 6043     // 接收消息的端口号
+  monitorMaxLogs 100   // 每个监控间隔缓存的最大日志数量
+  ```
diff --git a/tools/keeper/README.md b/tools/keeper/README.md
new file mode 100644
index 0000000000..18e351f160
--- /dev/null
+++ b/tools/keeper/README.md
@@ -0,0 +1,273 @@
+# TaosKeeper
+
+taosKeeper is a metrics exporter for TDengine that works with many kinds of collectors; with a few simple configuration steps you can obtain the running status of TDengine.
+
+This tool uses the TDengine RESTful API, so you can build and run it without installing the TDengine client.
+
+## Build
+
+### Get the source code
+
+```sh
+git clone https://github.com/taosdata/TDengine
+cd TDengine/tools/keeper
+```
+
+### Compile
+
+```sh
+go mod tidy
+go build
+```
+
+## Install
+
+If you built the tool yourself, just copy the `taoskeeper` binary into your `PATH`.
+
+```sh
+sudo install taoskeeper /usr/bin/
+```
+
+## Start
+
+Before starting, configure options such as the database IP and port, and the prefix for exported metrics, in `/etc/taos/taoskeeper.toml`.
+
+```toml
+# Start with debug middleware for gin
+debug = false
+
+# Listen port, default is 6043
+port = 6043
+
+# log level
+loglevel = "info"
+
+# go pool size
+gopoolsize = 50000
+
+# interval for TDengine metrics
+RotationInterval = "15s"
+
+[tdengine]
+host = "127.0.0.1"
+port = 6041
+username = "root"
+password = "taosdata"
+
+# list of taosAdapter that need to be monitored
+[taosAdapter]
+address = ["127.0.0.1:6041"]
+
+[metrics]
+# metrics prefix in metrics names.
+prefix = "taos"
+
+# database for storing metrics data
+database = "log"
+
+# export some tables that are not super table
+tables = []
+
+[environment]
+# Whether running in cgroup.
+incgroup = false
+```
+
+Now you can run the tool:
+
+```sh
+taoskeeper
+```
+
+If you use `systemd`, copy `taoskeeper.service` to `/lib/systemd/system/` and start the service.
+
+```sh
+sudo cp taoskeeper.service /lib/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl start taoskeeper
+```
+
+To start taoskeeper automatically whenever the OS reboots, enable the systemd service:
+
+```sh
+sudo systemctl enable taoskeeper
+```
+
+If you use `systemd`, you can complete the whole installation with these commands:
+
+```sh
+go mod tidy
+go build
+sudo install taoskeeper /usr/bin/
+sudo cp taoskeeper.service /lib/systemd/system/
+sudo systemctl daemon-reload
+sudo systemctl start taoskeeper
+sudo systemctl enable taoskeeper
+```
+
+## Docker
+
+Here is an example of how to build this tool in Docker.
+
+Before building, configure `./config/taoskeeper.toml` with proper parameters and edit the Dockerfile. Take the following as an example.
+
+```dockerfile
+FROM golang:1.18.2 as builder
+
+WORKDIR /usr/src/taoskeeper
+COPY ./ /usr/src/taoskeeper/
+ENV GO111MODULE=on \
+    GOPROXY=https://goproxy.cn,direct
+RUN go mod tidy && go build
+
+FROM alpine:3
+RUN mkdir -p /etc/taos
+COPY --from=builder /usr/src/taoskeeper/taoskeeper /usr/bin/
+COPY ./config/taoskeeper.toml /etc/taos/taoskeeper.toml
+EXPOSE 6043
+CMD ["taoskeeper"]
+```
+
+If you already have the taosKeeper binary, you can build the image like this:
+
+```dockerfile
+FROM ubuntu:18.04
+RUN mkdir -p /etc/taos
+COPY ./taoskeeper /usr/bin/
+COPY ./taoskeeper.toml /etc/taos/taoskeeper.toml
+EXPOSE 6043
+CMD ["taoskeeper"]
+```
+
+## Usage (**Enterprise Edition**)
+
+### Prometheus (by scrape)
+
+taosKeeper acts as a Prometheus exporter, much like `node-exporter`.
+
+Here's how to add it to the scrape configs in `/etc/prometheus/prometheus.yml`:
+
+```yml
+global:
+  scrape_interval: 5s
+
+scrape_configs:
+  - job_name: "taoskeeper"
+    static_configs:
+      - targets: [ "taoskeeper:6043" ]
+```
+
+Now a PromQL query will return results; for example, to show the disk usage percentage of a specific host with an FQDN regex match expression:
+
+```promql
+taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}
+```
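+
+If you want to run the same query programmatically, here is a minimal Go sketch using the Prometheus HTTP API (the Prometheus address and the query string are illustrative; adjust them to your deployment):
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Illustrative PromQL expression, same as the example above.
+	query := `taos_dn_disk_used / taos_dn_disk_total {fqdn=~ "tdengine.*"}`
+	// Prometheus instant-query endpoint; assumes Prometheus listens on localhost:9090.
+	endpoint := "http://localhost:9090/api/v1/query?query=" + url.QueryEscape(query)
+
+	resp, err := http.Get(endpoint)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	// Print the raw JSON response for inspection.
+	body, _ := io.ReadAll(resp.Body)
+	fmt.Println(string(body))
+}
+```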
+ urls = ["http://taoskeeper:6043/metrics"] +``` + +You can test it with `docker-compose`: + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml up -d telegraf taoskeeper +``` + +Since we have set an stdout file output in `telegraf.conf`: + +```toml +[[outputs.file]] + files = ["stdout"] +``` + +So you can track with TDengine metrics in standard output with `docker-compose logs`: + +```sh +docker-compose -f docker-compose.yml -f telegraf.yml logs -f telegraf +``` + +### Zabbix + +1. Import the zabbix template file `zbx_taos_keeper_templates.xml`. +2. Use the template `TDengine` to create the host and modify the macros `{$TAOSKEEPER_HOST}` + and `{$COLLECTION_INTERVAL}`. +3. Waiting for monitoring items to be created automatically. + +### FAQ + +* Error occurred: Connection refused, while taosKeeper was starting + + **Answer**: taoskeeper relies on restful interfaces to query data. Check whether the taosAdapter is running or whether + the taosAdapter address in taoskeeper.toml is correct. + +* Why detection metrics displayed by different TDengine's inconsistent with taoskeeper monitoring? + + **Answer**: If a metric is not created in TDengine, taoskeeper cannot get the corresponding test results. + +* Cannot receive log from TDengine server. + + **Answer**: Modify `/etc/taos/taos.cfg` file and add parameters like: + + ```cfg + monitor 1 // start monitor + monitorInterval 30 // send log interval (s) + monitorFqdn localhost + monitorPort 6043 // taosKeeper port + monitorMaxLogs 100 + ``` diff --git a/tools/keeper/api/adapter2.go b/tools/keeper/api/adapter2.go new file mode 100644 index 0000000000..645b9a176b --- /dev/null +++ b/tools/keeper/api/adapter2.go @@ -0,0 +1,260 @@ +package api + +import ( + "bytes" + "context" + "crypto/md5" + "encoding/hex" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var adapterLog = log.GetLogger("ADP") + +type adapterReqType int + +const ( + rest adapterReqType = iota // 0 - rest + ws // 1 - ws +) + +type Adapter struct { + username string + password string + host string + port int + usessl bool + conn *db.Connector + db string + dbOptions map[string]interface{} +} + +func NewAdapter(c *config.Config) *Adapter { + return &Adapter{ + username: c.TDengine.Username, + password: c.TDengine.Password, + host: c.TDengine.Host, + port: c.TDengine.Port, + usessl: c.TDengine.Usessl, + db: c.Metrics.Database.Name, + dbOptions: c.Metrics.Database.Options, + } +} + +func (a *Adapter) Init(c gin.IRouter) error { + if err := a.createDatabase(); err != nil { + return fmt.Errorf("create database error:%s", err) + } + if err := a.initConnect(); err != nil { + return fmt.Errorf("init db connect error:%s", err) + } + if err := a.createTable(); err != nil { + return fmt.Errorf("create table error:%s", err) + } + c.POST("/adapter_report", a.handleFunc()) + return nil +} + +func (a *Adapter) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + adapterLog := adapterLog.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + adapterLog.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + adapterLog.Errorf("get adapter report data error, 
msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get adapter report data error. %s", err)}) + return + } + if adapterLog.Logger.IsLevelEnabled(logrus.TraceLevel) { + adapterLog.Tracef("received adapter report data:%s", string(data)) + } + + var report AdapterReport + if err = json.Unmarshal(data, &report); err != nil { + adapterLog.Errorf("parse adapter report data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse adapter report data error: %s", err)}) + return + } + sql := a.parseSql(report) + adapterLog.Debugf("adapter report sql:%s", sql) + + if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil { + adapterLog.Errorf("adapter report error, msg:%s", err) + c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (a *Adapter) initConnect() error { + conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl) + if err != nil { + adapterLog.Dup().Errorf("init db connect error, msg:%s", err) + return err + } + a.conn = conn + return nil +} + +func (a *Adapter) parseSql(report AdapterReport) string { + // reqType: 0: rest, 1: websocket + restTbName := a.tableName(report.Endpoint, rest) + wsTbName := a.tableName(report.Endpoint, ws) + ts := time.Unix(report.Timestamp, 0).Format(time.RFC3339) + metric := report.Metric + return fmt.Sprintf("insert into %s using adapter_requests tags ('%s', %d) "+ + "values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d) "+ + "%s using adapter_requests tags ('%s', %d) "+ + "values('%s', %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)", + restTbName, report.Endpoint, rest, ts, metric.RestTotal, metric.RestQuery, metric.RestWrite, metric.RestOther, + metric.RestInProcess, metric.RestSuccess, metric.RestFail, metric.RestQuerySuccess, metric.RestQueryFail, + metric.RestWriteSuccess, metric.RestWriteFail, metric.RestOtherSuccess, metric.RestOtherFail, + metric.RestQueryInProcess, metric.RestWriteInProcess, + wsTbName, report.Endpoint, ws, ts, metric.WSTotal, + metric.WSQuery, metric.WSWrite, metric.WSOther, metric.WSInProcess, metric.WSSuccess, metric.WSFail, + metric.WSQuerySuccess, metric.WSQueryFail, metric.WSWriteSuccess, metric.WSWriteFail, metric.WSOtherSuccess, + metric.WSOtherFail, metric.WSQueryInProcess, metric.WSWriteInProcess) +} + +func (a *Adapter) tableName(endpoint string, reqType adapterReqType) string { + var tbname string + if reqType == rest { + tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "rest") + } else { + tbname = fmt.Sprintf("adapter_req_%s_%s", endpoint, "ws") + } + + if len(tbname) <= util.MAX_TABLE_NAME_LEN { + return util.ToValidTableName(tbname) + } else { + sum := md5.Sum([]byte(fmt.Sprintf("%s%d", endpoint, reqType))) + return fmt.Sprintf("adapter_req_%s", hex.EncodeToString(sum[:])) + } +} + +func (a *Adapter) createDatabase() error { + qid := util.GetQidOwn() + + adapterLog := adapterLog.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl) + if err != nil { + return fmt.Errorf("connect to database error, msg:%s", err) + } + defer func() { _ = conn.Close() }() + sql := a.createDBSql() + adapterLog.Infof("create database, sql:%s", sql) + _, err = conn.Exec(context.Background(), sql, util.GetQidOwn()) + if err != nil { + adapterLog.Errorf("create database error, msg:%s", err) + return err + } + + return err +} + 
+func (a *Adapter) createDBSql() string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("create database if not exists %s ", a.db)) + + for k, v := range a.dbOptions { + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + + return buf.String() +} + +var adapterTableSql = "create stable if not exists `adapter_requests` (" + + "`ts` timestamp, " + + "`total` int unsigned, " + + "`query` int unsigned, " + + "`write` int unsigned, " + + "`other` int unsigned, " + + "`in_process` int unsigned, " + + "`success` int unsigned, " + + "`fail` int unsigned, " + + "`query_success` int unsigned, " + + "`query_fail` int unsigned, " + + "`write_success` int unsigned, " + + "`write_fail` int unsigned, " + + "`other_success` int unsigned, " + + "`other_fail` int unsigned, " + + "`query_in_process` int unsigned, " + + "`write_in_process` int unsigned ) " + + "tags (`endpoint` varchar(32), `req_type` tinyint unsigned )" + +func (a *Adapter) createTable() error { + if a.conn == nil { + return errNoConnection + } + _, err := a.conn.Exec(context.Background(), adapterTableSql, util.GetQidOwn()) + return err +} + +type AdapterReport struct { + Timestamp int64 `json:"ts"` + Metric AdapterMetrics `json:"metrics"` + Endpoint string `json:"endpoint"` +} + +type AdapterMetrics struct { + RestTotal int `json:"rest_total"` + RestQuery int `json:"rest_query"` + RestWrite int `json:"rest_write"` + RestOther int `json:"rest_other"` + RestInProcess int `json:"rest_in_process"` + RestSuccess int `json:"rest_success"` + RestFail int `json:"rest_fail"` + RestQuerySuccess int `json:"rest_query_success"` + RestQueryFail int `json:"rest_query_fail"` + RestWriteSuccess int `json:"rest_write_success"` + RestWriteFail int `json:"rest_write_fail"` + RestOtherSuccess int `json:"rest_other_success"` + RestOtherFail int `json:"rest_other_fail"` + RestQueryInProcess int `json:"rest_query_in_process"` + RestWriteInProcess int `json:"rest_write_in_process"` + WSTotal int `json:"ws_total"` + WSQuery int `json:"ws_query"` + WSWrite int `json:"ws_write"` + WSOther int `json:"ws_other"` + WSInProcess int `json:"ws_in_process"` + WSSuccess int `json:"ws_success"` + WSFail int `json:"ws_fail"` + WSQuerySuccess int `json:"ws_query_success"` + WSQueryFail int `json:"ws_query_fail"` + WSWriteSuccess int `json:"ws_write_success"` + WSWriteFail int `json:"ws_write_fail"` + WSOtherSuccess int `json:"ws_other_success"` + WSOtherFail int `json:"ws_other_fail"` + WSQueryInProcess int `json:"ws_query_in_process"` + WSWriteInProcess int `json:"ws_write_in_process"` +} diff --git a/tools/keeper/api/adapter2_test.go b/tools/keeper/api/adapter2_test.go new file mode 100644 index 0000000000..e6fd263c43 --- /dev/null +++ b/tools/keeper/api/adapter2_test.go @@ -0,0 +1,98 @@ +package api + +import ( + "context" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestAdapter2(t *testing.T) { + c := &config.Config{ + InstanceID: 64, + Port: 6043, + TDengine: config.TDengineRestful{ + Host: "127.0.0.1", + Port: 6041, + Username: "root", + Password: "taosdata", + Usessl: false, + }, + Metrics: config.MetricsConfig{ + Database: config.Database{ + Name: "adapter_report_test", + Options: map[string]interface{}{}, + }, + }, + } + a := 
NewAdapter(c) + err := a.Init(router) + assert.NoError(t, err) + + w := httptest.NewRecorder() + body := strings.NewReader(" {\"ts\": 1696928323, \"metrics\": {\"rest_total\": 10, \"rest_query\": 2, " + + "\"rest_write\": 5, \"rest_other\": 3, \"rest_in_process\": 1, \"rest_fail\": 5, \"rest_success\": 3, " + + "\"rest_query_success\": 1, \"rest_query_fail\": 2, \"rest_write_success\": 2, \"rest_write_fail\": 3, " + + "\"rest_other_success\": 1, \"rest_other_fail\": 2, \"rest_query_in_process\": 1, \"rest_write_in_process\": 2, " + + "\"ws_total\": 10, \"ws_query\": 2, \"ws_write\": 3, \"ws_other\": 5, \"ws_in_process\": 1, \"ws_success\": 3, " + + "\"ws_fail\": 3, \"ws_query_success\": 1, \"ws_query_fail\": 1, \"ws_write_success\": 2, \"ws_write_fail\": 2, " + + "\"ws_other_success\": 1, \"ws_other_fail\": 2, \"ws_query_in_process\": 1, \"ws_write_in_process\": 2 }, " + + "\"endpoint\": \"adapter-1:6041\"}") + req, _ := http.NewRequest(http.MethodPost, "/adapter_report", body) + req.Header.Set("X-QID", "0x1234567890ABCD00") + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + conn, err := db.NewConnectorWithDb(c.TDengine.Username, c.TDengine.Password, c.TDengine.Host, c.TDengine.Port, c.Metrics.Database.Name, c.TDengine.Usessl) + defer func() { + _, _ = conn.Query(context.Background(), "drop database if exists adapter_report_test", util.GetQidOwn()) + }() + + assert.NoError(t, err) + data, err := conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=0", util.GetQidOwn()) + + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, uint32(10), data.Data[0][1]) + assert.Equal(t, uint32(2), data.Data[0][2]) + assert.Equal(t, uint32(5), data.Data[0][3]) + assert.Equal(t, uint32(3), data.Data[0][4]) + assert.Equal(t, uint32(1), data.Data[0][5]) + assert.Equal(t, uint32(3), data.Data[0][6]) + assert.Equal(t, uint32(5), data.Data[0][7]) + assert.Equal(t, uint32(1), data.Data[0][8]) + assert.Equal(t, uint32(2), data.Data[0][9]) + assert.Equal(t, uint32(2), data.Data[0][10]) + assert.Equal(t, uint32(3), data.Data[0][11]) + assert.Equal(t, uint32(1), data.Data[0][12]) + assert.Equal(t, uint32(2), data.Data[0][13]) + assert.Equal(t, uint32(1), data.Data[0][14]) + assert.Equal(t, uint32(2), data.Data[0][15]) + + data, err = conn.Query(context.Background(), "select * from adapter_report_test.adapter_requests where req_type=1", util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, uint32(10), data.Data[0][1]) + assert.Equal(t, uint32(2), data.Data[0][2]) + assert.Equal(t, uint32(3), data.Data[0][3]) + assert.Equal(t, uint32(5), data.Data[0][4]) + assert.Equal(t, uint32(1), data.Data[0][5]) + assert.Equal(t, uint32(3), data.Data[0][6]) + assert.Equal(t, uint32(3), data.Data[0][7]) + assert.Equal(t, uint32(1), data.Data[0][8]) + assert.Equal(t, uint32(1), data.Data[0][9]) + assert.Equal(t, uint32(2), data.Data[0][10]) + assert.Equal(t, uint32(2), data.Data[0][11]) + assert.Equal(t, uint32(1), data.Data[0][12]) + assert.Equal(t, uint32(2), data.Data[0][13]) + assert.Equal(t, uint32(1), data.Data[0][14]) + assert.Equal(t, uint32(2), data.Data[0][15]) + + conn.Exec(context.Background(), "drop database "+c.Metrics.Database.Name, util.GetQidOwn()) +} diff --git a/tools/keeper/api/audit.go b/tools/keeper/api/audit.go new file mode 100644 index 0000000000..fd9fc4f667 --- /dev/null +++ b/tools/keeper/api/audit.go @@ -0,0 +1,336 @@ +package api + +import ( + "bytes" + "context" + "encoding/json" 
+ "errors" + "fmt" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var auditLogger = log.GetLogger("AUD") + +const MAX_DETAIL_LEN = 50000 + +type Audit struct { + username string + password string + host string + port int + usessl bool + conn *db.Connector + db string + dbOptions map[string]interface{} +} + +type AuditInfo struct { + Timestamp string `json:"timestamp"` + ClusterID string `json:"cluster_id"` + User string `json:"user"` + Operation string `json:"operation"` + Db string `json:"db"` + Resource string `json:"resource"` + ClientAdd string `json:"client_add"` // client address + Details string `json:"details"` +} + +type AuditArrayInfo struct { + Records []AuditInfo `json:"records"` +} + +type AuditInfoOld struct { + Timestamp int64 `json:"timestamp"` + ClusterID string `json:"cluster_id"` + User string `json:"user"` + Operation string `json:"operation"` + Db string `json:"db"` + Resource string `json:"resource"` + ClientAdd string `json:"client_add"` // client address + Details string `json:"details"` +} + +func NewAudit(c *config.Config) (*Audit, error) { + a := Audit{ + username: c.TDengine.Username, + password: c.TDengine.Password, + host: c.TDengine.Host, + port: c.TDengine.Port, + usessl: c.TDengine.Usessl, + db: c.Audit.Database.Name, + dbOptions: c.Audit.Database.Options, + } + if a.db == "" { + a.db = "audit" + } + return &a, nil +} + +func (a *Audit) Init(c gin.IRouter) error { + if err := a.createDatabase(); err != nil { + return fmt.Errorf("create database error, msg:%s", err) + } + if err := a.initConnect(); err != nil { + return fmt.Errorf("init db connect error, msg:%s", err) + } + if err := a.createSTables(); err != nil { + return fmt.Errorf("create stable error, msg:%s", err) + } + c.POST("/audit", a.handleFunc()) + c.POST("/audit_v2", a.handleFunc()) + c.POST("/audit-batch", a.handleBatchFunc()) + return nil +} + +func (a *Audit) handleBatchFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + auditLogger := auditLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + auditLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + auditLogger.Errorf("get audit data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. 
%s", err)}) + return + } + + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Tracef("receive audit request, data:%s", string(data)) + } + var auditArray AuditArrayInfo + + if err := json.Unmarshal(data, &auditArray); err != nil { + auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + if len(auditArray.Records) == 0 { + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Trace("handle request successfully (no records)") + } + c.JSON(http.StatusOK, gin.H{}) + return + } + + err = handleBatchRecord(auditArray.Records, a.conn, qid) + + if err != nil { + auditLogger.Errorf("process records error, error:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. %s", err)}) + return + } + + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (a *Audit) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + auditLogger := auditLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if a.conn == nil { + auditLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + auditLogger.Errorf("get audit data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get audit data error. %s", err)}) + return + } + if auditLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + auditLogger.Tracef("receive audit request, data:%s", string(data)) + } + sql := "" + + isStrTime, _ := regexp.MatchString(`"timestamp"\s*:\s*"[^"]*"`, string(data)) + if isStrTime { + var audit AuditInfo + if err := json.Unmarshal(data, &audit); err != nil { + auditLogger.Errorf("parse audit data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + sql = parseSql(audit) + } else { + var audit AuditInfoOld + if err := json.Unmarshal(data, &audit); err != nil { + auditLogger.Errorf("parse old audit error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse audit data error: %s", err)}) + return + } + + sql = parseSqlOld(audit) + } + + if _, err = a.conn.Exec(context.Background(), sql, qid); err != nil { + auditLogger.Errorf("save audit data error, sql:%s, error:%s", sql, err) + c.JSON(http.StatusInternalServerError, gin.H{"error": fmt.Sprintf("save audit data error: %s", err)}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func handleDetails(details string) string { + if strings.Contains(details, "'") { + details = strings.ReplaceAll(details, "'", "\\'") + } + if strings.Contains(details, "\"") { + details = strings.ReplaceAll(details, "\"", "\\\"") + } + if len(details) > MAX_DETAIL_LEN { + details = details[:MAX_DETAIL_LEN] + } + return details +} + +func parseSql(audit AuditInfo) string { + details := handleDetails(audit.Details) + + return fmt.Sprintf( + "insert into %s using operations tags ('%s') values (%s, '%s', '%s', '%s', '%s', '%s', '%s') ", + getTableName(audit), audit.ClusterID, audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) +} + +func parseSqlOld(audit AuditInfoOld) string { + details := handleDetails(audit.Details) + + return fmt.Sprintf( + "insert into %s using operations tags ('%s') values (%s, '%s', 
'%s', '%s', '%s', '%s', '%s') ", + getTableNameOld(audit), audit.ClusterID, strconv.FormatInt(audit.Timestamp, 10)+"000000", audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) +} + +func handleBatchRecord(auditArray []AuditInfo, conn *db.Connector, qid uint64) error { + var builder strings.Builder + var head = fmt.Sprintf( + "insert into %s using operations tags ('%s') values", + getTableName(auditArray[0]), auditArray[0].ClusterID) + + builder.WriteString(head) + var qid_counter uint8 = 0 + for _, audit := range auditArray { + + details := handleDetails(audit.Details) + valuesStr := fmt.Sprintf( + "(%s, '%s', '%s', '%s', '%s', '%s', '%s') ", + audit.Timestamp, audit.User, audit.Operation, audit.Db, audit.Resource, audit.ClientAdd, details) + + if (builder.Len() + len(valuesStr)) > MAX_SQL_LEN { + sql := builder.String() + if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil { + return err + } + builder.Reset() + builder.WriteString(head) + } + builder.WriteString(valuesStr) + qid_counter++ + } + + if builder.Len() > len(head) { + sql := builder.String() + if _, err := conn.Exec(context.Background(), sql, qid|uint64((qid_counter%255))); err != nil { + return err + } + } + + return nil +} + +func getTableName(audit AuditInfo) string { + return fmt.Sprintf("t_operations_%s", audit.ClusterID) +} + +func getTableNameOld(audit AuditInfoOld) string { + return fmt.Sprintf("t_operations_%s", audit.ClusterID) +} + +func (a *Audit) initConnect() error { + conn, err := db.NewConnectorWithDb(a.username, a.password, a.host, a.port, a.db, a.usessl) + if err != nil { + auditLogger.Errorf("init db connect error, msg:%s", err) + return err + } + a.conn = conn + return nil +} + +func (a *Audit) createDatabase() error { + conn, err := db.NewConnector(a.username, a.password, a.host, a.port, a.usessl) + if err != nil { + return fmt.Errorf("connect to database error, msg:%s", err) + } + defer func() { _ = conn.Close() }() + sql := a.createDBSql() + auditLogger.Infof("create database, sql:%s", sql) + _, err = conn.Exec(context.Background(), sql, util.GetQidOwn()) + if err != nil { + auditLogger.Errorf("create database error, msg:%s", err) + return err + } + return err +} + +var errNoConnection = errors.New("no connection") + +func (a *Audit) createDBSql() string { + var buf bytes.Buffer + buf.WriteString(fmt.Sprintf("create database if not exists %s precision 'ns' ", a.db)) + + for k, v := range a.dbOptions { + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + + return buf.String() +} + +func (a *Audit) createSTables() error { + var createTableSql = "create stable if not exists operations " + + "(ts timestamp, user_name varchar(25), operation varchar(20), db varchar(65), resource varchar(193), client_address varchar(25), details varchar(50000)) " + + "tags (cluster_id varchar(64))" + + if a.conn == nil { + return errNoConnection + } + _, err := a.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + if err != nil { + auditLogger.Errorf("## create stable error, msg:%s", err) + return err + } + return nil +} diff --git a/tools/keeper/api/audit_test.go b/tools/keeper/api/audit_test.go new file mode 100644 index 0000000000..99beae7a54 --- /dev/null +++ b/tools/keeper/api/audit_test.go @@ -0,0 +1,153 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "strings" + 
"testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestAudit(t *testing.T) { + cfg := util.GetCfg() + cfg.Audit = config.AuditConfig{ + Database: config.Database{ + Name: "keepter_test_audit", + }, + Enable: true, + } + + a, err := NewAudit(cfg) + assert.NoError(t, err) + err = a.Init(router) + assert.NoError(t, err) + + longDetails := strings.Repeat("0123456789", 5000) + + cases := []struct { + name string + ts int64 + detail string + data string + expect string + }{ + { + name: "1", + ts: 1699839716440000000, + data: `{"timestamp": "1699839716440000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "detail"}`, + expect: "detail", + }, + { + name: "2", + ts: 1699839716441000000, + data: `{"timestamp": "1699839716441000000", "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "` + longDetails + `"}`, + expect: longDetails[:50000], + }, + { + name: "3", + ts: 1699839716442000000, + data: "{\"timestamp\": \"1699839716442000000\", \"cluster_id\": \"cluster_id\", \"user\": \"user\", \"operation\": \"operation\", \"db\":\"dbnameb\", \"resource\":\"resourcenameb\", \"client_add\": \"localhost:30000\", \"details\": \"create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'\"}", + expect: "create database `meter` buffer 32 cachemodel 'none' duration 50d keep 3650d single_stable 0 wal_retention_period 3600 precision 'ms'", + }, + } + + cases2 := []struct { + name string + ts int64 + detail string + data string + expect string + }{ + { + name: "1", + ts: 1699839716445000000, + data: `{"timestamp":1699839716445, "cluster_id": "cluster_id", "user": "user", "operation": "operation", "db":"dbnamea", "resource":"resourcenamea", "client_add": "localhost:30000", "details": "details"}`, + expect: "details", + }, + } + conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Audit.Database.Name, cfg.TDengine.Usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn()) + }() + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit_v2", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, c.expect, data.Data[0][1]) + }) + } + + for _, c := range cases2 { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, 
c.expect, data.Data[0][1]) + }) + } + + for _, c := range cases2 { + t.Run(c.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(c.data) + req, _ := http.NewRequest(http.MethodPost, "/audit", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, details from %s.operations where ts=%d", cfg.Audit.Database.Name, c.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, c.expect, data.Data[0][1]) + }) + } + + MAX_SQL_LEN = 300 + // test audit batch + input := `{"records":[{"timestamp":"1702548856940013848","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d630302"},{"timestamp":"1702548856939746458","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45230","db":"test","resource":"","details":"d130277"},{"timestamp":"1702548856939586665","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5268"},{"timestamp":"1702548856939528940","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50222","db":"test","resource":"","details":"d255282"},{"timestamp":"1702548856939336371","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45126","db":"test","resource":"","details":"d755297"},{"timestamp":"1702548856939075131","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d380325"},{"timestamp":"1702548856938640661","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45152","db":"test","resource":"","details":"d255281"},{"timestamp":"1702548856938505795","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45122","db":"test","resource":"","details":"d130276"},{"timestamp":"1702548856938363319","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45178","db":"test","resource":"","details":"d755296"},{"timestamp":"1702548856938201478","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:45166","db":"test","resource":"","details":"d380324"},{"timestamp":"1702548856937740618","cluster_id":"8468922059162439502","user":"root","operation":"createTable","client_add":"173.50.0.7:50288","db":"test","resource":"","details":"d5266"}]}` + + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Audit.Database.Name), util.GetQidOwn()) + }() + + t.Run("testbatch", func(t *testing.T) { + //test empty array + w1 := httptest.NewRecorder() + body1 := strings.NewReader(`{"records": []}`) + + req1, _ := http.NewRequest(http.MethodPost, "/audit-batch", body1) + router.ServeHTTP(w1, req1) + assert.Equal(t, 200, w1.Code) + + //test 2 items array + w := httptest.NewRecorder() + body := strings.NewReader(input) + req, _ := http.NewRequest(http.MethodPost, "/audit-batch", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), "select ts, details from "+cfg.Audit.Database.Name+".operations where cluster_id='8468922059162439502'", util.GetQidOwn()) + assert.NoError(t, 
err) + assert.Equal(t, 11, len(data.Data)) + }) +} diff --git a/tools/keeper/api/checkhealth.go b/tools/keeper/api/checkhealth.go new file mode 100644 index 0000000000..c5d5a2d24a --- /dev/null +++ b/tools/keeper/api/checkhealth.go @@ -0,0 +1,21 @@ +package api + +import ( + "net/http" + + "github.com/gin-gonic/gin" +) + +func NewCheckHealth(version string) *CheckHealth { + return &CheckHealth{version: version} +} + +type CheckHealth struct { + version string +} + +func (h *CheckHealth) Init(c gin.IRouter) { + c.GET("check_health", func(context *gin.Context) { + context.JSON(http.StatusOK, map[string]string{"version": h.version}) + }) +} diff --git a/tools/keeper/api/common.go b/tools/keeper/api/common.go new file mode 100644 index 0000000000..d02a30eb8b --- /dev/null +++ b/tools/keeper/api/common.go @@ -0,0 +1,89 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var commonLogger = log.GetLogger("CMN") + +func CreateDatabase(username string, password string, host string, port int, usessl bool, dbname string, databaseOptions map[string]interface{}) { + qid := util.GetQidOwn() + + commonLogger := commonLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + ctx := context.Background() + + conn, err := db.NewConnector(username, password, host, port, usessl) + if err != nil { + commonLogger.Errorf("connect to adapter error, msg:%s", err) + return + } + + defer closeConn(conn) + + createDBSql := generateCreateDBSql(dbname, databaseOptions) + commonLogger.Warningf("create database sql: %s", createDBSql) + + for i := 0; i < 3; i++ { + if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil { + commonLogger.Errorf("try %v times: create database %s error, msg:%v", i+1, dbname, err) + time.Sleep(5 * time.Second) + continue + } + return + } + panic(err) +} + +func generateCreateDBSql(dbname string, databaseOptions map[string]interface{}) string { + var buf bytes.Buffer + buf.WriteString("create database if not exists ") + buf.WriteString(dbname) + + for k, v := range databaseOptions { + buf.WriteString(" ") + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + return buf.String() +} + +func CreatTables(username string, password string, host string, port int, usessl bool, dbname string, createList []string) { + ctx := context.Background() + conn, err := db.NewConnectorWithDb(username, password, host, port, dbname, usessl) + if err != nil { + commonLogger.Errorf("connect to database error, msg:%s", err) + return + } + defer closeConn(conn) + + for _, createSql := range createList { + commonLogger.Infof("execute sql:%s", createSql) + if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil { + commonLogger.Errorf("execute sql: %s, error: %s", createSql, err) + } + } +} + +func closeConn(conn *db.Connector) { + if err := conn.Close(); err != nil { + commonLogger.Errorf("close connection error, msg:%s", err) + } +} diff --git a/tools/keeper/api/exporter_test.go b/tools/keeper/api/exporter_test.go new file mode 100644 index 0000000000..f9ef6b169a --- /dev/null +++ b/tools/keeper/api/exporter_test.go @@ -0,0 +1,297 @@ +package api + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + 
"net/http/httptest" + "strings" + "testing" + "time" + + "github.com/gin-gonic/gin" + "github.com/shopspring/decimal" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/cmd" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/util" +) + +var router *gin.Engine +var conf *config.Config +var dbName = "exporter_test" + +func TestMain(m *testing.M) { + conf = config.InitConfig() + log.ConfigLog() + + conf.Metrics.Database.Name = dbName + conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl) + if err != nil { + panic(err) + } + defer conn.Close() + ctx := context.Background() + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + + if _, err = conn.Exec(ctx, fmt.Sprintf("create database if not exists %s", dbName), util.GetQidOwn()); err != nil { + logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("create database %s", dbName), err) + } + gin.SetMode(gin.ReleaseMode) + router = gin.New() + reporter := NewReporter(conf) + reporter.Init(router) + + var createList = []string{ + CreateClusterInfoSql, + CreateDnodeSql, + CreateMnodeSql, + CreateDnodeInfoSql, + CreateDataDirSql, + CreateLogDirSql, + CreateTempDirSql, + CreateVgroupsInfoSql, + CreateVnodeRoleSql, + CreateSummarySql, + CreateGrantInfoSql, + CreateKeeperSql, + } + CreatTables(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl, conf.Metrics.Database.Name, createList) + + processor := process.NewProcessor(conf) + node := NewNodeExporter(processor) + node.Init(router) + m.Run() + if _, err = conn.Exec(ctx, fmt.Sprintf("drop database if exists %s", dbName), util.GetQidOwn()); err != nil { + logger.Errorf("execute sql: %s, error: %s", fmt.Sprintf("drop database %s", dbName), err) + } +} + +func TestGetMetrics(t *testing.T) { + w := httptest.NewRecorder() + req, _ := http.NewRequest(http.MethodGet, "/metrics", nil) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) +} + +var now = time.Now() +var nowStr = now.Format(time.RFC3339Nano) + +var report = Report{ + Ts: nowStr, + DnodeID: 1, + DnodeEp: "localhost:7100", + ClusterID: "6980428120398645172", + Protocol: 1, + ClusterInfo: &ClusterInfo{ + FirstEp: "localhost:7100", + FirstEpDnodeID: 1, + Version: "3.0.0.0", + MasterUptime: 2.3090276954462752e-05, + MonitorInterval: 1, + VgroupsTotal: 2, + VgroupsAlive: 2, + VnodesTotal: 2, + VnodesAlive: 2, + ConnectionsTotal: 1, + Dnodes: []Dnode{ + { + DnodeID: 1, + DnodeEp: "localhost:7100", + Status: "ready", + }, + }, + Mnodes: []Mnode{ + { + MnodeID: 1, + MnodeEp: "localhost:7100", + Role: "master", + }, + }, + }, + VgroupInfos: []VgroupInfo{ + { + VgroupID: 1, + DatabaseName: "test", + TablesNum: 1, + Status: "ready", + Vnodes: []Vnode{ + { + DnodeID: 1, + VnodeRole: "LEADER", + }, + { + DnodeID: 2, + VnodeRole: "FOLLOWER", + }, + }, + }, + }, + GrantInfo: &GrantInfo{ + ExpireTime: 2147483647, + TimeseriesUsed: 800, + TimeseriesTotal: 2147483647, + }, + DnodeInfo: DnodeInfo{ + Uptime: 0.000291412026854232, + CPUEngine: 0.0828500414250207, + CPUSystem: 0.4971002485501243, + CPUCores: 12, + MemEngine: 9268, + MemSystem: 54279816, + MemTotal: 65654816, + DiskEngine: 0, + DiskUsed: 39889702912, + DiskTotal: 210304475136, + NetIn: 
4727.45292368682, + NetOut: 2194.251734390486, + IoRead: 3789.8909811694753, + IoWrite: 12311.19920713578, + IoReadDisk: 0, + IoWriteDisk: 12178.394449950447, + ReqSelect: 2, + ReqSelectRate: 0, + ReqInsert: 6, + ReqInsertSuccess: 4, + ReqInsertRate: 0, + ReqInsertBatch: 10, + ReqInsertBatchSuccess: 8, + ReqInsertBatchRate: 0, + Errors: 2, + VnodesNum: 2, + Masters: 2, + HasMnode: 1, + HasQnode: 1, + HasSnode: 1, + HasBnode: 1, + }, + DiskInfos: DiskInfo{ + Datadir: []DataDir{ + { + Name: "/root/TDengine/sim/dnode1/data", + Level: 0, + Avail: decimal.NewFromInt(171049893888), + Used: decimal.NewFromInt(39254581248), + Total: decimal.NewFromInt(210304475136), + }, + { + Name: "/root/TDengine/sim/dnode2/data", + Level: 1, + Avail: decimal.NewFromInt(171049893888), + Used: decimal.NewFromInt(39254581248), + Total: decimal.NewFromInt(210304475136), + }, + }, + Logdir: LogDir{ + Name: "/root/TDengine/sim/dnode1/log", + Avail: decimal.NewFromInt(171049771008), + Used: decimal.NewFromInt(39254704128), + Total: decimal.NewFromInt(210304475136), + }, + Tempdir: TempDir{ + Name: "/tmp", + Avail: decimal.NewFromInt(171049771008), + Used: decimal.NewFromInt(39254704128), + Total: decimal.NewFromInt(210304475136), + }, + }, + LogInfos: LogInfo{ + Summary: []Summary{ + { + Level: "error", + Total: 0, + }, { + Level: "info", + Total: 114, + }, { + Level: "debug", + Total: 117, + }, { + Level: "trace", + Total: 126, + }, + }, + }, +} + +func TestPutMetrics(t *testing.T) { + w := httptest.NewRecorder() + b, _ := json.Marshal(report) + body := strings.NewReader(string(b)) + req, _ := http.NewRequest(http.MethodPost, "/report", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, + conf.TDengine.Port, dbName, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + }() + + ctx := context.Background() + data, err := conn.Query(ctx, "select info from log_summary", util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "select * from log_summary", err) + t.Fatal(err) + } + for _, info := range data.Data { + assert.Equal(t, int32(114), info[0]) + } + + var tenMinutesBefore = now.Add(-10 * time.Minute) + var tenMinutesBeforeStr = tenMinutesBefore.Format(time.RFC3339Nano) + + conf.FromTime = tenMinutesBeforeStr + conf.Transfer = "old_taosd_metric" + + var cmd = cmd.NewCommand(conf) + cmd.Process(conf) + + type TableInfo struct { + TsName string + RowNum int + } + + tables := map[string]*TableInfo{ + "taosd_cluster_basic": {"ts", 1}, + "taosd_cluster_info": {"_ts", 1}, + "taosd_vgroups_info": {"_ts", 1}, + "taosd_dnodes_info": {"_ts", 1}, + "taosd_dnodes_status": {"_ts", 1}, + "taosd_dnodes_data_dirs": {"_ts", 1}, + "taosd_dnodes_log_dirs": {"_ts", 2}, + "taosd_mnodes_info": {"_ts", 1}, + "taosd_vnodes_info": {"_ts", 1}, + } + + for table, tableInfo := range tables { + data, err = conn.Query(ctx, fmt.Sprintf("select %s from %s", tableInfo.TsName, table), util.GetQidOwn()) + if err != nil { + logger.Errorf("execute sql:%s, error:%s", "select * from "+table, err) + t.Fatal(err) + } + + assert.Equal(t, tableInfo.RowNum, len(data.Data)) + assert.Equal(t, now.UnixMilli(), data.Data[0][0].(time.Time).UnixMilli()) + } + + conf.Transfer = "" + conf.Drop = "old_taosd_metric_stables" 
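+	// process again with Drop set, which should remove the legacy taosd metric stables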
+	cmd.Process(conf)
+
+	data, err = conn.Query(ctx, "select * from information_schema.ins_stables where stable_name = 'm_info'", util.GetQidOwn())
+	if err != nil {
+		logger.Errorf("query m_info from ins_stables error, msg:%s", err)
+		t.Fatal(err)
+	}
+	assert.Equal(t, 0, len(data.Data))
+	logger.Infof("ALL OK !!!")
+}
diff --git a/tools/keeper/api/gen_metric.go b/tools/keeper/api/gen_metric.go
new file mode 100644
index 0000000000..5534fe453d
--- /dev/null
+++ b/tools/keeper/api/gen_metric.go
@@ -0,0 +1,770 @@
+package api
+
+import (
+	"bytes"
+	"context"
+	"crypto/tls"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/gin-gonic/gin"
+	"github.com/sirupsen/logrus"
+	"github.com/taosdata/taoskeeper/db"
+	"github.com/taosdata/taoskeeper/infrastructure/config"
+	"github.com/taosdata/taoskeeper/infrastructure/log"
+	"github.com/taosdata/taoskeeper/util"
+)
+
+var re = regexp.MustCompile("'+")
+var gmLogger = log.GetLogger("GEN")
+
+var MAX_SQL_LEN = 1000000
+
+var STABLE_NAME_KEY = "priv_stn"
+
+type ColumnSeq struct {
+	tagNames []string
+	metricNames []string
+}
+
+var (
+	mu sync.RWMutex
+	gColumnSeqMap = make(map[string]ColumnSeq)
+)
+
+type GeneralMetric struct {
+	client *http.Client
+	conn *db.Connector
+	username string
+	password string
+	host string
+	port int
+	usessl bool
+	database string
+	url *url.URL
+}
+
+type Tag struct {
+	Name string `json:"name"`
+	Value string `json:"value"`
+}
+
+type Metric struct {
+	Name string `json:"name"`
+	Value float64 `json:"value"`
+}
+
+type MetricGroup struct {
+	Tags []Tag `json:"tags"`
+	Metrics []Metric `json:"metrics"`
+}
+
+type StableInfo struct {
+	Name string `json:"name"`
+	MetricGroups []MetricGroup `json:"metric_groups"`
+}
+
+type StableArrayInfo struct {
+	Ts string `json:"ts"`
+	Protocol int `json:"protocol"`
+	Tables []StableInfo `json:"tables"`
+}
+
+type ClusterBasic struct {
+	ClusterId string `json:"cluster_id"`
+	Ts string `json:"ts"`
+	FirstEp string `json:"first_ep"`
+	FirstEpDnodeId int32 `json:"first_ep_dnode_id"`
+	ClusterVersion string `json:"cluster_version"`
+}
+
+type SlowSqlDetailInfo struct {
+	StartTs string `json:"start_ts"`
+	RequestId string `json:"request_id"`
+	QueryTime int32 `json:"query_time"`
+	Code int32 `json:"code"`
+	ErrorInfo string `json:"error_info"`
+	Type int8 `json:"type"`
+	RowsNum int64 `json:"rows_num"`
+	Sql string `json:"sql"`
+	ProcessName string `json:"process_name"`
+	ProcessId string `json:"process_id"`
+	Db string `json:"db"`
+	User string `json:"user"`
+	Ip string `json:"ip"`
+	ClusterId string `json:"cluster_id"`
+}
+
+func (gm *GeneralMetric) Init(c gin.IRouter) error {
+	c.POST("/general-metric", gm.handleFunc())
+	c.POST("/taosd-cluster-basic", gm.handleTaosdClusterBasic())
+	c.POST("/slow-sql-detail-batch", gm.handleSlowSqlDetailBatch())
+
+	conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl)
+	if err != nil {
+		gmLogger.Errorf("init db connect error, msg:%s", err)
+		return err
+	}
+	gm.conn = conn
+
+	err = gm.createSTables()
+	if err != nil {
+		gmLogger.Errorf("create stable error, msg:%s", err)
+		return err
+	}
+
+	err = gm.initColumnSeqMap()
+	if err != nil {
+		gmLogger.Errorf("init gColumnSeqMap error, msg:%s", err)
+		return err
+	}
+
+	return err
+}
+
+func NewGeneralMetric(conf *config.Config) *GeneralMetric {
+
+	client := &http.Client{
+		Transport: &http.Transport{
+			Proxy: http.ProxyFromEnvironment,
+			DialContext: 
(&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + var protocol string + if conf.TDengine.Usessl { + protocol = "https" + } else { + protocol = "http" + } + + imp := &GeneralMetric{ + client: client, + username: conf.TDengine.Username, + password: conf.TDengine.Password, + host: conf.TDengine.Host, + port: conf.TDengine.Port, + usessl: conf.TDengine.Usessl, + database: conf.Metrics.Database.Name, + url: &url.URL{ + Scheme: protocol, + Host: fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port), + Path: "/influxdb/v1/write", + RawQuery: fmt.Sprintf("db=%s&precision=ms&table_name_key=%s", conf.Metrics.Database.Name, STABLE_NAME_KEY), + }, + } + return imp +} + +func (gm *GeneralMetric) handleFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.client == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get general metric data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. %s", err)}) + return + } + + var request []StableArrayInfo + + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("data:%s", string(data)) + } + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse general metric data error, data:%s, error:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)}) + return + } + + if len(request) == 0 { + c.JSON(http.StatusOK, gin.H{}) + return + } + + err = gm.handleBatchMetrics(request, qid) + + if err != nil { + gmLogger.Errorf("process records error. msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("process records error. 
%s", err)}) + return + } + + c.JSON(http.StatusOK, gin.H{}) + } +} + +func (gm *GeneralMetric) handleBatchMetrics(request []StableArrayInfo, qid uint64) error { + var buf bytes.Buffer + + for _, stableArrayInfo := range request { + if stableArrayInfo.Ts == "" { + gmLogger.Error("ts data is empty") + continue + } + + for _, table := range stableArrayInfo.Tables { + if table.Name == "" { + gmLogger.Error("stable name is empty") + continue + } + + table.Name = strings.ToLower(table.Name) + if _, ok := Load(table.Name); !ok { + Init(table.Name) + } + + for _, metricGroup := range table.MetricGroups { + buf.WriteString(table.Name) + writeTags(metricGroup.Tags, table.Name, &buf) + buf.WriteString(" ") + writeMetrics(metricGroup.Metrics, table.Name, &buf) + buf.WriteString(" ") + buf.WriteString(stableArrayInfo.Ts) + buf.WriteString("\n") + } + } + } + + if buf.Len() > 0 { + return gm.lineWriteBody(&buf, qid) + } + return nil +} + +func (gm *GeneralMetric) lineWriteBody(buf *bytes.Buffer, qid uint64) error { + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + header := map[string][]string{ + "Connection": {"keep-alive"}, + } + req_data := buf.String() + + //build new URL,add qid to URL + urlWithQid := *gm.url + query := urlWithQid.Query() + query.Set("qid", fmt.Sprintf("%d", qid)) + urlWithQid.RawQuery = query.Encode() + + req := &http.Request{ + Method: http.MethodPost, + URL: &urlWithQid, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: header, + Host: gm.url.Host, + } + req.SetBasicAuth(gm.username, gm.password) + + req.Body = io.NopCloser(buf) + + startTime := time.Now() + resp, err := gm.client.Do(req) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + gmLogger.Errorf("latency:%v, req_data:%v, url:%s, resp:%d, err:%s", latency, req_data, urlWithQid.String(), resp.StatusCode, err) + return err + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("latency:%v, req_data:%v, url:%s, resp:%d", latency, req_data, urlWithQid.String(), resp.StatusCode) + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body)) + } + return nil +} + +func (gm *GeneralMetric) handleTaosdClusterBasic() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.conn == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get taosd cluster basic data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get general metric data error. 
%s", err)}) + return + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("receive taosd cluster basic data:%s", string(data)) + } + + var request ClusterBasic + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse general metric data error, data:%s, msg:%s", string(data), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse general metric data error: %s", err)}) + return + } + + sql := fmt.Sprintf( + "insert into %s.taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values (%s, '%s', %d, '%s') ", + gm.database, request.ClusterId, request.ClusterId, request.Ts, request.FirstEp, request.FirstEpDnodeId, request.ClusterVersion) + + if _, err = gm.conn.Exec(context.Background(), sql, qid); err != nil { + gmLogger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taosd_cluster_basic error. %s", err)}) + return + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func processString(input string) string { + // remove number in the beginning + re := regexp.MustCompile(`^\d+`) + input = re.ReplaceAllString(input, "") + + // replage "." to "_" + input = strings.ReplaceAll(input, ".", "_") + + // remove special characters + re = regexp.MustCompile(`[^a-zA-Z0-9_]`) + input = re.ReplaceAllString(input, "") + + return input +} + +func (gm *GeneralMetric) handleSlowSqlDetailBatch() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + gmLogger := gmLogger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + if gm.conn == nil { + gmLogger.Error("no connection") + c.JSON(http.StatusInternalServerError, gin.H{"error": "no connection"}) + return + } + + data, err := c.GetRawData() + if err != nil { + gmLogger.Errorf("get taos slow sql detail data error, msg:%s", err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("get taos slow sql detail data error. 
%s", err)}) + return + } + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + gmLogger.Tracef("receive taos slow sql detail data:%s", string(data)) + } + + var request []SlowSqlDetailInfo + + if err := json.Unmarshal(data, &request); err != nil { + gmLogger.Errorf("parse taos slow sql detail error, msg:%s", string(data)) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("parse taos slow sql detail error: %s", err)}) + return + } + + var sql_head = "INSERT INTO `taos_slow_sql_detail` (tbname, `db`, `user`, `ip`, `cluster_id`, `start_ts`, `request_id`, `query_time`, `code`, `error_info`, `type`, `rows_num`, `sql`, `process_name`, `process_id`) values " + var buf bytes.Buffer + buf.WriteString(sql_head) + var qid_counter uint8 = 0 + for _, slowSqlDetailInfo := range request { + if slowSqlDetailInfo.StartTs == "" { + gmLogger.Error("start_ts data is empty") + continue + } + + // cut string to max len + slowSqlDetailInfo.Sql = re.ReplaceAllString(slowSqlDetailInfo.Sql, "'") // 将匹配到的部分替换为一个单引号 + slowSqlDetailInfo.Sql = strings.ReplaceAll(slowSqlDetailInfo.Sql, "'", "''") + slowSqlDetailInfo.Sql = util.SafeSubstring(slowSqlDetailInfo.Sql, 16384) + slowSqlDetailInfo.ClusterId = util.SafeSubstring(slowSqlDetailInfo.ClusterId, 32) + slowSqlDetailInfo.Db = util.SafeSubstring(slowSqlDetailInfo.Db, 1024) + if slowSqlDetailInfo.Db == "" { + slowSqlDetailInfo.Db = "unknown" + } + slowSqlDetailInfo.User = util.SafeSubstring(slowSqlDetailInfo.User, 32) + slowSqlDetailInfo.Ip = util.SafeSubstring(slowSqlDetailInfo.Ip, 32) + slowSqlDetailInfo.ProcessName = util.SafeSubstring(slowSqlDetailInfo.ProcessName, 32) + slowSqlDetailInfo.ProcessId = util.SafeSubstring(slowSqlDetailInfo.ProcessId, 32) + slowSqlDetailInfo.ErrorInfo = util.SafeSubstring(slowSqlDetailInfo.ErrorInfo, 128) + + // max len 192 + var sub_table_name = slowSqlDetailInfo.User + "_" + util.SafeSubstring(slowSqlDetailInfo.Db, 80) + "_" + slowSqlDetailInfo.Ip + "_clusterId_" + slowSqlDetailInfo.ClusterId + sub_table_name = strings.ToLower(processString(sub_table_name)) + + var sql = fmt.Sprintf( + "('%s', '%s', '%s', '%s', '%s', %s, %s, %d, %d, '%s', %d, %d, '%s', '%s', '%s') ", + sub_table_name, + slowSqlDetailInfo.Db, slowSqlDetailInfo.User, slowSqlDetailInfo.Ip, slowSqlDetailInfo.ClusterId, slowSqlDetailInfo.StartTs, slowSqlDetailInfo.RequestId, + slowSqlDetailInfo.QueryTime, slowSqlDetailInfo.Code, slowSqlDetailInfo.ErrorInfo, slowSqlDetailInfo.Type, slowSqlDetailInfo.RowsNum, slowSqlDetailInfo.Sql, + slowSqlDetailInfo.ProcessName, slowSqlDetailInfo.ProcessId) + if (buf.Len() + len(sql)) < MAX_SQL_LEN { + buf.WriteString(sql) + } else { + if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil { + gmLogger.Errorf("insert taos_slow_sql_detail error, sql:%s, error:%s", buf.String(), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. %s", err)}) + return + } + buf.Reset() + buf.WriteString(sql_head) + buf.WriteString(sql) + qid_counter++ + } + } + + if buf.Len() > len(sql_head) { + if _, err = gm.conn.Exec(context.Background(), buf.String(), qid|uint64((qid_counter%255))); err != nil { + gmLogger.Errorf("insert taos_slow_sql_detail error, data:%s, msg:%s", buf.String(), err) + c.JSON(http.StatusBadRequest, gin.H{"error": fmt.Sprintf("insert taos_slow_sql_detail error. 
%s", err)}) + return + } + } + c.JSON(http.StatusOK, gin.H{}) + } +} + +func writeTags(tags []Tag, stbName string, buf *bytes.Buffer) { + var nameArray []string + if columnSeq, ok := Load(stbName); ok { + if len(columnSeq.tagNames) < len(tags) { + // add column, only schema change will hit here + for _, tag := range tags { + if !contains(columnSeq.tagNames, tag.Name) { + columnSeq.tagNames = append(columnSeq.tagNames, tag.Name) + } + } + Store(stbName, columnSeq) + } + nameArray = columnSeq.tagNames + } + + // 将 Tag 切片转换为 map + tagMap := make(map[string]string) + for _, tag := range tags { + tagMap[tag.Name] = tag.Value + } + + for _, name := range nameArray { + if value, ok := tagMap[name]; ok { + if value != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", name, util.EscapeInfluxProtocol(value))) + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown")) + gmLogger.Errorf("tag value is empty, tag name:%s", name) + } + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", name, "unknown")) + } + } + + // have sub table name + if _, ok := tagMap[STABLE_NAME_KEY]; ok { + return + } + + subTableName := get_sub_table_name_valid(stbName, tagMap) + if subTableName != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", STABLE_NAME_KEY, subTableName)) + } else { + gmLogger.Errorf("get sub stable name error, stable name:%s, tag map:%v", stbName, tagMap) + } +} + +func checkKeysExist(data map[string]string, keys ...string) bool { + for _, key := range keys { + _, ok := data[key] + if !ok { + return false + } + } + return true +} + +func get_sub_table_name_valid(stbName string, tagMap map[string]string) string { + stbName = get_sub_table_name(stbName, tagMap) + return util.ToValidTableName(stbName) +} + +func get_sub_table_name(stbName string, tagMap map[string]string) string { + if strings.HasPrefix(stbName, "taosx") { + switch stbName { + case "taosx_sys": + if checkKeysExist(tagMap, "taosx_id") { + return fmt.Sprintf("sys_%s", tagMap["taosx_id"]) + } + case "taosx_agent": + if checkKeysExist(tagMap, "taosx_id", "agent_id") { + return fmt.Sprintf("agent_%s_%s", tagMap["taosx_id"], tagMap["agent_id"]) + } + case "taosx_connector": + if checkKeysExist(tagMap, "taosx_id", "ds_name", "task_id") { + return fmt.Sprintf("connector_%s_%s_%s", tagMap["taosx_id"], tagMap["ds_name"], tagMap["task_id"]) + } + default: + if strings.HasPrefix(stbName, "taosx_task_") { + ds_name := stbName[len("taosx_task_"):] + if checkKeysExist(tagMap, "taosx_id", "task_id") { + return fmt.Sprintf("task_%s_%s_%s", tagMap["taosx_id"], ds_name, tagMap["task_id"]) + } + } + return "" + } + } + + switch stbName { + case "taosd_cluster_info": + if checkKeysExist(tagMap, "cluster_id") { + return fmt.Sprintf("cluster_%s", tagMap["cluster_id"]) + } + case "taosd_vgroups_info": + if checkKeysExist(tagMap, "cluster_id", "vgroup_id", "database_name") { + return fmt.Sprintf("vginfo_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_info": + if checkKeysExist(tagMap, "cluster_id", "dnode_id") { + return fmt.Sprintf("dinfo_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_status": + if checkKeysExist(tagMap, "cluster_id", "dnode_id") { + return fmt.Sprintf("dstatus_%s_cluster_%s", tagMap["dnode_id"], tagMap["cluster_id"]) + } + case "taosd_dnodes_log_dirs": + if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name") { + subTableName := fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], 
tagMap["cluster_id"]) + if len(subTableName) <= util.MAX_TABLE_NAME_LEN { + return subTableName + } + return fmt.Sprintf("dlog_%s_%s_cluster_%s", tagMap["dnode_id"], + util.GetMd5HexStr(tagMap["data_dir_name"]), + tagMap["cluster_id"]) + } + case "taosd_dnodes_data_dirs": + if checkKeysExist(tagMap, "cluster_id", "dnode_id", "data_dir_name", "data_dir_level") { + subTableName := fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], tagMap["data_dir_name"], tagMap["data_dir_level"], tagMap["cluster_id"]) + if len(subTableName) <= util.MAX_TABLE_NAME_LEN { + return subTableName + } + return fmt.Sprintf("ddata_%s_%s_level_%s_cluster_%s", tagMap["dnode_id"], + util.GetMd5HexStr(tagMap["data_dir_name"]), + tagMap["data_dir_level"], + tagMap["cluster_id"]) + } + case "taosd_mnodes_info": + if checkKeysExist(tagMap, "cluster_id", "mnode_id") { + return fmt.Sprintf("minfo_%s_cluster_%s", tagMap["mnode_id"], tagMap["cluster_id"]) + } + case "taosd_vnodes_info": + if checkKeysExist(tagMap, "cluster_id", "database_name", "vgroup_id", "dnode_id") { + return fmt.Sprintf("vninfo_%s_dnode_%s_vgroup_%s_cluster_%s", tagMap["database_name"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taosd_sql_req": + if checkKeysExist(tagMap, "username", "sql_type", "result", "dnode_id", "vgroup_id", "cluster_id") { + return fmt.Sprintf("taosdsql_%s_%s_%s_%s_vgroup_%s_cluster_%s", tagMap["username"], + tagMap["sql_type"], tagMap["result"], tagMap["dnode_id"], tagMap["vgroup_id"], tagMap["cluster_id"]) + } + case "taos_sql_req": + if checkKeysExist(tagMap, "username", "sql_type", "result", "cluster_id") { + return fmt.Sprintf("taossql_%s_%s_%s_cluster_%s", tagMap["username"], + tagMap["sql_type"], tagMap["result"], tagMap["cluster_id"]) + } + case "taos_slow_sql": + if checkKeysExist(tagMap, "username", "duration", "result", "cluster_id") { + return fmt.Sprintf("slowsql_%s_%s_%s_cluster_%s", tagMap["username"], + tagMap["duration"], tagMap["result"], tagMap["cluster_id"]) + } + + default: + return "" + } + return "" +} + +func contains(array []string, item string) bool { + for _, value := range array { + if value == item { + return true + } + } + return false +} + +func writeMetrics(metrics []Metric, stbName string, buf *bytes.Buffer) { + var nameArray []string + if columnSeq, ok := Load(stbName); ok { + if len(columnSeq.metricNames) < len(metrics) { + // add column, only schema change will hit here + for _, metric := range metrics { + if !contains(columnSeq.metricNames, metric.Name) { + columnSeq.metricNames = append(columnSeq.metricNames, metric.Name) + } + } + Store(stbName, columnSeq) + } + nameArray = columnSeq.metricNames + } + + // 将 Metric 切片转换为 map + metricMap := make(map[string]float64) + for _, metric := range metrics { + metricMap[metric.Name] = metric.Value + } + + for i, name := range nameArray { + if value, ok := metricMap[name]; ok { + buf.WriteString(fmt.Sprintf("%s=%sf64", name, strconv.FormatFloat(value, 'f', -1, 64))) + if i != len(nameArray)-1 { + buf.WriteString(",") + } + } + } +} + +// 存储数据 +func Store(key string, value ColumnSeq) { + mu.Lock() + defer mu.Unlock() + gColumnSeqMap[key] = value +} + +// 加载数据 +func Load(key string) (ColumnSeq, bool) { + mu.RLock() + defer mu.RUnlock() + value, ok := gColumnSeqMap[key] + return value, ok +} + +// 初始化单表的列序列 +func Init(key string) { + mu.Lock() + defer mu.Unlock() + if _, ok := gColumnSeqMap[key]; !ok { + columnSeq := ColumnSeq{ + tagNames: []string{}, + metricNames: []string{}, + } + gColumnSeqMap[key] = 
columnSeq + } +} + +// 初始化所有列序列 +func (gm *GeneralMetric) initColumnSeqMap() error { + query := fmt.Sprintf(` + select stable_name + from information_schema.ins_stables + where db_name = '%s' + and ( + stable_name like 'taos_%%' + or stable_name like 'taosd_%%' + or stable_name like 'taosx_%%' + ) + order by stable_name asc; + `, gm.database) + + data, err := gm.conn.Query(context.Background(), query, util.GetQidOwn()) + + if err != nil { + return err + } + + //get all stables, then init gColumnSeqMap + for _, row := range data.Data { + stableName := row[0].(string) + Init(stableName) + } + //set gColumnSeqMap with desc stables + for tableName, columnSeq := range gColumnSeqMap { + data, err := gm.conn.Query(context.Background(), fmt.Sprintf(`desc %s.%s;`, gm.database, tableName), util.GetQidOwn()) + + if err != nil { + return err + } + + if len(data.Data) < 1 || len(data.Data[0]) < 4 { + return fmt.Errorf("desc %s.%s error", gm.database, tableName) + } + + for i, row := range data.Data { + if i == 0 { + continue + } + + if row[3].(string) == "TAG" { + columnSeq.tagNames = append(columnSeq.tagNames, row[0].(string)) + } else { + columnSeq.metricNames = append(columnSeq.metricNames, row[0].(string)) + } + } + Store(tableName, columnSeq) + } + + gmLogger.Infof("gColumnSeqMap:%v", gColumnSeqMap) + return nil +} + +func (gm *GeneralMetric) createSTables() error { + var createTableSql = "create stable if not exists taosd_cluster_basic " + + "(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " + + "tags (cluster_id varchar(50))" + + if gm.conn == nil { + return errNoConnection + } + _, err := gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + if err != nil { + return err + } + + createTableSql = "create stable if not exists taos_slow_sql_detail" + + " (start_ts TIMESTAMP, request_id BIGINT UNSIGNED PRIMARY KEY, query_time INT, code INT, error_info varchar(128), " + + "type TINYINT, rows_num BIGINT, sql varchar(16384), process_name varchar(32), process_id varchar(32)) " + + "tags (db varchar(1024), `user` varchar(32), ip varchar(32), cluster_id varchar(32))" + + _, err = gm.conn.Exec(context.Background(), createTableSql, util.GetQidOwn()) + return err +} diff --git a/tools/keeper/api/gen_metric_test.go b/tools/keeper/api/gen_metric_test.go new file mode 100644 index 0000000000..88987d6544 --- /dev/null +++ b/tools/keeper/api/gen_metric_test.go @@ -0,0 +1,358 @@ +package api + +import ( + "context" + "fmt" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" +) + +var router_inited bool = false + +func TestClusterBasic(t *testing.T) { + cfg := util.GetCfg() + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + gm := NewGeneralMetric(cfg) + if !router_inited { + err := gm.Init(router) + assert.NoError(t, err) + router_inited = true + } + + testcfg := struct { + name string + ts int64 + tbname string + data string + expect string + }{ + name: "1", + tbname: "taosd_cluster_basic", + ts: 1705655770381, + data: `{"ts":"1705655770381","cluster_id":"7648966395564416484","protocol":2,"first_ep":"ssfood06:6130","first_ep_dnode_id":1,"cluster_version":"3.2.1.0.alp"}`, + expect: "7648966395564416484", + } + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, 
gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/taosd-cluster-basic", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select ts, cluster_id from %s.%s where ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + }) + + testcfg = struct { + name string + ts int64 + tbname string + data string + expect string + }{ + name: "1", + tbname: "taos_slow_sql_detail", + ts: 1703226836762, + data: `[{ + "start_ts": "1703226836762", + "request_id": "1", + "query_time": 100, + "code": 0, + "error_info": "", + "type": 1, + "rows_num": 5, + "sql": "select * from abc;", + "process_name": "abc", + "process_id": "123", + "db": "dbname", + "user": "root", + "ip": "127.0.0.1", + "cluster_id": "1234567" + }, + { + "start_ts": "1703226836763", + "request_id": "2", + "query_time": 100, + "code": 0, + "error_info": "", + "type": 1, + "rows_num": 5, + "sql": "insert into abc ('a', 'b') values ('aaa', 'bbb');", + "process_name": "abc", + "process_id": "123", + "db": "dbname", + "user": "root", + "ip": "127.0.0.1", + "cluster_id": "1234567" + }]`, + expect: "1234567", + } + + conn, err = db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + MAX_SQL_LEN = 1000000 + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/slow-sql-detail-batch", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + data, err := conn.Query(context.Background(), fmt.Sprintf("select start_ts, cluster_id from %s.%s where start_ts=%d", gm.database, testcfg.tbname, testcfg.ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + }) +} + +func TestGenMetric(t *testing.T) { + cfg := util.GetCfg() + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + gm := NewGeneralMetric(cfg) + if !router_inited { + err := gm.Init(router) + assert.NoError(t, err) + router_inited = true + } + + testcfg := struct { + name string + ts []int64 + tbname []string + data string + expect string + }{ + name: "1", + tbname: []string{"taosd_cluster_info", "taosd_dnodes_info"}, + ts: []int64{1703226836761, 1703226836762}, + data: `[{ + "ts": "1703226836761", + "protocol": 2, + "tables": [{ + "name": "taosd_cluster_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }], + "metrics": [{ + "name": "dbs_total", + "value": 1 + }, { + "name": "master_uptime", + "value": 0 + }] + }] + }, { + "name": "taosd_dnodes_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }, { + "name": "dnode_id", + "value": "1" + }, { + "name": "dnode_ep", + "value": "ssfood06:6130" + }], + 
"metrics": [{ + "name": "uptime", + "value": 0 + }, { + "name": "cpu_engine", + "value": 0 + }] + }] + }] + }, { + "ts": "1703226836762", + "protocol": 2, + "tables": [{ + "name": "taosd_cluster_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }], + "metrics": [{ + "name": "dbs_total", + "value": 1 + }, { + "name": "master_uptime", + "value": 0 + }] + }] + }, { + "name": "taosd_dnodes_info", + "metric_groups": [{ + "tags": [{ + "name": "cluster_id", + "value": "1397715317673023180" + }, { + "name": "dnode_id", + "value": "1" + }, { + "name": "dnode_ep", + "value": ", =\"ssfood06:6130" + }], + "metrics": [{ + "name": "uptime", + "value": 0 + }, { + "name": "cpu_engine", + "value": 0 + }] + }] + }] + }]`, + expect: "1397715317673023180", + } + + conn, err := db.NewConnectorWithDb(gm.username, gm.password, gm.host, gm.port, gm.database, gm.usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", gm.database), util.GetQidOwn()) + }() + + t.Run(testcfg.name, func(t *testing.T) { + w := httptest.NewRecorder() + body := strings.NewReader(testcfg.data) + req, _ := http.NewRequest(http.MethodPost, "/general-metric", body) + router.ServeHTTP(w, req) + assert.Equal(t, 200, w.Code) + + for _, tbname := range testcfg.tbname { + for _, ts := range testcfg.ts { + data, err := conn.Query(context.Background(), fmt.Sprintf("select _ts, cluster_id from %s.%s where _ts=%d", gm.database, tbname, ts), util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) + assert.Equal(t, testcfg.expect, data.Data[0][1]) + } + } + }) +} +func TestGetSubTableName(t *testing.T) { + tests := []struct { + stbName string + tagMap map[string]string + want string + }{ + { + stbName: "taosx_sys", + tagMap: map[string]string{"taosx_id": "123"}, + want: "sys_123", + }, + { + stbName: "taosx_agent", + tagMap: map[string]string{"taosx_id": "123", "agent_id": "456"}, + want: "agent_123_456", + }, + { + stbName: "taosx_connector", + tagMap: map[string]string{"taosx_id": "123", "ds_name": "ds", "task_id": "789"}, + want: "connector_123_ds_789", + }, + { + stbName: "taosx_task_example", + tagMap: map[string]string{"taosx_id": "123", "task_id": "789"}, + want: "task_123_example_789", + }, + { + stbName: "taosd_cluster_info", + tagMap: map[string]string{"cluster_id": "123"}, + want: "cluster_123", + }, + { + stbName: "taosd_vgroups_info", + tagMap: map[string]string{"cluster_id": "123", "vgroup_id": "456", "database_name": "db"}, + want: "vginfo_db_vgroup_456_cluster_123", + }, + { + stbName: "taosd_dnodes_info", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123"}, + want: "dinfo_123_cluster_123", + }, + { + stbName: "taosd_dnodes_status", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123"}, + want: "dstatus_123_cluster_123", + }, + { + stbName: "taosd_dnodes_log_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "log"}, + want: "dlog_123_log_cluster_123", + }, + { + stbName: "taosd_dnodes_log_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": 
"loglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglogloglog"}, + want: "dlog_123_9cdc719961a632a27603cd5ed9f1aee2_cluster_123", + }, + { + stbName: "taosd_dnodes_data_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "data", "data_dir_level": "5"}, + want: "ddata_123_data_level_5_cluster_123", + }, + { + stbName: "taosd_dnodes_data_dirs", + tagMap: map[string]string{"cluster_id": "123", "dnode_id": "123", "data_dir_name": "datadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadatadata", "data_dir_level": "5"}, + want: "ddata_123_03bf8dffdf6b97e08f347c6ae795998b_level_5_cluster_123", + }, + { + stbName: "taosd_mnodes_info", + tagMap: map[string]string{"cluster_id": "123", "mnode_id": "12"}, + want: "minfo_12_cluster_123", + }, + { + stbName: "taosd_vnodes_info", + tagMap: map[string]string{"cluster_id": "123", "database_name": "db", "vgroup_id": "456", "dnode_id": "789"}, + want: "vninfo_db_dnode_789_vgroup_456_cluster_123", + }, + { + stbName: "taosd_sql_req", + tagMap: map[string]string{"username": "user", "sql_type": "select", "result": "success", "dnode_id": "123", "vgroup_id": "456", "cluster_id": "123"}, + want: "taosdsql_user_select_success_123_vgroup_456_cluster_123", + }, + { + stbName: "taos_sql_req", + tagMap: map[string]string{"username": "user", "sql_type": "select", "result": "success", "cluster_id": "123"}, + want: "taossql_user_select_success_cluster_123", + }, + { + stbName: "taos_slow_sql", + tagMap: map[string]string{"username": "user", "duration": "100ms", "result": "success", "cluster_id": "123"}, + want: "slowsql_user_100ms_success_cluster_123", + }, + } + + for _, tt := range tests { + t.Run(tt.stbName, func(t *testing.T) { + if got := get_sub_table_name_valid(tt.stbName, tt.tagMap); got != tt.want { + panic(fmt.Sprintf("get_sub_table_name() = %v, want %v", got, tt.want)) + } + }) + } +} diff --git a/tools/keeper/api/https_test.go b/tools/keeper/api/https_test.go new file mode 100644 index 0000000000..c73cbfc2e4 --- /dev/null +++ b/tools/keeper/api/https_test.go @@ -0,0 +1,127 @@ +package api + +import ( + "context" + "crypto/ecdsa" + "crypto/elliptic" + crand "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "fmt" + "log" + "math/big" + "net/http" + "net/http/httputil" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" +) + +func TestHttps(t *testing.T) { + server := startProxy() + defer server.Shutdown(context.Background()) + + cfg := util.GetCfg() + cfg.TDengine.Usessl = true + cfg.TDengine.Port = 34443 + + CreateDatabase(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.TDengine.Usessl, 
cfg.Metrics.Database.Name, cfg.Metrics.Database.Options) + + conn, err := db.NewConnectorWithDb(cfg.TDengine.Username, cfg.TDengine.Password, cfg.TDengine.Host, cfg.TDengine.Port, cfg.Metrics.Database.Name, cfg.TDengine.Usessl) + assert.NoError(t, err) + defer func() { + _, _ = conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", cfg.Metrics.Database.Name), util.GetQidOwn()) + }() + + data, err := conn.Query(context.Background(), "select server_version()", util.GetQidOwn()) + assert.NoError(t, err) + assert.Equal(t, 1, len(data.Data)) +} + +func generateSelfSignedCert() (tls.Certificate, error) { + priv, err := ecdsa.GenerateKey(elliptic.P384(), crand.Reader) + if err != nil { + return tls.Certificate{}, err + } + + notBefore := time.Now() + notAfter := notBefore.Add(365 * 24 * time.Hour) + + serialNumber, err := crand.Int(crand.Reader, new(big.Int).Lsh(big.NewInt(1), 128)) + if err != nil { + return tls.Certificate{}, err + } + + template := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Your Company"}, + }, + NotBefore: notBefore, + NotAfter: notAfter, + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + certDER, err := x509.CreateCertificate(crand.Reader, &template, &template, &priv.PublicKey, priv) + if err != nil { + return tls.Certificate{}, err + } + + certPEM := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: certDER}) + keyPEM, err := x509.MarshalECPrivateKey(priv) + if err != nil { + return tls.Certificate{}, err + } + + keyPEMBlock := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: keyPEM}) + + return tls.X509KeyPair(certPEM, keyPEMBlock) +} + +func startProxy() *http.Server { + // Generate self-signed certificate + cert, err := generateSelfSignedCert() + if err != nil { + log.Fatalf("Failed to generate self-signed certificate: %v", err) + } + + target := "http://127.0.0.1:6041" + proxyURL, err := url.Parse(target) + if err != nil { + log.Fatalf("Failed to parse target URL: %v", err) + } + + proxy := httputil.NewSingleHostReverseProxy(proxyURL) + proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, e error) { + http.Error(w, "Proxy error", http.StatusBadGateway) + } + mux := http.NewServeMux() + mux.Handle("/", proxy) + + server := &http.Server{ + Addr: ":34443", + Handler: mux, + TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}}, + // Setup server timeouts for better handling of idle connections and slowloris attacks + WriteTimeout: 10 * time.Second, + ReadTimeout: 10 * time.Second, + IdleTimeout: 30 * time.Second, + } + + log.Println("Starting server on :34443") + go func() { + err = server.ListenAndServeTLS("", "") + if err != nil && err != http.ErrServerClosed { + log.Fatalf("Failed to start HTTPS server: %v", err) + } + }() + return server +} diff --git a/tools/keeper/api/nodeexporter.go b/tools/keeper/api/nodeexporter.go new file mode 100644 index 0000000000..7b87a14336 --- /dev/null +++ b/tools/keeper/api/nodeexporter.go @@ -0,0 +1,32 @@ +package api + +import ( + "net/http" + + "github.com/gin-gonic/gin" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/taosdata/taoskeeper/process" +) + +type NodeExporter struct { + processor *process.Processor +} + +func NewNodeExporter(processor *process.Processor) *NodeExporter { + return &NodeExporter{processor: processor} 
+} + +func (z *NodeExporter) Init(c gin.IRouter) { + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(z.processor) + c.GET("metrics", z.myMiddleware(promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))) +} + +func (z *NodeExporter) myMiddleware(next http.Handler) gin.HandlerFunc { + return func(c *gin.Context) { + z.processor.Process() + // call Prometheus handler + next.ServeHTTP(c.Writer, c.Request) + } +} diff --git a/tools/keeper/api/report.go b/tools/keeper/api/report.go new file mode 100644 index 0000000000..eb9c3856f8 --- /dev/null +++ b/tools/keeper/api/report.go @@ -0,0 +1,478 @@ +package api + +import ( + "bytes" + "context" + "fmt" + "strconv" + "strings" + "sync/atomic" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/go-utils/json" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var logger = log.GetLogger("REP") + +var createList = []string{ + // CreateClusterInfoSql, + // CreateDnodeSql, + // CreateMnodeSql, + // CreateDnodeInfoSql, + // CreateDataDirSql, + // CreateLogDirSql, + // CreateTempDirSql, + // CreateVgroupsInfoSql, + // CreateVnodeRoleSql, + // CreateSummarySql, + // CreateGrantInfoSql, + CreateKeeperSql, +} + +type Reporter struct { + username string + password string + host string + port int + usessl bool + dbname string + databaseOptions map[string]interface{} + totalRep atomic.Value +} + +func NewReporter(conf *config.Config) *Reporter { + r := &Reporter{ + username: conf.TDengine.Username, + password: conf.TDengine.Password, + host: conf.TDengine.Host, + port: conf.TDengine.Port, + usessl: conf.TDengine.Usessl, + dbname: conf.Metrics.Database.Name, + databaseOptions: conf.Metrics.Database.Options, + } + r.totalRep.Store(0) + return r +} + +func (r *Reporter) Init(c gin.IRouter) { + c.POST("report", r.handlerFunc()) + r.createDatabase() + r.creatTables() + // todo: it can delete in the future. + if r.shouldDetectFields() { + r.detectGrantInfoFieldType() + r.detectClusterInfoFieldType() + r.detectVgroupsInfoType() + } +} + +func (r *Reporter) getConn() *db.Connector { + conn, err := db.NewConnector(r.username, r.password, r.host, r.port, r.usessl) + if err != nil { + qid := util.GetQidOwn() + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + logger.Errorf("connect to database error, msg:%s", err) + panic(err) + } + return conn +} + +func (r *Reporter) detectGrantInfoFieldType() { + // `expire_time` `timeseries_used` `timeseries_total` in table `grant_info` changed to bigint from TS-3003. + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "grants_info", "expire_time", "bigint") + r.detectFieldType(ctx, conn, "grants_info", "timeseries_used", "bigint") + r.detectFieldType(ctx, conn, "grants_info", "timeseries_total", "bigint") + if r.tagExist(ctx, conn, "grants_info", "dnode_id") { + r.dropTag(ctx, conn, "grants_info", "dnode_id") + } + if r.tagExist(ctx, conn, "grants_info", "dnode_ep") { + r.dropTag(ctx, conn, "grants_info", "dnode_ep") + } +} + +func (r *Reporter) detectClusterInfoFieldType() { + // `tbs_total` in table `cluster_info` changed to bigint from TS-3003. 
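+	// detectFieldType below drops and re-adds the column as bigint if its stored type is still INT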
+ ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "cluster_info", "tbs_total", "bigint") + + // add column `topics_total` and `streams_total` from TD-22032 + // if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "topics_total"); !exists { + // logger.Warningf("## %s.cluster_info.topics_total not exists, will add it", r.dbname) + // r.addColumn(ctx, conn, "cluster_info", "topics_total", "int") + // } + // if exists, _ := r.columnInfo(ctx, conn, "cluster_info", "streams_total"); !exists { + // logger.Warningf("## %s.cluster_info.streams_total not exists, will add it", r.dbname) + // r.addColumn(ctx, conn, "cluster_info", "streams_total", "int") + // } +} + +func (r *Reporter) detectVgroupsInfoType() { + // `tables_num` in table `vgroups_info` changed to bigint from TS-3003. + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + r.detectFieldType(ctx, conn, "vgroups_info", "tables_num", "bigint") +} + +func (r *Reporter) detectFieldType(ctx context.Context, conn *db.Connector, table, field, fieldType string) { + _, colType := r.columnInfo(ctx, conn, table, field) + if colType == "INT" { + logger.Warningf("%s.%s.%s type is %s, will change to %s", r.dbname, table, field, colType, fieldType) + // drop column `tables_num` + r.dropColumn(ctx, conn, table, field) + + // add column `tables_num` + r.addColumn(ctx, conn, table, field, fieldType) + } +} + +func (r *Reporter) shouldDetectFields() bool { + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + version, err := r.serverVersion(ctx, conn) + if err != nil { + logger.Errorf("get server version error:%s", err) + return false + } + + // if server version is less than v3.0.3.0, should not detect fields. + versions := strings.Split(version, ".") + if len(versions) < 4 { + logger.Errorf("get server version error. version:%s", version) + return false + } + + v1, _ := strconv.Atoi(versions[0]) + v2, _ := strconv.Atoi(versions[1]) + v3, _ := strconv.Atoi(versions[2]) + + if v1 > 3 || (v1 == 3 && (v2 > 0 || v3 >= 3)) { + return true + } + + return false +} + +func (r *Reporter) serverVersion(ctx context.Context, conn *db.Connector) (version string, err error) { + res, err := conn.Query(ctx, "select server_version()", util.GetQidOwn()) + if err != nil { + logger.Errorf("get server version error, msg:%s", err) + return + } + + if len(res.Data) == 0 { + logger.Errorf("get server version error. response:%+v", res) + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get server version error. response:%+v", res) + return + } + + version = res.Data[0][0].(string) + + return +} + +func (r *Reporter) columnInfo(ctx context.Context, conn *db.Connector, table string, field string) (exists bool, colType string) { + res, err := conn.Query(ctx, fmt.Sprintf("select col_type from information_schema.ins_columns where table_name='%s' and db_name='%s' and col_name='%s'", table, r.dbname, field), util.GetQidOwn()) + if err != nil { + logger.Errorf("get %s field type error, msg:%s", r.dbname, err) + panic(err) + } + + if len(res.Data) == 0 { + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get field type for %s error. response:%+v", table, res) + panic(fmt.Sprintf("get field type for %s error. 
response:%+v", table, res)) + } + + exists = true + colType = res.Data[0][0].(string) + colType = strings.ToUpper(colType) + return +} + +func (r *Reporter) tagExist(ctx context.Context, conn *db.Connector, stable string, tag string) (exists bool) { + res, err := conn.Query(ctx, fmt.Sprintf("select tag_name from information_schema.ins_tags where stable_name='%s' and db_name='%s' and tag_name='%s'", stable, r.dbname, tag), util.GetQidOwn()) + if err != nil { + logger.Errorf("get %s tag_name error, msg:%s", r.dbname, err) + panic(err) + } + + if len(res.Data) == 0 { + exists = false + return + } + + if len(res.Data) != 1 && len(res.Data[0]) != 1 { + logger.Errorf("get tag_name for %s error. response:%+v", stable, res) + panic(fmt.Sprintf("get tag_name for %s error. response:%+v", stable, res)) + } + + exists = true + return +} + +func (r *Reporter) dropColumn(ctx context.Context, conn *db.Connector, table string, field string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s drop column %s", r.dbname, table, field), util.GetQidOwn()); err != nil { + logger.Errorf("drop column %s from table %s error, msg:%s", field, table, err) + panic(err) + } +} + +func (r *Reporter) dropTag(ctx context.Context, conn *db.Connector, stable string, tag string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter stable %s.%s drop tag %s", r.dbname, stable, tag), util.GetQidOwn()); err != nil { + logger.Errorf("drop tag %s from stable %s error, msg:%s", tag, stable, err) + panic(err) + } +} + +func (r *Reporter) addColumn(ctx context.Context, conn *db.Connector, table string, field string, fieldType string) { + if _, err := conn.Exec(ctx, fmt.Sprintf("alter table %s.%s add column %s %s", r.dbname, table, field, fieldType), util.GetQidOwn()); err != nil { + logger.Errorf("add column %s to table %s error, msg:%s", field, table, err) + panic(err) + } +} + +func (r *Reporter) createDatabase() { + ctx := context.Background() + conn := r.getConn() + defer r.closeConn(conn) + + createDBSql := r.generateCreateDBSql() + logger.Warningf("create database sql: %s", createDBSql) + + if _, err := conn.Exec(ctx, createDBSql, util.GetQidOwn()); err != nil { + logger.Errorf("create database %s error, msg:%v", r.dbname, err) + panic(err) + } +} + +func (r *Reporter) generateCreateDBSql() string { + var buf bytes.Buffer + buf.WriteString("create database if not exists ") + buf.WriteString(r.dbname) + + for k, v := range r.databaseOptions { + buf.WriteString(" ") + buf.WriteString(k) + switch v := v.(type) { + case string: + buf.WriteString(fmt.Sprintf(" '%s'", v)) + default: + buf.WriteString(fmt.Sprintf(" %v", v)) + } + buf.WriteString(" ") + } + return buf.String() +} + +func (r *Reporter) creatTables() { + ctx := context.Background() + conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + defer r.closeConn(conn) + + for _, createSql := range createList { + logger.Infof("execute sql:%s", createSql) + if _, err = conn.Exec(ctx, createSql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql:%s, error:%s", createSql, err) + } + } +} + +func (r *Reporter) closeConn(conn *db.Connector) { + if err := conn.Close(); err != nil { + logger.Errorf("close connection error, msg:%s", err) + } +} + +func (r *Reporter) handlerFunc() gin.HandlerFunc { + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: 
qid}, + ) + + r.recordTotalRep() + // data parse + data, err := c.GetRawData() + if err != nil { + logger.Errorf("receiving taosd data error, msg:%s", err) + return + } + var report Report + + logger.Tracef("report data:%s", string(data)) + if e := json.Unmarshal(data, &report); e != nil { + logger.Errorf("error occurred while unmarshaling request, data:%s, error:%s", data, e) + return + } + var sqls []string + if report.ClusterInfo != nil { + sqls = append(sqls, insertClusterInfoSql(*report.ClusterInfo, report.ClusterID, report.Protocol, report.Ts)...) + } + sqls = append(sqls, insertDnodeSql(report.DnodeInfo, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)) + if report.GrantInfo != nil { + sqls = append(sqls, insertGrantSql(*report.GrantInfo, report.DnodeID, report.ClusterID, report.Ts)) + } + sqls = append(sqls, insertDataDirSql(report.DiskInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...) + for _, group := range report.VgroupInfos { + sqls = append(sqls, insertVgroupSql(group, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)...) + } + sqls = append(sqls, insertLogSummary(report.LogInfos, report.DnodeID, report.DnodeEp, report.ClusterID, report.Ts)) + + conn, err := db.NewConnectorWithDb(r.username, r.password, r.host, r.port, r.dbname, r.usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + defer r.closeConn(conn) + ctx := context.Background() + + for _, sql := range sqls { + logger.Tracef("execute sql:%s", sql) + if _, err := conn.Exec(ctx, sql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql error, sql:%s, error:%s", sql, err) + } + } + } +} + +func (r *Reporter) recordTotalRep() { + // reload the current value on each attempt so a failed CAS retries against fresh state + for i := 0; i < 3; i++ { + old := r.totalRep.Load().(int) + if r.totalRep.CompareAndSwap(old, old+1) { + break + } + } +} + +func (r *Reporter) GetTotalRep() *atomic.Value { + return &r.totalRep +} + +func insertClusterInfoSql(info ClusterInfo, ClusterID string, protocol int, ts string) []string { + var sqls []string + var dtotal, dalive, mtotal, malive int + for _, dnode := range info.Dnodes { + sqls = append(sqls, fmt.Sprintf("insert into d_info_%s using d_info tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(dnode.DnodeID), dnode.DnodeID, dnode.DnodeEp, ClusterID, ts, dnode.Status)) + dtotal++ + if "ready" == dnode.Status { + dalive++ + } + } + + for _, mnode := range info.Mnodes { + sqls = append(sqls, fmt.Sprintf("insert into m_info_%s using m_info tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(mnode.MnodeID), mnode.MnodeID, mnode.MnodeEp, ClusterID, ts, mnode.Role)) + mtotal++ + //LEADER FOLLOWER CANDIDATE ERROR + if "ERROR" != mnode.Role { + malive++ + } + } + + sqls = append(sqls, fmt.Sprintf( + "insert into cluster_info_%s using cluster_info tags('%s') (ts, first_ep, first_ep_dnode_id, version, "+ + "master_uptime, monitor_interval, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, "+ + "mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, "+ + "topics_total, streams_total, protocol) values ('%s', '%s', %d, '%s', %f, %d, %d, %d, %d, %d, %d, %d, %d, "+ + "%d, %d, %d, %d, %d, %d, %d, %d)", + ClusterID, ClusterID, ts, info.FirstEp, info.FirstEpDnodeID, info.Version, info.MasterUptime, info.MonitorInterval, + info.DbsTotal, info.TbsTotal, info.StbsTotal, dtotal, dalive, mtotal, malive, info.VgroupsTotal, info.VgroupsAlive, + info.VnodesTotal, info.VnodesAlive, info.ConnectionsTotal, info.TopicsTotal,
info.StreamsTotal, protocol)) + return sqls +} + +func insertDnodeSql(info DnodeInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string { + return fmt.Sprintf("insert into dnode_info_%s using dnodes_info tags (%d, '%s', '%s') values ('%s', %f, %f, %f, %f, %d, %d, %d, %d, %d, %d, %f, %f, %f, %f, %f, %f, %d, %f, %d, %d, %f, %d, %d, %f, %d, %d, %d, %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, info.Uptime, info.CPUEngine, info.CPUSystem, info.CPUCores, info.MemEngine, info.MemSystem, info.MemTotal, + info.DiskEngine, info.DiskUsed, info.DiskTotal, info.NetIn, info.NetOut, info.IoRead, info.IoWrite, + info.IoReadDisk, info.IoWriteDisk, info.ReqSelect, info.ReqSelectRate, info.ReqInsert, info.ReqInsertSuccess, + info.ReqInsertRate, info.ReqInsertBatch, info.ReqInsertBatchSuccess, info.ReqInsertBatchRate, info.Errors, + info.VnodesNum, info.Masters, info.HasMnode, info.HasQnode, info.HasSnode, info.HasBnode) +} + +func insertDataDirSql(disk DiskInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string { + var sqls []string + for _, data := range disk.Datadir { + sqls = append(sqls, + fmt.Sprintf("insert into data_dir_%s using data_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, data.Name, data.Level, data.Avail.IntPart(), data.Used.IntPart(), data.Total.IntPart()), + ) + } + sqls = append(sqls, + fmt.Sprintf("insert into log_dir_%s using log_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, disk.Logdir.Name, disk.Logdir.Avail.IntPart(), disk.Logdir.Used.IntPart(), disk.Logdir.Total.IntPart()), + fmt.Sprintf("insert into temp_dir_%s using temp_dir tags (%d, '%s', '%s') values ('%s', '%s', %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, + ts, disk.Tempdir.Name, disk.Tempdir.Avail.IntPart(), disk.Tempdir.Used.IntPart(), disk.Tempdir.Total.IntPart()), + ) + return sqls +} + +func insertVgroupSql(g VgroupInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) []string { + var sqls []string + sqls = append(sqls, fmt.Sprintf("insert into vgroups_info_%s using vgroups_info tags (%d, '%s', '%s') "+ + "(ts, vgroup_id, database_name, tables_num, status) values ('%s', %d, '%s', %d, '%s')", + ClusterID+strconv.Itoa(DnodeID)+strconv.Itoa(g.VgroupID), DnodeID, DnodeEp, ClusterID, + ts, g.VgroupID, g.DatabaseName, g.TablesNum, g.Status)) + for _, v := range g.Vnodes { + sqls = append(sqls, fmt.Sprintf("insert into vnodes_role_%s using vnodes_role tags (%d, '%s', '%s') values ('%s', '%s')", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, v.VnodeRole)) + } + return sqls +} + +func insertLogSummary(log LogInfo, DnodeID int, DnodeEp string, ClusterID string, ts string) string { + var e, info, debug, trace int + for _, s := range log.Summary { + switch s.Level { + case "error": + e = s.Total + case "info": + info = s.Total + case "debug": + debug = s.Total + case "trace": + trace = s.Total + } + } + return fmt.Sprintf("insert into log_summary_%s using log_summary tags (%d, '%s', '%s') values ('%s', %d, %d, %d, %d)", + ClusterID+strconv.Itoa(DnodeID), DnodeID, DnodeEp, ClusterID, ts, e, info, debug, trace) +} + +func insertGrantSql(g GrantInfo, DnodeID int, ClusterID string, ts string) string { + return fmt.Sprintf("insert into grants_info_%s using grants_info tags ('%s') (ts, expire_time, "+ + "timeseries_used, 
timeseries_total) values ('%s', %d, %d, %d)", ClusterID+strconv.Itoa(DnodeID), ClusterID, ts, g.ExpireTime, g.TimeseriesUsed, g.TimeseriesTotal) +} diff --git a/tools/keeper/api/tables.go b/tools/keeper/api/tables.go new file mode 100644 index 0000000000..90f0e09721 --- /dev/null +++ b/tools/keeper/api/tables.go @@ -0,0 +1,286 @@ +package api + +import ( + "strconv" + + "github.com/shopspring/decimal" +) + +type Report struct { + Ts string `json:"ts"` + DnodeID int `json:"dnode_id"` + DnodeEp string `json:"dnode_ep"` + ClusterID string `json:"cluster_id"` + Protocol int `json:"protocol"` + ClusterInfo *ClusterInfo `json:"cluster_info"` // only reported by master + StbInfos []StbInfo `json:"stb_infos"` + VgroupInfos []VgroupInfo `json:"vgroup_infos"` // only reported by master + GrantInfo *GrantInfo `json:"grant_info"` // only reported by master + DnodeInfo DnodeInfo `json:"dnode_info"` + DiskInfos DiskInfo `json:"disk_infos"` + LogInfos LogInfo `json:"log_infos"` +} + +type ClusterInfo struct { + FirstEp string `json:"first_ep"` + FirstEpDnodeID int `json:"first_ep_dnode_id"` + Version string `json:"version"` + MasterUptime float32 `json:"master_uptime"` + MonitorInterval int `json:"monitor_interval"` + DbsTotal int `json:"dbs_total"` + TbsTotal int64 `json:"tbs_total"` // change to bigint since TS-3003 + StbsTotal int `json:"stbs_total"` + VgroupsTotal int `json:"vgroups_total"` + VgroupsAlive int `json:"vgroups_alive"` + VnodesTotal int `json:"vnodes_total"` + VnodesAlive int `json:"vnodes_alive"` + ConnectionsTotal int `json:"connections_total"` + TopicsTotal int `json:"topics_total"` + StreamsTotal int `json:"streams_total"` + Dnodes []Dnode `json:"dnodes"` + Mnodes []Mnode `json:"mnodes"` +} + +var dnodeEpLen = strconv.Itoa(255) + +var CreateClusterInfoSql = "create table if not exists cluster_info (" + + "ts timestamp, " + + "first_ep binary(134), " + + "first_ep_dnode_id int, " + + "version binary(12), " + + "master_uptime float, " + + "monitor_interval int, " + + "dbs_total int, " + + "tbs_total bigint, " + // change to bigint since TS-3003 + "stbs_total int, " + + "dnodes_total int, " + + "dnodes_alive int, " + + "mnodes_total int, " + + "mnodes_alive int, " + + "vgroups_total int, " + + "vgroups_alive int, " + + "vnodes_total int, " + + "vnodes_alive int, " + + "connections_total int, " + + "topics_total int, " + + "streams_total int, " + + "protocol int " + + ") tags (cluster_id nchar(32))" + +type Dnode struct { + DnodeID int `json:"dnode_id"` + DnodeEp string `json:"dnode_ep"` + Status string `json:"status"` +} + +var CreateDnodeSql = "create table if not exists d_info (" + + "ts timestamp, " + + "status binary(10)" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type Mnode struct { + MnodeID int `json:"mnode_id"` + MnodeEp string `json:"mnode_ep"` + Role string `json:"role"` +} + +var CreateMnodeSql = "create table if not exists m_info (" + + "ts timestamp, " + + "role binary(10)" + + ") tags (mnode_id int, mnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type DnodeInfo struct { + Uptime float32 `json:"uptime"` + CPUEngine float32 `json:"cpu_engine"` + CPUSystem float32 `json:"cpu_system"` + CPUCores float32 `json:"cpu_cores"` + MemEngine int `json:"mem_engine"` + MemSystem int `json:"mem_system"` + MemTotal int `json:"mem_total"` + DiskEngine int64 `json:"disk_engine"` + DiskUsed int64 `json:"disk_used"` + DiskTotal int64 `json:"disk_total"` + NetIn float32 `json:"net_in"` + NetOut float32 `json:"net_out"` + IoRead 
float32 `json:"io_read"` + IoWrite float32 `json:"io_write"` + IoReadDisk float32 `json:"io_read_disk"` + IoWriteDisk float32 `json:"io_write_disk"` + ReqSelect int `json:"req_select"` + ReqSelectRate float32 `json:"req_select_rate"` + ReqInsert int `json:"req_insert"` + ReqInsertSuccess int `json:"req_insert_success"` + ReqInsertRate float32 `json:"req_insert_rate"` + ReqInsertBatch int `json:"req_insert_batch"` + ReqInsertBatchSuccess int `json:"req_insert_batch_success"` + ReqInsertBatchRate float32 `json:"req_insert_batch_rate"` + Errors int `json:"errors"` + VnodesNum int `json:"vnodes_num"` + Masters int `json:"masters"` + HasMnode int8 `json:"has_mnode"` + HasQnode int8 `json:"has_qnode"` + HasSnode int8 `json:"has_snode"` + HasBnode int8 `json:"has_bnode"` +} + +var CreateDnodeInfoSql = "create table if not exists dnodes_info (" + + "ts timestamp, " + + "uptime float, " + + "cpu_engine float, " + + "cpu_system float, " + + "cpu_cores float, " + + "mem_engine int, " + + "mem_system int, " + + "mem_total int, " + + "disk_engine bigint, " + + "disk_used bigint, " + + "disk_total bigint, " + + "net_in float, " + + "net_out float, " + + "io_read float, " + + "io_write float, " + + "io_read_disk float, " + + "io_write_disk float, " + + "req_select int, " + + "req_select_rate float, " + + "req_insert int, " + + "req_insert_success int, " + + "req_insert_rate float, " + + "req_insert_batch int, " + + "req_insert_batch_success int, " + + "req_insert_batch_rate float, " + + "errors int, " + + "vnodes_num int, " + + "masters int, " + + "has_mnode int, " + + "has_qnode int, " + + "has_snode int, " + + "has_bnode int " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type DiskInfo struct { + Datadir []DataDir `json:"datadir"` + Logdir LogDir `json:"logdir"` + Tempdir TempDir `json:"tempdir"` +} + +type DataDir struct { + Name string `json:"name"` + Level int `json:"level"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateDataDirSql = "create table if not exists data_dir (" + + "ts timestamp, " + + "name nchar(200), " + + "`level` int, " + + "avail bigint, " + + "used bigint, " + + "total bigint" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type LogDir struct { + Name string `json:"name"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateLogDirSql = "create table if not exists log_dir (" + + "ts timestamp, " + + "name nchar(200), " + + "avail bigint, " + + "used bigint, " + + "total bigint" + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type TempDir struct { + Name string `json:"name"` + Avail decimal.Decimal `json:"avail"` + Used decimal.Decimal `json:"used"` + Total decimal.Decimal `json:"total"` +} + +var CreateTempDirSql = "create table if not exists temp_dir(" + + "ts timestamp, " + + "name nchar(200), " + + "avail bigint, " + + "used bigint, " + + "total bigint " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type StbInfo struct { + StbName string `json:"stb_name"` + DataBaseName string `json:"database_name"` +} + +type VgroupInfo struct { + VgroupID int `json:"vgroup_id"` + DatabaseName string `json:"database_name"` + TablesNum int64 `json:"tables_num"` + Status string `json:"status"` + Vnodes []Vnode `json:"vnodes"` +} + +var CreateVgroupsInfoSql = "create table 
if not exists vgroups_info (" + + "ts timestamp, " + + "vgroup_id int, " + + "database_name binary(33), " + + "tables_num bigint, " + // change to bigint since TS-3003 + "status binary(512) " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type Vnode struct { + DnodeID int `json:"dnode_id"` + VnodeRole string `json:"vnode_role"` +} + +var CreateVnodeRoleSql = "create table if not exists vnodes_role (" + + "ts timestamp, " + + "vnode_role binary(10) " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type LogInfo struct { + Summary []Summary `json:"summary"` +} + +type Log struct { + Ts string `json:"ts"` + Level string `json:"level"` + Content string `json:"content"` +} + +type Summary struct { + Level string `json:"level"` + Total int `json:"total"` +} + +var CreateSummarySql = "create table if not exists log_summary(" + + "ts timestamp, " + + "error int, " + + "info int, " + + "debug int, " + + "trace int " + + ") tags (dnode_id int, dnode_ep nchar(" + dnodeEpLen + "), cluster_id nchar(32))" + +type GrantInfo struct { + ExpireTime int64 `json:"expire_time"` + TimeseriesUsed int64 `json:"timeseries_used"` + TimeseriesTotal int64 `json:"timeseries_total"` +} + +var CreateGrantInfoSql = "create table if not exists grants_info(" + + "ts timestamp, " + + "expire_time bigint, " + + "timeseries_used bigint, " + + "timeseries_total bigint " + + ") tags (cluster_id nchar(32))" + +var CreateKeeperSql = "create table if not exists keeper_monitor (" + + "ts timestamp, " + + "cpu float, " + + "mem float, " + + "total_reports int " + + ") tags (identify nchar(50))" diff --git a/tools/keeper/api/zabbix.go b/tools/keeper/api/zabbix.go new file mode 100644 index 0000000000..8b7cb75992 --- /dev/null +++ b/tools/keeper/api/zabbix.go @@ -0,0 +1,113 @@ +package api + +import ( + "net/http" + "sort" + "strings" + + "github.com/gin-gonic/gin" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/util/pool" +) + +type Zabbix struct { + processor *process.Processor + floatGroup []*process.Metric + strGroup []*process.Metric +} + +func NewZabbix(processor *process.Processor) *Zabbix { + z := &Zabbix{processor: processor} + z.processorMetrics() + return z +} + +type zabbixMetric struct { + Data []*ZMetric `json:"data"` +} + +type ZMetric struct { + Metric string `json:"{#METRIC}"` + Key string `json:"key"` + Value interface{} `json:"value"` +} + +const ( + FloatType = iota + 1 + StringType +) + +func (z *Zabbix) Init(c gin.IRouter) { + api := c.Group("zabbix") + api.GET("float", z.getFloat) + api.GET("string", z.getString) +} + +func (z *Zabbix) getFloat(c *gin.Context) { + z.returnData(c, FloatType) +} + +func (z *Zabbix) getString(c *gin.Context) { + z.returnData(c, StringType) +} + +func (z *Zabbix) returnData(c *gin.Context, valueType int) { + var metrics []*process.Metric + switch valueType { + case FloatType: + metrics = z.floatGroup + case StringType: + metrics = z.strGroup + } + var d zabbixMetric + b := pool.BytesPoolGet() + defer pool.BytesPoolPut(b) + for _, metric := range metrics { + values := metric.GetValue() + for _, value := range values { + label := z.sortLabel(value.Label) + b.Reset() + b.WriteString(metric.FQName) + if len(label) > 0 { + b.WriteByte(',') + b.WriteString(label) + } + metricName := b.String() + d.Data = append(d.Data, &ZMetric{ + Metric: metricName, + Key: metricName, + Value: value.Value, + }) + } + } + c.JSON(http.StatusOK, d) +} + +func (z *Zabbix) sortLabel(labels 
map[string]string) string { + if len(labels) == 0 { + return "" + } + result := make([]string, 0, len(labels)) + b := pool.BytesPoolGet() + defer pool.BytesPoolPut(b) + for k, v := range labels { + b.Reset() + b.WriteString(k) + b.WriteByte('=') + b.WriteString(v) + result = append(result, b.String()) + } + sort.Strings(result) + return strings.Join(result, "_") +} + +func (z *Zabbix) processorMetrics() { + metrics := z.processor.GetMetric() + for _, metric := range metrics { + if metric.Type == process.Gauge || metric.Type == process.Counter { + z.floatGroup = append(z.floatGroup, metric) + } else if metric.Type == process.Info { + z.strGroup = append(z.strGroup, metric) + } + } +} diff --git a/tools/keeper/cmd/command.go b/tools/keeper/cmd/command.go new file mode 100644 index 0000000000..82d3efea1f --- /dev/null +++ b/tools/keeper/cmd/command.go @@ -0,0 +1,461 @@ +package cmd + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "strconv" + "sync" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("CMD") + +var MAX_SQL_LEN = 1000000 + +type Command struct { + fromTime time.Time + client *http.Client + conn *db.Connector + username string + password string + url *url.URL +} + +func NewCommand(conf *config.Config) *Command { + client := &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + }).DialContext, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + DisableCompression: true, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, + }, + }, + } + + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("init db connect error, msg:%s", err) + panic(err) + } + + imp := &Command{ + client: client, + conn: conn, + username: conf.TDengine.Username, + password: conf.TDengine.Password, + url: &url.URL{ + Scheme: "http", + Host: fmt.Sprintf("%s:%d", conf.TDengine.Host, conf.TDengine.Port), + Path: "/influxdb/v1/write", + RawQuery: fmt.Sprintf("db=%s&precision=ms", conf.Metrics.Database.Name), + }, + } + return imp +} + +func (cmd *Command) Process(conf *config.Config) { + if len(conf.Transfer) > 0 && len(conf.Drop) > 0 { + logger.Errorf("transfer and drop can't be set at the same time") + return + } + + if len(conf.Transfer) > 0 && conf.Transfer != "old_taosd_metric" { + logger.Errorf("transfer only support old_taosd_metric") + return + } + + if conf.Transfer == "old_taosd_metric" { + cmd.ProcessTransfer(conf) + return + } + + if len(conf.Drop) > 0 && conf.Drop != "old_taosd_metric_stables" { + logger.Errorf("drop only support old_taosd_metric_stables") + return + } + + if conf.Drop == "old_taosd_metric_stables" { + cmd.ProcessDrop(conf) + return + } +} + +func (cmd *Command) ProcessTransfer(conf *config.Config) { + fromTime, err := time.Parse("2006-01-02T15:04:05Z07:00", conf.FromTime) + if err != nil { + logger.Errorf("parse fromTime error, msg:%s", err) + return + } + cmd.fromTime = fromTime + + funcs := []func() error{ + 
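+ // Every closure in this list migrates one legacy stable into the new
+ // taosd_* schema. They run concurrently on the shared goroutine pool and
+ // are awaited through the WaitGroup below; their error returns are dropped
+ // by the submitted wrapper, so a failed transfer is only visible in its own logs.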
cmd.TransferTaosdClusterBasicInfo, + cmd.TransferTaosdClusterInfo, + cmd.TransferTaosdVgroupsInfo, + cmd.TransferTaosdDnodesInfo, + cmd.TransferTaosdDnodesStatus, + cmd.TransferTaosdDnodesLogDirs1, + cmd.TransferTaosdDnodesLogDirs2, + cmd.TransferTaosdDnodesDataDirs, + cmd.TransferTaosdMnodesInfo, + cmd.TransferTaosdVnodesInfo, + } + wg := sync.WaitGroup{} + wg.Add(len(funcs)) + + for i := range funcs { + index := i + err := pool.GoroutinePool.Submit(func() { + defer wg.Done() + funcs[index]() + }) + + if err != nil { + panic(err) + } + } + + wg.Wait() + logger.Info("transfer all old taosd metric success!!") +} + +func (cmd *Command) TransferTaosdClusterInfo() error { + sql := "select a.cluster_id, master_uptime * 3600 * 24 as cluster_uptime, dbs_total, tbs_total, stbs_total, dnodes_total, dnodes_alive, mnodes_total, mnodes_alive, vgroups_total, vgroups_alive, vnodes_total, vnodes_alive, connections_total, topics_total, streams_total, b.expire_time as grants_expire_time, b.timeseries_used as grants_timeseries_used, b.timeseries_total as grants_timeseries_total, a.ts from cluster_info a, grants_info b where a.ts = b.ts and a.cluster_id = b.cluster_id and" + dstTable := "taosd_cluster_info" + return cmd.TransferTableToDst(sql, dstTable, 1) +} + +func (cmd *Command) TransferTaosdVgroupsInfo() error { + sql := "select cluster_id, vgroup_id, database_name, tables_num, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from vgroups_info a where " + dstTable := "taosd_vgroups_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdDnodesInfo() error { + sql := "select a.cluster_id, a.dnode_id, a.dnode_ep, uptime * 3600 * 24 as uptime, cpu_engine, cpu_system, cpu_cores, mem_engine, mem_system as mem_free, mem_total, disk_used, disk_total, disk_engine, net_in as system_net_in, net_out as system_net_out, io_read, io_write, io_read_disk, io_write_disk, vnodes_num, masters, has_mnode, has_qnode, has_snode, has_bnode, errors, b.error as error_log_count, b.info as info_log_count, b.debug as debug_log_count, b.trace as trace_log_count, a.ts as ts from dnodes_info a, log_summary b where a.ts = b.ts and a.dnode_id = b.dnode_id and a. 
dnode_ep = b.dnode_ep and " + dstTable := "taosd_dnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} +func (cmd *Command) TransferTaosdDnodesStatus() error { + sql := "select cluster_id, dnode_id, dnode_ep, CASE status WHEN 'ready' THEN 1 ELSE 0 END as status, ts from d_info a where " + dstTable := "taosd_dnodes_status" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdDnodesLogDirs1() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from log_dir a where " + dstTable := "taosd_dnodes_log_dirs" + return cmd.TransferTableToDst(sql, dstTable, 4) +} +func (cmd *Command) TransferTaosdDnodesLogDirs2() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as log_dir_name, avail, used, total, ts from temp_dir a where " + dstTable := "taosd_dnodes_log_dirs" + return cmd.TransferTableToDst(sql, dstTable, 4) +} + +func (cmd *Command) TransferTaosdDnodesDataDirs() error { + sql := "select cluster_id, dnode_id, dnode_ep, name as data_dir_name, `level` as data_dir_level, avail, used, total, ts from data_dir a where " + dstTable := "taosd_dnodes_data_dirs" + return cmd.TransferTableToDst(sql, dstTable, 5) +} + +func (cmd *Command) TransferTaosdMnodesInfo() error { + sql := "select cluster_id, mnode_id, mnode_ep, CASE role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from m_info a where " + dstTable := "taosd_mnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 3) +} + +func (cmd *Command) TransferTaosdVnodesInfo() error { + sql := "select cluster_id, 0 as vgroup_id, 'UNKNOWN' as database_name, dnode_id, CASE vnode_role WHEN 'offline' THEN 0 WHEN 'follower' THEN 100 WHEN 'candidate' THEN 101 WHEN 'leader' THEN 102 WHEN 'learner' THEN 104 ELSE 103 END as role, ts from vnodes_role a where " + dstTable := "taosd_vnodes_info" + return cmd.TransferTableToDst(sql, dstTable, 4) +} + +func (cmd *Command) ProcessDrop(conf *config.Config) { + var dropStableList = []string{ + "log_dir", + "dnodes_info", + "data_dir", + "log_summary", + "m_info", + "vnodes_role", + "cluster_info", + "temp_dir", + "grants_info", + "vgroups_info", + "d_info", + "taosadapter_system_cpu_percent", + "taosadapter_restful_http_request_in_flight", + "taosadapter_restful_http_request_summary_milliseconds", + "taosadapter_restful_http_request_fail", + "taosadapter_system_mem_percent", + "taosadapter_restful_http_request_total", + } + ctx := context.Background() + logger.Infof("use database:%s", conf.Metrics.Database.Name) + + for _, stable := range dropStableList { + if _, err := cmd.conn.Exec(ctx, "DROP STABLE IF EXISTS "+stable, util.GetQidOwn()); err != nil { + logger.Errorf("drop stable %s, error:%s", stable, err) + panic(err) + } + } + logger.Info("drop old taosd metric stables success!!") +} + +func (cmd *Command) TransferDataToDest(data *db.Data, dstTable string, tagNum int) { + + var buf bytes.Buffer + + if len(data.Data) < 1 { + return + } + + for _, row := range data.Data { + // get one row here + buf.WriteString(dstTable) + + // write tags + var tag string + for j := 0; j < tagNum; j++ { + switch v := row[j].(type) { + case int: + tag = fmt.Sprint(v) + case int32: + tag = fmt.Sprint(v) + case int64: + tag = fmt.Sprint(v) + case string: + tag = v + default: + panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j])) + } + + if tag != "" { + buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], 
util.EscapeInfluxProtocol(tag))) + } else { + buf.WriteString(fmt.Sprintf(",%s=%s", data.Head[j], "unknown")) + logger.Errorf("tag value is empty, tag_name:%s", data.Head[j]) + } + } + buf.WriteString(" ") + + // write metrics + for j := tagNum; j < len(row)-1; j++ { + + switch v := row[j].(type) { + case int: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case int32: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case int64: + buf.WriteString(fmt.Sprintf("%s=%ff64", data.Head[j], float64(v))) + case float32: + buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(float64(v), 'f', -1, 64))) + case float64: + buf.WriteString(fmt.Sprintf("%s=%sf64", data.Head[j], strconv.FormatFloat(v, 'f', -1, 64))) + default: + panic(fmt.Sprintf("Unexpected type for row[%d]: %T", j, row[j])) + } + + if j != len(row)-2 { + buf.WriteString(",") + } + } + + // write timestamp + buf.WriteString(" ") + buf.WriteString(fmt.Sprintf("%v", row[len(row)-1].(time.Time).UnixMilli())) + buf.WriteString("\n") + + if buf.Len() >= MAX_SQL_LEN { + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logger.Tracef("buf:%v", buf.String()) + } + err := cmd.lineWriteBody(&buf) + if err != nil { + logger.Errorf("insert data error, msg:%s", err) + panic(err) + } + buf.Reset() + } + } + + if buf.Len() > 0 { + if logger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logger.Tracef("buf:%v", buf.String()) + } + err := cmd.lineWriteBody(&buf) + if err != nil { + logger.Errorf("insert data error, msg:%s", err) + panic(err) + } + } +} + +// cluster_info +func (cmd *Command) TransferTaosdClusterBasicInfo() error { + + ctx := context.Background() + + endTime := time.Now() + delta := time.Hour * 24 * 10 + + var createTableSql = "create stable if not exists taosd_cluster_basic " + + "(ts timestamp, first_ep varchar(100), first_ep_dnode_id INT, cluster_version varchar(20)) " + + "tags (cluster_id varchar(50))" + + if _, err := cmd.conn.Exec(ctx, createTableSql, util.GetQidOwn()); err != nil { + logger.Errorf("create taosd_cluster_basic error, msg:%s", err) + return err + } + + logger.Tracef("fromTime:%d", cmd.fromTime.UnixMilli()) + + for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) { + querySql := fmt.Sprintf("select cluster_id, first_ep, first_ep_dnode_id, `version` as cluster_version, ts from cluster_info where ts > %d and ts <= %d", + current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli()) + logger.Tracef("query sql:%s", querySql) + data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn()) + if err != nil { + logger.Errorf("query cluster_info error, msg:%s", err) + return err + } + + // transfer data to the new table; only this table needs to use an insert statement + var buf bytes.Buffer + + // use a map to split the rows into one group of rows per cluster + result := make(map[string][][]interface{}) + for _, row := range data.Data { + key := row[0].(string) // use the value of the first column (cluster_id) as the key + result[key] = append(result[key], row) + } + + // migrate the data cluster by cluster, grouped by tag + for _, dataByCluster := range result { + buf.Reset() + + for _, row := range dataByCluster { + if len(buf.Bytes()) == 0 { + sql := fmt.Sprintf( + "insert into taosd_cluster_basic_%s using taosd_cluster_basic tags ('%s') values ", + row[0].(string), row[0].(string)) + + buf.WriteString(sql) + } + + sql := fmt.Sprintf( + "(%d, '%s', %d, '%s')", + row[4].(time.Time).UnixMilli(), row[1].(string), row[2].(int32), row[3].(string)) + buf.WriteString(sql) + + if buf.Len() >= MAX_SQL_LEN { + rowsAffected, err := 
cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn()) + if err != nil { + logger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + return err + } + if rowsAffected <= 0 { + logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected) + } + buf.Reset() + } + } + + if buf.Len() > 0 { + rowsAffected, err := cmd.conn.Exec(context.Background(), buf.String(), util.GetQidOwn()) + if err != nil { + logger.Errorf("insert taosd_cluster_basic error, msg:%s", err) + return err + } + if rowsAffected <= 0 { + logger.Errorf("insert taosd_cluster_basic failed, rowsAffected:%d", rowsAffected) + } + } + } + } + + logger.Info("transfer stable taosd_cluster_basic success!!") + return nil +} + +// cluster_info +func (cmd *Command) TransferTableToDst(sql string, dstTable string, tagNum int) error { + + ctx := context.Background() + + endTime := time.Now() + delta := time.Hour * 24 * 10 + + logger.Tracef("fromTime:%d", cmd.fromTime.UnixMilli()) + + for current := cmd.fromTime; current.Before(endTime); current = current.Add(time.Duration(delta)) { + querySql := fmt.Sprintf(sql+" a.ts > %d and a.ts <= %d", + current.UnixMilli(), current.Add(time.Duration(delta)).UnixMilli()) + logger.Tracef("query sql:%s", querySql) + data, err := cmd.conn.Query(ctx, querySql, util.GetQidOwn()) + if err != nil { + logger.Errorf("query cluster_info error, msg:%s", err) + return err + } + + // transfer data to new table, only this table need use insert statement + cmd.TransferDataToDest(data, dstTable, tagNum) + } + + logger.Info("transfer stable " + dstTable + " success!!") + return nil +} + +func (cmd *Command) lineWriteBody(buf *bytes.Buffer) error { + header := map[string][]string{ + "Connection": {"keep-alive"}, + } + + req := &http.Request{ + Method: http.MethodPost, + URL: cmd.url, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: header, + Host: cmd.url.Host, + } + req.SetBasicAuth(cmd.username, cmd.password) + + req.Body = io.NopCloser(buf) + resp, err := cmd.client.Do(req) + + if err != nil { + logger.Errorf("writing metrics exception, msg:%s", err) + return err + } + + defer resp.Body.Close() + if resp.StatusCode != http.StatusNoContent { + body, _ := io.ReadAll(resp.Body) + return fmt.Errorf("unexpected status code %d:body:%s", resp.StatusCode, string(body)) + } + return nil +} diff --git a/tools/keeper/cmd/empty_test.go b/tools/keeper/cmd/empty_test.go new file mode 100644 index 0000000000..143df6893c --- /dev/null +++ b/tools/keeper/cmd/empty_test.go @@ -0,0 +1,8 @@ +package cmd + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/config/metrics.sample b/tools/keeper/config/metrics.sample new file mode 100644 index 0000000000..9dbfea2323 --- /dev/null +++ b/tools/keeper/config/metrics.sample @@ -0,0 +1,38 @@ +prefix = "taos" +cluster = "localhost" +database = "log" +explicit = false + +[tables.restful_info] +[tables.slowquery] +[tables.cluster_info] +[tables.grants_info] +[tables.disks_info] + +[tables.logs] +explicit = true +[tables.logs.metrics.content] +type = "info" +help = "login types or messages" +[tables.logs.metrics.level] +type = "gauge" +help = "login level" + +[tables.dnodes_info] +[tables.dnodes_info.metrics.has_mnode] +type = "gauge" +help = "check if the node has mnode" + +[tables.vgroups_info] +column_as_variables = ["database_name", "dnode_roles", "dnode_ids"] +explicit = false + +[tables.vgroups_info.metrics.tables_num] +type = "gauge" +help = "Tables count of the vgroup" 
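+# NOTE (inferred from this sample, not a normative schema): each
+# [tables.<table>.metrics.<column>] block appears to map one column of the
+# source table to an exported metric, where `type` (gauge/counter/info) picks
+# the metric kind and `help` supplies its help text. `column_as_variables`
+# seemingly exports the listed columns as labels rather than values, and
+# `explicit = true` presumably restricts a table to the metrics listed here.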
+[tables.vgroups_info.metrics.online_vnodes] +type = "gauge" +help = "Online v-nodes of the v-group" +[tables.vgroups_info.metrics.status] +type = "info" +help = "Status of the v-group" diff --git a/tools/keeper/config/taoskeeper.toml b/tools/keeper/config/taoskeeper.toml new file mode 100644 index 0000000000..89847db2d5 --- /dev/null +++ b/tools/keeper/config/taoskeeper.toml @@ -0,0 +1,53 @@ +instanceId = 64 + +# Listen port, default is 6043 +port = 6043 + +# go pool size +gopoolsize = 50000 + +# interval for metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" +usessl = false + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# export some tables that are not super table +tables = [] + +# database for storing metrics data +[metrics.database] +name = "log" +# database options for db storing metrics data +[metrics.database.options] +vgroups = 1 +buffer = 64 +keep = 90 +cachemodel = "both" + +[environment] +# Whether running in cgroup. +incgroup = false + +[log] +# The directory where log files are stored. +# path = "/var/log/taos" +level = "info" +# Number of log file rotations before deletion. +rotationCount = 30 +# The number of days to retain log files. +keepDays = 30 +# The maximum size of a log file before rotation. +rotationSize = "1GB" +# If set to true, log files will be compressed. +compress = false +# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit. +reservedDiskSize = "1GB" diff --git a/tools/keeper/config/taoskeeper_enterprise.toml b/tools/keeper/config/taoskeeper_enterprise.toml new file mode 100644 index 0000000000..6601b60cd8 --- /dev/null +++ b/tools/keeper/config/taoskeeper_enterprise.toml @@ -0,0 +1,65 @@ +instanceId = 64 + +# Listen port, default is 6043 +port = 6043 + +# go pool size +gopoolsize = 50000 + +# interval for TDengine metrics +RotationInterval = "15s" + +[tdengine] +host = "127.0.0.1" +port = 6041 +username = "root" +password = "taosdata" +usessl = false + +[metrics] +# metrics prefix in metrics names. +prefix = "taos" + +# cluster identifier for multiple TDengine clusters +cluster = "" + +# export some tables that are not super table +tables = [] + +# database for storing metrics data +[metrics.database] +name = "log" +# database options for db storing metrics data +[metrics.database.options] +vgroups = 1 +buffer = 64 +keep = 90 +cachemodel = "both" + +[environment] +# Whether running in cgroup. +incgroup = false + +[audit] +enable = true +[audit.database] +name = "audit" +[audit.database.options] +vgroups = 1 +buffer = 16 +cachemodel = "both" + +[log] +# The directory where log files are stored. +# path = "/var/log/taos" +level = "info" +# Number of log file rotations before deletion. +rotationCount = 30 +# The number of days to retain log files. +keepDays = 30 +# The maximum size of a log file before rotation. +rotationSize = "1GB" +# If set to true, log files will be compressed. +compress = false +# Minimum disk space to reserve. Log files will not be written if disk space falls below this limit. 
+reservedDiskSize = "1GB" diff --git a/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json new file mode 100644 index 0000000000..153778915f --- /dev/null +++ b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.json @@ -0,0 +1,5365 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "gnetId": null, + "graphTooltip": 0, + "id": 3, + "iteration": 1643173897059, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": "Prometheus", + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 32, + "options": { + "content": "
TDengine Cluster Dashboard (First EP: ${firstEp}, Version: ${version})
", + "mode": "markdown" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "mnodes", + "formatType": "Time series", + "queryType": "SQL", + "refId": "A", + "sql": "show mnodes", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "-- OVERVIEW --", + "transformations": [ + { + "id": "calculateField", + "options": { + "binary": { + "left": "Time", + "operator": "+", + "reducer": "sum", + "right": "" + }, + "mode": "binary", + "reduce": { + "reducer": "sum" + } + } + } + ], + "type": "text" + }, + { + "collapsed": false, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 57, + "panels": [], + "title": "Cluster Status", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 0, + "y": 4 + }, + "id": 73, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_first_ep{cluster=\"$cluster\"}", + "format": "table", + "formatType": "Time series", + "instant": false, + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "First EP", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "value" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 3, + "y": 4 + }, + "id": 74, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_version{cluster=\"$cluster\"}", + "format": "table", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(version) from log.cluster_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Version", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "value" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "MNode 被选举后经过的时长", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + 
}, + "gridPos": { + "h": 4, + "w": 3, + "x": 6, + "y": 4 + }, + "id": 72, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_master_uptime{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(master_uptime) from log.cluster_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Master Uptime", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "企业版授权到期时间", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 9, + "y": 4 + }, + "id": 99, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_expire_time{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(expire_time) from log.grants_info", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Expire Time", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "企业版授权已用测点数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [ + { + "matcher": { + "id": "byFrameRefID", + "options": "A" + }, + "properties": [ + { + "id": "noValue", + "value": "unlimited" + } + ] + } + ] + }, + "gridPos": { + "h": 4, + "w": 4, + "x": 12, + "y": 4 + }, + "id": 100, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_disk_engine", + "formatType": "Time series", + "interval": "", + "legendFormat": "used", + "queryType": "SQL", + "refId": "A", + "sql": "select max(timeseries_used) as used ,max(timeseries_total) as total from log.grants_info where ts >= $from and ts <= $to interval(30s)", + "target": "select metric", + "type": "timeserie" + }, + { + "exemplar": true, + "expr": "taos_dnodes_info_disk_total", + "hide": false, + "interval": "", + "legendFormat": "total", + "refId": "B" + } + ], + "title": "Used Meassuring Points", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "数据库个数", + 
"fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 2, + "x": 16, + "y": 4 + }, + "id": 65, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count(taos_vgroups_info_status{cluster=\"$cluster\"})", + "format": "time_series", + "formatType": "Time series", + "instant": false, + "interval": "", + "legendFormat": "databases", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Databases", + "transformations": [ + { + "id": "filterFieldsByName", + "options": { + "include": { + "names": [ + "Time", + "databases" + ] + } + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "所有数据库的表数量之和", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 18, + "y": 4 + }, + "id": 68, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_tables_per_database{cluster=\"$cluster\"})", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show databases;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Tables", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 21, + "y": 4 + }, + "id": 82, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "/.*/", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select connections_total from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "title": "Connections", + "type": 
"stat" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群DNode数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 0, + "y": 8 + }, + "id": 75, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select dnodes_total as total,dnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "DNodes", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群MNode数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 6, + "y": 8 + }, + "id": 101, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select mnodes_total as total,mnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "MNodes", + "transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群 VGroups 数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 12, + "y": 8 + }, + "id": 102, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count(count(taos_vgroups_info_status{cluster=\"$cluster\"}) by (vgroup_id))", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select vgroups_total as total, vgroups_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VGroups", + 
"transformations": [], + "type": "bargauge" + }, + { + "datasource": "Prometheus", + "description": "当前TDengine集群 VNodes 数量,Alive 为存活,Total 为所有", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 6, + "x": 18, + "y": 8 + }, + "id": 103, + "options": { + "displayMode": "basic", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_vgroups_info_online_vnodes{cluster=\"$cluster\"})", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select vnodes_total as total, vnodes_alive as alive from log.cluster_info where ts >= $from and ts <= $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes", + "transformations": [], + "type": "bargauge" + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "DNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 12 + }, + "hiddenSeries": false, + "id": 84, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_alive / taos_cluster_info_dnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(dnodes_alive)/avg(dnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:71", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:72", + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "MNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 12 + }, + "hiddenSeries": false, + "id": 87, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_alive / taos_cluster_info_mnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mnodes_alive)/avg(mnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "MNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:221", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:222", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "VGroups Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 12 + }, + "hiddenSeries": false, + "id": 85, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": 
false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_vgroups_alive / taos_cluster_info_vgroups_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(vgroups_alive)/avg(vgroups_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "VGroups Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:256", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:257", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 1 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "VNodes Alive Percent alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 18, + "y": 12 + }, + "hiddenSeries": false, + "id": 86, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_vnodes_alive / taos_cluster_info_vnodes_total", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(vnodes_alive)/avg(vnodes_total) from log.cluster_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 1, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "VNodes Alive Percent", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + 
"yaxes": [ + { + "$$hashKey": "object:291", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:292", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 0.95 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + }, + { + "evaluator": { + "params": [ + 0, + 1 + ], + "type": "within_range" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "keep_state", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "DNodes Alive Percent alert", + "noDataState": "ok", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 0, + "y": 18 + }, + "hiddenSeries": false, + "id": 104, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_timeseries_used / taos_grants_info_timeseries_total {cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "percent", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(timeseries_used)/avg(timeseries_total) as percent from log.grants_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 0.95, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Messuring Points Used Percent Alert", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:333", + "decimals": null, + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + }, + { + "$$hashKey": "object:334", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 86400 + ], + "type": "lt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "min" + }, + "type": "query" + } + ], + "executionErrorState": "keep_state", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "Grants Expire Time 
alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 6, + "y": 18 + }, + "hiddenSeries": false, + "id": 105, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:368", + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_grants_info_expire_time", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(expire_time) as expire_time from log.grants_info where ts >= $from and ts <= $to interval(30s) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "lt", + "value": 86400, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Grants Expire Time", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:375", + "decimals": null, + "format": "s", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:376", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "alert": { + "alertRuleTags": {}, + "conditions": [ + { + "evaluator": { + "params": [ + 10 + ], + "type": "gt" + }, + "operator": { + "type": "and" + }, + "query": { + "params": [ + "A", + "1m", + "now" + ] + }, + "reducer": { + "params": [], + "type": "avg" + }, + "type": "query" + } + ], + "executionErrorState": "alerting", + "for": "1m", + "frequency": "10s", + "handler": 1, + "name": "Error Rate alert", + "noDataState": "no_data", + "notifications": [] + }, + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "错误率(每秒错误数)", + "fieldConfig": { + "defaults": { + "unit": "cps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 6, + "x": 12, + "y": 18 + }, + "hiddenSeries": false, + "id": 106, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:410", + "alias": "percent", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"deriv(taos_dnodes_info_errors{cluster=\"$cluster\"}[1m])", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select derivative(errors, 1s, 1) as errors from (select sum(errors) as errors from log.dnodes_info where ts >= $from and ts <= $to interval(1s))", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [ + { + "colorMode": "critical", + "fill": true, + "line": true, + "op": "gt", + "value": 10, + "visible": true + } + ], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Error Rate", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:417", + "decimals": null, + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": "0", + "show": true + }, + { + "$$hashKey": "object:418", + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": false, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 24 + }, + "id": 24, + "panels": [], + "repeat": null, + "title": "DNodes Overview", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 25 + }, + "id": 90, + "options": { + "colorMode": "value", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "horizontal", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value_and_name" + }, + "pluginVersion": "8.2.2", + "repeat": null, + "targets": [ + { + "alias": "", + "colNameFormatStr": "{{groupValue}}", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "taos_dnodes_info_uptime{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "{{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select last(uptime) from log.dnodes_info where ts >= now -1m and ts <= now group by dnode_ep", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "DNodes Lifetime", + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 10, + "x": 4, + "y": 25 + }, + "hiddenSeries": false, + "id": 88, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"taos_cluster_info_dnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(dnodes_total) as total, avg(dnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_dnodes_alive{cluster=\"$cluster\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "alive", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(dnodes_total) as total, avg(dnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "DNodes Number", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:128", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:129", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "short" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 6, + "w": 10, + "x": 14, + "y": 25 + }, + "hiddenSeries": false, + "id": 89, + "legend": { + "alignAsTable": true, + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_cluster_info_mnodes_total{cluster=\"$cluster\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mnodes_total) as total, avg(mnodes_alive) as alive from log.cluster_info where ts >= $from and ts <= $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "exemplar": true, + "expr": "taos_cluster_info_mnodes_alive{cluster=\"$cluster\"}", + "hide": false, + "interval": "", + "legendFormat": "alive", + "refId": "B" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "MNodes Number", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:452", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:453", + "format": "short", + "label": null, + "logBase": 1, + "max": 
null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 31 + }, + "id": 108, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "Include two parts:", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 32 + }, + "hiddenSeries": false, + "id": 110, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideEmpty": false, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:66", + "alias": "/success_rate/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_count - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_insert) as insert_count, sum(req_insert_success) as insert_success, sum(req_insert_batch) as insert_batches, sum(req_insert_batch_success) as insert_batch_success from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_success{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_success - {{dnode_ep}}", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_insert_success) / sum(req_insert) as success_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_batches - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select sum(req_insert) as total_inserts, sum(req_insert_batch) as total_batches from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch_success{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "insert_batches_success - {{dnode_ep}}", + "queryType": "SQL", + "refId": "D", + "sql": "select sum(req_insert_success) / sum(req_insert) as success_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_success{cluster=\"$cluster\"}) by (dnode_ep) / sum(taos_dnodes_info_req_insert{cluster=\"$cluster\"}) by (dnode_ep)", + "hide": false, + "interval": "", + "legendFormat": "success_rate - {{dnode_ep}}", + 
"refId": "E" + }, + { + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_insert_batch_success{cluster=\"$cluster\"}) by (dnode_ep) / sum(taos_dnodes_info_req_insert_batch{cluster=\"$cluster\"}) by (dnode_ep)", + "hide": false, + "interval": "", + "legendFormat": "batch_success_rate - {{dnode_ep}}", + "refId": "F" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (Inserts)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": null, + "logBase": 1, + "max": "1", + "min": "0", + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "unit": "cps" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 24, + "x": 0, + "y": 41 + }, + "hiddenSeries": false, + "id": 112, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "insert_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(req_insert_rate) as insert_rate, avg(req_insert_batch_rate) as batch_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_batch_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "batch_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(insert_rate) as cluster_insert_rate, avg(batch_rate) as cluster_batch_rate from (select sum(req_insert_rate) as insert_rate, sum(req_insert_batch_rate) as batch_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_rate{cluster=\"$cluster\"})", + "hide": false, + "interval": "", + "legendFormat": "cluster_insert_rate", + "refId": "C" + }, + { + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_insert_batch_rate{cluster=\"$cluster\"})", + "hide": false, + "interval": "", + "legendFormat": "cluster_batch_rate", + "refId": "D" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests Rate (Inserts per Second)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + 
"type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 50 + }, + "hiddenSeries": false, + "id": 114, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:227", + "alias": "/rate/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_select{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "req_select - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_select) as req_select from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_select{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_select", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_select) as total from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_select_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "req_select_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(req_select_rate) as req_select_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_select_rate{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_select_rate", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(req_select_rate) as req_select_rate from (select sum(req_select_rate) as req_select_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (Selects)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + 
"logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "cps", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 58 + }, + "hiddenSeries": false, + "id": 111, + "interval": null, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": true, + "show": true, + "sideWidth": 600, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:100", + "alias": "/.*rate.*/", + "dashes": true, + "fill": 4, + "spaceLength": 1, + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_http{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "interval": "", + "legendFormat": "req_http - {{dnode_ep}}", + "queryType": "SQL", + "refId": "A", + "sql": "select sum(req_http) as req_http from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "sum(taos_dnodes_info_req_http{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_http", + "queryType": "SQL", + "refId": "B", + "sql": "select sum(req_http) as req_http from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null)" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "dnode_ep", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_http_rate{cluster=\"$cluster\"}) by (dnode_ep)", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "req_http_rate - {{dnode_ep}}", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(req_http_rate) as req_http_rate from log.dnodes_info where ts >= $from and ts <= $to interval($interval) fill(null) group by dnode_ep" + }, + { + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg(taos_dnodes_info_req_http_rate{cluster=\"$cluster\"})", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "cluster_req_http_rate", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(req_http_rate) as req_http_rate from (select sum(req_http_rate) as req_http_rate from log.dnodes_info where ts >= $from and ts <= $to interval(1s)) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Requests (HTTP)", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "ops", + "label": null, + "logBase": 1, + "max": null, + "min": null, + 
"show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Requests", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 32 + }, + "id": 92, + "panels": [ + { + "datasource": "Prometheus", + "description": "超级表的个数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 67 + }, + "id": 96, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_stables_per_database{cluster=\"$cluster\", database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show $database.stables;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "STables", + "transformations": [], + "type": "stat" + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 9, + "w": 9, + "x": 3, + "y": 67 + }, + "hiddenSeries": false, + "id": 94, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colName": "vgroup_id", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "sum(taos_vgroups_info_tables_num{cluster=\"$cluster\", database_name=\"$database\"})", + "formatType": "Time series", + "groupDataByColName": "", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(tables_num) as tables_num from (select last(ts) as ts, sum(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' group by ts) where ts >= $from and ts <= $to interval($interval) fill(null)" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Tables", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 
0, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 67 + }, + "hiddenSeries": false, + "id": 95, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colName": "vgroup_id", + "colNameFormatStr": "vgoup{{groupValue}}", + "colNameToGroup": "vgroup_id", + "exemplar": true, + "expr": "taos_vgroups_info_tables_num", + "formatType": "Time series", + "groupDataByColName": "", + "interval": "", + "legendFormat": "vgroup_{{vgroup_id}}", + "queryType": "SQL", + "refId": "A", + "sql": "select max(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' interval($interval) fill(null) group by vgroup_id" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Tables Number Foreach VGroups", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "datasource": "Prometheus", + "description": "所有普通表的个数(包括超级表的子表)", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 70 + }, + "id": 98, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_tables_per_database{cluster=\"$cluster\",database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(tables_num) from (select last(ts) as ts, sum(tables_num) as tables_num from log.vgroups_info where ts > $from and ts <= $to and database_name = '$database' group by ts)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Total Tables", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "超级表的所有子表个数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 3, + "x": 0, + "y": 73 + }, + "id": 97, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + 
"orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "repeatDirection": "h", + "targets": [ + { + "alias": "dnodes", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_tables_per_database{cluster=\"$cluster\",database=\"$database\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "show $database.stables;", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Sub Tables", + "transformations": [], + "type": "stat" + } + ], + "repeat": "database", + "title": "Database: [ $database ]", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 34 + }, + "id": 20, + "panels": [ + { + "datasource": "Prometheus", + "description": "启动时长", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "Down", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 0, + "y": 69 + }, + "id": 120, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_uptime{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(uptime) as uptime from log.dnodes_info where dnode_ep = '$fqdn'", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Uptime", + "transformations": [ + { + "id": "reduce", + "options": { + "includeTimeField": false, + "mode": "reduceFields", + "reducers": [ + "lastNotNull" + ] + } + } + ], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "是否有MNode", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 1, + "text": "No" + }, + "1": { + "index": 0, + "text": "Yes" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 4, + "y": 69 + }, + "id": 121, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_has_mnode{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(has_mnode) as has_mnode from log.dnodes_info where dnode_ep = 
'$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Has MNode?", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "CPU 核数", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "noValue": "Down", + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 6, + "y": 69 + }, + "id": 122, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_cpu_cores{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_cores) as uptime from log.dnodes_info where dnode_ep = '$fqdn' interval ($interval)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "CPU Cores", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "VNodes 数量", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 8, + "y": 69 + }, + "id": 123, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_vnodes_num{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(vnodes_num) as vnodes_num from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes Number", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Master VNodes 数量", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 2, + "x": 10, + "y": 69 + }, + "id": 124, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "auto" + }, + "pluginVersion": "8.2.2", + 
"targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_masters{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(masters) as masters from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "VNodes Masters", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "当前 taosd 线程 CPU 使用率", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "max": 100, + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 0.5 + }, + { + "color": "red", + "value": 0.8 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "last(cpu_taosd)" + }, + "properties": [ + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 12, + "y": 69 + }, + "id": 45, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "mem_taosd", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_cpu_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Table", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select last(cpu_engine) from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current CPU Usage of taosd", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "当前 taosd 线程 内存 使用率", + "fieldConfig": { + "defaults": { + "color": { + "mode": "continuous-GrYlRd" + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "decmbytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 16, + "y": 69 + }, + "id": 66, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "mean" + ], + "fields": "/^taosd$/", + "values": false + }, + "showThresholdLabels": true, + "showThresholdMarkers": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "memory", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_dnodes_info_mem_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select last(mem_engine) as taosd, last(mem_total) as total from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "memory", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"taos_dnodes_info_mem_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "B", + "sql": "select last(mem_engine) as taosd, last(mem_total) as total from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Current Memory Usage of taosd", + "type": "gauge" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "max": 1, + "min": 0, + "thresholds": { + "mode": "percentage", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "#EAB839", + "value": 75 + }, + { + "color": "red", + "value": 80 + }, + { + "color": "dark-red", + "value": 95 + } + ] + }, + "unit": "percentunit" + }, + "overrides": [] + }, + "gridPos": { + "h": 6, + "w": 4, + "x": 20, + "y": 69 + }, + "id": 51, + "options": { + "displayMode": "gradient", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "last" + ], + "fields": "", + "values": false + }, + "showUnfilled": true, + "text": {} + }, + "pluginVersion": "8.2.2", + "targets": [ + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l0_used/taos_disks_info_datadir_l0_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "interval": "", + "legendFormat": "level0", + "queryType": "SQL", + "refId": "A", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l1_used/taos_disks_info_datadir_l1_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1", + "queryType": "SQL", + "refId": "B", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "disk_used", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "taos_disks_info_datadir_l2_used/taos_disks_info_datadir_l2_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2", + "queryType": "SQL", + "refId": "C", + "sql": "select last(disk_used) as used, last(disk_total) as total, last(disk_used) / last(disk_total) as percent from log.dnodes_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "timeFrom": null, + "timeShift": null, + "title": "Disk Used", + "transformations": [], + "type": "bargauge" + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "CPU 资源占用情况。", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + 
"fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 0, + "y": 75 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 2, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "hideZero": false, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_cpu_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(cpu_engine) as taosd, avg(cpu_system) as system from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_cpu_system{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "system", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(cpu_engine) as taosd, avg(cpu_system) as system from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "CPU Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "percent", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "内存资源占用情况", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 11, + "w": 12, + "x": 12, + "y": 75 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 42, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_engine{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time 
series", + "hide": false, + "interval": "", + "legendFormat": "taosd", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_system{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "system", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_mem_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "total", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(mem_engine) as taosd, avg(mem_system) as system, avg(mem_total) as total from log.dnodes_info where dnode_ep='$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "RAM Usage", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "decmbytes", + "label": "使用占比", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 0, + "y": 86 + }, + "hiddenSeries": false, + "id": 117, + "legend": { + "alignAsTable": true, + "avg": false, + "current": true, + "hideEmpty": false, + "hideZero": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": 400, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:84", + "alias": "/percent/", + "yaxis": 2 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l0_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0_used", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as 
level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l1_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1_used", + "queryType": "SQL", + "refId": "D", + "sql": "select avg(datadir_l0_used)/avg(datadir_l0_total) as level0_percent, avg(datadir_l1_used)/avg(datadir_l1_total) as level1_percent , avg(datadir_l2_used)/avg(datadir_l2_total) as level2_percent from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l2_used{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2_used", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(datadir_l0_used)/avg(datadir_l0_total) as level0_percent, avg(datadir_l1_used)/avg(datadir_l1_total) as level1_percent , avg(datadir_l2_used)/avg(datadir_l2_total) as level2_percent from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l0_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0_total", + "queryType": "SQL", + "refId": "C", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_disks_info_datadir_l1_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1_total", + "queryType": "SQL", + "refId": "E", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": 
"avg_over_time(taos_disks_info_datadir_l2_total{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$__interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2_total", + "queryType": "SQL", + "refId": "F", + "sql": "select avg(datadir_l0_used) as level0_used, avg(datadir_l0_total) as level0_total, avg(datadir_l1_used) as level1_used, avg(datadir_l1_total) as level1_total, avg(datadir_l2_used) as level2_used, avg(datadir_l2_total) as level2_total from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null)", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "transformations": [], + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "gbytes", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 10, + "w": 12, + "x": 12, + "y": 86 + }, + "hiddenSeries": false, + "id": 64, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "sideWidth": 400, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "level0", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l0_used{cluster=\"$cluster\", dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level0", + "queryType": "SQL", + "refId": "A", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l0_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "level1", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l1_used{cluster=\"$cluster\", dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level1", + "queryType": "SQL", + "refId": "B", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l1_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + }, + { + "alias": "level1", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "deriv(rate(taos_disks_info_datadir_l2_used{cluster=\"$cluster\", 
dnode_ep=\"$fqdn\"}[1m])[5m:1s]) * 1024", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "level2", + "queryType": "SQL", + "refId": "C", + "sql": "select derivative(value, 1m, 0) from (select avg(datadir_l1_used) * 1024 as value from log.disks_info where dnode_ep = '$fqdn' and ts >= $from and ts < $to interval($interval) fill(null))", + "target": "select metric", + "timeshift": { + "period": null + }, + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Disk Used Increasing Rate per Minute", + "tooltip": { + "shared": false, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "format": "MBs", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "percentunit", + "label": "Disk Used", + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [] + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 96 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 119, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "rightSide": false, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:302", + "alias": "/last/", + "dashLength": 5, + "dashes": true, + "spaceLength": 5 + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_io_write_disk{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "io_write_disk", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(io_read_disk) as io_read_taosd, avg(io_write_disk) as io_write_taosd from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_io_read_disk{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "io_read_disk", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(io_read_disk) as io_read_taosd, avg(io_write_disk) as io_write_taosd from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "Disk IO", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + 
"buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "MBs", + "label": "IO Rate", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + }, + { + "aliasColors": {}, + "bars": false, + "cacheTimeout": null, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "description": "io", + "fieldConfig": { + "defaults": { + "links": [], + "unit": "Mbits" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 96 + }, + "hiddenSeries": false, + "hideTimeOverride": true, + "id": 118, + "legend": { + "alignAsTable": true, + "avg": true, + "current": true, + "max": true, + "min": true, + "show": true, + "total": false, + "values": true + }, + "lines": true, + "linewidth": 1, + "links": [], + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:347", + "alias": "/in/", + "transform": "negative-Y" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_net_in{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "net_in", + "queryType": "SQL", + "refId": "A", + "sql": "select avg(net_in) as net_in, avg(net_out) as net_out from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + }, + { + "alias": "", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "avg_over_time(taos_dnodes_info_net_out{cluster=\"$cluster\",dnode_ep=\"$fqdn\"}[$interval])", + "formatType": "Time series", + "hide": false, + "interval": "", + "legendFormat": "net_out", + "queryType": "SQL", + "refId": "B", + "sql": "select avg(net_in) as net_in, avg(net_out) as net_out from log.dnodes_info where dnode_ep = '$fqdn' and ts >= now-1h and ts < now interval($interval) fill(null)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": "1h", + "timeRegions": [], + "timeShift": "30s", + "title": "Net", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "decimals": null, + "format": "Mbits", + "label": "IO Rate", + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": false + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "repeat": "fqdn", + "title": "DNode Usage [ $fqdn ]", + "type": "row" + }, + { + "collapsed": true, + "datasource": "Prometheus", + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 35 + }, + "id": 63, + "panels": [ + { + "aliasColors": {}, + "bars": false, + "dashLength": 10, + "dashes": false, + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "displayName": "Logins Per Minute", + 
"unit": "cpm" + }, + "overrides": [] + }, + "fill": 1, + "fillGradient": 0, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 10 + }, + "hiddenSeries": false, + "id": 61, + "legend": { + "avg": false, + "current": false, + "max": false, + "min": false, + "show": true, + "total": false, + "values": false + }, + "lines": true, + "linewidth": 1, + "nullPointMode": "null", + "options": { + "alertThreshold": true + }, + "percentage": false, + "pluginVersion": "8.2.2", + "pointradius": 2, + "points": false, + "renderer": "flot", + "seriesOverrides": [ + { + "$$hashKey": "object:67", + "alias": "logins", + "nullPointMode": "null as zero" + } + ], + "spaceLength": 10, + "stack": false, + "steppedLine": false, + "targets": [ + { + "alias": "logins", + "colNameFormatStr": "", + "colNameToGroup": "", + "exemplar": true, + "expr": "count_over_time(taos_logs_content{cluster=\"$cluster\"}[1m])", + "formatType": "Time series", + "interval": "", + "legendFormat": "", + "queryType": "SQL", + "refId": "A", + "sql": "select count(*) from log.logs where ts >= $from and ts < $to interval (1m)", + "target": "select metric", + "type": "timeserie" + } + ], + "thresholds": [], + "timeFrom": null, + "timeRegions": [], + "timeShift": null, + "title": "Login Counts per Minute", + "tooltip": { + "shared": true, + "sort": 0, + "value_type": "individual" + }, + "type": "graph", + "xaxis": { + "buckets": null, + "mode": "time", + "name": null, + "show": true, + "values": [] + }, + "yaxes": [ + { + "$$hashKey": "object:74", + "format": "cpm", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + }, + { + "$$hashKey": "object:75", + "format": "short", + "label": null, + "logBase": 1, + "max": null, + "min": null, + "show": true + } + ], + "yaxis": { + "align": false, + "alignLevel": null + } + } + ], + "title": "Login History", + "type": "row" + } + ], + "schemaVersion": 31, + "style": "dark", + "tags": [ + "TDengine", + "Prometheus", + "TaosKeeper" + ], + "templating": { + "list": [ + { + "allValue": null, + "current": { + "selected": false, + "text": "localhost", + "value": "localhost" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd, cluster)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "cluster", + "options": [], + "query": { + "query": "label_values(taos_dn_cpu_taosd, cluster)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "tdengine:6030", + "value": "tdengine:6030" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, fqdn)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": "", + "multi": false, + "name": "fqdn", + "options": [], + "query": { + "query": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, fqdn)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "1", + "value": "1" + }, + "datasource": null, + "definition": "label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, dnodeid)", + "description": null, + "error": null, + "hide": 2, + "includeAll": false, + "label": "with DNode ID:", + "multi": false, + "name": "dnodeid", + "options": [], + "query": { + "query": 
"label_values(taos_dn_cpu_taosd{cluster=\"$cluster\"}, dnodeid)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": true, + "text": [ + "All" + ], + "value": [ + "$__all" + ] + }, + "datasource": null, + "definition": "label_values(taos_vgroups_info_status{cluster=\"$cluster\"}, database_name)", + "description": null, + "error": null, + "hide": 0, + "includeAll": true, + "label": null, + "multi": true, + "name": "database", + "options": [], + "query": { + "query": "label_values(taos_vgroups_info_status{cluster=\"$cluster\"}, database_name)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "tdengine:6030", + "value": "tdengine:6030" + }, + "datasource": null, + "definition": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "firstEp", + "options": [], + "query": { + "query": "label_values(taos_cluster_info_first_ep{cluster=\"$cluster\"}, value)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "allValue": null, + "current": { + "selected": false, + "text": "2.4.0.4", + "value": "2.4.0.4" + }, + "datasource": null, + "definition": "label_values(taos_cluster_info_version{cluster=\"$cluster\"}, value)", + "description": null, + "error": null, + "hide": 0, + "includeAll": false, + "label": null, + "multi": false, + "name": "version", + "options": [], + "query": { + "query": "label_values(taos_cluster_info_version{cluster=\"$cluster\"}, value)", + "refId": "StandardVariableQuery" + }, + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "auto": true, + "auto_count": 100, + "auto_min": "10s", + "current": { + "selected": false, + "text": "5s", + "value": "5s" + }, + "description": null, + "error": null, + "hide": 0, + "label": null, + "name": "interval", + "options": [ + { + "selected": false, + "text": "auto", + "value": "$__auto_interval_interval" + }, + { + "selected": true, + "text": "5s", + "value": "5s" + }, + { + "selected": false, + "text": "10s", + "value": "10s" + }, + { + "selected": false, + "text": "15s", + "value": "15s" + }, + { + "selected": false, + "text": "20s", + "value": "20s" + }, + { + "selected": false, + "text": "30s", + "value": "30s" + }, + { + "selected": false, + "text": "1m", + "value": "1m" + }, + { + "selected": false, + "text": "2m", + "value": "2m" + }, + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "1d", + "value": "1d" + } + ], + "query": "5s,10s,15s,20s,30s,1m,2m,5m,10m,30m,1h,6h,12h,1d", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m", + "15m", + "30m", + "1h", + "2h", + "1d" + ] + }, + 
"timezone": "", + "title": "TaosKeeper Prometheus Dashboard", + "uid": "rSFM0Fxnk", + "version": 62 +} \ No newline at end of file diff --git a/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png new file mode 100644 index 0000000000..a608c98b93 Binary files /dev/null and b/tools/keeper/dashboards/tdengine-taoskeeper-prometheus-dashboard.png differ diff --git a/tools/keeper/db/connector.go b/tools/keeper/db/connector.go new file mode 100644 index 0000000000..a8f106e952 --- /dev/null +++ b/tools/keeper/db/connector.go @@ -0,0 +1,177 @@ +package db + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "os" + "strings" + "time" + + "github.com/sirupsen/logrus" + "github.com/taosdata/driver-go/v3/common" + + _ "github.com/taosdata/driver-go/v3/taosRestful" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +type Connector struct { + db *sql.DB +} + +type Data struct { + Head []string `json:"head"` + Data [][]interface{} `json:"data"` +} + +var dbLogger = log.GetLogger("DB ") + +func NewConnector(username, password, host string, port int, usessl bool) (*Connector, error) { + var protocol string + if usessl { + protocol = "https" + } else { + protocol = "http" + } + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()}) + dbLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl) + + db, err := sql.Open("taosRestful", fmt.Sprintf("%s:%s@%s(%s:%d)/?skipVerify=true", username, password, protocol, host, port)) + if err != nil { + dbLogger.Errorf("connect to adapter failed, host:%s, port:%d, usessl:%v, error:%s", host, port, usessl, err) + return nil, err + } + + dbLogger.Tracef("connect to adapter success, host:%s, port:%d, usessl:%v", host, port, usessl) + return &Connector{db: db}, nil +} + +func NewConnectorWithDb(username, password, host string, port int, dbname string, usessl bool) (*Connector, error) { + var protocol string + if usessl { + protocol = "https" + } else { + protocol = "http" + } + + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: util.GetQidOwn()}) + dbLogger.Tracef("connect to adapter, host:%s, port:%d, usessl:%v", host, port, usessl) + + db, err := sql.Open("taosRestful", fmt.Sprintf("%s:%s@%s(%s:%d)/%s?skipVerify=true", username, password, protocol, host, port, dbname)) + if err != nil { + dbLogger.Errorf("connect to adapter failed, host:%s, port:%d, db:%s, usessl:%v, error:%s", host, port, dbname, usessl, err) + return nil, err + } + + dbLogger.Tracef("connect to adapter success, host:%s, port:%d, db:%s, usessl:%v", host, port, dbname, usessl) + return &Connector{db: db}, nil +} + +func (c *Connector) Exec(ctx context.Context, sql string, qid uint64) (int64, error) { + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid}) + ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid)) + + dbLogger.Tracef("call adapter to execute sql:%s", sql) + startTime := time.Now() + res, err := c.db.ExecContext(ctx, sql) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + if strings.Contains(err.Error(), "Authentication failure") { + dbLogger.Error("Authentication failure") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second) + defer cancelLog() + log.Close(ctxLog) + os.Exit(1) + } + dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, 
err) + return 0, err + } + + rowsAffected, err := res.RowsAffected() + if err != nil { + dbLogger.Errorf("latency:%v, err:%s", latency, err) + return rowsAffected, err + } + + dbLogger.Tracef("response ok, rowsAffected:%v, latency:%v", rowsAffected, latency) + + return rowsAffected, err +} + +func logData(data *Data, logger *logrus.Entry) { + if data == nil { + logger.Tracef("No data to display") + return + } + + jsonData, err := json.Marshal(data) + if err != nil { + logger.Errorf("Failed to marshal data to JSON: %v", err) + return + } + logger.Tracef("query result data:%s", jsonData) +} + +func (c *Connector) Query(ctx context.Context, sql string, qid uint64) (*Data, error) { + dbLogger := dbLogger.WithFields(logrus.Fields{config.ReqIDKey: qid}) + ctx = context.WithValue(ctx, common.ReqIDKey, int64(qid)) + + dbLogger.Tracef("call adapter to execute query, sql:%s", sql) + + startTime := time.Now() + rows, err := c.db.QueryContext(ctx, sql) + + endTime := time.Now() + latency := endTime.Sub(startTime) + + if err != nil { + if strings.Contains(err.Error(), "Authentication failure") { + dbLogger.Error("Authentication failure") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 3*time.Second) + defer cancelLog() + log.Close(ctxLog) + os.Exit(1) + } + dbLogger.Errorf("latency:%v, sql:%s, err:%s", latency, sql, err) + return nil, err + } + + dbLogger.Tracef("response ok, latency:%v, sql:%s", latency, sql) + + data := &Data{} + data.Head, err = rows.Columns() + columnCount := len(data.Head) + if err != nil { + dbLogger.Errorf("get columns error, msg:%s", err) + return nil, err + } + scanData := make([]interface{}, columnCount) + for rows.Next() { + tmp := make([]interface{}, columnCount) + for i := 0; i < columnCount; i++ { + scanData[i] = &tmp[i] + } + err = rows.Scan(scanData...) 
+ if err != nil { + rows.Close() + dbLogger.Errorf("rows scan error, msg:%s", err) + return nil, err + } + data.Data = append(data.Data, tmp) + } + + if dbLogger.Logger.IsLevelEnabled(logrus.TraceLevel) { + logData(data, dbLogger) + } + return data, nil +} + +func (c *Connector) Close() error { + return c.db.Close() +} diff --git a/tools/keeper/db/empty_test.go b/tools/keeper/db/empty_test.go new file mode 100644 index 0000000000..52e32e1f89 --- /dev/null +++ b/tools/keeper/db/empty_test.go @@ -0,0 +1,8 @@ +package db + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/docker-compose.yml b/tools/keeper/docker-compose.yml new file mode 100644 index 0000000000..f7f43fe112 --- /dev/null +++ b/tools/keeper/docker-compose.yml @@ -0,0 +1,29 @@ +version: "3.7" + +services: + tdengine: + image: tdengine/tdengine:3.0.1.6 + environment: + TZ: Asia/Shanghai + TAOS_FQDN: tdengine + volumes: + - taosdata:/var/lib/taos + taoskeeper: + build: ./ + depends_on: + - tdengine + environment: + TDENGINE_HOST: tdengine + TDENGINE_PORT: 6041 + volumes: + - ./config/taoskeeper.toml:/etc/taos/taoskeeper.toml + ports: + - 6043:6043 + prometheus: + image: prom/prometheus + volumes: + - ./prometheus/:/etc/prometheus/ + ports: + - 9090:9090 +volumes: + taosdata: diff --git a/tools/keeper/examples/metrics.toml b/tools/keeper/examples/metrics.toml new file mode 100644 index 0000000000..9dbfea2323 --- /dev/null +++ b/tools/keeper/examples/metrics.toml @@ -0,0 +1,38 @@ +prefix = "taos" +cluster = "localhost" +database = "log" +explicit = false + +[tables.restful_info] +[tables.slowquery] +[tables.cluster_info] +[tables.grants_info] +[tables.disks_info] + +[tables.logs] +explicit = true +[tables.logs.metrics.content] +type = "info" +help = "login types or messages" +[tables.logs.metrics.level] +type = "gauge" +help = "login level" + +[tables.dnodes_info] +[tables.dnodes_info.metrics.has_mnode] +type = "gauge" +help = "check if the node has mnode" + +[tables.vgroups_info] +column_as_variables = ["database_name", "dnode_roles", "dnode_ids"] +explicit = false + +[tables.vgroups_info.metrics.tables_num] +type = "gauge" +help = "Tables count of the vgroup" +[tables.vgroups_info.metrics.online_vnodes] +type = "gauge" +help = "Online v-nodes of the v-group" +[tables.vgroups_info.metrics.status] +type = "info" +help = "Status of the v-group" diff --git a/tools/keeper/go.mod b/tools/keeper/go.mod new file mode 100644 index 0000000000..f520ceb774 --- /dev/null +++ b/tools/keeper/go.mod @@ -0,0 +1,80 @@ +module github.com/taosdata/taoskeeper + +go 1.18 + +require ( + github.com/BurntSushi/toml v0.4.1 + github.com/gin-gonic/gin v1.9.1 + github.com/kardianos/service v1.2.1 + github.com/panjf2000/ants/v2 v2.4.6 + github.com/prometheus/client_golang v1.12.2 + github.com/shirou/gopsutil/v3 v3.22.4 + github.com/shopspring/decimal v1.3.1 + github.com/sirupsen/logrus v1.8.1 + github.com/spf13/pflag v1.0.5 + github.com/spf13/viper v1.12.0 + github.com/stretchr/testify v1.9.0 + github.com/taosdata/driver-go/v3 v3.5.8 + github.com/taosdata/file-rotatelogs/v2 v2.5.2 + github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/bytedance/sonic v1.9.1 // indirect + github.com/cespare/xxhash/v2 v2.1.2 // indirect + github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect + github.com/gabriel-vasile/mimetype v1.4.2 // 
indirect + github.com/gin-contrib/cors v1.3.1 // indirect + github.com/gin-contrib/gzip v0.0.3 // indirect + github.com/gin-contrib/pprof v1.3.0 // indirect + github.com/gin-contrib/sse v0.1.0 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.14.0 // indirect + github.com/goccy/go-json v0.10.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect + github.com/google/uuid v1.3.0 // indirect + github.com/gorilla/websocket v1.5.0 // indirect + github.com/hashicorp/hcl v1.0.0 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/cpuid/v2 v2.2.4 // indirect + github.com/leodido/go-urn v1.2.4 // indirect + github.com/lestrrat-go/strftime v1.0.6 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.6 // indirect + github.com/mattn/go-isatty v0.0.19 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/mitchellh/mapstructure v1.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/pelletier/go-toml v1.9.5 // indirect + github.com/pelletier/go-toml/v2 v2.0.8 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_model v0.2.0 // indirect + github.com/prometheus/common v0.32.1 // indirect + github.com/prometheus/procfs v0.7.3 // indirect + github.com/spf13/afero v1.8.2 // indirect + github.com/spf13/cast v1.5.0 // indirect + github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/subosito/gotenv v1.3.0 // indirect + github.com/tklauser/go-sysconf v0.3.10 // indirect + github.com/tklauser/numcpus v0.4.0 // indirect + github.com/twitchyliquid64/golang-asm v0.15.1 // indirect + github.com/ugorji/go/codec v1.2.11 // indirect + github.com/yusufpapurcu/wmi v1.2.2 // indirect + golang.org/x/arch v0.3.0 // indirect + golang.org/x/crypto v0.9.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.9.0 // indirect + google.golang.org/protobuf v1.30.0 // indirect + gopkg.in/ini.v1 v1.66.4 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect +) diff --git a/tools/keeper/go.sum b/tools/keeper/go.sum new file mode 100644 index 0000000000..9c7721c4d7 --- /dev/null +++ b/tools/keeper/go.sum @@ -0,0 +1,764 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod 
h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/Shopify/sarama v1.29.1/go.mod h1:mdtqvCSg8JOxk8PmpTNGyo6wzd4BMm4QXSfDnTXmgkE= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units 
v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM= +github.com/bytedance/sonic v1.9.1 h1:6iJ6NqdoxCDr6mbY8h18oSO+cShGSMRGCEo7F2h0x8s= +github.com/bytedance/sonic v1.9.1/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= +github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/deathowl/go-metrics-prometheus v0.0.0-20200518174047-74482eab5bfb/go.mod h1:kZ9Xvhj+PTMJ415unU/sutrnWDVqG0PDS/Sl4Rt3xkE= +github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.3.5/go.mod h1:eTzb4gxwwyWpqBUHGQZ4ABAV7+Jgm1PklsYT/eo8Hcc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod 
h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= +github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= +github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU= +github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA= +github.com/gin-contrib/cors v1.3.1 h1:doAsuITavI4IOcd0Y19U4B+O0dNWihRyX//nn4sEmgA= +github.com/gin-contrib/cors v1.3.1/go.mod h1:jjEJ4268OPZUcU7k9Pm653S7lXUGcqMADzFA61xsmDk= +github.com/gin-contrib/gzip v0.0.3 h1:etUaeesHhEORpZMp18zoOhepboiWnFtXrBZxszWUn4k= +github.com/gin-contrib/gzip v0.0.3/go.mod h1:YxxswVZIqOvcHEQpsSn+QF5guQtO1dCfy0shBPy4jFc= +github.com/gin-contrib/pprof v1.3.0 h1:G9eK6HnbkSqDZBYbzG4wrjCsA4e+cvYAHUZw6W+W9K0= +github.com/gin-contrib/pprof v1.3.0/go.mod h1:waMjT1H9b179t3CxuG1cV3DHpga6ybizwfBaM5OXaB0= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= +github.com/gin-gonic/gin v1.6.2/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= +github.com/gin-gonic/gin v1.7.2/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg= +github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/locales v0.12.1/go.mod 
h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-playground/validator/v10 v10.14.0 h1:vgvQWe3XCz3gIeFDm/HnTIbj6UGmg/+t63MyGU2n5js= +github.com/go-playground/validator/v10 v10.14.0/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= +github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod 
h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= +github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof 
v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= +github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= +github.com/gorilla/sessions v1.2.1/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= +github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs= +github.com/jcmturner/dnsutils/v2 v2.0.0/go.mod h1:b0TnjGOvI/n42bZa+hmXL+kFJZsFT7G4t3HTlQ184QM= +github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jcmturner/goidentity/v6 v6.0.1/go.mod h1:X1YW3bgtvwAXju7V3LCIMpY0Gbxyjn/mY9zx4tFonSg= +github.com/jcmturner/gokrb5/v8 v8.4.2/go.mod h1:sb+Xq/fTY5yktf/VxLsE3wlfPqQjp0aWNYyvBVK62bc= +github.com/jcmturner/rpc/v2 v2.0.3/go.mod h1:VUJYCIDm3PVOEHw8sgt091/20OJjskO/YJki3ELg/Hc= +github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/julienschmidt/httprouter 
v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kardianos/service v1.2.1 h1:AYndMsehS+ywIS6RB9KOlcXzteWUzxgMgBymJD7+BYk= +github.com/kardianos/service v1.2.1/go.mod h1:CIMRFEJVL+0DS1a3Nx06NaMn4Dz63Ng6O7dl0qH0zVM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.12.2/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.2.4 h1:acbojRNwl3o09bUq+yDCtZFc1aiwaAAxtcn8YkZXnvk= +github.com/klauspost/cpuid/v2 v2.2.4/go.mod h1:RVVoqg1df56z8g3pUjL/3lE5UfnlrJX8tyFgg4nqhuY= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q= +github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= +github.com/lestrrat-go/strftime v1.0.6 h1:CFGsDEt1pOpFNU+TJB0nhz9jl+K0hZSLE205AhTIGQQ= +github.com/lestrrat-go/strftime v1.0.6/go.mod h1:f7jQKgV5nnJpYgdEasS+/y7EsTb8ykN2z68n3TtcTaw= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.6 h1:5ibWZ6iY0NctNGWo87LalDlEZ6R41TqbbDamhfG/Qzo= +github.com/magiconair/properties v1.8.6/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= 
+github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q= +github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY= +github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g= +github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w= +github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= +github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/panjf2000/ants/v2 v2.4.6 h1:drmj9mcygn2gawZ155dRbo+NfXEfAssjZNU1qoIb4gQ= +github.com/panjf2000/ants/v2 v2.4.6/go.mod h1:f6F0NZVFsGCp5A7QW/Zj/m92atWwOkY0OIhFxRNFr4A= +github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= +github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= +github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= +github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= +github.com/pierrec/lz4 v2.6.0+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= 
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= +github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI= +github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod 
h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= +github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= +github.com/spf13/afero v1.8.2 h1:xehSyVa0YnHWsJ49JFljMpg1HX19V6NDZ1fkm1Xznbo= +github.com/spf13/afero v1.8.2/go.mod h1:CtAatgMJh6bJEIs48Ay/FOnkljP3WeGUG0MC1RfAqwo= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= +github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= +github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.12.0 h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ= +github.com/spf13/viper v1.12.0/go.mod h1:b6COn30jlNxbm/V2IqWiNWkJ+vZNiMNksliPCiuKtSI= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/subosito/gotenv v1.3.0 h1:mjC+YW8QpAdXibNi+vNWgzmgBH4+5l5dCXv8cNysBLI= +github.com/subosito/gotenv v1.3.0/go.mod h1:YzJjq/33h7nrwdY+iHMhEOEEbW0ovIz0tB6t6PwAXzs= +github.com/taosdata/driver-go/v2 v2.0.1-0.20211018081904-0a2a3ef6c829/go.mod h1:W7pu74rSvDmGjJPO6fzp+GCtwOelrMgXEhPD0aQJ1xw= +github.com/taosdata/driver-go/v3 v3.5.8 h1:JT5lNFUCOHD9Hs4Phjg8RBkGOWlePRnpGqq8kIRHT98= +github.com/taosdata/driver-go/v3 v3.5.8/go.mod h1:H2vo/At+rOPY1aMzUV9P49SVX7NlXb3LAbKw+MCLrmU= +github.com/taosdata/file-rotatelogs/v2 v2.5.2 h1:6ryjwDdKqQtWrkVq9OKj4gvMING/f+fDluMAAe2DIXQ= +github.com/taosdata/file-rotatelogs/v2 v2.5.2/go.mod h1:Qm99Lh0iMZouGgyy++JgTqKvP5FQw1ruR5jkWF7e1n0= +github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a h1:WGFREiuYBrTXTS9GVQQpDvVgGRyByfo0V5//o7tv/ho= +github.com/taosdata/go-utils v0.0.0-20211022070036-018cc5f2432a/go.mod 
h1:hlvGgM/HN3AqWMajvMQe80qoLNJ4KIxs8YOVqEqnxUo= +github.com/tidwall/gjson v1.9.1/go.mod h1:jydLKE7s8J0+1/5jC4eXcuFlzKizGrCKvLmBVX/5oXc= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.2.0/go.mod h1:ITEVvHYasfjBbM0u2Pg8T2nJnzm8xPwvNhhsoaGGjNU= +github.com/tklauser/go-sysconf v0.3.10 h1:IJ1AZGZRWbY8T5Vfk04D9WOA5WSejdflXxP03OUqALw= +github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYaQ2hGm7jmxEFk= +github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o= +github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ= +github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= +github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= +github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= +github.com/xdg/scram v1.0.3/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.3/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg= +github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/arch v0.3.0 h1:02VY4/ZcO/gBOH6PUaoiptASxtXU10jazRCP865E97k= +golang.org/x/arch v0.3.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g= +golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod 
h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200425230154-ff2c4b7c35a0/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net 
v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190130150945-aca44879d564/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220704084225-05e143d24a9e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools 
v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod 
h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= +google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= +google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf 
v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.30.0 h1:kPPoIgf3TsEvrm0PFe15JQ+570QVxYzEvvHqChK+cng= +google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= +gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= +gopkg.in/ini.v1 v1.66.4 h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4= +gopkg.in/ini.v1 v1.66.4/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/tools/keeper/infrastructure/config/audit.go b/tools/keeper/infrastructure/config/audit.go new file mode 100644 index 0000000000..10f3a6aa1e --- /dev/null +++ b/tools/keeper/infrastructure/config/audit.go @@ -0,0 +1,6 @@ +package config + +type AuditConfig struct { + Enable bool `toml:"enable"` + Database Database `toml:"database"` +} diff --git a/tools/keeper/infrastructure/config/config.go b/tools/keeper/infrastructure/config/config.go new file mode 100644 index 0000000000..d3e884ba8f --- /dev/null +++ b/tools/keeper/infrastructure/config/config.go @@ -0,0 +1,294 @@ +package config + +import ( + "fmt" + "io/fs" + "os" + "runtime" + "time" + + "github.com/spf13/pflag" + "github.com/spf13/viper" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/util/pool" + "github.com/taosdata/taoskeeper/version" +) + +var Name = fmt.Sprintf("%skeeper", version.CUS_PROMPT) + +const ReqIDKey = "QID" +const ModelKey = "model" + +type Config struct { + InstanceID uint8 + Cors web.CorsConfig `toml:"cors"` + Port int `toml:"port"` + LogLevel string `toml:"loglevel"` + GoPoolSize int `toml:"gopoolsize"` + RotationInterval string `toml:"RotationInterval"` + TDengine TDengineRestful `toml:"tdengine"` + Metrics MetricsConfig `toml:"metrics"` + Env Environment `toml:"environment"` + Audit AuditConfig `toml:"audit"` + Log Log `mapstructure:"-"` + + Transfer string + FromTime string + Drop string +} + +type TDengineRestful struct { + Host string `toml:"host"` + Port int `toml:"port"` + Username string `toml:"username"` + Password string `toml:"password"` + Usessl bool `toml:"usessl"` +} + +var Conf *Config + +func InitConfig() *Config { + viper.SetConfigType("toml") + viper.SetConfigName(Name) + viper.AddConfigPath("/etc/taos") + + var cp *string + switch runtime.GOOS { + case "windows": + viper.AddConfigPath(fmt.Sprintf("C:\\%s\\cfg", version.CUS_NAME)) + cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default C:\\%s\\cfg\\%s.toml", version.CUS_NAME, Name)) + default: + viper.AddConfigPath(fmt.Sprintf("/etc/%s", version.CUS_PROMPT)) + cp = pflag.StringP("config", "c", "", fmt.Sprintf("config path default /etc/%s/%s.toml", version.CUS_PROMPT, Name)) + } + + transfer := pflag.StringP("transfer", "", "", "run "+Name+" in command mode, only supports old_taosd_metric. Transfer old metrics data to new tables and exit") + fromTime := pflag.StringP("fromTime", "", "2020-01-01T00:00:00+08:00", "parameter of transfer, example: 2020-01-01T00:00:00+08:00") + drop := pflag.StringP("drop", "", "", "run "+Name+" in command mode, only supports old_taosd_metric_stables. ")
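As a point of reference for reviewers, the loader above consumes a TOML file whose keys mirror the toml tags on Config. The following sketch is illustrative only and is not part of the patch: the sampleConfig type, the literal values, and the embedded TOML fragment are assumptions that echo the pflag defaults registered in init() further down.

package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/viper"
)

// sampleConfig is a hypothetical, trimmed-down mirror of Config, declared
// with mapstructure tags because viper.Unmarshal decodes through
// mapstructure rather than through the toml tags themselves.
type sampleConfig struct {
	Port       int    `mapstructure:"port"`
	LogLevel   string `mapstructure:"loglevel"`
	GoPoolSize int    `mapstructure:"gopoolsize"`
	TDengine   struct {
		Host     string `mapstructure:"host"`
		Port     int    `mapstructure:"port"`
		Username string `mapstructure:"username"`
		Password string `mapstructure:"password"`
		Usessl   bool   `mapstructure:"usessl"`
	} `mapstructure:"tdengine"`
}

func main() {
	// An illustrative taoskeeper.toml fragment; the values repeat the
	// defaults that init() registers and are assumptions, not a shipped file.
	const sample = `
port = 6043
loglevel = "info"
gopoolsize = 50000

[tdengine]
host = "127.0.0.1"
port = 6041
username = "root"
password = "taosdata"
usessl = false
`
	v := viper.New()
	v.SetConfigType("toml")
	if err := v.ReadConfig(bytes.NewBufferString(sample)); err != nil {
		panic(err)
	}
	var c sampleConfig
	if err := v.Unmarshal(&c); err != nil {
		panic(err)
	}
	fmt.Printf("keeper on :%d -> taosAdapter at %s:%d\n", c.Port, c.TDengine.Host, c.TDengine.Port)
}

Because viper's key matching is case-insensitive, the real Config struct decodes the same keys by field name even though its tags are toml rather than mapstructure.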
") + + v := pflag.BoolP("version", "V", false, "Print the version and exit") + help := pflag.BoolP("help", "h", false, "Print this help message and exit") + + pflag.Parse() + + if *help { + fmt.Fprintf(os.Stderr, "Usage of %s v%s:\n", Name, version.Version) + pflag.PrintDefaults() + os.Exit(0) + } + + if *v { + fmt.Printf("%s version: %s\n", Name, version.Version) + fmt.Printf("git: %s\n", version.Gitinfo) + fmt.Printf("build: %s\n", version.BuildInfo) + os.Exit(0) + } + + if *cp != "" { + viper.SetConfigFile(*cp) + } + + viper.SetEnvPrefix(Name) + err := viper.BindPFlags(pflag.CommandLine) + if err != nil { + panic(err) + } + viper.AutomaticEnv() + + gotoStep := false +ReadConfig: + if err := viper.ReadInConfig(); err != nil { + _, isConfigNotFoundError := err.(viper.ConfigFileNotFoundError) + _, isPathError := err.(*fs.PathError) + if isConfigNotFoundError || isPathError { + fmt.Println("config file not found") + + if !gotoStep { + fmt.Println("use keeper.toml instead") + viper.SetConfigName("keeper") + gotoStep = true + goto ReadConfig + } + } else { + panic(err) + } + } + + // if old format, change to new format + if !viper.IsSet("metrics.database.name") { + databaseName := viper.GetString("metrics.database") + viper.Set("metrics.database.name", databaseName) + viper.Set("metrics.database.options", viper.Get("metrics.databaseoptions")) + } + + var conf Config + if err = viper.Unmarshal(&conf); err != nil { + panic(err) + } + + conf.Transfer = *transfer + conf.FromTime = *fromTime + conf.Drop = *drop + + conf.Cors.Init() + pool.Init(conf.GoPoolSize) + conf.Log.SetValue() + + // set log level default value: info + if conf.LogLevel == "" { + conf.LogLevel = "info" + } + if viper.IsSet("log.level") { + conf.LogLevel = conf.Log.Level + } else { + viper.Set("log.level", "") + } + + if !viper.IsSet("logLevel") { + viper.Set("logLevel", "") + } + + Conf = &conf + return &conf +} + +func init() { + viper.SetDefault("instanceId", 64) + _ = viper.BindEnv("instanceId", "TAOS_KEEPER_INSTANCE_ID") + pflag.Int("instanceId", 64, `instance ID. Env "TAOS_KEEPER_INSTANCE_ID"`) + + viper.SetDefault("port", 6043) + _ = viper.BindEnv("port", "TAOS_KEEPER_PORT") + pflag.IntP("port", "P", 6043, `http port. Env "TAOS_KEEPER_PORT"`) + + _ = viper.BindEnv("logLevel", "TAOS_KEEPER_LOG_LEVEL") + pflag.String("logLevel", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`) + + viper.SetDefault("gopoolsize", 50000) + _ = viper.BindEnv("gopoolsize", "TAOS_KEEPER_POOL_SIZE") + pflag.Int("gopoolsize", 50000, `coroutine size. Env "TAOS_KEEPER_POOL_SIZE"`) + + viper.SetDefault("RotationInterval", "15s") + _ = viper.BindEnv("RotationInterval", "TAOS_KEEPER_ROTATION_INTERVAL") + pflag.StringP("RotationInterval", "R", "15s", `interval for refresh metrics, such as "300ms", Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". Env "TAOS_KEEPER_ROTATION_INTERVAL"`) + + viper.SetDefault("tdengine.host", "127.0.0.1") + _ = viper.BindEnv("tdengine.host", "TAOS_KEEPER_TDENGINE_HOST") + pflag.String("tdengine.host", "127.0.0.1", `TDengine server's ip. Env "TAOS_KEEPER_TDENGINE_HOST"`) + + viper.SetDefault("tdengine.port", 6041) + _ = viper.BindEnv("tdengine.port", "TAOS_KEEPER_TDENGINE_PORT") + pflag.Int("tdengine.port", 6041, `TDengine REST server(taosAdapter)'s port. 
Env "TAOS_KEEPER_TDENGINE_PORT"`) + + viper.SetDefault("tdengine.username", "root") + _ = viper.BindEnv("tdengine.username", "TAOS_KEEPER_TDENGINE_USERNAME") + pflag.String("tdengine.username", "root", `TDengine server's username. Env "TAOS_KEEPER_TDENGINE_USERNAME"`) + + viper.SetDefault("tdengine.password", "taosdata") + _ = viper.BindEnv("tdengine.password", "TAOS_KEEPER_TDENGINE_PASSWORD") + pflag.String("tdengine.password", "taosdata", `TDengine server's password. Env "TAOS_KEEPER_TDENGINE_PASSWORD"`) + + viper.SetDefault("tdengine.usessl", false) + _ = viper.BindEnv("tdengine.usessl", "TAOS_KEEPER_TDENGINE_USESSL") + pflag.Bool("tdengine.usessl", false, `TDengine server use ssl or not. Env "TAOS_KEEPER_TDENGINE_USESSL"`) + + viper.SetDefault("metrics.prefix", "") + _ = viper.BindEnv("metrics.prefix", "TAOS_KEEPER_METRICS_PREFIX") + pflag.String("metrics.prefix", "", `prefix in metrics names. Env "TAOS_KEEPER_METRICS_PREFIX"`) + + viper.SetDefault("metrics.database.name", "log") + _ = viper.BindEnv("metrics.database.name", "TAOS_KEEPER_METRICS_DATABASE") + pflag.String("metrics.database.name", "log", `database for storing metrics data. Env "TAOS_KEEPER_METRICS_DATABASE"`) + + viper.SetDefault("metrics.database.options.vgroups", 1) + _ = viper.BindEnv("metrics.database.options.vgroups", "TAOS_KEEPER_METRICS_VGROUPS") + pflag.Int("metrics.database.options.vgroups", 1, `database option vgroups for audit database. Env "TAOS_KEEPER_METRICS_VGROUPS"`) + + viper.SetDefault("metrics.database.options.buffer", 64) + _ = viper.BindEnv("metrics.database.options.buffer", "TAOS_KEEPER_METRICS_BUFFER") + pflag.Int("metrics.database.options.buffer", 64, `database option buffer for audit database. Env "TAOS_KEEPER_METRICS_BUFFER"`) + + viper.SetDefault("metrics.database.options.keep", 90) + _ = viper.BindEnv("metrics.database.options.keep", "TAOS_KEEPER_METRICS_KEEP") + pflag.Int("metrics.database.options.keep", 90, `database option buffer for audit database. Env "TAOS_KEEPER_METRICS_KEEP"`) + + viper.SetDefault("metrics.database.options.cachemodel", "both") + _ = viper.BindEnv("metrics.database.options.cachemodel", "TAOS_KEEPER_METRICS_CACHEMODEL") + pflag.String("metrics.database.options.cachemodel", "both", `database option cachemodel for audit database. Env "TAOS_KEEPER_METRICS_CACHEMODEL"`) + + viper.SetDefault("metrics.tables", []string{}) + _ = viper.BindEnv("metrics.tables", "TAOS_KEEPER_METRICS_TABLES") + pflag.StringArray("metrics.tables", []string{}, `export some tables that are not super table, multiple values split with white space. Env "TAOS_KEEPER_METRICS_TABLES"`) + + viper.SetDefault("environment.incgroup", false) + _ = viper.BindEnv("environment.incgroup", "TAOS_KEEPER_ENVIRONMENT_INCGROUP") + pflag.Bool("environment.incgroup", false, `whether running in cgroup. Env "TAOS_KEEPER_ENVIRONMENT_INCGROUP"`) + + initLog() + + if version.IsEnterprise == "true" { + initAudit() + } +} + +func initLog() { + switch runtime.GOOS { + case "windows": + viper.SetDefault("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME)) + _ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH") + pflag.String("log.path", fmt.Sprintf("C:\\%s\\log", version.CUS_NAME), `log path. Env "TAOS_KEEPER_LOG_PATH"`) + default: + viper.SetDefault("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT)) + _ = viper.BindEnv("log.path", "TAOS_KEEPER_LOG_PATH") + pflag.String("log.path", fmt.Sprintf("/var/log/%s", version.CUS_PROMPT), `log path. 
Env "TAOS_KEEPER_LOG_PATH"`) + } + + _ = viper.BindEnv("log.level", "TAOS_KEEPER_LOG_LEVEL") + pflag.String("log.level", "info", `log level (trace debug info warning error). Env "TAOS_KEEPER_LOG_LEVEL"`) + + viper.SetDefault("log.rotationCount", 5) + _ = viper.BindEnv("log.rotationCount", "TAOS_KEEPER_LOG_ROTATION_COUNT") + pflag.Uint("log.rotationCount", 5, `log rotation count. Env "TAOS_KEEPER_LOG_ROTATION_COUNT"`) + + viper.SetDefault("log.keepDays", 30) + _ = viper.BindEnv("log.keepDays", "TAOS_KEEPER_LOG_KEEP_DAYS") + pflag.Uint("log.keepDays", 30, `log retention days, must be a positive integer. Env "TAOS_KEEPER_LOG_KEEP_DAYS"`) + + viper.SetDefault("log.rotationTime", time.Hour*24) + _ = viper.BindEnv("log.rotationTime", "TAOS_KEEPER_LOG_ROTATION_TIME") + pflag.Duration("log.rotationTime", time.Hour*24, `deprecated: log rotation time always 24 hours. Env "TAOS_KEEPER_LOG_ROTATION_TIME"`) + + viper.SetDefault("log.rotationSize", "1GB") + _ = viper.BindEnv("log.rotationSize", "TAOS_KEEPER_LOG_ROTATION_SIZE") + pflag.String("log.rotationSize", "1GB", `log rotation size(KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_ROTATION_SIZE"`) + + viper.SetDefault("log.compress", false) + _ = viper.BindEnv("log.compress", "TAOS_KEEPER_LOG_COMPRESS") + pflag.Bool("log.compress", false, `whether to compress old log. Env "TAOS_KEEPER_LOG_COMPRESS"`) + + viper.SetDefault("log.reservedDiskSize", "1GB") + _ = viper.BindEnv("log.reservedDiskSize", "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE") + pflag.String("log.reservedDiskSize", "1GB", `reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_KEEPER_LOG_RESERVED_DISK_SIZE"`) +} + +func initAudit() { + viper.SetDefault("audit.enable", "true") + _ = viper.BindEnv("audit.enable", "TAOS_KEEPER_AUDIT_ENABLE") + pflag.String("audit.enable", "true", `database for enable audit data. Env "TAOS_KEEPER_AUDIT_ENABLE"`) + + viper.SetDefault("audit.database.name", "audit") + _ = viper.BindEnv("audit.database.name", "TAOS_KEEPER_AUDIT_DATABASE") + pflag.String("audit.database.name", "audit", `database for storing audit data. Env "TAOS_KEEPER_AUDIT_DATABASE"`) + + viper.SetDefault("audit.database.options.vgroups", 1) + _ = viper.BindEnv("audit.database.options.vgroups", "TAOS_KEEPER_AUDIT_VGROUPS") + pflag.Int("audit.database.options.vgroups", 1, `database option vgroups for audit database. Env "TAOS_KEEPER_AUDIT_VGROUPS"`) + + viper.SetDefault("audit.database.options.buffer", 16) + _ = viper.BindEnv("audit.database.options.buffer", "TAOS_KEEPER_AUDIT_BUFFER") + pflag.Int("audit.database.options.buffer", 16, `database option buffer for audit database. Env "TAOS_KEEPER_AUDIT_BUFFER"`) + + viper.SetDefault("audit.database.options.cachemodel", "both") + _ = viper.BindEnv("audit.database.options.cachemodel", "TAOS_KEEPER_AUDIT_CACHEMODEL") + pflag.String("audit.database.options.cachemodel", "both", `database option cachemodel for audit database. 
Env "TAOS_KEEPER_AUDIT_CACHEMODEL"`) +} diff --git a/tools/keeper/infrastructure/config/log.go b/tools/keeper/infrastructure/config/log.go new file mode 100644 index 0000000000..dc67d877e7 --- /dev/null +++ b/tools/keeper/infrastructure/config/log.go @@ -0,0 +1,29 @@ +package config + +import ( + "time" + + "github.com/spf13/viper" +) + +type Log struct { + Level string + Path string + RotationCount uint + RotationTime time.Duration + RotationSize uint + KeepDays uint + Compress bool + ReservedDiskSize uint +} + +func (l *Log) SetValue() { + l.Level = viper.GetString("log.level") + l.Path = viper.GetString("log.path") + l.RotationCount = viper.GetUint("log.rotationCount") + l.RotationTime = viper.GetDuration("log.rotationTime") + l.RotationSize = viper.GetSizeInBytes("log.rotationSize") + l.KeepDays = viper.GetUint("log.keepDays") + l.Compress = viper.GetBool("log.compress") + l.ReservedDiskSize = viper.GetSizeInBytes("log.reservedDiskSize") +} diff --git a/tools/keeper/infrastructure/config/metric_test.go b/tools/keeper/infrastructure/config/metric_test.go new file mode 100644 index 0000000000..5d20cdc5ec --- /dev/null +++ b/tools/keeper/infrastructure/config/metric_test.go @@ -0,0 +1,85 @@ +package config_test + +import ( + "fmt" + "io" + "os" + "runtime" + "testing" + + "github.com/BurntSushi/toml" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/version" +) + +func TestConfig(t *testing.T) { + data := ` +# Start with debug middleware for gin +debug = true +# Listen port, default is 6043 +port = 9000 +# log level +loglevel = "error" +# go pool size +gopoolsize = 5000 +# interval for TDengine metrics +RotationInterval = "10s" +[tdengine] +address = "http://localhost:6041" +authtype = "Basic" +username = "root" +password = "taosdata" +` + var c config.Config + _, err := toml.Decode(data, &c) + if err != nil { + t.Error(err) + return + } + assert.EqualValues(t, c, c) + fmt.Print(c) +} + +func TestBakConfig(t *testing.T) { + isOk := copyConfigFile() + if isOk { + config.Name = "aaa" + config.InitConfig() + config.Name = "taoskeeper" + } +} + +func copyConfigFile() bool { + var sourceFile string + var destinationFile string + switch runtime.GOOS { + case "windows": + sourceFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "taoskeeper") + destinationFile = fmt.Sprintf("C:\\%s\\cfg\\%s.toml", version.CUS_NAME, "keeper") + default: + sourceFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "taoskeeper") + destinationFile = fmt.Sprintf("/etc/%s/%s.toml", version.CUS_PROMPT, "keeper") + } + _, err := os.Stat(sourceFile) + if os.IsNotExist(err) { + return false + } + + source, err := os.Open(sourceFile) //open the source file + if err != nil { + panic(err) + } + defer source.Close() + + destination, err := os.Create(destinationFile) //create the destination file + if err != nil { + panic(err) + } + defer destination.Close() + _, err = io.Copy(destination, source) //copy the contents of source to destination file + if err != nil { + panic(err) + } + return true +} diff --git a/tools/keeper/infrastructure/config/metrics.go b/tools/keeper/infrastructure/config/metrics.go new file mode 100644 index 0000000000..c41544fc39 --- /dev/null +++ b/tools/keeper/infrastructure/config/metrics.go @@ -0,0 +1,29 @@ +package config + +type MetricsConfig struct { + Cluster string `toml:"cluster"` + Prefix string `toml:"prefix"` + Database Database `toml:"database"` + Tables []string `toml:"tables"` +} + +type 
TaosAdapter struct { + Address []string `toml:"address"` +} + +type Metric struct { + Alias string `toml:"alias"` + Help string `toml:"help"` + Unit string `toml:"unit"` + Type string `toml:"type"` + Labels map[string]string `toml:"labels"` +} + +type Environment struct { + InCGroup bool `toml:"incgroup"` +} + +type Database struct { + Name string `toml:"name"` + Options map[string]interface{} `toml:"options"` +} diff --git a/tools/keeper/infrastructure/log/empty_test.go b/tools/keeper/infrastructure/log/empty_test.go new file mode 100644 index 0000000000..468c02173b --- /dev/null +++ b/tools/keeper/infrastructure/log/empty_test.go @@ -0,0 +1,8 @@ +package log + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/infrastructure/log/log.go b/tools/keeper/infrastructure/log/log.go new file mode 100644 index 0000000000..0a54e99eb9 --- /dev/null +++ b/tools/keeper/infrastructure/log/log.go @@ -0,0 +1,278 @@ +package log + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + "sync" + "time" + + "github.com/sirupsen/logrus" + rotatelogs "github.com/taosdata/file-rotatelogs/v2" + "github.com/taosdata/taoskeeper/infrastructure/config" + + "github.com/taosdata/taoskeeper/version" +) + +var logger = logrus.New() +var ServerID = randomID() +var globalLogFormatter = &TaosLogFormatter{} +var finish = make(chan struct{}) +var exist = make(chan struct{}) + +var bufferPool = &defaultPool{ + pool: &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + }, +} + +type defaultPool struct { + pool *sync.Pool +} + +func (p *defaultPool) Put(buf *bytes.Buffer) { + buf.Reset() + p.pool.Put(buf) +} + +func (p *defaultPool) Get() *bytes.Buffer { + return p.pool.Get().(*bytes.Buffer) +} + +type FileHook struct { + formatter logrus.Formatter + writer io.Writer + buf *bytes.Buffer + sync.Mutex +} + +func NewFileHook(formatter logrus.Formatter, writer io.WriteCloser) *FileHook { + fh := &FileHook{formatter: formatter, writer: writer, buf: &bytes.Buffer{}} + ticker := time.NewTicker(time.Second * 5) + go func() { + for { + select { + case <-ticker.C: + //can be optimized by tryLock + fh.Lock() + if fh.buf.Len() > 0 { + fh.flush() + } + fh.Unlock() + case <-exist: + fh.Lock() + fh.flush() + fh.Unlock() + writer.Close() + ticker.Stop() + close(finish) + return + } + } + }() + return fh +} + +func (f *FileHook) Levels() []logrus.Level { + return logrus.AllLevels +} + +func (f *FileHook) Fire(entry *logrus.Entry) error { + if entry.Buffer == nil { + entry.Buffer = bufferPool.Get() + defer func() { + bufferPool.Put(entry.Buffer) + entry.Buffer = nil + }() + } + data, err := f.formatter.Format(entry) + if err != nil { + return err + } + f.Lock() + f.buf.Write(data) + if f.buf.Len() > 1024 || entry.Level == logrus.FatalLevel || entry.Level == logrus.PanicLevel { + err = f.flush() + } + f.Unlock() + return err +} + +func (f *FileHook) flush() error { + _, err := f.writer.Write(f.buf.Bytes()) + f.buf.Reset() + return err +} + +var once sync.Once + +func ConfigLog() { + once.Do(func() { + err := SetLevel(config.Conf.LogLevel) + if err != nil { + panic(err) + } + writer, err := rotatelogs.New( + filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_%%Y%%m%%d.log", version.CUS_PROMPT, config.Conf.InstanceID)), + rotatelogs.WithRotationCount(config.Conf.Log.RotationCount), + rotatelogs.WithRotationTime(time.Hour*24), + rotatelogs.WithRotationSize(int64(config.Conf.Log.RotationSize)), + 
rotatelogs.WithReservedDiskSize(int64(config.Conf.Log.ReservedDiskSize)), + rotatelogs.WithRotateGlobPattern(filepath.Join(config.Conf.Log.Path, fmt.Sprintf("%skeeper_%d_*.log*", version.CUS_PROMPT, config.Conf.InstanceID))), + rotatelogs.WithCompress(config.Conf.Log.Compress), + rotatelogs.WithCleanLockFile(filepath.Join(config.Conf.Log.Path, fmt.Sprintf(".%skeeper_%d_rotate_lock", version.CUS_PROMPT, config.Conf.InstanceID))), + rotatelogs.ForceNewFile(), + rotatelogs.WithMaxAge(time.Hour*24*time.Duration(config.Conf.Log.KeepDays)), + ) + if err != nil { + panic(err) + } + fmt.Fprintln(writer, "==================================================") + fmt.Fprintln(writer, " new log file") + fmt.Fprintln(writer, "==================================================") + fmt.Fprintf(writer, "config:%+v\n", config.Conf) + + fmt.Fprintf(writer, "%-45s%v\n", "version", version.Version) + fmt.Fprintf(writer, "%-45s%v\n", "gitinfo", version.CommitID) + fmt.Fprintf(writer, "%-45s%v\n", "buildinfo", version.BuildInfo) + + hook := NewFileHook(globalLogFormatter, writer) + logger.AddHook(hook) + }) +} + +func SetLevel(level string) error { + l, err := logrus.ParseLevel(level) + if err != nil { + return err + } + logger.SetLevel(l) + return nil +} + +func GetLogger(model string) *logrus.Entry { + return logger.WithFields(logrus.Fields{config.ModelKey: model}) +} + +func init() { + logrus.SetBufferPool(bufferPool) + logger.SetFormatter(globalLogFormatter) + logger.SetOutput(os.Stdout) +} + +func randomID() string { + return fmt.Sprintf("%08d", os.Getpid()) +} + +type TaosLogFormatter struct { +} + +func (t *TaosLogFormatter) Format(entry *logrus.Entry) ([]byte, error) { + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + b.Reset() + b.WriteString(entry.Time.Format("01/02 15:04:05.000000")) + b.WriteByte(' ') + b.WriteString(ServerID) + b.WriteByte(' ') + v, exist := entry.Data[config.ModelKey] + if exist && v != nil { + b.WriteString(v.(string)) + b.WriteByte(' ') + } else { + b.WriteString("CLI ") + } + switch entry.Level { + case logrus.PanicLevel: + b.WriteString("PANIC ") + case logrus.FatalLevel: + b.WriteString("FATAL ") + case logrus.ErrorLevel: + b.WriteString("ERROR ") + case logrus.WarnLevel: + b.WriteString("WARN ") + case logrus.InfoLevel: + b.WriteString("INFO ") + case logrus.DebugLevel: + b.WriteString("DEBUG ") + case logrus.TraceLevel: + b.WriteString("TRACE ") + } + + // request id + v, exist = entry.Data[config.ReqIDKey] + if exist && v != nil { + b.WriteString(config.ReqIDKey) + b.WriteByte(':') + fmt.Fprintf(b, "0x%x ", v) + } + if len(entry.Message) > 0 && entry.Message[len(entry.Message)-1] == '\n' { + b.WriteString(entry.Message[:len(entry.Message)-1]) + } else { + b.WriteString(entry.Message) + } + // collect the remaining fields (model and request id are already handled above) + keys := make([]string, 0, len(entry.Data)) + for k := range entry.Data { + if k == config.ModelKey || k == config.ReqIDKey { + continue + } + keys = append(keys, k) + } + for _, k := range keys { + v := entry.Data[k] + if k == config.ReqIDKey && v == nil { + continue + } + b.WriteString(", ") + b.WriteString(k) + b.WriteByte(':') + fmt.Fprintf(b, "%v", v) + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func IsDebug() bool { + return logger.IsLevelEnabled(logrus.DebugLevel) +} + +func GetLogLevel() logrus.Level { + return logger.Level +} + +var zeroTime = time.Time{} +var zeroDuration = time.Duration(0) + +func GetLogNow(isDebug bool) time.Time { + if isDebug { + return time.Now() + } + return zeroTime +} 
+func GetLogDuration(isDebug bool, s time.Time) time.Duration { + if isDebug { + return time.Since(s) + } + return zeroDuration +} + +func Close(ctx context.Context) { + close(exist) + select { + case <-finish: + return + case <-ctx.Done(): + return + } +} diff --git a/tools/keeper/infrastructure/log/log_test.go b/tools/keeper/infrastructure/log/log_test.go new file mode 100644 index 0000000000..656cda4bbc --- /dev/null +++ b/tools/keeper/infrastructure/log/log_test.go @@ -0,0 +1,23 @@ +package log + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/infrastructure/config" +) + +func TestConfigLog(t *testing.T) { + config.InitConfig() + config.Conf.LogLevel = "debug" + ConfigLog() + debug, _ := logrus.ParseLevel("debug") + assert.Equal(t, logger.Level, debug) + assert.Equal(t, true, IsDebug()) + fmt.Print(GetLogNow(true), GetLogDuration(true, time.Now())) + Close(context.Background()) +} diff --git a/tools/keeper/infrastructure/log/web.go b/tools/keeper/infrastructure/log/web.go new file mode 100644 index 0000000000..4aa244448b --- /dev/null +++ b/tools/keeper/infrastructure/log/web.go @@ -0,0 +1,55 @@ +package log + +import ( + "time" + + "github.com/gin-gonic/gin" + "github.com/sirupsen/logrus" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func GinLog() gin.HandlerFunc { + logger := GetLogger("WEB") + + return func(c *gin.Context) { + qid := util.GetQid(c.GetHeader("X-QID")) + + logger := logger.WithFields( + logrus.Fields{config.ReqIDKey: qid}, + ) + + startTime := time.Now() + c.Next() + endTime := time.Now() + latencyTime := endTime.Sub(startTime) + // read the status code after the handler chain has run; before c.Next() it is always 200 + statusCode := c.Writer.Status() + reqMethod := c.Request.Method + reqUri := c.Request.RequestURI + + clientIP := c.ClientIP() + + if statusCode != 200 { + logger.Errorf("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri) + return + } + logger.Infof("finish request, status_code:%3d, latency:%v, client_ip:%s, method:%s, uri:%s", statusCode, latencyTime, clientIP, reqMethod, reqUri) + } +} + +type recoverLog struct { + logger logrus.FieldLogger +} + +func (r *recoverLog) Write(p []byte) (n int, err error) { + r.logger.Errorln(string(p)) + return len(p), nil +} + +func GinRecoverLog() gin.HandlerFunc { + logger := GetLogger("WEB") + return func(c *gin.Context) { + writer := &recoverLog{logger: logger} + gin.RecoveryWithWriter(writer)(c) + } +} diff --git a/tools/keeper/main.go b/tools/keeper/main.go new file mode 100644 index 0000000000..43432bde3d --- /dev/null +++ b/tools/keeper/main.go @@ -0,0 +1,11 @@ +package main + +import ( + "github.com/taosdata/taoskeeper/system" +) + +func main() { + r := system.Init() + system.Start(r) + // config.IsEnterprise +} diff --git a/tools/keeper/monitor/collect.go b/tools/keeper/monitor/collect.go new file mode 100644 index 0000000000..652ae1f1ce --- /dev/null +++ b/tools/keeper/monitor/collect.go @@ -0,0 +1,99 @@ +package monitor + +import ( + "math" + "os" + "runtime" + + "github.com/shirou/gopsutil/v3/mem" + "github.com/shirou/gopsutil/v3/process" + "github.com/taosdata/taoskeeper/util" +) + +type SysCollector interface { + CpuPercent() (float64, error) + MemPercent() (float64, error) +} + +type NormalCollector struct { + p *process.Process +} + +func NewNormalCollector() (*NormalCollector, error) { + p, err := process.NewProcess(int32(os.Getpid())) + if err != 
nil { + return nil, err + } + return &NormalCollector{p: p}, nil +} + +func (n *NormalCollector) CpuPercent() (float64, error) { + cpuPercent, err := n.p.Percent(0) + if err != nil { + return 0, err + } + return cpuPercent / float64(runtime.NumCPU()), nil +} + +func (n *NormalCollector) MemPercent() (float64, error) { + memPercent, err := n.p.MemoryPercent() + if err != nil { + return 0, err + } + return float64(memPercent), nil +} + +const ( + CGroupCpuQuotaPath = "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" + CGroupCpuPeriodPath = "/sys/fs/cgroup/cpu/cpu.cfs_period_us" + CGroupMemLimitPath = "/sys/fs/cgroup/memory/memory.limit_in_bytes" +) + +type CGroupCollector struct { + p *process.Process + cpuCore float64 + totalMemory uint64 +} + +func NewCGroupCollector() (*CGroupCollector, error) { + p, err := process.NewProcess(int32(os.Getpid())) + if err != nil { + return nil, err + } + cpuPeriod, err := util.ReadUint(CGroupCpuPeriodPath) + if err != nil { + return nil, err + } + cpuQuota, err := util.ReadUint(CGroupCpuQuotaPath) + if err != nil { + return nil, err + } + cpuCore := float64(cpuQuota) / float64(cpuPeriod) + limitMemory, err := util.ReadUint(CGroupMemLimitPath) + if err != nil { + return nil, err + } + machineMemory, err := mem.VirtualMemory() + if err != nil { + return nil, err + } + totalMemory := uint64(math.Min(float64(limitMemory), float64(machineMemory.Total))) + return &CGroupCollector{p: p, cpuCore: cpuCore, totalMemory: totalMemory}, nil +} + +func (c *CGroupCollector) CpuPercent() (float64, error) { + cpuPercent, err := c.p.Percent(0) + if err != nil { + return 0, err + } + cpuPercent = cpuPercent / c.cpuCore + return cpuPercent, nil +} + +func (c *CGroupCollector) MemPercent() (float64, error) { + memInfo, err := c.p.MemoryInfo() + if err != nil { + return 0, err + } + return 100 * float64(memInfo.RSS) / float64(c.totalMemory), nil +} diff --git a/tools/keeper/monitor/empty_test.go b/tools/keeper/monitor/empty_test.go new file mode 100644 index 0000000000..689acfac4c --- /dev/null +++ b/tools/keeper/monitor/empty_test.go @@ -0,0 +1,8 @@ +package monitor + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/monitor/monitor.go b/tools/keeper/monitor/monitor.go new file mode 100644 index 0000000000..6f3083e866 --- /dev/null +++ b/tools/keeper/monitor/monitor.go @@ -0,0 +1,89 @@ +package monitor + +import ( + "context" + "fmt" + "os" + "time" + + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("MON") + +func StartMonitor(identity string, conf *config.Config, reporter *api.Reporter) { + if len(identity) == 0 { + hostname, err := os.Hostname() + if err != nil { + logger.Errorf("can not get hostname, error:%s", err) + } + if len(hostname) > 40 { + hostname = hostname[:40] + } + identity = fmt.Sprintf("%s:%d", hostname, conf.Port) + } + + systemStatus := make(chan SysStatus) + _ = pool.GoroutinePool.Submit(func() { + var ( + cpuPercent float64 + memPercent float64 + totalReport int + ) + + for status := range systemStatus { + if status.CpuError == nil { + cpuPercent = status.CpuPercent + } + if status.MemError == nil { + memPercent = status.MemPercent + } + + totalResp := reporter.GetTotalRep() + for i := 0; i < 3; i++ { + totalReport = totalResp.Load().(int) + if 
totalResp.CompareAndSwap(totalReport, 0) { + break + } + logger.Warn("Reset keeper_monitor total resp via cas failed! Maybe too many concurrent updates") + reporter.GetTotalRep().Store(0) + } + + var kn string + if len(identity) <= util.MAX_TABLE_NAME_LEN { + kn = util.ToValidTableName(identity) + } else { + kn = util.GetMd5HexStr(identity) + } + + sql := fmt.Sprintf("insert into `km_%s` using keeper_monitor tags ('%s') values ( now, "+ + " %f, %f, %d)", kn, identity, cpuPercent, memPercent, totalReport) + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, + conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + if err != nil { + logger.Errorf("connect to database error, msg:%s", err) + return + } + + ctx := context.Background() + if _, err = conn.Exec(ctx, sql, util.GetQidOwn()); err != nil { + logger.Errorf("execute sql:%s, error:%s", sql, err) + } + + if err := conn.Close(); err != nil { + logger.Errorf("close connection error, msg:%s", err) + } + } + }) + SysMonitor.Register(systemStatus) + interval, err := time.ParseDuration(conf.RotationInterval) + if err != nil { + panic(err) + } + Start(interval, conf.Env.InCGroup) +} diff --git a/tools/keeper/monitor/monitor_test.go b/tools/keeper/monitor/monitor_test.go new file mode 100644 index 0000000000..b2b860dcaa --- /dev/null +++ b/tools/keeper/monitor/monitor_test.go @@ -0,0 +1,58 @@ +package monitor + +import ( + "context" + "fmt" + "os" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/util" + + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" +) + +func TestStart(t *testing.T) { + conf := config.InitConfig() + if conf == nil { + panic("config error") + } + conf.Env.InCGroup = true + cpuCgroupDir := "/sys/fs/cgroup/cpu" + if _, err := os.Stat(cpuCgroupDir); os.IsNotExist(err) { + conf.Env.InCGroup = false + } + log.ConfigLog() + router := web.CreateRouter(false, &conf.Cors, false) + conf.Metrics.Database.Name = "monitor" + reporter := api.NewReporter(conf) + reporter.Init(router) + conf.RotationInterval = "1s" + StartMonitor("", conf, reporter) + time.Sleep(2 * time.Second) + for k := range SysMonitor.outputs { + SysMonitor.Deregister(k) + } + + conn, err := db.NewConnectorWithDb(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.Metrics.Database.Name, conf.TDengine.Usessl) + assert.NoError(t, err) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", conf.Metrics.Database.Name), util.GetQidOwn()) + +} + +func TestParseUint(t *testing.T) { + num, err := util.ParseUint("-1", 10, 8) + assert.Equal(t, nil, err) + assert.Equal(t, uint64(0), num) + num, err = util.ParseUint("0", 10, 8) + assert.Equal(t, nil, err) + assert.Equal(t, uint64(0), num) + num, err = util.ParseUint("257", 10, 8) + assert.Equal(t, "strconv.ParseUint: parsing \"257\": value out of range", err.Error()) + assert.Equal(t, uint64(0), num) +} diff --git a/tools/keeper/monitor/system.go b/tools/keeper/monitor/system.go new file mode 100644 index 0000000000..7d5ef5bd54 --- /dev/null +++ b/tools/keeper/monitor/system.go @@ -0,0 +1,97 @@ +package monitor + +import ( + "math" + "runtime" + "sync" + "time" + + "github.com/taosdata/taoskeeper/util/pool" +) + +type SysStatus struct { + CollectTime time.Time + CpuPercent float64 + CpuError 
error + MemPercent float64 + MemError error + GoroutineCounts int + ThreadCounts int +} + +type sysMonitor struct { + sync.Mutex + collectDuration time.Duration + collector SysCollector + status *SysStatus + outputs map[chan<- SysStatus]struct{} + ticker *time.Ticker +} + +func (s *sysMonitor) collect() { + s.status.CollectTime = time.Now() + s.status.CpuPercent, s.status.CpuError = s.collector.CpuPercent() + s.status.MemPercent, s.status.MemError = s.collector.MemPercent() + s.status.GoroutineCounts = runtime.NumGoroutine() + s.status.ThreadCounts, _ = runtime.ThreadCreateProfile(nil) + // skip when inf or nan + if math.IsInf(s.status.CpuPercent, 0) || math.IsNaN(s.status.CpuPercent) || + math.IsInf(s.status.MemPercent, 0) || math.IsNaN(s.status.MemPercent) { + return + } + + s.Lock() + for output := range s.outputs { + select { + case output <- *s.status: + default: + } + } + s.Unlock() +} + +func (s *sysMonitor) Register(c chan<- SysStatus) { + s.Lock() + if s.outputs == nil { + s.outputs = map[chan<- SysStatus]struct{}{ + c: {}, + } + } else { + s.outputs[c] = struct{}{} + } + s.Unlock() +} + +func (s *sysMonitor) Deregister(c chan<- SysStatus) { + s.Lock() + if s.outputs != nil { + delete(s.outputs, c) + } + s.Unlock() +} + +var SysMonitor = &sysMonitor{status: &SysStatus{}} + +func Start(collectDuration time.Duration, inCGroup bool) { + SysMonitor.collectDuration = collectDuration + if inCGroup { + collector, err := NewCGroupCollector() + if err != nil { + logger.Errorf("new cgroup collector error, msg:%s", err) + } + SysMonitor.collector = collector + } else { + collector, err := NewNormalCollector() + if err != nil { + logger.Errorf("new normal collector error, msg:%s", err) + } + SysMonitor.collector = collector + } + SysMonitor.collect() + SysMonitor.ticker = time.NewTicker(SysMonitor.collectDuration) + pool.GoroutinePool.Submit(func() { + for range SysMonitor.ticker.C { + SysMonitor.collect() + } + }) +} diff --git a/tools/keeper/process/builder.go b/tools/keeper/process/builder.go new file mode 100644 index 0000000000..d6e37534bf --- /dev/null +++ b/tools/keeper/process/builder.go @@ -0,0 +1,55 @@ +package process + +import ( + "context" + "fmt" + + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" +) + +var builderLogger = log.GetLogger("BLD") + +func ExpandMetricsFromConfig(ctx context.Context, conn *db.Connector, cfg *config.MetricsConfig) (tables map[string]struct{}, err error) { + tables = make(map[string]struct{}) + for _, name := range cfg.Tables { + builderLogger.Debug("normal table: ", name) + + _, exist := tables[name] + if exist { + builderLogger.Debug(name, " already exists in config") + continue + } + tables[name] = struct{}{} + } + + sql := fmt.Sprintf(GetStableNameListSql(), cfg.Database.Name) + data, err := conn.Query(ctx, sql, util.GetQidOwn()) + if err != nil { + return nil, err + } + builderLogger.Debugf("show stables:%s", sql) + + for _, info := range data.Data { + name := info[0].(string) + builderLogger.Debug("stable:", info) + + _, exist := tables[name] + if exist { + builderLogger.Debug(name, " already exists in config") + continue + } + tables[name] = struct{}{} + } + return +} + +func GetStableNameListSql() string { + return "select stable_name from information_schema.ins_stables " + + " where db_name = '%s' " + + " and (stable_name not like 'taosx\\_%%')" + + " and (stable_name not like 'taosadapter%%')" + + " and 
(stable_name != 'temp_dir' and stable_name != 'data_dir')" +} diff --git a/tools/keeper/process/empty_test.go b/tools/keeper/process/empty_test.go new file mode 100644 index 0000000000..6718d12525 --- /dev/null +++ b/tools/keeper/process/empty_test.go @@ -0,0 +1,8 @@ +package process + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/process/handle.go b/tools/keeper/process/handle.go new file mode 100644 index 0000000000..980902daca --- /dev/null +++ b/tools/keeper/process/handle.go @@ -0,0 +1,666 @@ +package process + +import ( + "context" + "errors" + "fmt" + "math" + "strings" + "sync" + "time" + + "github.com/prometheus/client_golang/prometheus" + taosError "github.com/taosdata/driver-go/v3/errors" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/util" + "github.com/taosdata/taoskeeper/util/pool" +) + +var logger = log.GetLogger("HND") + +var metricNameMap = map[string]string{ + "taosd_cluster_basic_first_ep": "cluster_info_first_ep", + "taosd_cluster_basic_first_ep_dnode_id": "cluster_info_first_ep_dnode_id", + "taosd_cluster_basic_cluster_version": "cluster_info_version", + + "taosd_cluster_info_cluster_uptime": "cluster_info_master_uptime", + "taosd_cluster_info_dbs_total": "cluster_info_dbs_total", + "taosd_cluster_info_tbs_total": "cluster_info_tbs_total", + "taosd_cluster_info_stbs_total": "cluster_info_stbs_total", + "taosd_cluster_info_dnodes_total": "cluster_info_dnodes_total", + "taosd_cluster_info_dnodes_alive": "cluster_info_dnodes_alive", + "taosd_cluster_info_mnodes_total": "cluster_info_mnodes_total", + "taosd_cluster_info_mnodes_alive": "cluster_info_mnodes_alive", + "taosd_cluster_info_vgroups_total": "cluster_info_vgroups_total", + "taosd_cluster_info_vgroups_alive": "cluster_info_vgroups_alive", + "taosd_cluster_info_vnodes_total": "cluster_info_vnodes_total", + "taosd_cluster_info_vnodes_alive": "cluster_info_vnodes_alive", + "taosd_cluster_info_connections_total": "cluster_info_connections_total", + "taosd_cluster_info_topics_total": "cluster_info_topics_total", + "taosd_cluster_info_streams_total": "cluster_info_streams_total", + + "taosd_cluster_info_grants_expire_time": "grants_info_expire_time", + "taosd_cluster_info_grants_timeseries_used": "grants_info_timeseries_used", + "taosd_cluster_info_grants_timeseries_total": "grants_info_timeseries_total", + + "taosd_dnodes_info_uptime": "dnodes_info_uptime", + "taosd_dnodes_info_cpu_engine": "dnodes_info_cpu_engine", + "taosd_dnodes_info_cpu_system": "dnodes_info_cpu_system", + "taosd_dnodes_info_cpu_cores": "dnodes_info_cpu_cores", + "taosd_dnodes_info_mem_engine": "dnodes_info_mem_engine", + "taosd_dnodes_info_mem_free": "dnodes_info_mem_system", + "taosd_dnodes_info_mem_total": "dnodes_info_mem_total", + "taosd_dnodes_info_disk_engine": "dnodes_info_disk_engine", + "taosd_dnodes_info_disk_used": "dnodes_info_disk_used", + "taosd_dnodes_info_disk_total": "dnodes_info_disk_total", + "taosd_dnodes_info_system_net_in": "dnodes_info_net_in", + "taosd_dnodes_info_system_net_out": "dnodes_info_net_out", + "taosd_dnodes_info_io_read": "dnodes_info_io_read", + "taosd_dnodes_info_io_write": "dnodes_info_io_write", + "taosd_dnodes_info_io_read_disk": "dnodes_info_io_read_disk", + "taosd_dnodes_info_io_write_disk": "dnodes_info_io_write_disk", + "taosd_dnodes_info_vnodes_num": "dnodes_info_vnodes_num", + "taosd_dnodes_info_masters": "dnodes_info_masters", 
+ "taosd_dnodes_info_has_mnode": "dnodes_info_has_mnode", + "taosd_dnodes_info_has_qnode": "dnodes_info_has_qnode", + "taosd_dnodes_info_has_snode": "dnodes_info_has_snode", + "taosd_dnodes_info_has_bnode": "dnodes_info_has_bnode", + "taosd_dnodes_info_errors": "dnodes_info_errors", + "taosd_dnodes_info_error_log_count": "log_summary_error", + "taosd_dnodes_info_info_log_count": "log_summary_info", + "taosd_dnodes_info_debug_log_count": "log_summary_debug", + "taosd_dnodes_info_trace_log_count": "log_summary_trace", + + "taosd_dnodes_status_status": "d_info_status", + + "taosd_mnodes_info_role": "m_info_role", +} + +var metricTypeMap = map[string]CollectType{ + "taosd_cluster_basic_first_ep": Info, + "taosd_cluster_basic_first_ep_dnode_id": Counter, + "taosd_cluster_basic_cluster_version": Info, + + "taosd_cluster_info_cluster_uptime": Gauge, + "taosd_cluster_info_dbs_total": Counter, + "taosd_cluster_info_tbs_total": Counter, + "taosd_cluster_info_stbs_total": Counter, + "taosd_cluster_info_dnodes_total": Counter, + "taosd_cluster_info_dnodes_alive": Counter, + "taosd_cluster_info_mnodes_total": Counter, + "taosd_cluster_info_mnodes_alive": Counter, + "taosd_cluster_info_vgroups_total": Counter, + "taosd_cluster_info_vgroups_alive": Counter, + "taosd_cluster_info_vnodes_total": Counter, + "taosd_cluster_info_vnodes_alive": Counter, + "taosd_cluster_info_connections_total": Counter, + "taosd_cluster_info_topics_total": Counter, + "taosd_cluster_info_streams_total": Counter, + + "taosd_cluster_info_grants_expire_time": Counter, + "taosd_cluster_info_grants_timeseries_used": Counter, + "taosd_cluster_info_grants_timeseries_total": Counter, + + "taosd_dnodes_info_uptime": Gauge, + "taosd_dnodes_info_cpu_engine": Gauge, + "taosd_dnodes_info_cpu_system": Gauge, + "taosd_dnodes_info_cpu_cores": Gauge, + "taosd_dnodes_info_mem_engine": Counter, + "taosd_dnodes_info_mem_free": Counter, + "taosd_dnodes_info_mem_total": Counter, + "taosd_dnodes_info_disk_engine": Counter, + "taosd_dnodes_info_disk_used": Counter, + "taosd_dnodes_info_disk_total": Counter, + "taosd_dnodes_info_system_net_in": Gauge, + "taosd_dnodes_info_system_net_out": Gauge, + "taosd_dnodes_info_io_read": Gauge, + "taosd_dnodes_info_io_write": Gauge, + "taosd_dnodes_info_io_read_disk": Gauge, + "taosd_dnodes_info_io_write_disk": Gauge, + "taosd_dnodes_info_vnodes_num": Counter, + "taosd_dnodes_info_masters": Counter, + "taosd_dnodes_info_has_mnode": Counter, + "taosd_dnodes_info_has_qnode": Counter, + "taosd_dnodes_info_has_snode": Counter, + "taosd_dnodes_info_has_bnode": Counter, + "taosd_dnodes_info_errors": Counter, + "taosd_dnodes_info_error_log_count": Counter, + "taosd_dnodes_info_info_log_count": Counter, + "taosd_dnodes_info_debug_log_count": Counter, + "taosd_dnodes_info_trace_log_count": Counter, + + "taosd_dnodes_status_status": Info, + + "taosd_mnodes_info_role": Info, +} + +type CollectType string + +const ( + Counter CollectType = "counter" + Gauge CollectType = "gauge" + Info CollectType = "info" + Summary CollectType = "summary" +) + +type Processor struct { + prefix string + db string + tableMap map[string]*Table //tableName:*Table{} + metricMap map[string]*Metric //Fqname:*Metric{} + tableList []string + ctx context.Context + rotationInterval time.Duration + exitChan chan struct{} + dbConn *db.Connector + summaryTable map[string]*Table + tables map[string]struct{} +} + +func (p *Processor) Describe(descs chan<- *prometheus.Desc) { + for _, metric := range p.metricMap { + descs <- metric.Desc + } +} + +func (p 
*Processor) Collect(metrics chan<- prometheus.Metric) { + for _, metric := range p.metricMap { + logger.Tracef("metric name:%v", metric.FQName) + + switch metric.Type { + case Gauge: + gv := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, metric.Variables) + for _, value := range metric.GetValue() { + if value.Value == nil { + continue + } + g := gv.With(value.Label) + g.Set(value.Value.(float64)) + metrics <- g + } + case Counter: + cv := prometheus.NewCounterVec(prometheus.CounterOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, metric.Variables) + for _, value := range metric.GetValue() { + if value.Value == nil { + continue + } + v := i2float(value.Value) + if v < 0 { + logger.Warningf("negative value for prometheus counter. label %v value %v", + value.Label, value.Value) + continue + } + c := cv.With(value.Label) + c.Add(v) + metrics <- c + } + case Info: + lbs := []string{"value"} + lbs = append(lbs, metric.Variables...) + gf := prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: metric.FQName, + Help: metric.Help, + ConstLabels: metric.ConstLabels, + }, lbs) + for _, value := range metric.GetValue() { + if value == nil { + continue + } + v := make(map[string]string, len(value.Label)+1) + v["value"] = value.Value.(string) + for k, l := range value.Label { + v[k] = l + } + g := gf.With(v) + g.Set(1) + metrics <- g + } + case Summary: + } + } +} + +type Table struct { + tsName string + Variables []string + ColumnList []string +} + +type Metric struct { + sync.RWMutex + FQName string + Help string + Type CollectType + ColType int + ConstLabels map[string]string + Variables []string + Desc *prometheus.Desc + LastValue []*Value +} + +func (m *Metric) SetValue(v []*Value) { + m.Lock() + defer m.Unlock() + m.LastValue = v +} + +func (m *Metric) GetValue() []*Value { + m.RLock() + defer m.RUnlock() + return m.LastValue +} + +type Value struct { + Label map[string]string + Value interface{} +} + +func NewProcessor(conf *config.Config) *Processor { + + conn, err := db.NewConnector(conf.TDengine.Username, conf.TDengine.Password, conf.TDengine.Host, conf.TDengine.Port, conf.TDengine.Usessl) + if err != nil { + panic(err) + } + interval, err := time.ParseDuration(conf.RotationInterval) + if err != nil { + panic(err) + } + ctx := context.Background() + tables, err := ExpandMetricsFromConfig(ctx, conn, &conf.Metrics) + if err != nil { + panic(err) + } + p := &Processor{ + prefix: conf.Metrics.Prefix, + db: conf.Metrics.Database.Name, + tableMap: map[string]*Table{}, + metricMap: map[string]*Metric{}, + ctx: ctx, + rotationInterval: interval, + exitChan: make(chan struct{}), + dbConn: conn, + summaryTable: map[string]*Table{"taosadapter_restful_http_request_summary_milliseconds": nil}, + tables: tables, + } + p.Prepare() + p.Process() + return p +} + +func (p *Processor) Prepare() { + locker := sync.RWMutex{} + wg := sync.WaitGroup{} + wg.Add(len(p.tables)) + + for tn := range p.tables { + tableName := tn + + err := pool.GoroutinePool.Submit(func() { + defer wg.Done() + data, err := p.dbConn.Query(p.ctx, fmt.Sprintf("describe %s", p.withDBName(tableName)), util.GetQidOwn()) + if err != nil { + var tdEngineError *taosError.TaosError + if errors.As(err, &tdEngineError) { + logger.Errorf("table %s not exist, skip it, error:%s", tableName, err) + } else { + logger.Errorf("could not get table %s metadata, skip it, error:%s", tableName, err) + } + return + } + + tags := make([]string, 
0, len(data.Data)) + columns := make([]string, 0, len(data.Data)) + typeList := make([]string, 0, len(data.Data)) + columnMap := make(map[string]struct{}, len(data.Data)) + variablesMap := make(map[string]struct{}, len(data.Data)) + for _, info := range data.Data { + if info[3].(string) != "" { + variable := info[0].(string) + tags = append(tags, variable) + variablesMap[variable] = struct{}{} + } else { + column := info[0].(string) + columns = append(columns, column) + typeList = append(typeList, info[1].(string)) + columnMap[column] = struct{}{} + } + } + + // metrics := make([]*Metric, 0, len(columns)) + // newMetrics := make(map[string]*Metric, len(columns)) + columnList := make([]string, 0, len(columns)) + + timestampColumn := "ts" + _, exist := p.summaryTable[tableName] + for i, column := range columns { + if _, columnExist := variablesMap[column]; columnExist { + continue + } + + if typeList[i] == "TIMESTAMP" { + timestampColumn = column + continue + } + + columnName, metricType := "", Summary + if !exist { + columnName = column + + if _, ok := metricTypeMap[tableName+"_"+columnName]; ok { + metricType = metricTypeMap[tableName+"_"+columnName] + } else { + metricType = exchangeDBType(typeList[i]) + } + + // hardcoded for backward compatibility; to be optimized later + if strings.HasSuffix(columnName, "role") { + metricType = Info + } + } + + labels := make(map[string]string) + + fqName := p.buildFQName(tableName, columnName) + pDesc := prometheus.NewDesc(fqName, "", nil, labels) + metric := &Metric{ + Type: metricType, + Desc: pDesc, + FQName: fqName, + Help: "", + ConstLabels: labels, + Variables: tags, + } + // metrics = append(metrics, metric) + // newMetrics[column] = metric + + locker.Lock() + p.metricMap[fqName] = metric + locker.Unlock() + + columnList = append(columnList, column) + } + + t := &Table{ + tsName: timestampColumn, + Variables: tags, + ColumnList: columnList, + } + locker.Lock() + p.tableMap[tableName] = t + p.tableList = append(p.tableList, tableName) + locker.Unlock() + + }) + if err != nil { + panic(err) + } + } + + wg.Wait() +} + +func (p *Processor) withDBName(tableName string) string { + b := pool.BytesPoolGet() + b.WriteString(p.db) + b.WriteByte('.') + b.WriteString(tableName) + return b.String() +} + +func (p *Processor) Process() { + // clear all metric values first + for _, metric := range p.metricMap { + metric.SetValue(nil) + } + + for _, tableName := range p.tableList { + tagIndex := 0 + hasTag := false + b := pool.BytesPoolGet() + b.WriteString("select ") + + table := p.tableMap[tableName] + columns := table.ColumnList + + for i, column := range columns { + b.WriteString("last_row(`" + column + "`) as `" + column + "`") + if i != len(columns)-1 { + b.WriteByte(',') + } + } + + if len(table.Variables) > 0 { + tagIndex = len(columns) + for _, tag := range table.Variables { + b.WriteString(", last_row(`" + tag + "`) as `" + tag + "`") + } + } + + b.WriteString(" from ") + b.WriteString(p.withDBName(tableName)) + + b.WriteString(" WHERE " + p.tableMap[tableName].tsName + " > (NOW() - 1m) ") + + if len(table.Variables) > 0 { + tagIndex = len(columns) + b.WriteString(" group by ") + for i, tag := range table.Variables { + b.WriteString("`" + tag + "`") + if i != len(table.Variables)-1 { + b.WriteByte(',') + } + } + } + sql := b.String() + pool.BytesPoolPut(b) + data, err := p.dbConn.Query(p.ctx, sql, util.GetQidOwn()) + logger.Debug(sql) + if err != nil { + logger.WithError(err).Errorln("select data sql:", sql) + continue + } + if tagIndex > 0 { + hasTag = true + } + if len(data.Data) == 0 { + continue + } + values 
:= make([][]*Value, len(table.ColumnList)) + for _, row := range data.Data { + label := map[string]string{} + valuesMap := make(map[string]interface{}) + colEndIndex := len(columns) + if hasTag { + for i := tagIndex; i < len(data.Head); i++ { + if row[i] != nil { + label[data.Head[i]] = fmt.Sprintf("%v", row[i]) + } + } + } + // values array to map + for i := 0; i < colEndIndex; i++ { + valuesMap[columns[i]] = row[i] + } + for i, column := range table.ColumnList { + var v interface{} + metric := p.metricMap[p.buildFQName(tableName, column)] + switch metric.Type { + case Info: + _, isFloat := valuesMap[column].(float64) + if strings.HasSuffix(column, "role") && valuesMap[column] != nil && isFloat { + v = getRoleStr(valuesMap[column].(float64)) + break + } + if strings.HasSuffix(column, "status") && valuesMap[column] != nil && isFloat { + v = getStatusStr(valuesMap[column].(float64)) + break + } + + if valuesMap[column] != nil { + v = i2string(valuesMap[column]) + } else { + v = nil + } + case Counter, Gauge, Summary: + if valuesMap[column] != nil { + v = i2float(valuesMap[column]) + if column == "cluster_uptime" { + v = i2float(valuesMap[column]) / 86400 + } + } else { + v = nil + } + } + values[i] = append(values[i], &Value{ + Label: label, + Value: v, + }) + } + } + + for i, column := range table.ColumnList { + metric := p.metricMap[p.buildFQName(tableName, column)] + for _, value := range values[i] { + logger.Tracef("set metric:%s, Label:%v, Value:%v", column, value.Label, value.Value) + } + if metric.GetValue() != nil { + values[i] = append(values[i], metric.GetValue()...) + } + metric.SetValue(values[i]) + } + } +} + +func (p *Processor) buildFQName(tableName string, column string) string { + + // keep same metric name + tempFQName := tableName + "_" + column + if _, ok := metricNameMap[tempFQName]; ok { + return p.prefix + "_" + metricNameMap[tempFQName] + } + + b := pool.BytesPoolGet() + b.WriteString(p.prefix) + b.WriteByte('_') + + b.WriteString(tableName) + + if column != "" { + b.WriteByte('_') + b.WriteString(column) + } + + fqName := b.String() + pool.BytesPoolPut(b) + + return fqName +} + +func (p *Processor) GetMetric() map[string]*Metric { + return p.metricMap +} + +func (p *Processor) Close() error { + close(p.exitChan) + return p.dbConn.Close() +} + +func getRoleStr(v float64) string { + rounded := math.Round(v) + integer := int(rounded) + + switch integer { + case 0: + return "offline" + case 100: + return "follower" + case 101: + return "candidate" + case 102: + return "leader" + case 103: + return "error" + case 104: + return "learner" + } + return "unknown" +} + +func getStatusStr(v float64) string { + rounded := math.Round(v) + integer := int(rounded) + + switch integer { + case 0: + return "offline" + case 1: + return "ready" + } + return "unknown" +} + +func exchangeDBType(t string) CollectType { + switch t { + case "BOOL", "FLOAT", "DOUBLE": + return Gauge + case "TINYINT", "SMALLINT", "INT", "BIGINT", "TINYINT UNSIGNED", "SMALLINT UNSIGNED", "INT UNSIGNED", "BIGINT UNSIGNED": + return Counter + case "BINARY", "NCHAR", "VARCHAR": + return Info + default: + panic("unsupported type") + } +} + +func i2string(value interface{}) string { + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + panic("unexpected type to string") + } +} + +func i2float(value interface{}) float64 { + switch v := value.(type) { + case int8: + return float64(v) + case int16: + return float64(v) + case int32: + return float64(v) + case int64: + 
return float64(v) + case uint8: + return float64(v) + case uint16: + return float64(v) + case uint32: + return float64(v) + case uint64: + return float64(v) + case float64: + return v + case float32: + return float64(v) + case bool: + if v { + return 1 + } + return 0 + default: + panic("unexpected type to float64") + } +} diff --git a/tools/keeper/prometheus/prometheus.yml b/tools/keeper/prometheus/prometheus.yml new file mode 100644 index 0000000000..397d566d91 --- /dev/null +++ b/tools/keeper/prometheus/prometheus.yml @@ -0,0 +1,13 @@ +global: + scrape_interval: 5s + +scrape_configs: + - job_name: "prometheus" + static_configs: + - targets: ["localhost:9090"] + - job_name: "taoskeeper" + static_configs: + - targets: ["taoskeeper:6043"] + - job_name: "node" + static_configs: + - targets: ["nodeexporter:9100"] diff --git a/tools/keeper/system/empty_test.go b/tools/keeper/system/empty_test.go new file mode 100644 index 0000000000..a4d4777d32 --- /dev/null +++ b/tools/keeper/system/empty_test.go @@ -0,0 +1,8 @@ +package system + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/system/program.go b/tools/keeper/system/program.go new file mode 100644 index 0000000000..b8f1d8943f --- /dev/null +++ b/tools/keeper/system/program.go @@ -0,0 +1,146 @@ +package system + +import ( + "context" + "fmt" + "net/http" + "os" + "strconv" + "time" + + "github.com/kardianos/service" + "github.com/taosdata/go-utils/web" + "github.com/taosdata/taoskeeper/api" + "github.com/taosdata/taoskeeper/cmd" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/infrastructure/log" + "github.com/taosdata/taoskeeper/monitor" + "github.com/taosdata/taoskeeper/process" + "github.com/taosdata/taoskeeper/version" +) + +var logger = log.GetLogger("PRG") + +func Init() *http.Server { + conf := config.InitConfig() + log.ConfigLog() + + if len(conf.Transfer) > 0 || len(conf.Drop) > 0 { + cmd := cmd.NewCommand(conf) + cmd.Process(conf) + os.Exit(0) + return nil + } + + router := web.CreateRouter(false, &conf.Cors, false) + router.Use(log.GinLog()) + router.Use(log.GinRecoverLog()) + + reporter := api.NewReporter(conf) + reporter.Init(router) + monitor.StartMonitor(conf.Metrics.Cluster, conf, reporter) + + go func() { + // wait for the monitor to receive all metrics + time.Sleep(time.Second * 35) + + processor := process.NewProcessor(conf) + node := api.NewNodeExporter(processor) + node.Init(router) + + if version.IsEnterprise == "true" { + zabbix := api.NewZabbix(processor) + zabbix.Init(router) + } + }() + + checkHealth := api.NewCheckHealth(version.Version) + checkHealth.Init(router) + + if version.IsEnterprise == "true" { + if conf.Audit.Enable { + audit, err := api.NewAudit(conf) + if err != nil { + panic(err) + } + if err = audit.Init(router); err != nil { + panic(err) + } + } + } + + adapter := api.NewAdapter(conf) + if err := adapter.Init(router); err != nil { + panic(err) + } + + gen_metric := api.NewGeneralMetric(conf) + if err := gen_metric.Init(router); err != nil { + panic(err) + } + + server := &http.Server{ + Addr: ":" + strconv.Itoa(conf.Port), + Handler: router, + } + + return server +} + +func Start(server *http.Server) { + prg := newProgram(server) + svcConfig := &service.Config{ + Name: "taoskeeper", + DisplayName: "taoskeeper", + Description: "taosKeeper is a tool for TDengine that exports monitoring metrics", + } + s, err := service.New(prg, svcConfig) + if err != nil { + logger.Fatal(err) + } + err = s.Run() + if err != nil { + 
logger.Fatal(err) + } +} + +type program struct { + server *http.Server +} + +func newProgram(server *http.Server) *program { + return &program{server: server} +} + +func (p *program) Start(s service.Service) error { + if service.Interactive() { + logger.Info("Running in terminal.") + } else { + logger.Info("Running under service manager.") + } + + server := p.server + go func() { + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + panic(fmt.Errorf("taoskeeper startup failed! %v", err)) + } + }() + return nil +} + +func (p *program) Stop(s service.Service) error { + logger.Println("Shutdown WebServer ...") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := p.server.Shutdown(ctx); err != nil { + logger.Println("WebServer Shutdown error:", err) + } + + logger.Println("Server exiting") + ctxLog, cancelLog := context.WithTimeout(context.Background(), 5*time.Second) + defer cancelLog() + logger.Println("Flushing Log") + log.Close(ctxLog) + return nil +} diff --git a/tools/keeper/system/program_test.go b/tools/keeper/system/program_test.go new file mode 100644 index 0000000000..eabc4fff35 --- /dev/null +++ b/tools/keeper/system/program_test.go @@ -0,0 +1,22 @@ +package system + +import ( + "context" + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/taosdata/taoskeeper/db" + "github.com/taosdata/taoskeeper/infrastructure/config" + "github.com/taosdata/taoskeeper/util" +) + +func TestStart(t *testing.T) { + server := Init() + assert.NotNil(t, server) + + conn, err := db.NewConnectorWithDb(config.Conf.TDengine.Username, config.Conf.TDengine.Password, config.Conf.TDengine.Host, config.Conf.TDengine.Port, config.Conf.Metrics.Database.Name, config.Conf.TDengine.Usessl) + assert.NoError(t, err) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", config.Conf.Metrics.Database.Name), util.GetQidOwn()) + conn.Query(context.Background(), fmt.Sprintf("drop database if exists %s", config.Conf.Audit.Database.Name), util.GetQidOwn()) +} diff --git a/tools/keeper/taoskeeper.service b/tools/keeper/taoskeeper.service new file mode 100644 index 0000000000..d8478bc59b --- /dev/null +++ b/tools/keeper/taoskeeper.service @@ -0,0 +1,19 @@ +[Unit] +Description=TaosKeeper - TDengine Metrics Exporter for a Variety of Collectors +Documentation=https://www.taosdata.com +After=network-online.target +Wants=network-online.target + +[Service] +Type=simple +ExecStart=/usr/bin/taoskeeper +TimeoutSec=0 +RestartSec=2 +StandardOutput=null +StandardError=journal +Restart=always +StartLimitBurst=3 +StartLimitInterval=60s + +[Install] +WantedBy=multi-user.target diff --git a/tools/keeper/telegraf.conf b/tools/keeper/telegraf.conf new file mode 100644 index 0000000000..aa2e8e0b35 --- /dev/null +++ b/tools/keeper/telegraf.conf @@ -0,0 +1,6 @@ +[[inputs.prometheus]] +# An array of URLs to scrape metrics from. 
+urls = ["${TAOSKEEPER}"] + +[[outputs.file]] +files = ["stdout"] diff --git a/tools/keeper/telegraf.yml b/tools/keeper/telegraf.yml new file mode 100644 index 0000000000..a02e9f669b --- /dev/null +++ b/tools/keeper/telegraf.yml @@ -0,0 +1,9 @@ +version: "3.6" +services: + telegraf: + image: telegraf:1.20-alpine + hostname: telegraf + volumes: + - ./telegraf.conf:/etc/telegraf/telegraf.conf:ro + environment: + TAOSKEEPER: http://taoskeeper:6043/metrics diff --git a/tools/keeper/util/empty_test.go b/tools/keeper/util/empty_test.go new file mode 100644 index 0000000000..5d82866721 --- /dev/null +++ b/tools/keeper/util/empty_test.go @@ -0,0 +1,8 @@ +package util + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/util/pool/antpool.go b/tools/keeper/util/pool/antpool.go new file mode 100644 index 0000000000..7a4ecd46de --- /dev/null +++ b/tools/keeper/util/pool/antpool.go @@ -0,0 +1,15 @@ +package pool + +import ( + "github.com/panjf2000/ants/v2" +) + +var GoroutinePool *ants.Pool + +func Init(size int) { + var err error + GoroutinePool, err = ants.NewPool(size) + if err != nil { + panic(err) + } +} diff --git a/tools/keeper/util/pool/bytes.go b/tools/keeper/util/pool/bytes.go new file mode 100644 index 0000000000..0fc44f77b8 --- /dev/null +++ b/tools/keeper/util/pool/bytes.go @@ -0,0 +1,23 @@ +package pool + +import ( + "bytes" + "sync" +) + +var bytesBufferPool sync.Pool + +func init() { + bytesBufferPool.New = func() interface{} { + return &bytes.Buffer{} + } +} + +func BytesPoolGet() *bytes.Buffer { + return bytesBufferPool.Get().(*bytes.Buffer) +} + +func BytesPoolPut(b *bytes.Buffer) { + b.Reset() + bytesBufferPool.Put(b) +} diff --git a/tools/keeper/util/pool/empty_test.go b/tools/keeper/util/pool/empty_test.go new file mode 100644 index 0000000000..dcbca2d11d --- /dev/null +++ b/tools/keeper/util/pool/empty_test.go @@ -0,0 +1,8 @@ +package pool + +import ( + "testing" +) + +func TestEmpty(t *testing.T) { +} diff --git a/tools/keeper/util/util.go b/tools/keeper/util/util.go new file mode 100644 index 0000000000..a739c23760 --- /dev/null +++ b/tools/keeper/util/util.go @@ -0,0 +1,154 @@ +package util + +import ( + "crypto/md5" + "encoding/hex" + "os" + "strconv" + "strings" + "sync/atomic" + "time" + "unicode" + + "github.com/taosdata/taoskeeper/infrastructure/config" +) + +// https://github.com/containerd/cgroups/blob/main/utils.go +var globalCounter64 uint64 +var globalCounter32 uint32 + +var MAX_TABLE_NAME_LEN = 190 + +func init() { + atomic.StoreUint64(&globalCounter64, 0) + atomic.StoreUint32(&globalCounter32, 0) +} + +func ReadUint(path string) (uint64, error) { + v, err := os.ReadFile(path) + if err != nil { + return 0, err + } + return ParseUint(strings.TrimSpace(string(v)), 10, 64) +} + +func ParseUint(s string, base, bitSize int) (uint64, error) { + v, err := strconv.ParseUint(s, base, bitSize) + if err != nil { + intValue, intErr := strconv.ParseInt(s, base, bitSize) + // 1. Handle negative values greater than MinInt64 (and) + // 2. 
Handle negative values less than MinInt64
+		if intErr == nil && intValue < 0 {
+			return 0, nil
+		} else if intErr != nil &&
+			intErr.(*strconv.NumError).Err == strconv.ErrRange &&
+			intValue < 0 {
+			return 0, nil
+		}
+		return 0, err
+	}
+	return v, nil
+}
+
+func EscapeInfluxProtocol(s string) string {
+	s = strings.TrimSuffix(s, "\\")
+	s = strings.ReplaceAll(s, ",", "\\,")
+	s = strings.ReplaceAll(s, "=", "\\=")
+	s = strings.ReplaceAll(s, " ", "\\ ")
+	s = strings.ReplaceAll(s, "\"", "\\\"")
+	return s
+}
+
+func GetCfg() *config.Config {
+	c := &config.Config{
+		InstanceID: 64,
+		Port:       6043,
+		LogLevel:   "trace",
+		TDengine: config.TDengineRestful{
+			Host:     "127.0.0.1",
+			Port:     6041,
+			Username: "root",
+			Password: "taosdata",
+			Usessl:   false,
+		},
+		Metrics: config.MetricsConfig{
+			Database: config.Database{
+				Name:    "keeper_test_log",
+				Options: map[string]interface{}{},
+			},
+		},
+		Log: config.Log{
+			Level:            "trace",
+			Path:             "/var/log/taos",
+			RotationCount:    10,
+			RotationTime:     24 * time.Hour,
+			RotationSize:     1073741824,
+			Compress:         true,
+			ReservedDiskSize: 1073741824,
+		},
+	}
+	return c
+}
+
+func SafeSubstring(s string, n int) string {
+	if len(s) > n {
+		return s[:n]
+	}
+	return s
+}
+
+func GetQid(qidStr string) uint64 {
+	if qidStr == "" || !strings.HasPrefix(qidStr, "0x") {
+		qid32 := atomic.AddUint32(&globalCounter32, 1)
+		qid64 := uint64(qid32) << 8
+		return qid64
+	}
+
+	qid, err := strconv.ParseUint(qidStr[2:], 16, 64)
+	if err != nil {
+		qid32 := atomic.AddUint32(&globalCounter32, 1)
+		qid64 := uint64(qid32) << 8
+		return qid64
+	}
+
+	// clear the last byte
+	qid = qid &^ 0xFF
+
+	return qid
+}
+
+func GetQidOwn() uint64 {
+
+	id := atomic.AddUint64(&globalCounter64, 1)
+
+	if id > 0x00ffffffffffffff {
+		atomic.StoreUint64(&globalCounter64, 1)
+		id = 1
+	}
+	qid64 := uint64(config.Conf.InstanceID)<<56 | id
+	return qid64
+}
+
+func GetMd5HexStr(str string) string {
+	sum := md5.Sum([]byte(str))
+	return hex.EncodeToString(sum[:])
+}
+
+func isValidChar(r rune) bool {
+	return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_'
+}
+
+func ToValidTableName(input string) string {
+	var builder strings.Builder
+
+	for _, r := range input {
+		if isValidChar(r) {
+			builder.WriteRune(unicode.ToLower(r))
+		} else {
+			builder.WriteRune('_')
+		}
+	}
+
+	result := builder.String()
+	return result
+}
diff --git a/tools/keeper/version/version.go b/tools/keeper/version/version.go
new file mode 100644
index 0000000000..c29a40c58e
--- /dev/null
+++ b/tools/keeper/version/version.go
@@ -0,0 +1,11 @@
+package version
+
+var Version = "0.0.0.0"
+var Gitinfo = "unknown"
+var BuildInfo = "1970-01-01 00:00:00 +08:00"
+var CommitID = "unknown"
+
+var CUS_NAME = "TDengine"
+var CUS_PROMPT = "taos"
+
+var IsEnterprise = "false"
diff --git a/tools/keeper/zbx_taos_keeper_templates.xml b/tools/keeper/zbx_taos_keeper_templates.xml
new file mode 100644
index 0000000000..04e260cd21
--- /dev/null
+++ b/tools/keeper/zbx_taos_keeper_templates.xml
@@ -0,0 +1,111 @@
+ + 5.0 + 2021-12-06T05:55:45Z + + + taos + + + + + + 
\ No newline at end of file
diff --git a/tools/shell/CMakeLists.txt b/tools/shell/CMakeLists.txt
index fd46870ac5..4a8e0b9d34 100644
--- a/tools/shell/CMakeLists.txt
+++ b/tools/shell/CMakeLists.txt
@@ -49,3 +49,32 @@ target_include_directories(
 )
 
 SET_TARGET_PROPERTIES(shell PROPERTIES OUTPUT_NAME taos)
+
+#
+# generate library shell_ut for unit tests
+#
+
+IF(TD_LINUX)
+    # include
+    include_directories(${CMAKE_CURRENT_SOURCE_DIR}/inc)
+    # shell_ut library
+    add_library(shell_ut STATIC
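+        # SHELL_SRC is the same source list used for the taos CLI target above, so the tests link against the production code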
${SHELL_SRC}) + + IF(TD_WEBSOCKET) + ADD_DEPENDENCIES(shell_ut taosws-rs) + ENDIF() + target_link_libraries(shell_ut PUBLIC taos ${LINK_WEBSOCKET} ${LINK_JEMALLOC} ${LINK_ARGP}) + target_link_libraries(shell_ut PRIVATE os common transport geometry util) + + # util depends + target_link_directories( + shell_ut + PUBLIC "${TD_SOURCE_DIR}/contrib/lzma2" + PUBLIC "${TD_SOURCE_DIR}/contrib/pcre2" + ) + + # unit test + IF(${BUILD_TEST}) + ADD_SUBDIRECTORY(test) + ENDIF(${BUILD_TEST}) +ENDIF() diff --git a/tools/shell/inc/shellAuto.h b/tools/shell/inc/shellAuto.h index bcf500fefc..c9d631f4b2 100644 --- a/tools/shell/inc/shellAuto.h +++ b/tools/shell/inc/shellAuto.h @@ -16,6 +16,10 @@ #ifndef __SHELL_AUTO__ #define __SHELL_AUTO__ +#ifdef __cplusplus +extern "C" { +#endif + #include "shellInt.h" #define TAB_KEY 0x09 @@ -47,4 +51,15 @@ void showAD(bool end); // show all commands help void showHelp(); + +// +// for unit test +// +bool fieldOptionsArea(char* p); +bool isCreateFieldsArea(char* p); + +#ifdef __cplusplus +} +#endif + #endif diff --git a/tools/shell/src/shellAuto.c b/tools/shell/src/shellAuto.c index 65ae9fad54..959e2d6d62 100644 --- a/tools/shell/src/shellAuto.c +++ b/tools/shell/src/shellAuto.c @@ -46,6 +46,7 @@ typedef struct SWord { int32_t len; struct SWord* next; bool free; // if true need free + bool end; // if true is last keyword } SWord; typedef struct { @@ -95,59 +96,62 @@ SWords shellCommands[] = { " ;", 0, 0, NULL}, {"create dnode ", 0, 0, NULL}, {"create index on ()", 0, 0, NULL}, - {"create mnode on dnode ;", 0, 0, NULL}, - {"create qnode on dnode ;", 0, 0, NULL}, + {"create mnode on dnode ;", 0, 0, NULL}, + {"create qnode on dnode ;", 0, 0, NULL}, {"create stream into as select", 0, 0, NULL}, // 26 append sub sql {"create topic as select", 0, 0, NULL}, // 27 append sub sql - {"create function as outputtype language ", 0, 0, NULL}, - {"create or replace as outputtype language ", 0, 0, NULL}, - {"create aggregate function as outputtype bufsize language ", 0, 0, NULL}, - {"create or replace aggregate function as outputtype bufsize language ", 0, 0, NULL}, + {"create tsma on function", 0, 0, NULL}, + {"create recursive tsma on interval(", 0, 0, NULL}, + {"create function as outputtype language ;", 0, 0, NULL}, + {"create or replace as outputtype language ;", 0, 0, NULL}, + {"create aggregate function as outputtype bufsize language ;", 0, 0, NULL}, + {"create or replace aggregate function as outputtype bufsize language ;", 0, 0, NULL}, {"create user pass sysinfo 0;", 0, 0, NULL}, {"create user pass sysinfo 1;", 0, 0, NULL}, #ifdef TD_ENTERPRISE {"create view as select", 0, 0, NULL}, {"compact database ", 0, 0, NULL}, #endif - {"describe ", 0, 0, NULL}, + {"describe ;", 0, 0, NULL}, {"delete from where ", 0, 0, NULL}, - {"drop database ", 0, 0, NULL}, - {"drop index ", 0, 0, NULL}, - {"drop table ", 0, 0, NULL}, - {"drop dnode ", 0, 0, NULL}, - {"drop mnode on dnode ;", 0, 0, NULL}, - {"drop qnode on dnode ;", 0, 0, NULL}, - {"drop user ;", 0, 0, NULL}, + {"drop database ;", 0, 0, NULL}, + {"drop index ;", 0, 0, NULL}, + {"drop table ;", 0, 0, NULL}, + {"drop dnode ;", 0, 0, NULL}, + {"drop mnode on dnode ;", 0, 0, NULL}, + {"drop qnode on dnode ;", 0, 0, NULL}, + {"drop user ;", 0, 0, NULL}, // 40 - {"drop function ;", 0, 0, NULL}, + {"drop function ;", 0, 0, NULL}, {"drop consumer group on ", 0, 0, NULL}, - {"drop topic ;", 0, 0, NULL}, - {"drop stream ;", 0, 0, NULL}, - {"explain select", 0, 0, NULL}, // 44 append sub sql - {"flush database ;", 0, 0, NULL}, + {"drop topic ;", 
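+     // a template ending in ';' sets SWord.end in addWord() below, so TAB completion also appends the terminating semicolon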
0, 0, NULL}, + {"drop stream ;", 0, 0, NULL}, + {"drop tsma ;", 0, 0, NULL}, + {"explain select ", 0, 0, NULL}, // 44 append sub sql + {"flush database ;", 0, 0, NULL}, {"help;", 0, 0, NULL}, - {"grant all on to ;", 0, 0, NULL}, - {"grant read on to ;", 0, 0, NULL}, - {"grant write on to ;", 0, 0, NULL}, - {"kill connection ;", 0, 0, NULL}, + {"grant all on to ;", 0, 0, NULL}, + {"grant read on to ;", 0, 0, NULL}, + {"grant write on to ;", 0, 0, NULL}, + {"kill connection ;", 0, 0, NULL}, {"kill query ", 0, 0, NULL}, {"kill transaction ", 0, 0, NULL}, #ifdef TD_ENTERPRISE - {"merge vgroup ", 0, 0, NULL}, + {"merge vgroup ;", 0, 0, NULL}, #endif - {"pause stream ;", 0, 0, NULL}, + {"pause stream ;", 0, 0, NULL}, #ifdef TD_ENTERPRISE - {"redistribute vgroup dnode ;", 0, 0, NULL}, + {"redistribute vgroup dnode ;", 0, 0, NULL}, #endif - {"resume stream ;", 0, 0, NULL}, + {"resume stream ;", 0, 0, NULL}, {"reset query cache;", 0, 0, NULL}, - {"restore dnode ;", 0, 0, NULL}, - {"restore vnode on dnode ;", 0, 0, NULL}, - {"restore mnode on dnode ;", 0, 0, NULL}, - {"restore qnode on dnode ;", 0, 0, NULL}, - {"revoke all on from ;", 0, 0, NULL}, - {"revoke read on from ;", 0, 0, NULL}, - {"revoke write on from ;", 0, 0, NULL}, + {"restore dnode ;", 0, 0, NULL}, + {"restore vnode on dnode ;", 0, 0, NULL}, + {"restore mnode on dnode ;", 0, 0, NULL}, + {"restore qnode on dnode ;", 0, 0, NULL}, + {"revoke all on from ;", 0, 0, NULL}, + {"revoke read on from ;", 0, 0, NULL}, + {"revoke write on from ;", 0, 0, NULL}, {"select * from ", 0, 0, NULL}, {"select client_version();", 0, 0, NULL}, // 60 @@ -160,15 +164,17 @@ SWords shellCommands[] = { {"select timezone();", 0, 0, NULL}, {"set max_binary_display_width ", 0, 0, NULL}, {"show apps;", 0, 0, NULL}, + {"show alive;", 0, 0, NULL}, {"show create database \\G;", 0, 0, NULL}, {"show create stable \\G;", 0, 0, NULL}, {"show create table \\G;", 0, 0, NULL}, #ifdef TD_ENTERPRISE {"show create view \\G;", 0, 0, NULL}, -#endif - {"show connections;", 0, 0, NULL}, {"show compact", 0, 0, NULL}, {"show compacts;", 0, 0, NULL}, + +#endif + {"show connections;", 0, 0, NULL}, {"show cluster;", 0, 0, NULL}, {"show cluster alive;", 0, 0, NULL}, {"show cluster machines;", 0, 0, NULL}, @@ -190,16 +196,17 @@ SWords shellCommands[] = { {"show subscriptions;", 0, 0, NULL}, {"show tables;", 0, 0, NULL}, {"show tables like", 0, 0, NULL}, - {"show table distributed ", 0, 0, NULL}, - {"show tags from ", 0, 0, NULL}, - {"show tags from ", 0, 0, NULL}, - {"show table tags from ", 0, 0, NULL}, + {"show table distributed ;", 0, 0, NULL}, + {"show tags from ;", 0, 0, NULL}, + {"show table tags from ;", 0, 0, NULL}, {"show topics;", 0, 0, NULL}, {"show transactions;", 0, 0, NULL}, + {"show tsmas;", 0, 0, NULL}, {"show users;", 0, 0, NULL}, {"show variables;", 0, 0, NULL}, {"show local variables;", 0, 0, NULL}, - {"show vnodes ", 0, 0, NULL}, + {"show vnodes;", 0, 0, NULL}, + {"show vnodes on dnode ;", 0, 0, NULL}, {"show vgroups;", 0, 0, NULL}, {"show consumers;", 0, 0, NULL}, {"show grants;", 0, 0, NULL}, @@ -207,22 +214,26 @@ SWords shellCommands[] = { {"show grants logs;", 0, 0, NULL}, #ifdef TD_ENTERPRISE {"show views;", 0, 0, NULL}, - {"split vgroup ", 0, 0, NULL}, + {"show arbgroups;", 0, 0, NULL}, + {"split vgroup ;", 0, 0, NULL}, + {"s3migrate database ;", 0, 0, NULL}, #endif {"insert into values(", 0, 0, NULL}, {"insert into using tags(", 0, 0, NULL}, {"insert into using values(", 0, 0, NULL}, {"insert into file ", 0, 0, NULL}, - {"trim database ", 0, 0, NULL}, - 
{"s3migrate database ", 0, 0, NULL},
-    {"use ", 0, 0, NULL},
+    {"trim database ;", 0, 0, NULL},
+    {"use ;", 0, 0, NULL},
     {"quit", 0, 0, NULL}};
 
+// WHERE clause keywords
 char* keywords[] = {
-    "and ", "asc ", "desc ", "from ", "fill(", "limit ", "where ",
+    "where ", "and ", "asc ", "desc ", "from ", "fill(", "limit ",
     "interval(", "order by ", "order by ", "offset ", "or ", "group by ", "now()", "session(",
     "sliding ", "slimit ", "soffset ", "state_window(", "today() ", "union all select ",
-    "partition by "};
+    "partition by ", "match", "nmatch ", "between ", "like ", "is null ", "is not null ",
+    "event_window ", "count_window("
+};
 
 char* functions[] = {
     "count(", "sum(",
@@ -255,6 +266,20 @@ char* functions[] = {
     "timezone(", "timetruncate(", "twa(", "to_unixtimestamp(", "unique(", "upper(",
+    "pi(", "round(",
+    "truncate(", "exp(",
+    "ln(", "mod(",
+    "rand(", "sign(",
+    "degrees(", "radians(",
+    "greatest(", "least(",
+    "char_length(", "char(",
+    "ascii(", "position(",
+    "trim(", "replace(",
+    "repeat(", "substring(",
+    "substring_index(", "timediff(",
+    "week(", "weekday(",
+    "weekofyear(", "dayofweek(",
+    "stddev_pop(", "var_pop("
 };
 
 char* tb_actions[] = {
@@ -275,7 +300,7 @@ char* db_options[] = {"keep ",
                       "cachesize ",
                       "comp ",
                       "duration ",
-                      "wal_fsync_period",
+                      "wal_fsync_period ",
                       "maxrows ",
                       "minrows ",
                       "pages ",
@@ -284,17 +309,22 @@ char* db_options[] = {"keep ",
                       "wal_level ",
                       "vgroups ",
                       "single_stable ",
-                      "s3_chunksize ",
-                      "s3_keeplocal ",
-                      "s3_compact ",
+                      "s3_chunksize ",
+                      "s3_keeplocal ",
+                      "s3_compact ",
                       "wal_retention_period ",
                       "wal_roll_period ",
                       "wal_retention_size ",
-                      "wal_segment_size "};
+#ifdef TD_ENTERPRISE
+                      "encrypt_algorithm ",
+#endif
+                      "keep_time_offset ",
+                      "wal_segment_size "
+};
 
 char* alter_db_options[] = {"cachemodel ", "replica ", "keep ", "stt_trigger ",
                             "wal_retention_period ", "wal_retention_size ", "cachesize ",
-                            "s3_keeplocal ", "s3_compact ",
+                            "s3_keeplocal ", "s3_compact ",
                             "wal_fsync_period ", "buffer ", "pages " ,"wal_level "};
 
 char* data_types[] = {"timestamp", "int",
@@ -304,6 +334,7 @@ char* data_types[] = {"timestamp", "int",
                       "bigint", "bigint unsigned", "smallint", "smallint unsigned",
                       "tinyint", "tinyint unsigned",
+                      "geometry(64)", "varbinary(16)",
                       "bool", "json"};
 
 char* key_tags[] = {"tags("};
@@ -319,10 +350,20 @@ char* key_systable[] = {
 
 char* udf_language[] = {"\'Python\'", "\'C\'"};
 
+char* field_options[] = {
+    "encode ", "compress ", "level ",
+    "\'lz4\' ", "\'zlib\' ", "\'zstd\' ", "\'xz\' ", "\'tsz\' ", "\'disabled\' ",  // compress
+    "\'simple8b\' ", "\'delta-i\' ", "\'delta-d\' ", "\'bit-packing\' ",
+    "\'high\' ", "\'medium\' ", "\'low\' ",
+    "comment ",
+    "primary key "
+};
+
 // global keys can tips on anywhere
 char* global_keys[] = {
     "tbname",
-    "now",
+    "now",
+    "vgroups",
     "_wstart",
     "_wend",
     "_wduration",
@@ -354,27 +395,29 @@ bool waitAutoFill = false;
 #define WT_VAR_STREAM 6
 #define WT_VAR_UDFNAME 7
 #define WT_VAR_VGROUPID 8
+#define WT_VAR_TSMA 9
 
-#define WT_FROM_DB_MAX 8  // max get content from db
+#define WT_FROM_DB_MAX 9  // max var type fetched from db
 #define WT_FROM_DB_CNT (WT_FROM_DB_MAX + 1)
 
-#define WT_VAR_ALLTABLE 9
-#define WT_VAR_FUNC 10
-#define WT_VAR_KEYWORD 11
-#define WT_VAR_TBACTION 12
-#define WT_VAR_DBOPTION 13
-#define WT_VAR_ALTER_DBOPTION 14
-#define WT_VAR_DATATYPE 15
-#define WT_VAR_KEYTAGS 16
-#define WT_VAR_ANYWORD 17
-#define WT_VAR_TBOPTION 18
-#define WT_VAR_USERACTION 19
-#define WT_VAR_KEYSELECT 20
-#define WT_VAR_SYSTABLE 21
-#define WT_VAR_LANGUAGE 22
-#define WT_VAR_GLOBALKEYS 23
+#define
WT_VAR_ALLTABLE 10 +#define WT_VAR_FUNC 11 +#define WT_VAR_KEYWORD 12 +#define WT_VAR_TBACTION 13 +#define WT_VAR_DBOPTION 14 +#define WT_VAR_ALTER_DBOPTION 15 +#define WT_VAR_DATATYPE 16 +#define WT_VAR_KEYTAGS 17 +#define WT_VAR_ANYWORD 18 +#define WT_VAR_TBOPTION 19 +#define WT_VAR_USERACTION 20 +#define WT_VAR_KEYSELECT 21 +#define WT_VAR_SYSTABLE 22 +#define WT_VAR_LANGUAGE 23 +#define WT_VAR_GLOBALKEYS 24 +#define WT_VAR_FIELD_OPTIONS 25 -#define WT_VAR_CNT 24 +#define WT_VAR_CNT 26 #define WT_TEXT 0xFF @@ -387,12 +430,17 @@ TdThreadMutex tiresMutex; TdThread* threads[WT_FROM_DB_CNT]; // obtain var name with sql from server char varTypes[WT_VAR_CNT][64] = { + // get from db "", "", "", "", "", "", "", - "", "", "", "", "", "", "", "", - "", "", "", "", "", "", "", ""}; + "", "", "", + // get from code + "", "", "", "", "", "", + "", "", "", "", "", "", "", + "", "", ""}; char varSqls[WT_FROM_DB_CNT][64] = {"show databases;", "show stables;", "show tables;", "show dnodes;", - "show users;", "show topics;", "show streams;", "show functions;", "show vgroups;"}; + "show users;", "show topics;", "show streams;", "show functions;", + "show vgroups;", "show tsmas;"}; // var words current cursor, if user press any one key except tab, cursorVar can be reset to -1 int cursorVar = -1; @@ -534,6 +582,7 @@ void showHelp() { select timezone();\n\ set max_binary_display_width ...\n\ show apps;\n\ + show alive;\n\ show create database ;\n\ show create stable ;\n\ show create table ;\n\ @@ -567,7 +616,8 @@ void showHelp() { show users;\n\ show variables;\n\ show local variables;\n\ - show vnodes \n\ + show vnodes;\n\ + show vnodes on dnode ;\n\ show vgroups;\n\ show consumers;\n\ show grants;\n\ @@ -588,8 +638,10 @@ void showHelp() { create view as select ...\n\ redistribute vgroup dnode ;\n\ split vgroup ;\n\ + s3migrate database ;\n\ show compacts;\n\ show compact \n\ + show arbgroups;\n\ show views;\n\ show create view ;"); #endif @@ -648,7 +700,12 @@ SWord* addWord(const char* p, int32_t len, bool pattern) { // check format if (pattern && len > 0) { - word->type = wordType(p, len); + if (p[len - 1] == ';') { + word->type = wordType(p, len - 1); + word->end = true; + } else { + word->type = wordType(p, len); + } } else { word->type = WT_TEXT; } @@ -756,6 +813,7 @@ bool shellAutoInit() { GenerateVarType(WT_VAR_SYSTABLE, key_systable, sizeof(key_systable) / sizeof(char*)); GenerateVarType(WT_VAR_LANGUAGE, udf_language, sizeof(udf_language) / sizeof(char*)); GenerateVarType(WT_VAR_GLOBALKEYS, global_keys, sizeof(global_keys) / sizeof(char*)); + GenerateVarType(WT_VAR_FIELD_OPTIONS, field_options, sizeof(field_options) / sizeof(char*)); return true; } @@ -1254,9 +1312,9 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) { const char* str = NULL; int strLen = 0; + SWord* word = MATCH_WORD(match); if (firstMatchIndex == curMatchIndex && lastWordBytes == -1) { // first press tab - SWord* word = MATCH_WORD(match); str = word->word + match->matchLen; strLen = word->len - match->matchLen; lastMatchIndex = firstMatchIndex; @@ -1264,8 +1322,6 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) { } else { if (lastWordBytes == -1) return; deleteCount(cmd, lastWordBytes); - - SWord* word = MATCH_WORD(match); str = word->word; strLen = word->len; // set current to last @@ -1273,8 +1329,22 @@ void printScreen(TAOS* con, SShellCmd* cmd, SWords* match) { lastWordBytes = word->len; } - // insert new - shellInsertStr(cmd, (char*)str, strLen); + if (word->end && str[strLen - 1] != ';') { + // 
append the terminating ';'
+    char* p = taosMemoryCalloc(strLen + 8, 1);
+    if (p) {
+      tstrncpy(p, str, strLen + 1);
+      tstrncpy(p + strLen, ";", 1 + 1);
+      lastWordBytes += 1;
+      shellInsertStr(cmd, (char*)p, strLen + 1);
+      taosMemoryFree(p);
+    } else {
+      shellInsertStr(cmd, (char*)str, strLen);
+    }
+  } else {
+    // insert new
+    shellInsertStr(cmd, (char*)str, strLen);
+  }
 }
 
 // main key press tab , matched return true else false
@@ -1648,38 +1718,69 @@ bool matchSelectQuery(TAOS* con, SShellCmd* cmd) {
   return appendAfterSelect(con, cmd, p, len);
 }
 
+// check whether the cursor is inside the field options area
+bool fieldOptionsArea(char* p) {
+  char* p1 = strrchr(p, '(');
+  char* p2 = strrchr(p, ',');
+  if (p1 == NULL && p2 == NULL) {
+    return false;
+  }
+
+  // field options are not suggested inside a tags(...) list
+  if (strstr(p, " tags") != NULL) {
+    return false;
+  }
+
+  if (p2 == NULL) {
+    // no comma yet, still on the first field
+    p2 = p1;
+  }
+
+  // count the blanks after the last ',' or '('
+  int32_t cnt = 0;
+  while (p2) {
+    p2 = strchr(p2, ' ');
+    if (p2) {
+      // check the char before the blank
+      char prec = *(p2 - 1);
+      if (prec != ',' && prec != '(') {
+        // a blank right after ',' or '(' is not counted, so "st(ts timestamp, age int" + BLANK + TAB yields only two counted blanks
+        cnt++;
+      }
+
+      // a run of consecutive blanks counts as one
+      while (p2[1] != 0 && p2[1] == ' ') {
+        // skip the extra blanks
+        p2 += 1;
+      }
+      p2 += 1;
+    }
+  }
+
+  // true for inputs like "create table st(ts timestamp " + TAB or "st(ts timestamp , age int " + TAB
+  return cnt >= 2;
+}
+
 // if is input create fields or tags area, return true
 bool isCreateFieldsArea(char* p) {
-  // put to while, support like create table st(ts timestamp, bin1 binary(16), bin2 + blank + TAB
-  char* p1 = taosStrdup(p);
-  bool  ret = false;
-  while (1) {
-    char* left = strrchr(p1, '(');
-    if (left == NULL) {
-      // like 'create table st'
-      ret = false;
-      break;
+  int32_t n = 0;  // unmatched '(' depth
+  char*   p1 = p;
+  while (*p1 != 0) {
+    switch (*p1) {
+      case '(':
+        ++n;
+        break;
+      case ')':
+        --n;
+        break;
+      default:
+        break;
    }
-
-    char* right = strrchr(p1, ')');
-    if (right == NULL) {
-      // like 'create table st( '
-      ret = true;
-      break;
-    }
-
-    if (left > right) {
-      // like 'create table st( ts timestamp, age int) tags(area '
-      ret = true;
-      break;
-    }
-
-    // set string end by small for next strrchr search
-    *left = 0;
+    // next char
+    ++p1;
  }
-  taosMemoryFree(p1);
-  return ret;
+  return n > 0;
 }
 
 bool matchCreateTable(TAOS* con, SShellCmd* cmd) {
@@ -1718,7 +1819,13 @@ bool matchCreateTable(TAOS* con, SShellCmd* cmd) {
 
   // check in create fields or tags input area
   if (isCreateFieldsArea(ps)) {
-    ret = fillWithType(con, cmd, last, WT_VAR_DATATYPE);
+    if (fieldOptionsArea(ps)) {
+      // fill field options
+      ret = fillWithType(con, cmd, last, WT_VAR_FIELD_OPTIONS);
+    } else {
+      // fill field data type
+      ret = fillWithType(con, cmd, last, WT_VAR_DATATYPE);
+    }
   }
 
   // tags
@@ -1726,7 +1833,7 @@ bool matchCreateTable(TAOS* con, SShellCmd* cmd) {
   // find only one ')' , can insert tags
   char* p1 = strchr(ps, ')');
   if (p1) {
-    if (strchr(p1 + 1, ')') == NULL && strstr(p1 + 1, "tags") == NULL) {
+    if (strstr(p1 + 1, "tags") == NULL) {
       // can insert tags keyword
       ret = fillWithType(con, cmd, last, WT_VAR_KEYTAGS);
    }
diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c
index 2a583f948e..50a7fe8119 100644
--- a/tools/shell/src/shellEngine.c
+++ b/tools/shell/src/shellEngine.c
@@ -22,6 +22,8 @@
 #include "shellAuto.h"
 #include "shellInt.h"
 
+SShellObj shell = {0};
+
 typedef struct {
   const char *sql;
   bool        vertical;
diff --git a/tools/shell/src/shellMain.c b/tools/shell/src/shellMain.c
index 71acf23e41..fc6ba0f7d8 100644
--- a/tools/shell/src/shellMain.c
+++ b/tools/shell/src/shellMain.c
@@ -17,8 +17,7 @@
 #include "shellInt.h"
 #include "shellAuto.h"
 
-SShellObj shell = {0};
-
+extern SShellObj shell;
 void shellCrashHandler(int signum, void *sigInfo, void *context) {
   taosIgnSignal(SIGTERM);
diff --git a/tools/shell/test/CMakeLists.txt b/tools/shell/test/CMakeLists.txt
new file mode 100644
index 0000000000..1eb6c709ab
--- /dev/null
+++ b/tools/shell/test/CMakeLists.txt
@@ -0,0 +1,25 @@
+
+MESSAGE(STATUS "build taos-CLI unit test")
+
+IF(NOT TD_DARWIN)
+    # GoogleTest requires at least C++11
+    SET(CMAKE_CXX_STANDARD 11)
+    AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST)
+
+    ADD_EXECUTABLE(shellTest ${SOURCE_LIST})
+    TARGET_LINK_LIBRARIES(
+        shellTest
+        PRIVATE shell_ut gtest os common transport geometry util
+    )
+
+    target_include_directories(
+        shell_ut
+        PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/../inc"
+    )
+
+    add_test(
+        NAME shellTest
+        COMMAND shellTest
+    )
+ENDIF()
diff --git a/tools/shell/test/shellTest.cpp b/tools/shell/test/shellTest.cpp
new file mode 100644
index 0000000000..cf0ec503fe
--- /dev/null
+++ b/tools/shell/test/shellTest.cpp
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2019 TAOS Data, Inc.
+ *
+ * This program is free software: you can use, redistribute, and/or modify
+ * it under the terms of the GNU Affero General Public License, version 3
+ * or later ("AGPL"), as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <gtest/gtest.h>
+#include <stdio.h>
+#include "shellAuto.h"
+
+TEST(fieldOptionsArea, autoTabTest) {
+  printf("hello world SHELL tab test\n");
+
+  // cases expected to be false
+  const char *s0[] = {
+      "create table st(ts ",
+      "create table st(ts timestamp, age ",
+      "create table st(ts timestamp, age",
+      "create table st(ts timestamp, age int , name ",
+      "create table st(ts timestamp, age int , name binary(16)",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( ",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( area int, addr ",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( area int,addr varbinary",
+      "create table st(ts timestamp, age int, name binary(16)) tags(area int , addr varbinary(32)",
+      "create table st( ts timestamp, age int, name binary(16)) tags( area int, addr",
+      "create table st (ts timestamp , age int, name binary(16) , area int,",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int ,addr varbinary",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) level "
+      "'high' , no i",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) encode "
+      "'simple8b' level 'high', no in",
+  };
+
+  // cases expected to be true
+  const char *s1[] = {
+      "create table st(ts timestamp ",
+      "create table st(ts timestamp, age int ",
+      "create table st(ts timestamp, age int  ",
+      "create table st(ts timestamp, age int , name binary(16) ",
+      "create table st(ts timestamp, age int , name binary(16)  ",
+      "create table st(ts timestamp, age int , name binary(16) , addr varbinary( 32 ) ",
+      "create table st(ts timestamp, age int , name binary(16) ,area int, addr varbinary(32)  ",
+      "create table st(ts timestamp, age int , name binary(16), area int,addr varbinary(32)   ",
+      "create table st(ts timestamp, age int, name binary(16) , area int,addr varbinary(32)  ",
+      "create table st( ts timestamp, age int, name binary(16) ,area int,addr varbinary(32)  ",
+      "create table st (ts timestamp , age int, name binary(16), area int,addr varbinary(32)  ",
+      "create table st (ts timestamp , age int, name binary(16), area int , addr varbinary(32) compress 'zlib' ",
+      "create table st (ts timestamp , age int, name binary(16), area int , addr varbinary(32) level 'high' ",
+      "create table st (ts timestamp , age int, name binary(16) , area int , addr varbinary(32) encode 'simple8b' "
+      "level 'high' ",
+  };
+
+  // s0 is false
+  for (int32_t i = 0; i < sizeof(s0) / sizeof(char *); i++) {
+    printf("s0 i=%d fieldOptionsArea %s expect false \n", i, s0[i]);
+    ASSERT(fieldOptionsArea((char *)s0[i]) == false);
+  }
+
+  // s1 is true
+  for (int32_t i = 0; i < sizeof(s1) / sizeof(char *); i++) {
+    printf("s1 i=%d fieldOptionsArea %s expect true \n", i, s1[i]);
+    ASSERT(fieldOptionsArea((char *)s1[i]) == true);
+  }
+}
+
+TEST(isCreateFieldsArea, autoTabTest) {
+  printf("hello world SHELL tab test\n");
+
+  // cases expected to be false
+  const char *s0[] = {
+      "create table st(ts )",
+      "create table st(ts timestamp, age) ",
+      "create table st(ts timestamp, age)",
+      "create table st(ts timestamp, age int , name binary(16) )",
+      "create table st(ts timestamp, age int , name binary(16))",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( )",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( area int, addr )",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( area int,addr varbinary)",
+      "create table st(ts timestamp, age int, name binary(16)) tags(area int , addr varbinary(32))",
+      "create table st( ts timestamp, age int, name binary(16)) tags( area int, addr int)",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int,addr varbinary(32) )",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int ,addr varbinary(14))",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) level "
+      "'high' )",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) encode "
+      "'simple8b' level 'high' ) ",
+  };
+
+  // cases expected to be true
+  const char *s1[] = {
+      "create table st(ts timestamp ",
+      "create table st(ts timestamp, age int ",
+      "create table st(ts timestamp, age int ,",
+      "create table st(ts timestamp, age int , name binary(16), ",
+      "create table st(ts timestamp, age int , name binary(16) ",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( area int ",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( area int, addr varbinary(32) ",
+      "create table st(ts timestamp, age int , name binary(16) ) tags( area int,addr varbinary(32)",
+      "create table st(ts timestamp, age int, name binary(16)) tags(area int,addr varbinary(32) ",
+      "create table st( ts timestamp, age int, name binary(16)) tags(area int,addr varbinary(32) ",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int, addr varbinary(32) ",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) compress "
+      "'zlib' ",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) level "
+      "'high' ",
+      "create table st (ts timestamp , age int, name binary(16) ) tags ( area int , addr varbinary(32) encode "
+      "'simple8b' level 'high' ",
+  };
+
+  // s0 is false
+  for
(int32_t i = 0; i < sizeof(s0) / sizeof(char *); i++) { + printf("s0 i=%d isCreateFieldsArea %s expect false. \n", i, s0[i]); + ASSERT(isCreateFieldsArea((char *)s0[i]) == false); + } + + // s1 is true + for (int32_t i = 0; i < sizeof(s1) / sizeof(char *); i++) { + printf("s1 i=%d isCreateFieldsArea %s expect true. \n", i, s1[i]); + ASSERT(isCreateFieldsArea((char *)s1[i]) == true); + } +} + +int main(int argc, char **argv) { + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} \ No newline at end of file diff --git a/utils/test/c/tmq_taosx_ci.c b/utils/test/c/tmq_taosx_ci.c index 3a79a3763c..cd70dd88f5 100644 --- a/utils/test/c/tmq_taosx_ci.c +++ b/utils/test/c/tmq_taosx_ci.c @@ -79,6 +79,7 @@ static void msg_process(TAOS_RES* msg) { } else { taosFprintfFile(g_fp, result); taosFprintfFile(g_fp, "\n"); + taosFsyncFile(g_fp); } } } @@ -132,7 +133,7 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000, \"ttt\", true)"); if (taos_errno(pRes) != 0) { - printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes)); + printf("failed to create child table ct0, reason:%s\n", taos_errstr(pRes)); return -1; } taos_free_result(pRes); @@ -175,7 +176,7 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { pRes = taos_query( pConn, "insert into ct3 values(1626006833600, 5, 6, 'c') ct1 values(1626006833601, 2, 3, 'sds') (1626006833602, 4, 5, " - "'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(now+5s, 23, 32, 's21ds')"); + "'ddd') ct0 values(1626006833603, 4, 3, 'hwj') ct1 values(1626006833703, 23, 32, 's21ds')"); if (taos_errno(pRes) != 0) { printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes)); return -1; @@ -189,6 +190,41 @@ int buildDatabase(TAOS* pConn, TAOS_RES* pRes) { } taos_free_result(pRes); + pRes = taos_query(pConn, "insert into ct1 values(1736006813600, -32222, 43, 'ewb', 99)"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "alter table st1 drop column c4"); + if (taos_errno(pRes) != 0) { + printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into ct1 values(1736006833600, -4223, 344, 'bfs')"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "alter table st1 add column c4 bigint"); + if (taos_errno(pRes) != 0) { + printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + + pRes = taos_query(pConn, "insert into ct1 values(1766006833600, -4432, 4433, 'e23wb', 9349)"); + if (taos_errno(pRes) != 0) { + printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes)); + return -1; + } + taos_free_result(pRes); + pRes = taos_query(pConn, "alter table st1 modify column c3 binary(64)"); if (taos_errno(pRes) != 0) { printf("failed to alter super table st1, reason:%s\n", taos_errstr(pRes)); @@ -596,6 +632,7 @@ tmq_t* build_consumer() { tmq_conf_set(conf, "enable.auto.commit", "true"); tmq_conf_set(conf, "auto.offset.reset", "earliest"); tmq_conf_set(conf, "msg.consume.excluded", "1"); +// tmq_conf_set(conf, "session.timeout.ms", "1000000"); // tmq_conf_set(conf, "max.poll.interval.ms", "20000"); if (g_conf.snapShot) { @@ -636,6 +673,7 @@ void 
basic_consume_loop(tmq_t* tmq, tmq_list_t* topics) { TAOS_RES* tmqmessage = tmq_consumer_poll(tmq, 5000); if (tmqmessage) { cnt++; + printf("cnt:%d\n", cnt); msg_process(tmqmessage); taos_free_result(tmqmessage); } else { @@ -844,6 +882,8 @@ void initLogFile() { "{\"name\":\"t1\",\"type\":4,\"value\":3000}],\"createList\":[]}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\"," @@ -991,6 +1031,8 @@ void initLogFile() { "{\"name\":\"t1\",\"type\":4,\"value\":3000}],\"createList\":[]}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\"," "\"colType\":5}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":6,\"colName\":\"c4\"}", + "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":5,\"colName\":\"c4\",\"colType\":5}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":7,\"colName\":\"c3\"," "\"colType\":8,\"colLength\":64}", "{\"type\":\"alter\",\"tableType\":\"super\",\"tableName\":\"st1\",\"alterType\":1,\"colName\":\"t2\","
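+          // alterType 6 = drop column, 5 = add column; these mirror the new drop/add of c4 in buildDatabase()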