diff --git a/Jenkinsfile2 b/Jenkinsfile2
index 7ba9b4a933..086883dfac 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -391,6 +391,7 @@ pipeline {
WKDIR = '/var/lib/jenkins/workspace'
WK = '/var/lib/jenkins/workspace/TDinternal'
WKC = '/var/lib/jenkins/workspace/TDinternal/community'
+ WKPY = '/var/lib/jenkins/workspace/taos-connector-python'
DOC_WKC = '/root/doc_ci_work'
td_repo = 'TDengine'
zh_doc_repo = 'docs.taosdata.com'
diff --git a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml
index 6e4941b4f1..34719dc135 100644
--- a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml
+++ b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml
@@ -22,19 +22,19 @@
            <groupId>org.springframework</groupId>
            <artifactId>spring-context</artifactId>
-           <version>5.2.8.RELEASE</version>
+           <version>5.3.39</version>
            <groupId>org.springframework</groupId>
            <artifactId>spring-jdbc</artifactId>
-           <version>5.1.9.RELEASE</version>
+           <version>5.3.39</version>
            <groupId>org.springframework</groupId>
            <artifactId>spring-test</artifactId>
-           <version>5.1.9.RELEASE</version>
+           <version>5.3.39</version>
@@ -47,7 +47,7 @@
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
-           <version>3.0.0</version>
+           <version>3.4.0</version>
diff --git a/docs/examples/JDBC/springbootdemo/pom.xml b/docs/examples/JDBC/springbootdemo/pom.xml
index ee15f6013e..ba75cdcec3 100644
--- a/docs/examples/JDBC/springbootdemo/pom.xml
+++ b/docs/examples/JDBC/springbootdemo/pom.xml
@@ -5,7 +5,7 @@
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-parent</artifactId>
-       <version>2.2.1.RELEASE</version>
+       <version>2.6.15</version>
    <groupId>com.taosdata.example</groupId>
@@ -65,6 +65,8 @@
            <artifactId>spring-boot-starter-aop</artifactId>
+
+
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
diff --git a/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java b/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java
index 53edaa5796..df7aa32158 100644
--- a/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java
+++ b/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/SpringbootdemoApplication.java
@@ -3,9 +3,10 @@ package com.taosdata.example.springbootdemo;
import org.mybatis.spring.annotation.MapperScan;
import org.springframework.boot.SpringApplication;
import org.springframework.boot.autoconfigure.SpringBootApplication;
+import org.springframework.boot.autoconfigure.data.jdbc.JdbcRepositoriesAutoConfiguration;
@MapperScan(basePackages = {"com.taosdata.example.springbootdemo"})
-@SpringBootApplication
+@SpringBootApplication(exclude = {JdbcRepositoriesAutoConfiguration.class})
public class SpringbootdemoApplication {
public static void main(String[] args) {
diff --git a/docs/examples/JDBC/springbootdemo/src/main/resources/application.properties b/docs/examples/JDBC/springbootdemo/src/main/resources/application.properties
index 00a06a5098..2b231f403b 100644
--- a/docs/examples/JDBC/springbootdemo/src/main/resources/application.properties
+++ b/docs/examples/JDBC/springbootdemo/src/main/resources/application.properties
@@ -15,6 +15,8 @@ spring.datasource.druid.max-wait=30000
spring.datasource.druid.validation-query=select SERVER_VERSION();
spring.aop.auto=true
spring.aop.proxy-target-class=true
+
+spring.jooq.sql-dialect=
#mybatis
mybatis.mapper-locations=classpath:mapper/*.xml
logging.level.com.taosdata.jdbc.springbootdemo.dao=debug
diff --git a/docs/examples/JDBC/taosdemo/pom.xml b/docs/examples/JDBC/taosdemo/pom.xml
index ab5912aa9e..c36973947b 100644
--- a/docs/examples/JDBC/taosdemo/pom.xml
+++ b/docs/examples/JDBC/taosdemo/pom.xml
@@ -10,7 +10,7 @@
    <description>Demo project for TDengine</description>
-       <spring.version>5.3.27</spring.version>
+       <spring.version>5.3.39</spring.version>
@@ -130,6 +130,7 @@
                <groupId>org.apache.maven.plugins</groupId>
                <artifactId>maven-compiler-plugin</artifactId>
+               <version>3.13.0</version>
                    <source>8</source>
                    <target>8</target>
diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java
index ab0a1125d2..33e8845d12 100644
--- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java
+++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/QueryService.java
@@ -37,7 +37,7 @@ public class QueryService {
stmt.execute("use " + dbName);
ResultSet rs = stmt.executeQuery("show stables");
while (rs.next()) {
- String name = rs.getString("name");
+ String name = rs.getString("stable_name");
sqls.add("select count(*) from " + dbName + "." + name);
sqls.add("select first(*) from " + dbName + "." + name);
sqls.add("select last(*) from " + dbName + "." + name);
diff --git a/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java b/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java
index 621ba7df5d..e8c6432e38 100644
--- a/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java
+++ b/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/DatabaseServiceTest.java
@@ -1,10 +1,14 @@
package com.taosdata.taosdemo.service;
+import com.zaxxer.hikari.HikariConfig;
+import com.zaxxer.hikari.HikariDataSource;
+import org.junit.BeforeClass;
import org.junit.Test;
import org.springframework.beans.factory.annotation.Autowired;
public class DatabaseServiceTest {
- private DatabaseService service;
+
+ private static DatabaseService service;
@Test
public void testCreateDatabase1() {
@@ -20,4 +24,16 @@ public class DatabaseServiceTest {
public void useDatabase() {
service.useDatabase("test");
}
+
+ @BeforeClass
+ public static void beforeClass() throws ClassNotFoundException {
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ HikariConfig config = new HikariConfig();
+ config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
+ config.setUsername("root");
+ config.setPassword("taosdata");
+ HikariDataSource dataSource = new HikariDataSource(config);
+ service = new DatabaseService(dataSource);
+ }
+
}
\ No newline at end of file
diff --git a/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java b/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java
index f2ad25710c..989aa094f3 100644
--- a/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java
+++ b/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/QueryServiceTest.java
@@ -15,7 +15,7 @@ public class QueryServiceTest {
@Test
public void generateSuperTableQueries() {
- String[] sqls = queryService.generateSuperTableQueries("restful_test");
+ String[] sqls = queryService.generateSuperTableQueries("test");
for (String sql : sqls) {
System.out.println(sql);
}
@@ -23,8 +23,8 @@ public class QueryServiceTest {
@Test
public void querySuperTable() {
- String[] sqls = queryService.generateSuperTableQueries("restful_test");
- queryService.querySuperTable(sqls, 1000, 10, 10);
+ String[] sqls = queryService.generateSuperTableQueries("test");
+ queryService.querySuperTable(sqls, 100, 3, 3);
}
@BeforeClass
diff --git a/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java b/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java
index 33e52af1ea..4edba8c518 100644
--- a/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java
+++ b/docs/examples/JDBC/taosdemo/src/test/java/com/taosdata/taosdemo/service/SuperTableServiceTest.java
@@ -3,6 +3,9 @@ package com.taosdata.taosdemo.service;
import com.taosdata.taosdemo.domain.FieldMeta;
import com.taosdata.taosdemo.domain.SuperTableMeta;
import com.taosdata.taosdemo.domain.TagMeta;
+import com.zaxxer.hikari.HikariConfig;
+import com.zaxxer.hikari.HikariDataSource;
+import org.junit.BeforeClass;
import org.junit.Test;
import java.util.ArrayList;
@@ -10,7 +13,7 @@ import java.util.List;
public class SuperTableServiceTest {
- private SuperTableService service;
+ private static SuperTableService service;
@Test
public void testCreate() {
@@ -29,4 +32,15 @@ public class SuperTableServiceTest {
service.create(superTableMeta);
}
+ @BeforeClass
+ public static void beforeClass() throws ClassNotFoundException {
+ Class.forName("com.taosdata.jdbc.TSDBDriver");
+ HikariConfig config = new HikariConfig();
+ config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8");
+ config.setUsername("root");
+ config.setPassword("taosdata");
+ HikariDataSource dataSource = new HikariDataSource(config);
+ service = new SuperTableService(dataSource);
+ }
+
}
\ No newline at end of file
diff --git a/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java b/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java
index a366efd419..8fbf33ef6d 100644
--- a/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/DruidDemo.java
@@ -1,4 +1,4 @@
-package com.taosdata.example;
+package com.taos.example;
import com.alibaba.druid.pool.DruidDataSource;
@@ -8,11 +8,11 @@ import java.sql.Statement;
public class DruidDemo {
// ANCHOR: connection_pool
public static void main(String[] args) throws Exception {
- String url = "jdbc:TAOS://127.0.0.1:6030/log";
+ String url = "jdbc:TAOS-WS://127.0.0.1:6041/log";
DruidDataSource dataSource = new DruidDataSource();
// jdbc properties
- dataSource.setDriverClassName("com.taosdata.jdbc.TSDBDriver");
+ dataSource.setDriverClassName("com.taosdata.jdbc.ws.WebSocketDriver");
dataSource.setUrl(url);
dataSource.setUsername("root");
dataSource.setPassword("taosdata");
diff --git a/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java b/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java
index 036125e7ea..4045a96642 100644
--- a/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java
@@ -144,8 +144,9 @@ public class GeometryDemo {
private void executeQuery(String sql) {
long start = System.currentTimeMillis();
- try (Statement statement = connection.createStatement()) {
- ResultSet resultSet = statement.executeQuery(sql);
+ try (Statement statement = connection.createStatement();
+ ResultSet resultSet = statement.executeQuery(sql)) {
+
long end = System.currentTimeMillis();
printSql(sql, true, (end - start));
diff --git a/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java b/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java
index 50b20fdb0c..e7a90276d7 100644
--- a/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java
+++ b/docs/examples/java/src/main/java/com/taos/example/HikariDemo.java
@@ -1,4 +1,4 @@
-package com.taosdata.example;
+package com.taos.example;
import com.zaxxer.hikari.HikariConfig;
import com.zaxxer.hikari.HikariDataSource;
@@ -11,7 +11,7 @@ public class HikariDemo {
public static void main(String[] args) throws Exception {
HikariConfig config = new HikariConfig();
// jdbc properties
- config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log");
+ config.setJdbcUrl("jdbc:TAOS-WS://127.0.0.1:6041/log");
config.setUsername("root");
config.setPassword("taosdata");
// connection pool configurations
diff --git a/docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java b/docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java
index 4c9368288d..11b234c4e0 100644
--- a/docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java
+++ b/docs/examples/java/src/main/java/com/taos/example/TelnetLineProtocolExample.java
@@ -39,6 +39,7 @@ public class TelnetLineProtocolExample {
createDatabase(conn);
SchemalessWriter writer = new SchemalessWriter(conn);
writer.write(lines, SchemalessProtocolType.TELNET, SchemalessTimestampType.NOT_CONFIGURED);
+ writer.close();
}
}
diff --git a/docs/zh/06-advanced/05-data-in/07-mqtt.md b/docs/zh/06-advanced/05-data-in/07-mqtt.md
index f54086b61b..8fc69bcaa1 100644
--- a/docs/zh/06-advanced/05-data-in/07-mqtt.md
+++ b/docs/zh/06-advanced/05-data-in/07-mqtt.md
@@ -63,7 +63,7 @@ TDengine can subscribe to data from an MQTT broker through the MQTT connector and write it into T
In **Clean Session**, choose whether to clear the session. The default value is true.
-In **Topics & QoS Configuration**, enter the names of the topics to consume, in the following format: `topic1::0,topic2::1`.
+In **Topics & QoS Configuration**, enter the topic names to consume and their QoS, in the format `{topic_name}::{qos}` (for example, `my_topic::0`). MQTT protocol 5.0 supports shared subscriptions, which allow multiple clients to subscribe to the same topic for load balancing; use the format `$share/{group_name}/{topic_name}::{qos}`, where `$share` is a fixed prefix indicating a shared subscription and `group_name` is the group name, similar to a Kafka consumer group.
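+
+As a quick illustration of the shared-subscription format (a minimal sketch, not part of the connector UI; the broker address, group, and topic names are placeholders), an MQTT 5.0 client such as paho-mqtt (1.x API shown) simply subscribes to the `$share/...` string:
+
+```python
+import paho.mqtt.client as mqtt
+
+# Assumed broker address; any MQTT 5.0 broker with shared-subscription support works.
+client = mqtt.Client(client_id="consumer-1", protocol=mqtt.MQTTv5)
+client.connect("127.0.0.1", 1883)
+
+# Clients subscribing in group "g1" share messages published to "my_topic";
+# the broker load-balances deliveries across the group.
+client.subscribe("$share/g1/my_topic", qos=0)
+client.loop_forever()
+```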
Click the **Check Connectivity** button to verify that the data source is available.
@@ -146,7 +146,13 @@ JSON data can be a JSONObject or a JSONArray; with the JSON parser you can
### 7. Advanced Options
-In the **Log Level** drop-down list, select a log level. There are five options: `TRACE`, `DEBUG`, `INFO`, `WARN`, `ERROR`. The default is INFO.
+In **Message Waiting Queue Size**, enter the size of the buffer queue for received MQTT messages. When the queue is full, newly arriving data is discarded. It can be set to 0, meaning no buffering.
+
+In **Maximum Concurrent Batches**, enter the number of batches that may be in the data processing pipeline at the same time. Once this limit is reached, no more messages are taken from the buffer queue, so messages accumulate there. The minimum value is 1.
+
+In **Batch Size**, enter the number of messages sent to the processing pipeline at a time. It works together with **Batch Delay**: as soon as the number of MQTT messages read reaches the batch size, the data is sent to the processing pipeline immediately, even if **Batch Delay** has not elapsed. The minimum value is 1.
+
+In **Batch Delay**, enter the timeout (in milliseconds) for assembling a batch, counted from the first message received in each batch. It works together with **Batch Size**: when the timeout is reached, the data is sent to the processing pipeline immediately, even if fewer than **Batch Size** messages have been read. The minimum value is 1.
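+
+The interplay of the two batch parameters reduces to a simple "flush on size or timeout" rule. A minimal sketch of that rule follows (the queue/send interfaces are assumptions for illustration, not taosX internals):
+
+```python
+import time
+
+def batch_loop(queue, send, batch_size, batch_delay_ms):
+    """Collect messages into batches; flush when either limit is hit."""
+    batch, first_ts = [], None
+    while True:
+        msg = queue.get(timeout=0.01)  # assumed interface: returns None on timeout
+        if msg is not None:
+            if not batch:
+                first_ts = time.monotonic()  # delay counts from the first message
+            batch.append(msg)
+        expired = batch and (time.monotonic() - first_ts) * 1000 >= batch_delay_ms
+        if len(batch) >= batch_size or expired:
+            send(batch)              # hand the batch to the processing pipeline
+            batch, first_ts = [], None
+```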
When **saving raw data** is enabled, the following 2 parameters take effect.
diff --git a/docs/zh/06-advanced/05-data-in/11-csv.md b/docs/zh/06-advanced/05-data-in/11-csv.md
index 79fbb40740..4924ed2fbd 100644
--- a/docs/zh/06-advanced/05-data-in/11-csv.md
+++ b/docs/zh/06-advanced/05-data-in/11-csv.md
@@ -2,86 +2,123 @@
title: "CSV"
sidebar_label: "CSV"
---
-This section describes how to create a data migration task through the Explorer UI , migrating data from CSV to the current TDengine cluster.
+This section describes how to create a data migration task through the Explorer UI, migrating data from CSV to the current TDengine cluster.
## Feature Overview
Import data from one or more CSV files into TDengine.
## Creating a Task
### 1. Add a Data Source
-On the data writing page, click the **+Add Data Source** button to enter the add data source page.
+On the data writing task list page, click the **+Create Task** button to enter the new task page.

### 2. Configure Basic Information
-In **Name**, enter a task name, such as "test_csv";
+In **Name**, enter a task name, such as "test_csv".
In the **Type** drop-down list, select **CSV**.
-In the **Target Database** drop-down list, select a target database, or first click the **+Create Database** button on the right
+In the **Target Database** drop-down list, select a target database, or first click the **+Create Database** button on the right.

### 3. Configure CSV Options
-In **Contains Header**, toggle on or off; if enabled, the first line is treated as column information.
+In **Contains Header**, toggle on or off; if enabled, the first line of the CSV file content is treated as column information.
-In **Ignore First N Rows**, enter N to skip the first N rows of the CSV file.
+In **Ignore First N Rows**, enter a number N to skip the first N rows of the CSV file.
-In **Field Separator**, choose the separator between CSV fields; the default is ",".
+In **Field Separator**, choose the CSV field separator used to split a line into multiple fields; the default is `,`.
-In **Field Quotation**, choose the character used to enclose field content when a CSV field contains the separator or line breaks, so the whole field is recognized correctly; the default is "“".
+In **Field Quotation**, choose the CSV field quote character, used to enclose field content when a field contains the separator or line breaks so the whole field is recognized correctly; the default is `"`.
-In **Comment Prefix**, choose the character; any line in the CSV file beginning with it is ignored. The default is "#".
+In **Comment Prefix**, choose the CSV line comment prefix; any line in the CSV file beginning with this character is ignored. The default is `#`.

### 4. Configure CSV File Parsing
-Upload a local CSV file, for example test-json.csv; this sample CSV file is then used to configure extraction and filtering conditions.
-#### 4.1 Parsing
+#### 4.1 Configure the Data Source
-After clicking **Select File**, choose test-json.csv, then click **Parse** to preview the recognized columns.
+Two modes are available: "Upload CSV File" and "Monitor File Directory". "Upload CSV File" uploads local files through the browser to the server where taosx runs, to serve as the data source; "Monitor File Directory" configures an absolute path on the taosx server as the data source. Each is described below:
+
+##### 4.1.1 Upload CSV File
+
+On the "Upload CSV File" tab:
+
+Click the **Select File** button to choose one or more local files to upload to the server as the data source.
+
+In **Keep Processed Files**, toggle on or off; if enabled, files remain on the server after processing; if disabled, they are deleted.

-**Preview parsing results**
+##### 4.1.2 Monitor File Directory
+
+On the "Monitor File Directory" tab:
+
+In **File Monitoring Directory**, enter an absolute path on the server where taosx runs; files in this path and its subdirectories serve as the data source.
+
+In **Match Pattern**, enter a regular expression used to filter the files in the directory.
+
+In **Monitor New Files**, toggle on or off; if enabled, the task never stops and keeps processing files added to the directory; if disabled, new files are not processed, and the task is marked completed once the initial files are done.
+
+In **Monitoring Interval**, enter a number configuring the interval for checking for new files.
+
+In **File Processing Order**, select "Ascending" or "Descending" to specify the order in which the file list is processed: "Ascending" processes files in lexicographic order of file names, "Descending" in reverse lexicographic order. In either case, files are always processed before subdirectories at the same level, as sketched below.
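+
+The resulting traversal order can be sketched as follows (a minimal illustration of the stated rules, not taosx's actual implementation):
+
+```python
+import os
+
+def ordered_entries(path, descending=False):
+    """Yield files before same-level subdirectories, each group sorted by name."""
+    entries = sorted(os.scandir(path), key=lambda e: e.name, reverse=descending)
+    for e in entries:
+        if e.is_file():
+            yield e.path
+    for e in entries:
+        if e.is_dir():
+            yield from ordered_entries(e.path, descending)
+```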

-#### 4.2 Field Splitting
-In **Extract or Split from Columns**, enter the fields to extract or split from the message body. For example, to split the message field into the two fields `text_0` and `text_1`, choose the split extractor, set separator to -, and number to 2.
-Click **Delete** to remove the current extraction rule.
-Click **Add** to add more extraction rules.
+#### 4.2 Parsing
+After uploading files or configuring the monitoring directory, click the Parse button. The page fetches sample data from the file and shows the recognized columns along with the parsing results of the sample data:
-
+
-Click the **magnifier icon** to preview the extraction or split results.
+#### 4.3 Extract or Split from Columns
+
+In **Extract or Split from Columns**, enter rules for extracting or splitting fields from the message body. For example, to split the `desc` field into the two fields `desc_0` and `desc_1`, choose the split rule, set separator to `,`, and number to 2.
+
+Click **Delete** to remove the current extraction rule.
+
+Click **Preview** to preview the extraction or split results.
+
+Click **Add Extraction/Split** to add more extraction rules.

-
-
-#### 4.3 Table Mapping
+#### 4.4 Mapping
In the **Target Supertable** drop-down list, select a target supertable, or first click the **Create Supertable** button on the right
-In **Mapping**, enter the subtable name in the target supertable, for example: `t_${groupid}`.
+In the mapping rules, enter the subtable name in the target supertable, for example `csv_meter_${id}`, and configure the columns mapped to the supertable.
-
-
-Click **Preview** to preview the mapping result.
-
-
+Click **Preview** to preview the mapping result.
+
### 5. Completion
-Click the **Submit** button to finish creating the CSV-to-TDengine data sync task. Return to the **Data Sources** list page to view the task's execution status.
+Click the **Submit** button to finish creating the CSV-to-TDengine data sync task. Return to the data writing task list page to view the task's execution status; you can also start/stop the task and view/edit/delete/copy it.
+
+
+
+### 6. View Run Metrics
+
+Click the **View** button to see the task's run metrics, as well as the processing status of all files in the task.
+
+
+
+
+
+
\ No newline at end of file
diff --git a/docs/zh/06-advanced/05-data-in/csv-01.png b/docs/zh/06-advanced/05-data-in/csv-01.png
index a183e0dcef..f1494f5c20 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-01.png and b/docs/zh/06-advanced/05-data-in/csv-01.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-02.png b/docs/zh/06-advanced/05-data-in/csv-02.png
index 909e7ff27c..05972c6810 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-02.png and b/docs/zh/06-advanced/05-data-in/csv-02.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-03.png b/docs/zh/06-advanced/05-data-in/csv-03.png
index 1e0bd97a51..bf5a939fe4 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-03.png and b/docs/zh/06-advanced/05-data-in/csv-03.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-04.png b/docs/zh/06-advanced/05-data-in/csv-04.png
index 189bdfa263..2fb0bed47e 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-04.png and b/docs/zh/06-advanced/05-data-in/csv-04.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-05.png b/docs/zh/06-advanced/05-data-in/csv-05.png
index d7e2b51ccb..ec36377c9e 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-05.png and b/docs/zh/06-advanced/05-data-in/csv-05.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-06.png b/docs/zh/06-advanced/05-data-in/csv-06.png
index 398d1dd903..0a3f794b33 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-06.png and b/docs/zh/06-advanced/05-data-in/csv-06.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-07.png b/docs/zh/06-advanced/05-data-in/csv-07.png
index 7fd5ca4a89..ac9a4c1a2c 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-07.png and b/docs/zh/06-advanced/05-data-in/csv-07.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-08.png b/docs/zh/06-advanced/05-data-in/csv-08.png
new file mode 100644
index 0000000000..0cd525dc98
Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/csv-08.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-09.png b/docs/zh/06-advanced/05-data-in/csv-09.png
new file mode 100644
index 0000000000..706d959a8d
Binary files /dev/null and b/docs/zh/06-advanced/05-data-in/csv-09.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-10.png b/docs/zh/06-advanced/05-data-in/csv-10.png
index c0bb68f373..fcb82af87c 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-10.png and b/docs/zh/06-advanced/05-data-in/csv-10.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-11.png b/docs/zh/06-advanced/05-data-in/csv-11.png
index 268e4788ab..352ae3dcdc 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-11.png and b/docs/zh/06-advanced/05-data-in/csv-11.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-12.png b/docs/zh/06-advanced/05-data-in/csv-12.png
index 5a9f312b01..92fd670adf 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-12.png and b/docs/zh/06-advanced/05-data-in/csv-12.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-13.png b/docs/zh/06-advanced/05-data-in/csv-13.png
index ed5108d3eb..a475bf6660 100644
Binary files a/docs/zh/06-advanced/05-data-in/csv-13.png and b/docs/zh/06-advanced/05-data-in/csv-13.png differ
diff --git a/docs/zh/06-advanced/05-data-in/csv-14.png b/docs/zh/06-advanced/05-data-in/csv-14.png
deleted file mode 100644
index e98d8ce334..0000000000
Binary files a/docs/zh/06-advanced/05-data-in/csv-14.png and /dev/null differ
diff --git a/docs/zh/06-advanced/05-data-in/mqtt-14.png b/docs/zh/06-advanced/05-data-in/mqtt-14.png
index 0388d8a705..df80108d29 100644
Binary files a/docs/zh/06-advanced/05-data-in/mqtt-14.png and b/docs/zh/06-advanced/05-data-in/mqtt-14.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/02-management.md b/docs/zh/06-advanced/06-TDgpt/02-management.md
new file mode 100644
index 0000000000..b8261797f9
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/02-management.md
@@ -0,0 +1,122 @@
+---
+title: "Installation and Deployment"
+sidebar_label: "Installation and Deployment"
+---
+
+### Prerequisites
+To use TDgpt's advanced time-series data analytics features, you must install and deploy an AI node (Anode) in the TDengine cluster. Anodes can run on Linux/Windows/Mac. Before installing an Anode, make sure the system has Python 3.10 or later and the corresponding Python package installer pip; otherwise the Anode cannot be installed.
+
+### Installation and Uninstallation
+Installation and deployment differ slightly across operating systems, mainly in the install/uninstall steps, installation paths, and starting/stopping the Anode service. The following walks through the whole process using Linux as an example. Use the Linux installation package TDengine-enterprise-anode-1.x.x.tar.gz to install and deploy an Anode, with the following commands:
+
+```bash
+tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
+cd TDengine-enterprise-anode-1.0.0
+sudo ./install.sh
+```
+
+After the Anode is installed, run the command `rmtaosanode` to uninstall it.
+The Anode runs in a Python virtual environment to avoid affecting existing Python libraries on the host. After installation, the default virtual environment directory is `/var/lib/taos/taosanode/venv/`. To avoid the overhead of repeatedly recreating the virtual environment, the uninstall command `rmtaosanode` does not delete it automatically; if you are sure you no longer need the Python virtual environment, delete it manually.
+
+### Starting and Stopping the Service
+On Linux, after installing the Anode you can manage its service with `systemd`. Use the following commands to start/stop/check status.
+
+```bash
+systemctl start taosanoded
+systemctl stop taosanoded
+systemctl status taosanoded
+```
+
+### Directories and Configuration
+|Directory/File|Description|
+|---------------|------|
+|/usr/local/taos/taosanode/bin|executables|
+|/usr/local/taos/taosanode/resource|resource files, linked to /var/lib/taos/taosanode/resource/|
+|/usr/local/taos/taosanode/lib|libraries|
+|/var/lib/taos/taosanode/model/|model files, linked to /var/lib/taos/taosanode/model|
+|/var/log/taos/taosanode/|log files|
+|/etc/taos/taosanode.ini|configuration file|
+
+#### Configuration
+
+The Anode's service is driven by uWSGI, so the Anode and uWSGI settings live together in the same configuration file, `taosanode.ini`, located by default in the `/etc/taos/` directory. Its contents and meaning are as follows:
+
+```ini
+[uwsgi]
+
+# Anode HTTP service ip:port
+http = 127.0.0.1:6050
+
+# base directory for Anode python files, do NOT modify this
+chdir = /usr/local/taos/taosanode/lib
+
+# initialize Anode python file
+wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
+
+# pid file
+pidfile = /usr/local/taos/taosanode/taosanode.pid
+
+# conflict with systemctl, so do NOT uncomment this
+# daemonize = /var/log/taos/taosanode/taosanode.log
+
+# log directory
+logto = /var/log/taos/taosanode/taosanode.log
+
+# uWSGI monitor port
+stats = 127.0.0.1:8387
+
+# python virtual environment directory, used by Anode
+virtualenv = /usr/local/taos/taosanode/venv/
+
+[taosanode]
+# default app log file
+app-log = /var/log/taos/taosanode/taosanode.app.log
+
+# model storage directory
+model-dir = /usr/local/taos/taosanode/model/
+
+# default log level
+log-level = DEBUG
+
+```
+
+**Note**
+Do not set the `daemonize` parameter; it conflicts with systemctl and prevents uWSGI from starting properly.
+The sample configuration file `taosanode.ini` above contains only the basic parameters needed to run the Anode service. For the other uWSGI parameters and their meanings, see the [uWSGI documentation](https://uwsgi-docs-zh.readthedocs.io/zh-cn/latest/Options.html).
+
+The main Anode runtime settings are:
+- app-log: log produced by the running Anode service; users may point it to any desired location
+- model-dir: where the models produced by training algorithms on existing datasets are stored
+- log-level: log level of the app-log file
+
+
+### Basic Anode Operations
+#### Create an Anode
+```sql
+CREATE ANODE {node_url}
+```
+node_url is the IP and PORT of the serving Anode, for example: `create anode 'http://localhost:6050'`. After an Anode starts, it cannot provide service until it is registered with a TDengine cluster. Registering an Anode with two or more clusters is not recommended.
+
+#### View Anodes
+List all data analysis nodes in the cluster, including their `FQDN`, `PORT`, and `STATUS`.
+```sql
+SHOW ANODES;
+```
+
+#### View Available Time-Series Analysis Services
+
+```SQL
+SHOW ANODES FULL;
+```
+
+#### Refresh the Cluster's Algorithm Cache
+```SQL
+UPDATE ANODE {node_id}
+UPDATE ALL ANODES
+```
+
+#### Drop an Anode
+```sql
+DROP ANODE {anode_id}
+```
+Dropping an Anode only removes it from the TDengine cluster; starting and stopping the Anode process is still managed with `systemctl`.
diff --git a/docs/zh/06-advanced/06-TDgpt/03-preprocess.md b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md
new file mode 100644
index 0000000000..77ddbb49a4
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/03-preprocess.md
@@ -0,0 +1,33 @@
+---
+title: "Analysis Preprocessing"
+sidebar_label: "Analysis Preprocessing"
+---
+
+import activity from './pic/activity.png';
+import wndata from './pic/white-noise-data.png'
+
+### Analysis Workflow
+Before advanced analysis is performed on time-series data, the data first goes through a white-noise check (White Noise Data check, WND). The overall workflow is shown in the figure below.
+
+
+
+- For time-series forecasting, the white-noise check runs first; if the data is not white noise, it is preprocessed with resampling and timestamp alignment, after which the forecast analysis runs.
+- For time-series anomaly detection, the white-noise check runs first; once it passes, there is no further preprocessing and anomaly detection runs directly.
+
+### White-Noise Check
+
+
+
+White-noise time-series data can loosely be regarded as a sequence of random numbers (as shown in the figure above). Such a random series has no analytical value, so an empty result is returned directly. The white-noise check uses the `Ljung-Box` test, whose statistic requires a pass over the entire input series. If users are certain that the input series is not white noise, they can add the parameter `wncheck=0` to ask the analytics platform to skip the white-noise check and save computing resources.
+TDgpt does not currently offer standalone white-noise detection for time series.
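+
+For intuition, the same test is available in statsmodels; a minimal sketch of such a check (the statsmodels usage is an illustration here, not TDgpt's internal code):
+
+```python
+import numpy as np
+from statsmodels.stats.diagnostic import acorr_ljungbox
+
+rng = np.random.default_rng(42)
+series = rng.normal(size=500)          # pure noise -> test should not reject
+
+res = acorr_ljungbox(series, lags=[10], return_df=True)
+p_value = float(res["lb_pvalue"].iloc[0])
+
+# A large p-value means no autocorrelation was detected, i.e. the series
+# looks like white noise and would be rejected for further analysis.
+print("white noise" if p_value > 0.05 else "has structure")
+```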
+
+
+### Resampling and Timestamp Alignment
+
+Input time-series data must go through necessary preprocessing before forecast analysis. Preprocessing addresses the following two issues (see the sketch after this list):
+
+- Real-world time-series timestamps are unaligned. Because of how data is generated, or because a gateway cannot guarantee strictly regular timestamps when stamping the series, the analytics platform automatically aligns the input data's timestamps to the sampling interval specified by the user. For example, for the input timestamps [11, 22, 29, 41] and a user-specified interval of 10, the series' timestamps are realigned to [10, 20, 30, 40].
+- Resampling in time. When the input series is sampled more frequently than the requested output, it is resampled: for example, if the input sampling interval is 5 and the output interval is 10, the input series [0, 5, 10, 15, 20, 25, 30] is resampled to the interval-10 series [0, 10, 20, 30], and the data at [5, 15, 25] is discarded.
+
+Note that the platform does not fill in missing data before forecast analysis. If the input series has timestamps [11, 22, 29, 49] and the user requests an interval of 10, the realigned series is [10, 20, 30, 50], and forecasting over it returns an error.
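+
+A minimal sketch of these two rules (an illustration of the behavior described above, not TDgpt's implementation):
+
+```python
+def align(ts, interval):
+    """Snap each timestamp to the nearest multiple of `interval`."""
+    return [round(t / interval) * interval for t in ts]
+
+def resample(ts, interval):
+    """Keep only the points that fall exactly on the output grid."""
+    return [t for t in ts if t % interval == 0]
+
+print(align([11, 22, 29, 41], 10))               # [10, 20, 30, 40]
+print(resample([0, 5, 10, 15, 20, 25, 30], 10))  # [0, 10, 20, 30]
+```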
+
diff --git a/docs/zh/06-advanced/06-data-analysis/01-arima.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md
similarity index 93%
rename from docs/zh/06-advanced/06-data-analysis/01-arima.md
rename to docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md
index b9d63e924f..0b5a80ad71 100644
--- a/docs/zh/06-advanced/06-data-analysis/01-arima.md
+++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/02-arima.md
@@ -51,4 +51,4 @@ FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5
### References
- https://en.wikipedia.org/wiki/Autoregressive_moving-average_model
-- https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931?fromtitle=ARMA%E6%A8%A1%E5%9E%8B&fromid=8048415
+- [https://baike.baidu.com/item/自回归滑动平均模型/5023931](https://baike.baidu.com/item/%E8%87%AA%E5%9B%9E%E5%BD%92%E6%BB%91%E5%8A%A8%E5%B9%B3%E5%9D%87%E6%A8%A1%E5%9E%8B/5023931)
diff --git a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md
similarity index 100%
rename from docs/zh/06-advanced/06-data-analysis/02-holtwinters.md
rename to docs/zh/06-advanced/06-TDgpt/04-forecast/03-holtwinters.md
diff --git a/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md
new file mode 100644
index 0000000000..8cc9cb5b6a
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/04-forecast/index.md
@@ -0,0 +1,72 @@
+---
+title: Forecasting Algorithms
+description: Forecasting Algorithms
+---
+
+Time-series forecasting takes a continuous period of time-series data as input and predicts how the series will be distributed and trend over a following continuous time range. Users can specify the number of (forecast) output points, so the number of result rows is not fixed. To this end we introduce the `FORECAST` function, which provides the forecasting service. The base data (the historical time-series data to forecast from) is the function's input, and the forecast result is its output. Through the `FORECAST` function, users invoke the forecasting algorithms offered by an Anode.
+
+##### Syntax
+```SQL
+FORECAST(column_expr, option_expr)
+
+option_expr: {"
+algo=expr1
+[,wncheck=1|0]
+[,conf=conf_val]
+[,every=every_val]
+[,rows=rows_val]
+[,start=start_ts_val]
+[,expr2]
+"}
+
+```
+1. `column_expr`: the time-series data column to forecast. As with anomaly detection, only numeric columns are supported.
+2. `options`: parameters of the forecast function, with the same usage rules as anomaly_window. Forecasting supports the control parameters `conf`, `every`, `rows`, and `start`, with the following meanings:
+
+**Parameter description**
+
+|Parameter|Meaning|Default|
+|---|---|---|
+|algo|forecasting algorithm to use|holtwinters|
+|wncheck|white-noise data check|the default is 1; 0 disables the check|
+|conf|confidence interval of the forecast, in the range [0, 100]|95|
+|every|sampling interval of the forecast results|sampling interval of the input data|
+|start|start timestamp of the forecast results|the last input timestamp plus one sampling interval|
+|rows|number of forecast records|10|
+
+1. The forecast query adds three pseudocolumns: `_FROWTS`, the timestamps of the forecast results; `_FLOW`, the lower confidence bound; and `_FHIGH`, the upper confidence bound. For algorithms without confidence intervals, the bounds equal the forecast values.
+2. Changing the `START` parameter changes the start time of the returned results; it does not affect the forecast values themselves, only their start time.
+3. `EVERY` may differ from the input sampling interval, but only toward lower frequency: it can be lower than or equal to the input sampling frequency, never **higher**.
+4. For algorithms that do not compute confidence intervals, the returned bounds collapse to a single point even if a confidence interval is specified.
+
+**Examples**
+
+```SQL
+--- Forecast with the arima algorithm; the result has 10 records (the default), the data is checked for white noise, and the default 95% confidence interval is used.
+SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
+FROM foo;
+
+--- Forecast with the arima algorithm; the input is periodic data, with 10 samples per period. The confidence interval returned is 95%.
+SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10")
+FROM foo;
+```
+```
+taos> select _flow, _fhigh, _frowts, forecast(i32) from foo;
+ _flow | _fhigh | _frowts | forecast(i32) |
+========================================================================================
+ 10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 |
+ -21.9861946 | 83.3938904 | 2020-01-01 00:01:36.001 | 30 |
+ -78.5686035 | 144.6729126 | 2020-01-01 00:01:37.001 | 33 |
+ -154.9797363 | 230.3057709 | 2020-01-01 00:01:38.001 | 37 |
+ -253.9852905 | 337.6083984 | 2020-01-01 00:01:39.001 | 41 |
+ -375.7857971 | 466.4594727 | 2020-01-01 00:01:40.001 | 45 |
+ -514.8043823 | 622.4426270 | 2020-01-01 00:01:41.001 | 53 |
+ -680.6343994 | 796.2861328 | 2020-01-01 00:01:42.001 | 57 |
+ -868.4956665 | 992.8603516 | 2020-01-01 00:01:43.001 | 62 |
+ -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 | 69 |
+```
+
+
+**Available forecasting algorithms**
+- arima
+- holtwinters
diff --git a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-anomaly-detection.md
similarity index 87%
rename from docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md
rename to docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-anomaly-detection.md
index bdfa455ae3..511a9cef11 100644
--- a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md
+++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/02-anomaly-detection.md
@@ -1,6 +1,6 @@
---
-title: "Anomaly-detection"
-sidebar_label: "Anomaly-detection"
+title: "Anomaly Detection Algorithms"
+sidebar_label: "Anomaly Detection Algorithms"
---
This section describes how to use the anomaly detection algorithm models.
@@ -18,7 +18,7 @@ sidebar_label: "Anomaly-detection"
|k|multiple of the standard deviation|optional|3|
-- IQR[2]: the interquartile range (IQR) is a measure of variability. Quartiles divide a rank-ordered dataset into four equal parts: Q1 (the first quartile), Q2 (the second), and Q3 (the third). The IQR is defined as Q3–Q1, located at Q3+1.5. No input parameters.
+- IQR[2]: the interquartile range (IQR) is a measure of variability. Quartiles divide a rank-ordered dataset into four equal parts: Q1 (the first quartile), Q2 (the second), and Q3 (the third). The IQR is defined as $Q3-Q1$; points beyond $Q3+1.5 \times IQR$ (or below $Q1-1.5 \times IQR$) are flagged. No input parameters.
- Grubbs[3]: also known as Grubbs' test, the maximum normalized residual test. Grubbs is commonly used to test whether the maximum or minimum deviates anomalously from the mean; the univariate dataset must approximately follow a normal distribution. Datasets that are not normally distributed cannot use this method. No input parameters.
@@ -26,19 +26,19 @@ sidebar_label: "Anomaly-detection"
|Parameter|Description|Required|Default|
|---|---|---|---|
-|k|percentage of anomalies in the input dataset, in the range $`1\le K \le 49.9`$ |optional|5|
+|k|percentage of anomalies in the input dataset, in the range $1\le K \le 49.9$ |optional|5|
### Density-Based Detection
-LOF[5]: the Local Outlier Factor algorithm, proposed by Breunig in 2000, is a density-based local outlier detection method suited to data whose clusters have widely varying densities. Based on the density of data surrounding each point, it first computes a local reachability density for every data point and from it derives an outlier factor that indicates how much of an outlier the point is: the larger the factor, the more anomalous the point; the smaller, the less. Finally it outputs the top(n) most anomalous points.
+LOF[5]: the Local Outlier Factor algorithm, proposed by Breunig in 2000, is a density-based local outlier detection method suited to data whose clusters have widely varying densities. Based on the density of data surrounding each point, it first computes a local reachability density for every data point and from it derives an outlier factor that indicates how much of an outlier the point is: the larger the factor, the more anomalous the point; the smaller, the less. Finally it outputs the $top(n)$ most anomalous points.
-### Deep-Learning-Based Detection
+### Autoencoder-Based Detection
An anomaly detection model using an autoencoder. It detects periodic data well, but the model must be trained on your input time series and the trained model deployed to the service directory before it can be run and used.
### References
-1. https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
+1. [https://en.wikipedia.org/wiki/68–95–99.7 rule](https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule)
2. https://en.wikipedia.org/wiki/Interquartile_range
3. Adikaram, K. K. L. B.; Hussein, M. A.; Effenberger, M.; Becker, T. (2015-01-14). "Data Transformation Technique to Improve the Outlier Detection Power of Grubbs's Test for Data Expected to Follow Linear Relation". Journal of Applied Mathematics. 2015: 1–9. doi:10.1155/2015/708948.
4. Hochenbaum, O. S. Vallis, and A. Kejariwal. 2017. Automatic Anomaly Detection in the Cloud Via Statistical Learning. arXiv preprint arXiv:1704.07706 (2017).
diff --git a/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md
new file mode 100644
index 0000000000..c831b63668
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/05-anomaly-detection/index.md
@@ -0,0 +1,78 @@
+---
+title: Anomaly Detection Algorithms
+description: Anomaly Detection Algorithms
+---
+
+import ad from '../pic/anomaly-detection.png';
+
+In TDengine query processing, time-series anomaly detection is served through anomaly windows. An anomaly window can be seen as a special kind of **event window**, the difference being that its opening and closing conditions are not specified by the user but identified automatically by the detection algorithm; any function usable over an event window can therefore be used over an anomaly window. Because a detection result is a time window, anomaly detection is invoked the same way as event windows: use the `ANOMALY_WINDOW` keyword in the `WHERE` clause to invoke the service, and the window pseudocolumns (`_WSTART`, `_WEND`, `_WDURATION`) are available as with other window functions. For example:
+
+```SQL
+SELECT _wstart, _wend, SUM(i32)
+FROM foo
+ANOMALY_WINDOW(i32, "algo=iqr");
+```
+
+As shown in the figure below, the Anode returns the time-series anomaly window $[10:51:30, 10:53:40]$
+
+
+
+On top of this, users can aggregate, transform, and otherwise query the time-series data inside the anomaly window.
+
+### Syntax
+
+```SQL
+ANOMALY_WINDOW(column_name, option_expr)
+
+option_expr: {"
+algo=expr1
+[,wncheck=1|0]
+[,expr2]
+"}
+```
+
+1. `column_name`: the input column for anomaly detection. Only a single column of numeric type is supported; character types (e.g. `NCHAR` `VARCHAR` `VARBINARY`) are not allowed, and **function expressions are not supported**.
+2. `options`: a string selecting the detection algorithm and its parameters as comma-separated K=V pairs. The values need no single quotes, double quotes, or escapes, and may not contain Chinese or other wide characters. For example, `algo=ksigma,k=2` selects the ksigma algorithm with input parameter 2.
+3. The detection result can feed an outer query as a subquery; aggregate and scalar functions in the `SELECT` clause work as in other window queries.
+4. Input data is checked for white noise by default; if the input is white noise, no (anomaly) window information is returned.
+
+**Parameter description**
+|Parameter|Meaning|Default|
+|---|---|---|
+|algo|anomaly detection algorithm to invoke|iqr|
+|wncheck|whether to run a white-noise check on the input column|0 or 1; the default is 1 (check enabled)|
+
+Detection results are returned as windows, so window-related pseudocolumns remain available in this scenario:
+1. `_WSTART`: anomaly window start timestamp
+2. `_WEND`: anomaly window end timestamp
+3. `_WDURATION`: anomaly window duration
+
+**Examples**
+```SQL
+--- Detect anomalies on column i32 with the iqr algorithm.
+SELECT _wstart, _wend, SUM(i32)
+FROM foo
+ANOMALY_WINDOW(i32, "algo=iqr");
+
+--- Detect anomalies on column i32 with the ksigma algorithm, with input parameter k set to 2
+SELECT _wstart, _wend, SUM(i32)
+FROM foo
+ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
+```
+
+```
+taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMALY_WINDOW(i32);
+ _wstart | _wend | count(*) |
+====================================================================
+ 2020-01-01 00:00:16.000 | 2020-01-01 00:00:17.000 | 2 |
+Query OK, 1 row(s) in set (0.028946s)
+```
+
+
+**Available anomaly detection algorithms**
+- iqr
+- ksigma
+- grubbs
+- lof
+- shesd
+- tac
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md
new file mode 100644
index 0000000000..d375ad44b8
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/02-forecast.md
@@ -0,0 +1,115 @@
+---
+title: "预测算法"
+sidebar_label: "预测算法"
+---
+
+### Input Convention
+`execute` is the core method of forecast processing. Before the framework calls it, the historical time-series data used for forecasting has already been set in the object attribute `self.list`.
+
+### Output Convention and Parent-Class Attributes
+When `execute` completes, it returns a dictionary object as follows:
+```python
+return {
+ "mse": mse, # 预测算法的拟合数据最小均方误差(minimum squared error)
+ "res": res # 结果数组 [时间戳数组, 预测结果数组, 预测结果执行区间下界数组,预测结果执行区间上界数组]
+}
+```
+
+
+The forecast parent class `AbstractForecastService` has the following object attributes:
+
+|Attribute|Description|Default|
+|---|---|---|
+|period|periodicity of the input time series, i.e. how many points make one full period; set 0 if the data is aperiodic| 0|
+|start_ts|start timestamp of the forecast result| 0|
+|time_step|time interval between consecutive forecast points|0 |
+|fc_rows|number of forecast points| 0 |
+|return_conf|whether the result includes a confidence interval; if not, the bounds equal the value itself| 1|
+|conf|confidence interval quantile|95|
+
+
+
+### Sample Code
+Below we develop a sample forecasting algorithm that, for any input time series, always returns the value 1 as the forecast.
+
+```python
+import numpy as np
+from service import AbstractForecastService
+
+# The implementation class name must start with an underscore "_" and end with Service
+class _MyForecastService(AbstractForecastService):
+    """Subclass AbstractForecastService and implement its abstract method execute"""
+
+    # keyword used to invoke the algorithm, lowercase ASCII
+    name = 'myfc'
+
+    # description of the algorithm (recommended)
+    desc = """return the forecast time series data"""
+
+    def __init__(self):
+        """class initializer"""
+        super().__init__()
+
+    def execute(self):
+        """core implementation of the algorithm logic"""
+        res = []
+
+        """this algorithm always returns 1 as the forecast value; the number of points is set by the user via self.fc_rows"""
+        ts_list = [self.start_ts + i * self.time_step for i in range(self.fc_rows)]
+        res.append(ts_list)  # set the timestamp column of the forecast result
+
+        """generate a forecast consisting entirely of 1s"""
+        res_list = [1] * self.fc_rows
+        res.append(res_list)
+
+        """check whether the user asked for the confidence bounds to be returned"""
+        if self.return_conf:
+            """for algorithms that do not compute confidence bounds, return the forecast itself as both bounds"""
+            bound_list = [1] * self.fc_rows
+            res.append(bound_list)  # lower confidence bound of the forecast
+            res.append(bound_list)  # upper confidence bound of the forecast
+
+        """return the result"""
+        return {"res": res, "mse": 0}
+
+
+    def set_params(self, params):
+        """this algorithm takes no input parameters; override the parent method and skip parameter handling"""
+        pass
+```
+
+Save the file under the `./taosanalytics/algo/fc/` directory and restart the taosanode service. Run `SHOW ANODES FULL` to see the newly added algorithm, and invoke the forecasting algorithm via SQL.
+
+```SQL
+--- Forecast the col_name column, invoking the newly added forecast class by setting the algo parameter to myfc
+SELECT _flow, _fhigh, _frowts, FORECAST(col_name, "algo=myfc")
+FROM foo;
+```
+
+
+### Unit Tests
+
+Add unit test cases to forecast_test.py in the test directory `taosanalytics/test`, or add a new test file. Unit tests rely on the Python unittest package.
+
+```python
+def test_myfc(self):
+    """test the myfc class"""
+    s = loader.get_service("myfc")
+
+    # set the data used for the forecast
+    s.set_input_list(self.get_input_list())
+    # the forecast values should all be 1
+    s.set_params(
+        {"fc_rows": 10, "start_ts": 171000000, "time_step": 86400 * 30, "start_p": 0}
+    )
+    r = s.execute()
+
+    expected_list = [1] * 10
+    self.assertListEqual(r["res"][1], expected_list)
+```
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md
new file mode 100644
index 0000000000..8068931653
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/03-ad.md
@@ -0,0 +1,76 @@
+---
+title: "异常检测"
+sidebar_label: "异常检测"
+---
+
+### Input Convention
+`execute` is the core processing method. Before the framework calls it, the time-series data for anomaly detection has already been set in the object attribute `self.list`.
+
+### Output Convention
+`execute` returns an array of the same length as `self.list`, in which a value of -1 marks an anomalous point.
+> For example, for the input measurement series $[2, 2, 2, 2, 100]$, if 100 is the anomaly, the method returns the array $[1, 1, 1, 1, -1]$.
+
+
+### Sample Code
+Below we develop a sample anomaly detection algorithm that marks the last value of the input time series as the anomaly and returns the result.
+
+```python
+import numpy as np
+from service import AbstractAnomalyDetectionService
+
+# The implementation class name must start with an underscore "_" and end with Service
+class _MyAnomalyDetectionService(AbstractAnomalyDetectionService):
+    """Subclass AbstractAnomalyDetectionService and implement its abstract methods"""
+
+    # keyword used to invoke the algorithm, lowercase ASCII
+    name = 'myad'
+
+    # description of the algorithm (recommended)
+    desc = """return the last value as the anomaly data"""
+
+    def __init__(self):
+        """class initializer"""
+        super().__init__()
+
+    def execute(self):
+        """core implementation of the algorithm logic"""
+
+        """create a result array of length len(self.list) filled with 1, then set the last value to -1 to mark it as the anomaly"""
+        res = [1] * len(self.list)
+        res[-1] = -1
+
+        """return the result array"""
+        return res
+
+
+    def set_params(self, params):
+        """this algorithm takes no input parameters; override the parent method and skip parameter handling"""
+        pass
+```
+
+Save the file under the `./taosanalytics/algo/ad/` directory and restart the taosanode service. The detection algorithm can then be invoked via SQL.
+
+```SQL
+--- Detect anomalies on column col, invoking the newly added detection class by setting the algo parameter to myad
+SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col, 'algo=myad')
+```
+
+
+### Unit Tests
+
+Add unit test cases to anomaly_test.py in the test directory `taosanalytics/test`, or add a new test file. The framework uses the Python unittest package.
+
+```python
+def test_myad(self):
+ """ 测试 _IqrService 类 """
+ s = loader.get_service("myad")
+
+    # set the input data to detect
+ s.set_input_list(AnomalyDetectionTest.input_list)
+
+ r = s.execute()
+
+    # the last point is the anomaly
+ self.assertEqual(r[-1], -1)
+ self.assertEqual(len(r), len(AnomalyDetectionTest.input_list))
+```
diff --git a/docs/zh/06-advanced/06-TDgpt/06-dev/index.md b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md
new file mode 100644
index 0000000000..6ef9e67a20
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/06-dev/index.md
@@ -0,0 +1,82 @@
+---
+title: "算法开发者指南"
+sidebar_label: "算法开发者指南"
+---
+TDgpt is an extensible platform for advanced time-series analytics. Adding a new algorithm takes only a few simple steps: place the finished algorithm code file in the corresponding directory and restart the Anode to complete the upgrade. On startup the Anode automatically loads the algorithms in specific directories, and users can invoke the algorithms added to TDgpt directly via SQL. Thanks to the loose coupling between TDgpt and taosd, platform upgrades have no impact on taosd at all, and applications need no changes to pick up upgraded analytics features and algorithms.
+
+This approach lets new algorithms be added on demand, greatly widening the range of scenarios TDgpt can serve. Users can embed (forecasting or anomaly detection) algorithms developed for their business scenarios into TDgpt and invoke them via SQL, smoothly upgrading their analytics capabilities with little or no change to application code.
+
+This section explains how to add forecasting and anomaly detection algorithms to the TDengine analytics platform.
+
+## Directory Layout
+First, get familiar with TDgpt's directory layout, shown below:
+
+```bash
+.
+├── cfg
+├── model
+│ └── ac_detection
+├── release
+├── script
+└── taosanalytics
+ ├── algo
+ │ ├── ad
+ │ └── fc
+ ├── misc
+ └── test
+
+```
+
+|Directory|Description|
+|---|---|
+|taosanalytics| source code directory. It contains the algorithm directory algo, the miscellaneous directory misc, and the unit/integration test directory test. Under algo, ad holds anomaly detection algorithm code and fc holds forecasting algorithm code|
+|script|installation and release scripts|
+|model|trained models for specific datasets|
+|cfg|configuration files|
+
+## Conventions and Restrictions
+
+- Python code files for anomaly detection algorithms must be placed in the `./taosanalytics/algo/ad` directory
+- Python code files for forecasting algorithms must be placed in the `./taosanalytics/algo/fc` directory
+
+
+### Class Naming Convention
+
+Because algorithms are loaded automatically, only classes following a specific naming scheme are recognized: the class name must start with an underscore and end with Service. For example, `_KsigmaService` is the class for the KSigma anomaly detection algorithm.
+
+### Class Inheritance Convention
+
+- Anomaly detection algorithms must inherit from `AbstractAnomalyDetectionService` and implement its core abstract method `execute`
+- Forecasting algorithms must inherit from `AbstractForecastService` and likewise implement its core abstract method `execute`
+
+### Class Attribute Initialization
+Each algorithm class must statically initialize two class attributes:
+
+- `name`: the keyword that triggers invocation, in lowercase letters. This is also the name displayed when listing available algorithms with the `SHOW` command.
+- `desc`: description of the algorithm
+
+```SQL
+--- the algo_name value after algo is the class attribute `name`
+SELECT COUNT(*) FROM foo ANOMALY_DETECTION(col_name, 'algo=algo_name')
+```
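+
+For intuition, the effect of the naming convention can be sketched as a loader that keeps only classes matching it (an illustration of the idea, not TDgpt's actual loader code):
+
+```python
+import inspect
+
+def discover(module):
+    """Return {name: cls} for classes named _XxxService in a module."""
+    services = {}
+    for attr, cls in inspect.getmembers(module, inspect.isclass):
+        if attr.startswith("_") and attr.endswith("Service"):
+            services[cls.name] = cls  # `name` is the SQL invocation keyword
+    return services
+```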
+
+## Adding Algorithms with Trained Models
+
+Some deep-learning algorithms must be trained on the input time-series data, producing an analysis model tied to the training dataset. In this case, the same algorithm has a different model for each input dataset.
+To add this type of algorithm to TDgpt, first create a directory under `model` and save all the models that the algorithm produces for its various input time-series datasets in that directory. For example, the models produced by the autoencoder-based anomaly detection algorithm for different datasets are all saved there. To ensure models can be read and loaded correctly, they must be serialized with the `joblib` library.
+The train-then-save approach gives you train-once, invoke-many-times semantics and avoids the repeated training cost of training at call time.
+
+To use a saved model, first call the `set_params` method with the model name in the parameters, e.g. `{"model": "ad_encoder_keras"}`, and that model will be used for computation. For example:
+
+```python
+def test_autoencoder_ad(self):
+    # obtain the specific algorithm object
+ # ...
+
+    # specify the model to use, trained earlier on this dataset
+ s.set_params({"model": "ad_encoder_keras"})
+
+    # run the detection and return the result
+ r = s.execute()
+```
+
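+The save side is symmetric. A minimal sketch of serializing a trained model into the model directory with joblib (the model object, subdirectory, and file name below are placeholders for illustration):
+
+```python
+import joblib
+
+# `trained_model` stands for whatever estimator your training script produced.
+trained_model = {"weights": [0.1, 0.2, 0.3]}  # placeholder object
+
+# Save under the algorithm's own subdirectory of the model directory;
+# the file name is what `set_params({"model": ...})` later refers to.
+joblib.dump(trained_model, "/usr/local/taos/taosanode/model/autoencoder/ad_encoder_keras")
+
+# Loading it back:
+restored = joblib.load("/usr/local/taos/taosanode/model/autoencoder/ad_encoder_keras")
+```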
diff --git a/docs/zh/06-advanced/06-TDgpt/index.md b/docs/zh/06-advanced/06-TDgpt/index.md
new file mode 100644
index 0000000000..96b654b068
--- /dev/null
+++ b/docs/zh/06-advanced/06-TDgpt/index.md
@@ -0,0 +1,22 @@
+---
+sidebar_label: TDgpt
+title: TDgpt
+---
+
+import TDgpt from './pic/data-analysis.png';
+
+
+TDgpt is an enterprise-grade component of TDengine Enterprise that provides advanced analytics for time-series data. It can be deployed and run independently of the main TDengine process, consuming none of its resources, and exposes runtime-extensible advanced time-series analytics to TDengine through built-in interfaces. TDgpt is stateless, easy to extend, quick to deploy elastically, lightweight for applications, and highly secure.
+TDgpt runs on AI Nodes (Anodes) deployed in a TDengine cluster. A cluster may contain one or several Anodes; they are independent of one another, with no synchronization or coordination required. Once registered with a TDengine cluster, an Anode provides services through internal interfaces. TDgpt's advanced time-series analytics fall into two categories: time-series anomaly detection and time-series forecasting.
+
+The figure below shows a TDengine cluster with TDgpt deployed.
+
+
+After an Anode is registered with the Mnode through the registration command, it joins the TDengine cluster and can be invoked dynamically by the query engine. During query processing, the query engine requests advanced time-series analytics from the Anode **on demand**, following the generated physical execution plan. Users interact with the Anode through SQL statements and can use all the analytics services it provides. Note that the Anode does not accept analysis requests from users directly. The Anode also offers an efficient dynamic registration mechanism: registering and removing Anodes does not affect the TDengine cluster's service at all, only the availability of the corresponding query capabilities.
+
+TDgpt's advanced analytics are divided into time-series anomaly detection and time-series forecasting.
+- Anomaly detection results are delivered as anomaly windows: the system automatically returns consecutive anomalous data identified by the algorithm as time windows, used the same way as TDengine's other window types (e.g. state windows, event windows). In particular, an anomaly window can be treated as a special kind of **event window**, so every query operation available on state windows also applies to anomaly windows.
+- Forecasting takes the input time-series data and, with a specified (or default) forecasting algorithm, produces **predicted** observations for the time series that follows the input. Unlike anomaly detection, which is expressed as windows, forecasting in TDengine is a function (with output of indeterminate size).
+
+
+
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/activity.png b/docs/zh/06-advanced/06-TDgpt/pic/activity.png
new file mode 100644
index 0000000000..2d2a403de5
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/activity.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/anomaly-detection.png b/docs/zh/06-advanced/06-TDgpt/pic/anomaly-detection.png
new file mode 100644
index 0000000000..f198ce88b7
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/anomaly-detection.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/data-analysis.png b/docs/zh/06-advanced/06-TDgpt/pic/data-analysis.png
new file mode 100755
index 0000000000..baeb51ad5c
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/data-analysis.png differ
diff --git a/docs/zh/06-advanced/06-TDgpt/pic/white-noise-data.png b/docs/zh/06-advanced/06-TDgpt/pic/white-noise-data.png
new file mode 100644
index 0000000000..754c6f3d38
Binary files /dev/null and b/docs/zh/06-advanced/06-TDgpt/pic/white-noise-data.png differ
diff --git a/docs/zh/06-advanced/06-data-analysis/addins.md b/docs/zh/06-advanced/06-data-analysis/addins.md
deleted file mode 100644
index c0b8921718..0000000000
--- a/docs/zh/06-advanced/06-data-analysis/addins.md
+++ /dev/null
@@ -1,170 +0,0 @@
----
-title: "addins"
-sidebar_label: "addins"
----
-
-This section explains how to integrate your own forecasting and anomaly detection algorithms into the TDengine analytics platform so they can be invoked via SQL.
-
-## Directory Layout
-
-
-
-|Directory|Description|
-|---|---|
-|taos|Python source directory. It contains the algorithm directory algo, the miscellaneous directory misc, and the unit/integration test directory test. Under algo, ad holds anomaly detection algorithm code and fc holds forecasting algorithm code|
-|script|installation and release scripts|
-|model|trained models for specific datasets|
-|cfg|configuration files|
-
-## Conventions and Restrictions
-
-Python files defining anomaly detection algorithms must be placed in the /taos/algo/ad directory, and forecasting algorithm files in the /taos/algo/fc directory, so that they are loaded correctly when the system starts.
-
-
-### Class Naming Convention
-
-Algorithm class names must start with an underscore and end with Service. For example, _KsigmaService is the implementation class of the KSigma anomaly detection algorithm.
-
-### Class Inheritance Convention
-
-- Anomaly detection algorithms must inherit from `AbstractAnomalyDetectionService` and implement its core abstract method `execute`
-- Forecasting algorithms must inherit from `AbstractForecastService` and likewise implement its core abstract method `execute`
-
-### Class Attribute Initialization
-Each algorithm class must statically initialize two class attributes:
-
-- `name`: the keyword that triggers invocation, in lowercase letters
-- `desc`: description of the algorithm
-
-### Core Method Input and Output Conventions
-
-`execute` is the core processing method. When it is called, `self.list` has already been populated with the input array.
-
-Anomaly detection output
-
-`execute` returns an array of the same length as `self.list`, in which the positions with value -1 are the anomalous points. For example, for the input array [2, 2, 2, 2, 100], if 100 is the anomaly, the return value is [1, 1, 1, 1, -1].
-
-Forecast output
-
-For forecasting algorithms, the object attributes of `AbstractForecastService` are as follows:
-
-|Attribute|Description|Default|
-|---|---|---|
-|period|periodicity of the input time series, i.e. how many points make one full period; set 0 if the data is aperiodic| 0|
-|start_ts|start timestamp of the forecast result| 0|
-|time_step|time interval between consecutive forecast points|0 |
-|fc_rows|number of forecast points| 0 |
-|return_conf|whether the result includes a confidence interval; if not, the bounds equal the value itself| 1|
-|conf|confidence interval quantile|0.05|
-
-
-The forecast return value is as follows:
-```python
-return {
- "rows": self.fc_rows, # 预测数据行数
- "period": self.period, # 数据周期性,同输入
- "algo": "holtwinters", # 预测使用的算法
- "mse": mse, # 预测算法的 mse
- "res": res # 结果数组 [时间戳数组, 预测结果数组, 预测结果执行区间下界数组,预测结果执行区间上界数组]
-}
-```
-
-
-## Sample Code
-
-```python
-import numpy as np
-from service import AbstractAnomalyDetectionService
-
-# The implementation class name must start with an underscore "_" and end with Service; _IqrService below implements the IQR anomaly detection algorithm.
-class _IqrService(AbstractAnomalyDetectionService):
-    """IQR algorithm: subclass AbstractAnomalyDetectionService and implement its abstract methods"""
-
-    # keyword used to invoke the algorithm, lowercase ASCII (required)
- name = 'iqr'
-
-    # description of the algorithm (recommended)
- desc = """found the anomaly data according to the inter-quartile range"""
-
- def __init__(self):
- super().__init__()
-
- def execute(self):
- """ execute 是算法实现逻辑的核心实现,直接修改该实现即可 """
-
-        # self.list is the input numeric column, a list such as [1,2,3,4,5]. Setting self.list is handled by the parent class; to implement your own algorithm, just replace the code below.
- #lower = np.quantile(self.list, 0.25)
- #upper = np.quantile(self.list, 0.75)
-
- #min_val = lower - 1.5 * (upper - lower)
- #max_val = upper + 1.5 * (upper - lower)
- #threshold = [min_val, max_val]
-
-        # The return value is a column of the same length as the input; anomalies are marked -1. For the input above, the returned [1, 1, 1, 1, -1] means [5] is the anomaly.
- return [-1 if k < threshold[0] or k > threshold[1] else 1 for k in self.list]
-
-
- def set_params(self, params):
- """该算法无需任何输入参数,直接重载父类该函数,不处理算法参数设置逻辑"""
- pass
-```
-
-
-## Unit Tests
-
-Add unit test cases to anomaly_test.py in the test directory.
-
-```python
-def test_iqr(self):
- """ 测试 _IqrService 类 """
- s = loader.get_service("iqr")
-
-    # set the input data to detect
- s.set_input_list(AnomalyDetectionTest.input_list)
-
-    # exercise the set_params handling logic
- try:
- s.set_params({"k": 2})
- except ValueError as e:
- self.assertEqual(1, 0)
-
- r = s.execute()
-
-    # plot the detection results
- draw_ad_results(AnomalyDetectionTest.input_list, r, "iqr")
-
-    # check the results
- self.assertEqual(r[-1], -1)
- self.assertEqual(len(r), len(AnomalyDetectionTest.input_list))
-```
-
-## Algorithms That Require Models
-
-For algorithms that train a model on a specific dataset, the trained model must be saved in the model directory after training. Note that each algorithm needs its own folder: for example, the auto_encoder training algorithm creates an autoencoder directory under model, and all the models it trains on different datasets go there.
-
-Save the trained model with joblib.
-
-Create the corresponding folder under the model directory to store it.
-
-To invoke a saved model, see encoder.py for reference: call the set_params method with the parameter `{"model": "ad_encoder_keras"}` and the model will be used for computation.
-
-The invocation looks like this:
-
-```python
-def test_autoencoder_ad(self):
-    # obtain the specific algorithm service
- s = loader.get_service("ac")
- data = self.__load_remote_data_for_ad()
-
-    # set the input data for anomaly detection
- s.set_input_list(data)
-
-    # specify the model to use, trained earlier on this dataset
- s.set_params({"model": "ad_encoder_keras"})
-    # run the detection and return the result
- r = s.execute()
-
- num_of_error = -(sum(filter(lambda x: x == -1, r)))
- self.assertEqual(num_of_error, 109)
-```
-
diff --git a/docs/zh/06-advanced/06-data-analysis/index.md b/docs/zh/06-advanced/06-data-analysis/index.md
deleted file mode 100644
index 2cbea1caba..0000000000
--- a/docs/zh/06-advanced/06-data-analysis/index.md
+++ /dev/null
@@ -1,322 +0,0 @@
----
-sidebar_label: Data Analytics
-title: Data Analytics Features
----
-
-## Overview
-
-ANode (Analysis Node) is TDengine's extension component for data analytics. It provides analysis services over a RESTful interface, extending TDengine with advanced time-series analysis.
-ANodes are stateless analysis nodes; a cluster may contain multiple unrelated ANodes. Once an ANode is registered with a TDengine cluster, time-series analysis tasks can be invoked and completed via SQL.
-The figure below shows the technical architecture of the analytics feature.
-
-
-
-## Installation and Deployment
-### Prerequisites
-An ANode requires Python 3.10 or later on the node, along with the Python package installer pip, and needs working internet access.
-
-### Installation and Uninstallation
-Install and deploy an ANode with the dedicated ANode package TDengine-enterprise-anode-1.x.x.tar.gz; the installation process matches TDengine's.
-
-```bash
-tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz
-cd TDengine-enterprise-anode-1.0.0
-sudo ./install.sh
-```
-
-To uninstall an ANode, run the command `rmtaosanode`.
-
-### Other Notes
-To avoid affecting the target node's existing Python libraries, the ANode runs in a Python virtual environment, installed by default at `/var/lib/taos/taosanode/venv/`. To avoid the cost of recreating the virtual environment, uninstalling the ANode does not delete it automatically; delete it manually if you are sure it is no longer needed.
-
-## Starting and Stopping the Service
-After installing the ANode, you can manage its service with `systemctl`. Use the following commands to start/stop/check status.
-
-```bash
-systemctl start taosanoded
-systemctl stop taosanoded
-systemctl status taosanoded
-```
-
-## Directories and Configuration
-|Directory/File|Description|
-|---------------|------|
-|/usr/local/taos/taosanode/bin|executables|
-|/usr/local/taos/taosanode/resource|resource files, linked to /var/lib/taos/taosanode/resource/|
-|/usr/local/taos/taosanode/lib|libraries|
-|/var/lib/taos/taosanode/model/|model files, linked to /var/lib/taos/taosanode/model|
-|/var/log/taos/taosanode/|log files|
-|/etc/taos/taosanode.ini|configuration file|
-
-### Configuration
-
-The ANode's RESTful service is driven by uWSGI, so the ANode and uWSGI settings live in the same configuration file, as follows:
-
-```ini
-[uwsgi]
-# charset
-env = LC_ALL = en_US.UTF-8
-
-# ip:port
-http = 127.0.0.1:6050
-
-# the local unix socket file that communicates with Nginx
-#socket = 127.0.0.1:8001
-#socket-timeout = 10
-
-# base directory
-chdir = /usr/local/taos/taosanode/lib
-
-# initialize python file
-wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py
-
-# call module of uWSGI
-callable = app
-
-# auto remove unix Socket and pid file when stopping
-vacuum = true
-
-# socket exec model
-#chmod-socket = 664
-
-# uWSGI pid
-uid = root
-
-# uWSGI gid
-gid = root
-
-# main process
-master = true
-
-# the number of worker processes
-processes = 2
-
-# pid file
-pidfile = /usr/local/taos/taosanode/taosanode.pid
-
-# enable threads
-enable-threads = true
-
-# the number of threads for each process
-threads = 4
-
-# memory usage report
-memory-report = true
-
-# smooth restart
-reload-mercy = 10
-
-# conflict with systemctl, so do NOT uncomment this
-# daemonize = /var/log/taos/taosanode/taosanode.log
-
-# log directory
-logto = /var/log/taos/taosanode/taosanode.log
-
-# uWSGI monitor port
-stats = 127.0.0.1:8387
-
-# python virtual environment directory
-virtualenv = /usr/local/taos/taosanode/venv/
-
-[taosanode]
-# default app log file
-app-log = /var/log/taos/taosanode/taosanode.app.log
-
-# model storage directory
-model-dir = /usr/local/taos/taosanode/model/
-
-# default log level
-log-level = DEBUG
-
-# draw the query results
-draw-result = 0
-```
-
-**Note**
-Do not set the `daemonize` parameter; it conflicts with systemctl and prevents uWSGI from starting properly.
-
-
-## Basic ANode Operations
-### Managing ANodes
-#### Create an ANode
-```sql
-CREATE ANODE {node_url}
-```
-node_url is the IP and PORT of the serving ANode, for example: `create anode 'http://localhost:6050'`. After an ANode starts, it cannot provide service until it is registered with a TDengine cluster. Registering an ANode with two or more clusters is not recommended.
-
-#### View ANodes
-List all data analysis nodes in the cluster, including their `FQDN`, `PORT`, and `STATUS`.
-```sql
-SHOW ANODES;
-```
-
-#### View Available Time-Series Analysis Services
-
-```SQL
-SHOW ANODES FULL;
-```
-
-#### Force-Refresh the Cluster's Algorithm Cache
-```SQL
-UPDATE ANODE {node_id}
-UPDATE ALL ANODES
-```
-
-#### Drop an ANode
-```sql
-DROP ANODE {anode_id}
-```
-Dropping an ANode only removes it from the TDengine cluster; starting and stopping the ANode process is still managed with `systemctl`.
-
-### Time-Series Analysis Features
-
-#### White-Noise Check
-
-The RESTful services of the analytics platform require that the input time series not be white-noise data (White Noise Data, WND) or random-number sequences, so all data undergoes a white-noise check by default. The check uses the common `Ljung-Box` test, whose statistic requires a pass over the whole input series.
-If users are certain the input series is not white noise, they can skip the check via an input parameter, saving CPU resources during analysis.
-Standalone white-noise detection of an input series is also supported (this check is not yet exposed externally).
-
-
-#### Resampling and Timestamp Alignment
-
-The platform can resample input data in preprocessing, so the output follows the user-specified regular interval. There are two cases:
-
-- Timestamp alignment. Real data may not arrive exactly on the timestamps specified by the query; the platform automatically aligns the data to the specified interval. For example, the input series [11, 22, 29, 41] with a specified interval of 10 is realigned to [10, 20, 30, 40].
-- Resampling in time. When the input series is sampled more frequently than the requested output, e.g. input interval 5 vs output interval 10, the input [0, 5, 10, 15, 20, 25, 30] is resampled to the interval-10 series [0, 10, 20, 30], and the data at [5, 15, 25] is discarded.
-
-Note that the platform does not fill in missing data before forecast analysis. If the input series has timestamps [11, 22, 29, 49] and the user requests an interval of 10, the realigned series is [10, 20, 30, 50], and forecasting over it returns an error.
-
-
-#### Time-Series Anomaly Detection
-Anomaly detection applies preset or user-specified algorithms to the input time series to identify points that are **possibly** anomalous; several consecutive anomalous points are automatically merged into one continuous (closed-interval) anomaly window. For a single point, the window degenerates to one whose start and end times coincide.
-The anomaly windows produced depend jointly on the detection algorithm and its parameters; TDengine's aggregate and scalar functions can be applied to query or transform the data within a window.
-For the input time series (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19), if the system detects (3, 91) and (4, 120) as anomalies, the returned anomaly window is the closed interval [3, 4].
-
-
-##### Syntax
-
-```SQL
-ANOMALY_WINDOW(column_name, option_expr)
-
-option_expr: {"
-algo=expr1
-[,wncheck=1|0]
-[,expr2]
-"}
-```
-
-1. `column`: the input column for anomaly detection. Only a single column of numeric type is supported; character types (e.g. `NCHAR` `VARCHAR` `VARBINARY`) are not allowed, and **function expressions are not supported**.
-2. `options`: a string selecting the detection algorithm and its parameters as comma-separated K=V pairs. The values need no single quotes, double quotes, or escapes, and may not contain Chinese or other wide characters. For example, `algo=ksigma,k=2` selects the ksigma algorithm with input parameter 2.
-3. The detection result can feed an outer query as a subquery; aggregate and scalar functions in the `SELECT` clause work as in other window queries.
-4. Input data is checked for white noise by default; if the input is white noise, no (anomaly) window information is returned.
-
-**Parameter description**
-|Parameter|Meaning|Default|
-|---|---|---|
-|algo|anomaly detection algorithm to invoke|iqr|
-|wncheck|whether to run a white-noise check on the input column|0 or 1; the default is 1 (check enabled)|
-
-Detection results are returned as windows, so window-related pseudocolumns remain available:
-1. `_WSTART`: anomaly window start timestamp
-2. `_WEND`: anomaly window end timestamp
-3. `_WDURATION`: anomaly window duration
-
-**Examples**
-```SQL
---- Detect anomalies on column i32 with the iqr algorithm.
-SELECT _wstart, _wend, SUM(i32)
-FROM ai.atb
-ANOMALY_WINDOW(i32, "algo=iqr");
-
---- Detect anomalies on column i32 with the ksigma algorithm, with input parameter k set to 2
-SELECT _wstart, _wend, SUM(i32)
-FROM ai.atb
-ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
-```
-
-```
-taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMAYL_WINDOW(i32);
- _wstart | _wend | count(*) |
-====================================================================
- 2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 | 1 |
-Query OK, 1 row(s) in set (0.028946s)
-```
-
-
-**Available anomaly detection algorithms**
-- iqr
-- ksigma
-- grubbs
-- lof
-- shesd
-- tac
-
-
-#### Time-Series Forecasting
-Forecasting takes a stretch of training data as input and predicts the trend of the time series over the next continuous time range.
-
-##### Syntax
-```SQL
-FORECAST(column_expr, option_expr)
-
-option_expr: {"
-algo=expr1
-[,wncheck=1|0]
-[,conf=conf_val]
-[,every=every_val]
-[,rows=rows_val]
-[,start=start_ts_val]
-[,expr2]
-"}
-
-```
-1. `column_expr`: the time-series column to forecast. As with anomaly detection, only numeric input is supported.
-2. `options`: parameters of the forecast function, with the same usage rules as anomaly_window. Forecasting also supports the parameters `conf`, `every`, `rows`, and `start`, with the following meanings:
-
-**Parameter description**
-
-|Parameter|Meaning|Default|
-|---|---|---|
-|algo|forecasting algorithm to use|holtwinters|
-|wncheck|white-noise data check|the default is 1; 0 disables the check|
-|conf|confidence interval of the forecast, in the range [0, 100]|95|
-|every|sampling interval of the forecast results|sampling interval of the input data|
-|start|start timestamp of the forecast results|the last input timestamp plus one sampling interval|
-|rows|number of forecast records|10|
-
-1. The forecast query adds three pseudocolumns: `_FROWTS`, the timestamps of the forecast results; `_FLOW`, the lower confidence bound; and `_FHIGH`, the upper confidence bound. For algorithms without confidence intervals, the bounds equal the forecast values.
-2. Changing the `START` parameter changes the start time of the returned results; it does not affect the forecast values themselves, only their start time.
-3. `EVERY` may differ from the input sampling interval, but only toward lower frequency: it can be lower than or equal to the input sampling frequency, never **higher**.
-4. For algorithms that do not compute confidence intervals, the returned bounds collapse to a single point even if a confidence interval is specified.
-
-**Examples**
-
-```SQL
---- Forecast with the arima algorithm; the result has 10 records (the default), the data is checked for white noise, and the default 95% confidence interval is used.
-SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima")
-FROM ai.ftb;
-
---- Forecast with the arima algorithm; the input is periodic data, with 10 samples per period. The confidence interval returned is 95%.
-SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10")
-FROM ai.ftb;
-```
-```
-taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
- _flow | _fhigh | _frowts | forecast(i32) |
-========================================================================================
- 10.5286684 | 41.8038254 | 2020-01-01 00:01:35.001 | 26 |
- -21.9861946 | 83.3938904 | 2020-01-01 00:01:36.001 | 30 |
- -78.5686035 | 144.6729126 | 2020-01-01 00:01:37.001 | 33 |
- -154.9797363 | 230.3057709 | 2020-01-01 00:01:38.001 | 37 |
- -253.9852905 | 337.6083984 | 2020-01-01 00:01:39.001 | 41 |
- -375.7857971 | 466.4594727 | 2020-01-01 00:01:40.001 | 45 |
- -514.8043823 | 622.4426270 | 2020-01-01 00:01:41.001 | 53 |
- -680.6343994 | 796.2861328 | 2020-01-01 00:01:42.001 | 57 |
- -868.4956665 | 992.8603516 | 2020-01-01 00:01:43.001 | 62 |
- -1076.1566162 | 1214.4498291 | 2020-01-01 00:01:44.001 | 69 |
-```
-
-
-**Available forecasting algorithms**
-- arima
-- holtwinters
diff --git a/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png b/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png
deleted file mode 100644
index 44fd82832f..0000000000
Binary files a/docs/zh/06-advanced/06-data-analysis/pic/data-analysis.png and /dev/null differ
diff --git a/docs/zh/06-advanced/06-data-analysis/pic/dir.png b/docs/zh/06-advanced/06-data-analysis/pic/dir.png
deleted file mode 100644
index d5aafb4427..0000000000
Binary files a/docs/zh/06-advanced/06-data-analysis/pic/dir.png and /dev/null differ
diff --git a/docs/zh/08-operation/19-debug.md b/docs/zh/08-operation/19-debug.md
new file mode 100644
index 0000000000..ebab9410d6
--- /dev/null
+++ b/docs/zh/08-operation/19-debug.md
@@ -0,0 +1,14 @@
+---
+sidebar_label: Analysis and Debugging
+title: Analysis and Debugging
+toc_max_heading_level: 4
+---
+To better analyze and debug TDengine, developers are advised to install the following analysis and debugging tools on their operating system:
+## gdb
+GDB (GNU Debugger) is a powerful command-line debugger, widely used to debug programs written in C, C++, and other languages.
+## valgrind
+valgrind is a tool framework for memory debugging, leak detection, and profiling. Valgrind provides a set of tools that help developers detect and fix memory errors, threading errors, and performance problems in programs.
+## bpftrace
+bpftrace is a high-level dynamic tracing tool based on eBPF (Extended Berkeley Packet Filter) technology, used for performance analysis and troubleshooting on Linux systems.
+## perf
+perf is a powerful Linux performance analysis tool. It offers detailed performance analysis of the system and applications, helping developers and system administrators identify and resolve performance bottlenecks.
\ No newline at end of file
diff --git a/docs/zh/14-reference/03-taos-sql/02-database.md b/docs/zh/14-reference/03-taos-sql/02-database.md
index 91b39976a1..4f799bdde9 100644
--- a/docs/zh/14-reference/03-taos-sql/02-database.md
+++ b/docs/zh/14-reference/03-taos-sql/02-database.md
@@ -189,23 +189,23 @@ TRIM DATABASE db_name;
FLUSH DATABASE db_name;
```
-Flushes in-memory data to disk. Executing this command before shutting down a node avoids data replay after restart and speeds up the startup process.
+Flushes in-memory data to disk. Executing this command before shutting down a node avoids write-ahead log (WAL) replay after restart and speeds up the startup process.
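+
+For example, as part of a node shutdown procedure (the database name `power` is an assumed placeholder):
+
+```sql
+FLUSH DATABASE power;
+```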
## Adjust the distribution of VNODEs in a VGROUP
```sql
REDISTRIBUTE VGROUP vgroup_no DNODE dnode_id1 [DNODE dnode_id2] [DNODE dnode_id3]
```
Adjusts the distribution of vnodes in the vgroup according to the given list of dnodes. Because the maximum number of replicas is 3, at most 3 dnodes can be specified.
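+
+A hypothetical invocation (the vgroup number and dnode IDs are assumed values):
+
+```sql
+-- move the vnodes of vgroup 5 onto dnodes 1, 2, and 3
+REDISTRIBUTE VGROUP 5 DNODE 1 DNODE 2 DNODE 3;
+```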
-## Automatically adjust the distribution of VNODEs in a VGROUP
+## Automatically adjust the distribution of LEADERs in a VGROUP
```sql
-BALANCE VGROUP
+BALANCE VGROUP LEADER
```
-Automatically adjusts the vnode distribution across all vgroups in the cluster, which amounts to load-balancing the cluster's data at the vnode level.
+Triggers a new leader election in every vgroup of the cluster, rebalancing the load across the cluster nodes.
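+
+A minimal usage sketch, triggering the re-election and then inspecting the result (assuming a database is currently selected so that `SHOW VGROUPS` can list its vgroups and their leaders):
+
+```sql
+BALANCE VGROUP LEADER;
+SHOW VGROUPS;
+```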
## View database working status
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index ea764e6760..1d9a9bcc61 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -155,6 +155,7 @@ typedef enum EStreamType {
STREAM_MID_RETRIEVE,
STREAM_PARTITION_DELETE_DATA,
STREAM_GET_RESULT,
+ STREAM_DROP_CHILD_TABLE,
} EStreamType;
#pragma pack(push, 1)
@@ -401,6 +402,8 @@ int32_t dumpConfToDataBlock(SSDataBlock* pBlock, int32_t startCol);
#define TSMA_RES_STB_EXTRA_COLUMN_NUM 4 // 3 columns: _wstart, _wend, _wduration, 1 tag: tbname
static inline bool isTsmaResSTb(const char* stbName) {
+ static bool showTsmaTables = false;
+ if (showTsmaTables) return false;
const char* pos = strstr(stbName, TSMA_RES_STB_POSTFIX);
if (pos && strlen(stbName) == (pos - stbName) + strlen(TSMA_RES_STB_POSTFIX)) {
return true;
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index bf3fa716c6..e6c471eaf1 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -188,7 +188,6 @@ extern int32_t tsMaxRetryWaitTime;
extern bool tsUseAdapter;
extern int32_t tsMetaCacheMaxSize;
extern int32_t tsSlowLogThreshold;
-extern int32_t tsSlowLogThresholdTest;
extern char tsSlowLogExceptDb[];
extern int32_t tsSlowLogScope;
extern int32_t tsSlowLogMaxLen;
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index a7da778513..bdf333b635 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -676,7 +676,7 @@ typedef struct {
int32_t tsSlowLogThreshold;
int32_t tsSlowLogMaxLen;
int32_t tsSlowLogScope;
- int32_t tsSlowLogThresholdTest;
+ int32_t tsSlowLogThresholdTest; // Obsolete; kept so the message layout stays compatible
char tsSlowLogExceptDb[TSDB_DB_NAME_LEN];
} SMonitorParas;
@@ -3228,6 +3228,7 @@ int tDecodeSVCreateTbBatchRsp(SDecoder* pCoder, SVCreateTbBatchRsp* pRsp);
typedef struct {
char* name;
uint64_t suid; // for tmq in wal format
+ int64_t uid;
int8_t igNotExists;
} SVDropTbReq;
diff --git a/include/libs/executor/storageapi.h b/include/libs/executor/storageapi.h
index db0d6339c8..feb7bcc25e 100644
--- a/include/libs/executor/storageapi.h
+++ b/include/libs/executor/storageapi.h
@@ -336,6 +336,7 @@ typedef struct SStateStore {
int32_t (*streamStatePutParName)(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t (*streamStateGetParName)(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache,
int32_t* pWinCode);
+ int32_t (*streamStateDeleteParName)(SStreamState* pState, int64_t groupId);
int32_t (*streamStateAddIfNotExist)(SStreamState* pState, const SWinKey* key, void** pVal, int32_t* pVLen,
int32_t* pWinCode);
diff --git a/include/libs/stream/streamState.h b/include/libs/stream/streamState.h
index a50451c3eb..2179547352 100644
--- a/include/libs/stream/streamState.h
+++ b/include/libs/stream/streamState.h
@@ -116,6 +116,7 @@ void streamStateCurPrev(SStreamState* pState, SStreamStateCur* pCur);
int32_t streamStatePutParName(SStreamState* pState, int64_t groupId, const char* tbname);
int32_t streamStateGetParName(SStreamState* pState, int64_t groupId, void** pVal, bool onlyCache, int32_t* pWinCode);
+int32_t streamStateDeleteParName(SStreamState* pState, int64_t groupId);
// group id
int32_t streamStateGroupPut(SStreamState* pState, int64_t groupId, void* value, int32_t vLen);
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index de10d6844e..2cf791c8da 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -462,7 +462,7 @@ struct SStreamTask {
struct SStreamMeta* pMeta;
SSHashObj* pNameMap;
void* pBackend;
- int8_t subtableWithoutMd5;
+ int8_t subtableWithoutMd5; // only for tsma stream tasks
char reserve[256];
char* backendPath;
};
diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h
index f95b3f20ca..999adc2eff 100644
--- a/include/libs/wal/wal.h
+++ b/include/libs/wal/wal.h
@@ -138,6 +138,7 @@ typedef struct {
int8_t scanMeta;
int8_t deleteMsg;
int8_t enableRef;
+ int8_t scanDropCtb;
} SWalFilterCond;
// todo hide this struct
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index fa9df5be73..b0be3a4d3b 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -294,8 +294,7 @@ static void deregisterRequest(SRequestObj *pRequest) {
}
}
- if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL ||
- duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) &&
+ if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL) &&
checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) {
(void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1);
if (pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) {
diff --git a/source/client/test/CMakeLists.txt b/source/client/test/CMakeLists.txt
index 054b5af2b9..7ca3086871 100644
--- a/source/client/test/CMakeLists.txt
+++ b/source/client/test/CMakeLists.txt
@@ -29,6 +29,12 @@ TARGET_LINK_LIBRARIES(
# PUBLIC os util common transport monitor parser catalog scheduler function gtest taos_static qcom executor
#)
+ADD_EXECUTABLE(userOperTest ../../../tests/script/api/passwdTest.c)
+TARGET_LINK_LIBRARIES(
+ userOperTest
+ PUBLIC taos
+)
+
TARGET_INCLUDE_DIRECTORIES(
clientTest
PUBLIC "${TD_SOURCE_DIR}/include/client/"
@@ -69,3 +75,8 @@ add_test(
# NAME clientMonitorTest
# COMMAND clientMonitorTest
# )
+
+add_test(
+ NAME userOperTest
+ COMMAND userOperTest
+)
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 95788a7ff0..0133428c53 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -184,7 +184,6 @@ int32_t tsMaxRetryWaitTime = 10000;
bool tsUseAdapter = false;
int32_t tsMetaCacheMaxSize = -1; // MB
int32_t tsSlowLogThreshold = 10; // seconds
-int32_t tsSlowLogThresholdTest = INT32_MAX; // seconds
char tsSlowLogExceptDb[TSDB_DB_NAME_LEN] = ""; // seconds
int32_t tsSlowLogScope = SLOW_LOG_TYPE_QUERY;
char *tsSlowLogScopeString = "query";
@@ -762,7 +761,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitor", tsEnableMonitor, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "monitorInterval", tsMonitorInterval, 1, 86400, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
- TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "slowLogThresholdTest", tsSlowLogThresholdTest, 0, INT32_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 1, INT32_MAX, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "slowLogMaxLen", tsSlowLogMaxLen, 1, 16384, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
TAOS_CHECK_RETURN(cfgAddString(pCfg, "slowLogScope", tsSlowLogScopeString, CFG_SCOPE_SERVER, CFG_DYN_SERVER));
@@ -1443,9 +1441,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "slowLogExceptDb");
tstrncpy(tsSlowLogExceptDb, pItem->str, TSDB_DB_NAME_LEN);
- TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "slowLogThresholdTest");
- tsSlowLogThresholdTest = pItem->i32;
-
TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "slowLogThreshold");
tsSlowLogThreshold = pItem->i32;
@@ -2017,7 +2012,6 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) {
{"monitor", &tsEnableMonitor},
{"monitorInterval", &tsMonitorInterval},
{"slowLogThreshold", &tsSlowLogThreshold},
- {"slowLogThresholdTest", &tsSlowLogThresholdTest},
{"slowLogMaxLen", &tsSlowLogMaxLen},
{"mndSdbWriteDelta", &tsMndSdbWriteDelta},
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index edf0db9954..ca5af34e15 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -76,7 +76,7 @@ static int32_t tSerializeSMonitorParas(SEncoder *encoder, const SMonitorParas *p
TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogScope));
TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogMaxLen));
TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThreshold));
- TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThresholdTest));
+ TAOS_CHECK_RETURN(tEncodeI32(encoder, pMonitorParas->tsSlowLogThresholdTest)); //Obsolete
TAOS_CHECK_RETURN(tEncodeCStr(encoder, pMonitorParas->tsSlowLogExceptDb));
return 0;
}
@@ -87,7 +87,7 @@ static int32_t tDeserializeSMonitorParas(SDecoder *decoder, SMonitorParas *pMoni
TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogScope));
TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogMaxLen));
TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogThreshold));
- TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogThresholdTest));
+ TAOS_CHECK_RETURN(tDecodeI32(decoder, &pMonitorParas->tsSlowLogThresholdTest)); //Obsolete
TAOS_CHECK_RETURN(tDecodeCStrTo(decoder, pMonitorParas->tsSlowLogExceptDb));
return 0;
}
@@ -10317,6 +10317,7 @@ static int32_t tEncodeSVDropTbReq(SEncoder *pCoder, const SVDropTbReq *pReq) {
TAOS_CHECK_RETURN(tStartEncode(pCoder));
TAOS_CHECK_RETURN(tEncodeCStr(pCoder, pReq->name));
TAOS_CHECK_RETURN(tEncodeU64(pCoder, pReq->suid));
+ TAOS_CHECK_RETURN(tEncodeI64(pCoder, pReq->uid));
TAOS_CHECK_RETURN(tEncodeI8(pCoder, pReq->igNotExists));
tEndEncode(pCoder);
@@ -10327,6 +10328,7 @@ static int32_t tDecodeSVDropTbReq(SDecoder *pCoder, SVDropTbReq *pReq) {
TAOS_CHECK_RETURN(tStartDecode(pCoder));
TAOS_CHECK_RETURN(tDecodeCStr(pCoder, &pReq->name));
TAOS_CHECK_RETURN(tDecodeU64(pCoder, &pReq->suid));
+ TAOS_CHECK_RETURN(tDecodeI64(pCoder, &pReq->uid));
TAOS_CHECK_RETURN(tDecodeI8(pCoder, &pReq->igNotExists));
tEndDecode(pCoder);
diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
index 78cc35a62c..c01fdcc85b 100644
--- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
+++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
@@ -195,7 +195,6 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
req.clusterCfg.monitorParas.tsSlowLogScope = tsSlowLogScope;
req.clusterCfg.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
req.clusterCfg.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
- req.clusterCfg.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
tstrncpy(req.clusterCfg.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
char timestr[32] = "1970-01-01 00:00:00.00";
if (taosParseTime(timestr, &req.clusterCfg.checkTime, (int32_t)strlen(timestr), TSDB_TIME_PRECISION_MILLI, 0) != 0) {
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index 24ae8382f9..406128e232 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -482,7 +482,6 @@ static int32_t mndCheckClusterCfgPara(SMnode *pMnode, SDnodeObj *pDnode, const S
CHECK_MONITOR_PARA(tsEnableMonitor, DND_REASON_STATUS_MONITOR_SWITCH_NOT_MATCH);
CHECK_MONITOR_PARA(tsMonitorInterval, DND_REASON_STATUS_MONITOR_INTERVAL_NOT_MATCH);
CHECK_MONITOR_PARA(tsSlowLogThreshold, DND_REASON_STATUS_MONITOR_SLOW_LOG_THRESHOLD_NOT_MATCH);
- CHECK_MONITOR_PARA(tsSlowLogThresholdTest, DND_REASON_STATUS_MONITOR_NOT_MATCH);
CHECK_MONITOR_PARA(tsSlowLogMaxLen, DND_REASON_STATUS_MONITOR_SLOW_LOG_SQL_MAX_LEN_NOT_MATCH);
CHECK_MONITOR_PARA(tsSlowLogScope, DND_REASON_STATUS_MONITOR_SLOW_LOG_SCOPE_NOT_MATCH);
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c
index 21aba8df10..fd02367f6d 100644
--- a/source/dnode/mnode/impl/src/mndProfile.c
+++ b/source/dnode/mnode/impl/src/mndProfile.c
@@ -304,7 +304,6 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) {
connectRsp.monitorParas.tsSlowLogScope = tsSlowLogScope;
connectRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
connectRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
- connectRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
connectRsp.enableAuditDelete = tsEnableAuditDelete;
tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
connectRsp.whiteListVer = pUser->ipWhiteListVer;
@@ -706,7 +705,6 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) {
batchRsp.monitorParas.tsEnableMonitor = tsEnableMonitor;
batchRsp.monitorParas.tsMonitorInterval = tsMonitorInterval;
batchRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold;
- batchRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest;
tstrncpy(batchRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN);
batchRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen;
batchRsp.monitorParas.tsSlowLogScope = tsSlowLogScope;
diff --git a/source/dnode/mnode/impl/src/mndStb.c b/source/dnode/mnode/impl/src/mndStb.c
index 3725d3a3fc..eb6c326d1e 100644
--- a/source/dnode/mnode/impl/src/mndStb.c
+++ b/source/dnode/mnode/impl/src/mndStb.c
@@ -4063,8 +4063,8 @@ static int32_t mndProcessDropStbReqFromMNode(SRpcMsg *pReq) {
}
typedef struct SVDropTbVgReqs {
- SVDropTbBatchReq req;
- SVgroupInfo info;
+ SArray *pBatchReqs;
+ SVgroupInfo info;
} SVDropTbVgReqs;
typedef struct SMDropTbDbInfo {
@@ -4086,45 +4086,21 @@ typedef struct SMDropTbTsmaInfos {
} SMDropTbTsmaInfos;
typedef struct SMndDropTbsWithTsmaCtx {
- SHashObj *pTsmaMap; //
- SHashObj *pDbMap; //
- SHashObj *pVgMap; //
- SArray *pResTbNames; // SArray
+ SHashObj *pVgMap; //
} SMndDropTbsWithTsmaCtx;
-static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
+static int32_t mndDropTbForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
int32_t vgId);
+static void destroySVDropTbBatchReqs(void *p);
static void mndDestroyDropTbsWithTsmaCtx(SMndDropTbsWithTsmaCtx *p) {
if (!p) return;
- if (p->pDbMap) {
- void *pIter = taosHashIterate(p->pDbMap, NULL);
- while (pIter) {
- SMDropTbDbInfo *pInfo = pIter;
- taosArrayDestroy(pInfo->dbVgInfos);
- pIter = taosHashIterate(p->pDbMap, pIter);
- }
- taosHashCleanup(p->pDbMap);
- }
- if (p->pResTbNames) {
- taosArrayDestroyP(p->pResTbNames, taosMemoryFree);
- }
- if (p->pTsmaMap) {
- void *pIter = taosHashIterate(p->pTsmaMap, NULL);
- while (pIter) {
- SMDropTbTsmaInfos *pInfos = pIter;
- taosArrayDestroy(pInfos->pTsmaInfos);
- pIter = taosHashIterate(p->pTsmaMap, pIter);
- }
- taosHashCleanup(p->pTsmaMap);
- }
-
if (p->pVgMap) {
void *pIter = taosHashIterate(p->pVgMap, NULL);
while (pIter) {
SVDropTbVgReqs *pReqs = pIter;
- taosArrayDestroy(pReqs->req.pArray);
+ taosArrayDestroyEx(pReqs->pBatchReqs, destroySVDropTbBatchReqs);
pIter = taosHashIterate(p->pVgMap, pIter);
}
taosHashCleanup(p->pVgMap);
@@ -4136,24 +4112,13 @@ static int32_t mndInitDropTbsWithTsmaCtx(SMndDropTbsWithTsmaCtx **ppCtx) {
int32_t code = 0;
SMndDropTbsWithTsmaCtx *pCtx = taosMemoryCalloc(1, sizeof(SMndDropTbsWithTsmaCtx));
if (!pCtx) return terrno;
- pCtx->pTsmaMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
- if (!pCtx->pTsmaMap) {
- code = terrno;
- goto _end;
- }
-
- pCtx->pDbMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
- if (!pCtx->pDbMap) {
- code = terrno;
- goto _end;
- }
- pCtx->pResTbNames = taosArrayInit(TARRAY_MIN_SIZE, POINTER_BYTES);
pCtx->pVgMap = taosHashInit(4, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (!pCtx->pVgMap) {
code = terrno;
goto _end;
}
+
*ppCtx = pCtx;
_end:
if (code) mndDestroyDropTbsWithTsmaCtx(pCtx);
@@ -4192,16 +4157,43 @@ static void *mndBuildVDropTbsReq(SMnode *pMnode, const SVgroupInfo *pVgInfo, con
}
static int32_t mndSetDropTbsRedoActions(SMnode *pMnode, STrans *pTrans, const SVDropTbVgReqs *pVgReqs, void *pCont,
- int32_t contLen) {
+ int32_t contLen, tmsg_t msgType) {
STransAction action = {0};
action.epSet = pVgReqs->info.epSet;
action.pCont = pCont;
action.contLen = contLen;
- action.msgType = TDMT_VND_DROP_TABLE;
+ action.msgType = msgType;
action.acceptableCode = TSDB_CODE_TDB_TABLE_NOT_EXIST;
return mndTransAppendRedoAction(pTrans, &action);
}
+static int32_t mndBuildDropTbRedoActions(SMnode *pMnode, STrans *pTrans, SHashObj *pVgMap, tmsg_t msgType) {
+ int32_t code = 0;
+ void *pIter = taosHashIterate(pVgMap, NULL);
+ while (pIter) {
+ const SVDropTbVgReqs *pVgReqs = pIter;
+ int32_t len = 0;
+ for (int32_t i = 0; i < taosArrayGetSize(pVgReqs->pBatchReqs) && code == TSDB_CODE_SUCCESS; ++i) {
+ SVDropTbBatchReq *pBatchReq = taosArrayGet(pVgReqs->pBatchReqs, i);
+ void *p = mndBuildVDropTbsReq(pMnode, &pVgReqs->info, pBatchReq, &len);
+ if (!p) {
+ code = TSDB_CODE_MND_RETURN_VALUE_NULL;
+ if (terrno != 0) code = terrno;
+ break;
+ }
+ if ((code = mndSetDropTbsRedoActions(pMnode, pTrans, pVgReqs, p, len, msgType)) != 0) {
+ break;
+ }
+ }
+ if (TSDB_CODE_SUCCESS != code) {
+ taosHashCancelIterate(pVgMap, pIter);
+ break;
+ }
+ pIter = taosHashIterate(pVgMap, pIter);
+ }
+ return code;
+}
+
static int32_t mndCreateDropTbsTxnPrepare(SRpcMsg *pRsp, SMndDropTbsWithTsmaCtx *pCtx) {
int32_t code = 0;
SMnode *pMnode = pRsp->info.node;
@@ -4216,23 +4208,7 @@ static int32_t mndCreateDropTbsTxnPrepare(SRpcMsg *pRsp, SMndDropTbsWithTsmaCtx
TAOS_CHECK_GOTO(mndTransCheckConflict(pMnode, pTrans), NULL, _OVER);
- void *pIter = taosHashIterate(pCtx->pVgMap, NULL);
- while (pIter) {
- const SVDropTbVgReqs *pVgReqs = pIter;
- int32_t len = 0;
- void *p = mndBuildVDropTbsReq(pMnode, &pVgReqs->info, &pVgReqs->req, &len);
- if (!p) {
- taosHashCancelIterate(pCtx->pVgMap, pIter);
- code = TSDB_CODE_MND_RETURN_VALUE_NULL;
- if (terrno != 0) code = terrno;
- goto _OVER;
- }
- if ((code = mndSetDropTbsRedoActions(pMnode, pTrans, pVgReqs, p, len)) != 0) {
- taosHashCancelIterate(pCtx->pVgMap, pIter);
- goto _OVER;
- }
- pIter = taosHashIterate(pCtx->pVgMap, pIter);
- }
+ if ((code = mndBuildDropTbRedoActions(pMnode, pTrans, pCtx->pVgMap, TDMT_VND_DROP_TABLE)) != 0) goto _OVER;
if ((code = mndTransPrepare(pMnode, pTrans)) != 0) goto _OVER;
_OVER:
@@ -4257,10 +4233,11 @@ static int32_t mndProcessDropTbWithTsma(SRpcMsg *pReq) {
if (code) goto _OVER;
for (int32_t i = 0; i < dropReq.pVgReqs->size; ++i) {
SMDropTbReqsOnSingleVg *pReq = taosArrayGet(dropReq.pVgReqs, i);
- code = mndDropTbAddTsmaResTbsForSingleVg(pMnode, pCtx, pReq->pTbs, pReq->vgInfo.vgId);
+ code = mndDropTbForSingleVg(pMnode, pCtx, pReq->pTbs, pReq->vgInfo.vgId);
if (code) goto _OVER;
}
- if (mndCreateDropTbsTxnPrepare(pReq, pCtx) == 0) {
+ code = mndCreateDropTbsTxnPrepare(pReq, pCtx);
+ if (code == 0) {
code = TSDB_CODE_ACTION_IN_PROGRESS;
}
_OVER:
@@ -4269,87 +4246,58 @@ _OVER:
TAOS_RETURN(code);
}
+static int32_t createDropTbBatchReq(const SVDropTbReq *pReq, SVDropTbBatchReq *pBatchReq) {
+ pBatchReq->nReqs = 1;
+ pBatchReq->pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
+ if (!pBatchReq->pArray) return terrno;
+ if (taosArrayPush(pBatchReq->pArray, pReq) == NULL) {
+ taosArrayDestroy(pBatchReq->pArray);
+ pBatchReq->pArray = NULL;
+ return terrno;
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
+static void destroySVDropTbBatchReqs(void *p) {
+ SVDropTbBatchReq *pReq = p;
+ taosArrayDestroy(pReq->pArray);
+ pReq->pArray = NULL;
+}
+
static int32_t mndDropTbAdd(SMnode *pMnode, SHashObj *pVgHashMap, const SVgroupInfo *pVgInfo, char *name, tb_uid_t suid,
bool ignoreNotExists) {
- SVDropTbReq req = {.name = name, .suid = suid, .igNotExists = ignoreNotExists};
+ SVDropTbReq req = {.name = name, .suid = suid, .igNotExists = ignoreNotExists, .uid = 0};
- SVDropTbVgReqs *pReqs = taosHashGet(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
- SVDropTbVgReqs reqs = {0};
- if (pReqs == NULL) {
- reqs.info = *pVgInfo;
- reqs.req.pArray = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbReq));
- if (reqs.req.pArray == NULL) {
+ SVDropTbVgReqs *pVgReqs = taosHashGet(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId));
+ SVDropTbVgReqs vgReqs = {0};
+ if (pVgReqs == NULL) {
+ vgReqs.info = *pVgInfo;
+ vgReqs.pBatchReqs = taosArrayInit(TARRAY_MIN_SIZE, sizeof(SVDropTbBatchReq));
+ if (!vgReqs.pBatchReqs) return terrno;
+ SVDropTbBatchReq batchReq = {0};
+ int32_t code = createDropTbBatchReq(&req, &batchReq);
+ if (TSDB_CODE_SUCCESS != code) return code;
+ if (taosArrayPush(vgReqs.pBatchReqs, &batchReq) == NULL) {
+ taosArrayDestroy(batchReq.pArray);
return terrno;
}
- if (taosArrayPush(reqs.req.pArray, &req) == NULL) {
- return terrno;
- }
- if (taosHashPut(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId), &reqs, sizeof(reqs)) != 0) {
+ if (taosHashPut(pVgHashMap, &pVgInfo->vgId, sizeof(pVgInfo->vgId), &vgReqs, sizeof(vgReqs)) != 0) {
+ taosArrayDestroyEx(vgReqs.pBatchReqs, destroySVDropTbBatchReqs);
return terrno;
}
} else {
- if (taosArrayPush(pReqs->req.pArray, &req) == NULL) {
+ SVDropTbBatchReq batchReq = {0};
+ int32_t code = createDropTbBatchReq(&req, &batchReq);
+ if (TSDB_CODE_SUCCESS != code) return code;
+ if (taosArrayPush(pVgReqs->pBatchReqs, &batchReq) == NULL) {
+ taosArrayDestroy(batchReq.pArray);
return terrno;
}
}
return 0;
}
-int vgInfoCmp(const void *lp, const void *rp) {
- SVgroupInfo *pLeft = (SVgroupInfo *)lp;
- SVgroupInfo *pRight = (SVgroupInfo *)rp;
- if (pLeft->hashBegin < pRight->hashBegin) {
- return -1;
- } else if (pLeft->hashBegin > pRight->hashBegin) {
- return 1;
- }
-
- return 0;
-}
-
-static int32_t mndGetDbVgInfoForTsma(SMnode *pMnode, const char *dbname, SMDropTbTsmaInfo *pInfo) {
- int32_t code = 0;
- SDbObj *pDb = mndAcquireDb(pMnode, dbname);
- if (!pDb) {
- code = TSDB_CODE_MND_DB_NOT_EXIST;
- goto _end;
- }
-
- pInfo->dbInfo.dbVgInfos = taosArrayInit(pDb->cfg.numOfVgroups, sizeof(SVgroupInfo));
- if (!pInfo->dbInfo.dbVgInfos) {
- code = terrno;
- goto _end;
- }
- mndBuildDBVgroupInfo(pDb, pMnode, pInfo->dbInfo.dbVgInfos);
- taosArraySort(pInfo->dbInfo.dbVgInfos, vgInfoCmp);
-
- pInfo->dbInfo.hashPrefix = pDb->cfg.hashPrefix;
- pInfo->dbInfo.hashSuffix = pDb->cfg.hashSuffix;
- pInfo->dbInfo.hashMethod = pDb->cfg.hashMethod;
-
-_end:
- if (pDb) mndReleaseDb(pMnode, pDb);
- if (code && pInfo->dbInfo.dbVgInfos) {
- taosArrayDestroy(pInfo->dbInfo.dbVgInfos);
- pInfo->dbInfo.dbVgInfos = NULL;
- }
- TAOS_RETURN(code);
-}
-
-int32_t vgHashValCmp(const void *lp, const void *rp) {
- uint32_t *key = (uint32_t *)lp;
- SVgroupInfo *pVg = (SVgroupInfo *)rp;
-
- if (*key < pVg->hashBegin) {
- return -1;
- } else if (*key > pVg->hashEnd) {
- return 1;
- }
-
- return 0;
-}
-
-static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
+static int32_t mndDropTbForSingleVg(SMnode *pMnode, SMndDropTbsWithTsmaCtx *pCtx, SArray *pTbs,
int32_t vgId) {
int32_t code = 0;
@@ -4365,88 +4313,9 @@ static int32_t mndDropTbAddTsmaResTbsForSingleVg(SMnode *pMnode, SMndDropTbsWith
vgInfo.epSet = mndGetVgroupEpset(pMnode, pVgObj);
mndReleaseVgroup(pMnode, pVgObj);
- // get all stb uids
- for (int32_t i = 0; i < pTbs->size; ++i) {
- const SVDropTbReq *pTb = taosArrayGet(pTbs, i);
- if (taosHashGet(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid))) {
- } else {
- SMDropTbTsmaInfos infos = {0};
- infos.pTsmaInfos = taosArrayInit(2, sizeof(SMDropTbTsmaInfo));
- if (!infos.pTsmaInfos) {
- code = terrno;
- goto _end;
- }
- if (taosHashPut(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid), &infos, sizeof(infos)) != 0) {
- code = terrno;
- goto _end;
- }
- }
- }
-
- void *pIter = NULL;
- SSmaObj *pSma = NULL;
- char buf[TSDB_TABLE_FNAME_LEN] = {0};
- // get used tsmas and it's dbs
- while (1) {
- pIter = sdbFetch(pMnode->pSdb, SDB_SMA, pIter, (void **)&pSma);
- if (!pIter) break;
- SMDropTbTsmaInfos *pInfos = taosHashGet(pCtx->pTsmaMap, &pSma->stbUid, sizeof(pSma->stbUid));
- if (pInfos) {
- SMDropTbTsmaInfo info = {0};
- int32_t len = sprintf(buf, "%s", pSma->name);
- sprintf(info.tsmaResTbDbFName, "%s", pSma->db);
- snprintf(info.tsmaResTbNamePrefix, TSDB_TABLE_FNAME_LEN, "%s", buf);
- SMDropTbDbInfo *pDbInfo = taosHashGet(pCtx->pDbMap, pSma->db, TSDB_DB_FNAME_LEN);
- info.suid = pSma->dstTbUid;
- if (!pDbInfo) {
- code = mndGetDbVgInfoForTsma(pMnode, pSma->db, &info);
- if (code != TSDB_CODE_SUCCESS) {
- sdbCancelFetch(pMnode->pSdb, pIter);
- sdbRelease(pMnode->pSdb, pSma);
- goto _end;
- }
- if (taosHashPut(pCtx->pDbMap, pSma->db, TSDB_DB_FNAME_LEN, &info.dbInfo, sizeof(SMDropTbDbInfo)) != 0) {
- sdbCancelFetch(pMnode->pSdb, pIter);
- sdbRelease(pMnode->pSdb, pSma);
- goto _end;
- }
- } else {
- info.dbInfo = *pDbInfo;
- }
- if (taosArrayPush(pInfos->pTsmaInfos, &info) == NULL) {
- code = terrno;
- sdbCancelFetch(pMnode->pSdb, pIter);
- sdbRelease(pMnode->pSdb, pSma);
- goto _end;
- }
- }
- sdbRelease(pMnode->pSdb, pSma);
- }
-
- // generate vg req map
for (int32_t i = 0; i < pTbs->size; ++i) {
SVDropTbReq *pTb = taosArrayGet(pTbs, i);
TAOS_CHECK_GOTO(mndDropTbAdd(pMnode, pCtx->pVgMap, &vgInfo, pTb->name, pTb->suid, pTb->igNotExists), NULL, _end);
-
- SMDropTbTsmaInfos *pInfos = taosHashGet(pCtx->pTsmaMap, &pTb->suid, sizeof(pTb->suid));
- SArray *pVgInfos = NULL;
- char buf[TSDB_TABLE_FNAME_LEN + TSDB_TABLE_NAME_LEN + 1];
- char resTbFullName[TSDB_TABLE_FNAME_LEN + 1] = {0};
- for (int32_t j = 0; j < pInfos->pTsmaInfos->size; ++j) {
- SMDropTbTsmaInfo *pInfo = taosArrayGet(pInfos->pTsmaInfos, j);
- int32_t len = sprintf(buf, "%s_%s", pInfo->tsmaResTbNamePrefix, pTb->name);
- len = taosCreateMD5Hash(buf, len);
- len = snprintf(resTbFullName, TSDB_TABLE_FNAME_LEN + 1, "%s.%s", pInfo->tsmaResTbDbFName, buf);
- uint32_t hashVal = taosGetTbHashVal(resTbFullName, len, pInfo->dbInfo.hashMethod, pInfo->dbInfo.hashPrefix,
- pInfo->dbInfo.hashSuffix);
- const SVgroupInfo *pVgInfo = taosArraySearch(pInfo->dbInfo.dbVgInfos, &hashVal, vgHashValCmp, TD_EQ);
- void *p = taosStrdup(resTbFullName + strlen(pInfo->tsmaResTbDbFName) + TSDB_NAME_DELIMITER_LEN);
- if (taosArrayPush(pCtx->pResTbNames, &p) == NULL) {
- code = terrno;
- goto _end;
- }
- TAOS_CHECK_GOTO(mndDropTbAdd(pMnode, pCtx->pVgMap, pVgInfo, p, pInfo->suid, true), NULL, _end);
- }
}
_end:
return code;
@@ -4474,9 +4343,10 @@ static int32_t mndProcessFetchTtlExpiredTbs(SRpcMsg *pRsp) {
code = mndInitDropTbsWithTsmaCtx(&pCtx);
if (code) goto _end;
- code = mndDropTbAddTsmaResTbsForSingleVg(pMnode, pCtx, rsp.pExpiredTbs, rsp.vgId);
+ code = mndDropTbForSingleVg(pMnode, pCtx, rsp.pExpiredTbs, rsp.vgId);
if (code) goto _end;
- if (mndCreateDropTbsTxnPrepare(pRsp, pCtx) == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
+ code = mndCreateDropTbsTxnPrepare(pRsp, pCtx);
+ if (code == 0) code = TSDB_CODE_ACTION_IN_PROGRESS;
_end:
if (pCtx) mndDestroyDropTbsWithTsmaCtx(pCtx);
tDecoderClear(&decoder);
diff --git a/source/dnode/snode/src/snodeInitApi.c b/source/dnode/snode/src/snodeInitApi.c
index 680a2fd83c..4fe4333534 100644
--- a/source/dnode/snode/src/snodeInitApi.c
+++ b/source/dnode/snode/src/snodeInitApi.c
@@ -31,6 +31,7 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStatePutParName = streamStatePutParName;
pStore->streamStateGetParName = streamStateGetParName;
+ pStore->streamStateDeleteParName = streamStateDeleteParName;
pStore->streamStateAddIfNotExist = streamStateAddIfNotExist;
pStore->streamStateReleaseBuf = streamStateReleaseBuf;
diff --git a/source/dnode/vnode/src/inc/tq.h b/source/dnode/vnode/src/inc/tq.h
index 653b47ff14..3c40100f9d 100644
--- a/source/dnode/vnode/src/inc/tq.h
+++ b/source/dnode/vnode/src/inc/tq.h
@@ -146,7 +146,7 @@ int32_t tqBuildFName(char** data, const char* path, char* name);
int32_t tqOffsetRestoreFromFile(STQ* pTq, char* name);
// tq util
-int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type);
+int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type, EStreamType blockType);
int32_t tqExtractDataForMq(STQ* pTq, STqHandle* pHandle, const SMqPollReq* pRequest, SRpcMsg* pMsg);
int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp* pRsp, int32_t epoch, int64_t consumerId,
int32_t type, int64_t sver, int64_t ever);
@@ -158,6 +158,7 @@ int32_t doMergeExistedRows(SSubmitTbData* pExisted, const SSubmitTbData* pNew, c
int32_t buildAutoCreateTableReq(const char* stbFullName, int64_t suid, int32_t numOfCols, SSDataBlock* pDataBlock,
SArray* pTagArray, bool newSubTableRule, SVCreateTbReq** pReq);
+int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type);
#define TQ_ERR_GO_TO_END(c) \
do { \
diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c
index 80c04a3276..bbc58004d9 100644
--- a/source/dnode/vnode/src/sma/smaRollup.c
+++ b/source/dnode/vnode/src/sma/smaRollup.c
@@ -1551,7 +1551,7 @@ static int32_t tdRSmaBatchExec(SSma *pSma, SRSmaInfo *pInfo, STaosQall *qall, SA
_resume_delete:
version = RSMA_EXEC_MSG_VER(msg);
if ((code = tqExtractDelDataBlock(RSMA_EXEC_MSG_BODY(msg), RSMA_EXEC_MSG_LEN(msg), version,
- &packData.pDataBlock, 1))) {
+ &packData.pDataBlock, 1, STREAM_DELETE_DATA))) {
taosFreeQitem(msg);
TAOS_CHECK_EXIT(code);
}
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index bd78f62cae..6195899566 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -758,7 +758,8 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV
}
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) {
- SWalFilterCond cond = {.deleteMsg = 1}; // delete msg also extract from wal files
+ bool scanDropCtb = pTask->subtableWithoutMd5 ? true : false;
+ SWalFilterCond cond = {.deleteMsg = 1, .scanDropCtb = scanDropCtb}; // delete msgs are also extracted from WAL files
pTask->exec.pWalReader = walOpenReader(pTq->pVnode->pWal, &cond, pTask->id.taskId);
if (pTask->exec.pWalReader == NULL) {
tqError("vgId:%d failed init wal reader, code:%s", vgId, tstrerror(terrno));
diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c
index 95955e579f..d924e97ae3 100644
--- a/source/dnode/vnode/src/tq/tqRead.c
+++ b/source/dnode/vnode/src/tq/tqRead.c
@@ -366,8 +366,8 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
} else if (pCont->msgType == TDMT_VND_DELETE) {
void* pBody = POINTER_SHIFT(pCont->body, sizeof(SMsgHead));
int32_t len = pCont->bodyLen - sizeof(SMsgHead);
-
- code = tqExtractDelDataBlock(pBody, len, ver, (void**)pItem, 0);
+ EStreamType blockType = STREAM_DELETE_DATA;
+ code = tqExtractDelDataBlock(pBody, len, ver, (void**)pItem, 0, blockType);
if (code == TSDB_CODE_SUCCESS) {
if (*pItem == NULL) {
tqDebug("s-task:%s empty delete msg, discard it, len:%d, ver:%" PRId64, id, len, ver);
@@ -382,6 +382,20 @@ int32_t extractMsgFromWal(SWalReader* pReader, void** pItem, int64_t maxVer, con
return code;
}
+ } else if (pCont->msgType == TDMT_VND_DROP_TABLE && pReader->cond.scanDropCtb) {
+ void* pBody = POINTER_SHIFT(pCont->body, sizeof(SMsgHead));
+ int32_t len = pCont->bodyLen - sizeof(SMsgHead);
+ code = tqExtractDropCtbDataBlock(pBody, len, ver, (void**)pItem, 0);
+ if (TSDB_CODE_SUCCESS == code) {
+ if (!*pItem) {
+ continue;
+ } else {
+ tqDebug("s-task:%s drop ctb msg extracted from WAL, len:%d, ver:%"PRId64, id, len, ver);
+ }
+ } else {
+ terrno = code;
+ return code;
+ }
} else {
tqError("s-task:%s invalid msg type:%d, ver:%" PRId64, id, pCont->msgType, ver);
return TSDB_CODE_STREAM_INTERNAL_ERROR;
diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c
index be41f7e99e..3f4ff7f3d9 100644
--- a/source/dnode/vnode/src/tq/tqSink.c
+++ b/source/dnode/vnode/src/tq/tqSink.c
@@ -53,6 +53,7 @@ static int32_t checkTagSchema(SStreamTask* pTask, SVnode* pVnode);
static void reubuildAndSendMultiResBlock(SStreamTask* pTask, const SArray* pBlocks, SVnode* pVnode, int64_t earlyTs);
static int32_t handleResultBlockMsg(SStreamTask* pTask, SSDataBlock* pDataBlock, int32_t index, SVnode* pVnode,
int64_t earlyTs);
+static int32_t doWaitForDstTableDropped(SVnode* pVnode, SStreamTask* pTask, const char* dstTableName);
int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* pDataBlock, SBatchDeleteReq* deleteReq,
const char* pIdStr, bool newSubTableRule) {
@@ -138,7 +139,7 @@ int32_t tqBuildDeleteReq(STQ* pTq, const char* stbFullName, const SSDataBlock* p
return 0;
}
-static int32_t encodeCreateChildTableForRPC(SVCreateTbBatchReq* pReqs, int32_t vgId, void** pBuf, int32_t* contLen) {
+static int32_t encodeCreateChildTableForRPC(void* pReqs, int32_t vgId, void** pBuf, int32_t* contLen) {
int32_t ret = 0;
tEncodeSize(tEncodeSVCreateTbBatchReq, pReqs, *contLen, ret);
@@ -170,17 +171,50 @@ end:
return ret;
}
-static int32_t tqPutReqToQueue(SVnode* pVnode, SVCreateTbBatchReq* pReqs) {
+static int32_t encodeDropChildTableForRPC(void* pReqs, int32_t vgId, void** ppBuf, int32_t *contLen) {
+ int32_t code = 0;
+ SEncoder ec = {0};
+ tEncodeSize(tEncodeSVDropTbBatchReq, pReqs, *contLen, code);
+ if (code < 0) {
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+ *contLen += sizeof(SMsgHead);
+ *ppBuf = rpcMallocCont(*contLen);
+
+ if (!*ppBuf) {
+ code = terrno;
+ goto end;
+ }
+
+ ((SMsgHead*)(*ppBuf))->vgId = vgId;
+ ((SMsgHead*)(*ppBuf))->contLen = htonl(*contLen);
+
+ tEncoderInit(&ec, POINTER_SHIFT(*ppBuf, sizeof(SMsgHead)), (*contLen) - sizeof(SMsgHead));
+ code = tEncodeSVDropTbBatchReq(&ec, pReqs);
+ tEncoderClear(&ec);
+ if (code < 0) {
+ rpcFreeCont(*ppBuf);
+ *ppBuf = NULL;
+ *contLen = 0;
+ code = TSDB_CODE_INVALID_MSG;
+ goto end;
+ }
+end:
+ return code;
+}
+
+static int32_t tqPutReqToQueue(SVnode* pVnode, void* pReqs, int32_t(*encoder)(void* pReqs, int32_t vgId, void** ppBuf, int32_t *contLen), tmsg_t msgType) {
void* buf = NULL;
int32_t tlen = 0;
- int32_t code = encodeCreateChildTableForRPC(pReqs, TD_VID(pVnode), &buf, &tlen);
+ int32_t code = encoder(pReqs, TD_VID(pVnode), &buf, &tlen);
if (code) {
tqError("vgId:%d failed to encode create table msg, create table failed, code:%s", TD_VID(pVnode), tstrerror(code));
return code;
}
- SRpcMsg msg = {.msgType = TDMT_VND_CREATE_TABLE, .pCont = buf, .contLen = tlen};
+ SRpcMsg msg = {.msgType = msgType, .pCont = buf, .contLen = tlen};
code = tmsgPutToQueue(&pVnode->msgCb, WRITE_QUEUE, &msg);
if (code) {
tqError("failed to put into write-queue since %s", terrstr());
@@ -388,7 +422,7 @@ static int32_t doBuildAndSendCreateTableMsg(SVnode* pVnode, char* stbFullName, S
}
reqs.nReqs = taosArrayGetSize(reqs.pArray);
- code = tqPutReqToQueue(pVnode, &reqs);
+ code = tqPutReqToQueue(pVnode, &reqs, encodeCreateChildTableForRPC, TDMT_VND_CREATE_TABLE);
if (code != TSDB_CODE_SUCCESS) {
tqError("s-task:%s failed to send create table msg", id);
}
@@ -399,6 +433,61 @@ _end:
return code;
}
+static int32_t doBuildAndSendDropTableMsg(SVnode* pVnode, char* pStbFullname, SSDataBlock* pDataBlock,
+ SStreamTask* pTask, int64_t suid) {
+ int32_t lino = 0;
+ int32_t code = 0;
+ int32_t rows = pDataBlock->info.rows;
+ const char* id = pTask->id.idStr;
+ SVDropTbBatchReq batchReq = {0};
+ SVDropTbReq req = {0};
+
+ if (rows <= 0 || rows > 1 || pTask->subtableWithoutMd5 == 0) return TSDB_CODE_SUCCESS;
+
+ batchReq.pArray = taosArrayInit(rows, sizeof(SVDropTbReq));
+ if (!batchReq.pArray) return terrno;
+ batchReq.nReqs = rows;
+ req.suid = suid;
+ req.igNotExists = true;
+
+ SColumnInfoData* pTbNameCol = taosArrayGet(pDataBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
+ char tbName[TSDB_TABLE_NAME_LEN + 1] = {0};
+ int32_t i = 0;
+ void* pData = colDataGetVarData(pTbNameCol, i);
+ memcpy(tbName, varDataVal(pData), varDataLen(pData));
+ tbName[varDataLen(pData)] = 0;  // null-terminate the copied table name
+ req.name = tbName;
+ if (taosArrayPush(batchReq.pArray, &req) == NULL) {
+ TSDB_CHECK_CODE(terrno, lino, _exit);
+ }
+
+ SMetaReader mr = {0};
+ metaReaderDoInit(&mr, pVnode->pMeta, META_READER_LOCK);
+
+ code = metaGetTableEntryByName(&mr, tbName);
+ if (TSDB_CODE_SUCCESS == code && isValidDstChildTable(&mr, TD_VID(pVnode), tbName, pTask->outputInfo.tbSink.stbUid)) {
+ STableSinkInfo* pTableSinkInfo = NULL;
+ bool alreadyCached = doGetSinkTableInfoFromCache(pTask->outputInfo.tbSink.pTbInfo, pDataBlock->info.id.groupId, &pTableSinkInfo);
+ if (alreadyCached) {
+ pTableSinkInfo->uid = mr.me.uid;
+ }
+ }
+ metaReaderClear(&mr);
+ tqDebug("s-task:%s build drop %d table(s) msg", id, rows);
+ code = tqPutReqToQueue(pVnode, &batchReq, encodeDropChildTableForRPC, TDMT_VND_DROP_TABLE);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+ code = doWaitForDstTableDropped(pVnode, pTask, tbName);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+_exit:
+ if (batchReq.pArray) {
+ taosArrayDestroy(batchReq.pArray);
+ }
+ return code;
+}
+
int32_t doBuildAndSendSubmitMsg(SVnode* pVnode, SStreamTask* pTask, SSubmitReq2* pReq, int32_t numOfBlocks) {
const char* id = pTask->id.idStr;
int32_t vgId = TD_VID(pVnode);
@@ -807,6 +896,40 @@ int32_t doWaitForDstTableCreated(SVnode* pVnode, SStreamTask* pTask, STableSinkI
return TSDB_CODE_SUCCESS;
}
+static int32_t doWaitForDstTableDropped(SVnode* pVnode, SStreamTask* pTask, const char* dstTableName) {
+ int32_t vgId = TD_VID(pVnode);
+ int64_t suid = pTask->outputInfo.tbSink.stbUid;
+ const char* id = pTask->id.idStr;
+
+ while (1) {
+ if (streamTaskShouldStop(pTask)) {
+ tqDebug("s-task:%s task will stop, quit from waiting for table:%s drop", id, dstTableName);
+ return TSDB_CODE_STREAM_EXEC_CANCELLED;
+ }
+ SMetaReader mr = {0};
+ metaReaderDoInit(&mr, pVnode->pMeta, META_READER_LOCK);
+ int32_t code = metaGetTableEntryByName(&mr, dstTableName);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ metaReaderClear(&mr);
+ break;
+ } else if (TSDB_CODE_SUCCESS == code) {
+ if (isValidDstChildTable(&mr, vgId, dstTableName, suid)) {
+ metaReaderClear(&mr);
+ taosMsleep(100);
+ tqDebug("s-task:%s wait 100ms for table:%s drop", id, dstTableName);
+ } else {
+ metaReaderClear(&mr);
+ break;
+ }
+ } else {
+ tqError("s-task:%s failed to wait for table:%s drop", id, dstTableName);
+ metaReaderClear(&mr);
+ return terrno;
+ }
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
int32_t doCreateSinkTableInfo(const char* pDstTableName, STableSinkInfo** pInfo) {
int32_t nameLen = strlen(pDstTableName);
(*pInfo) = taosMemoryCalloc(1, sizeof(STableSinkInfo) + nameLen + 1);
@@ -1032,7 +1155,7 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
}
bool onlySubmitData = hasOnlySubmitData(pBlocks, numOfBlocks);
- if (!onlySubmitData) {
+ if (!onlySubmitData || pTask->subtableWithoutMd5 == 1) {
tqDebug("vgId:%d, s-task:%s write %d stream resBlock(s) into table, has delete block, submit one-by-one", vgId, id,
numOfBlocks);
@@ -1052,6 +1175,8 @@ void tqSinkDataIntoDstTable(SStreamTask* pTask, void* vnode, void* data) {
code = doBuildAndSendCreateTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
} else if (pDataBlock->info.type == STREAM_CHECKPOINT) {
continue;
+ } else if (pDataBlock->info.type == STREAM_DROP_CHILD_TABLE && pTask->subtableWithoutMd5) {
+ code = doBuildAndSendDropTableMsg(pVnode, stbFullName, pDataBlock, pTask, suid);
} else {
code = handleResultBlockMsg(pTask, pDataBlock, i, pVnode, earlyTs);
}
diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c
index e066938fc0..a92049e5f3 100644
--- a/source/dnode/vnode/src/tq/tqUtil.c
+++ b/source/dnode/vnode/src/tq/tqUtil.c
@@ -572,7 +572,7 @@ int32_t tqDoSendDataRsp(const SRpcHandleInfo* pRpcHandleInfo, const SMqDataRsp*
return 0;
}
-int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type) {
+int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void** pRefBlock, int32_t type, EStreamType blockType) {
int32_t code = 0;
int32_t line = 0;
SDecoder* pCoder = &(SDecoder){0};
@@ -593,7 +593,7 @@ int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void*
}
SSDataBlock* pDelBlock = NULL;
- code = createSpecialDataBlock(STREAM_DELETE_DATA, &pDelBlock);
+ code = createSpecialDataBlock(blockType, &pDelBlock);
TSDB_CHECK_CODE(code, line, END);
code = blockDataEnsureCapacity(pDelBlock, numOfTables);
@@ -751,3 +751,45 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b
return TSDB_CODE_SUCCESS;
}
+
+int32_t tqExtractDropCtbDataBlock(const void* data, int32_t len, int64_t ver, void** pRefBlock, int32_t type) {
+ int32_t code = 0;
+ int32_t lino = 0;
+ SDecoder dc = {0};
+ SVDropTbBatchReq batchReq = {0};
+ tDecoderInit(&dc, (uint8_t*)data, len);
+ code = tDecodeSVDropTbBatchReq(&dc, &batchReq);
+ TSDB_CHECK_CODE(code, lino, _exit);
+ if (batchReq.nReqs <= 0) goto _exit;
+
+ SSDataBlock* pBlock = NULL;
+ code = createSpecialDataBlock(STREAM_DROP_CHILD_TABLE, &pBlock);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+ code = blockDataEnsureCapacity(pBlock, batchReq.nReqs);
+ TSDB_CHECK_CODE(code, lino, _exit);
+
+ pBlock->info.rows = batchReq.nReqs;
+ pBlock->info.version = ver;
+ for (int32_t i = 0; i < batchReq.nReqs; ++i) {
+ SVDropTbReq* pReq = batchReq.pReqs + i;
+ SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ TSDB_CHECK_NULL(pCol, code, lino, _exit, terrno);
+ code = colDataSetVal(pCol, i, (const char* )&pReq->uid, false);
+ TSDB_CHECK_CODE(code, lino, _exit);
+ }
+
+ code = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0, pRefBlock);
+ TSDB_CHECK_CODE(code, lino, _exit);
+ ((SStreamRefDataBlock*)(*pRefBlock))->type = STREAM_INPUT__REF_DATA_BLOCK;
+ ((SStreamRefDataBlock*)(*pRefBlock))->pBlock = pBlock;
+
+_exit:
+ tDecoderClear(&dc);
+ if (TSDB_CODE_SUCCESS != code) {
+ tqError("faled to extract drop ctb data block, line:%d code:%s", lino, tstrerror(code));
+ blockDataCleanup(pBlock);
+ taosMemoryFree(pBlock);
+ }
+ return code;
+}
diff --git a/source/dnode/vnode/src/vnd/vnodeInitApi.c b/source/dnode/vnode/src/vnd/vnodeInitApi.c
index d688d1323d..0ac0ee1b8f 100644
--- a/source/dnode/vnode/src/vnd/vnodeInitApi.c
+++ b/source/dnode/vnode/src/vnd/vnodeInitApi.c
@@ -147,6 +147,7 @@ void initStateStoreAPI(SStateStore* pStore) {
pStore->streamStatePutParName = streamStatePutParName;
pStore->streamStateGetParName = streamStateGetParName;
+ pStore->streamStateDeleteParName = streamStateDeleteParName;
pStore->streamStateAddIfNotExist = streamStateAddIfNotExist;
pStore->streamStateReleaseBuf = streamStateReleaseBuf;
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index dd13c975cf..6702b8b588 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -50,6 +50,8 @@ static int32_t vnodeProcessDropIndexReq(SVnode *pVnode, int64_t ver, void *pReq,
static int32_t vnodeProcessCompactVnodeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessConfigChangeReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessArbCheckSyncReq(SVnode *pVnode, void *pReq, int32_t len, SRpcMsg *pRsp);
+static int32_t vnodeProcessDropTSmaCtbReq(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp,
+ SRpcMsg *pOriginRpc);
static int32_t vnodePreCheckAssignedLogSyncd(SVnode *pVnode, char *member0Token, char *member1Token);
static int32_t vnodeCheckAssignedLogSyncd(SVnode *pVnode, char *member0Token, char *member1Token);
@@ -481,6 +483,61 @@ static int32_t vnodePreProcessArbCheckSyncMsg(SVnode *pVnode, SRpcMsg *pMsg) {
return code;
}
+int32_t vnodePreProcessDropTbMsg(SVnode* pVnode, SRpcMsg* pMsg) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ int32_t size = 0;
+ SDecoder dc = {0};
+ SEncoder ec = {0};
+ SVDropTbBatchReq receivedBatchReqs = {0};
+ SVDropTbBatchReq sentBatchReqs = {0};
+
+ tDecoderInit(&dc, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), pMsg->contLen - sizeof(SMsgHead));
+
+ code = tDecodeSVDropTbBatchReq(&dc, &receivedBatchReqs);
+ if (code < 0) {
+ terrno = code;
+ TSDB_CHECK_CODE(code, lino, _exit);
+ }
+ sentBatchReqs.pArray = taosArrayInit(receivedBatchReqs.nReqs, sizeof(SVDropTbReq));
+ if (!sentBatchReqs.pArray) {
+ code = terrno;
+ goto _exit;
+ }
+
+ for (int32_t i = 0; i < receivedBatchReqs.nReqs; ++i) {
+ SVDropTbReq* pReq = receivedBatchReqs.pReqs + i;
+ tb_uid_t uid = metaGetTableEntryUidByName(pVnode->pMeta, pReq->name);
+ if (uid == 0) {
+ vWarn("vgId:%d, preprocess drop ctb: %s not found", TD_VID(pVnode), pReq->name);
+ continue;
+ }
+ pReq->uid = uid;
+ vDebug("vgId:%d %s for: %s, uid: %"PRId64, TD_VID(pVnode), __func__, pReq->name, pReq->uid);
+ if (taosArrayPush(sentBatchReqs.pArray, pReq) == NULL) {
+ code = terrno;
+ goto _exit;
+ }
+ }
+ sentBatchReqs.nReqs = sentBatchReqs.pArray->size;
+
+ tEncodeSize(tEncodeSVDropTbBatchReq, &sentBatchReqs, size, code);
+ tEncoderInit(&ec, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)), size);
+ code = tEncodeSVDropTbBatchReq(&ec, &sentBatchReqs);
+ tEncoderClear(&ec);
+ if (code != TSDB_CODE_SUCCESS) {
+ vError("vgId:%d %s failed to encode drop tb batch req: %s", TD_VID(pVnode), __func__, tstrerror(code));
+ TSDB_CHECK_CODE(code, lino, _exit);
+ }
+
+_exit:
+ tDecoderClear(&dc);
+ if (sentBatchReqs.pArray) {
+ taosArrayDestroy(sentBatchReqs.pArray);
+ }
+ return code;
+}
+
int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
int32_t code = 0;
@@ -507,6 +564,9 @@ int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
case TDMT_VND_ARB_CHECK_SYNC: {
code = vnodePreProcessArbCheckSyncMsg(pVnode, pMsg);
} break;
+ case TDMT_VND_DROP_TABLE: {
+ code = vnodePreProcessDropTbMsg(pVnode, pMsg);
+ } break;
default:
break;
}
@@ -1110,7 +1170,6 @@ static int32_t vnodeProcessCreateTbReq(SVnode *pVnode, int64_t ver, void *pReq,
STbUidStore *pStore = NULL;
SArray *tbUids = NULL;
SArray *tbNames = NULL;
-
pRsp->msgType = TDMT_VND_CREATE_TABLE_RSP;
pRsp->code = TSDB_CODE_SUCCESS;
pRsp->pCont = NULL;
@@ -2512,3 +2571,4 @@ _OVER:
int32_t vnodeAsyncCompact(SVnode *pVnode, int64_t ver, void *pReq, int32_t len, SRpcMsg *pRsp) { return 0; }
int32_t tsdbAsyncCompact(STsdb *tsdb, const STimeWindow *tw, bool sync) { return 0; }
#endif
+
diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c
index 1b823bf69d..af8e01be5e 100644
--- a/source/libs/executor/src/executorInt.c
+++ b/source/libs/executor/src/executorInt.c
@@ -1083,18 +1083,13 @@ void cleanupBasicInfo(SOptrBasicInfo* pInfo) {
bool groupbyTbname(SNodeList* pGroupList) {
bool bytbname = false;
- if (LIST_LENGTH(pGroupList) == 1) {
- SNode* p = nodesListGetNode(pGroupList, 0);
- if (!p) {
- qError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(terrno));
- return false;
- }
- if (p->type == QUERY_NODE_FUNCTION) {
- // partition by tbname/group by tbname
- bytbname = (strcmp(((struct SFunctionNode*)p)->functionName, "tbname") == 0);
+ SNode* pNode = NULL;
+ FOREACH(pNode, pGroupList) {
+ if (pNode->type == QUERY_NODE_FUNCTION) {
+ bytbname = (strcmp(((struct SFunctionNode*)pNode)->functionName, "tbname") == 0);
+ break;
}
}
-
return bytbname;
}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index fec35c3371..d6e3d26267 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -1326,7 +1326,6 @@ int32_t appendCreateTableRow(void* pState, SExprSupp* pTableSup, SExprSupp* pTag
int32_t winCode = TSDB_CODE_SUCCESS;
code = pAPI->streamStateGetParName(pState, groupId, &pValue, true, &winCode);
QUERY_CHECK_CODE(code, lino, _end);
-
if (winCode != TSDB_CODE_SUCCESS) {
SSDataBlock* pTmpBlock = NULL;
code = blockCopyOneRow(pSrcBlock, rowId, &pTmpBlock);
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 5b5d5c5d11..84dde6a579 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -289,6 +289,7 @@ static int32_t doSetTagColumnData(STableScanBase* pTableScanInfo, SSDataBlock* p
pTaskInfo, &pTableScanInfo->metaCache);
// ignore the table not exists error, since this table may have been dropped during the scan procedure.
if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ if (pTaskInfo->streamInfo.pState) blockDataCleanup(pBlock);
code = 0;
}
}
@@ -3038,10 +3039,6 @@ static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock
code = addTagPseudoColumnData(&pInfo->readHandle, pInfo->pPseudoExpr, pInfo->numOfPseudoExpr, pInfo->pRes,
pBlockInfo->rows, pTaskInfo, &pTableScanInfo->base.metaCache);
// ignore the table not exists error, since this table may have been dropped during the scan procedure.
- if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
- code = 0;
- }
-
if (code) {
blockDataFreeRes((SSDataBlock*)pBlock);
QUERY_CHECK_CODE(code, lino, _end);
@@ -3535,6 +3532,46 @@ static int32_t copyGetResultBlock(SSDataBlock* dest, TSKEY start, TSKEY end) {
return appendDataToSpecialBlock(dest, &start, &end, NULL, NULL, NULL);
}
+static int32_t deletePartName(SStreamScanInfo* pInfo, SSDataBlock* pBlock, int32_t *deleteNum) {
+ int32_t code = TSDB_CODE_SUCCESS;
+ int32_t lino = 0;
+ for (int32_t i = 0; i < pBlock->info.rows; i++) {
+ // uid is the same as gid
+ SColumnInfoData* pGpIdCol = taosArrayGet(pBlock->pDataBlock, UID_COLUMN_INDEX);
+ SColumnInfoData* pTbnameCol = taosArrayGet(pBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX);
+ int64_t* gpIdCol = (int64_t*)pGpIdCol->pData;
+ void* pParName = NULL;
+ int32_t winCode = 0;
+ code = pInfo->stateStore.streamStateGetParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, gpIdCol[i],
+ &pParName, false, &winCode);
+ if (TSDB_CODE_SUCCESS == code && winCode != 0) {
+ qDebug("delete stream part Name for:%"PRId64 " not found", gpIdCol[i]);
+ colDataSetNULL(pTbnameCol, i);
+ continue;
+ }
+ (*deleteNum)++;
+ QUERY_CHECK_CODE(code, lino, _end);
+ char varTbName[TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE + 1] = {0};
+ varDataSetLen(varTbName, strlen(pParName));
+ int64_t len = tsnprintf(varTbName + VARSTR_HEADER_SIZE, TSDB_TABLE_NAME_LEN + 1, "%s", pParName);
+ code = colDataSetVal(pTbnameCol, i, varTbName, false);
+ qDebug("delete stream part for:%"PRId64 " res tb: %s", gpIdCol[i], (char*)pParName);
+ pInfo->stateStore.streamStateFreeVal(pParName);
+ QUERY_CHECK_CODE(code, lino, _end);
+ code = pInfo->stateStore.streamStateDeleteParName(pInfo->pStreamScanOp->pTaskInfo->streamInfo.pState, gpIdCol[i]);
+ QUERY_CHECK_CODE(code, lino, _end);
+ pBlock->info.id.groupId = gpIdCol[i];
+ // currently, only one valid row in pBlock
+ memcpy(pBlock->info.parTbName, varTbName + VARSTR_HEADER_SIZE, TSDB_TABLE_NAME_LEN + 1);
+ }
+
+_end:
+ if (code != TSDB_CODE_SUCCESS) {
+ qError("%s failed at line %d since %s", __func__, lino, tstrerror(code));
+ }
+ return code;
+}
+
static int32_t doStreamScanNext(SOperatorInfo* pOperator, SSDataBlock** ppRes) {
// NOTE: this operator does never check if current status is done or not
int32_t code = TSDB_CODE_SUCCESS;
@@ -3774,6 +3811,12 @@ FETCH_NEXT_BLOCK:
prepareRangeScan(pInfo, pInfo->pUpdateRes, &pInfo->updateResIndex, NULL);
pInfo->scanMode = STREAM_SCAN_FROM_DATAREADER_RANGE;
} break;
+ case STREAM_DROP_CHILD_TABLE: {
+ int32_t deleteNum = 0;
+ code = deletePartName(pInfo, pBlock, &deleteNum);
+ QUERY_CHECK_CODE(code, lino, _end);
+ if (deleteNum == 0) goto FETCH_NEXT_BLOCK;
+ } break;
case STREAM_CHECKPOINT: {
qError("stream check point error. msg type: STREAM_INPUT__DATA_BLOCK");
} break;
@@ -3915,7 +3958,13 @@ FETCH_NEXT_BLOCK:
}
code = setBlockIntoRes(pInfo, pRes, &pStreamInfo->fillHistoryWindow, false);
- QUERY_CHECK_CODE(code, lino, _end);
+ if (code == TSDB_CODE_PAR_TABLE_NOT_EXIST) {
+ pInfo->pRes->info.rows = 0;
+ code = TSDB_CODE_SUCCESS;
+ } else {
+ QUERY_CHECK_CODE(code, lino, _end);
+ }
+
if (pInfo->pRes->info.rows == 0) {
continue;
}
diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c
index 8fd00e9313..2e906d2ba6 100644
--- a/source/libs/executor/src/streamtimewindowoperator.c
+++ b/source/libs/executor/src/streamtimewindowoperator.c
@@ -5215,7 +5215,7 @@ static int32_t doStreamIntervalAggNext(SOperatorInfo* pOperator, SSDataBlock** p
code = getAllIntervalWindow(pInfo->aggSup.pResultRowHashTable, pInfo->pUpdatedMap);
QUERY_CHECK_CODE(code, lino, _end);
continue;
- } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE) {
+ } else if (pBlock->info.type == STREAM_CREATE_CHILD_TABLE || pBlock->info.type == STREAM_DROP_CHILD_TABLE) {
printDataBlock(pBlock, getStreamOpName(pOperator->operatorType), GET_TASKID(pTaskInfo));
(*ppRes) = pBlock;
return code;
diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c
index 8ac1acb1a2..c2714659ec 100644
--- a/source/libs/parser/src/parser.c
+++ b/source/libs/parser/src/parser.c
@@ -433,9 +433,6 @@ int32_t qStmtBindParams(SQuery* pQuery, TAOS_MULTI_BIND* pParams, int32_t colIdx
nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = NULL;
code = nodesCloneNode(pQuery->pPrepareRoot, &pQuery->pRoot);
- if (NULL == pQuery->pRoot) {
- code = code;
- }
}
if (TSDB_CODE_SUCCESS == code) {
rewriteExprAlias(pQuery->pRoot);
diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c
index 34c83acee8..09a4b9c593 100644
--- a/source/libs/planner/src/planLogicCreater.c
+++ b/source/libs/planner/src/planLogicCreater.c
@@ -1534,21 +1534,20 @@ static int32_t createSortLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
if (TSDB_CODE_SUCCESS == code) {
pSort->pSortKeys = NULL;
code = nodesCloneList(pSelect->pOrderByList, &pSort->pSortKeys);
- if (NULL == pSort->pSortKeys) {
- code = code;
- }
- SNode* pNode = NULL;
- SOrderByExprNode* firstSortKey = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0);
- if (isPrimaryKeySort(pSelect->pOrderByList)) pSort->node.outputTsOrder = firstSortKey->order;
- if (firstSortKey->pExpr->type == QUERY_NODE_COLUMN) {
- SColumnNode* pCol = (SColumnNode*)firstSortKey->pExpr;
- int16_t projIdx = 1;
- FOREACH(pNode, pSelect->pProjectionList) {
- SExprNode* pExpr = (SExprNode*)pNode;
- if (0 == strcmp(pCol->node.aliasName, pExpr->aliasName)) {
- pCol->projIdx = projIdx; break;
+ if (NULL != pSort->pSortKeys) {
+ SNode* pNode = NULL;
+ SOrderByExprNode* firstSortKey = (SOrderByExprNode*)nodesListGetNode(pSort->pSortKeys, 0);
+ if (isPrimaryKeySort(pSelect->pOrderByList)) pSort->node.outputTsOrder = firstSortKey->order;
+ if (firstSortKey->pExpr->type == QUERY_NODE_COLUMN) {
+ SColumnNode* pCol = (SColumnNode*)firstSortKey->pExpr;
+ int16_t projIdx = 1;
+ FOREACH(pNode, pSelect->pProjectionList) {
+ SExprNode* pExpr = (SExprNode*)pNode;
+ if (0 == strcmp(pCol->node.aliasName, pExpr->aliasName)) {
+ pCol->projIdx = projIdx; break;
+ }
+ projIdx++;
}
- projIdx++;
}
}
}
diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c
index e0e42087f3..e960c0ff5d 100644
--- a/source/libs/planner/src/planSpliter.c
+++ b/source/libs/planner/src/planSpliter.c
@@ -836,11 +836,9 @@ static int32_t stbSplSplitSessionForStream(SSplitContext* pCxt, SStableSplitInfo
nodesDestroyNode(pMergeWin->pTsEnd);
pMergeWin->pTsEnd = NULL;
code = nodesCloneNode(nodesListGetNode(pPartWin->node.pTargets, index), &pMergeWin->pTsEnd);
- if (NULL == pMergeWin->pTsEnd) {
- code = code;
- }
}
- code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow);
+ if (TSDB_CODE_SUCCESS == code)
+ code = stbSplCreateExchangeNode(pCxt, pInfo->pSplitNode, pPartWindow);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
diff --git a/source/libs/stream/inc/streamBackendRocksdb.h b/source/libs/stream/inc/streamBackendRocksdb.h
index d313acc61d..6a10b21c53 100644
--- a/source/libs/stream/inc/streamBackendRocksdb.h
+++ b/source/libs/stream/inc/streamBackendRocksdb.h
@@ -223,6 +223,7 @@ int32_t streamStateParTagGetKVByCur_rocksdb(SStreamStateCur* pCur, int64_t* pGro
// parname cf
int32_t streamStatePutParName_rocksdb(SStreamState* pState, int64_t groupId, const char tbname[TSDB_TABLE_NAME_LEN]);
int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, void** pVal);
+int32_t streamStateDeleteParName_rocksdb(SStreamState* pState, int64_t groupId);
void streamStateDestroy_rocksdb(SStreamState* pState, bool remove);
diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c
index 09f4e95376..65746b3100 100644
--- a/source/libs/stream/src/streamBackendRocksdb.c
+++ b/source/libs/stream/src/streamBackendRocksdb.c
@@ -4432,6 +4432,12 @@ int32_t streamStateGetParName_rocksdb(SStreamState* pState, int64_t groupId, voi
return code;
}
+int32_t streamStateDeleteParName_rocksdb(SStreamState* pState, int64_t groupId) {
+ int code = 0;
+ STREAM_STATE_DEL_ROCKSDB(pState, "parname", &groupId);
+ return code;
+}
+
int32_t streamDefaultPut_rocksdb(SStreamState* pState, const void* key, void* pVal, int32_t pVLen) {
int code = 0;
STREAM_STATE_PUT_ROCKSDB(pState, "default", key, pVal, pVLen);
diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c
index 20c3e5a6b9..401aa7530d 100644
--- a/source/libs/stream/src/streamQueue.c
+++ b/source/libs/stream/src/streamQueue.c
@@ -166,6 +166,8 @@ const char* streamQueueItemGetTypeStr(int32_t type) {
return "checkpoint-trigger";
case STREAM_INPUT__TRANS_STATE:
return "trans-state";
+ case STREAM_INPUT__REF_DATA_BLOCK:
+ return "ref-block";
default:
return "datablock";
}
@@ -211,7 +213,7 @@ EExtractDataCode streamTaskGetDataFromInputQ(SStreamTask* pTask, SStreamQueueIte
// do not merge blocks for sink node and check point data block
int8_t type = qItem->type;
if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER ||
- type == STREAM_INPUT__TRANS_STATE) {
+ type == STREAM_INPUT__TRANS_STATE || type == STREAM_INPUT__REF_DATA_BLOCK) {
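+      // ref data blocks are delivered as standalone items as well, without merging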
const char* p = streamQueueItemGetTypeStr(type);
if (*pInput == NULL) {
@@ -504,4 +506,4 @@ void streamTaskPutbackToken(STokenBucket* pBucket) {
// size in KB
void streamTaskConsumeQuota(STokenBucket* pBucket, int32_t bytes) { pBucket->quotaRemain -= SIZE_IN_MiB(bytes); }
-void streamTaskInputFail(SStreamTask* pTask) { atomic_store_8(&pTask->inputq.status, TASK_INPUT_STATUS__FAILED); }
\ No newline at end of file
+void streamTaskInputFail(SStreamTask* pTask) { atomic_store_8(&pTask->inputq.status, TASK_INPUT_STATUS__FAILED); }
diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c
index 794fc346bf..5461b5899b 100644
--- a/source/libs/stream/src/streamState.c
+++ b/source/libs/stream/src/streamState.c
@@ -525,6 +525,18 @@ _end:
return code;
}
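+// Remove the groupId-to-table-name mapping from both the in-memory cache and RocksDB.
+// Failures are only logged; success is always returned so stream processing can continue.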
+int32_t streamStateDeleteParName(SStreamState* pState, int64_t groupId) {
+ int32_t code = tSimpleHashRemove(pState->parNameMap, &groupId, sizeof(int64_t));
+ if (TSDB_CODE_SUCCESS != code) {
+ qWarn("failed to remove parname from cache, code:%d", code);
+ }
+ code = streamStateDeleteParName_rocksdb(pState, groupId);
+ if (TSDB_CODE_SUCCESS != code) {
+ qWarn("failed to remove parname from rocksdb, code:%d", code);
+ }
+ return TSDB_CODE_SUCCESS;
+}
+
void streamStateDestroy(SStreamState* pState, bool remove) {
streamFileStateDestroy(pState->pFileState);
// streamStateDestroy_rocksdb(pState, remove);
diff --git a/source/libs/wal/src/walRead.c b/source/libs/wal/src/walRead.c
index 610adfb0e1..da5e1f47e9 100644
--- a/source/libs/wal/src/walRead.c
+++ b/source/libs/wal/src/walRead.c
@@ -89,6 +89,8 @@ int32_t walNextValidMsg(SWalReader *pReader) {
if (type == TDMT_VND_SUBMIT || ((type == TDMT_VND_DELETE) && (pReader->cond.deleteMsg == 1)) ||
(IS_META_MSG(type) && pReader->cond.scanMeta)) {
TAOS_RETURN(walFetchBody(pReader));
+ } else if (type == TDMT_VND_DROP_TABLE && pReader->cond.scanDropCtb) {
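+    // also deliver drop-table entries when the reader subscribes to child-table drops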
+ TAOS_RETURN(walFetchBody(pReader));
} else {
TAOS_CHECK_RETURN(walSkipFetchBody(pReader));
diff --git a/tests/army/user/test_passwd.py b/tests/army/user/test_passwd.py
new file mode 100644
index 0000000000..dfec175824
--- /dev/null
+++ b/tests/army/user/test_passwd.py
@@ -0,0 +1,55 @@
+import os
+import platform
+import subprocess
+from frame.log import *
+from frame.cases import *
+from frame.sql import *
+from frame.caseBase import *
+from frame.epath import *
+from frame import *
+
+class TDTestCase(TBase):
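+    # Locate the directory containing passwdTest.c by walking the test source tree.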
+ def apiPath(self):
+ apiPath = None
+ currentFilePath = os.path.dirname(os.path.realpath(__file__))
+ if (os.sep.join(["community", "tests"]) in currentFilePath):
+ testFilePath = currentFilePath[:currentFilePath.find(os.sep.join(["community", "tests"]))]
+ else:
+ testFilePath = currentFilePath[:currentFilePath.find(os.sep.join(["TDengine", "tests"]))]
+
+ for root, dirs, files in os.walk(testFilePath):
+ if ("passwdTest.c" in files):
+ apiPath = root
+ break
+ return apiPath
+
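+    # Build passwdTest with make (Linux only), then run it against localhost and check the exit code.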
+ def run(self):
+ apiPath = self.apiPath()
+ tdLog.info(f"api path: {apiPath}")
+ if platform.system().lower() == 'linux':
+ p = subprocess.Popen(f"cd {apiPath} && make", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ if 0 != p.returncode:
+ tdLog.exit("Test script passwdTest.c make failed")
+
+ p = subprocess.Popen(f"ls {apiPath}", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ tdLog.info(f"test files: {out}")
+ if apiPath:
+ test_file_cmd = os.sep.join([apiPath, "passwdTest localhost"])
+ try:
+ p = subprocess.Popen(test_file_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ out, err = p.communicate()
+ if 0 != p.returncode:
+ tdLog.exit("Failed to run passwd test with output: %s \n error: %s" % (out, err))
+ else:
+ tdLog.info(out)
+ tdLog.success(f"{__file__} successfully executed")
+ except Exception as e:
+ tdLog.exit(f"Failed to execute {__file__} with error: {e}")
+ else:
+ tdLog.exit("passwdTest.c not found")
+
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task
index 1e3975a556..d52a32adc7 100644
--- a/tests/parallel_test/cases.task
+++ b/tests/parallel_test/cases.task
@@ -44,6 +44,7 @@
,,y,army,./pytest.sh python3 ./test.py -f storage/compressBasic.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f grant/grantBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/queryBugs.py -N 3
+,,n,army,python3 ./test.py -f user/test_passwd.py
,,y,army,./pytest.sh python3 ./test.py -f tmq/tmqBugs.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_compare_asc_desc.py
,,y,army,./pytest.sh python3 ./test.py -f query/last/test_last.py
@@ -51,6 +52,7 @@
,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3
,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py
,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py
+
#
# system test
#
@@ -438,6 +440,7 @@
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/show_tag_index.py
,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/information_schema.py
+,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/grant.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/abs.py -R
,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/and_or_for_byte.py
diff --git a/tests/script/api/makefile b/tests/script/api/makefile
index 9c2bb6be3d..ce5980b37a 100644
--- a/tests/script/api/makefile
+++ b/tests/script/api/makefile
@@ -13,7 +13,7 @@ all: $(TARGET)
exe:
gcc $(CFLAGS) ./batchprepare.c -o $(ROOT)batchprepare $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-test.c -o $(ROOT)stmt2-test $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-test.c -o $(ROOT)stmt2-test $(LFLAGS)
gcc $(CFLAGS) ./stopquery.c -o $(ROOT)stopquery $(LFLAGS)
gcc $(CFLAGS) ./dbTableRoute.c -o $(ROOT)dbTableRoute $(LFLAGS)
gcc $(CFLAGS) ./insertSameTs.c -o $(ROOT)insertSameTs $(LFLAGS)
@@ -22,11 +22,11 @@ exe:
gcc $(CFLAGS) ./insert_stb.c -o $(ROOT)insert_stb $(LFLAGS)
gcc $(CFLAGS) ./tmqViewTest.c -o $(ROOT)tmqViewTest $(LFLAGS)
gcc $(CFLAGS) ./stmtQuery.c -o $(ROOT)stmtQuery $(LFLAGS)
- gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS)
- gcc $(CFLAGS) ./stmt2.c -o $(ROOT)stmt2 $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS)
- gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2.c -o $(ROOT)stmt2 $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS)
+ # gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS)
gcc $(CFLAGS) ./stmt-crash.c -o $(ROOT)stmt-crash $(LFLAGS)
clean:
diff --git a/tests/script/api/makefile_win64.mak b/tests/script/api/makefile_win64.mak
new file mode 100644
index 0000000000..50a2447a06
--- /dev/null
+++ b/tests/script/api/makefile_win64.mak
@@ -0,0 +1,20 @@
+# Makefile.mak for win64
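+# Build from a Visual Studio developer prompt with: nmake /f makefile_win64.mak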
+
+TARGET = passwdTest.exe
+CC = cl
+CFLAGS = /W4 /EHsc /I"C:\TDengine\include" /DWINDOWS
+LDFLAGS = /link /LIBPATH:"C:\TDengine\driver" taos.lib
+
+SRCS = passwdTest.c
+OBJS = $(SRCS:.c=.obj)
+
+all: $(TARGET)
+
+$(TARGET): $(OBJS)
+ $(CC) $(OBJS) $(LDFLAGS)
+
+.c.obj:
+ $(CC) $(CFLAGS) /c $<
+
+clean:
+ del $(OBJS) $(TARGET)
\ No newline at end of file
diff --git a/tests/script/api/passwdTest.c b/tests/script/api/passwdTest.c
index 928525750e..259d3bec8e 100644
--- a/tests/script/api/passwdTest.c
+++ b/tests/script/api/passwdTest.c
@@ -20,12 +20,27 @@
* passwdTest.c
* - Run the test case in clear TDengine environment with default root passwd 'taosdata'
*/
+#ifdef WINDOWS
+#include <windows.h>
+#include <stdint.h>
+#include <inttypes.h>
+#ifndef PRId64
+#define PRId64 "I64d"
+#endif
+
+#ifndef PRIu64
+#define PRIu64 "I64u"
+#endif
+
+#else
 #include <inttypes.h>
+#include <unistd.h>
+#endif
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
-#include <unistd.h>
#include "taos.h" // TAOS header file
#define nDup 1
@@ -50,6 +65,16 @@ void sysInfoTest(TAOS *taos, const char *host, char *qstr);
void userDroppedTest(TAOS *taos, const char *host, char *qstr);
void clearTestEnv(TAOS *taos, const char *host, char *qstr);
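+// portable millisecond sleep: Sleep() on Windows, usleep() elsewhere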
+void taosMsleep(int64_t ms) {
+ if (ms < 0) return;
+#ifdef WINDOWS
+ Sleep(ms);
+#else
+ usleep(ms * 1000);
+#endif
+}
+
+
int nPassVerNotified = 0;
int nUserDropped = 0;
TAOS *taosu[nRoot] = {0};
@@ -59,7 +84,8 @@ void __taos_notify_cb(void *param, void *ext, int type) {
switch (type) {
case TAOS_NOTIFY_PASSVER: {
++nPassVerNotified;
- printf("%s:%d type:%d user:%s passVer:%d\n", __func__, __LINE__, type, param ? (char *)param : "NULL", *(int *)ext);
+ printf("%s:%d type:%d user:%s passVer:%d\n", __func__, __LINE__, type, param ? (char *)param : "NULL",
+ *(int *)ext);
break;
}
case TAOS_NOTIFY_USER_DROPPED: {
@@ -191,11 +217,11 @@ static int printResult(TAOS_RES *res, char *output) {
printRow(temp, row, fields, numFields);
puts(temp);
}
+ return 0;
}
int main(int argc, char *argv[]) {
char qstr[1024];
-
// connect to server
if (argc < 2) {
printf("please input server-ip \n");
@@ -215,6 +241,7 @@ int main(int argc, char *argv[]) {
taos_close(taos);
taos_cleanup();
+ exit(EXIT_SUCCESS);
}
void createUsers(TAOS *taos, const char *host, char *qstr) {
@@ -234,6 +261,7 @@ void createUsers(TAOS *taos, const char *host, char *qstr) {
if (code != 0) {
fprintf(stderr, "failed to run: taos_set_notify_cb(TAOS_NOTIFY_PASSVER) for user:%s since %d\n", users[i], code);
+ exit(EXIT_FAILURE);
} else {
fprintf(stderr, "success to run: taos_set_notify_cb(TAOS_NOTIFY_PASSVER) for user:%s\n", users[i]);
}
@@ -260,6 +288,7 @@ void passVerTestMulti(const char *host, char *qstr) {
if (code != 0) {
fprintf(stderr, "failed to run: taos_set_notify_cb since %d\n", code);
+ exit(EXIT_FAILURE);
} else {
fprintf(stderr, "success to run: taos_set_notify_cb\n");
}
@@ -283,26 +312,25 @@ void passVerTestMulti(const char *host, char *qstr) {
printf("%s:%d [%d] second(s) elasped, passVer notification received:%d, total:%d\n", __func__, __LINE__, i,
nPassVerNotified, nConn);
if (nPassVerNotified >= nConn) break;
- sleep(1);
+ taosMsleep(1000);
}
// close the taos_conn
for (int i = 0; i < nRoot; ++i) {
taos_close(taos[i]);
printf("%s:%d close taos[%d]\n", __func__, __LINE__, i);
- // sleep(1);
+ // taosMsleep(1000);
}
for (int i = 0; i < nUser; ++i) {
taos_close(taosu[i]);
printf("%s:%d close taosu[%d]\n", __func__, __LINE__, i);
- // sleep(1);
+ // taosMsleep(1000);
}
fprintf(stderr, "######## %s #########\n", __func__);
if (nPassVerNotified == nConn) {
- fprintf(stderr, ">>> succeed to get passVer notification since nNotify %d == nConn %d\n", nPassVerNotified,
- nConn);
+ fprintf(stderr, ">>> succeed to get passVer notification since nNotify %d == nConn %d\n", nPassVerNotified, nConn);
} else {
fprintf(stderr, ">>> failed to get passVer notification since nNotify %d != nConn %d\n", nPassVerNotified, nConn);
exit(1);
@@ -337,7 +365,7 @@ void sysInfoTest(TAOS *taosRoot, const char *host, char *qstr) {
TAOS_RES *res = NULL;
int32_t nRep = 0;
-_REP:
+_REP:
fprintf(stderr, "######## %s loop:%d #########\n", __func__, nRep);
res = taos_query(taos[0], qstr);
if (taos_errno(res) != 0) {
@@ -356,7 +384,7 @@ _REP:
fprintf(stderr, "%s:%d sleep 2 seconds to wait HB take effect\n", __func__, __LINE__);
for (int i = 1; i <= 2; ++i) {
- sleep(1);
+ taosMsleep(1000);
}
res = taos_query(taos[0], qstr);
@@ -372,10 +400,10 @@ _REP:
queryDB(taosRoot, "alter user user0 sysinfo 1");
fprintf(stderr, "%s:%d sleep 2 seconds to wait HB take effect\n", __func__, __LINE__);
for (int i = 1; i <= 2; ++i) {
- sleep(1);
+ taosMsleep(1000);
}
- if(++nRep < 5) {
+ if (++nRep < 5) {
goto _REP;
}
@@ -390,7 +418,7 @@ _REP:
fprintf(stderr, "######## %s #########\n", __func__);
}
static bool isDropUser = true;
-void userDroppedTest(TAOS *taos, const char *host, char *qstr) {
+void userDroppedTest(TAOS *taos, const char *host, char *qstr) {
// users
int nTestUsers = nUser;
int nLoop = 0;
@@ -408,6 +436,7 @@ _loop:
if (code != 0) {
fprintf(stderr, "failed to run: taos_set_notify_cb:%d for user:%s since %d\n", TAOS_NOTIFY_USER_DROPPED, users[i],
code);
+ exit(EXIT_FAILURE);
} else {
fprintf(stderr, "success to run: taos_set_notify_cb:%d for user:%s\n", TAOS_NOTIFY_USER_DROPPED, users[i]);
}
@@ -426,7 +455,7 @@ _loop:
printf("%s:%d [%d] second(s) elasped, user dropped notification received:%d, total:%d\n", __func__, __LINE__, i,
nUserDropped, nConn);
if (nUserDropped >= nConn) break;
- sleep(1);
+ taosMsleep(1000);
}
for (int i = 0; i < nTestUsers; ++i) {
diff --git a/tests/system-test/0-others/grant.py b/tests/system-test/0-others/grant.py
new file mode 100644
index 0000000000..9e54d9ca37
--- /dev/null
+++ b/tests/system-test/0-others/grant.py
@@ -0,0 +1,222 @@
+import taos
+import sys
+import time
+import os
+
+from util.log import *
+from util.sql import *
+from util.cases import *
+from util.dnodes import *
+from util.dnodes import TDDnodes
+from util.dnodes import TDDnode
+import socket
+import subprocess
+
+class MyDnodes(TDDnodes):
+    def __init__(self, dnodes_lists):
+        super(MyDnodes, self).__init__()
+ self.dnodes = dnodes_lists # dnode must be TDDnode instance
+ if platform.system().lower() == 'windows':
+ self.simDeployed = True
+ else:
+ self.simDeployed = False
+
+class TDTestCase:
+ noConn = True
+    @staticmethod
+    def getTDinternalPath():
+ path_parts = os.getcwd().split(os.sep)
+ try:
+ tdinternal_index = path_parts.index("TDinternal")
+ except ValueError:
+ raise ValueError("The specified directory 'TDinternal' was not found in the path.")
+ return os.sep.join(path_parts[:tdinternal_index + 1])
+
+ def init(self, conn, logSql, replicaVar=1):
+        tdLog.debug(f"start to execute {__file__}")
+        self.TDDnodes = None
+        self.deploy_cluster(5)
+        self.master_dnode = self.TDDnodes.dnodes[0]
+        self.host = self.master_dnode.cfgDict["fqdn"]
+        conn1 = taos.connect(self.master_dnode.cfgDict["fqdn"], config=self.master_dnode.cfgDir)
+ tdSql.init(conn1.cursor(), True)
+ self.TDinternal = TDTestCase.getTDinternalPath()
+ self.workPath = os.path.join(self.TDinternal, "debug", "build", "bin")
+ tdLog.info(self.workPath)
+
+ def getBuildPath(self):
+ selfPath = os.path.dirname(os.path.realpath(__file__))
+
+ if ("community" in selfPath):
+ projPath = selfPath[:selfPath.find("community")]
+ else:
+ projPath = selfPath[:selfPath.find("tests")]
+
+ for root, dirs, files in os.walk(projPath):
+ if ("taosd" in files or "taosd.exe" in files):
+ rootRealPath = os.path.dirname(os.path.realpath(root))
+ if ("packaging" not in rootRealPath):
+ buildPath = root[:len(root) - len("/build/bin")]
+ break
+ return buildPath
+
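+    # Deploy an N-dnode cluster on one host: each dnode gets its own serverPort, all share the same firstEp.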
+    def deploy_cluster(self, dnodes_nums):
+
+ testCluster = False
+ valgrind = 0
+ hostname = socket.gethostname()
+ dnodes = []
+ start_port = 6030
+ for num in range(1, dnodes_nums+1):
+ dnode = TDDnode(num)
+ dnode.addExtraCfg("firstEp", f"{hostname}:{start_port}")
+ dnode.addExtraCfg("fqdn", f"{hostname}")
+ dnode.addExtraCfg("serverPort", f"{start_port + (num-1)*100}")
+ dnode.addExtraCfg("monitorFqdn", hostname)
+ dnode.addExtraCfg("monitorPort", 7043)
+ dnodes.append(dnode)
+
+ self.TDDnodes = MyDnodes(dnodes)
+ self.TDDnodes.init("")
+ self.TDDnodes.setTestCluster(testCluster)
+ self.TDDnodes.setValgrind(valgrind)
+
+ self.TDDnodes.setAsan(tdDnodes.getAsan())
+ self.TDDnodes.stopAll()
+ for dnode in self.TDDnodes.dnodes:
+ self.TDDnodes.deploy(dnode.index,{})
+
+ for dnode in self.TDDnodes.dnodes:
+ self.TDDnodes.starttaosd(dnode.index)
+
+ # create cluster
+ for dnode in self.TDDnodes.dnodes[1:]:
+ # print(dnode.cfgDict)
+            dnode_id = dnode.cfgDict["fqdn"] + ":" + dnode.cfgDict["serverPort"]
+ dnode_first_host = dnode.cfgDict["firstEp"].split(":")[0]
+ dnode_first_port = dnode.cfgDict["firstEp"].split(":")[-1]
+ cmd = f"{self.getBuildPath()}/build/bin/taos -h {dnode_first_host} -P {dnode_first_port} -s \"create dnode \\\"{dnode_id}\\\"\""
+ print(cmd)
+ os.system(cmd)
+
+ time.sleep(2)
+ tdLog.info(" create cluster done! ")
+
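+    # Verify the 5-dnode/1-mnode topology, then create the audit/db objects (stream, topics, view) exercised by the grant checks.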
+ def s0_five_dnode_one_mnode(self):
+ tdSql.query("select * from information_schema.ins_dnodes;")
+ tdSql.checkData(0,1,'%s:6030'%self.host)
+ tdSql.checkData(4,1,'%s:6430'%self.host)
+ tdSql.checkData(0,4,'ready')
+ tdSql.checkData(4,4,'ready')
+ tdSql.query("select * from information_schema.ins_mnodes;")
+ tdSql.checkData(0,1,'%s:6030'%self.host)
+ tdSql.checkData(0,2,'leader')
+ tdSql.checkData(0,3,'ready')
+ tdSql.error("create mnode on dnode 1;")
+ tdSql.error("drop mnode on dnode 1;")
+ tdSql.execute("create database if not exists audit");
+ tdSql.execute("use audit");
+ tdSql.execute("create table operations(ts timestamp, c0 int primary key,c1 bigint,c2 int,c3 float,c4 double) tags(t0 bigint unsigned)");
+ tdSql.execute("create table t_operations_abc using operations tags(1)");
+ tdSql.execute("drop database if exists db")
+ tdSql.execute("create database if not exists db replica 1")
+ tdSql.execute("use db")
+ tdSql.execute("create table stb0(ts timestamp, c0 int primary key,c1 bigint,c2 int,c3 float,c4 double) tags(t0 bigint unsigned)");
+ tdSql.execute("create table ctb0 using stb0 tags(0)");
+ tdSql.execute("create stream streams1 trigger at_once IGNORE EXPIRED 0 IGNORE UPDATE 0 into streamt as select _wstart, count(*) c1, count(c2) c2 , sum(c3) c3 , max(c4) c4 from stb0 interval(10s)");
+ tdSql.execute("create topic topic_stb_column as select ts, c3 from stb0");
+ tdSql.execute("create topic topic_stb_all as select ts, c1, c2, c3 from stb0");
+ tdSql.execute("create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb0");
+ tdSql.execute("create view view1 as select * from stb0");
+
+ def getConnection(self, dnode):
+ host = dnode.cfgDict["fqdn"]
+ port = dnode.cfgDict["serverPort"]
+ config_dir = dnode.cfgDir
+ return taos.connect(host=host, port=int(port), config=config_dir)
+
+ def s1_check_alive(self):
+ # check cluster alive
+ tdLog.printNoPrefix("======== test cluster alive: ")
+ tdSql.checkDataLoop(0, 0, 1, "show cluster alive;", 20, 0.5)
+
+ tdSql.query("show db.alive;")
+ tdSql.checkData(0, 0, 1)
+
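+    # Snapshot cluster, machine, and grant info into .clusterInfo, then run the grantTest binary against it.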
+ def s2_check_show_grants_ungranted(self):
+ tdLog.printNoPrefix("======== test show grants ungranted: ")
+ self.infoPath = os.path.join(self.workPath, ".clusterInfo")
+ infoFile = open(self.infoPath, "w")
+ try:
+ tdSql.query(f'select create_time,expire_time,version from information_schema.ins_cluster;')
+ tdSql.checkEqual(len(tdSql.queryResult), 1)
+ infoFile.write(";".join(map(str, tdSql.queryResult[0])) + "\n")
+ tdSql.query(f'show cluster machines;')
+ tdSql.checkEqual(len(tdSql.queryResult), 1)
+ infoFile.write(";".join(map(str,tdSql.queryResult[0])) + "\n")
+ tdSql.query(f'show grants;')
+ tdSql.checkEqual(len(tdSql.queryResult), 1)
+ infoFile.write(";".join(map(str,tdSql.queryResult[0])) + "\n")
+ tdSql.query(f'show grants full;')
+ tdSql.checkEqual(len(tdSql.queryResult), 31)
+
+ if infoFile:
+ infoFile.flush()
+
+ files_and_dirs = os.listdir(f'{self.workPath}')
+ print(f"files_and_dirs: {files_and_dirs}")
+
+ process = subprocess.Popen(f'{self.workPath}{os.sep}grantTest', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output, error = process.communicate()
+ output = output.decode(encoding="utf-8")
+ error = error.decode(encoding="utf-8")
+ print(f"code: {process.returncode}")
+ print(f"error:\n{error}")
+ tdSql.checkEqual(process.returncode, 0)
+ tdSql.checkEqual(error, "")
+ lines = output.splitlines()
+ for line in lines:
+ if line.startswith("code:"):
+ fields = line.split(":")
+ tdSql.error(f"{fields[2]}", int(fields[1]), fields[3])
+ except Exception as e:
+ if os.path.exists(self.infoPath):
+ os.remove(self.infoPath)
+ raise Exception(repr(e))
+ finally:
+ if infoFile:
+ infoFile.close()
+
+ def s3_check_show_grants_granted(self):
+ tdLog.printNoPrefix("======== test show grants granted: ")
+ try:
+ process = subprocess.Popen(f'{self.workPath}{os.sep}grantTest 1', shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ output, error = process.communicate()
+ output = output.decode(encoding="utf-8")
+ error = error.decode(encoding="utf-8")
+ print(f"code: {process.returncode}")
+ print(f"error:\n{error}")
+ print(f"output:\n{output}")
+ tdSql.checkEqual(process.returncode, 0)
+ except Exception as e:
+ raise Exception(repr(e))
+ finally:
+ if os.path.exists(self.infoPath):
+ os.remove(self.infoPath)
+
+ def run(self):
+ # print(self.master_dnode.cfgDict)
+ # keep the order of following steps
+ self.s0_five_dnode_one_mnode()
+ self.s1_check_alive()
+ self.s2_check_show_grants_ungranted()
+ self.s3_check_show_grants_granted()
+
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success(f"{__file__} successfully executed")
+
+tdCases.addLinux(__file__, TDTestCase())
+tdCases.addWindows(__file__, TDTestCase())
diff --git a/tests/system-test/0-others/information_schema.py b/tests/system-test/0-others/information_schema.py
index aa548d4e59..c1a3942db6 100644
--- a/tests/system-test/0-others/information_schema.py
+++ b/tests/system-test/0-others/information_schema.py
@@ -299,6 +299,7 @@ class TDTestCase:
'oracle':'Oracle',
'mssql':'SqlServer',
'mongodb':'MongoDB',
+ 'csv':'CSV',
}
tdSql.execute('drop database if exists db2')
diff --git a/tests/system-test/2-query/tsma.py b/tests/system-test/2-query/tsma.py
index a1638ae4cb..1c688d568c 100644
--- a/tests/system-test/2-query/tsma.py
+++ b/tests/system-test/2-query/tsma.py
@@ -604,7 +604,7 @@ class TSMATestSQLGenerator:
class TDTestCase:
- updatecfgDict = {'asynclog': 0, 'ttlUnit': 1, 'ttlPushInterval': 5, 'ratioOfVnodeStreamThrea': 4, 'maxTsmaNum': 3}
+ updatecfgDict = {'asynclog': 0, 'ttlUnit': 1, 'ttlPushInterval': 5, 'ratioOfVnodeStreamThrea': 4, 'maxTsmaNum': 3, 'debugFlag': 143}
def __init__(self):
self.vgroups = 4
@@ -804,8 +804,8 @@ class TDTestCase:
self.tsma_tester.check_sql(ctx.sql, ctx)
def test_query_with_tsma(self):
- self.create_tsma('tsma1', 'test', 'meters', ['avg(c1)', 'avg(c2)'], '5m')
- self.create_tsma('tsma2', 'test', 'meters', ['avg(c1)', 'avg(c2)'], '30m')
+ self.create_tsma('tsma1', 'test', 'meters', ['avg(c1)', 'avg(c2)', 'count(ts)'], '5m')
+ self.create_tsma('tsma2', 'test', 'meters', ['avg(c1)', 'avg(c2)', 'count(ts)'], '30m')
self.create_tsma('tsma5', 'test', 'norm_tb', ['avg(c1)', 'avg(c2)'], '10m')
self.test_query_with_tsma_interval()
@@ -1237,7 +1237,41 @@ class TDTestCase:
clust_dnode_nums = len(cluster_dnode_list)
if clust_dnode_nums > 1:
self.test_redistribute_vgroups()
-
+ tdSql.execute("drop tsma test.tsma5")
+ for _ in range(4):
+ self.test_td_32519()
+
+ def test_td_32519(self):
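+        # Regression: drop and recreate a child table, then verify the TSMA output child tables are removed and rebuilt.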
+ self.create_recursive_tsma('tsma1', 'tsma_r', 'test', '1h', 'meters', ['avg(c1)', 'avg(c2)', 'count(ts)'])
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 11:45:00", 1,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 11:55:00", 2,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('DROP TABLE test.t1', queryTimes=1)
+ self.wait_query_err('desc test.`404e15422d96c8b5de9603c2296681b1`', 10, -2147473917)
+ self.wait_query_err('desc test.`82b56f091c4346369da0af777c3e580d`', 10, -2147473917)
+ self.wait_query_err('desc test.`163b7c69922cf6d83a98bfa44e52dade`', 10, -2147473917)
+ tdSql.execute('CREATE TABLE test.t1 USING test.meters TAGS(1, "a", "b", 1,1,1)')
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 11:59:00", 3,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 12:10:00", 4,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('INSERT INTO test.t1 VALUES("2024-10-24 12:20:00", 5,1,1,1,1,1,1, "a", "a")', queryTimes=1)
+ tdSql.execute('FLUSH DATABASE test', queryTimes=1)
+ tdSql.query('SELECT * FROM test.t1', queryTimes=1)
+ tdSql.checkRows(3)
+ sql = 'SELECT * FROM test.`404e15422d96c8b5de9603c2296681b1`'
+ self.wait_query(sql, 3, 20) ## tsma1 output ctb for t1
+ tdSql.query(sql, queryTimes=1)
+        tdSql.checkData(0, 1, 1)
+        tdSql.checkData(1, 1, 1)
+        tdSql.checkData(2, 1, 1)
+ #sql = 'select * from test.`82b56f091c4346369da0af777c3e580d`'
+ #self.wait_query(sql, 2, 10) ## tsma2 output ctb for t1
+ #tdSql.query(sql, queryTimes=1)
+ #tdSql.checkData(0, 1, 1)
+ #tdSql.checkData(1, 1, 2)
+ sql = 'select * from test.`163b7c69922cf6d83a98bfa44e52dade`'
+ self.wait_query(sql, 2, 20) ## tsma_r output ctb for t1
+ tdSql.checkData(0, 1, 1)
+ self.drop_tsma('tsma_r', 'test')
+
def test_create_tsma(self):
function_name = sys._getframe().f_code.co_name
tdLog.debug(f'-----{function_name}------')