From 195ace04b81df05723717292c3c943a00a138c33 Mon Sep 17 00:00:00 2001 From: Jacob Su Date: Fri, 19 Apr 2024 10:28:37 +0800 Subject: [PATCH 001/127] fix compile errors on macOS 14.4 m2 chip. --- contrib/CMakeLists.txt | 2 ++ include/os/osString.h | 7 ++++++- source/client/CMakeLists.txt | 12 +++++------- source/dnode/mgmt/mgmt_snode/CMakeLists.txt | 5 ++++- source/dnode/mgmt/mgmt_vnode/CMakeLists.txt | 5 ++++- source/dnode/vnode/CMakeLists.txt | 3 +++ source/libs/executor/CMakeLists.txt | 2 ++ source/libs/function/CMakeLists.txt | 3 +++ source/libs/monitorfw/CMakeLists.txt | 2 +- source/libs/sync/CMakeLists.txt | 1 + source/os/src/osString.c | 4 ---- tools/shell/src/shellEngine.c | 2 +- 12 files changed, 32 insertions(+), 16 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index ccd60df19a..4cdafe539f 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -251,11 +251,13 @@ target_include_directories( # zlib set(CMAKE_PROJECT_INCLUDE_BEFORE "${TD_SUPPORT_DIR}/EnableCMP0048.txt.in") add_subdirectory(zlib EXCLUDE_FROM_ALL) +target_compile_options(zlibstatic PRIVATE -Wno-error=deprecated-non-prototype) target_include_directories( zlibstatic PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib PUBLIC ${CMAKE_CURRENT_SOURCE_DIR}/zlib ) +target_compile_options(zlib PRIVATE -Wno-error=deprecated-non-prototype) target_include_directories( zlib PUBLIC ${CMAKE_CURRENT_BINARY_DIR}/zlib diff --git a/include/os/osString.h b/include/os/osString.h index 1aca119293..fd8e34f539 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -22,7 +22,12 @@ extern "C" { typedef wchar_t TdWchar; typedef int32_t TdUcs4; -typedef void *iconv_t; +#ifndef DISALLOW_NCHAR_WITHOUT_ICONV +#include "iconv.h" +#else + typedef void *iconv_t; +#endif + typedef enum { M2C = 0, C2M } ConvType; // If the error is in a third-party library, place this header file under the third-party library header file. 
diff --git a/source/client/CMakeLists.txt b/source/client/CMakeLists.txt index a17c27c297..293251f35d 100644 --- a/source/client/CMakeLists.txt +++ b/source/client/CMakeLists.txt @@ -10,6 +10,8 @@ else() add_library(taos SHARED ${CLIENT_SRC}) endif() +target_compile_options(taos PRIVATE -Wno-error=deprecated-non-prototype) + INCLUDE_DIRECTORIES(jni) target_include_directories( @@ -23,13 +25,6 @@ target_link_libraries( PRIVATE os util common transport monitor nodes parser command planner catalog scheduler function qcom geometry ) -if(TD_DARWIN_ARM64) - target_link_libraries( - taos - PRIVATE "-arch x86_64" - ) -endif() - if(TD_WINDOWS) INCLUDE_DIRECTORIES(jni/windows) INCLUDE_DIRECTORIES(jni/windows/win32) @@ -53,6 +48,9 @@ set_target_properties( ) add_library(taos_static STATIC ${CLIENT_SRC}) + +target_compile_options(taos_static PRIVATE -Wno-error=deprecated-non-prototype) + target_include_directories( taos_static PUBLIC "${TD_SOURCE_DIR}/include/client" diff --git a/source/dnode/mgmt/mgmt_snode/CMakeLists.txt b/source/dnode/mgmt/mgmt_snode/CMakeLists.txt index 62dc41a0ae..59ea95631a 100644 --- a/source/dnode/mgmt/mgmt_snode/CMakeLists.txt +++ b/source/dnode/mgmt/mgmt_snode/CMakeLists.txt @@ -1,9 +1,12 @@ aux_source_directory(src MGMT_SNODE) add_library(mgmt_snode STATIC ${MGMT_SNODE}) + +target_compile_options(mgmt_snode PRIVATE -Wno-error=deprecated-non-prototype) + target_include_directories( mgmt_snode PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( mgmt_snode node_util -) \ No newline at end of file +) diff --git a/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt b/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt index 15b822ad92..0cfd1da1e1 100644 --- a/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt +++ b/source/dnode/mgmt/mgmt_vnode/CMakeLists.txt @@ -1,9 +1,12 @@ aux_source_directory(src MGMT_VNODE) add_library(mgmt_vnode STATIC ${MGMT_VNODE}) + +target_compile_options(mgmt_vnode PRIVATE -Wno-error=deprecated-non-prototype) + target_include_directories( mgmt_vnode PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) target_link_libraries( mgmt_vnode node_util -) \ No newline at end of file +) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index 43e680c382..886bbf4855 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -1,6 +1,9 @@ # vnode add_subdirectory(src/tqCommon) add_library(vnode STATIC "") + +target_compile_options(vnode PRIVATE -Wno-error=single-bit-bitfield-constant-conversion) + set( VNODE_SOURCE_FILES "src/vnd/vnodeOpen.c" diff --git a/source/libs/executor/CMakeLists.txt b/source/libs/executor/CMakeLists.txt index 838233346e..be94f3145f 100644 --- a/source/libs/executor/CMakeLists.txt +++ b/source/libs/executor/CMakeLists.txt @@ -2,6 +2,8 @@ aux_source_directory(src EXECUTOR_SRC) add_library(executor STATIC ${EXECUTOR_SRC}) +target_compile_options(executor PRIVATE -Wno-error=deprecated-non-prototype) + target_link_libraries(executor PRIVATE os util common function parser planner qcom scalar nodes index wal tdb geometry ) diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 3a68648d49..305fd29832 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -141,6 +141,9 @@ target_link_libraries( #SET(EXECUTABLE_OUTPUT_PATH ${CMAKE_BINARY_DIR}/build/bin) add_executable(udfd src/udfd.c) + +target_compile_options(udfd PRIVATE -Wno-error=deprecated-non-prototype) + target_include_directories( udfd PUBLIC diff --git 
a/source/libs/monitorfw/CMakeLists.txt b/source/libs/monitorfw/CMakeLists.txt index 610cd63985..cd00c2e2a1 100644 --- a/source/libs/monitorfw/CMakeLists.txt +++ b/source/libs/monitorfw/CMakeLists.txt @@ -5,5 +5,5 @@ target_include_directories( PUBLIC "${TD_SOURCE_DIR}/include/libs/monitorfw" PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/inc" ) - +target_compile_options(monitorfw PRIVATE -Wno-error=deprecated-pragma) target_link_libraries(monitorfw os util common transport) diff --git a/source/libs/sync/CMakeLists.txt b/source/libs/sync/CMakeLists.txt index 6025070cb7..d3344ea6fa 100644 --- a/source/libs/sync/CMakeLists.txt +++ b/source/libs/sync/CMakeLists.txt @@ -1,6 +1,7 @@ aux_source_directory(src SYNC_SRC) add_library(sync STATIC ${SYNC_SRC}) +target_compile_options(sync PRIVATE -Wno-error=deprecated-non-prototype) target_link_libraries( sync PUBLIC common diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 768bbe5cd1..a4440ee5e8 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -17,10 +17,6 @@ #define _DEFAULT_SOURCE #include "os.h" -#ifndef DISALLOW_NCHAR_WITHOUT_ICONV -#include "iconv.h" -#endif - extern int wcwidth(wchar_t c); extern int wcswidth(const wchar_t *s, size_t n); diff --git a/tools/shell/src/shellEngine.c b/tools/shell/src/shellEngine.c index 57ed675303..1d1050c1ec 100644 --- a/tools/shell/src/shellEngine.c +++ b/tools/shell/src/shellEngine.c @@ -56,7 +56,7 @@ static void shellWriteHistory(); static void shellPrintError(TAOS_RES *tres, int64_t st); static bool shellIsCommentLine(char *line); static void shellSourceFile(const char *file); -static bool shellGetGrantInfo(); +static bool shellGetGrantInfo(char* buf); static void shellCleanup(void *arg); static void *shellCancelHandler(void *arg); From 739acf425a3e380ef9e1229d63763d7d62b1d85a Mon Sep 17 00:00:00 2001 From: laiyongtao Date: Thu, 25 Apr 2024 20:58:50 +0800 Subject: [PATCH 002/127] set parent environ to udfd --- source/libs/function/src/tudf.c | 42 +++++++++++++++++++++++++++++++-- 1 file changed, 40 insertions(+), 2 deletions(-) diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index 7e344866a5..7c0e66d072 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -49,6 +49,8 @@ SUdfdData udfdGlobal = {0}; int32_t udfStartUdfd(int32_t startDnodeId); int32_t udfStopUdfd(); +extern char **environ; + static int32_t udfSpawnUdfd(SUdfdData *pData); void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int termSignal); static int32_t udfSpawnUdfd(SUdfdData *pData); @@ -147,7 +149,33 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { snprintf(ldLibPathEnvItem, 1024 + 32, "%s=%s", "LD_LIBRARY_PATH", udfdPathLdLib); char *envUdfd[] = {dnodeIdEnvItem, thrdPoolSizeEnvItem, ldLibPathEnvItem, NULL}; - options.env = envUdfd; + char **envUdfdWithPEnv = NULL; + if (environ != NULL) { + int numEnviron = 0; + while (environ[numEnviron] != NULL) { + numEnviron++; + } + int lenEnvUdfd = ARRAY_SIZE(envUdfd); + + envUdfdWithPEnv = (char**) taosMemoryMalloc((numEnviron + lenEnvUdfd) * sizeof(char*)); + + int i; + for (i = 0; i < numEnviron; i++) { + envUdfdWithPEnv[i] = (char*) taosMemoryMalloc(strlen(environ[i]) + 1); + strcpy(envUdfdWithPEnv[i], environ[i]); + } + for (i = 0; i < lenEnvUdfd; i++) { + if (envUdfd[i] != NULL) { + envUdfdWithPEnv[numEnviron + i] = (char*) taosMemoryMalloc(strlen(envUdfd[i]) + 1); + strcpy(envUdfdWithPEnv[numEnviron + i], envUdfd[i]); + } + } + envUdfdWithPEnv[numEnviron + lenEnvUdfd - 1] = NULL; + + 
options.env = envUdfdWithPEnv; + } else { + options.env = envUdfd; + } int err = uv_spawn(&pData->loop, &pData->process, &options); pData->process.data = (void *)pData; @@ -176,6 +204,16 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { } else { fnInfo("udfd is initialized"); } + + if (envUdfdWithPEnv != NULL) { + int i = 0; + while (envUdfdWithPEnv[i] != NULL) { + taosMemoryFree(envUdfdWithPEnv[i]); + i++; + } + taosMemoryFree(envUdfdWithPEnv); + } + return err; } @@ -1941,7 +1979,7 @@ int32_t doCallUdfScalarFunc(UdfcFuncHandle handle, SScalarParam *input, int32_t convertDataBlockToScalarParm(&resultBlock, output); taosArrayDestroy(resultBlock.pDataBlock); } - + blockDataFreeRes(&inputBlock); return err; } From 37f13bfec2fa6c4c298b5306bfef5c493c77ca08 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Tue, 1 Oct 2024 10:33:00 +0800 Subject: [PATCH 003/127] mod java code comments --- .../JDBC/SpringJdbcTemplate/readme.md | 16 ++--- .../src/main/resources/log4j.properties | 10 ++-- .../src/main/resources/proxool.xml | 21 +++---- .../src/main/java/com/taosdata/Worker.java | 9 +-- docs/examples/JDBC/mybatisplus-demo/readme | 8 +-- docs/examples/JDBC/springbootdemo/readme.md | 14 ++--- docs/examples/JDBC/taosdemo/readme.md | 7 ++- .../taosdemo/TaosDemoApplication.java | 15 +++-- .../taosdata/taosdemo/dao/SubTableMapper.java | 15 ++--- .../taosdemo/dao/SuperTableMapper.java | 22 +++---- .../taosdata/taosdemo/dao/TableMapper.java | 13 ++-- .../taosdemo/service/DatabaseService.java | 4 +- .../taosdemo/service/SubTableService.java | 60 ++++++++++--------- .../taosdemo/service/SuperTableService.java | 2 +- .../taosdemo/service/TableService.java | 5 +- .../service/data/FieldValueGenerator.java | 10 +++- .../service/data/SubTableMetaGenerator.java | 2 +- .../service/data/SuperTableMetaGenerator.java | 13 ++-- .../service/data/TagValueGenerator.java | 2 +- .../taosdemo/utils/TimeStampUtil.java | 6 +- .../src/main/resources/log4j.properties | 10 ++-- 21 files changed, 142 insertions(+), 122 deletions(-) diff --git a/docs/examples/JDBC/SpringJdbcTemplate/readme.md b/docs/examples/JDBC/SpringJdbcTemplate/readme.md index f59bcdbeb5..0e9812385a 100644 --- a/docs/examples/JDBC/SpringJdbcTemplate/readme.md +++ b/docs/examples/JDBC/SpringJdbcTemplate/readme.md @@ -1,11 +1,11 @@ ## TDengine Spring JDBC Template Demo -`Spring JDBC Template` 简化了原生 JDBC Connection 获取释放等操作,使得操作数据库更加方便。 +`Spring JDBC Template` simplifies the operations of acquiring and releasing native JDBC Connections, making database operations more convenient. -### 配置 +### Configuration -修改 `src/main/resources/applicationContext.xml` 文件中 TDengine 的配置信息: +Modify the TDengine configuration in the `src/main/resources/applicationContext.xml` file: ```xml @@ -20,13 +20,15 @@ ``` -### 打包运行 +### Package and run + +Navigate to the `TDengine/tests/examples/JDBC/SpringJdbcTemplate` directory and execute the following commands to generate an executable jar file. 
-进入 `TDengine/tests/examples/JDBC/SpringJdbcTemplate` 目录下,执行以下命令可以生成可执行 jar 包。 ```shell mvn clean package ``` -打包成功之后,进入 `target/` 目录下,执行以下命令就可运行测试: +After successfully packaging, navigate to the `target/` directory and execute the following commands to run the tests: + ```shell java -jar target/SpringJdbcTemplate-1.0-SNAPSHOT-jar-with-dependencies.jar -``` \ No newline at end of file +``` diff --git a/docs/examples/JDBC/connectionPools/src/main/resources/log4j.properties b/docs/examples/JDBC/connectionPools/src/main/resources/log4j.properties index 1299357be3..a7f4d3d492 100644 --- a/docs/examples/JDBC/connectionPools/src/main/resources/log4j.properties +++ b/docs/examples/JDBC/connectionPools/src/main/resources/log4j.properties @@ -1,21 +1,21 @@ -### 设置### +### Settings### log4j.rootLogger=debug,stdout,DebugLog,ErrorLog -### 输出信息到控制抬 ### +### Output information to the console ### log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.Target=System.out log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n -### 输出DEBUG 级别以上的日志到=logs/debug.log +### Output logs of DEBUG level and above to logs/debug.log log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender log4j.appender.DebugLog.File=logs/debug.log log4j.appender.DebugLog.Append=true log4j.appender.DebugLog.Threshold=DEBUG log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n -### 输出ERROR 级别以上的日志到=logs/error.log +### Output logs of ERROR level and above to logs/error.log log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender log4j.appender.ErrorLog.File=logs/error.log log4j.appender.ErrorLog.Append=true log4j.appender.ErrorLog.Threshold=ERROR log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout -log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n \ No newline at end of file +log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n diff --git a/docs/examples/JDBC/connectionPools/src/main/resources/proxool.xml b/docs/examples/JDBC/connectionPools/src/main/resources/proxool.xml index 0e2ac6368a..dcef51ffb5 100644 --- a/docs/examples/JDBC/connectionPools/src/main/resources/proxool.xml +++ b/docs/examples/JDBC/connectionPools/src/main/resources/proxool.xml @@ -1,27 +1,28 @@ + ds - + jdbc:TAOS-RS://127.0.0.1:6041/log - + com.taosdata.jdbc.rs.RestfulDriver - + - + 100 - + 100 - + 1 - + 5 - + 30000 - + select server_version() - \ No newline at end of file + diff --git a/docs/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Worker.java b/docs/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Worker.java index f6e21cd729..6823ca5b4d 100644 --- a/docs/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Worker.java +++ b/docs/examples/JDBC/consumer-demo/src/main/java/com/taosdata/Worker.java @@ -35,17 +35,18 @@ public class Worker implements Runnable { public void run() { while (!Thread.interrupted()) { try { - // 控制请求频率 + // Control request rate if (semaphore.tryAcquire()) { ConsumerRecords records = consumer.poll(Duration.ofMillis(sleepTime)); pool.submit(() -> { RateLimiter limiter = RateLimiter.create(rate); try { for (ConsumerRecord record : records) { - // 流量控制 + // Traffic control limiter.acquire(); - // 业务处理数据 - System.out.println("[" + LocalDateTime.now() + "] Thread id:" + 
Thread.currentThread().getId() + " -> " + record.value()); + // Business data processing + System.out.println("[" + LocalDateTime.now() + "] Thread id:" + + Thread.currentThread().getId() + " -> " + record.value()); } } finally { semaphore.release(); diff --git a/docs/examples/JDBC/mybatisplus-demo/readme b/docs/examples/JDBC/mybatisplus-demo/readme index b31b6c34bf..a4816d7631 100644 --- a/docs/examples/JDBC/mybatisplus-demo/readme +++ b/docs/examples/JDBC/mybatisplus-demo/readme @@ -1,14 +1,14 @@ -# 使用说明 +# Instructions -## 创建使用db +## Create and use the database ```shell $ taos > create database mp_test ``` -## 执行测试用例 +## Execute test cases ```shell $ mvn clean test -``` \ No newline at end of file +``` diff --git a/docs/examples/JDBC/springbootdemo/readme.md b/docs/examples/JDBC/springbootdemo/readme.md index 625d43e4ed..d11bb33c83 100644 --- a/docs/examples/JDBC/springbootdemo/readme.md +++ b/docs/examples/JDBC/springbootdemo/readme.md @@ -1,6 +1,6 @@ ## TDengine SpringBoot + Mybatis Demo -## 需要提前创建 test 数据库 +## Need to create a test database in advance ``` $ taos -s 'create database if not exists test' @@ -8,7 +8,7 @@ $ taos -s 'create database if not exists test' $ curl http://localhost:8080/weather/init ``` -### 配置 application.properties +### Configure application.properties ```properties # datasource config spring.datasource.driver-class-name=com.taosdata.jdbc.TSDBDriver @@ -38,9 +38,9 @@ mybatis.mapper-locations=classpath:mapper/*.xml logging.level.com.taosdata.jdbc.springbootdemo.dao=debug ``` -### 主要功能 +### Main functions -* 创建数据库和表 +* Create databases and tables ```xml @@ -52,14 +52,14 @@ logging.level.com.taosdata.jdbc.springbootdemo.dao=debug ``` -* 插入单条记录 +* Insert a single record ```xml insert into test.weather (ts, temperature, humidity) values (now, #{temperature,jdbcType=INTEGER}, #{humidity,jdbcType=FLOAT}) ``` -* 插入多条记录 +* Insert multiple records ```xml @@ -69,7 +69,7 @@ logging.level.com.taosdata.jdbc.springbootdemo.dao=debug ``` -* 分页查询 +* Pagination query ```xml diff --git a/docs/examples/JDBC/taosdemo/readme.md b/docs/examples/JDBC/taosdemo/readme.md index 986eef8a05..141391d1f6 100644 --- a/docs/examples/JDBC/taosdemo/readme.md +++ b/docs/examples/JDBC/taosdemo/readme.md @@ -1,11 +1,14 @@ ``` cd tests/examples/JDBC/taosdemo mvn clean package -Dmaven.test.skip=true -# 先建表,再插入的 +# Create tables first, then insert data java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host -database -doCreateTable true -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 -# 不建表,直接插入的 +# Insert data directly without creating tables java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host -database -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 ``` 如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path 请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。 + + +If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. 
If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library. diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java index 6854054703..40d44475b8 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/TaosDemoApplication.java @@ -24,14 +24,14 @@ public class TaosDemoApplication { private static final Logger logger = LogManager.getLogger(TaosDemoApplication.class); public static void main(String[] args) throws IOException { - // 读配置参数 + // Read configuration parameters JdbcTaosdemoConfig config = new JdbcTaosdemoConfig(args); boolean isHelp = Arrays.asList(args).contains("--help"); if (isHelp || config.host == null || config.host.isEmpty()) { JdbcTaosdemoConfig.printHelp(); System.exit(0); } - // 初始化 + // final DataSource dataSource = DataSourceFactory.getInstance(config.host, config.port, config.user, config.password); if (config.executeSql != null && !config.executeSql.isEmpty() @@ -50,7 +50,7 @@ public class TaosDemoApplication { final SuperTableService superTableService = new SuperTableService(dataSource); final SubTableService subTableService = new SubTableService(dataSource); - // 创建数据库 + // create database long start = System.currentTimeMillis(); Map databaseParam = new HashMap<>(); databaseParam.put("database", config.database); @@ -81,13 +81,13 @@ public class TaosDemoApplication { config.prefixOfFields, config.numOfTags, config.prefixOfTags); } /**********************************************************************************/ - // 建表 + // create table start = System.currentTimeMillis(); if (config.doCreateTable) { superTableService.drop(superTableMeta.getDatabase(), superTableMeta.getName()); superTableService.create(superTableMeta); if (!config.autoCreateTable) { - // 批量建子表 + // create sub tables in batch subTableService.createSubTable(superTableMeta, config.numOfTables, config.prefixOfTable, config.numOfThreadsForCreate); } @@ -95,7 +95,7 @@ public class TaosDemoApplication { end = System.currentTimeMillis(); logger.info(">>> create table time cost : " + (end - start) + " ms."); /**********************************************************************************/ - // 插入 + // insert data long tableSize = config.numOfTables; int threadSize = config.numOfThreadsForInsert; long startTime = getProperStartTime(config.startTime, config.days); @@ -111,10 +111,9 @@ public class TaosDemoApplication { end = System.currentTimeMillis(); logger.info("insert " + affectedRows + " rows, time cost: " + (end - start) + " ms"); /**********************************************************************************/ - // 查询 /**********************************************************************************/ - // 删除表 + // drop table if (config.dropTable) { superTableService.drop(config.database, config.superTable); } diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapper.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapper.java index e0ddd220c1..013d24eb87 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapper.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SubTableMapper.java @@ -9,21 +9,22 @@ 
import java.util.List; @Repository public interface SubTableMapper { - // 创建:子表 + // Create: SubTable void createUsingSuperTable(SubTableMeta subTableMeta); - // 插入:一张子表多个values + // Insert: Multiple records into one SubTable int insertOneTableMultiValues(SubTableValue subTableValue); - // 插入:一张子表多个values, 自动建表 + // Insert: Multiple records into one SubTable, auto create SubTables int insertOneTableMultiValuesUsingSuperTable(SubTableValue subTableValue); - // 插入:多张表多个values + // Insert: Multiple records into multiple SubTable int insertMultiTableMultiValues(List tables); - // 插入:多张表多个values,自动建表 + // Insert: Multiple records into multiple SubTable, auto create SubTables int insertMultiTableMultiValuesUsingSuperTable(List tables); - // + // -} \ No newline at end of file +} diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapper.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapper.java index 9f8cec9e8f..15cafd04fb 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapper.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/SuperTableMapper.java @@ -6,24 +6,26 @@ import org.springframework.stereotype.Repository; @Repository public interface SuperTableMapper { - // 创建超级表 create table if not exists xxx.xxx (f1 type1, f2 type2, ... ) tags( t1 type1, t2 type2 ...) + // Create super table: create table if not exists xxx.xxx (f1 type1, f2 type2, + // ... ) tags( t1 type1, t2 type2 ...) void createSuperTable(SuperTableMeta tableMetadata); - // 删除超级表 drop table if exists xxx; + // Drop super table: drop table if exists xxx; void dropSuperTable(String database, String name); - // + // - // + // - // + // - // + // - // + // - // - - // + // + // } diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapper.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapper.java index 32d1875e4d..c0f75d2204 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapper.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/dao/TableMapper.java @@ -9,19 +9,18 @@ import java.util.List; @Repository public interface TableMapper { - // 创建:普通表 + // Create: Normal table void create(TableMeta tableMeta); - // 插入:一张表多个value + // Insert: Multiple records into one table int insertOneTableMultiValues(TableValue values); - // 插入: 一张表多个value,指定的列 + // Insert: Multiple records into one table, specified columns int insertOneTableMultiValuesWithColumns(TableValue values); - // 插入:多个表多个value + // Insert: Multiple records into multiple tables int insertMultiTableMultiValues(List tables); - // 插入:多个表多个value, 指定的列 + // Insert: Multiple records into multiple tables, specified columns int insertMultiTableMultiValuesWithColumns(List tables); - -} \ No newline at end of file +} diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java index 3c8e962406..68ddd78323 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/DatabaseService.java @@ -14,12 +14,12 @@ public class DatabaseService { this.databaseMapper = new DatabaseMapperImpl(dataSource); } - // 建库,指定 name + // Create database with specified name public void 
createDatabase(String database) { databaseMapper.createDatabase(database); } - // 建库,指定参数 keep,days,replica等 + // Create database with specified parameters such as keep, days, replica, etc. public void createDatabase(Map map) { if (map.isEmpty()) return; diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java index b0a79dea78..690b02f065 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SubTableService.java @@ -27,7 +27,8 @@ public class SubTableService extends AbstractService { this.mapper = new SubTableMapperImpl(datasource); } - public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable, int numOfThreadsForCreate) { + public void createSubTable(SuperTableMeta superTableMeta, long numOfTables, String prefixOfTable, + int numOfThreadsForCreate) { ExecutorService executor = Executors.newFixedThreadPool(numOfThreadsForCreate); for (long i = 0; i < numOfTables; i++) { long tableIndex = i; @@ -35,54 +36,58 @@ public class SubTableService extends AbstractService { } executor.shutdown(); try { - executor.awaitTermination(Long.MAX_VALUE,TimeUnit.NANOSECONDS); + executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); } catch (InterruptedException e) { e.printStackTrace(); } } public void createSubTable(SuperTableMeta superTableMeta, String tableName) { - // 构造数据 + // Construct data SubTableMeta meta = SubTableMetaGenerator.generate(superTableMeta, tableName); createSubTable(meta); } - // 创建一张子表,可以指定database,supertable,tablename,tag值 + // Create a sub-table, specifying database, super table, table name, and tag + // values public void createSubTable(SubTableMeta subTableMeta) { mapper.createUsingSuperTable(subTableMeta); } /*************************************************************************************************************************/ - // 插入:多线程,多表 + // Insert: Multi-threaded, multiple tables public int insert(List subTableValues, int threadSize, int frequency) { ExecutorService executor = Executors.newFixedThreadPool(threadSize); Future future = executor.submit(() -> insert(subTableValues)); executor.shutdown(); - //TODO:frequency + // TODO:frequency return getAffectRows(future); } - // 插入:单表,insert into xxx values(),()... + // Insert: Single table, insert into xxx values(),()... public int insert(SubTableValue subTableValue) { return mapper.insertOneTableMultiValues(subTableValue); } - // 插入: 多表,insert into xxx values(),()... xxx values(),()... + // Insert: Multiple tables, insert into xxx values(),()... xxx values(),()... public int insert(List subTableValues) { return mapper.insertMultiTableMultiValues(subTableValues); } - // 插入:单表,自动建表, insert into xxx using xxx tags(...) values(),()... + // Insert: Single table, auto-create table, insert into xxx using xxx tags(...) + // values(),()... public int insertAutoCreateTable(SubTableValue subTableValue) { return mapper.insertOneTableMultiValuesUsingSuperTable(subTableValue); } - // 插入:多表,自动建表, insert into xxx using XXX tags(...) values(),()... xxx using XXX tags(...) values(),()... + // Insert: Multiple tables, auto-create tables, insert into xxx using XXX + // tags(...) values(),()... xxx using XXX tags(...) values(),()... 
public int insertAutoCreateTable(List subTableValues) { return mapper.insertMultiTableMultiValuesUsingSuperTable(subTableValues); } - public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime, long gap, JdbcTaosdemoConfig config) { + public int insertMultiThreads(SuperTableMeta superTableMeta, int threadSize, long tableSize, long startTime, + long gap, JdbcTaosdemoConfig config) { List taskList = new ArrayList<>(); List threads = IntStream.range(0, threadSize) .mapToObj(i -> { @@ -94,8 +99,7 @@ public class SubTableService extends AbstractService { startTime, config.timeGap, config.numOfRowsPerTable, config.numOfTablesPerSQL, config.numOfValuesPerSQL, config.order, config.rate, config.range, - config.prefixOfTable, config.autoCreateTable) - ); + config.prefixOfTable, config.autoCreateTable)); taskList.add(task); return new Thread(task, "InsertThread-" + i); }).collect(Collectors.toList()); @@ -126,7 +130,7 @@ public class SubTableService extends AbstractService { private class InsertTask implements Callable { private final long startTableInd; // included - private final long endTableInd; // excluded + private final long endTableInd; // excluded private final long startTime; private final long timeGap; private final long numOfRowsPerTable; @@ -140,10 +144,10 @@ public class SubTableService extends AbstractService { private final boolean autoCreateTable; public InsertTask(SuperTableMeta superTableMeta, long startTableInd, long endTableInd, - long startTime, long timeGap, - long numOfRowsPerTable, long numOfTablesPerSQL, long numOfValuesPerSQL, - int order, int rate, long range, - String prefixOfTable, boolean autoCreateTable) { + long startTime, long timeGap, + long numOfRowsPerTable, long numOfTablesPerSQL, long numOfValuesPerSQL, + int order, int rate, long range, + String prefixOfTable, boolean autoCreateTable) { this.superTableMeta = superTableMeta; this.startTableInd = startTableInd; this.endTableInd = endTableInd; @@ -159,7 +163,6 @@ public class SubTableService extends AbstractService { this.autoCreateTable = autoCreateTable; } - @Override public Integer call() { @@ -171,23 +174,27 @@ public class SubTableService extends AbstractService { int affectRows = 0; // row - for (long rowCnt = 0; rowCnt < numOfRowsPerTable; ) { + for (long rowCnt = 0; rowCnt < numOfRowsPerTable;) { long rowSize = numOfValuesPerSQL; if (rowCnt + rowSize > numOfRowsPerTable) { rowSize = numOfRowsPerTable - rowCnt; } - //table - for (long tableCnt = startTableInd; tableCnt < endTableInd; ) { + // table + for (long tableCnt = startTableInd; tableCnt < endTableInd;) { long tableSize = numOfTablesPerSQL; if (tableCnt + tableSize > endTableInd) { tableSize = endTableInd - tableCnt; } long startTime = this.startTime + rowCnt * timeGap; -// System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " + rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt + ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: " + timeGap + ""); + // System.out.println(Thread.currentThread().getName() + " >>> " + "rowCnt: " + + // rowCnt + ", rowSize: " + rowSize + ", " + "tableCnt: " + tableCnt + + // ",tableSize: " + tableSize + ", " + "startTime: " + startTime + ",timeGap: " + // + timeGap + ""); /***********************************************/ - // 生成数据 - List data = SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt, tableSize, rowSize, startTime, timeGap); - // 乱序 + // Construct data + List data = 
SubTableValueGenerator.generate(superTableMeta, prefixOfTable, tableCnt, + tableSize, rowSize, startTime, timeGap); + // disorder if (order != 0) SubTableValueGenerator.disrupt(data, rate, range); // insert @@ -205,5 +212,4 @@ public class SubTableService extends AbstractService { } } - } diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java index b91348e2d0..47798e0c4e 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/SuperTableService.java @@ -14,7 +14,7 @@ public class SuperTableService { this.superTableMapper = new SuperTableMapperImpl(dataSource); } - // 创建超级表,指定每个field的名称和类型,每个tag的名称和类型 + // Create super table, specifying the name and type of each field and each tag public void create(SuperTableMeta superTableMeta) { superTableMapper.createSuperTable(superTableMeta); } diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java index 2504fdb0b4..2bfb963b4a 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/TableService.java @@ -11,15 +11,14 @@ public class TableService extends AbstractService { private TableMapper tableMapper; - //创建一张表 + // Create a table public void create(TableMeta tableMeta) { tableMapper.create(tableMeta); } - //创建多张表 + // Create multiple tables public void create(List tables) { tables.stream().forEach(this::create); } - } diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java index 73cd981a46..b3ce35231c 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/FieldValueGenerator.java @@ -11,7 +11,8 @@ public class FieldValueGenerator { public static Random random = new Random(System.currentTimeMillis()); - // 生成start到end的时间序列,时间戳为顺序,不含有乱序,field的value为随机生成 + // Generate a time series from start to end, timestamps are in order without + // disorder, field values are randomly generated public static List generate(long start, long end, long timeGap, List fieldMetaList) { List values = new ArrayList<>(); @@ -29,9 +30,12 @@ public class FieldValueGenerator { return values; } - // 生成start到end的时间序列,时间戳为顺序,含有乱序,rate为乱序的比例,range为乱序前跳范围,field的value为随机生成 + // Generate a time series from start to end, timestamps are in order but include + // disorder, rate is the proportion of disorder, range is the jump range before + // disorder, field values are randomly generated public static List disrupt(List values, int rate, long range) { - long timeGap = (long) (values.get(1).getFields().get(0).getValue()) - (long) (values.get(0).getFields().get(0).getValue()); + long timeGap = (long) (values.get(1).getFields().get(0).getValue()) + - (long) (values.get(0).getFields().get(0).getValue()); int bugSize = values.size() * rate / 100; Set bugIndSet = new HashSet<>(); while (bugIndSet.size() < bugSize) { diff --git 
a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java index 88e3c0d26a..7bdd72ec3b 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SubTableMetaGenerator.java @@ -9,7 +9,7 @@ import java.util.List; public class SubTableMetaGenerator { - // 创建tableSize张子表,使用tablePrefix作为子表名的前缀,使用superTableMeta的元数据 + // Create tableSize sub-tables, using tablePrefix as the prefix for sub-table names, and using the metadata from superTableMeta // create table xxx using XXX tags(XXX) public static List generate(SuperTableMeta superTableMeta, int tableSize, String tablePrefix) { List subTableMetaList = new ArrayList<>(); diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java index 05aefd01ac..383b492c5a 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/SuperTableMetaGenerator.java @@ -10,10 +10,11 @@ import java.util.List; public class SuperTableMetaGenerator { - // 创建超级表,使用指定SQL语句 + // Create super table using the specified SQL statement public static SuperTableMeta generate(String superTableSQL) { SuperTableMeta tableMeta = new SuperTableMeta(); - // for example : create table superTable (ts timestamp, temperature float, humidity int) tags(location nchar(64), groupId int) + // for example : create table superTable (ts timestamp, temperature float, + // humidity int) tags(location nchar(64), groupId int) superTableSQL = superTableSQL.trim().toLowerCase(); if (!superTableSQL.startsWith("create")) throw new RuntimeException("invalid create super table SQL"); @@ -54,8 +55,9 @@ public class SuperTableMetaGenerator { return tableMeta; } - // 创建超级表,指定field和tag的个数 - public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize, String tagPrefix) { + // Create super table with specified number of fields and tags + public static SuperTableMeta generate(String database, String name, int fieldSize, String fieldPrefix, int tagSize, + String tagPrefix) { if (fieldSize < 2 || tagSize < 1) { throw new RuntimeException("create super table but fieldSize less than 2 or tagSize less than 1"); } @@ -66,7 +68,8 @@ public class SuperTableMetaGenerator { List fields = new ArrayList<>(); fields.add(new FieldMeta("ts", "timestamp")); for (int i = 1; i <= fieldSize; i++) { - fields.add(new FieldMeta(fieldPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length])); + fields.add( + new FieldMeta(fieldPrefix + "" + i, TaosConstants.DATA_TYPES[i % TaosConstants.DATA_TYPES.length])); } tableMetadata.setFields(fields); // tags diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java index b8024fea45..f7b18ca7cf 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java +++ 
b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/service/data/TagValueGenerator.java @@ -9,7 +9,7 @@ import java.util.List; public class TagValueGenerator { - // 创建标签值:使用tagMetas + // Create tag values using tagMetas public static List generate(List tagMetas) { List tagValues = new ArrayList<>(); for (int i = 0; i < tagMetas.size(); i++) { diff --git a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java index 9cfce16d82..53748169ae 100644 --- a/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java +++ b/docs/examples/JDBC/taosdemo/src/main/java/com/taosdata/taosdemo/utils/TimeStampUtil.java @@ -41,17 +41,17 @@ public class TimeStampUtil { if (start == 0) start = now - size * timeGap; - // 如果size小于1异常 + // If size is less than 1, throw an exception if (size < 1) throw new IllegalArgumentException("size less than 1."); - // 如果timeGap为1,已经超长,需要前移start + // If timeGap is 1 and it exceeds the limit, move start forward if (start + size > now) { start = now - size; return new TimeTuple(start, now, 1); } long end = start + (long) (timeGap * size); if (end > now) { - //压缩timeGap + // Compress timeGap end = now; double gap = (end - start) / (size * 1.0f); if (gap < 1.0f) { diff --git a/docs/examples/JDBC/taosdemo/src/main/resources/log4j.properties b/docs/examples/JDBC/taosdemo/src/main/resources/log4j.properties index 352545854d..40b1478a24 100644 --- a/docs/examples/JDBC/taosdemo/src/main/resources/log4j.properties +++ b/docs/examples/JDBC/taosdemo/src/main/resources/log4j.properties @@ -1,21 +1,21 @@ -### 设置### +### Settings ### log4j.rootLogger=info,stdout -### 输出信息到控制抬 ### +### Output information to the console ### log4j.appender.stdout=org.apache.log4j.ConsoleAppender log4j.appender.stdout.Target=System.out log4j.appender.stdout.layout=org.apache.log4j.PatternLayout log4j.appender.stdout.layout.ConversionPattern=[%-5p] %d{yyyy-MM-dd HH:mm:ss,SSS} method:%l%n%m%n -### 输出DEBUG 级别以上的日志到=logs/debug.log +### Output logs of DEBUG level and above to logs/debug.log ### log4j.appender.DebugLog=org.apache.log4j.DailyRollingFileAppender log4j.appender.DebugLog.File=logs/debug.log log4j.appender.DebugLog.Append=true log4j.appender.DebugLog.Threshold=DEBUG log4j.appender.DebugLog.layout=org.apache.log4j.PatternLayout log4j.appender.DebugLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n -### 输出ERROR 级别以上的日志到=logs/error.log +### Output logs of ERROR level and above to logs/error.log ### log4j.appender.ErrorLog=org.apache.log4j.DailyRollingFileAppender log4j.appender.ErrorLog.File=logs/error.log log4j.appender.ErrorLog.Append=true log4j.appender.ErrorLog.Threshold=ERROR log4j.appender.ErrorLog.layout=org.apache.log4j.PatternLayout -log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n \ No newline at end of file +log4j.appender.ErrorLog.layout.ConversionPattern=%-d{yyyy-MM-dd HH:mm:ss} [ %t:%r ] - [ %p ] %m%n From c39042b643d3fa780cf5e5a22be3a83dcd6ad24a Mon Sep 17 00:00:00 2001 From: the5fire Date: Wed, 16 Oct 2024 11:11:12 +0800 Subject: [PATCH 004/127] fix typo: 365 -> 3650 --- docs/zh/05-basic/01-model.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/05-basic/01-model.md b/docs/zh/05-basic/01-model.md index bcd931df3a..f49db17892 100644 --- a/docs/zh/05-basic/01-model.md +++ b/docs/zh/05-basic/01-model.md @@ -106,7 +106,7 @@ 
CREATE DATABASE power PRECISION 'ms' KEEP 3650 DURATION 10 BUFFER 16; ``` 该 SQL 将创建一个名为 `power` 的数据库,各参数说明如下: - `PRECISION 'ms'` :这个数据库的时序数据使用毫秒(ms)精度的时间戳 -- `KEEP 365`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除 +- `KEEP 3650`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除 - `DURATION 10` :每 10 天的数据放在一个数据文件中 - `BUFFER 16` :写入使用大小为 16MB 的内存池。 @@ -214,4 +214,4 @@ TDengine 支持灵活的数据模型设计,包括多列模型和单列模型 尽管 TDengine 推荐使用多列模型,因为这种模型在写入效率和存储效率方面通常更优,但在某些特定场景下,单列模型可能更为适用。例如,当一个数据采集点的采集量种类经常发生变化时,如果采用多列模型,就需要频繁修改超级表的结构定义,这会增加应用程序的复杂性。在这种情况下,采用单列模型可以简化应用程序的设计和管理,因为它允许独立地管理和扩展每个物理量的超级表。 -总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。 \ No newline at end of file +总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。 From f5c959da6f6c894ca0fee53150f90ce3a09e561a Mon Sep 17 00:00:00 2001 From: dmchen Date: Thu, 17 Oct 2024 03:34:57 +0000 Subject: [PATCH 005/127] feat/TS-5484-audit-delete --- include/common/tglobal.h | 1 + include/common/tmsg.h | 11 +++++ include/common/tmsgdef.h | 1 + include/libs/audit/audit.h | 1 - include/util/tdef.h | 1 + source/common/src/tglobal.c | 5 ++ source/common/src/tmsg.c | 54 +++++++++++++++++++++ source/dnode/mgmt/mgmt_mnode/src/mmHandle.c | 1 + source/dnode/mnode/impl/src/mndDnode.c | 17 +++++++ 9 files changed, 91 insertions(+), 1 deletion(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index cf918c6e0d..3a7f307cfa 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -140,6 +140,7 @@ extern bool tsMonitorForceV2; // audit extern bool tsEnableAudit; extern bool tsEnableAuditCreateTable; +extern bool tsEnableAuditDelete; extern int32_t tsAuditInterval; // telem diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 1a10f02c96..0008bc0e9d 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1795,6 +1795,17 @@ int32_t tSerializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq); int32_t tDeserializeSStatisReq(void* buf, int32_t bufLen, SStatisReq* pReq); void tFreeSStatisReq(SStatisReq* pReq); +typedef struct { + char db[TSDB_DB_FNAME_LEN]; + char table[TSDB_TABLE_NAME_LEN]; + char operation[AUDIT_OPERATION_LEN]; + int32_t sqlLen; + char* pSql; +} SAuditReq; +int32_t tSerializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq); +int32_t tDeserializeSAuditReq(void* buf, int32_t bufLen, SAuditReq* pReq); +void tFreeSAuditReq(SAuditReq* pReq); + typedef struct { int32_t dnodeId; int64_t clusterId; diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 40464dc29a..4a2206bd2b 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -254,6 +254,7 @@ TD_DEF_MSG_TYPE(TDMT_MND_STREAM_DROP_ORPHANTASKS, "stream-drop-orphan-tasks", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_STREAM_TASK_RESET, "stream-reset-tasks", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_MND_UPDATE_DNODE_INFO, "update-dnode-info", NULL, NULL) + TD_DEF_MSG_TYPE(TDMT_MND_AUDIT, "audit", NULL, NULL) TD_CLOSE_MSG_SEG(TDMT_END_MND_MSG) TD_NEW_MSG_SEG(TDMT_VND_MSG) // 2<<8 diff --git a/include/libs/audit/audit.h b/include/libs/audit/audit.h index 2e786ab2b3..f5710256e9 100644 --- a/include/libs/audit/audit.h +++ b/include/libs/audit/audit.h @@ -29,7 +29,6 @@ extern "C" { #endif #define AUDIT_DETAIL_MAX 65472 -#define AUDIT_OPERATION_LEN 20 typedef struct { const char *server; diff --git a/include/util/tdef.h b/include/util/tdef.h index a2bc77d819..970570683d 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -604,6 +604,7 @@ enum { RAND_ERR_MEMORY = 1, RAND_ERR_FILE = 2, RAND_ERR_NETWORK = 4 }; #define MONITOR_TAG_VALUE_LEN 300 
#define MONITOR_METRIC_NAME_LEN 100 +#define AUDIT_OPERATION_LEN 20 #ifdef __cplusplus } #endif diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 3c05294264..b85fea632e 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -117,6 +117,7 @@ bool tsMonitorForceV2 = true; // audit bool tsEnableAudit = true; bool tsEnableAuditCreateTable = true; +bool tsEnableAuditDelete = true; int32_t tsAuditInterval = 5000; // telem @@ -763,6 +764,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddBool(pCfg, "monitorForceV2", tsMonitorForceV2, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "audit", tsEnableAudit, CFG_SCOPE_SERVER, CFG_DYN_ENT_SERVER)); + TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableAuditDelete", tsEnableAuditDelete, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddBool(pCfg, "auditCreateTable", tsEnableAuditCreateTable, CFG_SCOPE_SERVER, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "auditInterval", tsAuditInterval, 500, 200000, CFG_SCOPE_SERVER, CFG_DYN_NONE)); @@ -1448,6 +1450,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditCreateTable"); tsEnableAuditCreateTable = pItem->bval; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableAuditDelete"); + tsEnableAuditDelete = pItem->bval; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "auditInterval"); tsAuditInterval = pItem->i32; diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 4c4b78278e..10c84d9bf8 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -1802,6 +1802,60 @@ _exit: void tFreeSDropUserReq(SDropUserReq *pReq) { FREESQL(); } +int32_t tSerializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) { + SEncoder encoder = {0}; + int32_t code = 0; + int32_t lino; + int32_t tlen; + tEncoderInit(&encoder, buf, bufLen); + + TAOS_CHECK_EXIT(tStartEncode(&encoder)); + + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->operation)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->db)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->table)); + TAOS_CHECK_EXIT(tEncodeI32(&encoder, pReq->sqlLen)); + TAOS_CHECK_EXIT(tEncodeCStr(&encoder, pReq->pSql)); + + tEndEncode(&encoder); + +_exit: + if (code) { + tlen = code; + } else { + tlen = encoder.pos; + } + tEncoderClear(&encoder); + return tlen; +} + +int32_t tDeserializeSAuditReq(void *buf, int32_t bufLen, SAuditReq *pReq) { + SDecoder decoder = {0}; + int32_t code = 0; + int32_t lino; + tDecoderInit(&decoder, buf, bufLen); + + TAOS_CHECK_EXIT(tStartDecode(&decoder)); + + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->operation)); + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->db)); + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->table)); + TAOS_CHECK_EXIT(tDecodeI32(&decoder, &pReq->sqlLen)); + if (pReq->sqlLen > 0) { + pReq->pSql = taosMemoryMalloc(pReq->sqlLen + 1); + if (pReq->pSql == NULL) { + TAOS_CHECK_EXIT(terrno); + } + TAOS_CHECK_EXIT(tDecodeCStrTo(&decoder, pReq->pSql)); + } + tEndDecode(&decoder); +_exit: + tDecoderClear(&decoder); + return code; +} + +void tFreeSAuditReq(SAuditReq *pReq) { taosMemoryFreeClear(pReq->pSql); } + SIpWhiteList *cloneIpWhiteList(SIpWhiteList *pIpWhiteList) { if (pIpWhiteList == NULL) return NULL; diff --git a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c index 7204cde8f7..a182307c49 100644 --- a/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c +++ b/source/dnode/mgmt/mgmt_mnode/src/mmHandle.c @@ -208,6 +208,7 @@ SArray *mmGetMsgHandles() { if 
(dmSetMgmtHandle(pArray, TDMT_MND_DROP_VIEW, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_VIEW_META, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_STATIS, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_MND_AUDIT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_KILL_COMPACT, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_MND_CONFIG_CLUSTER, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_QUERY_COMPACT_PROGRESS_RSP, mmPutMsgToReadQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 04041646eb..8eedcaa906 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -86,6 +86,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq); static int32_t mndProcessNotifyReq(SRpcMsg *pReq); static int32_t mndProcessRestoreDnodeReq(SRpcMsg *pReq); static int32_t mndProcessStatisReq(SRpcMsg *pReq); +static int32_t mndProcessAuditReq(SRpcMsg *pReq); static int32_t mndProcessUpdateDnodeInfoReq(SRpcMsg *pReq); static int32_t mndProcessCreateEncryptKeyReq(SRpcMsg *pRsp); static int32_t mndProcessCreateEncryptKeyRsp(SRpcMsg *pRsp); @@ -125,6 +126,7 @@ int32_t mndInitDnode(SMnode *pMnode) { mndSetMsgHandle(pMnode, TDMT_MND_SHOW_VARIABLES, mndProcessShowVariablesReq); mndSetMsgHandle(pMnode, TDMT_MND_RESTORE_DNODE, mndProcessRestoreDnodeReq); mndSetMsgHandle(pMnode, TDMT_MND_STATIS, mndProcessStatisReq); + mndSetMsgHandle(pMnode, TDMT_MND_AUDIT, mndProcessAuditReq); mndSetMsgHandle(pMnode, TDMT_MND_CREATE_ENCRYPT_KEY, mndProcessCreateEncryptKeyReq); mndSetMsgHandle(pMnode, TDMT_DND_CREATE_ENCRYPT_KEY_RSP, mndProcessCreateEncryptKeyRsp); mndSetMsgHandle(pMnode, TDMT_MND_UPDATE_DNODE_INFO, mndProcessUpdateDnodeInfoReq); @@ -604,6 +606,21 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) { return 0; } +static int32_t mndProcessAuditReq(SRpcMsg *pReq) { + if (tsEnableAudit && tsEnableAuditDelete) { + SMnode *pMnode = pReq->info.node; + SAuditReq auditReq = {0}; + + TAOS_CHECK_RETURN(tDeserializeSAuditReq(pReq->pCont, pReq->contLen, &auditReq)); + + auditAddRecord(pReq, pMnode->clusterId, auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql, + auditReq.sqlLen); + + tFreeSAuditReq(&auditReq); + } + return 0; +} + static int32_t mndUpdateDnodeObj(SMnode *pMnode, SDnodeObj *pDnode) { int32_t code = 0, lino = 0; SDnodeInfoReq infoReq = {0}; From 41acc1ea5ab7f68250f0b1f960b2dc4ad4d53e79 Mon Sep 17 00:00:00 2001 From: dmchen Date: Fri, 18 Oct 2024 03:43:39 +0000 Subject: [PATCH 006/127] feat/TS-5484-audit-delete-client-pull-config --- include/common/tmsg.h | 2 ++ source/dnode/mnode/impl/src/mndProfile.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 0008bc0e9d..74907992e3 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1009,6 +1009,7 @@ typedef struct { char sDetailVer[128]; int64_t whiteListVer; SMonitorParas monitorParas; + int8_t enableAuditDelete; } SConnectRsp; int32_t tSerializeSConnectRsp(void* buf, int32_t bufLen, SConnectRsp* pRsp); @@ -3369,6 +3370,7 @@ typedef struct { int32_t svrTimestamp; SArray* rsps; // SArray SMonitorParas monitorParas; + int8_t enableAuditDelete; } SClientHbBatchRsp; static FORCE_INLINE uint32_t hbKeyHashFunc(const char* key, uint32_t keyLen) { return taosIntHash_64(key, keyLen); } 
diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index a1ffee9b06..d0047cace1 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -305,6 +305,7 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { connectRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen; connectRsp.monitorParas.tsSlowLogThreshold = tsSlowLogThreshold; connectRsp.monitorParas.tsSlowLogThresholdTest = tsSlowLogThresholdTest; + connectRsp.enableAuditDelete = tsEnableAuditDelete; tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN); connectRsp.whiteListVer = pUser->ipWhiteListVer; @@ -709,6 +710,7 @@ static int32_t mndProcessHeartBeatReq(SRpcMsg *pReq) { tstrncpy(batchRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN); batchRsp.monitorParas.tsSlowLogMaxLen = tsSlowLogMaxLen; batchRsp.monitorParas.tsSlowLogScope = tsSlowLogScope; + batchRsp.enableAuditDelete = tsEnableAuditDelete; int32_t sz = taosArrayGetSize(batchReq.reqs); for (int i = 0; i < sz; i++) { From f40a760f1c8f90a93b8fb15a38c756b2c4eb4a02 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Fri, 18 Oct 2024 15:46:38 +0800 Subject: [PATCH 007/127] fix(stream):modify error message --- source/libs/parser/src/parTranslater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 4c9c559457..bea781cc92 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -10617,7 +10617,7 @@ static int32_t checkStreamQuery(STranslateContext* pCxt, SCreateStreamStmt* pStm parseNatualDuration(str, strlen(str), &minDelay, &pVal->unit, pVal->node.resType.precision, false)) { if (pVal->datum.i < minDelay) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, - "stream max delay must be bigger than 5 session"); + "stream max delay must be bigger than 5 seconds"); } } } From 0a9d1282b28cf9fbff94e621527ba0b1d53110e5 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Sat, 12 Oct 2024 18:02:30 +0800 Subject: [PATCH 008/127] setResultDataPtr --- source/client/inc/clientInt.h | 3 +-- source/client/src/clientImpl.c | 27 +++++++++++++-------------- source/client/src/clientTmq.c | 3 +-- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 8d45e8b4a8..2ce466dee1 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -295,8 +295,7 @@ void* doFetchRows(SRequestObj* pRequest, bool setupOneRowPtr, bool convertUcs4); void doSetOneRowPtr(SReqResultInfo* pResultInfo); void setResPrecision(SReqResultInfo* pResInfo, int32_t precision); int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableRsp* pRsp, bool convertUcs4); -int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, - bool convertUcs4); +int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4); int32_t setResSchemaInfo(SReqResultInfo* pResInfo, const SSchema* pSchema, int32_t numOfCols); void doFreeReqResultInfo(SReqResultInfo* pResInfo); int32_t transferTableNameList(const char* tbList, int32_t acctId, char* dbName, SArray** pReq); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 9131d29f30..08dbe7af6f 100644 --- a/source/client/src/clientImpl.c +++ 
b/source/client/src/clientImpl.c @@ -2341,14 +2341,13 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int return TSDB_CODE_SUCCESS; } -int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32_t numOfCols, int32_t numOfRows, - bool convertUcs4) { - if (numOfCols <= 0 || pFields == NULL || pResultInfo == NULL) { +int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { + if (pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL || pResultInfo == NULL) { tscError("setResultDataPtr paras error"); return TSDB_CODE_TSC_INTERNAL_ERROR; } - if (numOfRows == 0) { + if (pResultInfo->numOfRows == 0) { return TSDB_CODE_SUCCESS; } @@ -2356,7 +2355,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 if (code != TSDB_CODE_SUCCESS) { return code; } - code = doConvertJson(pResultInfo, numOfCols, numOfRows); + code = doConvertJson(pResultInfo, pResultInfo->numOfCols, pResultInfo->numOfRows); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -2376,9 +2375,9 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 int32_t cols = *(int32_t*)p; p += sizeof(int32_t); - if (rows != numOfRows || cols != numOfCols) { - tscError("setResultDataPtr paras error:rows;%d numOfRows:%d cols:%d numOfCols:%d", rows, numOfRows, cols, - numOfCols); + if (rows != pResultInfo->numOfRows || cols != pResultInfo->numOfCols) { + tscError("setResultDataPtr paras error:rows;%d numOfRows:%" PRId64 " cols:%d numOfCols:%d", rows, pResultInfo->numOfRows, cols, + pResultInfo->numOfCols); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2389,7 +2388,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 p += sizeof(uint64_t); // check fields - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { int8_t type = *(int8_t*)p; p += sizeof(int8_t); @@ -2398,10 +2397,10 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 } int32_t* colLength = (int32_t*)p; - p += sizeof(int32_t) * numOfCols; + p += sizeof(int32_t) * pResultInfo->numOfCols; char* pStart = p; - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { if (blockVersion == BLOCK_VERSION_1) { colLength[i] = htonl(colLength[i]); } @@ -2412,7 +2411,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { pResultInfo->pCol[i].offset = (int32_t*)pStart; - pStart += numOfRows * sizeof(int32_t); + pStart += pResultInfo->numOfRows * sizeof(int32_t); } else { pResultInfo->pCol[i].nullbitmap = pStart; pStart += BitmapLen(pResultInfo->numOfRows); @@ -2429,7 +2428,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, TAOS_FIELD* pFields, int32 p += sizeof(bool); if (convertUcs4) { - code = doConvertUCS4(pResultInfo, numOfRows, numOfCols, colLength); + code = doConvertUCS4(pResultInfo, pResultInfo->numOfRows, pResultInfo->numOfCols, colLength); } return code; @@ -2542,7 +2541,7 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR pResultInfo->totalRows += pResultInfo->numOfRows; int32_t code = - setResultDataPtr(pResultInfo, pResultInfo->fields, pResultInfo->numOfCols, pResultInfo->numOfRows, convertUcs4); + setResultDataPtr(pResultInfo, convertUcs4); return code; } diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c index fd6ca831d1..d8a67d62e2 100644 --- 
a/source/client/src/clientTmq.c +++ b/source/client/src/clientTmq.c @@ -2868,8 +2868,7 @@ int32_t tmqGetNextResInfo(TAOS_RES* res, bool convertUcs4, SReqResultInfo** pRes pRspObj->resInfo.precision = precision; pRspObj->resInfo.totalRows += pRspObj->resInfo.numOfRows; - int32_t code = setResultDataPtr(&pRspObj->resInfo, pRspObj->resInfo.fields, pRspObj->resInfo.numOfCols, - pRspObj->resInfo.numOfRows, convertUcs4); + int32_t code = setResultDataPtr(&pRspObj->resInfo, convertUcs4); if (code != 0) { return code; } From 161eb54b56d4b37c51086c93149f6d0d7d269b04 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 17 Oct 2024 15:47:26 +0800 Subject: [PATCH 009/127] enh: setResultDataPtr safe check --- source/client/src/clientImpl.c | 42 ++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 9 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 08dbe7af6f..713c07e60f 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2079,12 +2079,12 @@ static int32_t doPrepareResPtr(SReqResultInfo* pResInfo) { return TSDB_CODE_SUCCESS; } -static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int32_t numOfCols, int32_t* colLength) { +static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t* colLength) { int32_t idx = -1; iconv_t conv = taosAcquireConv(&idx, C2M); if (conv == (iconv_t)-1) return TSDB_CODE_TSC_INTERNAL_ERROR; - for (int32_t i = 0; i < numOfCols; ++i) { + for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { int32_t type = pResultInfo->fields[i].type; int32_t bytes = pResultInfo->fields[i].bytes; @@ -2098,7 +2098,7 @@ static int32_t doConvertUCS4(SReqResultInfo* pResultInfo, int32_t numOfRows, int pResultInfo->convertBuf[i] = p; SResultColumn* pCol = &pResultInfo->pCol[i]; - for (int32_t j = 0; j < numOfRows; ++j) { + for (int32_t j = 0; j < pResultInfo->numOfRows; ++j) { if (pCol->offset[j] != -1) { char* pStart = pCol->offset[j] + pCol->pData; @@ -2131,10 +2131,13 @@ int32_t getVersion1BlockMetaSize(const char* p, int32_t numOfCols) { numOfCols * (sizeof(int8_t) + sizeof(int32_t)); } -static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { +static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) { char* p = (char*)pResultInfo->pData; int32_t blockVersion = *(int32_t*)p; + int32_t numOfRows = pResultInfo->numOfRows; + int32_t numOfCols = pResultInfo->numOfCols; + // | version | total length | total rows | total columns | flag seg| block group id | column schema | each column // length | int32_t cols = *(int32_t*)(p + sizeof(int32_t) * 3); @@ -2196,7 +2199,9 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i return len; } -static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int32_t numOfRows) { +static int32_t doConvertJson(SReqResultInfo* pResultInfo) { + int32_t numOfRows = pResultInfo->numOfRows; + int32_t numOfCols = pResultInfo->numOfCols; bool needConvert = false; for (int32_t i = 0; i < numOfCols; ++i) { if (pResultInfo->fields[i].type == TSDB_DATA_TYPE_JSON) { @@ -2213,7 +2218,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int char* p = (char*)pResultInfo->pData; int32_t blockVersion = *(int32_t*)p; - int32_t dataLen = estimateJsonLen(pResultInfo, numOfCols, numOfRows); + int32_t dataLen = estimateJsonLen(pResultInfo); if (dataLen <= 0) { return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2342,7 +2347,7 @@ static int32_t 
doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int } int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { - if (pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL || pResultInfo == NULL) { + if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) { tscError("setResultDataPtr paras error"); return TSDB_CODE_TSC_INTERNAL_ERROR; } @@ -2351,11 +2356,16 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { return TSDB_CODE_SUCCESS; } + if (pResultInfo->pData == NULL) { + tscError("estimateJsonLen error: pData is NULL"); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + int32_t code = doPrepareResPtr(pResultInfo); if (code != TSDB_CODE_SUCCESS) { return code; } - code = doConvertJson(pResultInfo, pResultInfo->numOfCols, pResultInfo->numOfRows); + code = doConvertJson(pResultInfo); if (code != TSDB_CODE_SUCCESS) { return code; } @@ -2401,6 +2411,10 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { char* pStart = p; for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { + if ((pStart - pResultInfo->pData) >= dataLen) { + tscError("setResultDataPtr invalid offset over dataLen %d", dataLen); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (blockVersion == BLOCK_VERSION_1) { colLength[i] = htonl(colLength[i]); } @@ -2424,11 +2438,17 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { pStart += colLength[i]; } + p = pStart; // bool blankFill = *(bool*)p; p += sizeof(bool); + int32_t offset = p - pResultInfo->pData; + if (offset > dataLen) { + tscError("invalid offset %d, dataLen %d", offset, dataLen); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (convertUcs4) { - code = doConvertUCS4(pResultInfo, pResultInfo->numOfRows, pResultInfo->numOfCols, colLength); + code = doConvertUCS4(pResultInfo, colLength); } return code; @@ -2535,6 +2555,10 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR return TSDB_CODE_TSC_INTERNAL_ERROR; } } + } else { + pResultInfo->pData = NULL; + pResultInfo->payloadLen = 0; + return TSDB_CODE_TSC_INTERNAL_ERROR; } // TODO handle the compressed case From bee96ce5e253ce0046bcb8a82cd8997e1c6b152e Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 17 Oct 2024 16:40:28 +0800 Subject: [PATCH 010/127] fix: payloadLen may be zero --- source/client/src/clientImpl.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 713c07e60f..7c14359f64 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2555,10 +2555,6 @@ int32_t setQueryResultFromRsp(SReqResultInfo* pResultInfo, const SRetrieveTableR return TSDB_CODE_TSC_INTERNAL_ERROR; } } - } else { - pResultInfo->pData = NULL; - pResultInfo->payloadLen = 0; - return TSDB_CODE_TSC_INTERNAL_ERROR; } // TODO handle the compressed case From 04a9d67965b4bc9a7a7968b68244e1e67fe09271 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 17 Oct 2024 17:39:11 +0800 Subject: [PATCH 011/127] enh: type check --- include/common/ttypes.h | 1 + source/client/src/clientImpl.c | 16 +++++++++++++++- 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/include/common/ttypes.h b/include/common/ttypes.h index 3934553b1c..0f0f30a1c3 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -284,6 +284,7 @@ typedef struct { #define IS_VALID_UINT64(_t) ((_t) >= 0 && (_t) <= UINT64_MAX) #define IS_VALID_FLOAT(_t) ((_t) >= -FLT_MAX && (_t) <= 
FLT_MAX) #define IS_VALID_DOUBLE(_t) ((_t) >= -DBL_MAX && (_t) <= DBL_MAX) +#define IS_INVALID_TYPE(_t) ((_t) < TSDB_DATA_TYPE_NULL || (_t) >= TSDB_DATA_TYPE_MAX) #define IS_CONVERT_AS_SIGNED(_t) \ (IS_SIGNED_NUMERIC_TYPE(_t) || (_t) == (TSDB_DATA_TYPE_BOOL) || (_t) == (TSDB_DATA_TYPE_TIMESTAMP)) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 7c14359f64..4f268aa96e 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2346,6 +2346,16 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo) { return TSDB_CODE_SUCCESS; } +int32_t checkResultInfo(SReqResultInfo* pResultInfo) { + if (pResultInfo->totalRows < pResultInfo->numOfRows) { + tscError("checkResultInfo error: totalRows:%" PRId64 " < numOfRows:%" PRId64, pResultInfo->totalRows, + pResultInfo->numOfRows); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + + return TSDB_CODE_SUCCESS; +} + int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) { tscError("setResultDataPtr paras error"); @@ -2422,7 +2432,10 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { tscError("invalid colLength %d, dataLen %d", colLength[i], dataLen); return TSDB_CODE_TSC_INTERNAL_ERROR; } - + if (IS_INVALID_TYPE(pResultInfo->fields[i].type)) { + tscError("invalid type %d", pResultInfo->fields[i].type); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } if (IS_VAR_DATA_TYPE(pResultInfo->fields[i].type)) { pResultInfo->pCol[i].offset = (int32_t*)pStart; pStart += pResultInfo->numOfRows * sizeof(int32_t); @@ -2450,6 +2463,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { if (convertUcs4) { code = doConvertUCS4(pResultInfo, colLength); } + code = checkResultInfo(pResultInfo); return code; } From d8218b3872531b718c95a1191310f597e2dda92f Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 17 Oct 2024 19:05:23 +0800 Subject: [PATCH 012/127] fix: resultDataLen with json col --- source/client/src/clientImpl.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 4f268aa96e..d8c01ee17c 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2196,6 +2196,10 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo) { } pStart += colLen; } + + // Ensure the complete structure of the block, including the blankfill field, + // even though it is not used on the client side. + len += sizeof(bool); return len; } @@ -2341,6 +2345,11 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo) { pStart1 += colLen1; } + // Ensure the complete structure of the block, including the blankfill field, + // even though it is not used on the client side. 
+ // (void)memcpy(pStart1, pStart, sizeof(bool)); + totalLen += sizeof(bool); + *(int32_t*)(pResultInfo->convertJson + 4) = totalLen; pResultInfo->pData = pResultInfo->convertJson; return TSDB_CODE_SUCCESS; @@ -2367,7 +2376,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { } if (pResultInfo->pData == NULL) { - tscError("estimateJsonLen error: pData is NULL"); + tscError("setResultDataPtr error: pData is NULL"); return TSDB_CODE_TSC_INTERNAL_ERROR; } From d01a3481bca8a3e8bbcfc6da5ec08570a719314e Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 18 Oct 2024 16:16:57 +0800 Subject: [PATCH 013/127] block buf len check --- include/common/tdatablock.h | 2 +- source/client/src/clientImpl.c | 14 ++++++++++++-- source/client/src/clientMsgHandler.c | 10 ++++++---- source/common/src/tdatablock.c | 13 ++++++++++++- source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 6 +++--- source/dnode/mnode/impl/src/mndShow.c | 6 +++--- source/dnode/vnode/src/tq/tqScan.c | 5 +++-- source/libs/command/src/command.c | 5 +++-- source/libs/command/src/explain.c | 5 +++-- source/libs/executor/src/dataDispatcher.c | 13 +++++++------ source/libs/stream/src/streamDispatch.c | 10 ++++++---- 11 files changed, 59 insertions(+), 30 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 99cdb53103..c6f0b4d517 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -266,7 +266,7 @@ SColumnInfoData createColumnInfoData(int16_t type, int32_t bytes, int16_t colId) int32_t bdGetColumnInfoData(const SSDataBlock* pBlock, int32_t index, SColumnInfoData** pColInfoData); int32_t blockGetEncodeSize(const SSDataBlock* pBlock); -int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols); +int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataLen, int32_t numOfCols); int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos); // for debug diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index d8c01ee17c..69b6fe312c 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2355,12 +2355,22 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo) { return TSDB_CODE_SUCCESS; } -int32_t checkResultInfo(SReqResultInfo* pResultInfo) { +int32_t resultInfoSafeCheck(SReqResultInfo* pResultInfo) { if (pResultInfo->totalRows < pResultInfo->numOfRows) { tscError("checkResultInfo error: totalRows:%" PRId64 " < numOfRows:%" PRId64, pResultInfo->totalRows, pResultInfo->numOfRows); return TSDB_CODE_TSC_INTERNAL_ERROR; } + for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { + if (pResultInfo->fields[i].bytes <= 0) { + tscError("checkResultInfo error: bytes:%d <= 0", pResultInfo->fields[i].bytes); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + if(!IS_VAR_DATA_TYPE(pResultInfo->fields[i].type) && TYPE_BYTES[pResultInfo->fields[i].type] != pResultInfo->fields[i].bytes) { + tscError("checkResultInfo error: type:%d bytes:%d != %d", pResultInfo->fields[i].type, pResultInfo->fields[i].bytes, TYPE_BYTES[pResultInfo->fields[i].type]); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + } return TSDB_CODE_SUCCESS; } @@ -2472,7 +2482,7 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { if (convertUcs4) { code = doConvertUCS4(pResultInfo, colLength); } - code = checkResultInfo(pResultInfo); + code = resultInfoSafeCheck(pResultInfo); return code; } diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 
aef3cef1c5..f8a0f384de 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -588,7 +588,8 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { return code; } - size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { code = terrno; @@ -603,7 +604,7 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, SHOW_VARIABLES_RESULT_COLS); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, SHOW_VARIABLES_RESULT_COLS); if(len < 0) { uError("buildShowVariablesRsp error, len:%d", len); code = terrno; @@ -741,7 +742,8 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr return code; } - size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { code = terrno; @@ -757,7 +759,7 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, COMPACT_DB_RESULT_COLS); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, COMPACT_DB_RESULT_COLS); if(len < 0) { uError("buildRetriveTableRspForCompactDb error, len:%d", len); code = terrno; diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 60d1f34cf7..2da83a6b74 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3041,7 +3041,7 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha } // return length of encoded data, return -1 if failed -int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { +int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) { blockDataCheck(pBlock, false); int32_t dataLen = 0; @@ -3106,9 +3106,11 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { size_t metaSize = 0; if (IS_VAR_DATA_TYPE(pColRes->info.type)) { metaSize = numOfRows * sizeof(int32_t); + if(dataLen + metaSize > dataBuflen) goto _exit; memcpy(data, pColRes->varmeta.offset, metaSize); } else { metaSize = BitmapLen(numOfRows); + if(dataLen + metaSize > dataBuflen) goto _exit; memcpy(data, pColRes->nullbitmap, metaSize); } @@ -3127,12 +3129,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { } colSizes[col] += colSize; dataLen += colSize; + if(dataLen > dataBuflen) goto _exit; (void) memmove(data, pColData, colSize); data += colSize; } } else { colSizes[col] = colDataGetLength(pColRes, numOfRows); dataLen += colSizes[col]; + if(dataLen > dataBuflen) goto _exit; if (pColRes->pData != NULL) { (void) memmove(data, pColRes->pData, colSizes[col]); } @@ -3156,7 +3160,14 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* 
data, int32_t numOfCols) { *actualLen = dataLen; *groupId = pBlock->info.id.groupId; + if (dataLen > dataBuflen) goto _exit; + return dataLen; + +_exit: + uError("blockEncode dataLen:%d, dataBuflen:%" PRIx64, dataLen, dataBuflen); + terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; + return -1; } int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos) { diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 1446faab77..0f3f42acb9 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -548,8 +548,8 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { } size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); - size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + - blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(numOfCols); + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + dataEncodeSize; SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); if (pRsp == NULL) { @@ -574,7 +574,7 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { pStart += sizeof(SSysTableSchema); } - int32_t len = blockEncode(pBlock, pStart, numOfCols); + int32_t len = blockEncode(pBlock, pStart, dataEncodeSize, numOfCols); if (len < 0) { dError("failed to retrieve data since %s", tstrerror(code)); blockDataDestroy(pBlock); diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 264fea3476..01dc329d04 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -333,8 +333,8 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { mDebug("show:0x%" PRIx64 ", stop retrieve data, rowsRead:%d numOfRows:%d", pShow->id, rowsRead, pShow->numOfRows); } - size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + - blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)); + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + dataEncodeSize; SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); if (pRsp == NULL) { @@ -361,7 +361,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { pStart += sizeof(SSysTableSchema); } - int32_t len = blockEncode(pBlock, pStart, pShow->pMeta->numOfColumns); + int32_t len = blockEncode(pBlock, pStart, dataEncodeSize, pShow->pMeta->numOfColumns); if(len < 0){ mError("show:0x%" PRIx64 ", failed to retrieve data since %s", pShow->id, tstrerror(code)); code = terrno; diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index dbc1b16cf5..5632297a2b 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -16,7 +16,8 @@ #include "tq.h" int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) { - int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + blockGetEncodeSize(pBlock); + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + dataEncodeSize; void* buf = taosMemoryCalloc(1, dataStrLen); if (buf == NULL) { return terrno; @@ -28,7 +29,7 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t pRetrieve->compressed = 
0; pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows); - int32_t actualLen = blockEncode(pBlock, pRetrieve->data, numOfCols); + int32_t actualLen = blockEncode(pBlock, pRetrieve->data, dataEncodeSize, numOfCols); if(actualLen < 0){ taosMemoryFree(buf); return terrno; diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index b2417a8597..db3e553c7a 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -35,7 +35,8 @@ extern SConfig* tsCfg; static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** pRsp) { - size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { return terrno; @@ -49,7 +50,7 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(numOfCols); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, numOfCols); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, numOfCols); if(len < 0) { taosMemoryFree(*pRsp); return terrno; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index b82bba250f..4f4f67caf7 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -1966,7 +1966,8 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { pBlock->info.rows = rowNum; - int32_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + int32_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, rspSize); if (NULL == rsp) { @@ -1977,7 +1978,7 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { rsp->completed = 1; rsp->numOfRows = htobe64((int64_t)rowNum); - int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, taosArrayGetSize(pBlock->pDataBlock)); + int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, taosArrayGetSize(pBlock->pDataBlock)); if(len < 0) { qError("qExplainGetRspFromCtx: blockEncode failed"); QRY_ERR_JRET(terrno); diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 8acd569358..8ab1558172 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -84,17 +84,18 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pBuf->useSize = sizeof(SDataCacheEntry); { + size_t dataEncodeSize = pBuf->allocSize + 8; if ((pBuf->allocSize > tsCompressMsgSize) && (tsCompressMsgSize > 0) && pHandle->pManager->cfg.compress) { if (pHandle->pCompressBuf == NULL) { // allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size - pHandle->pCompressBuf = taosMemoryMalloc(pBuf->allocSize + 8); + pHandle->pCompressBuf = taosMemoryMalloc(dataEncodeSize); if (NULL == pHandle->pCompressBuf) { QRY_RET(terrno); } - pHandle->bufSize = pBuf->allocSize + 8; + pHandle->bufSize = dataEncodeSize; } else { - if (pHandle->bufSize < pBuf->allocSize + 8) { - pHandle->bufSize = pBuf->allocSize + 8; + if (pHandle->bufSize < dataEncodeSize) { + 
pHandle->bufSize = dataEncodeSize; void* p = taosMemoryRealloc(pHandle->pCompressBuf, pHandle->bufSize); if (p != NULL) { pHandle->pCompressBuf = p; @@ -105,7 +106,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* } } - int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, numOfCols); + int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, dataEncodeSize, numOfCols); if(dataLen < 0) { qError("failed to encode data block, code: %d", dataLen); return terrno; @@ -123,7 +124,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* TAOS_MEMCPY(pEntry->data, pHandle->pCompressBuf, dataLen); } } else { - pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, numOfCols); + pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, dataEncodeSize, numOfCols); if(pEntry->dataLen < 0) { qError("failed to encode data block, code: %d", pEntry->dataLen); return terrno; diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 62d60ff664..2b91f0f806 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -145,7 +145,8 @@ int32_t streamTaskBroadcastRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* r static int32_t buildStreamRetrieveReq(SStreamTask* pTask, const SSDataBlock* pBlock, SStreamRetrieveReq* req) { SRetrieveTableRsp* pRetrieve = NULL; - int32_t len = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + int32_t len = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; pRetrieve = taosMemoryCalloc(1, len); if (pRetrieve == NULL) return terrno; @@ -162,7 +163,7 @@ static int32_t buildStreamRetrieveReq(SStreamTask* pTask, const SSDataBlock* pBl pRetrieve->ekey = htobe64(pBlock->info.window.ekey); pRetrieve->version = htobe64(pBlock->info.version); - int32_t actualLen = blockEncode(pBlock, pRetrieve->data + PAYLOAD_PREFIX_LEN, numOfCols); + int32_t actualLen = blockEncode(pBlock, pRetrieve->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, numOfCols); if (actualLen < 0) { taosMemoryFree(pRetrieve); return terrno; @@ -1203,7 +1204,8 @@ int32_t streamTaskSendCheckpointSourceRsp(SStreamTask* pTask) { } int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatchReq* pReq) { - int32_t dataStrLen = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock) + PAYLOAD_PREFIX_LEN; + size_t dataEncodeSize = blockGetEncodeSize(pBlock); + int32_t dataStrLen = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; void* buf = taosMemoryCalloc(1, dataStrLen); if (buf == NULL) { return terrno; @@ -1225,7 +1227,7 @@ int32_t streamAddBlockIntoDispatchMsg(const SSDataBlock* pBlock, SStreamDispatch int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock); pRetrieve->numOfCols = htonl(numOfCols); - int32_t actualLen = blockEncode(pBlock, pRetrieve->data + PAYLOAD_PREFIX_LEN, numOfCols); + int32_t actualLen = blockEncode(pBlock, pRetrieve->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, numOfCols); if (actualLen < 0) { taosMemoryFree(buf); return terrno; From f35d2847b91388ace4baa16d3aacfd44167f6115 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Sun, 20 Oct 2024 22:17:16 +0800 Subject: [PATCH 014/127] enh: blockencode --- source/client/src/clientMsgHandler.c | 12 ++++++------ source/dnode/mgmt/mgmt_dnode/src/dmHandle.c | 6 +++--- source/dnode/mnode/impl/src/mndShow.c | 6 +++--- 
source/dnode/vnode/src/tq/tqScan.c | 6 +++--- source/libs/command/src/command.c | 6 +++--- source/libs/command/src/explain.c | 6 +++--- source/libs/executor/src/dataDispatcher.c | 14 +++++++------- 7 files changed, 28 insertions(+), 28 deletions(-) diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index f8a0f384de..099a224f85 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -588,8 +588,8 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { return code; } - size_t dataEncodeSize = blockGetEncodeSize(pBlock); - size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { code = terrno; @@ -604,7 +604,7 @@ static int32_t buildShowVariablesRsp(SArray* pVars, SRetrieveTableRsp** pRsp) { (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(SHOW_VARIABLES_RESULT_COLS); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, SHOW_VARIABLES_RESULT_COLS); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, SHOW_VARIABLES_RESULT_COLS); if(len < 0) { uError("buildShowVariablesRsp error, len:%d", len); code = terrno; @@ -742,8 +742,8 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr return code; } - size_t dataEncodeSize = blockGetEncodeSize(pBlock); - size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { code = terrno; @@ -759,7 +759,7 @@ static int32_t buildRetriveTableRspForCompactDb(SCompactDbRsp* pCompactDb, SRetr (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(COMPACT_DB_RESULT_COLS); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, COMPACT_DB_RESULT_COLS); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, COMPACT_DB_RESULT_COLS); if(len < 0) { uError("buildRetriveTableRspForCompactDb error, len:%d", len); code = terrno; diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c index 0f3f42acb9..d6b792ca74 100644 --- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c +++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c @@ -548,8 +548,8 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { } size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); - size_t dataEncodeSize = blockGetEncodeSize(pBlock); - size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + dataEncodeSize; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * numOfCols + dataEncodeBufSize; SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); if (pRsp == NULL) { @@ -574,7 +574,7 @@ int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) { pStart += sizeof(SSysTableSchema); } - int32_t len = blockEncode(pBlock, pStart, dataEncodeSize, numOfCols); + int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, numOfCols); if (len < 0) { dError("failed 
to retrieve data since %s", tstrerror(code)); blockDataDestroy(pBlock); diff --git a/source/dnode/mnode/impl/src/mndShow.c b/source/dnode/mnode/impl/src/mndShow.c index 01dc329d04..29f6c32dbe 100644 --- a/source/dnode/mnode/impl/src/mndShow.c +++ b/source/dnode/mnode/impl/src/mndShow.c @@ -333,8 +333,8 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { mDebug("show:0x%" PRIx64 ", stop retrieve data, rowsRead:%d numOfRows:%d", pShow->id, rowsRead, pShow->numOfRows); } - size_t dataEncodeSize = blockGetEncodeSize(pBlock); - size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + dataEncodeSize; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pShow->pMeta->numOfColumns + dataEncodeBufSize; SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size); if (pRsp == NULL) { @@ -361,7 +361,7 @@ static int32_t mndProcessRetrieveSysTableReq(SRpcMsg *pReq) { pStart += sizeof(SSysTableSchema); } - int32_t len = blockEncode(pBlock, pStart, dataEncodeSize, pShow->pMeta->numOfColumns); + int32_t len = blockEncode(pBlock, pStart, dataEncodeBufSize, pShow->pMeta->numOfColumns); if(len < 0){ mError("show:0x%" PRIx64 ", failed to retrieve data since %s", pShow->id, tstrerror(code)); code = terrno; diff --git a/source/dnode/vnode/src/tq/tqScan.c b/source/dnode/vnode/src/tq/tqScan.c index 5632297a2b..14cf41f1f9 100644 --- a/source/dnode/vnode/src/tq/tqScan.c +++ b/source/dnode/vnode/src/tq/tqScan.c @@ -16,8 +16,8 @@ #include "tq.h" int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t numOfCols, int8_t precision) { - size_t dataEncodeSize = blockGetEncodeSize(pBlock); - int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + dataEncodeSize; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + int32_t dataStrLen = sizeof(SRetrieveTableRspForTmq) + dataEncodeBufSize; void* buf = taosMemoryCalloc(1, dataStrLen); if (buf == NULL) { return terrno; @@ -29,7 +29,7 @@ int32_t tqAddBlockDataToRsp(const SSDataBlock* pBlock, SMqDataRsp* pRsp, int32_t pRetrieve->compressed = 0; pRetrieve->numOfRows = htobe64((int64_t)pBlock->info.rows); - int32_t actualLen = blockEncode(pBlock, pRetrieve->data, dataEncodeSize, numOfCols); + int32_t actualLen = blockEncode(pBlock, pRetrieve->data, dataEncodeBufSize, numOfCols); if(actualLen < 0){ taosMemoryFree(buf); return terrno; diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index db3e553c7a..471a079e4c 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -35,8 +35,8 @@ extern SConfig* tsCfg; static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRetrieveTableRsp** pRsp) { - size_t dataEncodeSize = blockGetEncodeSize(pBlock); - size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + size_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN; *pRsp = taosMemoryCalloc(1, rspSize); if (NULL == *pRsp) { return terrno; @@ -50,7 +50,7 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe (*pRsp)->numOfRows = htobe64((int64_t)pBlock->info.rows); (*pRsp)->numOfCols = htonl(numOfCols); - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, numOfCols); + int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, 
numOfCols); if(len < 0) { taosMemoryFree(*pRsp); return terrno; diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 4f4f67caf7..42c214fac7 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -1966,8 +1966,8 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { pBlock->info.rows = rowNum; - size_t dataEncodeSize = blockGetEncodeSize(pBlock); - int32_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeSize + PAYLOAD_PREFIX_LEN; + size_t dataEncodeBufSize = blockGetEncodeSize(pBlock); + int32_t rspSize = sizeof(SRetrieveTableRsp) + dataEncodeBufSize + PAYLOAD_PREFIX_LEN; SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, rspSize); if (NULL == rsp) { @@ -1978,7 +1978,7 @@ int32_t qExplainGetRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) { rsp->completed = 1; rsp->numOfRows = htobe64((int64_t)rowNum); - int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, dataEncodeSize, taosArrayGetSize(pBlock->pDataBlock)); + int32_t len = blockEncode(pBlock, rsp->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, taosArrayGetSize(pBlock->pDataBlock)); if(len < 0) { qError("qExplainGetRspFromCtx: blockEncode failed"); QRY_ERR_JRET(terrno); diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 8ab1558172..b9fbf06ef6 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -84,18 +84,18 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pBuf->useSize = sizeof(SDataCacheEntry); { - size_t dataEncodeSize = pBuf->allocSize + 8; + size_t dataEncodeBufSize = pBuf->allocSize + 8; if ((pBuf->allocSize > tsCompressMsgSize) && (tsCompressMsgSize > 0) && pHandle->pManager->cfg.compress) { if (pHandle->pCompressBuf == NULL) { // allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size - pHandle->pCompressBuf = taosMemoryMalloc(dataEncodeSize); + pHandle->pCompressBuf = taosMemoryMalloc(dataEncodeBufSize); if (NULL == pHandle->pCompressBuf) { QRY_RET(terrno); } - pHandle->bufSize = dataEncodeSize; + pHandle->bufSize = dataEncodeBufSize; } else { - if (pHandle->bufSize < dataEncodeSize) { - pHandle->bufSize = dataEncodeSize; + if (pHandle->bufSize < dataEncodeBufSize) { + pHandle->bufSize = dataEncodeBufSize; void* p = taosMemoryRealloc(pHandle->pCompressBuf, pHandle->bufSize); if (p != NULL) { pHandle->pCompressBuf = p; @@ -106,7 +106,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* } } - int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, dataEncodeSize, numOfCols); + int32_t dataLen = blockEncode(pInput->pData, pHandle->pCompressBuf, dataEncodeBufSize, numOfCols); if(dataLen < 0) { qError("failed to encode data block, code: %d", dataLen); return terrno; @@ -124,7 +124,7 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* TAOS_MEMCPY(pEntry->data, pHandle->pCompressBuf, dataLen); } } else { - pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, dataEncodeSize, numOfCols); + pEntry->dataLen = blockEncode(pInput->pData, pEntry->data, pBuf->allocSize, numOfCols); if(pEntry->dataLen < 0) { qError("failed to encode data block, code: %d", pEntry->dataLen); return terrno; From 70c79abddca56cbe91a6f4b42bc7d83f09d22c6d Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Mon, 21 Oct 2024 09:27:46 +0800 Subject: [PATCH 015/127] fix: concat null failed --- 
source/client/src/clientImpl.c | 2 +- source/common/src/tdatablock.c | 2 +- source/libs/executor/src/dataDispatcher.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 69b6fe312c..323a5aed3f 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2362,7 +2362,7 @@ int32_t resultInfoSafeCheck(SReqResultInfo* pResultInfo) { return TSDB_CODE_TSC_INTERNAL_ERROR; } for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { - if (pResultInfo->fields[i].bytes <= 0) { + if (pResultInfo->fields[i].bytes < 0) { tscError("checkResultInfo error: bytes:%d <= 0", pResultInfo->fields[i].bytes); return TSDB_CODE_TSC_INTERNAL_ERROR; } diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 2da83a6b74..d41b433468 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3165,7 +3165,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, in return dataLen; _exit: - uError("blockEncode dataLen:%d, dataBuflen:%" PRIx64, dataLen, dataBuflen); + uError("blockEncode dataLen:%d, dataBuflen:%" PRIu64, dataLen, dataBuflen); terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; return -1; } diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index b9fbf06ef6..d358cedfb7 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -84,10 +84,10 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* pBuf->useSize = sizeof(SDataCacheEntry); { + // allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size size_t dataEncodeBufSize = pBuf->allocSize + 8; if ((pBuf->allocSize > tsCompressMsgSize) && (tsCompressMsgSize > 0) && pHandle->pManager->cfg.compress) { if (pHandle->pCompressBuf == NULL) { - // allocate additional 8 bytes to avoid invalid write if compress failed to reduce the size pHandle->pCompressBuf = taosMemoryMalloc(dataEncodeBufSize); if (NULL == pHandle->pCompressBuf) { QRY_RET(terrno); From 7094a8ddcfd49be3074d6001bd96d0e7fa41c9db Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 22 Oct 2024 14:15:48 +0800 Subject: [PATCH 016/127] fix: bytes of repeat and timezone --- include/libs/scalar/scalar.h | 1 + source/client/src/clientImpl.c | 21 ------ source/libs/executor/src/dataDispatcher.c | 78 +++++++++++++++++++++++ source/libs/function/src/builtins.c | 12 ++-- source/libs/scalar/src/sclfunc.c | 4 ++ 5 files changed, 91 insertions(+), 25 deletions(-) diff --git a/include/libs/scalar/scalar.h b/include/libs/scalar/scalar.h index fd936dd087..4b89a6a439 100644 --- a/include/libs/scalar/scalar.h +++ b/include/libs/scalar/scalar.h @@ -105,6 +105,7 @@ int32_t timeTruncateFunction(SScalarParam *pInput, int32_t inputNum, SScalarPara int32_t timeDiffFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t nowFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); +int32_t timeZoneStrLen(); int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t weekdayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); int32_t dayofweekFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput); diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 
323a5aed3f..a3e327ee51 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2355,26 +2355,6 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo) { return TSDB_CODE_SUCCESS; } -int32_t resultInfoSafeCheck(SReqResultInfo* pResultInfo) { - if (pResultInfo->totalRows < pResultInfo->numOfRows) { - tscError("checkResultInfo error: totalRows:%" PRId64 " < numOfRows:%" PRId64, pResultInfo->totalRows, - pResultInfo->numOfRows); - return TSDB_CODE_TSC_INTERNAL_ERROR; - } - for (int32_t i = 0; i < pResultInfo->numOfCols; ++i) { - if (pResultInfo->fields[i].bytes < 0) { - tscError("checkResultInfo error: bytes:%d <= 0", pResultInfo->fields[i].bytes); - return TSDB_CODE_TSC_INTERNAL_ERROR; - } - if(!IS_VAR_DATA_TYPE(pResultInfo->fields[i].type) && TYPE_BYTES[pResultInfo->fields[i].type] != pResultInfo->fields[i].bytes) { - tscError("checkResultInfo error: type:%d bytes:%d != %d", pResultInfo->fields[i].type, pResultInfo->fields[i].bytes, TYPE_BYTES[pResultInfo->fields[i].type]); - return TSDB_CODE_TSC_INTERNAL_ERROR; - } - } - - return TSDB_CODE_SUCCESS; -} - int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { if (pResultInfo == NULL || pResultInfo->numOfCols <= 0 || pResultInfo->fields == NULL) { tscError("setResultDataPtr paras error"); @@ -2482,7 +2462,6 @@ int32_t setResultDataPtr(SReqResultInfo* pResultInfo, bool convertUcs4) { if (convertUcs4) { code = doConvertUCS4(pResultInfo, colLength); } - code = resultInfoSafeCheck(pResultInfo); return code; } diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index d358cedfb7..983309b7c2 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -54,6 +54,78 @@ typedef struct SDataDispatchHandle { TdThreadMutex mutex; } SDataDispatchHandle; +static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* pInput) { + if (pInput == NULL || pInput->pData == NULL || pInput->pData->info.rows <= 0) { + qError("invalid input data"); + return TSDB_CODE_QRY_INVALID_INPUT; + } + SDataBlockDescNode* pSchema = pHandle->pSchema; + if (pSchema == NULL || pSchema->outputRowSize > pInput->pData->info.rowSize) { + qError("invalid schema"); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + SNode* pNode; + int32_t numOfCols = 0; + int32_t realOutputRowSize = 0; + FOREACH(pNode, pHandle->pSchema->pSlots) { + SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; + if (pSlotDesc->output) { + realOutputRowSize += pSlotDesc->dataType.bytes; + ++numOfCols; + } else { + break; + } + } + if (realOutputRowSize != pSchema->outputRowSize) { + qError("invalid schema, realOutputRowSize:%d, outputRowSize:%d", realOutputRowSize, pSchema->outputRowSize); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + if (numOfCols > taosArrayGetSize(pInput->pData->pDataBlock)) { + qError("invalid column number, schema:%d, input:%" PRIu64, numOfCols, taosArrayGetSize(pInput->pData->pDataBlock)); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + int32_t colNum = 0; + FOREACH(pNode, pHandle->pSchema->pSlots) { + SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; + if (pSlotDesc->output) { + SColumnInfoData* pColInfoData = taosArrayGet(pInput->pData->pDataBlock, colNum); + if (pColInfoData == NULL) { + return -1; + } + if (pColInfoData->info.bytes < 0) { + qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + if (!IS_VAR_DATA_TYPE(pColInfoData->info.type) && + 
TYPE_BYTES[pColInfoData->info.type] != pColInfoData->info.bytes) { + qError("invalid column bytes, schema:%d, input:%d", TYPE_BYTES[pColInfoData->info.type], + pColInfoData->info.bytes); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + if (pColInfoData->info.type != pSlotDesc->dataType.type) { + qError("invalid column type, schema:%d, input:%d", pSlotDesc->dataType.type, pColInfoData->info.type); + return TSDB_CODE_QRY_INVALID_INPUT; + } + if (pColInfoData->info.bytes != pSlotDesc->dataType.bytes) { + qError("invalid column bytes, schema:%d, input:%d", pSlotDesc->dataType.bytes, pColInfoData->info.bytes); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + if (IS_INVALID_TYPE(pColInfoData->info.type)) { + qError("invalid column type, type:%d", pColInfoData->info.type); + return TSDB_CODE_TSC_INTERNAL_ERROR; + } + ++colNum; + } + } + + + return TSDB_CODE_SUCCESS; +} + // clang-format off // data format: // +----------------+------------------+--------------+--------------+------------------+--------------------------------------------+------------------------------------+-------------+-----------+-------------+-----------+ @@ -67,6 +139,12 @@ static int32_t toDataCacheEntry(SDataDispatchHandle* pHandle, const SInputData* int32_t numOfCols = 0; SNode* pNode; + int32_t code = inputSafetyCheck(pHandle, pInput); + if (code) { + qError("failed to check input data, code:%d", code); + return code; + } + FOREACH(pNode, pHandle->pSchema->pSlots) { SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; if (pSlotDesc->output) { diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 1fd99125a0..1d3a43c498 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -695,7 +695,8 @@ static int32_t translateIsFilledPseudoColumn(SFunctionNode* pFunc, char* pErrBuf } static int32_t translateTimezone(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { - pFunc->node.resType = (SDataType){.bytes = TD_TIMEZONE_LEN, .type = TSDB_DATA_TYPE_BINARY}; + int32_t bytesLen = timeZoneStrLen(); + pFunc->node.resType = (SDataType){.bytes = bytesLen, .type = TSDB_DATA_TYPE_BINARY}; return TSDB_CODE_SUCCESS; } @@ -2465,9 +2466,12 @@ static int32_t translateRepeat(SFunctionNode* pFunc, char* pErrBuf, int32_t len) uint8_t type = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->type; int32_t orgLen = getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0))->bytes; - int32_t count = TMAX((int32_t)((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i, 1); - - int32_t resLen = orgLen * count; + int32_t resLen; + if (nodeType(nodesListGetNode(pFunc->pParameterList, 1)) == QUERY_NODE_VALUE) { + resLen = orgLen * TMAX((int32_t)((SValueNode*)nodesListGetNode(pFunc->pParameterList, 1))->datum.i, 1); + } else { + resLen = TSDB_MAX_BINARY_LEN; + } pFunc->node.resType = (SDataType){.bytes = resLen, .type = type}; return TSDB_CODE_SUCCESS; } diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 6f6362a8f7..9aa67c441b 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2662,6 +2662,10 @@ int32_t todayFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOut return TSDB_CODE_SUCCESS; } +int32_t timeZoneStrLen() { + return sizeof(VarDataLenT) + strlen(tsTimezoneStr); +} + int32_t timezoneFunction(SScalarParam *pInput, int32_t inputNum, SScalarParam *pOutput) { char output[TD_TIMEZONE_LEN + VARSTR_HEADER_SIZE] = {0}; (void)memcpy(varDataVal(output), tsTimezoneStr, 
TD_TIMEZONE_LEN); From f57e3f3c8511222ce74a03d574b233f27cd9d342 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 22 Oct 2024 16:22:27 +0800 Subject: [PATCH 017/127] enh: safetyCheck --- include/common/tglobal.h | 1 + source/common/src/tdatablock.c | 2 +- source/common/src/tglobal.c | 11 +++++++++-- source/libs/executor/src/dataDispatcher.c | 3 +++ 4 files changed, 14 insertions(+), 3 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 41fb692e42..a13693d38c 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -153,6 +153,7 @@ extern bool tsEnableCrashReport; extern char *tsTelemUri; extern char *tsClientCrashReportUri; extern char *tsSvrCrashReportUri; +extern bool tsEnableSafetyCheck; // query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d41b433468..23efe48209 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3165,7 +3165,7 @@ int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, in return dataLen; _exit: - uError("blockEncode dataLen:%d, dataBuflen:%" PRIu64, dataLen, dataBuflen); + uError("blockEncode dataLen:%d, dataBuflen:%zu", dataLen, dataBuflen); terrno = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; return -1; } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 7cff5de008..06791c5ec3 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -139,6 +139,7 @@ bool tsEnableCrashReport = true; #endif char *tsClientCrashReportUri = "/ccrashreport"; char *tsSvrCrashReportUri = "/dcrashreport"; +bool tsEnableSafetyCheck = true; // schemaless bool tsSmlDot2Underline = true; @@ -607,6 +608,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { TAOS_CHECK_RETURN( cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); + TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableSafetyCheck", tsEnableSafetyCheck, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); tsNumOfRpcThreads = tsNumOfCores / 2; tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); @@ -1299,6 +1301,9 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "tsmaDataDeleteMark"); tsmaDataDeleteMark = pItem->i32; + + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableSafetyCheck"); + tsEnableSafetyCheck = pItem->bval; TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -2039,7 +2044,8 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"s3UploadDelaySec", &tsS3UploadDelaySec}, {"supportVnodes", &tsNumOfSupportVnodes}, {"experimental", &tsExperimental}, - {"maxTsmaNum", &tsMaxTsmaNum}}; + {"maxTsmaNum", &tsMaxTsmaNum}, + {"enableSafetyCheck", &tsEnableSafetyCheck}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); @@ -2295,7 +2301,8 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) { {"experimental", &tsExperimental}, {"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags}, {"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay}, - {"tsmaDataDeleteMark", &tsmaDataDeleteMark}}; + {"tsmaDataDeleteMark", &tsmaDataDeleteMark}, + 
{"enableSafetyCheck", &tsEnableSafetyCheck}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 983309b7c2..5c3bc88b5b 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -55,6 +55,9 @@ typedef struct SDataDispatchHandle { } SDataDispatchHandle; static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* pInput) { + if(!tsEnableSafetyCheck) { + return TSDB_CODE_SUCCESS; + } if (pInput == NULL || pInput->pData == NULL || pInput->pData->info.rows <= 0) { qError("invalid input data"); return TSDB_CODE_QRY_INVALID_INPUT; From 2ae51f311847a00450e22f4b4eab2a475e4b657f Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 23 Oct 2024 06:20:51 +0800 Subject: [PATCH 018/127] enh: safetyCheck config --- source/common/src/tglobal.c | 2 +- source/libs/executor/src/dataDispatcher.c | 2 +- tests/pytest/util/dnodes.py | 4 +++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 06791c5ec3..87cf1d7587 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -139,7 +139,7 @@ bool tsEnableCrashReport = true; #endif char *tsClientCrashReportUri = "/ccrashreport"; char *tsSvrCrashReportUri = "/dcrashreport"; -bool tsEnableSafetyCheck = true; +bool tsEnableSafetyCheck = false; // schemaless bool tsSmlDot2Underline = true; diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 5c3bc88b5b..b7422d849c 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -86,7 +86,7 @@ static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* } if (numOfCols > taosArrayGetSize(pInput->pData->pDataBlock)) { - qError("invalid column number, schema:%d, input:%" PRIu64, numOfCols, taosArrayGetSize(pInput->pData->pDataBlock)); + qError("invalid column number, schema:%d, input:%zu", numOfCols, taosArrayGetSize(pInput->pData->pDataBlock)); return TSDB_CODE_QRY_INVALID_INPUT; } diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index bb6f8ff030..7a836c181b 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -48,6 +48,7 @@ class TDSimClient: "telemetryReporting": "0", "tqDebugflag": "135", "stDebugflag":"135", + "enableSafetyCheck":"1" } def getLogDir(self): @@ -149,7 +150,8 @@ class TDDnode: "statusInterval": "1", "enableQueryHb": "1", "supportVnodes": "1024", - "telemetryReporting": "0" + "telemetryReporting": "0", + "enableSafetyCheck":"1" } def init(self, path, remoteIP = ""): From 00d374358a42a721cb7f4c8c84f7992ee39476c1 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 23 Oct 2024 11:38:46 +0800 Subject: [PATCH 019/127] fix: rowSize check --- source/libs/executor/src/dataDispatcher.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index b7422d849c..732b2566a9 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -63,7 +63,7 @@ static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* return TSDB_CODE_QRY_INVALID_INPUT; } SDataBlockDescNode* pSchema = pHandle->pSchema; - 
if (pSchema == NULL || pSchema->outputRowSize > pInput->pData->info.rowSize) { + if (pSchema == NULL || pSchema->totalRowSize != pInput->pData->info.rowSize) { qError("invalid schema"); return TSDB_CODE_QRY_INVALID_INPUT; } @@ -77,6 +77,7 @@ static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* realOutputRowSize += pSlotDesc->dataType.bytes; ++numOfCols; } else { + // Slots must be sorted, and slots with 'output' set to true must come first break; } } From ea6c90e023af33ea8e6a6823ca0d195fdb928bfa Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 23 Oct 2024 13:52:53 +0800 Subject: [PATCH 020/127] fix: repeat.csv --- tests/army/query/function/ans/repeat.csv | 68 ++++++++++++------------ 1 file changed, 34 insertions(+), 34 deletions(-) diff --git a/tests/army/query/function/ans/repeat.csv b/tests/army/query/function/ans/repeat.csv index d8f8b3050f..c303f164a9 100644 --- a/tests/army/query/function/ans/repeat.csv +++ b/tests/army/query/function/ans/repeat.csv @@ -108,13 +108,13 @@ taos> select repeat(nch1, id) from ts_4893.meters where id > 0 order by ts limit novelnovelnovelnovelnovel | taos> select repeat(var1, id) from ts_4893.meters where id > 0 order by ts limit 5 - repeat(var1, id) | -=================== - person | - novelnovel | - plateplateplate | - 一二三四五六... | - updateupdateu... | + repeat(var1, id) | +================================= + person | + novelnovel | + plateplateplate | + 一二三四五六七八九十一二三... | + updateupdateupdateupdateupdate | taos> select repeat('nch1', id) from ts_4893.meters where id > 0 order by ts limit 5 repeat('nch1', id) | @@ -229,32 +229,32 @@ taos> select repeat(var1, 3) from ts_4893.meters order by ts limit 10 plateplateplate | taos> select repeat(name, groupid) from ts_4893.d0 order by ts limit 10 - repeat(name, groupid) | -======================== - lili | - x | - lili | - x | - lili | - taos | - haha | - taos | - taos | - haha | + repeat(name, groupid) | +================================= + lili | + x | + lili | + x | + lili | + taos | + haha | + taos | + taos | + haha | taos> select repeat(name, groupid) from ts_4893.meters order by ts limit 10 - repeat(name, groupid) | -======================== - lili | - x | - lili | - x | - lili | - taos | - haha | - taos | - taos | - haha | + repeat(name, groupid) | +================================= + lili | + x | + lili | + x | + lili | + taos | + haha | + taos | + taos | + haha | taos> select repeat(nch1, groupid) from ts_4893.d0 order by ts limit 10 repeat(nch1, groupid) | @@ -355,9 +355,9 @@ taos> select repeat('你好', 2) 你好你好 | taos> select repeat('abc', length('abc')) - repeat('abc', length('abc')) | -=============================== - abcabcabc | + repeat('abc', length('abc')) | +================================= + abcabcabc | taos> select repeat(concat('A', 'B', 'C'), 3) repeat(concat('A', 'B', 'C'), 3) | From e8835c2caa0004dc5db18983ce074a389a90cd34 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 23 Oct 2024 15:37:49 +0800 Subject: [PATCH 021/127] blockDataCheck --- include/common/tdatablock.h | 2 +- include/common/tglobal.h | 7 +++- source/common/src/tdatablock.c | 39 ++++++++++++------- source/common/src/tglobal.c | 16 ++++---- source/libs/executor/src/dataDispatcher.c | 2 +- .../libs/executor/src/dynqueryctrloperator.c | 7 +++- source/libs/executor/src/executor.c | 13 +++++-- source/libs/executor/src/executorInt.c | 9 +++-- source/libs/executor/src/groupcacheoperator.c | 2 +- source/libs/executor/src/mergeoperator.c | 10 +++-- 
source/libs/executor/src/operator.c | 20 +++++++--- source/libs/executor/src/sortoperator.c | 11 ++++-- tests/pytest/util/dnodes.py | 4 +- 13 files changed, 93 insertions(+), 49 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index c6f0b4d517..3b24ef9490 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -233,7 +233,7 @@ int32_t blockDataSort(SSDataBlock* pDataBlock, SArray* pOrderInfo); * @brief find how many rows already in order start from first row */ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo); -void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk); +int32_t blockDataCheck(const SSDataBlock* pDataBlock); int32_t colInfoDataEnsureCapacity(SColumnInfoData* pColumn, uint32_t numOfRows, bool clearPayload); int32_t blockDataEnsureCapacity(SSDataBlock* pDataBlock, uint32_t numOfRows); diff --git a/include/common/tglobal.h b/include/common/tglobal.h index a13693d38c..5a52abe9ee 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -153,7 +153,12 @@ extern bool tsEnableCrashReport; extern char *tsTelemUri; extern char *tsClientCrashReportUri; extern char *tsSvrCrashReportUri; -extern bool tsEnableSafetyCheck; +extern int8_t tsSafetyCheckLevel; +enum { + TSDB_SAFETY_CHECK_LEVELL_NEVER = 0, + TSDB_SAFETY_CHECK_LEVELL_NORMAL = 1, + TSDB_SAFETY_CHECK_LEVELL_BYROW = 2, +}; // query buffer management extern int32_t tsQueryBufferSize; // maximum allowed usage buffer size in MB for each data node during query processing diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 23efe48209..57df4a6006 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -18,6 +18,7 @@ #include "tcompare.h" #include "tlog.h" #include "tname.h" +#include "tglobal.h" #define MALLOC_ALIGN_BYTES 32 @@ -3042,7 +3043,11 @@ int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, cha // return length of encoded data, return -1 if failed int32_t blockEncode(const SSDataBlock* pBlock, char* data, size_t dataBuflen, int32_t numOfCols) { - blockDataCheck(pBlock, false); + int32_t code = blockDataCheck(pBlock); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return -1; + } int32_t dataLen = 0; @@ -3297,9 +3302,13 @@ int32_t blockDecode(SSDataBlock* pBlock, const char* pData, const char** pEndPos *pEndPos = pStart; - blockDataCheck(pBlock, false); + code = blockDataCheck(pBlock); + if (code != TSDB_CODE_SUCCESS) { + terrno = code; + return code; + } - return code; + return TSDB_CODE_SUCCESS; } int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolList) { @@ -3509,20 +3518,19 @@ int32_t blockDataGetSortedRows(SSDataBlock* pDataBlock, SArray* pOrderInfo) { return nextRowIdx; } -void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) { - return; - - if (NULL == pDataBlock || pDataBlock->info.rows == 0) { - return; +#define BLOCK_DATA_CHECK_TRESSA(o) \ + if (!(o)) { \ + uError("blockDataCheck failed! 
line:%d", __LINE__); \ + return TSDB_CODE_INTERNAL_ERROR; \ + } +int32_t blockDataCheck(const SSDataBlock* pDataBlock) { + if (tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER || NULL == pDataBlock || pDataBlock->info.rows == 0) { + return TSDB_CODE_SUCCESS; } -#define BLOCK_DATA_CHECK_TRESSA(o) ; -//#define BLOCK_DATA_CHECK_TRESSA(o) A S S E R T(o) - BLOCK_DATA_CHECK_TRESSA(pDataBlock->info.rows > 0); - - if (!pDataBlock->info.dataLoad && !forceChk) { - return; + if (!pDataBlock->info.dataLoad) { + return TSDB_CODE_SUCCESS; } bool isVarType = false; @@ -3544,6 +3552,7 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) { nextPos = 0; for (int64_t r = 0; r < checkRows; ++r) { + if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break; if (!colDataIsNull_s(pCol, r)) { BLOCK_DATA_CHECK_TRESSA(pCol->pData); BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.length <= pCol->varmeta.allocLen); @@ -3578,7 +3587,7 @@ void blockDataCheck(const SSDataBlock* pDataBlock, bool forceChk) { } } - return; + return TSDB_CODE_SUCCESS; } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 87cf1d7587..6b222b15ab 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -137,9 +137,9 @@ bool tsEnableCrashReport = false; #else bool tsEnableCrashReport = true; #endif -char *tsClientCrashReportUri = "/ccrashreport"; -char *tsSvrCrashReportUri = "/dcrashreport"; -bool tsEnableSafetyCheck = false; +char *tsClientCrashReportUri = "/ccrashreport"; +char *tsSvrCrashReportUri = "/dcrashreport"; +int8_t tsSafetyCheckLevel = TSDB_SAFETY_CHECK_LEVELL_NEVER; // schemaless bool tsSmlDot2Underline = true; @@ -608,7 +608,7 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { TAOS_CHECK_RETURN( cfgAddInt64(pCfg, "randErrorDivisor", tsRandErrDivisor, 1, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); TAOS_CHECK_RETURN(cfgAddInt64(pCfg, "randErrorScope", tsRandErrScope, 0, INT64_MAX, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); - TAOS_CHECK_RETURN(cfgAddBool(pCfg, "enableSafetyCheck", tsEnableSafetyCheck, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); + TAOS_CHECK_RETURN(cfgAddInt32(pCfg, "safetyCheckLevel", tsSafetyCheckLevel, 0, 5, CFG_SCOPE_BOTH, CFG_DYN_BOTH)); tsNumOfRpcThreads = tsNumOfCores / 2; tsNumOfRpcThreads = TRANGE(tsNumOfRpcThreads, 1, TSDB_MAX_RPC_THREADS); @@ -1302,8 +1302,8 @@ static int32_t taosSetClientCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "tsmaDataDeleteMark"); tsmaDataDeleteMark = pItem->i32; - TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "enableSafetyCheck"); - tsEnableSafetyCheck = pItem->bval; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "safetyCheckLevel"); + tsSafetyCheckLevel = pItem->i32; TAOS_RETURN(TSDB_CODE_SUCCESS); } @@ -2045,7 +2045,7 @@ static int32_t taosCfgDynamicOptionsForServer(SConfig *pCfg, const char *name) { {"supportVnodes", &tsNumOfSupportVnodes}, {"experimental", &tsExperimental}, {"maxTsmaNum", &tsMaxTsmaNum}, - {"enableSafetyCheck", &tsEnableSafetyCheck}}; + {"safetyCheckLevel", &tsSafetyCheckLevel}}; if ((code = taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); @@ -2302,7 +2302,7 @@ static int32_t taosCfgDynamicOptionsForClient(SConfig *pCfg, const char *name) { {"multiResultFunctionStarReturnTags", &tsMultiResultFunctionStarReturnTags}, {"maxTsmaCalcDelay", &tsMaxTsmaCalcDelay}, {"tsmaDataDeleteMark", &tsmaDataDeleteMark}, - {"enableSafetyCheck", &tsEnableSafetyCheck}}; + {"safetyCheckLevel", &tsSafetyCheckLevel}}; if ((code = 
taosCfgSetOption(debugOptions, tListLen(debugOptions), pItem, true)) != TSDB_CODE_SUCCESS) { code = taosCfgSetOption(options, tListLen(options), pItem, false); diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 732b2566a9..236d6a4d3e 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -55,7 +55,7 @@ typedef struct SDataDispatchHandle { } SDataDispatchHandle; static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* pInput) { - if(!tsEnableSafetyCheck) { + if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) { return TSDB_CODE_SUCCESS; } if (pInput == NULL || pInput->pData == NULL || pInput->pData->info.rows <= 0) { diff --git a/source/libs/executor/src/dynqueryctrloperator.c b/source/libs/executor/src/dynqueryctrloperator.c index eb49057d89..62f199387e 100644 --- a/source/libs/executor/src/dynqueryctrloperator.c +++ b/source/libs/executor/src/dynqueryctrloperator.c @@ -528,7 +528,12 @@ static void seqJoinLaunchNewRetrieveImpl(SOperatorInfo* pOperator, SSDataBlock** qDebug("%s dynamic post task begin", GET_TASKID(pOperator->pTaskInfo)); code = pOperator->pDownstream[1]->fpSet.getNextExtFn(pOperator->pDownstream[1], pParam, ppRes); if (*ppRes && (code == 0)) { - blockDataCheck(*ppRes, false); + code = blockDataCheck(*ppRes); + if (code) { + qError("Invalid block data, blockDataCheck failed, error:%s", tstrerror(code)); + pOperator->pTaskInfo->code = code; + T_LONG_JMP(pOperator->pTaskInfo->env, pOperator->pTaskInfo->code); + } pPost->isStarted = true; pStbJoin->execInfo.postBlkNum++; pStbJoin->execInfo.postBlkRows += (*ppRes)->info.rows; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 27dd687f40..632daf0aa0 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -699,12 +699,12 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo if (pTaskInfo->pOpParam && !pTaskInfo->paramSet) { pTaskInfo->paramSet = true; code = pTaskInfo->pRoot->fpSet.getNextExtFn(pTaskInfo->pRoot, pTaskInfo->pOpParam, &pRes); - blockDataCheck(pRes, false); } else { code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes); - blockDataCheck(pRes, false); } + QUERY_CHECK_CODE(code, lino, _end); + code = blockDataCheck(pRes); QUERY_CHECK_CODE(code, lino, _end); if (pRes == NULL) { @@ -749,7 +749,8 @@ int32_t qExecTaskOpt(qTaskInfo_t tinfo, SArray* pResList, uint64_t* useconds, bo } code = pTaskInfo->pRoot->fpSet.getNextFn(pTaskInfo->pRoot, &pRes); - blockDataCheck(pRes, false); + QUERY_CHECK_CODE(code, lino, _end); + code = blockDataCheck(pRes); QUERY_CHECK_CODE(code, lino, _end); } @@ -848,7 +849,11 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) { qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo)); } - blockDataCheck(*pRes, false); + code = blockDataCheck(*pRes); + if (code) { + pTaskInfo->code = code; + qError("%s failed at line %d, code:%s %s", __func__, __LINE__, tstrerror(code), GET_TASKID(pTaskInfo)); + } uint64_t el = (taosGetTimestampUs() - st); diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index 64a07c4653..b39cf4014d 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -616,11 +616,12 @@ int32_t doFilter(SSDataBlock* pBlock, SFilterInfo* pFilterInfo, SColMatchInfo* p } } } - code = 
TSDB_CODE_SUCCESS; - + code = blockDataCheck(pBlock); + QUERY_CHECK_CODE(code, lino, _err); _err: - blockDataCheck(pBlock, true); - + if (code != TSDB_CODE_SUCCESS) { + qError("%s failed at line %d since %s", __func__, lino, tstrerror(code)); + } colDataDestroy(p); taosMemoryFree(p); return code; diff --git a/source/libs/executor/src/groupcacheoperator.c b/source/libs/executor/src/groupcacheoperator.c index 10b372319b..d47ab366b6 100644 --- a/source/libs/executor/src/groupcacheoperator.c +++ b/source/libs/executor/src/groupcacheoperator.c @@ -765,7 +765,7 @@ static FORCE_INLINE int32_t getBlkFromDownstreamOperator(struct SOperatorInfo* p } } - blockDataCheck(pBlock, false); + code = blockDataCheck(pBlock); *ppRes = pBlock; return code; diff --git a/source/libs/executor/src/mergeoperator.c b/source/libs/executor/src/mergeoperator.c index 7fd6b91e52..4a288a277b 100644 --- a/source/libs/executor/src/mergeoperator.c +++ b/source/libs/executor/src/mergeoperator.c @@ -65,11 +65,14 @@ static int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock); int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock) { SOperatorInfo* pOperator = (SOperatorInfo*)param; - int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); - blockDataCheck(*ppBlock, false); + int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); if (code) { qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code)); } + blockDataCheck(*ppBlock); + if (code) { + qError("failed to check data block got from upstream, %s code:%s", __func__, tstrerror(code)); + } return code; } @@ -526,7 +529,8 @@ int32_t doMultiwayMerge(SOperatorInfo* pOperator, SSDataBlock** pResBlock) { if ((*pResBlock) != NULL) { pOperator->resultInfo.totalRows += (*pResBlock)->info.rows; - blockDataCheck(*pResBlock, false); + code = blockDataCheck(*pResBlock); + QUERY_CHECK_CODE(code, lino, _end); } else { setOperatorCompleted(pOperator); } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 7914f9f320..f88b544b0f 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -870,15 +870,25 @@ int32_t setOperatorParams(struct SOperatorInfo* pOperator, SOperatorParam* pInpu SSDataBlock* getNextBlockFromDownstream(struct SOperatorInfo* pOperator, int32_t idx) { SSDataBlock* p = NULL; - int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, true, &p); - blockDataCheck(p, false); - return (code == 0)? p:NULL; + int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, true, &p); + if (code == TSDB_CODE_SUCCESS) { + code = blockDataCheck(p); + if (code != TSDB_CODE_SUCCESS) { + qError("blockDataCheck failed, code:%s", tstrerror(code)); + } + } + return (code == 0) ? p : NULL; } SSDataBlock* getNextBlockFromDownstreamRemain(struct SOperatorInfo* pOperator, int32_t idx) { SSDataBlock* p = NULL; - int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, false, &p); - blockDataCheck(p, false); + int32_t code = getNextBlockFromDownstreamImpl(pOperator, idx, false, &p); + if (code == TSDB_CODE_SUCCESS) { + code = blockDataCheck(p); + if (code != TSDB_CODE_SUCCESS) { + qError("blockDataCheck failed, code:%s", tstrerror(code)); + } + } return (code == 0)? 
p:NULL; } diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c index 1c241dffec..a6ca20c5ee 100644 --- a/source/libs/executor/src/sortoperator.c +++ b/source/libs/executor/src/sortoperator.c @@ -334,10 +334,14 @@ static int32_t getSortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t loadNextDataBlock(void* param, SSDataBlock** ppBlock) { SOperatorInfo* pOperator = (SOperatorInfo*)param; - int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); - blockDataCheck(*ppBlock, false); + int32_t code = pOperator->fpSet.getNextFn(pOperator, ppBlock); if (code) { qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code)); + } else { + code = blockDataCheck(*ppBlock); + if (code) { + qError("failed to check block data, %s code:%s", __func__, tstrerror(code)); + } } return code; } @@ -630,7 +634,8 @@ int32_t fetchNextGroupSortDataBlock(void* param, SSDataBlock** ppBlock) { QUERY_CHECK_CODE(code, lino, _end); if (block != NULL) { - blockDataCheck(block, false); + code = blockDataCheck(block); + QUERY_CHECK_CODE(code, lino, _end); if (block->info.id.groupId == grpSortOpInfo->currGroupId) { grpSortOpInfo->childOpStatus = CHILD_OP_SAME_GROUP; *ppBlock = block; diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 7a836c181b..3832530218 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -48,7 +48,7 @@ class TDSimClient: "telemetryReporting": "0", "tqDebugflag": "135", "stDebugflag":"135", - "enableSafetyCheck":"1" + "safetyCheckLevel":"2" } def getLogDir(self): @@ -151,7 +151,7 @@ class TDDnode: "enableQueryHb": "1", "supportVnodes": "1024", "telemetryReporting": "0", - "enableSafetyCheck":"1" + "safetyCheckLevel":"2" } def init(self, path, remoteIP = ""): From 1e8565eca839a2476133f22b9f9adb86a07e5bc4 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 23 Oct 2024 15:51:46 +0800 Subject: [PATCH 022/127] fix: check return value --- source/libs/executor/src/mergeoperator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/executor/src/mergeoperator.c b/source/libs/executor/src/mergeoperator.c index 4a288a277b..0dfe89e10e 100644 --- a/source/libs/executor/src/mergeoperator.c +++ b/source/libs/executor/src/mergeoperator.c @@ -69,7 +69,7 @@ int32_t sortMergeloadNextDataBlock(void* param, SSDataBlock** ppBlock) { if (code) { qError("failed to get next data block from upstream, %s code:%s", __func__, tstrerror(code)); } - blockDataCheck(*ppBlock); + code = blockDataCheck(*ppBlock); if (code) { qError("failed to check data block got from upstream, %s code:%s", __func__, tstrerror(code)); } From a7bbb7863b87e92e81b79925f8308622169ec071 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 23 Oct 2024 17:21:22 +0800 Subject: [PATCH 023/127] fix: varType length check --- source/common/src/tdatablock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 57df4a6006..cb5b746844 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3570,7 +3570,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { colLen = varDataTLen(pCol->pData + pCol->varmeta.offset[r]); BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE); - BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes); + BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->varmeta.length); if (pCol->reassigned) { BLOCK_DATA_CHECK_TRESSA((pCol->varmeta.offset[r] + 
colLen) <= pCol->varmeta.length); From 1ba0b39582501d013ecc0e7284cd24c3a91f59e3 Mon Sep 17 00:00:00 2001 From: jiajingbin Date: Wed, 23 Oct 2024 23:55:15 +0800 Subject: [PATCH 024/127] test: format code and add dead-lock return code to crash_gen --- tests/pytest/auto_crash_gen.py | 61 +++++++------- tests/pytest/auto_crash_gen_valgrind.py | 74 ++++++++--------- .../pytest/auto_crash_gen_valgrind_cluster.py | 82 ++++++++----------- 3 files changed, 102 insertions(+), 115 deletions(-) diff --git a/tests/pytest/auto_crash_gen.py b/tests/pytest/auto_crash_gen.py index 343cbd72c3..4e4679db6a 100755 --- a/tests/pytest/auto_crash_gen.py +++ b/tests/pytest/auto_crash_gen.py @@ -6,15 +6,12 @@ import requests # -*- coding: utf-8 -*- import os ,sys import random -import argparse import subprocess -import time -import platform # valgrind mode ? valgrind_mode = False -msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } +msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"} # formal hostname = socket.gethostname() @@ -112,9 +109,9 @@ def random_args(args_list): # args_list["--connector-type"]=connect_types[random.randint(0,2)] args_list["--connector-type"]= connect_types[0] args_list["--max-dbs"]= random.randint(1,10) - + # dnodes = [1,3] # set single dnodes; - + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) args_list["--debug"]=False @@ -125,7 +122,7 @@ def random_args(args_list): # args_list["--ignore-errors"]=[] ## can add error codes for detail - + args_list["--run-tdengine"]= False args_list["--use-shadow-db"]= False args_list["--dynamic-db-table-names"]= True @@ -162,7 +159,7 @@ def random_args(args_list): if args_list["--larger-data"]: threads = [16,32] else: - threads = [32,64,128,256] + threads = [32,64,128,256] args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug return args_list @@ -176,7 +173,7 @@ def limits(args_list): pass # env is start by test frame , not crash_gen instance - + # elif args_list["--num-replicas"]==0: # print(" make sure num-replicas is at least 1 ") # args_list["--num-replicas"]=1 @@ -186,10 +183,10 @@ def limits(args_list): # elif args_list["--num-replicas"]>1: # if not args_list["--auto-start-service"]: # print("it should be deployed by crash_gen auto-start-service for multi replicas") - + # else: # pass - + return args_list def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): @@ -216,9 +213,9 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): arguments+="" else: arguments+=(k+"="+str(v)+" ") - + if valgrind : - + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments) else: @@ -239,7 +236,7 @@ def start_taosd(): start_cmd = 'cd %s && python3 test.py >>/dev/null '%(start_path) os.system(start_cmd) -def get_cmds(args_list): +def get_cmds(args_list): crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode) return crash_gen_cmd @@ -276,11 +273,15 @@ def check_status(): os.system("tail -n 50 %s>>%s"%(result_file,exit_status_logs)) core_check = subprocess.Popen('ls -l %s | grep "^-" | wc -l'%core_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + dead_lock_check = subprocess.Popen("grep -i 'dead locked' %s "%result_file, shell=True, 
stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") if int(core_check.strip().rstrip()) > 0: # it means core files has occured return 3 - + + if dead_lock_check: + return 6 + if "Crash_Gen is now exiting with status code: 1" in run_code: return 1 elif "Crash_Gen is now exiting with status code: 0" in run_code: @@ -293,7 +294,7 @@ def main(): args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, - "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , "--continue-on-exception":False } args = random_args(args_list) @@ -301,24 +302,24 @@ def main(): build_path = get_path() - + if repo =="community": crash_gen_path = build_path[:-5]+"community/tests/pytest/" elif repo =="TDengine": crash_gen_path = build_path[:-5]+"/tests/pytest/" else: pass - + if os.path.exists(crash_gen_path+"crash_gen.sh"): print(" make sure crash_gen.sh is ready") else: print( " crash_gen.sh is not exists ") sys.exit(1) - + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16] - + # crash_cmds = get_cmds() - + crash_cmds = get_cmds(args) # clean run_dir os.system('rm -rf %s'%run_dir ) @@ -329,9 +330,9 @@ def main(): run_crash_gen(crash_cmds) endtime = datetime.datetime.now() status = check_status() - + print("exit status : ", status) - + if status ==4: print('======== crash_gen found memory bugs ========') if status ==5: @@ -344,15 +345,15 @@ def main(): try: cmd = crash_cmds.split('&')[2] if status == 0: - log_dir = "none" + log_dir = "none" else: - log_dir= "/root/pxiao/crash_gen_logs" - + log_dir= "/root/pxiao/crash_gen_logs" + if status == 3: core_dir = "/root/pxiao/crash_gen_logs" else: core_dir = "none" - + text = f''' exit status: {msg_dict[status]} test scope: crash_gen @@ -364,12 +365,12 @@ def main(): log dir: {log_dir} core dir: {core_dir} cmd: {cmd}''' - - send_msg(get_msg(text)) + + send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) - + if __name__ == '__main__': main() diff --git a/tests/pytest/auto_crash_gen_valgrind.py b/tests/pytest/auto_crash_gen_valgrind.py index 29d9d61732..1e0de6ace1 100755 --- a/tests/pytest/auto_crash_gen_valgrind.py +++ b/tests/pytest/auto_crash_gen_valgrind.py @@ -9,15 +9,12 @@ import requests # -*- coding: utf-8 -*- import os ,sys import random -import argparse import subprocess -import time -import platform # valgrind mode ? 
valgrind_mode = True -msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } +msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"} # formal hostname = socket.gethostname() @@ -48,6 +45,7 @@ def send_msg(json): 'Content-Type': 'application/json' } + req = requests.post(url=group_url, headers=headers, json=json) inf = req.json() if "StatusCode" in inf and inf["StatusCode"] == 0: @@ -115,9 +113,9 @@ def random_args(args_list): # args_list["--connector-type"]=connect_types[random.randint(0,2)] args_list["--connector-type"]= connect_types[0] args_list["--max-dbs"]= random.randint(1,10) - + # dnodes = [1,3] # set single dnodes; - + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) args_list["--debug"]=False @@ -125,13 +123,13 @@ def random_args(args_list): args_list["--track-memory-leaks"]=False args_list["--max-steps"]=random.randint(200,500) - + threads = [16,32] args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug # args_list["--ignore-errors"]=[] ## can add error codes for detail - + args_list["--run-tdengine"]= False args_list["--use-shadow-db"]= False args_list["--dynamic-db-table-names"]= True @@ -177,7 +175,7 @@ def limits(args_list): pass # env is start by test frame , not crash_gen instance - + # elif args_list["--num-replicas"]==0: # print(" make sure num-replicas is at least 1 ") # args_list["--num-replicas"]=1 @@ -187,10 +185,10 @@ def limits(args_list): # elif args_list["--num-replicas"]>1: # if not args_list["--auto-start-service"]: # print("it should be deployed by crash_gen auto-start-service for multi replicas") - + # else: # pass - + return args_list def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): @@ -217,9 +215,9 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): arguments+="" else: arguments+=(k+"="+str(v)+" ") - + if valgrind : - + crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0203,0x4012 '%(crash_gen_path ,arguments) else: @@ -228,7 +226,6 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): return crash_gen_cmd - def start_taosd(): build_path = get_path() if repo == "community": @@ -272,7 +269,7 @@ def check_status(): if int(core_check.strip().rstrip()) > 0: # it means core files has occured return 3 - + mem_status = check_memory() if mem_status >0: return mem_status @@ -281,8 +278,8 @@ def check_status(): elif "Crash_Gen is now exiting with status code: 0" in run_code: return 0 else: - return 2 - + return 2 + def check_memory(): @@ -301,34 +298,37 @@ def check_memory(): os.mkdir(back_path) stderr_file = os.path.join(crash_gen_path , "valgrind.err") - + stdout_file = os.path.join(crash_gen_path, 'valgrind.out') + status = 0 grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s %s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s %s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 'taosMemoryMalloc' %s "%stderr_file , shell=True, 
stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: + # mem-leak can be also occure when exit normally when dead lock # os.system("cp %s %s"%(stderr_file , back_path)) - status = 5 - + dead_lock_res = subprocess.Popen("grep -i 'dead locked' %s "%stdout_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + status = 6 if dead_lock_res else 5 + return status def main(): args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, - "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , "--continue-on-exception":False } args = random_args(args_list) @@ -341,17 +341,17 @@ def main(): crash_gen_path = build_path[:-5]+"/tests/pytest/" else: pass - + if os.path.exists(crash_gen_path+"crash_gen.sh"): print(" make sure crash_gen.sh is ready") else: print( " crash_gen.sh is not exists ") sys.exit(1) - + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16] # crash_cmds = get_cmds() - + crash_cmds = get_cmds(args) # clean run_dir @@ -364,9 +364,9 @@ def main(): endtime = datetime.datetime.now() status = check_status() # back_path = os.path.join(core_path,"valgrind_report") - + print("exit status : ", status) - + if status ==4: print('======== crash_gen found memory bugs ========') if status ==5: @@ -379,15 +379,15 @@ def main(): try: cmd = crash_cmds.split('&')[2] if status == 0: - log_dir = "none" + log_dir = "none" else: - log_dir= "/root/pxiao/crash_gen_logs" - + log_dir= "/root/pxiao/crash_gen_logs" + if status == 3: core_dir = "/root/pxiao/crash_gen_logs" else: core_dir = "none" - + text = f''' exit status: {msg_dict[status]} test scope: crash_gen @@ -399,12 +399,12 @@ def main(): log dir: {log_dir} core dir: {core_dir} cmd: {cmd}''' - + send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) - + if __name__ == '__main__': main() diff --git a/tests/pytest/auto_crash_gen_valgrind_cluster.py b/tests/pytest/auto_crash_gen_valgrind_cluster.py index 8546d436de..22f453e51e 100755 --- a/tests/pytest/auto_crash_gen_valgrind_cluster.py +++ b/tests/pytest/auto_crash_gen_valgrind_cluster.py @@ -1,23 +1,17 @@ #!/usr/bin/python3 - - +# -*- coding: utf-8 -*- import datetime import os import socket import requests - -# -*- coding: utf-8 -*- import os ,sys import random -import argparse import subprocess -import time -import platform # valgrind mode ? 
valgrind_mode = True -msg_dict = {0:"success" , 1:"failed" , 2:"other errors" , 3:"crash occured" , 4:"Invalid read/write" , 5:"memory leak" } +msg_dict = {0: "success", 1: "failed", 2: "other errors", 3: "crash occured", 4: "Invalid read/write", 5: "memory leak", 6: "dead locked"} # formal hostname = socket.gethostname() @@ -115,9 +109,9 @@ def random_args(args_list): # args_list["--connector-type"]=connect_types[random.randint(0,2)] args_list["--connector-type"]= connect_types[0] args_list["--max-dbs"]= random.randint(1,10) - + # dnodes = [1,3] # set single dnodes; - + # args_list["--num-dnodes"]= random.sample(dnodes,1)[0] # args_list["--num-replicas"]= random.randint(1,args_list["--num-dnodes"]) args_list["--debug"]=False @@ -125,13 +119,12 @@ def random_args(args_list): args_list["--track-memory-leaks"]=False args_list["--max-steps"]=random.randint(200,500) - + threads = [16,32] args_list["--num-threads"]=random.sample(threads,1)[0] #$ debug # args_list["--ignore-errors"]=[] ## can add error codes for detail - args_list["--run-tdengine"]= False args_list["--use-shadow-db"]= False args_list["--dynamic-db-table-names"]= True @@ -177,7 +170,7 @@ def limits(args_list): pass # env is start by test frame , not crash_gen instance - + # elif args_list["--num-replicas"]==0: # print(" make sure num-replicas is at least 1 ") # args_list["--num-replicas"]=1 @@ -187,10 +180,9 @@ def limits(args_list): # elif args_list["--num-replicas"]>1: # if not args_list["--auto-start-service"]: # print("it should be deployed by crash_gen auto-start-service for multi replicas") - + # else: # pass - return args_list def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): @@ -217,18 +209,13 @@ def get_auto_mix_cmds(args_list ,valgrind=valgrind_mode): arguments+="" else: arguments+=(k+"="+str(v)+" ") - + if valgrind : - crash_gen_cmd = 'cd %s && ./crash_gen.sh --valgrind -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0707,0x0203,0x4012 '%(crash_gen_path ,arguments) - else: - crash_gen_cmd = 'cd %s && ./crash_gen.sh -i 3 %s -g 0x32c,0x32d,0x3d3,0x18,0x2501,0x369,0x388,0x061a,0x2550,0x0014,0x0707,0x0203,0x4012'%(crash_gen_path ,arguments) - return crash_gen_cmd - def start_taosd(): build_path = get_path() if repo == "community": @@ -242,7 +229,7 @@ def start_taosd(): os.system(start_cmd +">>/dev/null") def get_cmds(args_list): - + crash_gen_cmd = get_auto_mix_cmds(args_list,valgrind=valgrind_mode) return crash_gen_cmd @@ -272,7 +259,7 @@ def check_status(): if int(core_check.strip().rstrip()) > 0: # it means core files has occured return 3 - + mem_status = check_memory() if mem_status >0: return mem_status @@ -281,8 +268,7 @@ def check_status(): elif "Crash_Gen is now exiting with status code: 0" in run_code: return 0 else: - return 2 - + return 2 def check_memory(): @@ -301,57 +287,58 @@ def check_memory(): os.mkdir(back_path) stderr_file = os.path.join(crash_gen_path , "valgrind.err") - + stdout_file = os.path.join(crash_gen_path, 'valgrind.out') status = 0 grep_res = subprocess.Popen("grep -i 'Invalid read' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s %s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 'Invalid write' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: # os.system("cp %s %s"%(stderr_file , back_path)) status = 4 - + grep_res = subprocess.Popen("grep -i 
'taosMemoryMalloc' %s "%stderr_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") - + if grep_res: + # mem-leak can be also occure when exit normally when dead lock # os.system("cp %s %s"%(stderr_file , back_path)) - status = 5 - + dead_lock_res = subprocess.Popen("grep -i 'dead locked' %s "%stdout_file , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8") + status = 6 if dead_lock_res else 5 + return status def main(): - args_list = {"--auto-start-service":False ,"--max-dbs":0,"--connector-type":"native","--debug":False,"--run-tdengine":False,"--ignore-errors":[], "--track-memory-leaks":False , "--larger-data":False, "--mix-oos-data":False, "--dynamic-db-table-names":False, - "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , + "--per-thread-db-connection":False , "--record-ops":False , "--max-steps":100, "--num-threads":10, "--verify-data":False,"--use-shadow-db":False , "--continue-on-exception":False } args = random_args(args_list) args = limits(args) - build_path = get_path() + build_path = get_path() if repo =="community": crash_gen_path = build_path[:-5]+"community/tests/pytest/" elif repo =="TDengine": crash_gen_path = build_path[:-5]+"/tests/pytest/" else: pass - + if os.path.exists(crash_gen_path+"crash_gen.sh"): print(" make sure crash_gen.sh is ready") else: print( " crash_gen.sh is not exists ") sys.exit(1) - + git_commit = subprocess.Popen("cd %s && git log | head -n1"%crash_gen_path, shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")[7:16] # crash_cmds = get_cmds() - + crash_cmds = get_cmds(args) # clean run_dir @@ -364,9 +351,9 @@ def main(): endtime = datetime.datetime.now() status = check_status() # back_path = os.path.join(core_path,"valgrind_report") - + print("exit status : ", status) - + if status ==4: print('======== crash_gen found memory bugs ========') if status ==5: @@ -379,15 +366,15 @@ def main(): try: cmd = crash_cmds.split('&')[2] if status == 0: - log_dir = "none" + log_dir = "none" else: - log_dir= "/root/pxiao/crash_gen_logs" - + log_dir= "/root/pxiao/crash_gen_logs" + if status == 3: core_dir = "/root/pxiao/crash_gen_logs" else: core_dir = "none" - + text = f''' exit status: {msg_dict[status]} test scope: crash_gen @@ -399,12 +386,11 @@ def main(): log dir: {log_dir} core dir: {core_dir} cmd: {cmd}''' - - send_msg(get_msg(text)) + + send_msg(get_msg(text)) except Exception as e: print("exception:", e) exit(status) - if __name__ == '__main__': main() From a29f6145b0560a84ce4a02180a39573dc6a873b7 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 24 Oct 2024 06:40:37 +0800 Subject: [PATCH 025/127] check skip:pcol.pdata --- source/common/src/tdatablock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index cb5b746844..ee551d54db 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3554,7 +3554,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { for (int64_t r = 0; r < checkRows; ++r) { if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break; if (!colDataIsNull_s(pCol, r)) { - BLOCK_DATA_CHECK_TRESSA(pCol->pData); + // BLOCK_DATA_CHECK_TRESSA(pCol->pData); BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.length <= pCol->varmeta.allocLen); if (isVarType) { From f91226b1d6357aca99b3b0976e5115629bf3be67 Mon 
Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 24 Oct 2024 06:53:47 +0800 Subject: [PATCH 026/127] skip: pCol.pData Null --- source/common/src/tdatablock.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index ee551d54db..b47abeeb65 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3543,6 +3543,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, i); isVarType = IS_VAR_DATA_TYPE(pCol->info.type); checkRows = pDataBlock->info.rows; + if(pCol->pData == NULL) continue; if (isVarType) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset); From 3a505157f372fad76b57716a27d0f7562281f14e Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 24 Oct 2024 10:33:18 +0800 Subject: [PATCH 027/127] fix: type overflow --- source/common/src/tdatablock.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index b47abeeb65..b8e9579c6b 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3582,7 +3582,15 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { typeValue = *(char*)(pCol->pData + pCol->varmeta.offset[r] + colLen - 1); } else { - GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r)); + if (TSDB_DATA_TYPE_FLOAT == pCol->info.type) { + float v = 0; + GET_TYPED_DATA(v, float, pCol->info.type, colDataGetNumData(pCol, r)); + } else if (TSDB_DATA_TYPE_DOUBLE == pCol->info.type) { + double v = 0; + GET_TYPED_DATA(v, double, pCol->info.type, colDataGetNumData(pCol, r)); + } else { + GET_TYPED_DATA(typeValue, int64_t, pCol->info.type, colDataGetNumData(pCol, r)); + } } } } From 5d20fde69209fda9e89ec1b7019cbaf4d7727559 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 24 Oct 2024 17:44:13 +0800 Subject: [PATCH 028/127] safetycheck, use reserve state --- include/common/tmsg.h | 1 + source/common/src/tdatablock.c | 10 +++++----- source/libs/executor/src/executil.c | 1 + 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 01808d4f2f..14e3f9b0eb 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1215,6 +1215,7 @@ typedef struct { int32_t bytes; int8_t type; uint8_t pk; + bool reserve; } SColumnInfo; typedef struct STimeWindow { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index b8e9579c6b..9240e18700 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3543,7 +3543,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, i); isVarType = IS_VAR_DATA_TYPE(pCol->info.type); checkRows = pDataBlock->info.rows; - if(pCol->pData == NULL) continue; + if (pCol->info.reserve == true) continue; if (isVarType) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset); @@ -3555,12 +3555,12 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { for (int64_t r = 0; r < checkRows; ++r) { if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break; if (!colDataIsNull_s(pCol, r)) { - // BLOCK_DATA_CHECK_TRESSA(pCol->pData); + BLOCK_DATA_CHECK_TRESSA(pCol->pData); BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.length <= pCol->varmeta.allocLen); - + if (isVarType) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.allocLen > 0); - 
BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] < pCol->varmeta.length); + BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] <= pCol->varmeta.length); if (pCol->reassigned) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] >= 0); } else if (0 == r) { @@ -3571,7 +3571,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { colLen = varDataTLen(pCol->pData + pCol->varmeta.offset[r]); BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE); - BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->varmeta.length); + BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes); if (pCol->reassigned) { BLOCK_DATA_CHECK_TRESSA((pCol->varmeta.offset[r] + colLen) <= pCol->varmeta.length); diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index b15cc2ab45..1a43d42348 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -390,6 +390,7 @@ SSDataBlock* createDataBlockFromDescNode(SDataBlockDescNode* pNode) { createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId); idata.info.scale = pDescNode->dataType.scale; idata.info.precision = pDescNode->dataType.precision; + idata.info.reserve = pDescNode->reserve; code = blockDataAppendColInfo(pBlock, &idata); if (code != TSDB_CODE_SUCCESS) { From b50a89804f4ce4400297bf4a3268c28e8643eed8 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 25 Oct 2024 10:16:28 +0800 Subject: [PATCH 029/127] fix: reserve check --- source/common/src/tdatablock.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 9240e18700..d59c9252f3 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3541,9 +3541,10 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); for (int32_t i = 0; i < colNum; ++i) { SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pDataBlock->pDataBlock, i); + BLOCK_DATA_CHECK_TRESSA(pCol != NULL); isVarType = IS_VAR_DATA_TYPE(pCol->info.type); checkRows = pDataBlock->info.rows; - if (pCol->info.reserve == true) continue; + if (pCol->info.reserve == false) continue; if (isVarType) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset); From f897543994d156d4055c4ae9c062a98308c0cdce Mon Sep 17 00:00:00 2001 From: charles Date: Fri, 25 Oct 2024 17:31:21 +0800 Subject: [PATCH 030/127] add test case for case...when... by charles --- tests/army/query/test_case_when.py | 361 +++++++++++++++++++++++++++++ tests/parallel_test/cases.task | 1 + 2 files changed, 362 insertions(+) create mode 100644 tests/army/query/test_case_when.py diff --git a/tests/army/query/test_case_when.py b/tests/army/query/test_case_when.py new file mode 100644 index 0000000000..9edc9daad6 --- /dev/null +++ b/tests/army/query/test_case_when.py @@ -0,0 +1,361 @@ +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame import * +from frame.eos import * +from datetime import datetime, timedelta + + +class TDTestCase(TBase): + """Verify the case...when... 
expression in the query statement + """ + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + tdSql.init(conn.cursor()) + self.stable_schema = { + "columns": { + "ts": "timestamp", + "c_null": "int", + "c_bool": "bool", + "c_tinyint": "tinyint", + "c_smallint": "smallint", + "c_int": "int", + "c_bigint": "bigint", + "c_float": "float", + "c_double": "double", + "c_varchar": "varchar(16)", + "c_timestamp": "timestamp", + "c_nchar": "nchar(16)", + "c_utinyint": "tinyint unsigned", + "c_usmallint": "smallint unsigned", + "c_uint": "int unsigned", + "c_ubigint": "bigint unsigned", + "c_varbinary": "varbinary(16)", + "c_geometry": "geometry(32)" + }, + "tags": { + "t_null": "int", + "t_bool": "bool", + "t_tinyint": "tinyint", + "t_smallint": "smallint", + "t_int": "int", + "t_bigint": "bigint", + "t_float": "float", + "t_double": "double", + "t_varchar": "varchar(16)", + "t_timestamp": "timestamp", + "t_nchar": "nchar(16)", + "t_utinyint": "tinyint unsigned", + "t_usmallint": "smallint unsigned", + "t_uint": "int unsigned", + "t_ubigint": "bigint unsigned", + "t_varbinary": "varbinary(16)", + "t_geometry": "geometry(32)" + } + } + + def prepare_data(self): + # create database + tdSql.execute("create database test_case_when;") + tdSql.execute("use test_case_when;") + # create stable + columns = ",".join([f"{k} {v}" for k, v in self.stable_schema["columns"].items()]) + tags = ",".join([f"{k} {v}" for k, v in self.stable_schema["tags"].items()]) + st_sql = f"create stable st1 ({columns}) tags ({tags});" + tdSql.execute(st_sql) + st_sql_json_tag = f"create stable st2 ({columns}) tags (t json);" + tdSql.execute(st_sql_json_tag) + # create child table + tdSql.execute("create table ct1 using st1 tags(NULL, True, 1, 1, 1, 1, 1.1, 1.11, 'aaaaaaaa', '2021-09-01 00:00:00.000', 'aaaaaaaa', 1, 1, 1, 1, \"0x06\",'POINT(1 1)');") + tdSql.execute("""create table ct2 using st2 tags('{"name": "test", "location": "beijing"}');""") + # insert data + ct1_data = [ + """'2024-10-01 00:00:00.000', NULL, True, 2, 2, 2, 2, 2.2, 2.22, 'bbbbbbbb', '2021-09-01 00:00:00.000', 'bbbbbbbb', 2, 2, 2, 2, "0x07",'POINT(2 2)'""", + """'2024-10-01 00:00:01.000', NULL, False, 3, 3, 3, 3, 3.3, 3.33, 'cccccccc', '2021-09-01 00:00:00.000', 'cccccccc', 3, 3, 3, 3, "0x08",'POINT(3 3)'""", + """'2024-10-01 00:00:02.000', NULL, True, 4, 4, 4, 4, 4.4, 4.44, 'dddddddd', '2021-09-01 00:00:00.000', 'dddddddd', 4, 4, 4, 4, "0x09",'POINT(4 4)'""", + """'2024-10-01 00:00:03.000', NULL, False, 5, 5, 5, 5, 5.5, 5.55, 'eeeeeeee', '2021-09-01 00:00:00.000', 'eeeeeeee', 5, 5, 5, 5, "0x0A",'POINT(5 5)'""", + """'2024-10-01 00:00:04.000', NULL, True, 6, 6, 6, 6, 6.6, 6.66, 'ffffffff', '2021-09-01 00:00:00.000', 'ffffffff', 6, 6, 6, 6, "0x0B",'POINT(6 6)'""", + """'2024-10-01 00:00:05.000', NULL, False, 7, 7, 7, 7, 7.7, 7.77, 'gggggggg', '2021-09-01 00:00:00.000', 'gggggggg', 7, 7, 7, 7, "0x0C",'POINT(7 7)'""", + """'2024-10-01 00:00:06.000', NULL, True, 8, 8, 8, 8, 8.8, 8.88, 'hhhhhhhh', '2021-09-01 00:00:00.000', 'hhhhhhhh', 8, 8, 8, 8, "0x0D",'POINT(8 8)'""", + """'2024-10-01 00:00:07.000', NULL, False, 9, 9, 9, 9, 9.9, 9.99, 'iiiiiiii', '2021-09-01 00:00:00.000', 'iiiiiiii', 9, 9, 9, 9, "0x0E",'POINT(9 9)'""", + """'2024-10-01 00:00:08.000', NULL, True, 10, 10, 10, 10, 10.10, 10.1010, 'jjjjjjjj', '2021-09-01 00:00:00.000', 'jjjjjjjj', 10, 10, 10, 10, "0x0F",'POINT(10 10)'""", + """'2024-10-01 00:00:09.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, 
NULL, NULL, NULL, NULL, NULL, NULL""" + ] + ct1_insert_sql = "insert into ct1 values(%s);" % "),(".join(ct1_data) + tdSql.execute(ct1_insert_sql) + ct2_data = [ + """'2024-10-01 00:00:00.000', NULL, True, 2, 2, 2, 2, 2.2, 2.22, 'bbbbbbbb', '2021-09-01 00:00:00.000', 'bbbbbbbb', 2, 2, 2, 2, "0x07",'POINT(2 2)'""", + """'2024-10-01 00:00:01.000', NULL, False, 3, 3, 3, 3, 3.3, 3.33, 'cccccccc', '2021-09-01 00:00:00.000', 'cccccccc', 3, 3, 3, 3, "0x08",'POINT(3 3)'""", + """'2024-10-01 00:00:02.000', NULL, True, 4, 4, 4, 4, 4.4, 4.44, 'dddddddd', '2021-09-01 00:00:00.000', 'dddddddd', 4, 4, 4, 4, "0x09",'POINT(4 4)'""", + """'2024-10-01 00:00:03.000', NULL, False, 5, 5, 5, 5, 5.5, 5.55, 'eeeeeeee', '2021-09-01 00:00:00.000', 'eeeeeeee', 5, 5, 5, 5, "0x0A",'POINT(5 5)'""", + """'2024-10-01 00:00:04.000', NULL, True, 6, 6, 6, 6, 6.6, 6.66, 'ffffffff', '2021-09-01 00:00:00.000', 'ffffffff', 6, 6, 6, 6, "0x0B",'POINT(6 6)'""", + """'2024-10-01 00:00:05.000', NULL, False, 7, 7, 7, 7, 7.7, 7.77, 'gggggggg', '2021-09-01 00:00:00.000', 'gggggggg', 7, 7, 7, 7, "0x0C",'POINT(7 7)'""", + """'2024-10-01 00:00:06.000', NULL, True, 8, 8, 8, 8, 8.8, 8.88, 'hhhhhhhh', '2021-09-01 00:00:00.000', 'hhhhhhhh', 8, 8, 8, 8, "0x0D",'POINT(8 8)'""", + """'2024-10-01 00:00:07.000', NULL, False, 9, 9, 9, 9, 9.9, 9.99, 'iiiiiiii', '2021-09-01 00:00:00.000', 'iiiiiiii', 9, 9, 9, 9, "0x0E",'POINT(9 9)'""", + """'2024-10-01 00:00:08.000', NULL, True, 10, 10, 10, 10, 10.10, 10.1010, 'jjjjjjjj', '2021-09-01 00:00:00.000', 'jjjjjjjj', 10, 10, 10, 10, "0x0F",'POINT(10 10)'""", + """'2024-10-01 00:00:09.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL""" + ] + ct2_insert_sql = "insert into ct2 values(%s);" % "),(".join(ct2_data) + tdSql.execute(ct2_insert_sql) + + def test_case_when_statements(self): + tdSql.execute("use test_case_when;") + tdSql.query("select case when c_null is null then c_null else t_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_null is not null then c_null else t_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_bool is null then c_bool else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_bool is not null then c_bool else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_tinyint is null then c_tinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_tinyint is not null then c_tinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_smallint is null then c_smallint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_smallint is not null then c_smallint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int is null then c_int else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + 
tdSql.query("select case when c_int is not null then c_int else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_bigint is null then c_bigint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_bigint is not null then c_bigint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_float is null then c_float else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_float is not null then c_float else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('2.200000',), ('3.300000',), ('4.400000',), ('5.500000',), ('6.600000',), ('7.700000',), ('8.800000',), ('9.900000',), ('10.100000',), (None,)]) + + tdSql.query("select case when c_double is null then c_double else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_double is not null then c_double else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('2.220000',), ('3.330000',), ('4.440000',), ('5.550000',), ('6.660000',), ('7.770000',), ('8.880000',), ('9.990000',), ('10.101000',), (None,)]) + + tdSql.query("select case when c_varchar is null then c_varchar else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_varchar is not null then c_varchar else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_nchar is null then c_nchar else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_nchar is not null then c_nchar else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_utinyint is null then c_utinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_utinyint is not null then c_utinyint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_usmallint is null then c_usmallint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_usmallint is not null then c_usmallint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_uint is null then c_uint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_uint is not null then c_uint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), 
(9,), (10,), (None,)]) + + tdSql.query("select case when c_ubigint is null then c_ubigint else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_ubigint is not null then c_ubigint else c_null end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('2',), ('3',), ('4',), ('5',), ('6',), ('7',), ('8',), ('9',), ('10',), (None,)]) + + tdSql.error("select case when c_varbinary is null then c_varbinary else c_null end from st1;") + tdSql.error("select case when c_varbinary is not null then c_varbinary else c_null end from st1;") + + tdSql.query("select case when c_null is null then NULL else c_bool end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_null is not null then NULL else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(True,), (False,), (True,), (False,), (True,), (False,), (True,), (False,), (True,), (None,)]) + + tdSql.query("select case when c_bool=true then NULL else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(None,), (False,), (None,), (False,), (None,), (False,), (None,), (False,), (None,), (None,)]) + + tdSql.query("select case when c_bool!=true then NULL else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(True,), (None,), (True,), (None,), (True,), (None,), (True,), (None,), (True,), (None,)]) + + tdSql.query("select case when c_tinyint=2 then c_tinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_tinyint!=2 then c_tinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_smallint=2 then c_smallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_smallint!=2 then c_smallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int=2 then c_int else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_int!=2 then c_int else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_bigint=2 then c_bigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_bigint!=2 then c_bigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_float=2.2 then c_float else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res[1:] == [(0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (None,)]) + + tdSql.query("select case when c_float!=2.2 then c_float else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res[0] == (1.0,)) + + tdSql.query("select case when c_double=2.22 then c_double else c_bool end from st1;") + assert(tdSql.checkRows(10) and 
tdSql.res == [(2.22,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (0.0,), (1.0,), (None,)]) + + tdSql.query("select case when c_double!=2.2 then c_double else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2.22,), (3.33,), (4.44,), (5.55,), (6.66,), (7.77,), (8.88,), (9.99,), (10.101,), (None,)]) + + tdSql.query("select case when c_varchar='bbbbbbbb' then c_varchar else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), (None,)]) + + tdSql.query("select case when c_varchar!='bbbbbbbb' then c_varchar else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('true',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_timestamp='2021-09-01 00:00:00.000' then c_timestamp else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (1630425600000,), (None,)]) + + tdSql.query("select case when c_timestamp!='2021-09-01 00:00:00.000' then c_timestamp else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_nchar='bbbbbbbb' then c_nchar else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbbbbbb',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), ('false',), ('true',), (None,)]) + + tdSql.query("select case when c_nchar!='bbbbbbbb' then c_nchar else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('true',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) + + tdSql.query("select case when c_utinyint=2 then c_utinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_utinyint!=2 then c_utinyint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_usmallint=2 then c_usmallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_usmallint!=2 then c_usmallint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_uint=2 then c_uint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_uint!=2 then c_uint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_ubigint=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_ubigint!=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), 
(7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_ubigint=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (1,), (0,), (1,), (0,), (1,), (0,), (1,), (None,)]) + + tdSql.query("select case when c_ubigint!=2 then c_ubigint else c_bool end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(1,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.error("select case when c_varbinary='\x30783037' then c_varbinary else c_bool end from st1;") + tdSql.error("select case when c_varbinary!='\x30783037' then c_varbinary else c_bool end from st1;") + + tdSql.query("select case when c_null is null then NULL else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + tdSql.query("select case when c_null is not null then NULL else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_bool=true then false else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(0,), (3,), (0,), (5,), (0,), (7,), (0,), (9,), (0,), (None,)]) + + tdSql.query("select case when c_bool!=true then false else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (0,), (4,), (0,), (6,), (0,), (8,), (0,), (10,), (None,)]) + + tdSql.query("select case when c_smallint=2 then c_smallint else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_smallint!=2 then c_smallint else c_tinyint end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int=2 then c_smallint else c_int end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_int!=2 then c_smallint else c_int end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [(2,), (3,), (4,), (5,), (6,), (7,), (8,), (9,), (10,), (None,)]) + + tdSql.query("select case when c_float=2.2 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('387897',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)]) + + tdSql.query("select case when c_double=2.22 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('387897',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)]) + + tdSql.query("select case when c_varchar='cccccccc' then 'test' when c_varchar='bbbbbbbb' then 'bbbb' else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('bbbb',), ('test',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',), ('test message',)]) + + tdSql.query("select case when ts='2024-10-01 00:00:04.000' then 456646546 when ts>'2024-10-01 00:00:04.000' then 'after today' else 'before today or unknow date' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('before today or unknow date',), ('before today or unknow date',), ('before 
today or unknow date',), ('before today or unknow date',), ('456646546',), ('after today',), ('after today',), ('after today',), ('after today',), ('after today',)]) + + tdSql.query("select case when c_geometry is null then c_geometry else c_null end from st1;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + # tdSql.error("select case when c_geometry is not null then c_geometry else c_null end from st1;") + # tdSql.error("select case when c_geometry='POINT(2 2)' then c_geometry else c_bool end from st1;") + # tdSql.error("select case when c_geometry!='POINT(2 2)' then c_geometry else c_bool end from st1;") + + tdSql.query("select case when t is null then t else c_null end from st2;") + assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) + + # tdSql.error("select case when t is not null then t else c_null end from st2;") + # tdSql.error("select case when t->'location'='beijing' then t->'location' else c_bool end from st2;") + # tdSql.error("select case when t->'location'!='beijing' then t->'location' else c_bool end from st1;") + + tdSql.query("select case when c_float!=2.2 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)]) + + tdSql.query("select case when c_double!=2.22 then 387897 else 'test message' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)]) + + # tdSql.query("select case c_tinyint when 2 then -2147483648 when 3 then 'three' else '4294967295' end from st1;") + # tdSql.query("select case c_float when 2.2 then 9.2233720e+18 when 3.3 then -9.2233720e+18 else 'aa' end from st1;") + # tdSql.query("select case t1.c_int when 2 then 'run' when t1.c_int is null then 'other' else t2.c_varchar end from st1 t1, st2 t2 where t1.ts=t2.ts;") + + def run(self): + self.prepare_data() + self.test_case_when_statements() + + def stop(self): + # tdSql.execute("drop database test_case_when;") + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 09216add82..a03f6cf904 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -20,6 +20,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f query/function/cast.py ,,y,army,./pytest.sh python3 ./test.py -f query/test_join.py ,,y,army,./pytest.sh python3 ./test.py -f query/test_compare.py +,,y,army,./pytest.sh python3 ./test.py -f query/test_case_when.py ,,y,army,./pytest.sh python3 ./test.py -f insert/test_column_tag_boundary.py ,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_desc.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/fill/fill_null.py From 18c062016ecc222f5bfdf928c141e49a4599d897 Mon Sep 17 00:00:00 2001 From: sheyanjie-qq <249478495@qq.com> Date: Fri, 25 Oct 2024 17:54:43 +0800 Subject: [PATCH 031/127] update sample code --- .../com/taos/example/ConsumerLoopFull.java | 15 ++--- .../com/taos/example/ConsumerLoopImp.java | 8 ++- .../com/taos/example/WsConsumerLoopFull.java | 15 ++--- .../com/taos/example/WsConsumerLoopImp.java | 8 ++- .../example/highvolume/DataBaseMonitor.java | 3 + 
.../taos/example/highvolume/SQLWriter.java | 3 + .../src/test/java/com/taos/test/TestAll.java | 57 +++++++++++++++---- 7 files changed, 81 insertions(+), 28 deletions(-) diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java index a399f3aa6a..647855dc48 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopFull.java @@ -1,8 +1,9 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; +import com.fasterxml.jackson.core.JsonProcessingException; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.*; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.*; import java.time.Duration; @@ -60,7 +61,7 @@ public class ConsumerLoopFull { // ANCHOR_END: create_consumer } - public static void pollExample(TaosConsumer consumer) throws SQLException { + public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: poll_data_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -73,7 +74,7 @@ public class ConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } } } catch (Exception ex) { @@ -91,7 +92,7 @@ public class ConsumerLoopFull { // ANCHOR_END: poll_data_code_piece } - public static void seekExample(TaosConsumer consumer) throws SQLException { + public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: consumer_seek List topics = Collections.singletonList("topic_meters"); try { @@ -99,7 +100,7 @@ public class ConsumerLoopFull { consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment)); ConsumerRecords records = ConsumerRecords.emptyRecord(); // make sure we have got some data @@ -125,7 +126,7 @@ public class ConsumerLoopFull { } - public static void commitExample(TaosConsumer consumer) throws SQLException { + public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: commit_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -135,7 +136,7 @@ public class ConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } if (!records.isEmpty()) { // after processing the data, commit the offset manually diff --git a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java index a59bfc282f..378ef8ae6d 100644 --- a/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/ConsumerLoopImp.java @@ -1,7 +1,7 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.JsonUtil; import 
java.sql.Connection; import java.sql.DriverManager; @@ -31,7 +31,11 @@ public class ConsumerLoopImp { final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() { @Override public void process(ResultBean result) { - System.out.println("data: " + JSON.toJSONString(result)); + try{ + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result)); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java index 6db65f47f2..02db97a5a9 100644 --- a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopFull.java @@ -1,8 +1,9 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; +import com.fasterxml.jackson.core.JsonProcessingException; import com.taosdata.jdbc.TSDBDriver; import com.taosdata.jdbc.tmq.*; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.*; import java.time.Duration; @@ -60,7 +61,7 @@ public class WsConsumerLoopFull { // ANCHOR_END: create_consumer } - public static void pollExample(TaosConsumer consumer) throws SQLException { + public static void pollExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: poll_data_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -73,7 +74,7 @@ public class WsConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } } } catch (Exception ex) { @@ -91,7 +92,7 @@ public class WsConsumerLoopFull { // ANCHOR_END: poll_data_code_piece } - public static void seekExample(TaosConsumer consumer) throws SQLException { + public static void seekExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: consumer_seek List topics = Collections.singletonList("topic_meters"); try { @@ -99,7 +100,7 @@ public class WsConsumerLoopFull { consumer.subscribe(topics); System.out.println("Subscribe topics successfully."); Set assignment = consumer.assignment(); - System.out.println("Now assignment: " + JSON.toJSONString(assignment)); + System.out.println("Now assignment: " + JsonUtil.getObjectMapper().writeValueAsString(assignment)); ConsumerRecords records = ConsumerRecords.emptyRecord(); // make sure we have got some data @@ -125,7 +126,7 @@ public class WsConsumerLoopFull { } - public static void commitExample(TaosConsumer consumer) throws SQLException { + public static void commitExample(TaosConsumer consumer) throws SQLException, JsonProcessingException { // ANCHOR: commit_code_piece List topics = Collections.singletonList("topic_meters"); try { @@ -135,7 +136,7 @@ public class WsConsumerLoopFull { for (ConsumerRecord record : records) { ResultBean bean = record.value(); // Add your data processing logic here - System.out.println("data: " + JSON.toJSONString(bean)); + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(bean)); } if (!records.isEmpty()) { // after processing the data, commit the offset manually diff --git a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java index 70e29503f8..77c6a4fd1b 100644 --- 
a/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java +++ b/docs/examples/java/src/main/java/com/taos/example/WsConsumerLoopImp.java @@ -1,7 +1,7 @@ package com.taos.example; -import com.alibaba.fastjson.JSON; import com.taosdata.jdbc.TSDBDriver; +import com.taosdata.jdbc.utils.JsonUtil; import java.sql.Connection; import java.sql.DriverManager; @@ -28,7 +28,11 @@ public abstract class WsConsumerLoopImp { final AbsConsumerLoop consumerLoop = new AbsConsumerLoop() { @Override public void process(ResultBean result) { - System.out.println("data: " + JSON.toJSONString(result)); + try{ + System.out.println("data: " + JsonUtil.getObjectMapper().writeValueAsString(result)); + } catch (Exception e) { + throw new RuntimeException(e); + } } }; diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java index 8678f65231..fa6ebf0858 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/DataBaseMonitor.java @@ -13,6 +13,9 @@ public class DataBaseMonitor { public DataBaseMonitor init() throws SQLException { if (conn == null) { String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + if (jdbcURL == null || jdbcURL == ""){ + jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + } conn = DriverManager.getConnection(jdbcURL); stmt = conn.createStatement(); } diff --git a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java index dc820f161c..1497992f6b 100644 --- a/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java +++ b/docs/examples/java/src/main/java/com/taos/example/highvolume/SQLWriter.java @@ -69,6 +69,9 @@ public class SQLWriter { */ private static Connection getConnection() throws SQLException { String jdbcURL = System.getenv("TDENGINE_JDBC_URL"); + if (jdbcURL == null || jdbcURL == ""){ + jdbcURL = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + } return DriverManager.getConnection(jdbcURL); } diff --git a/docs/examples/java/src/test/java/com/taos/test/TestAll.java b/docs/examples/java/src/test/java/com/taos/test/TestAll.java index e014a3b315..a92ddd116c 100644 --- a/docs/examples/java/src/test/java/com/taos/test/TestAll.java +++ b/docs/examples/java/src/test/java/com/taos/test/TestAll.java @@ -17,6 +17,37 @@ public class TestAll { stmt.execute("drop database if exists " + dbName); } } + waitTransaction(); + } + + public void dropTopic(String topicName) throws SQLException { + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + stmt.execute("drop topic if exists " + topicName); + } + } + waitTransaction(); + } + + public void waitTransaction() throws SQLException { + + String jdbcUrl = "jdbc:TAOS://localhost:6030?user=root&password=taosdata"; + try (Connection conn = DriverManager.getConnection(jdbcUrl)) { + try (Statement stmt = conn.createStatement()) { + for (int i = 0; i < 10; i++) { + stmt.execute("show transactions"); + try (ResultSet resultSet = stmt.getResultSet()) { + if (resultSet.next()) { + int count = resultSet.getInt(1); + if (count == 0) { + break; + } + } + } + } + } + } } public void insertData() throws SQLException { @@ -104,14 +135,20 @@ public class 
TestAll { SubscribeDemo.main(args); } -// @Test -// public void testSubscribeJni() throws SQLException, InterruptedException { -// dropDB("power"); -// ConsumerLoopFull.main(args); -// } -// @Test -// public void testSubscribeWs() throws SQLException, InterruptedException { -// dropDB("power"); -// WsConsumerLoopFull.main(args); -// } + @Test + public void testSubscribeJni() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + ConsumerLoopFull.main(args); + dropTopic("topic_meters"); + dropDB("power"); + } + @Test + public void testSubscribeWs() throws SQLException, InterruptedException { + dropTopic("topic_meters"); + dropDB("power"); + WsConsumerLoopFull.main(args); + dropTopic("topic_meters"); + dropDB("power"); + } } From 0a8b3714f9496276f6af5e67b1935c8ba2ed7b40 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 25 Oct 2024 18:45:29 +0800 Subject: [PATCH 032/127] fix: reserve --- source/libs/planner/src/planPhysiCreater.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 738ccf3224..a44fbbdbee 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -233,7 +233,7 @@ static int32_t buildDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SD int32_t len = 0; code = getSlotKey(pNode, NULL, &name, &len, 16); if (TSDB_CODE_SUCCESS == code) { - code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, false)); + code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, true)); } code = putSlotToHash(name, len, pDataBlockDesc->dataBlockId, slotId, pNode, pHash); if (TSDB_CODE_SUCCESS == code) { @@ -367,7 +367,7 @@ static int32_t addDataBlockSlotsForProject(SPhysiPlanContext* pCxt, const char* } static int32_t pushdownDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SDataBlockDescNode* pDataBlockDesc) { - return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, true, true); + return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, true, false); } typedef struct SSetSlotIdCxt { From 380f43349972ff44794b1c7e05bf5ad48ebc8b22 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 27 Oct 2024 15:40:17 +0800 Subject: [PATCH 033/127] fix(stream): use the refId in stream meta list, in order to avoid access already freed stream tasks. 
--- include/libs/stream/tstream.h | 12 +++--- source/libs/stream/inc/streamInt.h | 5 ++- source/libs/stream/src/streamTask.c | 66 ++++++++++++++++++----------- 3 files changed, 53 insertions(+), 30 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index a189cee0bb..449df5207f 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -70,7 +70,8 @@ typedef struct SActiveCheckpointInfo SActiveCheckpointInfo; #define SSTREAM_TASK_NEED_CONVERT_VER 2 #define SSTREAM_TASK_SUBTABLE_CHANGED_VER 3 -extern int32_t streamMetaId; +extern int32_t streamMetaRefPool; +extern int32_t streamTaskRefPool; enum { STREAM_STATUS__NORMAL = 0, @@ -258,6 +259,7 @@ typedef struct STaskId { typedef struct SStreamTaskId { int64_t streamId; int32_t taskId; + int64_t refId; const char* idStr; } SStreamTaskId; @@ -291,7 +293,6 @@ typedef struct SStreamStatus { int8_t schedStatus; int8_t statusBackup; int32_t schedIdleTime; // idle time before invoke again - int32_t timerActive; // timer is active int64_t lastExecTs; // last exec time stamp int32_t inScanHistorySentinel; bool appendTranstateBlock; // has append the transfer state data block already @@ -546,7 +547,7 @@ typedef int32_t (*__state_trans_user_fn)(SStreamTask*, void* param); int32_t tNewStreamTask(int64_t streamId, int8_t taskLevel, SEpSet* pEpset, bool fillHistory, int64_t triggerParam, SArray* pTaskList, bool hasFillhistory, int8_t subtableWithoutMd5, SStreamTask** pTask); -void tFreeStreamTask(SStreamTask* pTask); +void tFreeStreamTask(void* pTask); int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask); int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask); int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, int64_t ver); @@ -664,6 +665,8 @@ void streamTaskResetStatus(SStreamTask* pTask); void streamTaskSetStatusReady(SStreamTask* pTask); ETaskStatus streamTaskGetPrevStatus(const SStreamTask* pTask); const char* streamTaskGetExecType(int32_t type); +int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId); +void streamTaskFreeRefId(int64_t* pRefId); bool streamTaskUpdateEpsetInfo(SStreamTask* pTask, SArray* pNodeList); void streamTaskResetUpstreamStageInfo(SStreamTask* pTask); @@ -752,16 +755,15 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); +int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask); int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); -int32_t streamMetaAcquireOneTask(SStreamTask* pTask); void streamMetaClear(SStreamMeta* pMeta); void streamMetaInitBackend(SStreamMeta* pMeta); int32_t streamMetaCommit(SStreamMeta* pMeta); int64_t streamMetaGetLatestCheckpointId(SStreamMeta* pMeta); void streamMetaNotifyClose(SStreamMeta* pMeta); void streamMetaStartHb(SStreamMeta* pMeta); -bool streamMetaTaskInTimer(SStreamMeta* pMeta); int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, int64_t startTs, int64_t endTs, bool ready); int32_t streamMetaInitStartInfo(STaskStartInfo* pStartInfo); diff --git a/source/libs/stream/inc/streamInt.h 
b/source/libs/stream/inc/streamInt.h index 94c196d280..93c8dc3b40 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -21,6 +21,7 @@ #include "streamBackendRocksdb.h" #include "trpc.h" #include "tstream.h" +#include "tref.h" #ifdef __cplusplus extern "C" { @@ -70,7 +71,7 @@ struct SActiveCheckpointInfo { SStreamTmrInfo chkptReadyMsgTmr; }; -int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask); +void streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, void* param); typedef struct { int8_t type; @@ -225,6 +226,8 @@ void destroyMetaHbInfo(SMetaHbInfo* pInfo); void streamMetaWaitForHbTmrQuit(SStreamMeta* pMeta); void streamMetaGetHbSendInfo(SMetaHbInfo* pInfo, int64_t* pStartTs, int32_t* pSendCount); int32_t streamMetaSendHbHelper(SStreamMeta* pMeta); +int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid); +void metaRefMgtRemove(int64_t* pRefId); ECHECKPOINT_BACKUP_TYPE streamGetCheckpointBackupType(); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index b359cdfc81..e00984ea9b 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -211,18 +211,19 @@ int32_t tDecodeStreamTaskId(SDecoder* pDecoder, STaskId* pTaskId) { return 0; } -void tFreeStreamTask(SStreamTask* pTask) { - char* p = NULL; - int32_t taskId = pTask->id.taskId; +void tFreeStreamTask(void* pParam) { + char* p = NULL; + SStreamTask* pTask = pParam; + int32_t taskId = pTask->id.taskId; STaskExecStatisInfo* pStatis = &pTask->execInfo; ETaskStatus status1 = TASK_STATUS__UNINIT; streamMutexLock(&pTask->lock); if (pTask->status.pSM != NULL) { - SStreamTaskState pStatus = streamTaskGetStatus(pTask); - p = pStatus.name; - status1 = pStatus.state; + SStreamTaskState status = streamTaskGetStatus(pTask); + p = status.name; + status1 = status.state; } streamMutexUnlock(&pTask->lock); @@ -235,12 +236,6 @@ void tFreeStreamTask(SStreamTask* pTask) { taskId, pStatis->created, pStatis->checkTs, pStatis->readyTs, pStatis->updateCount, pStatis->latestUpdateTs, pCkInfo->checkpointId, pCkInfo->checkpointVer, pCkInfo->nextProcessVer, pStatis->checkpoint); - // remove the ref by timer - while (pTask->status.timerActive > 0) { - stDebug("s-task:%s wait for task stop timer activities, ref:%d", pTask->id.idStr, pTask->status.timerActive); - taosMsleep(100); - } - if (pTask->schedInfo.pDelayTimer != NULL) { streamTmrStop(pTask->schedInfo.pDelayTimer); pTask->schedInfo.pDelayTimer = NULL; @@ -429,6 +424,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i } pTask->refCnt = 1; + pTask->id.refId = 0; pTask->inputq.status = TASK_INPUT_STATUS__NORMAL; pTask->outputq.status = TASK_OUTPUT_STATUS__NORMAL; @@ -441,7 +437,6 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i } pTask->status.schedStatus = TASK_SCHED_STATUS__INACTIVE; - pTask->status.timerActive = 0; code = streamCreateStateMachine(pTask); if (pTask->status.pSM == NULL || code != TSDB_CODE_SUCCESS) { @@ -837,28 +832,31 @@ int8_t streamTaskSetSchedStatusInactive(SStreamTask* pTask) { int32_t streamTaskClearHTaskAttr(SStreamTask* pTask, int32_t resetRelHalt) { int32_t code = 0; SStreamMeta* pMeta = pTask->pMeta; - STaskId sTaskId = {.streamId = pTask->streamTaskId.streamId, .taskId = pTask->streamTaskId.taskId}; + SStreamTask* pStreamTask = NULL; + if (pTask->info.fillHistory == 0) { return code; } - SStreamTask** ppStreamTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &sTaskId, sizeof(sTaskId)); - if 
(ppStreamTask != NULL) { + code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->streamTaskId, &pStreamTask); + if (code == 0) { stDebug("s-task:%s clear the related stream task:0x%x attr to fill-history task", pTask->id.idStr, - (int32_t)sTaskId.taskId); + (int32_t)pTask->streamTaskId.taskId); - streamMutexLock(&(*ppStreamTask)->lock); - CLEAR_RELATED_FILLHISTORY_TASK((*ppStreamTask)); + streamMutexLock(&(pStreamTask->lock)); + CLEAR_RELATED_FILLHISTORY_TASK(pStreamTask); if (resetRelHalt) { stDebug("s-task:0x%" PRIx64 " set the persistent status attr to be ready, prev:%s, status in sm:%s", - sTaskId.taskId, streamTaskGetStatusStr((*ppStreamTask)->status.taskStatus), - streamTaskGetStatus(*ppStreamTask).name); - (*ppStreamTask)->status.taskStatus = TASK_STATUS__READY; + pTask->streamTaskId.taskId, streamTaskGetStatusStr(pStreamTask->status.taskStatus), + streamTaskGetStatus(pStreamTask).name); + pStreamTask->status.taskStatus = TASK_STATUS__READY; } - code = streamMetaSaveTask(pMeta, *ppStreamTask); - streamMutexUnlock(&(*ppStreamTask)->lock); + code = streamMetaSaveTask(pMeta, pStreamTask); + streamMutexUnlock(&(pStreamTask->lock)); + + streamMetaReleaseTask(pMeta, pStreamTask); } return code; @@ -1282,3 +1280,23 @@ const char* streamTaskGetExecType(int32_t type) { return "invalid-exec-type"; } } + +int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId) { + *pRefId = taosMemoryMalloc(sizeof(int64_t)); + if (*pRefId != NULL) { + **pRefId = pTask->id.refId; + metaRefMgtAdd(pTask->pMeta->vgId, *pRefId); + return 0; + } else { + stError("s-task:%s failed to alloc new ref id, code:%s", pTask->id.idStr, tstrerror(terrno)); + return terrno; + } +} + +void streamTaskFreeRefId(int64_t* pRefId) { + if (pRefId == NULL) { + return; + } + + metaRefMgtRemove(pRefId); +} \ No newline at end of file From 4802f59dfe4ee049351041be269962edb8d19c05 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 27 Oct 2024 15:49:40 +0800 Subject: [PATCH 034/127] fix(stream): use the refId in stream meta list, in order to avoid access already freed stream tasks. 
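Both this commit and the previous one apply the same idea: asynchronous paths (timer callbacks, hash-map walks) stop caching raw SStreamTask pointers and instead keep a 64-bit ref id, re-acquiring the task from a reference pool (taosAcquireRef / streamMetaAcquireTaskUnsafe) each time and releasing it with streamMetaReleaseTask when done, so a task dropped in the meantime simply fails to acquire instead of being dereferenced after free. The sketch below illustrates that acquire-by-id pattern in isolation; it is a single-threaded toy, and every name in it (task_entry_t, pool_acquire, timer_cb, ...) is an illustrative stand-in rather than a TDengine API -- the real code in this patch goes through the tref reference pool and the stream-meta locks shown in the diffs.

    /* Illustrative only: a tiny ref-counted handle registry showing why an async
     * callback should hold an id and re-acquire, instead of caching a raw pointer.
     * Single-threaded sketch; names are hypothetical, not TDengine APIs. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct {
      int64_t id;
      int     refs;     /* outstanding acquisitions, +1 held by the registry itself */
      int     removed;  /* logically dropped; freed once refs reaches 0 */
      char    name[32];
    } task_entry_t;

    #define MAX_TASKS 16
    static task_entry_t *task_pool[MAX_TASKS];

    /* register a task and take the registry's own reference; returns its ref id */
    static int64_t pool_add(const char *name) {
      for (int i = 0; i < MAX_TASKS; ++i) {
        if (task_pool[i] == NULL) {
          task_entry_t *e = calloc(1, sizeof(*e));
          if (e == NULL) return -1;
          e->id = i + 1;
          e->refs = 1;
          snprintf(e->name, sizeof(e->name), "%s", name);
          task_pool[i] = e;
          return e->id;
        }
      }
      return -1;
    }

    /* look the task up by id; returns NULL if it no longer exists */
    static task_entry_t *pool_acquire(int64_t id) {
      if (id <= 0 || id > MAX_TASKS) return NULL;
      task_entry_t *e = task_pool[id - 1];
      if (e == NULL || e->removed) return NULL;
      e->refs++;
      return e;
    }

    /* drop one reference; the last holder frees the object */
    static void pool_release(task_entry_t *e) {
      if (e == NULL) return;
      if (--e->refs == 0) {
        task_pool[e->id - 1] = NULL;
        free(e);
      }
    }

    /* logical removal: mark it dropped and give back the registry's reference */
    static void pool_remove(int64_t id) {
      task_entry_t *e = (id > 0 && id <= MAX_TASKS) ? task_pool[id - 1] : NULL;
      if (e == NULL) return;
      e->removed = 1;
      pool_release(e);
    }

    /* a timer callback armed with the id, never with the pointer */
    static void timer_cb(int64_t task_id) {
      task_entry_t *e = pool_acquire(task_id);
      if (e == NULL) {
        printf("task %lld already gone, timer quits\n", (long long)task_id);
        return;
      }
      printf("timer fired for task %s\n", e->name);
      pool_release(e);
    }

    int main(void) {
      int64_t id = pool_add("source-task");
      timer_cb(id);      /* task alive: callback acquires, works, releases */
      pool_remove(id);   /* task dropped before the next tick */
      timer_cb(id);      /* stale id: acquire fails, no use-after-free */
      return 0;
    }

The same shape appears in the rspMonitorFn rewrite later in this commit: the timer is armed with a heap-allocated ref id from streamTaskAllocRefId, the callback re-acquires the task via taosAcquireRef(streamTaskRefPool, taskRefId), and it bails out cleanly (freeing the id) when the acquire returns NULL.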
--- source/dnode/snode/src/snode.c | 8 +- source/dnode/vnode/src/sma/smaRollup.c | 15 +- source/dnode/vnode/src/tq/tq.c | 8 +- source/dnode/vnode/src/tq/tqRead.c | 17 +- source/dnode/vnode/src/tq/tqStreamTask.c | 4 +- source/dnode/vnode/src/tq/tqUtil.c | 14 +- source/dnode/vnode/src/tqCommon/tqCommon.c | 93 +++-- source/libs/stream/src/streamCheckStatus.c | 106 +++--- source/libs/stream/src/streamCheckpoint.c | 101 +++--- source/libs/stream/src/streamDispatch.c | 164 ++++++--- source/libs/stream/src/streamHb.c | 86 +++-- source/libs/stream/src/streamMeta.c | 382 ++++++++++---------- source/libs/stream/src/streamSched.c | 67 ++-- source/libs/stream/src/streamStartHistory.c | 273 +++++++------- source/libs/stream/src/streamStartTask.c | 65 ++-- source/libs/stream/src/streamTimer.c | 10 +- 16 files changed, 769 insertions(+), 644 deletions(-) diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index d61f3d80d3..e8d4663bbb 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -45,17 +45,17 @@ int32_t sndBuildStreamTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProce char *p = streamTaskGetStatus(pTask).name; if (pTask->info.fillHistory) { - sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " child id:%d, level:%d, status:%s fill-history:%d, related stream task:0x%x trigger:%" PRId64 " ms", - SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam); } else { - sndInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + sndInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " child id:%d, level:%d, status:%s fill-history:%d, related fill-task:0x%x trigger:%" PRId64 " ms", - SNODE_HANDLE, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + SNODE_HANDLE, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, pTask->info.selfChildId, pTask->info.taskLevel, p, pTask->info.fillHistory, (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam); } diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 4fdf299e50..80c04a3276 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -238,13 +238,18 @@ int32_t tdFetchTbUidList(SSma *pSma, STbUidStore **ppStore, tb_uid_t suid, tb_ui } static void tdRSmaTaskInit(SStreamMeta *pMeta, SRSmaInfoItem *pItem, SStreamTaskId *pId) { - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + SStreamTask *pTask = NULL; + streamMetaRLock(pMeta); - SStreamTask **ppTask = (SStreamTask **)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask && *ppTask) { - pItem->submitReqVer = (*ppTask)->chkInfo.checkpointVer; - pItem->fetchResultVer = (*ppTask)->info.delaySchedParam; + + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + 
pItem->submitReqVer = pTask->chkInfo.checkpointVer; + pItem->fetchResultVer = pTask->info.delaySchedParam; + streamMetaReleaseTask(pMeta, pTask); } + streamMetaRUnLock(pMeta); } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index b75baea08d..2929121029 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -774,19 +774,19 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV const char* pNext = streamTaskGetStatusStr(pTask->status.taskStatus); if (pTask->info.fillHistory) { - tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " child id:%d, level:%d, cur-status:%s, next-status:%s fill-history:%d, related stream task:0x%x " "delaySched:%" PRId64 " ms, inputVer:%" PRId64, - vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, (int32_t)pTask->streamTaskId.taskId, pTask->info.delaySchedParam, nextProcessVer); } else { - tqInfo("vgId:%d build stream task, s-task:%s, checkpointId:%" PRId64 " checkpointVer:%" PRId64 + tqInfo("vgId:%d build stream task, s-task:%s, %p checkpointId:%" PRId64 " checkpointVer:%" PRId64 " nextProcessVer:%" PRId64 " child id:%d, level:%d, cur-status:%s next-status:%s fill-history:%d, related fill-task:0x%x " "delaySched:%" PRId64 " ms, inputVer:%" PRId64, - vgId, pTask->id.idStr, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, + vgId, pTask->id.idStr, pTask, pChkInfo->checkpointId, pChkInfo->checkpointVer, pChkInfo->nextProcessVer, pTask->info.selfChildId, pTask->info.taskLevel, p, pNext, pTask->info.fillHistory, (int32_t)pTask->hTaskInfo.id.taskId, pTask->info.delaySchedParam, nextProcessVer); diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index f2f85773b5..9d9e7c431a 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -1115,13 +1115,18 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { break; } - SStreamTask* pTask = *(SStreamTask**)pIter; - if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) { - int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd); - if (code != 0) { - tqError("vgId:%d, s-task:%s update qualified table error for stream task", vgId, pTask->id.idStr); - continue; + int64_t refId = *(int64_t*)pIter; + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, refId); + if (pTask != NULL) { + int32_t taskId = pTask->id.taskId; + + if ((pTask->info.taskLevel == TASK_LEVEL__SOURCE) && (pTask->exec.pExecutor != NULL)) { + int32_t code = qUpdateTableListForStreamScanner(pTask->exec.pExecutor, tbUidList, isAdd); + if (code != 0) { + tqError("vgId:%d, s-task:0x%x update qualified table error for stream task", vgId, taskId); + } } + taosReleaseRef(streamTaskRefPool, refId); } } diff --git a/source/dnode/vnode/src/tq/tqStreamTask.c b/source/dnode/vnode/src/tq/tqStreamTask.c index 3ec269ec22..24c892de8b 100644 --- a/source/dnode/vnode/src/tq/tqStreamTask.c +++ b/source/dnode/vnode/src/tq/tqStreamTask.c @@ -79,7 +79,7 @@ static void doStartScanWal(void* param, void* tmrId) { 
SBuildScanWalMsgParam* pParam = (SBuildScanWalMsgParam*)param; - SStreamMeta* pMeta = taosAcquireRef(streamMetaId, pParam->metaId); + SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, pParam->metaId); if (pMeta == NULL) { tqError("metaRid:%" PRId64 " not valid now, stream meta has been freed", pParam->metaId); taosMemoryFree(pParam); @@ -97,7 +97,7 @@ static void doStartScanWal(void* param, void* tmrId) { tqError("vgId:%d failed sched task to scan wal, code:%s", vgId, tstrerror(code)); } - code = taosReleaseRef(streamMetaId, pParam->metaId); + code = taosReleaseRef(streamMetaRefPool, pParam->metaId); if (code) { tqError("vgId:%d failed to release ref for streamMeta, rid:%" PRId64 " code:%s", vgId, pParam->metaId, tstrerror(code)); diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index b4866b8c65..5b51d6a94f 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -683,19 +683,21 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b continue; } - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL) { + STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + SStreamTask* pTask = NULL; + + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { tqError("vgId:%d failed to acquire task:0x%x in retrieving progress", pMeta->vgId, pId->taskId); continue; } - if ((*ppTask)->info.taskLevel != TASK_LEVEL__SOURCE) { + if (pTask->info.taskLevel != TASK_LEVEL__SOURCE) { + streamMetaReleaseTask(pMeta, pTask); continue; } // here we get the required stream source task - SStreamTask* pTask = *ppTask; *fhFinished = !HAS_RELATED_FILLHISTORY_TASK(pTask); int64_t ver = walReaderGetCurrentVer(pTask->exec.pWalReader); @@ -711,6 +713,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b SWalReader* pReader = walOpenReader(pTask->exec.pWalReader->pWal, NULL, 0); if (pReader == NULL) { tqError("failed to open wal reader to extract exec progress, vgId:%d", pMeta->vgId); + streamMetaReleaseTask(pMeta, pTask); continue; } @@ -736,6 +739,7 @@ int32_t tqGetStreamExecInfo(SVnode* pVnode, int64_t streamId, int64_t* pDelay, b } walCloseReader(pReader); + streamMetaReleaseTask(pMeta, pTask); } streamMetaRUnLock(pMeta); diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index a00e92997c..e8d929e4aa 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -138,13 +138,15 @@ int32_t tqStreamStartOneTaskAsync(SStreamMeta* pMeta, SMsgCb* cb, int64_t stream // this is to process request from transaction, always return true. 
int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pMsg, bool restored) { - int32_t vgId = pMeta->vgId; - char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); - int32_t len = pMsg->contLen - sizeof(SMsgHead); - SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS}; - int64_t st = taosGetTimestampMs(); - bool updated = false; - int32_t code = 0; + int32_t vgId = pMeta->vgId; + char* msg = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); + int32_t len = pMsg->contLen - sizeof(SMsgHead); + SRpcMsg rsp = {.info = pMsg->info, .code = TSDB_CODE_SUCCESS}; + int64_t st = taosGetTimestampMs(); + bool updated = false; + int32_t code = 0; + SStreamTask* pTask = NULL; + SStreamTask* pHTask = NULL; SStreamTaskNodeUpdateMsg req = {0}; @@ -170,9 +172,9 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM streamMetaWLock(pMeta); // the task epset may be updated again and again, when replaying the WAL, the task may be in stop status. - STaskId id = {.streamId = req.streamId, .taskId = req.taskId}; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL || *ppTask == NULL) { + STaskId id = {.streamId = req.streamId, .taskId = req.taskId}; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { tqError("vgId:%d failed to acquire task:0x%x when handling update task epset, it may have been dropped", vgId, req.taskId); rsp.code = TSDB_CODE_SUCCESS; @@ -181,12 +183,13 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM return rsp.code; } - SStreamTask* pTask = *ppTask; - const char* idstr = pTask->id.idStr; + const char* idstr = pTask->id.idStr; if (req.transId <= 0) { tqError("vgId:%d invalid update nodeEp task, transId:%d, discard", vgId, req.taskId); rsp.code = TSDB_CODE_SUCCESS; + + streamMetaReleaseTask(pMeta, pTask); streamMetaWUnLock(pMeta); taosArrayDestroy(req.pNodeList); @@ -227,24 +230,23 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM streamTaskStopMonitorCheckRsp(&pTask->taskCheckInfo, pTask->id.idStr); - SStreamTask** ppHTask = NULL; if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { - ppHTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id)); - if (ppHTask == NULL || *ppHTask == NULL) { + code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->hTaskInfo.id, &pHTask); + if (code != 0) { tqError( "vgId:%d failed to acquire fill-history task:0x%x when handling update, may have been dropped already, rel " "stream task:0x%x", vgId, (uint32_t)pTask->hTaskInfo.id.taskId, req.taskId); CLEAR_RELATED_FILLHISTORY_TASK(pTask); } else { - tqDebug("s-task:%s fill-history task update nodeEp along with stream task", (*ppHTask)->id.idStr); - bool updateEpSet = streamTaskUpdateEpsetInfo(*ppHTask, req.pNodeList); + tqDebug("s-task:%s fill-history task update nodeEp along with stream task", pHTask->id.idStr); + bool updateEpSet = streamTaskUpdateEpsetInfo(pHTask, req.pNodeList); if (updateEpSet) { updated = updateEpSet; } - streamTaskResetStatus(*ppHTask); - streamTaskStopMonitorCheckRsp(&(*ppHTask)->taskCheckInfo, (*ppHTask)->id.idStr); + streamTaskResetStatus(pHTask); + streamTaskStopMonitorCheckRsp(&pHTask->taskCheckInfo, pHTask->id.idStr); } } @@ -256,8 +258,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqError("s-task:%s vgId:%d failed to save task, code:%s", idstr, vgId, tstrerror(code)); } - if (ppHTask != NULL) { - code = 
streamMetaSaveTask(pMeta, *ppHTask); + if (pHTask != NULL) { + code = streamMetaSaveTask(pMeta, pHTask); if (code) { tqError("s-task:%s vgId:%d failed to save related history task, code:%s", idstr, vgId, tstrerror(code)); } @@ -271,15 +273,17 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqError("s-task:%s vgId:%d failed to stop task, code:%s", idstr, vgId, tstrerror(code)); } - if (ppHTask != NULL) { - code = streamTaskStop(*ppHTask); + if (pHTask != NULL) { + code = streamTaskStop(pHTask); if (code) { tqError("s-task:%s vgId:%d failed to stop related history task, code:%s", idstr, vgId, tstrerror(code)); } } // keep info - streamMetaAddIntoUpdateTaskList(pMeta, pTask, (ppHTask != NULL) ? (*ppHTask) : NULL, req.transId, st); + streamMetaAddIntoUpdateTaskList(pMeta, pTask, (pHTask != NULL) ? (pHTask) : NULL, req.transId, st); + streamMetaReleaseTask(pMeta, pTask); + streamMetaReleaseTask(pMeta, pHTask); rsp.code = TSDB_CODE_SUCCESS; @@ -643,7 +647,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve if (code < 0) { tqError("failed to add s-task:0x%x into vgId:%d meta, existed:%d, code:%s", vgId, taskId, numOfTasks, tstrerror(code)); - tFreeStreamTask(pTask); return code; } @@ -673,7 +676,6 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve } } else { tqWarn("vgId:%d failed to add s-task:0x%x, since already exists in meta store, total:%d", vgId, taskId, numOfTasks); - tFreeStreamTask(pTask); } return code; @@ -681,25 +683,25 @@ int32_t tqStreamTaskProcessDeployReq(SStreamMeta* pMeta, SMsgCb* cb, int64_t sve int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen) { SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg; + int32_t code = 0; + int32_t vgId = pMeta->vgId; + STaskId hTaskId = {0}; + SStreamTask* pTask = NULL; - int32_t code = 0; - int32_t vgId = pMeta->vgId; - STaskId hTaskId = {0}; tqDebug("vgId:%d receive msg to drop s-task:0x%x", vgId, pReq->taskId); streamMetaWLock(pMeta); - STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if ((ppTask != NULL) && ((*ppTask) != NULL)) { - int32_t unusedRetRef = streamMetaAcquireOneTask(*ppTask); - SStreamTask* pTask = *ppTask; - + STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { if (HAS_RELATED_FILLHISTORY_TASK(pTask)) { hTaskId.streamId = pTask->hTaskInfo.id.streamId; hTaskId.taskId = pTask->hTaskInfo.id.taskId; } + // clear the relationship, and then release the stream tasks, to avoid invalid accessing of already freed + // related stream(history) task streamTaskSetRemoveBackendFiles(pTask); code = streamTaskClearHTaskAttr(pTask, pReq->resetRelHalt); streamMetaReleaseTask(pMeta, pTask); @@ -742,18 +744,19 @@ int32_t tqStreamTaskProcessDropReq(SStreamMeta* pMeta, char* msg, int32_t msgLen int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored, char* msg) { SVUpdateCheckpointInfoReq* pReq = (SVUpdateCheckpointInfoReq*)msg; + int32_t code = 0; + int32_t vgId = pMeta->vgId; + SStreamTask* pTask = NULL; - int32_t code = 0; - int32_t vgId = pMeta->vgId; tqDebug("vgId:%d receive msg to update-checkpoint-info for s-task:0x%x", vgId, pReq->taskId); streamMetaWLock(pMeta); - STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; - SStreamTask** ppTask = 
(SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - - if (ppTask != NULL && (*ppTask) != NULL) { - code = streamTaskUpdateTaskCheckpointInfo(*ppTask, restored, pReq); + STaskId id = {.streamId = pReq->streamId, .taskId = pReq->taskId}; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + code = streamTaskUpdateTaskCheckpointInfo(pTask, restored, pReq); + streamMetaReleaseTask(pMeta, pTask); } else { // failed to get the task. int32_t numOfTasks = streamMetaGetNumOfTasks(pMeta); tqError( @@ -763,7 +766,6 @@ int32_t tqStreamTaskProcessUpdateCheckpointReq(SStreamMeta* pMeta, bool restored } streamMetaWUnLock(pMeta); - // always return success when handling the requirement issued by mnode during transaction. return TSDB_CODE_SUCCESS; } @@ -789,11 +791,6 @@ static int32_t restartStreamTasks(SStreamMeta* pMeta, bool isLeader) { tqInfo("vgId:%d tasks are all updated and stopped, restart all tasks, triggered by transId:%d, ts:%" PRId64, vgId, pMeta->updateInfo.completeTransId, pMeta->updateInfo.completeTs); - while (streamMetaTaskInTimer(pMeta)) { - tqDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId); - taosMsleep(100); - } - streamMetaWLock(pMeta); streamMetaClear(pMeta); diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index c1c54b3c0b..60f8744448 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -299,13 +299,14 @@ void streamTaskStartMonitorCheckRsp(SStreamTask* pTask) { return; } - int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); // add task ref here streamTaskInitTaskCheckInfo(pInfo, &pTask->outputInfo, taosGetTimestampMs()); - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start check-rsp monitor, ref:%d ", pTask->id.idStr, ref); - streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr, vgId, - "check-status-monitor"); + int64_t* pTaskRefId = NULL; + code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTaskRefId, streamTimer, &pInfo->checkRspTmr, vgId, + "check-status-monitor"); + } streamMutexUnlock(&pInfo->checkInfoLock); } @@ -721,21 +722,45 @@ int32_t addDownstreamFailedStatusResultAsync(SMsgCb* pMsgCb, int32_t vgId, int64 return streamTaskSchedTask(pMsgCb, vgId, streamId, taskId, STREAM_EXEC_T_ADD_FAILED_TASK); } +static void doCleanup(SStreamTask* pTask, SArray* pNotReadyList, SArray* pTimeoutList, void* param) { + streamMetaReleaseTask(pTask->pMeta, pTask); + + taosArrayDestroy(pNotReadyList); + taosArrayDestroy(pTimeoutList); + streamTaskFreeRefId(param); +} + // this function is executed in timer thread void rspMonitorFn(void* param, void* tmrId) { - SStreamTask* pTask = param; - SStreamMeta* pMeta = pTask->pMeta; - STaskCheckInfo* pInfo = &pTask->taskCheckInfo; - int32_t vgId = pTask->pMeta->vgId; - int64_t now = taosGetTimestampMs(); - int64_t timeoutDuration = now - pInfo->timeoutStartTs; - const char* id = pTask->id.idStr; int32_t numOfReady = 0; int32_t numOfFault = 0; int32_t numOfNotRsp = 0; int32_t numOfNotReady = 0; int32_t numOfTimeout = 0; - int32_t total = taosArrayGetSize(pInfo->pList); + int64_t taskRefId = *(int64_t*)param; + int64_t now = taosGetTimestampMs(); + SArray* pNotReadyList = NULL; + SArray* pTimeoutList = NULL; + SStreamMeta* pMeta = NULL; + STaskCheckInfo* pInfo = NULL; + int32_t vgId = -1; + int64_t timeoutDuration = 0; + 
const char* id = NULL; + int32_t total = 0; + + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + streamTaskFreeRefId(param); + return; + } + + pMeta = pTask->pMeta; + pInfo = &pTask->taskCheckInfo; + vgId = pTask->pMeta->vgId; + timeoutDuration = now - pInfo->timeoutStartTs; + id = pTask->id.idStr; + total = (int32_t) taosArrayGetSize(pInfo->pList); stDebug("s-task:%s start to do check-downstream-rsp check in tmr", id); @@ -744,12 +769,10 @@ void rspMonitorFn(void* param, void* tmrId) { streamMutexUnlock(&pTask->lock); if (state.state == TASK_STATUS__STOP) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr, ref:%d", id, state.name, vgId, ref); - + stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId); streamTaskCompleteCheckRsp(pInfo, true, id); - // not record the failed of the current task if try to close current vnode + // not record the failure of the current task if try to close current vnode // otherwise, the put of message operation may incur invalid read of message queue. if (!pMeta->closeFlag) { int32_t code = addDownstreamFailedStatusResultAsync(pTask->pMsgCb, vgId, pTask->id.streamId, pTask->id.taskId); @@ -758,33 +781,30 @@ void rspMonitorFn(void* param, void* tmrId) { } } - streamMetaReleaseTask(pMeta, pTask); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } if (state.state == TASK_STATUS__DROPPING || state.state == TASK_STATUS__READY) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr, ref:%d", id, state.name, vgId, ref); + stDebug("s-task:%s status:%s vgId:%d quit from monitor check-rsp tmr", id, state.name, vgId); streamTaskCompleteCheckRsp(pInfo, true, id); - streamMetaReleaseTask(pMeta, pTask); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } streamMutexLock(&pInfo->checkInfoLock); if (pInfo->notReadyTasks == 0) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from monitor rsp tmr, ref:%d", id, state.name, vgId, - ref); + stDebug("s-task:%s status:%s vgId:%d all downstream ready, quit from monitor rsp tmr", id, state.name, vgId); streamTaskCompleteCheckRsp(pInfo, false, id); streamMutexUnlock(&pInfo->checkInfoLock); - streamMetaReleaseTask(pMeta, pTask); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } - SArray* pNotReadyList = taosArrayInit(4, sizeof(int64_t)); - SArray* pTimeoutList = taosArrayInit(4, sizeof(int64_t)); + pNotReadyList = taosArrayInit(4, sizeof(int64_t)); + pTimeoutList = taosArrayInit(4, sizeof(int64_t)); if (state.state == TASK_STATUS__UNINIT) { getCheckRspStatus(pInfo, timeoutDuration, &numOfReady, &numOfFault, &numOfNotRsp, pTimeoutList, pNotReadyList, id); @@ -795,31 +815,25 @@ void rspMonitorFn(void* param, void* tmrId) { // fault tasks detected, not try anymore bool jumpOut = false; if ((numOfReady + numOfFault + numOfNotReady + numOfTimeout + numOfNotRsp) != total) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError( "s-task:%s vgId:%d internal error in handling the check downstream procedure, rsp number is inconsistent, " - "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d", - id, vgId, total, 
numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref); + "stop rspMonitor tmr, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d", + id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); jumpOut = true; } if (numOfFault > 0) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); stDebug( "s-task:%s status:%s vgId:%d all rsp. quit from monitor rsp tmr, since vnode-transfer/leader-change/restart " - "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d", - id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref); + "detected, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, ready:%d", + id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); jumpOut = true; } if (jumpOut) { streamTaskCompleteCheckRsp(pInfo, false, id); streamMutexUnlock(&pInfo->checkInfoLock); - streamMetaReleaseTask(pMeta, pTask); - - taosArrayDestroy(pNotReadyList); - taosArrayDestroy(pTimeoutList); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } } else { // unexpected status @@ -828,11 +842,10 @@ void rspMonitorFn(void* param, void* tmrId) { // checking of downstream tasks has been stopped by other threads if (pInfo->stopCheckProcess == 1) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); stDebug( "s-task:%s status:%s vgId:%d stopped by other threads to check downstream process, total:%d, notRsp:%d, " - "notReady:%d, fault:%d, timeout:%d, ready:%d ref:%d", - id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady, ref); + "notReady:%d, fault:%d, timeout:%d, ready:%d", + id, state.name, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); streamTaskCompleteCheckRsp(pInfo, false, id); streamMutexUnlock(&pInfo->checkInfoLock); @@ -842,10 +855,7 @@ void rspMonitorFn(void* param, void* tmrId) { stError("s-task:%s failed to create async record start failed task, code:%s", id, tstrerror(code)); } - streamMetaReleaseTask(pMeta, pTask); - - taosArrayDestroy(pNotReadyList); - taosArrayDestroy(pTimeoutList); + doCleanup(pTask, pNotReadyList, pTimeoutList, param); return; } @@ -857,7 +867,7 @@ void rspMonitorFn(void* param, void* tmrId) { handleTimeoutDownstreamTasks(pTask, pTimeoutList); } - streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, pTask, streamTimer, &pInfo->checkRspTmr, vgId, + streamTmrStart(rspMonitorFn, CHECK_RSP_CHECK_INTERVAL, param, streamTimer, &pInfo->checkRspTmr, vgId, "check-status-monitor"); streamMutexUnlock(&pInfo->checkInfoLock); @@ -865,7 +875,5 @@ void rspMonitorFn(void* param, void* tmrId) { "s-task:%s vgId:%d continue checking rsp in 300ms, total:%d, notRsp:%d, notReady:%d, fault:%d, timeout:%d, " "ready:%d", id, vgId, total, numOfNotRsp, numOfNotReady, numOfFault, numOfTimeout, numOfReady); - - taosArrayDestroy(pNotReadyList); - taosArrayDestroy(pTimeoutList); + doCleanup(pTask, pNotReadyList, pTimeoutList, NULL); } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index 302090bb37..af2d8b559b 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -345,13 +345,15 @@ int32_t streamProcessCheckpointTriggerBlock(SStreamTask* pTask, SStreamDataBlock SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1); if (old == 0) { - int32_t ref = 
atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start checkpoint-trigger monitor in 10s, ref:%d ", pTask->id.idStr, ref); + stDebug("s-task:%s start checkpoint-trigger monitor in 10s", pTask->id.idStr); - int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, - "trigger-recv-monitor"); - pTmrInfo->launchChkptId = pActiveInfo->activeId; + int64_t* pTaskRefId = NULL; + code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(checkpointTriggerMonitorFn, 200, pTaskRefId, streamTimer, &pTmrInfo->tmrHandle, vgId, + "trigger-recv-monitor"); + pTmrInfo->launchChkptId = pActiveInfo->activeId; + } } else { // already launched, do nothing stError("s-task:%s previous checkpoint-trigger monitor tmr is set, not start new one", pTask->id.idStr); } @@ -890,7 +892,7 @@ int32_t streamTaskBuildCheckpoint(SStreamTask* pTask) { return code; } -static int32_t doChkptStatusCheck(SStreamTask* pTask) { +static int32_t doChkptStatusCheck(SStreamTask* pTask, void* param) { const char* id = pTask->id.idStr; int32_t vgId = pTask->pMeta->vgId; SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; @@ -898,25 +900,24 @@ static int32_t doChkptStatusCheck(SStreamTask* pTask) { // checkpoint-trigger recv flag is set, quit if (pActiveInfo->allUpstreamTriggerRecv) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d all checkpoint-trigger recv, quit from monitor checkpoint-trigger, ref:%d", id, vgId, - ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d all checkpoint-trigger recv, quit from monitor checkpoint-trigger", id, vgId); return -1; } if (pTmrInfo->launchChkptId != pActiveInfo->activeId) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); stWarn("s-task:%s vgId:%d checkpoint-trigger retrieve by previous checkpoint procedure, checkpointId:%" PRId64 - ", quit, ref:%d", - id, vgId, pTmrInfo->launchChkptId, ref); + ", quit", + id, vgId, pTmrInfo->launchChkptId); return -1; } // active checkpoint info is cleared for now if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (pTask->chkInfo.startTs == 0)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr, ref:%d", - id, vgId, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from retrieve checkpoint-trigger send tmr", id, + vgId); return -1; } @@ -964,22 +965,22 @@ static int32_t doFindNotSendUpstream(SStreamTask* pTask, SArray* pList, SArray** return 0; } -static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, SArray* pNotSendList) { +static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, void* param, SArray* pNotSendList) { const char* id = pTask->id.idStr; SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; int32_t vgId = pTask->pMeta->vgId; - int32_t code = doChkptStatusCheck(pTask); + int32_t code = doChkptStatusCheck(pTask, param); if (code) { return code; } code = doFindNotSendUpstream(pTask, pList, &pNotSendList); if (code) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s failed 
to find not send upstream, code:%s, out of tmr, ref:%d", id, tstrerror(code), ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s failed to find not send upstream, code:%s, out of tmr", id, tstrerror(code)); return code; } @@ -993,36 +994,48 @@ static int32_t chkptTriggerRecvMonitorHelper(SStreamTask* pTask, SArray* pNotSen return code; } +static void doCleanup(SStreamTask* pTask, SArray* pList) { + streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pList); +} + void checkpointTriggerMonitorFn(void* param, void* tmrId) { - SStreamTask* pTask = param; - int32_t vgId = pTask->pMeta->vgId; - int64_t now = taosGetTimestampMs(); - const char* id = pTask->id.idStr; - SArray* pNotSendList = NULL; - SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg int32_t code = 0; int32_t numOfNotSend = 0; + SArray* pNotSendList = NULL; + int64_t taskRefId = *(int64_t*)param; + int64_t now = taosGetTimestampMs(); + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + streamTaskFreeRefId(param); + return; + } + + int32_t vgId = pTask->pMeta->vgId; + const char* id = pTask->id.idStr; + SArray* pList = pTask->upstreamInfo.pList; // send msg to retrieve checkpoint trigger msg SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptTriggerMsgTmr; if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, ref:%d quit", id, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stError("s-task:%s source task should not start the checkpoint-trigger monitor fn, quit", id); + doCleanup(pTask, pNotSendList); return; } // check the status every 100ms if (streamTaskShouldStop(pTask)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d quit from monitor checkpoint-trigger", id, vgId); + doCleanup(pTask, pNotSendList); return; } if (++pTmrInfo->activeCounter < 50) { - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(checkpointTriggerMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); return; } @@ -1035,20 +1048,19 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { streamMutexUnlock(&pTask->lock); if (state.state != TASK_STATUS__CK) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d status:%s not in checkpoint status, quit from monitor checkpoint-trigger, ref:%d", id, - vgId, state.name, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d status:%s not in checkpoint status, quit from monitor checkpoint-trigger", id, + vgId, state.name); + doCleanup(pTask, pNotSendList); return; } streamMutexLock(&pActiveInfo->lock); - code = chkptTriggerRecvMonitorHelper(pTask, pNotSendList); + code = chkptTriggerRecvMonitorHelper(pTask, param, pNotSendList); streamMutexUnlock(&pActiveInfo->lock); if (code != TSDB_CODE_SUCCESS) { - streamMetaReleaseTask(pTask->pMeta, pTask); - 
taosArrayDestroy(pNotSendList); + doCleanup(pTask, pNotSendList); return; } @@ -1056,15 +1068,14 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { numOfNotSend = taosArrayGetSize(pNotSendList); if (numOfNotSend > 0) { stDebug("s-task:%s start to monitor checkpoint-trigger in 10s", id); - streamTmrStart(checkpointTriggerMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(checkpointTriggerMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); } else { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr, ref:%d", id, ref); - streamMetaReleaseTask(pTask->pMeta, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s all checkpoint-trigger recved, quit from monitor checkpoint-trigger tmr", id); } - taosArrayDestroy(pNotSendList); + doCleanup(pTask, pNotSendList); } int32_t doSendRetrieveTriggerMsg(SStreamTask* pTask, SArray* pNotSendList) { diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 62d60ff664..ff41008759 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -518,45 +518,66 @@ static void doSendFailedDispatch(SStreamTask* pTask, SDispatchEntry* pEntry, int } } +static void cleanupInMonitor(int32_t taskId, int64_t taskRefId, void* param) { + int32_t ret = taosReleaseRef(streamTaskRefPool, taskRefId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, taskRefId); + } + streamTaskFreeRefId(param); +} + static void doMonitorDispatchData(void* param, void* tmrId) { - SStreamTask* pTask = param; - const char* id = pTask->id.idStr; - int32_t vgId = pTask->pMeta->vgId; - SDispatchMsgInfo* pMsgInfo = &pTask->msgInfo; - int32_t msgId = pMsgInfo->msgId; int32_t code = 0; int64_t now = taosGetTimestampMs(); bool inDispatch = true; + SStreamTask* pTask = NULL; + int64_t taskRefId = *(int64_t*)param; + const char* id = NULL; + int32_t vgId = -1; + SDispatchMsgInfo* pMsgInfo = NULL; + int32_t msgId = -1; - stDebug("s-task:%s start monitor dispatch data", id); + pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + streamTaskFreeRefId(param); + return; + } + + id = pTask->id.idStr; + vgId = pTask->pMeta->vgId; + pMsgInfo = &pTask->msgInfo; + msgId = pMsgInfo->msgId; + + stDebug("s-task:%s start to monitor dispatch data", id); if (streamTaskShouldStop(pTask)) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref); + stDebug("s-task:%s should stop, abort from timer", pTask->id.idStr); setNotInDispatchMonitor(pMsgInfo); + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } // slave task not handle the dispatch, downstream not ready will break the monitor timer // follower not handle the dispatch rsp if ((pTask->pMeta->role == NODE_ROLE_FOLLOWER) || (pTask->status.downstreamReady != 1)) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError("s-task:%s vgId:%d follower or downstream not ready, jump out of monitor tmr, ref:%d", id, vgId, ref); + stError("s-task:%s vgId:%d follower or downstream not ready, jump out of monitor tmr", id, vgId); setNotInDispatchMonitor(pMsgInfo); + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } 
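 /*
  * The timer callbacks touched in this series (doMonitorDispatchData, rspMonitorFn,
  * checkpointTriggerMonitorFn, chkptReadyMsgSendMonitorFn, streamTaskSchedHelper,
  * streamTaskResumeHelper) all move to the same ownership pattern: the timer param is a
  * heap-allocated int64_t refId instead of the raw task pointer. A minimal sketch of that
  * pattern, assuming the taosAcquireRef / streamMetaReleaseTask / streamTaskFreeRefId
  * semantics used in these hunks (the callback name below is illustrative only):
  *
  *   static void exampleTmrFn(void* param, void* tmrId) {
  *     int64_t      taskRefId = *(int64_t*)param;
  *     SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId);
  *     if (pTask == NULL) {             // task already removed from the ref pool
  *       streamTaskFreeRefId(param);    // free the refId holder and end the timer chain
  *       return;
  *     }
  *     // ... periodic work; re-arm via streamTmrStart(..., param, ...) to keep ownership ...
  *     streamMetaReleaseTask(pTask->pMeta, pTask);  // last access to pTask
  *     streamTaskFreeRefId(param);                  // only on paths that do not re-arm
  *   }
  */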
streamMutexLock(&pMsgInfo->lock); if (pTask->outputq.status == TASK_OUTPUT_STATUS__NORMAL) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s not in dispatch procedure, abort from timer, ref:%d", pTask->id.idStr, ref); - + stDebug("s-task:%s not in dispatch procedure, abort from timer", pTask->id.idStr); pMsgInfo->inMonitor = 0; inDispatch = false; } + streamMutexUnlock(&pMsgInfo->lock); if (!inDispatch) { + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } @@ -564,6 +585,7 @@ static void doMonitorDispatchData(void* param, void* tmrId) { if (numOfFailed == 0) { stDebug("s-task:%s no error occurs, check again in %dms", id, DISPATCH_RETRY_INTERVAL_MS); streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); + cleanupInMonitor(pTask->id.taskId, taskRefId, param); return; } @@ -628,18 +650,23 @@ static void doMonitorDispatchData(void* param, void* tmrId) { } if (streamTaskShouldStop(pTask)) { - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s should stop, abort from timer, ref:%d", pTask->id.idStr, ref); + stDebug("s-task:%s should stop, abort from timer", pTask->id.idStr); setNotInDispatchMonitor(pMsgInfo); } else { streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); } + + cleanupInMonitor(pTask->id.taskId, taskRefId, param); } void streamStartMonitorDispatchData(SStreamTask* pTask, int64_t waitDuration) { - int32_t vgId = pTask->pMeta->vgId; - streamTmrStart(doMonitorDispatchData, waitDuration, pTask, streamTimer, &pTask->msgInfo.pRetryTmr, vgId, - "dispatch-monitor"); + int32_t vgId = pTask->pMeta->vgId; + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(doMonitorDispatchData, waitDuration, pTaskRefId, streamTimer, &pTask->msgInfo.pRetryTmr, vgId, + "dispatch-monitor"); + } } static int32_t doAddDispatchBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, @@ -854,9 +881,9 @@ int32_t streamDispatchStreamBlock(SStreamTask* pTask) { } else { streamMutexLock(&pTask->msgInfo.lock); if (pTask->msgInfo.inMonitor == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start dispatch monitor tmr in %dms, ref:%d, dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS, - ref, tstrerror(code)); +// int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); + stDebug("s-task:%s start dispatch monitor tmr in %dms, dispatch code:%s", id, DISPATCH_RETRY_INTERVAL_MS, + tstrerror(code)); streamStartMonitorDispatchData(pTask, DISPATCH_RETRY_INTERVAL_MS); pTask->msgInfo.inMonitor = 1; } else { @@ -911,31 +938,31 @@ int32_t initCheckpointReadyMsg(SStreamTask* pTask, int32_t upstreamNodeId, int32 return TSDB_CODE_SUCCESS; } -static int32_t doTaskChkptStatusCheck(SStreamTask* pTask, int32_t num) { +static int32_t doTaskChkptStatusCheck(SStreamTask* pTask, void* param, int32_t num) { SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; const char* id = pTask->id.idStr; int32_t vgId = pTask->pMeta->vgId; if (pTmrInfo->launchChkptId != pActiveInfo->activeId) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); stWarn("s-task:%s vgId:%d ready-msg send tmr launched by previous checkpoint procedure, checkpointId:%" PRId64 - ", quit, ref:%d", - id, vgId, pTmrInfo->launchChkptId, ref); + ", quit", + id, vgId, pTmrInfo->launchChkptId); return 
-1; } // active checkpoint info is cleared for now if ((pActiveInfo->activeId == 0) || (pActiveInfo->transId == 0) || (num == 0) || (pTask->chkInfo.startTs == 0)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr, ref:%d", id, vgId, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stWarn("s-task:%s vgId:%d active checkpoint may be cleared, quit from readyMsg send tmr", id, vgId); return -1; } if (taosArrayGetSize(pTask->upstreamInfo.pList) != num) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stWarn("s-task:%s vgId:%d upstream number:%d not equals sent readyMsg:%d, quit from readyMsg send tmr, ref:%d", id, - vgId, (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList), num, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stWarn("s-task:%s vgId:%d upstream number:%d not equals sent readyMsg:%d, quit from readyMsg send tmr", id, + vgId, (int32_t)taosArrayGetSize(pTask->upstreamInfo.pList), num); return -1; } @@ -1011,7 +1038,7 @@ static void doSendChkptReadyMsg(SStreamTask* pTask, SArray* pNotRspList, int64_t } } -static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) { +static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, void* param, SArray* pNotRspList) { SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; SArray* pList = pActiveInfo->pReadyMsgList; @@ -1021,16 +1048,15 @@ static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) const char* id = pTask->id.idStr; int32_t notRsp = 0; - int32_t code = doTaskChkptStatusCheck(pTask, num); + int32_t code = doTaskChkptStatusCheck(pTask, param, num); if (code) { return code; } code = doFindNotConfirmUpstream(&pNotRspList, pList, num, vgId, pTask->info.taskLevel, id); if (code) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr, ref:%d", id, - tstrerror(code), ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stError("s-task:%s failed to find not rsp checkpoint-ready downstream, code:%s, out of tmr", id, tstrerror(code)); return code; } @@ -1045,26 +1071,41 @@ static int32_t chkptReadyMsgSendHelper(SStreamTask* pTask, SArray* pNotRspList) } static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { - SStreamTask* pTask = param; - int32_t vgId = pTask->pMeta->vgId; - const char* id = pTask->id.idStr; - SActiveCheckpointInfo* pActiveInfo = pTask->chkInfo.pActiveInfo; - SStreamTmrInfo* pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; SArray* pNotRspList = NULL; int32_t code = 0; int32_t notRsp = 0; + int64_t taskRefId = *(int64_t*)param; + int32_t vgId = -1; + const char* id = NULL; + SActiveCheckpointInfo* pActiveInfo = NULL; + SStreamTmrInfo* pTmrInfo = NULL; + + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + streamTaskFreeRefId(param); + return; + } + + vgId = pTask->pMeta->vgId; + id = pTask->id.idStr; + pActiveInfo = pTask->chkInfo.pActiveInfo; + pTmrInfo = &pActiveInfo->chkptReadyMsgTmr; // check the status every 100ms if (streamTaskShouldStop(pTask)) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d status:stop, quit from monitor checkpoint-trigger, ref:%d", id, vgId, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + 
stDebug("s-task:%s vgId:%d status:stop, quit from monitor checkpoint-trigger", id, vgId); streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pNotRspList); return; } if (++pTmrInfo->activeCounter < 50) { - streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); + streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pNotRspList); return; } @@ -1078,15 +1119,16 @@ static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { // 1. check status in the first place if (state.state != TASK_STATUS__CK) { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); - stDebug("s-task:%s vgId:%d status:%s not in checkpoint, quit from monitor checkpoint-ready, ref:%d", id, vgId, - state.name, ref); + streamCleanBeforeQuitTmr(pTmrInfo, param); + stDebug("s-task:%s vgId:%d status:%s not in checkpoint, quit from monitor checkpoint-ready", id, vgId, + state.name); streamMetaReleaseTask(pTask->pMeta, pTask); + taosArrayDestroy(pNotRspList); return; } streamMutexLock(&pActiveInfo->lock); - code = chkptReadyMsgSendHelper(pTask, pNotRspList); + code = chkptReadyMsgSendHelper(pTask, param, pNotRspList); streamMutexUnlock(&pActiveInfo->lock); if (code != TSDB_CODE_SUCCESS) { @@ -1098,18 +1140,18 @@ static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { notRsp = taosArrayGetSize(pNotRspList); if (notRsp > 0) { // send checkpoint-ready msg again stDebug("s-task:%s start to monitor checkpoint-ready msg recv status in 10s", id); - streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "chkpt-ready-monitor"); } else { - int32_t ref = streamCleanBeforeQuitTmr(pTmrInfo, pTask); + streamCleanBeforeQuitTmr(pTmrInfo, param); stDebug( "s-task:%s vgId:%d checkpoint-ready msg confirmed by all upstream task(s), clear checkpoint-ready msg and quit " - "from timer, ref:%d", - id, vgId, ref); - // release should be the last execution, since pTask may be destroy after it immidiately. - streamMetaReleaseTask(pTask->pMeta, pTask); + "from timer", + id, vgId); } + // release should be the last execution, since pTask may be destroyed after it immediately. 
+ streamMetaReleaseTask(pTask->pMeta, pTask); taosArrayDestroy(pNotRspList); } @@ -1160,15 +1202,17 @@ int32_t streamTaskSendCheckpointReadyMsg(SStreamTask* pTask) { int8_t old = atomic_val_compare_exchange_8(&pTmrInfo->isActive, 0, 1); if (old == 0) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s start checkpoint-ready monitor in 10s, ref:%d ", pTask->id.idStr, ref); - int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); + stDebug("s-task:%s start checkpoint-ready monitor in 10s", pTask->id.idStr); - streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTask, streamTimer, &pTmrInfo->tmrHandle, vgId, - "chkpt-ready-monitor"); + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(chkptReadyMsgSendMonitorFn, 200, pTaskRefId, streamTimer, &pTmrInfo->tmrHandle, vgId, + "chkpt-ready-monitor"); - // mark the timer monitor checkpointId - pTmrInfo->launchChkptId = pActiveInfo->activeId; + // mark the timer monitor checkpointId + pTmrInfo->launchChkptId = pActiveInfo->activeId; + } } else { stError("s-task:%s previous checkpoint-ready monitor tmr is set, not start new one", pTask->id.idStr); } diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index 19391bf7a0..96e853630e 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -21,7 +21,7 @@ #include "ttimer.h" #include "wal.h" -int32_t streamMetaId = 0; +int32_t streamMetaRefPool = 0; struct SMetaHbInfo { tmr_h hbTmr; @@ -123,17 +123,21 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) { for(int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i); - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (pTask == NULL) { + STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + SStreamTask* pTask = NULL; + + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { continue; } - if ((*pTask)->info.fillHistory == 1) { + if (pTask->info.fillHistory == 1) { + streamMetaReleaseTask(pMeta, pTask); continue; } - epsetAssign(&epset, &(*pTask)->info.mnodeEpset); + epsetAssign(&epset, &pTask->info.mnodeEpset); + streamMetaReleaseTask(pMeta, pTask); break; } @@ -159,28 +163,30 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) { for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i); - STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (pTask == NULL) { + STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; + SStreamTask* pTask = NULL; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code != 0) { continue; } // not report the status of fill-history task - if ((*pTask)->info.fillHistory == 1) { + if (pTask->info.fillHistory == 1) { + streamMetaReleaseTask(pMeta, pTask); continue; } - streamMutexLock(&(*pTask)->lock); - STaskStatusEntry entry = streamTaskGetStatusEntry(*pTask); - streamMutexUnlock(&(*pTask)->lock); + streamMutexLock(&pTask->lock); + STaskStatusEntry entry = streamTaskGetStatusEntry(pTask); + streamMutexUnlock(&pTask->lock); entry.inputRate = entry.inputQUsed * 100.0 / (2 * STREAM_TASK_QUEUE_CAPACITY_IN_SIZE); - if ((*pTask)->info.taskLevel == TASK_LEVEL__SINK) { - entry.sinkQuota = (*pTask)->outputInfo.pTokenBucket->quotaRate; - entry.sinkDataSize = 
SIZE_IN_MiB((*pTask)->execInfo.sink.dataSize); + if (pTask->info.taskLevel == TASK_LEVEL__SINK) { + entry.sinkQuota = pTask->outputInfo.pTokenBucket->quotaRate; + entry.sinkDataSize = SIZE_IN_MiB(pTask->execInfo.sink.dataSize); } - SActiveCheckpointInfo* p = (*pTask)->chkInfo.pActiveInfo; + SActiveCheckpointInfo* p = pTask->chkInfo.pActiveInfo; if (p->activeId != 0) { entry.checkpointInfo.failed = (p->failedId >= p->activeId) ? 1 : 0; entry.checkpointInfo.activeId = p->activeId; @@ -188,40 +194,42 @@ int32_t streamMetaSendHbHelper(SStreamMeta* pMeta) { if (entry.checkpointInfo.failed) { stInfo("s-task:%s set kill checkpoint trans in hbMsg, transId:%d, clear the active checkpointInfo", - (*pTask)->id.idStr, p->transId); + pTask->id.idStr, p->transId); - streamMutexLock(&(*pTask)->lock); - streamTaskClearCheckInfo((*pTask), true); - streamMutexUnlock(&(*pTask)->lock); + streamMutexLock(&pTask->lock); + streamTaskClearCheckInfo(pTask, true); + streamMutexUnlock(&pTask->lock); } } - streamMutexLock(&(*pTask)->lock); - entry.checkpointInfo.consensusChkptId = streamTaskCheckIfReqConsenChkptId(*pTask, pMsg->ts); + streamMutexLock(&pTask->lock); + entry.checkpointInfo.consensusChkptId = streamTaskCheckIfReqConsenChkptId(pTask, pMsg->ts); if (entry.checkpointInfo.consensusChkptId) { entry.checkpointInfo.consensusTs = pMsg->ts; } - streamMutexUnlock(&(*pTask)->lock); + streamMutexUnlock(&pTask->lock); - if ((*pTask)->exec.pWalReader != NULL) { - entry.processedVer = walReaderGetCurrentVer((*pTask)->exec.pWalReader) - 1; + if (pTask->exec.pWalReader != NULL) { + entry.processedVer = walReaderGetCurrentVer(pTask->exec.pWalReader) - 1; if (entry.processedVer < 0) { - entry.processedVer = (*pTask)->chkInfo.processedVer; + entry.processedVer = pTask->chkInfo.processedVer; } - walReaderValidVersionRange((*pTask)->exec.pWalReader, &entry.verRange.minVer, &entry.verRange.maxVer); + walReaderValidVersionRange(pTask->exec.pWalReader, &entry.verRange.minVer, &entry.verRange.maxVer); } - addUpdateNodeIntoHbMsg(*pTask, pMsg); + addUpdateNodeIntoHbMsg(pTask, pMsg); p = taosArrayPush(pMsg->pTaskStatus, &entry); if (p == NULL) { - stError("failed to add taskInfo:0x%x in hbMsg, vgId:%d", (*pTask)->id.taskId, pMeta->vgId); + stError("failed to add taskInfo:0x%x in hbMsg, vgId:%d", pTask->id.taskId, pMeta->vgId); } if (!hasMnodeEpset) { - epsetAssign(&epset, &(*pTask)->info.mnodeEpset); + epsetAssign(&epset, &pTask->info.mnodeEpset); hasMnodeEpset = true; } + + streamMetaReleaseTask(pMeta, pTask); } pMsg->numOfTasks = taosArrayGetSize(pMsg->pTaskStatus); @@ -244,9 +252,10 @@ void streamMetaHbToMnode(void* param, void* tmrId) { int32_t vgId = 0; int32_t role = 0; - SStreamMeta* pMeta = taosAcquireRef(streamMetaId, rid); + SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, rid); if (pMeta == NULL) { - stError("invalid rid:%" PRId64 " failed to acquired stream-meta", rid); + stError("invalid meta rid:%" PRId64 " failed to acquired stream-meta", rid); +// taosMemoryFree(param); return; } @@ -256,24 +265,26 @@ void streamMetaHbToMnode(void* param, void* tmrId) { // need to stop, stop now if (pMeta->closeFlag) { pMeta->pHbInfo->hbStart = 0; - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code == TSDB_CODE_SUCCESS) { stDebug("vgId:%d jump out of meta timer", vgId); } else { stError("vgId:%d jump out of meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } +// taosMemoryFree(param); return; } // not leader not send msg if (pMeta->role != NODE_ROLE_LEADER) { 
pMeta->pHbInfo->hbStart = 0; - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code == TSDB_CODE_SUCCESS) { stInfo("vgId:%d role:%d not leader not send hb to mnode", vgId, role); } else { stError("vgId:%d role:%d not leader not send hb to mnodefailed to release the meta rid:%" PRId64, vgId, role, rid); } +// taosMemoryFree(param); return; } @@ -281,7 +292,7 @@ void streamMetaHbToMnode(void* param, void* tmrId) { streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr, vgId, "meta-hb-tmr"); - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code) { stError("vgId:%d in meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } @@ -298,12 +309,13 @@ void streamMetaHbToMnode(void* param, void* tmrId) { if (code) { stError("vgId:%d failed to send hmMsg to mnode, try again in 5s, code:%s", pMeta->vgId, tstrerror(code)); } + streamMetaRUnLock(pMeta); streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, param, streamTimer, &pMeta->pHbInfo->hbTmr, pMeta->vgId, "meta-hb-tmr"); - code = taosReleaseRef(streamMetaId, rid); + code = taosReleaseRef(streamMetaRefPool, rid); if (code) { stError("vgId:%d in meta timer, failed to release the meta rid:%" PRId64, vgId, rid); } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 7e9b60b61a..db46934e47 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -13,7 +13,6 @@ * along with this program. If not, see . */ -#include "executor.h" #include "streamBackendRocksdb.h" #include "streamInt.h" #include "tmisce.h" @@ -28,6 +27,7 @@ static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT; int32_t streamBackendId = 0; int32_t streamBackendCfWrapperId = 0; int32_t taskDbWrapperId = 0; +int32_t streamTaskRefPool = 0; static int32_t streamMetaBegin(SStreamMeta* pMeta); static void streamMetaCloseImpl(void* arg); @@ -41,14 +41,14 @@ SMetaRefMgt gMetaRefMgt; int32_t metaRefMgtInit(); void metaRefMgtCleanup(); -int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid); static void streamMetaEnvInit() { streamBackendId = taosOpenRef(64, streamBackendCleanup); streamBackendCfWrapperId = taosOpenRef(64, streamBackendHandleCleanup); taskDbWrapperId = taosOpenRef(64, taskDbDestroy2); - streamMetaId = taosOpenRef(64, streamMetaCloseImpl); + streamMetaRefPool = taosOpenRef(64, streamMetaCloseImpl); + streamTaskRefPool = taosOpenRef(64, tFreeStreamTask); int32_t code = metaRefMgtInit(); if (code) { @@ -72,7 +72,8 @@ void streamMetaInit() { void streamMetaCleanup() { taosCloseRef(streamBackendId); taosCloseRef(streamBackendCfWrapperId); - taosCloseRef(streamMetaId); + taosCloseRef(streamMetaRefPool); + taosCloseRef(streamTaskRefPool); metaRefMgtCleanup(); streamTimerCleanUp(); @@ -98,16 +99,21 @@ int32_t metaRefMgtInit() { void metaRefMgtCleanup() { void* pIter = taosHashIterate(gMetaRefMgt.pTable, NULL); while (pIter) { - SArray* list = *(SArray**)pIter; - for (int i = 0; i < taosArrayGetSize(list); i++) { - void* rid = taosArrayGetP(list, i); - taosMemoryFree(rid); - } - taosArrayDestroy(list); + int64_t* p = *(int64_t**) pIter; + stInfo("---------------free refId:%"PRId64", %p", *p, p); + + taosMemoryFree(p); + +// SArray* list = *(SArray**)pIter; +// for (int i = 0; i < taosArrayGetSize(list); i++) { +// void* rid = taosArrayGetP(list, i); +// taosMemoryFree(rid); +// } +// taosArrayDestroy(list); pIter = taosHashIterate(gMetaRefMgt.pTable, pIter); 
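 // Assumed ownership here (sketch, based on the metaRefMgtAdd/metaRefMgtRemove changes below):
 // the table is now keyed by the heap-allocated refId pointer itself
 // (taosHashPut(gMetaRefMgt.pTable, &rid, sizeof(rid), &rid, ...)), so each entry iterated
 // above holds that int64_t*; entries not removed earlier via metaRefMgtRemove() are freed
 // in this cleanup loop.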
} - taosHashCleanup(gMetaRefMgt.pTable); + taosHashCleanup(gMetaRefMgt.pTable); streamMutexDestroy(&gMetaRefMgt.mutex); } @@ -117,35 +123,31 @@ int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid) { streamMutexLock(&gMetaRefMgt.mutex); - p = taosHashGet(gMetaRefMgt.pTable, &vgId, sizeof(vgId)); + p = taosHashGet(gMetaRefMgt.pTable, &rid, sizeof(rid)); if (p == NULL) { - SArray* pList = taosArrayInit(8, POINTER_BYTES); - if (pList == NULL) { - return terrno; - } - - p = taosArrayPush(pList, &rid); - if (p == NULL) { - return terrno; - } - - code = taosHashPut(gMetaRefMgt.pTable, &vgId, sizeof(vgId), &pList, sizeof(void*)); + code = taosHashPut(gMetaRefMgt.pTable, &rid, sizeof(rid), &rid, sizeof(void*)); if (code) { stError("vgId:%d failed to put into metaRef table, rid:%" PRId64, (int32_t)vgId, *rid); return code; + } else { + stInfo("add refId:%"PRId64" vgId:%d, %p", *rid, (int32_t)vgId, rid); } } else { - SArray* list = *(SArray**)p; - void* px = taosArrayPush(list, &rid); - if (px == NULL) { - code = terrno; - } + // todo } streamMutexUnlock(&gMetaRefMgt.mutex); return code; } +void metaRefMgtRemove(int64_t* pRefId) { + streamMutexLock(&gMetaRefMgt.mutex); + + taosHashRemove(gMetaRefMgt.pTable, &pRefId, sizeof(pRefId)); + stInfo("remove refId from mgt, refId:%"PRId64", %p", *pRefId, pRefId); + streamMutexUnlock(&gMetaRefMgt.mutex); +} + int32_t streamMetaOpenTdb(SStreamMeta* pMeta) { if (tdbOpen(pMeta->path, 16 * 1024, 1, &pMeta->db, 0, 0, NULL) < 0) { stError("vgId:%d open file:%s failed, stream meta open failed", pMeta->vgId, pMeta->path); @@ -434,7 +436,7 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, pMeta->closeFlag = false; stInfo("vgId:%d open stream meta succ, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId, stage); - pMeta->rid = taosAddRef(streamMetaId, pMeta); + pMeta->rid = taosAddRef(streamMetaRefPool, pMeta); // set the attribute when running on Linux OS TdThreadRwlockAttr attr; @@ -527,17 +529,20 @@ void streamMetaClear(SStreamMeta* pMeta) { // remove all existed tasks in this vnode void* pIter = NULL; while ((pIter = taosHashIterate(pMeta->pTasksMap, pIter)) != NULL) { - SStreamTask* p = *(SStreamTask**)pIter; + int64_t refId = *(int64_t*)pIter; + SStreamTask* p = taosAcquireRef(streamTaskRefPool, refId); + if (p == NULL) { + continue; + } // release the ref by timer if (p->info.delaySchedParam != 0 && p->info.fillHistory == 0) { // one more ref in timer stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt); streamTmrStop(p->schedInfo.pDelayTimer); p->info.delaySchedParam = 0; - streamMetaReleaseTask(pMeta, p); } - streamMetaReleaseTask(pMeta, p); + taosRemoveRef(streamTaskRefPool, refId); } if (pMeta->streamBackendRid != 0) { @@ -567,9 +572,9 @@ void streamMetaClose(SStreamMeta* pMeta) { if (pMeta == NULL) { return; } - int32_t code = taosRemoveRef(streamMetaId, pMeta->rid); + int32_t code = taosRemoveRef(streamMetaRefPool, pMeta->rid); if (code) { - stError("vgId:%d failed to remove ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code)); + stError("vgId:%d failed to remove meta ref:%" PRId64 ", code:%s", pMeta->vgId, pMeta->rid, tstrerror(code)); } } @@ -656,9 +661,16 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) { code = tdbTbUpsert(pMeta->pTaskDb, id, STREAM_TASK_KEY_LEN, buf, len, pMeta->txn); if (code != TSDB_CODE_SUCCESS) { code = terrno; - stError("s-task:%s vgId:%d task meta save to disk failed, code:%s", pTask->id.idStr, vgId, 
tstrerror(terrno)); + stError("s-task:%s vgId:%d refId:%" PRId64 " task meta save to disk failed, remove ref, code:%s", pTask->id.idStr, + vgId, pTask->id.refId, tstrerror(code)); + + int64_t refId = pTask->id.refId; + int32_t ret = taosRemoveRef(streamTaskRefPool, pTask->id.refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id[1], refId); + } } else { - stDebug("s-task:%s vgId:%d task meta save to disk", pTask->id.idStr, vgId); + stDebug("s-task:%s vgId:%d refId:%" PRId64 " task meta save to disk", pTask->id.idStr, vgId, pTask->id.refId); } taosMemoryFree(buf); @@ -683,34 +695,47 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa *pAdded = false; int32_t code = 0; + int64_t refId = 0; STaskId id = streamTaskGetTaskId(pTask); void* p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + if (p != NULL) { stDebug("s-task:%" PRIx64 " already exist in meta, no need to register", id.taskId); + tFreeStreamTask(pTask); return code; } if ((code = pMeta->buildTaskFn(pMeta->ahandle, pTask, ver)) != 0) { + tFreeStreamTask(pTask); return code; } p = taosArrayPush(pMeta->pTaskList, &pTask->id); if (p == NULL) { stError("s-task:0x%" PRIx64 " failed to register task into meta-list, code: out of memory", id.taskId); + tFreeStreamTask(pTask); return terrno; } - code = taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask, POINTER_BYTES); + pTask->id.refId = refId = taosAddRef(streamTaskRefPool, pTask); + code = taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask->id.refId, sizeof(int64_t)); if (code) { stError("s-task:0x%" PRIx64 " failed to register task into meta-list, code: out of memory", id.taskId); + + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId); + } return code; } if ((code = streamMetaSaveTask(pMeta, pTask)) != 0) { + taosRemoveRef(streamTaskRefPool, refId); return code; } if ((code = streamMetaCommit(pMeta)) != 0) { + taosRemoveRef(streamTaskRefPool, refId); return code; } @@ -733,16 +758,72 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta) { } int32_t streamMetaAcquireTaskNoLock(SStreamMeta* pMeta, int64_t streamId, int32_t taskId, SStreamTask** pTask) { - STaskId id = {.streamId = streamId, .taskId = taskId}; - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL || streamTaskShouldStop(*ppTask)) { - *pTask = NULL; + QRY_PARAM_CHECK(pTask); + STaskId id = {.streamId = streamId, .taskId = taskId}; + int64_t* pTaskRefId = (int64_t*)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); + if (pTaskRefId == NULL) { return TSDB_CODE_STREAM_TASK_NOT_EXIST; } - int32_t ref = atomic_add_fetch_32(&(*ppTask)->refCnt, 1); - stTrace("s-task:%s acquire task, ref:%d", (*ppTask)->id.idStr, ref); - *pTask = *ppTask; + SStreamTask* p = taosAcquireRef(streamTaskRefPool, *pTaskRefId); + if (p == NULL) { + stDebug("s-task:%x failed to acquire task refId:%"PRId64", may have been destoried", taskId, *pTaskRefId); + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + if (p->id.refId != *pTaskRefId) { + stFatal("s-task:%x inconsistent refId, task refId:%" PRId64 " try acquire:%" PRId64, taskId, *pTaskRefId, + p->id.refId); + int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, *pTaskRefId); + } + + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + if (streamTaskShouldStop(p)) { + 
stDebug("s-task:%s is stopped, failed to acquire it now", p->id.idStr); + int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, *pTaskRefId); + } + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + stDebug("s-task:%s acquire task, refId:%" PRId64, p->id.idStr, p->id.refId); + *pTask = p; + return TSDB_CODE_SUCCESS; +} + +int32_t streamMetaAcquireTaskUnsafe(SStreamMeta* pMeta, STaskId* pId, SStreamTask** pTask) { + QRY_PARAM_CHECK(pTask); + int64_t* pTaskRefId = (int64_t*)taosHashGet(pMeta->pTasksMap, pId, sizeof(*pId)); + + if (pTaskRefId == NULL) { + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + SStreamTask* p = taosAcquireRef(streamTaskRefPool, *pTaskRefId); + if (p == NULL) { + stDebug("s-task:%" PRIx64 " failed to acquire task refId:%" PRId64 ", may have been destoried", pId->taskId, + *pTaskRefId); + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + if (p->id.refId != *pTaskRefId) { + stFatal("s-task:%" PRIx64 " inconsistent refId, task refId:%" PRId64 " try acquire:%" PRId64, pId->taskId, + *pTaskRefId, p->id.refId); + int32_t ret = taosReleaseRef(streamTaskRefPool, *pTaskRefId); + if (ret) { + stError("s-task:0x%" PRIx64 " failed to release task refId:%" PRId64, pId->taskId, *pTaskRefId); + } + + return TSDB_CODE_STREAM_TASK_NOT_EXIST; + } + + stDebug("s-task:%s acquire task, refId:%" PRId64, p->id.idStr, p->id.refId); + *pTask = p; return TSDB_CODE_SUCCESS; } @@ -753,28 +834,17 @@ int32_t streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t task return code; } -int32_t streamMetaAcquireOneTask(SStreamTask* pTask) { - int32_t ref = atomic_add_fetch_32(&pTask->refCnt, 1); - stTrace("s-task:%s acquire task, ref:%d", pTask->id.idStr, ref); - return ref; -} - void streamMetaReleaseTask(SStreamMeta* UNUSED_PARAM(pMeta), SStreamTask* pTask) { if (pTask == NULL) { return; } int32_t taskId = pTask->id.taskId; - int32_t ref = atomic_sub_fetch_32(&pTask->refCnt, 1); - - // not safe to use the pTask->id.idStr, since pTask may be released by other threads when print logs. 
- if (ref > 0) { - stTrace("s-task:0x%x release task, ref:%d", taskId, ref); - } else if (ref == 0) { - stTrace("s-task:0x%x all refs are gone, free it", taskId); - tFreeStreamTask(pTask); - } else if (ref < 0) { - stError("task ref is invalid, ref:%d, 0x%x", ref, taskId); + int64_t refId = pTask->id.refId; + stDebug("s-task:0x%x release task, refId:%" PRId64, taskId, pTask->id.refId); + int32_t ret = taosReleaseRef(streamTaskRefPool, pTask->id.refId); + if (ret) { + stError("s-task:0x%x failed to release task refId:%" PRId64, taskId, refId); } } @@ -812,13 +882,10 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t int32_t code = 0; STaskId id = {.streamId = streamId, .taskId = taskId}; - // pre-delete operation streamMetaWLock(pMeta); - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask) { - pTask = *ppTask; - + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { // desc the paused task counter if (streamTaskShouldPause(pTask)) { int32_t num = atomic_sub_fetch_32(&pMeta->numOfPausedTasks, 1); @@ -830,43 +897,9 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t if (code) { stError("s-task:0x%" PRIx64 " failed to handle dropping event async, code:%s", id.taskId, tstrerror(code)); } - } else { - stDebug("vgId:%d failed to find the task:0x%x, it may be dropped already", vgId, taskId); - streamMetaWUnLock(pMeta); - return 0; - } - streamMetaWUnLock(pMeta); + stDebug("s-task:0x%x vgId:%d set task status:dropping and start to unregister it", taskId, vgId); - stDebug("s-task:0x%x vgId:%d set task status:dropping and start to unregister it", taskId, vgId); - - while (1) { - int32_t timerActive = 0; - - streamMetaRLock(pMeta); - ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask) { - // to make sure check status will not start the check downstream status when we start to check timerActive count. 
- streamMutexLock(&pTask->taskCheckInfo.checkInfoLock); - timerActive = (*ppTask)->status.timerActive; - streamMutexUnlock(&pTask->taskCheckInfo.checkInfoLock); - } - streamMetaRUnLock(pMeta); - - if (timerActive > 0) { - taosMsleep(100); - stDebug("s-task:0x%" PRIx64 " wait for quit from timer", id.taskId); - } else { - break; - } - } - - // let's do delete of stream task - streamMetaWLock(pMeta); - - ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask) { - pTask = *ppTask; // it is a fill-history task, remove the related stream task's id that points to it if (pTask->info.fillHistory == 0) { int32_t ret = atomic_sub_fetch_32(&pMeta->numOfStreamTasks, 1); @@ -884,21 +917,22 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t if (sizeInList != size) { stError("vgId:%d tasks number not consistent in list:%d and map:%d, ", vgId, sizeInList, size); } - streamMetaWUnLock(pMeta); - - int32_t numOfTmr = pTask->status.timerActive; - if (numOfTmr != 0) { - stError("s-task:%s vgId:%d invalid timer Active record:%d, internal error", pTask->id.idStr, vgId, numOfTmr); - } if (pTask->info.delaySchedParam != 0 && pTask->info.fillHistory == 0) { stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt); streamTmrStop(pTask->schedInfo.pDelayTimer); pTask->info.delaySchedParam = 0; - streamMetaReleaseTask(pMeta, pTask); + } + + + int64_t refId = pTask->id.refId; + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%"PRId64, (int32_t) id.taskId, refId); } streamMetaReleaseTask(pMeta, pTask); + streamMetaWUnLock(pMeta); } else { stDebug("vgId:%d failed to find the task:0x%x, it may have been dropped already", vgId, taskId); streamMetaWUnLock(pMeta); @@ -1008,13 +1042,13 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { return; } + vgId = pMeta->vgId; pRecycleList = taosArrayInit(4, sizeof(STaskId)); if (pRecycleList == NULL) { stError("vgId:%d failed prepare load all tasks, code:out of memory", vgId); return; } - vgId = pMeta->vgId; stInfo("vgId:%d load stream tasks from meta files", vgId); code = tdbTbcOpen(pMeta->pTaskDb, &pCur, NULL); @@ -1058,9 +1092,9 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { if (pTask->status.taskStatus == TASK_STATUS__DROPPING) { int32_t taskId = pTask->id.taskId; - tFreeStreamTask(pTask); - STaskId id = streamTaskGetTaskId(pTask); + + tFreeStreamTask(pTask); void* px = taosArrayPush(pRecycleList, &id); if (px == NULL) { stError("s-task:0x%x failed record the task into recycle list due to out of memory", taskId); @@ -1096,13 +1130,22 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { continue; } - if (taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask, POINTER_BYTES) != 0) { - stError("s-task:0x%x failed to put into hashTable, code:%s, continue", pTask->id.taskId, tstrerror(terrno)); - void* px = taosArrayPop(pMeta->pTaskList); - tFreeStreamTask(pTask); + pTask->id.refId = taosAddRef(streamTaskRefPool, pTask); + + if (taosHashPut(pMeta->pTasksMap, &id, sizeof(id), &pTask->id.refId, sizeof(int64_t)) != 0) { + int64_t refId = pTask->id.refId; + stError("s-task:0x%x failed to put into hashTable, code:%s, remove task ref, refId:%" PRId64 " continue", + pTask->id.taskId, tstrerror(terrno), refId); + + void* px = taosArrayPop(pMeta->pTaskList); + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret != 0) { + stError("s-task:0x%x failed to remove ref, refId:%" PRId64, (int32_t)id.taskId, 
refId); + } continue; } + stInfo("s-task:0x%x vgId:%d set refId:%"PRId64, (int32_t) id.taskId, vgId, pTask->id.refId); if (pTask->info.fillHistory == 0) { int32_t val = atomic_add_fetch_32(&pMeta->numOfStreamTasks, 1); } @@ -1138,72 +1181,22 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { } } -bool streamMetaTaskInTimer(SStreamMeta* pMeta) { - bool inTimer = false; - streamMetaRLock(pMeta); - - void* pIter = NULL; - while (1) { - pIter = taosHashIterate(pMeta->pTasksMap, pIter); - if (pIter == NULL) { - break; - } - - SStreamTask* pTask = *(SStreamTask**)pIter; - if (pTask->status.timerActive >= 1) { - stDebug("s-task:%s in timer, blocking tasks in vgId:%d restart, set closing again", pTask->id.idStr, pMeta->vgId); - int32_t code = streamTaskStop(pTask); - if (code) { - stError("s-task:%s failed to stop task, code:%s", pTask->id.idStr, tstrerror(code)); - } - inTimer = true; - } - } - - streamMetaRUnLock(pMeta); - return inTimer; -} - void streamMetaNotifyClose(SStreamMeta* pMeta) { int32_t vgId = pMeta->vgId; int64_t startTs = 0; int32_t sendCount = 0; - streamMetaGetHbSendInfo(pMeta->pHbInfo, &startTs, &sendCount); + streamMetaGetHbSendInfo(pMeta->pHbInfo, &startTs, &sendCount); stInfo("vgId:%d notify all stream tasks that current vnode is closing. isLeader:%d startHb:%" PRId64 ", totalHb:%d", vgId, (pMeta->role == NODE_ROLE_LEADER), startTs, sendCount); // wait for the stream meta hb function stopping streamMetaWaitForHbTmrQuit(pMeta); - - streamMetaWLock(pMeta); - pMeta->closeFlag = true; - void* pIter = NULL; - while (1) { - pIter = taosHashIterate(pMeta->pTasksMap, pIter); - if (pIter == NULL) { - break; - } - - SStreamTask* pTask = *(SStreamTask**)pIter; - stDebug("vgId:%d s-task:%s set task closing flag", vgId, pTask->id.idStr); - int32_t code = streamTaskStop(pTask); - if (code) { - stError("vgId:%d failed to stop task:0x%x, code:%s", vgId, pTask->id.taskId, tstrerror(code)); - } - } - - streamMetaWUnLock(pMeta); stDebug("vgId:%d start to check all tasks for closing", vgId); int64_t st = taosGetTimestampMs(); - while (streamMetaTaskInTimer(pMeta)) { - stDebug("vgId:%d some tasks in timer, wait for 100ms and recheck", pMeta->vgId); - taosMsleep(100); - } - streamMetaRLock(pMeta); SArray* pTaskList = NULL; @@ -1211,14 +1204,34 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) { if (code != TSDB_CODE_SUCCESS) { } - streamMetaRUnLock(pMeta); + int32_t numOfTasks = taosArrayGetSize(pTaskList); + for (int32_t i = 0; i < numOfTasks; ++i) { + SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = NULL; - if (pTaskList != NULL) { - taosArrayDestroy(pTaskList); + code = streamMetaAcquireTaskNoLock(pMeta, pTaskId->streamId, pTaskId->taskId, &pTask); + if (code != TSDB_CODE_SUCCESS) { + continue; + } + + int64_t refId = pTask->id.refId; + int32_t ret = streamTaskStop(pTask); + if (ret) { + stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret)); + } + + streamMetaReleaseTask(pMeta, pTask); + ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret) { + stError("vgId:%d failed to remove task:0x%x, refId:%"PRId64, pMeta->vgId, pTaskId->taskId, refId); + } } - int64_t el = taosGetTimestampMs() - st; - stDebug("vgId:%d all stream tasks are not in timer, continue close, elapsed time:%" PRId64 " ms", pMeta->vgId, el); + taosArrayDestroy(pTaskList); + + double el = (taosGetTimestampMs() - st) / 1000.0; + stDebug("vgId:%d stop all %d task(s) completed, elapsed time:%.2f Sec.", pMeta->vgId, numOfTasks, el); + streamMetaRUnLock(pMeta); } void 
streamMetaStartHb(SStreamMeta* pMeta) { @@ -1228,12 +1241,12 @@ void streamMetaStartHb(SStreamMeta* pMeta) { return; } + *pRid = pMeta->rid; int32_t code = metaRefMgtAdd(pMeta->vgId, pRid); if (code) { return; } - *pRid = pMeta->rid; streamMetaHbToMnode(pRid, NULL); } @@ -1308,13 +1321,15 @@ bool streamMetaAllTasksReady(const SStreamMeta* pMeta) { for (int32_t i = 0; i < num; ++i) { SStreamTaskId* pId = taosArrayGet(pMeta->pTaskList, i); STaskId id = {.streamId = pId->streamId, .taskId = pId->taskId}; - SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (ppTask == NULL) { - continue; - } + SStreamTask* pTask = NULL; + int32_t code = streamMetaAcquireTaskUnsafe((SStreamMeta*)pMeta, &id, &pTask); - if ((*ppTask)->status.downstreamReady == 0) { - return false; + if (code == 0) { + if (pTask->status.downstreamReady == 0) { + streamMetaReleaseTask((SStreamMeta*)pMeta, pTask); + return false; + } + streamMetaReleaseTask((SStreamMeta*)pMeta, pTask); } } @@ -1331,10 +1346,13 @@ int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) { for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pTaskId = taosArrayGet(pMeta->pTaskList, i); - - STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; - SStreamTask** pTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - streamTaskResetStatus(*pTask); + STaskId id = {.streamId = pTaskId->streamId, .taskId = pTaskId->taskId}; + SStreamTask* pTask = NULL; + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + streamTaskResetStatus(pTask); + streamMetaReleaseTask(pMeta, pTask); + } } return 0; @@ -1343,7 +1361,7 @@ int32_t streamMetaResetTaskStatus(SStreamMeta* pMeta) { void streamMetaAddIntoUpdateTaskList(SStreamMeta* pMeta, SStreamTask* pTask, SStreamTask* pHTask, int32_t transId, int64_t startTs) { const char* id = pTask->id.idStr; - int32_t vgId = pTask->pMeta->vgId; + int32_t vgId = pMeta->vgId; int32_t code = 0; // keep the already updated info diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index cdaa603e38..7c77797ef9 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -22,13 +22,13 @@ static void streamTaskSchedHelper(void* param, void* tmrId); void streamSetupScheduleTrigger(SStreamTask* pTask) { int64_t delaySchema = pTask->info.delaySchedParam; if (delaySchema != 0 && pTask->info.fillHistory == 0) { - int32_t ref = streamMetaAcquireOneTask(pTask); - stDebug("s-task:%s setup scheduler trigger, ref:%d delay:%" PRId64 " ms", pTask->id.idStr, ref, - pTask->info.delaySchedParam); - - streamTmrStart(streamTaskSchedHelper, (int32_t)delaySchema, pTask, streamTimer, &pTask->schedInfo.pDelayTimer, - pTask->pMeta->vgId, "sched-tmr"); - pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE; + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(streamTaskSchedHelper, (int32_t)delaySchema, pTaskRefId, streamTimer, + &pTask->schedInfo.pDelayTimer, pTask->pMeta->vgId, "sched-tmr"); + pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE; + } } } @@ -75,49 +75,65 @@ void streamTaskClearSchedIdleInfo(SStreamTask* pTask) { pTask->status.schedIdleT void streamTaskSetIdleInfo(SStreamTask* pTask, int32_t idleTime) { pTask->status.schedIdleTime = idleTime; } void streamTaskResumeInFuture(SStreamTask* pTask) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s task should idle, add into timer to retry 
in %dms, ref:%d", pTask->id.idStr, - pTask->status.schedIdleTime, ref); + stDebug("s-task:%s task should idle, add into timer to retry in %dms", pTask->id.idStr, + pTask->status.schedIdleTime); // add one ref count for task - int32_t unusedRetRef = streamMetaAcquireOneTask(pTask); - streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTask, streamTimer, &pTask->schedInfo.pIdleTimer, - pTask->pMeta->vgId, "resume-task-tmr"); + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(streamTaskResumeHelper, pTask->status.schedIdleTime, pTaskRefId, streamTimer, + &pTask->schedInfo.pIdleTimer, pTask->pMeta->vgId, "resume-task-tmr"); + } } ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// void streamTaskResumeHelper(void* param, void* tmrId) { - SStreamTask* pTask = (SStreamTask*)param; + int32_t code = 0; + int64_t taskRefId = *(int64_t*)param; + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + streamTaskFreeRefId(param); + return; + } + SStreamTaskId* pId = &pTask->id; SStreamTaskState p = streamTaskGetStatus(pTask); - int32_t code = 0; if (p.state == TASK_STATUS__DROPPING || p.state == TASK_STATUS__STOP) { int8_t status = streamTaskSetSchedStatusInactive(pTask); TAOS_UNUSED(status); - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s not resume task, ref:%d", pId->idStr, p.name, ref); - + stDebug("s-task:%s status:%s not resume task", pId->idStr, p.name); streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); return; } code = streamTaskSchedTask(pTask->pMsgCb, pTask->info.nodeId, pId->streamId, pId->taskId, STREAM_EXEC_T_RESUME_TASK); - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); if (code) { - stError("s-task:%s sched task failed, code:%s, ref:%d", pId->idStr, tstrerror(code), ref); + stError("s-task:%s sched task failed, code:%s", pId->idStr, tstrerror(code)); } else { - stDebug("trigger to resume s-task:%s after idled for %dms, ref:%d", pId->idStr, pTask->status.schedIdleTime, ref); + stDebug("trigger to resume s-task:%s after idled for %dms", pId->idStr, pTask->status.schedIdleTime); // release the task ref count streamTaskClearSchedIdleInfo(pTask); - streamMetaReleaseTask(pTask->pMeta, pTask); } + + streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); } void streamTaskSchedHelper(void* param, void* tmrId) { - SStreamTask* pTask = (void*)param; + int64_t taskRefId = *(int64_t*)param; + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + streamTaskFreeRefId(param); + return; + } + const char* id = pTask->id.idStr; int32_t nextTrigger = (int32_t)pTask->info.delaySchedParam; int32_t vgId = pTask->pMeta->vgId; @@ -127,6 +143,8 @@ void streamTaskSchedHelper(void* param, void* tmrId) { if (streamTaskShouldStop(pTask) || streamTaskShouldPause(pTask)) { stDebug("s-task:%s should stop, jump out of schedTimer", id); + streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); return; } @@ -171,6 +189,7 @@ void streamTaskSchedHelper(void* param, void* tmrId) { } _end: - streamTmrStart(streamTaskSchedHelper, nextTrigger, pTask, streamTimer, &pTask->schedInfo.pDelayTimer, vgId, + 
streamTmrStart(streamTaskSchedHelper, nextTrigger, param, streamTimer, &pTask->schedInfo.pDelayTimer, vgId, "sched-run-tmr"); + streamMetaReleaseTask(pTask->pMeta, pTask); } diff --git a/source/libs/stream/src/streamStartHistory.c b/source/libs/stream/src/streamStartHistory.c index 4d7bf2ba87..54026f5db2 100644 --- a/source/libs/stream/src/streamStartHistory.c +++ b/source/libs/stream/src/streamStartHistory.c @@ -15,6 +15,7 @@ #include "streamInt.h" #include "streamsm.h" +#include "tref.h" #include "trpc.h" #include "ttimer.h" #include "wal.h" @@ -24,7 +25,7 @@ #define SCANHISTORY_IDLE_TICK ((SCANHISTORY_MAX_IDLE_TIME * 1000) / SCANHISTORY_IDLE_TIME_SLICE) typedef struct SLaunchHTaskInfo { - SStreamMeta* pMeta; + int64_t metaRid; STaskId id; STaskId hTaskId; } SLaunchHTaskInfo; @@ -89,7 +90,7 @@ void streamExecScanHistoryInFuture(SStreamTask* pTask, int32_t idleDuration) { // add ref for task SStreamTask* p = NULL; - int32_t code = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, &p); + int32_t code = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, &p); if (p == NULL || code != 0) { stError("s-task:0x%x failed to acquire task, status:%s, not exec scan-history data", pTask->id.taskId, streamTaskGetStatus(pTask).name); @@ -98,10 +99,13 @@ void streamExecScanHistoryInFuture(SStreamTask* pTask, int32_t idleDuration) { pTask->schedHistoryInfo.numOfTicks = numOfTicks; - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s scan-history resumed in %.2fs, ref:%d", pTask->id.idStr, numOfTicks * 0.1, ref); - streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer, - &pTask->schedHistoryInfo.pTimer, vgId, "history-task"); + stDebug("s-task:%s scan-history resumed in %.2fs", pTask->id.idStr, numOfTicks * 0.1); + int64_t* pTaskRefId = NULL; + int32_t ret = streamTaskAllocRefId(pTask, &pTaskRefId); + if (ret == 0) { + streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTaskRefId, streamTimer, + &pTask->schedHistoryInfo.pTimer, vgId, "history-task"); + } } int32_t streamTaskStartScanHistory(SStreamTask* pTask) { @@ -220,42 +224,32 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { // Set the execution conditions, including the query time window and the version range streamMetaRLock(pMeta); - SStreamTask** pHTask = taosHashGet(pMeta->pTasksMap, &pTask->hTaskInfo.id, sizeof(pTask->hTaskInfo.id)); + SStreamTask* pHisTask = NULL; + code = streamMetaAcquireTaskUnsafe(pMeta, &pTask->hTaskInfo.id, &pHisTask); streamMetaRUnLock(pMeta); - if (pHTask != NULL) { // it is already added into stream meta store. - SStreamTask* pHisTask = NULL; - code = streamMetaAcquireTask(pMeta, hStreamId, hTaskId, &pHisTask); - if (pHisTask == NULL) { - stDebug("s-task:%s failed acquire and start fill-history task, it may have been dropped/stopped", idStr); - code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, false); + if (code == 0) { // it is already added into stream meta store. 
+ if (pHisTask->status.downstreamReady == 1) { // it's ready now, do nothing + stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr); + code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, true); if (code) { stError("s-task:%s failed to record start task status, code:%s", idStr, tstrerror(code)); } - } else { - if (pHisTask->status.downstreamReady == 1) { // it's ready now, do nothing - stDebug("s-task:%s fill-history task is ready, no need to check downstream", pHisTask->id.idStr); - code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, true); - if (code) { - stError("s-task:%s failed to record start task status, code:%s", idStr, tstrerror(code)); - } - } else { // exist, but not ready, continue check downstream task status - if (pHisTask->pBackend == NULL) { - code = pMeta->expandTaskFn(pHisTask); - if (code != TSDB_CODE_SUCCESS) { - streamMetaAddFailedTaskSelf(pHisTask, now); - stError("s-task:%s failed to expand fill-history task, code:%s", pHisTask->id.idStr, tstrerror(code)); - } - } - - if (code == TSDB_CODE_SUCCESS) { - checkFillhistoryTaskStatus(pTask, pHisTask); + } else { // exist, but not ready, continue check downstream task status + if (pHisTask->pBackend == NULL) { + code = pMeta->expandTaskFn(pHisTask); + if (code != TSDB_CODE_SUCCESS) { + streamMetaAddFailedTaskSelf(pHisTask, now); + stError("s-task:%s failed to expand fill-history task, code:%s", pHisTask->id.idStr, tstrerror(code)); } } - streamMetaReleaseTask(pMeta, pHisTask); + if (code == TSDB_CODE_SUCCESS) { + checkFillhistoryTaskStatus(pTask, pHisTask); + } } + streamMetaReleaseTask(pMeta, pHisTask); return code; } else { return launchNotBuiltFillHistoryTask(pTask); @@ -296,14 +290,14 @@ void notRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, SStreamMeta* pMeta = pTask->pMeta; SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); +// int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); int32_t code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); if (code) { stError("s-task:%s failed to record the start task status, code:%s", pTask->id.idStr, tstrerror(code)); } else { - stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x, ref:%d", - pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId, ref); + stError("s-task:%s max retry:%d reached, quit from retrying launch related fill-history task:0x%x", + pTask->id.idStr, MAX_RETRY_LAUNCH_HISTORY_TASK, (int32_t)pHTaskInfo->id.taskId); } pHTaskInfo->id.taskId = 0; @@ -315,9 +309,9 @@ void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, i SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; if (streamTaskShouldStop(pTask)) { // record the failure - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64 ", ref:%d", pInfo->id.taskId, - pInfo->hTaskId.taskId, ref); +// int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); + stDebug("s-task:0x%" PRIx64 " stopped, not launch rel history task:0x%" PRIx64, pInfo->id.taskId, + pInfo->hTaskId.taskId); int32_t code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); if (code) { @@ -336,30 +330,60 @@ 
void doRetryLaunchFillHistoryTask(SStreamTask* pTask, SLaunchHTaskInfo* pInfo, i } } +static void doCleanup(SStreamTask* pTask, int64_t metaRid, SLaunchHTaskInfo* pInfo) { + SStreamMeta* pMeta = pTask->pMeta; + int32_t vgId = pMeta->vgId; + + streamMetaReleaseTask(pMeta, pTask); + int32_t ret = taosReleaseRef(streamMetaRefPool, metaRid); + if (ret) { + stError("vgId:%d failed to release meta refId:%"PRId64, vgId, metaRid); + } + + if (pInfo != NULL) { + taosMemoryFree(pInfo); + } +} + void tryLaunchHistoryTask(void* param, void* tmrId) { SLaunchHTaskInfo* pInfo = param; - SStreamMeta* pMeta = pInfo->pMeta; + int64_t metaRid = pInfo->metaRid; int64_t now = taosGetTimestampMs(); int32_t code = 0; + SStreamTask* pTask = NULL; + int32_t vgId = 0; + + SStreamMeta* pMeta = taosAcquireRef(streamMetaRefPool, metaRid); + if (pMeta == NULL) { + stError("invalid meta rid:%" PRId64 " failed to acquired stream-meta", metaRid); + taosMemoryFree(pInfo); + return; + } + + vgId = pMeta->vgId; streamMetaWLock(pMeta); - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasksMap, &pInfo->id, sizeof(pInfo->id)); - if (ppTask == NULL || *ppTask == NULL) { + code = streamMetaAcquireTaskUnsafe(pMeta, &pInfo->id, &pTask); + if (code != 0) { stError("s-task:0x%x and rel fill-history task:0x%" PRIx64 " all have been destroyed, not launch", (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId); streamMetaWUnLock(pMeta); + int32_t ret = taosReleaseRef(streamMetaRefPool, metaRid); + if (ret) { + stError("vgId:%d failed to release meta refId:%"PRId64, vgId, metaRid); + } + // already dropped, no need to set the failure info into the stream task meta. taosMemoryFree(pInfo); return; } - if (streamTaskShouldStop(*ppTask)) { - char* p = streamTaskGetStatus(*ppTask).name; - int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1); - stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d, ref:%d", - (*ppTask)->id.idStr, p, (*ppTask)->hTaskInfo.retryTimes, ref); + if (streamTaskShouldStop(pTask)) { + char* p = streamTaskGetStatus(pTask).name; + stDebug("s-task:%s status:%s should stop, quit launch fill-history task timer, retry:%d", pTask->id.idStr, p, + pTask->hTaskInfo.retryTimes); streamMetaWUnLock(pMeta); @@ -369,77 +393,54 @@ void tryLaunchHistoryTask(void* param, void* tmrId) { stError("s-task:0x%" PRId64 " failed to record the start task status, code:%s", pInfo->hTaskId.taskId, tstrerror(code)); } - taosMemoryFree(pInfo); + + doCleanup(pTask, metaRid, pInfo); return; } - SStreamTask* pTask = NULL; - code = streamMetaAcquireTaskNoLock(pMeta, pInfo->id.streamId, pInfo->id.taskId, &pTask); - if (code != TSDB_CODE_SUCCESS) { - // todo - } streamMetaWUnLock(pMeta); - if (pTask != NULL) { - SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; - - pHTaskInfo->tickCount -= 1; - if (pHTaskInfo->tickCount > 0) { - streamTmrStart(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer, - pTask->pMeta->vgId, " start-history-task-tmr"); - streamMetaReleaseTask(pMeta, pTask); - return; - } - - if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) { - notRetryLaunchFillHistoryTask(pTask, pInfo, now); - } else { // not reach the limitation yet, let's continue retrying launch related fill-history task. 
- streamTaskSetRetryInfoForLaunch(pHTaskInfo); - if (pTask->status.timerActive < 1) { - stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive); - return; - } - - // abort the timer if intend to stop task - SStreamTask* pHTask = NULL; - code = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId, &pHTask); - if (pHTask == NULL) { - doRetryLaunchFillHistoryTask(pTask, pInfo, now); - streamMetaReleaseTask(pMeta, pTask); - return; - } else { - if (pHTask->pBackend == NULL) { - code = pMeta->expandTaskFn(pHTask); - if (code != TSDB_CODE_SUCCESS) { - streamMetaAddFailedTaskSelf(pHTask, now); - stError("failed to expand fill-history task:%s, code:%s", pHTask->id.idStr, tstrerror(code)); - } - } - - if (code == TSDB_CODE_SUCCESS) { - checkFillhistoryTaskStatus(pTask, pHTask); - // not in timer anymore - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:0x%x fill-history task launch completed, retry times:%d, ref:%d", (int32_t)pInfo->id.taskId, - pHTaskInfo->retryTimes, ref); - } - streamMetaReleaseTask(pMeta, pHTask); - } - } - - streamMetaReleaseTask(pMeta, pTask); - } else { - code = streamMetaAddTaskLaunchResult(pMeta, pInfo->hTaskId.streamId, pInfo->hTaskId.taskId, 0, now, false); - if (code) { - stError("s-task:%s failed to record the start task status, code:%s", pTask->id.idStr, tstrerror(code)); - } - - int32_t ref = atomic_sub_fetch_32(&(*ppTask)->status.timerActive, 1); - stError("s-task:0x%x rel fill-history task:0x%" PRIx64 " may have been destroyed, not launch, ref:%d", - (int32_t)pInfo->id.taskId, pInfo->hTaskId.taskId, ref); + SHistoryTaskInfo* pHTaskInfo = &pTask->hTaskInfo; + pHTaskInfo->tickCount -= 1; + if (pHTaskInfo->tickCount > 0) { + streamTmrStart(tryLaunchHistoryTask, LAUNCH_HTASK_INTERVAL, pInfo, streamTimer, &pHTaskInfo->pTimer, + pTask->pMeta->vgId, " start-history-task-tmr"); + doCleanup(pTask, metaRid, NULL); + return; } - taosMemoryFree(pInfo); + if (pHTaskInfo->retryTimes > MAX_RETRY_LAUNCH_HISTORY_TASK) { + notRetryLaunchFillHistoryTask(pTask, pInfo, now); + } else { // not reach the limitation yet, let's continue retrying launch related fill-history task. 
+ streamTaskSetRetryInfoForLaunch(pHTaskInfo); + + // abort the timer if intend to stop task + SStreamTask* pHTask = NULL; + code = streamMetaAcquireTask(pMeta, pHTaskInfo->id.streamId, pHTaskInfo->id.taskId, &pHTask); + if (pHTask == NULL) { + doRetryLaunchFillHistoryTask(pTask, pInfo, now); + doCleanup(pTask, metaRid, NULL); + return; + } else { + if (pHTask->pBackend == NULL) { + code = pMeta->expandTaskFn(pHTask); + if (code != TSDB_CODE_SUCCESS) { + streamMetaAddFailedTaskSelf(pHTask, now); + stError("failed to expand fill-history task:%s, code:%s", pHTask->id.idStr, tstrerror(code)); + } + } + + if (code == TSDB_CODE_SUCCESS) { + checkFillhistoryTaskStatus(pTask, pHTask); + // not in timer anymore + stDebug("s-task:0x%x fill-history task launch completed, retry times:%d", (int32_t)pInfo->id.taskId, + pHTaskInfo->retryTimes); + } + streamMetaReleaseTask(pMeta, pHTask); + } + } + + doCleanup(pTask, metaRid, pInfo); } int32_t createHTaskLaunchInfo(SStreamMeta* pMeta, STaskId* pTaskId, int64_t hStreamId, int32_t hTaskId, @@ -455,7 +456,7 @@ int32_t createHTaskLaunchInfo(SStreamMeta* pMeta, STaskId* pTaskId, int64_t hStr (*pInfo)->hTaskId.streamId = hStreamId; (*pInfo)->hTaskId.taskId = hTaskId; - (*pInfo)->pMeta = pMeta; + (*pInfo)->metaRid = pMeta->rid; return TSDB_CODE_SUCCESS; } @@ -485,12 +486,10 @@ int32_t launchNotBuiltFillHistoryTask(SStreamTask* pTask) { // check for the timer if (pTask->hTaskInfo.pTimer == NULL) { - int32_t ref = atomic_add_fetch_32(&pTask->status.timerActive, 1); pTask->hTaskInfo.pTimer = taosTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer); if (pTask->hTaskInfo.pTimer == NULL) { - ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stError("s-task:%s failed to start timer, related fill-history task not launched, ref:%d", idStr, ref); + stError("s-task:%s failed to start timer, related fill-history task not launched", idStr); taosMemoryFree(pInfo); code = streamMetaAddTaskLaunchResult(pMeta, hStreamId, hTaskId, pExecInfo->checkTs, pExecInfo->readyTs, false); @@ -500,18 +499,8 @@ int32_t launchNotBuiltFillHistoryTask(SStreamTask* pTask) { return terrno; } - if (ref < 1) { - stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive); - return TSDB_CODE_STREAM_INTERNAL_ERROR; - } - - stDebug("s-task:%s set timer active flag, ref:%d", idStr, ref); + stDebug("s-task:%s set timer active flag", idStr); } else { // timer exists - if (pTask->status.timerActive < 1) { - stError("s-task:%s invalid timerActive recorder:%d, abort timer", pTask->id.idStr, pTask->status.timerActive); - return TSDB_CODE_STREAM_INTERNAL_ERROR; - } - stDebug("s-task:%s set timer active flag, task timer not null", idStr); streamTmrStart(tryLaunchHistoryTask, WAIT_FOR_MINIMAL_INTERVAL, pInfo, streamTimer, &pTask->hTaskInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr"); @@ -590,15 +579,22 @@ int32_t streamTaskSetRangeStreamCalc(SStreamTask* pTask) { } void doExecScanhistoryInFuture(void* param, void* tmrId) { - SStreamTask* pTask = param; + int64_t taskRefId = *(int64_t*) param; + + SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); + if (pTask == NULL) { + stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + streamTaskFreeRefId(param); + return; + } + pTask->schedHistoryInfo.numOfTicks -= 1; SStreamTaskState p = streamTaskGetStatus(pTask); if (p.state == TASK_STATUS__DROPPING || p.state == TASK_STATUS__STOP) { - int32_t ref = 
atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s status:%s not start scan-history again, ref:%d", pTask->id.idStr, p.name, ref); - + stDebug("s-task:%s status:%s not start scan-history again", pTask->id.idStr, p.name); streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); return; } @@ -608,16 +604,19 @@ void doExecScanhistoryInFuture(void* param, void* tmrId) { stError("s-task:%s async start history task failed", pTask->id.idStr); } - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - stDebug("s-task:%s fill-history:%d start scan-history data, out of tmr, ref:%d", pTask->id.idStr, - pTask->info.fillHistory, ref); - - // release the task. - streamMetaReleaseTask(pTask->pMeta, pTask); + stDebug("s-task:%s fill-history:%d start scan-history data, out of tmr", pTask->id.idStr, + pTask->info.fillHistory); } else { - streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTask, streamTimer, - &pTask->schedHistoryInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr"); + int64_t* pTaskRefId = NULL; + int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); + if (code == 0) { + streamTmrStart(doExecScanhistoryInFuture, SCANHISTORY_IDLE_TIME_SLICE, pTaskRefId, streamTimer, + &pTask->schedHistoryInfo.pTimer, pTask->pMeta->vgId, " start-history-task-tmr"); + } } + + streamMetaReleaseTask(pTask->pMeta, pTask); + streamTaskFreeRefId(param); } int32_t doStartScanHistoryTask(SStreamTask* pTask) { diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c index 0858f57414..ee9117ddc8 100644 --- a/source/libs/stream/src/streamStartTask.c +++ b/source/libs/stream/src/streamStartTask.c @@ -196,19 +196,17 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 STaskId id = {.streamId = streamId, .taskId = taskId}; int32_t vgId = pMeta->vgId; bool allRsp = true; + SStreamTask* p = NULL; streamMetaWLock(pMeta); - SStreamTask** p = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - if (p == NULL) { // task does not exists in current vnode, not record the complete info + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, &id, &p); + if (code != 0) { // task does not exist in current vnode, not record the complete info stError("vgId:%d s-task:0x%x not exists discard the check downstream info", vgId, taskId); streamMetaWUnLock(pMeta); return 0; } - // clear the send consensus-checkpointId flag -// streamMutexLock(&(*p)->lock); -// (*p)->status.sendConsensusChkptId = false; -// streamMutexUnlock(&(*p)->lock); + streamMetaReleaseTask(pMeta, p); if (pStartInfo->startAllTasks != 1) { int64_t el = endTs - startTs; @@ -222,7 +220,7 @@ int32_t streamMetaAddTaskLaunchResult(SStreamMeta* pMeta, int64_t streamId, int3 STaskInitTs initTs = {.start = startTs, .end = endTs, .success = ready}; SHashObj* pDst = ready ? 
pStartInfo->pReadyTaskSet : pStartInfo->pFailedTaskSet; - int32_t code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); + code = taosHashPut(pDst, &id, sizeof(id), &initTs, sizeof(STaskInitTs)); if (code) { if (code == TSDB_CODE_DUP_KEY) { stError("vgId:%d record start task result failed, s-task:0x%" PRIx64 @@ -296,13 +294,14 @@ void displayStatusInfo(SStreamMeta* pMeta, SHashObj* pTaskSet, bool succ) { while ((pIter = taosHashIterate(pTaskSet, pIter)) != NULL) { STaskInitTs* pInfo = pIter; void* key = taosHashGetKey(pIter, &keyLen); - - SStreamTask** pTask1 = taosHashGet(pMeta->pTasksMap, key, sizeof(STaskId)); - if (pTask1 == NULL) { - stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); + SStreamTask* pTask = NULL; + int32_t code = streamMetaAcquireTaskUnsafe(pMeta, key, &pTask); + if (code == 0) { + stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", pTask->id.idStr, + pTask->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed"); + streamMetaReleaseTask(pMeta, pTask); } else { - stInfo("s-task:%s level:%d vgId:%d, init:%" PRId64 ", initEnd:%" PRId64 ", %s", (*pTask1)->id.idStr, - (*pTask1)->info.taskLevel, vgId, pInfo->start, pInfo->end, pInfo->success ? "success" : "failed"); + stInfo("s-task:0x%x is dropped already, %s", (int32_t)((STaskId*)key)->taskId, succ ? "success" : "failed"); } } } @@ -417,8 +416,10 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { streamMetaRLock(pMeta); + SArray* pTaskList = NULL; int32_t num = taosArrayGetSize(pMeta->pTaskList); stDebug("vgId:%d stop all %d stream task(s)", pMeta->vgId, num); + if (num == 0) { stDebug("vgId:%d stop all %d task(s) completed, elapsed time:0 Sec.", pMeta->vgId, num); streamMetaRUnLock(pMeta); @@ -428,14 +429,12 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { int64_t st = taosGetTimestampMs(); // send hb msg to mnode before closing all tasks. - SArray* pTaskList = NULL; int32_t code = streamMetaSendMsgBeforeCloseTasks(pMeta, &pTaskList); if (code != TSDB_CODE_SUCCESS) { return code; } int32_t numOfTasks = taosArrayGetSize(pTaskList); - for (int32_t i = 0; i < numOfTasks; ++i) { SStreamTaskId* pTaskId = taosArrayGet(pTaskList, i); SStreamTask* pTask = NULL; @@ -445,11 +444,17 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { continue; } + int64_t refId = pTask->id.refId; int32_t ret = streamTaskStop(pTask); if (ret) { stError("s-task:0x%x failed to stop task, code:%s", pTaskId->taskId, tstrerror(ret)); } + streamMetaReleaseTask(pMeta, pTask); + ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret) { + stError("vgId:%d failed to remove task:0x%x, refId:%"PRId64, pMeta->vgId, pTaskId->taskId, refId); + } } taosArrayDestroy(pTaskList); @@ -466,6 +471,7 @@ int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) { int32_t vgId = pTask->pMeta->vgId; if (pConChkptInfo->status == TASK_CONSEN_CHKPT_REQ) { + // mark the sending of req consensus checkpoint request. 
pConChkptInfo->status = TASK_CONSEN_CHKPT_SEND; pConChkptInfo->statusTs = ts; stDebug("s-task:%s vgId:%d set requiring consensus-chkptId in hbMsg, ts:%" PRId64, pTask->id.idStr, @@ -473,6 +479,8 @@ int32_t streamTaskCheckIfReqConsenChkptId(SStreamTask* pTask, int64_t ts) { return 1; } else { int32_t el = (ts - pConChkptInfo->statusTs) / 1000; + + // not recv consensus-checkpoint rsp for 60sec, send it again in hb to mnode if ((pConChkptInfo->status == TASK_CONSEN_CHKPT_SEND) && el > 60) { pConChkptInfo->statusTs = ts; @@ -492,7 +500,7 @@ void streamTaskSetConsenChkptIdRecv(SStreamTask* pTask, int32_t transId, int64_t pInfo->status = TASK_CONSEN_CHKPT_RECV; pInfo->statusTs = ts; - stDebug("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId); + stInfo("s-task:%s set recv consen-checkpointId, transId:%d", pTask->id.idStr, transId); } void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) { @@ -507,23 +515,24 @@ void streamTaskSetReqConsenChkptId(SStreamTask* pTask, int64_t ts) { } int32_t streamMetaAddFailedTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { - int32_t code = TSDB_CODE_SUCCESS; - int64_t now = taosGetTimestampMs(); - int64_t startTs = 0; - bool hasFillhistoryTask = false; - STaskId hId = {0}; + int32_t code = TSDB_CODE_SUCCESS; + int64_t now = taosGetTimestampMs(); + int64_t startTs = 0; + bool hasFillhistoryTask = false; + STaskId hId = {0}; + STaskId id = {.streamId = streamId, .taskId = taskId}; + SStreamTask* pTask = NULL; stDebug("vgId:%d add start failed task:0x%x", pMeta->vgId, taskId); streamMetaRLock(pMeta); - STaskId id = {.streamId = streamId, .taskId = taskId}; - SStreamTask** ppTask = taosHashGet(pMeta->pTasksMap, &id, sizeof(id)); - - if (ppTask != NULL) { - startTs = (*ppTask)->taskCheckInfo.startTs; - hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(*ppTask); - hId = (*ppTask)->hTaskInfo.id; + code = streamMetaAcquireTaskUnsafe(pMeta, &id, &pTask); + if (code == 0) { + startTs = pTask->taskCheckInfo.startTs; + hasFillhistoryTask = HAS_RELATED_FILLHISTORY_TASK(pTask); + hId = pTask->hTaskInfo.id; + streamMetaReleaseTask(pMeta, pTask); streamMetaRUnLock(pMeta); diff --git a/source/libs/stream/src/streamTimer.c b/source/libs/stream/src/streamTimer.c index 0da9acfd1d..848e9c874e 100644 --- a/source/libs/stream/src/streamTimer.c +++ b/source/libs/stream/src/streamTimer.c @@ -66,15 +66,9 @@ void streamTmrStop(tmr_h tmrId) { } } -int32_t streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, SStreamTask* pTask) { +void streamCleanBeforeQuitTmr(SStreamTmrInfo* pInfo, void* param) { pInfo->activeCounter = 0; pInfo->launchChkptId = 0; atomic_store_8(&pInfo->isActive, 0); - - int32_t ref = atomic_sub_fetch_32(&pTask->status.timerActive, 1); - if (ref < 0) { - stFatal("invalid task timer ref value:%d, %s", ref, pTask->id.idStr); - } - - return ref; + streamTaskFreeRefId(param); } \ No newline at end of file From 0fa7bf1a26b403a35315279378677e456fb30754 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 27 Oct 2024 19:51:52 +0800 Subject: [PATCH 035/127] fix(stream): fix memory leak. 
--- source/libs/stream/src/streamMeta.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index db46934e47..05aaa66049 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -100,16 +100,7 @@ void metaRefMgtCleanup() { void* pIter = taosHashIterate(gMetaRefMgt.pTable, NULL); while (pIter) { int64_t* p = *(int64_t**) pIter; - stInfo("---------------free refId:%"PRId64", %p", *p, p); - taosMemoryFree(p); - -// SArray* list = *(SArray**)pIter; -// for (int i = 0; i < taosArrayGetSize(list); i++) { -// void* rid = taosArrayGetP(list, i); -// taosMemoryFree(rid); -// } -// taosArrayDestroy(list); pIter = taosHashIterate(gMetaRefMgt.pTable, pIter); } @@ -127,13 +118,14 @@ int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid) { if (p == NULL) { code = taosHashPut(gMetaRefMgt.pTable, &rid, sizeof(rid), &rid, sizeof(void*)); if (code) { - stError("vgId:%d failed to put into metaRef table, rid:%" PRId64, (int32_t)vgId, *rid); + stError("vgId:%d failed to put into refId mgt, refId:%" PRId64" %p, code:%s", (int32_t)vgId, *rid, rid, + tstrerror(code)); return code; - } else { - stInfo("add refId:%"PRId64" vgId:%d, %p", *rid, (int32_t)vgId, rid); + } else { // not +// stInfo("add refId:%"PRId64" vgId:%d, %p", *rid, (int32_t)vgId, rid); } } else { - // todo + stFatal("try to add refId:%"PRId64" vgId:%d, %p that already added into mgt", *rid, (int32_t) vgId, rid); } streamMutexUnlock(&gMetaRefMgt.mutex); @@ -144,7 +136,7 @@ void metaRefMgtRemove(int64_t* pRefId) { streamMutexLock(&gMetaRefMgt.mutex); taosHashRemove(gMetaRefMgt.pTable, &pRefId, sizeof(pRefId)); - stInfo("remove refId from mgt, refId:%"PRId64", %p", *pRefId, pRefId); + taosMemoryFree(pRefId); streamMutexUnlock(&gMetaRefMgt.mutex); } From 51f3f29d5bf323d34dbe70d9e471e729e638a6e5 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sun, 27 Oct 2024 21:52:46 +0800 Subject: [PATCH 036/127] fix(stream): fix syntax check failure. 
--- source/dnode/vnode/src/tq/tqRead.c | 5 ++++- source/libs/stream/src/streamMeta.c | 20 +++++++++++++++----- source/libs/stream/src/streamTask.c | 8 ++++++-- 3 files changed, 25 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/tq/tqRead.c b/source/dnode/vnode/src/tq/tqRead.c index 9d9e7c431a..bb8491108f 100644 --- a/source/dnode/vnode/src/tq/tqRead.c +++ b/source/dnode/vnode/src/tq/tqRead.c @@ -1126,7 +1126,10 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) { tqError("vgId:%d, s-task:0x%x update qualified table error for stream task", vgId, taskId); } } - taosReleaseRef(streamTaskRefPool, refId); + int32_t ret = taosReleaseRef(streamTaskRefPool, refId); + if (ret) { + tqError("vgId:%d release task refId failed, refId:%" PRId64, vgId, refId); + } } } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 05aaa66049..d414b02d69 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -135,7 +135,7 @@ int32_t metaRefMgtAdd(int64_t vgId, int64_t* rid) { void metaRefMgtRemove(int64_t* pRefId) { streamMutexLock(&gMetaRefMgt.mutex); - taosHashRemove(gMetaRefMgt.pTable, &pRefId, sizeof(pRefId)); + int32_t code = taosHashRemove(gMetaRefMgt.pTable, &pRefId, sizeof(pRefId)); taosMemoryFree(pRefId); streamMutexUnlock(&gMetaRefMgt.mutex); } @@ -534,7 +534,10 @@ void streamMetaClear(SStreamMeta* pMeta) { p->info.delaySchedParam = 0; } - taosRemoveRef(streamTaskRefPool, refId); + int32_t code = taosRemoveRef(streamTaskRefPool, refId); + if (code) { + stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId); + } } if (pMeta->streamBackendRid != 0) { @@ -722,12 +725,19 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa } if ((code = streamMetaSaveTask(pMeta, pTask)) != 0) { - taosRemoveRef(streamTaskRefPool, refId); + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret) { + stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId); + } return code; } if ((code = streamMetaCommit(pMeta)) != 0) { - taosRemoveRef(streamTaskRefPool, refId); + int32_t ret = taosRemoveRef(streamTaskRefPool, refId); + if (ret) { + stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId); + } + return code; } @@ -1215,7 +1225,7 @@ void streamMetaNotifyClose(SStreamMeta* pMeta) { streamMetaReleaseTask(pMeta, pTask); ret = taosRemoveRef(streamTaskRefPool, refId); if (ret) { - stError("vgId:%d failed to remove task:0x%x, refId:%"PRId64, pMeta->vgId, pTaskId->taskId, refId); + stError("vgId:%d failed to remove task:0x%x, refId:%" PRId64, pMeta->vgId, pTaskId->taskId, refId); } } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index e00984ea9b..67d76ba2ef 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -1285,8 +1285,12 @@ int32_t streamTaskAllocRefId(SStreamTask* pTask, int64_t** pRefId) { *pRefId = taosMemoryMalloc(sizeof(int64_t)); if (*pRefId != NULL) { **pRefId = pTask->id.refId; - metaRefMgtAdd(pTask->pMeta->vgId, *pRefId); - return 0; + int32_t code = metaRefMgtAdd(pTask->pMeta->vgId, *pRefId); + if (code != 0) { + stError("s-task:%s failed to add refId:%" PRId64 " into refId-mgmt, code:%s", pTask->id.idStr, pTask->id.refId, + tstrerror(code)); + } + return code; } else { stError("s-task:%s failed to alloc new ref id, code:%s", pTask->id.idStr, tstrerror(terrno)); return terrno; From 
323088dc7113d1c5e2e3a80f14e682e02ba73697 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Sun, 27 Oct 2024 22:41:34 +0800 Subject: [PATCH 037/127] fix: create partialfunction --- include/libs/function/functionMgt.h | 1 + source/libs/function/src/builtins.c | 2 +- source/libs/function/src/functionMgt.c | 36 +++++++++++++++++++------- 3 files changed, 28 insertions(+), 11 deletions(-) diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index e5bacf85b2..0b6928fa50 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -290,6 +290,7 @@ bool fmIsElapsedFunc(int32_t funcId); void getLastCacheDataType(SDataType* pType, int32_t pkBytes); int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNode** pFunc); +int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** pFunc); int32_t fmGetDistMethod(const SFunctionNode* pFunc, SFunctionNode** pPartialFunc, SFunctionNode** pMidFunc, SFunctionNode** pMergeFunc); diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 7c659bdef5..bd72498508 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1648,7 +1648,7 @@ static int32_t translateOutVarchar(SFunctionNode* pFunc, char* pErrBuf, int32_t bytes = TSDB_TABLE_FNAME_LEN - 1 + VARSTR_HEADER_SIZE; break; case FUNCTION_TYPE_TIMEZONE: - bytes = TD_TIMEZONE_LEN; + bytes = timeZoneStrLen(); break; case FUNCTION_TYPE_IRATE_PARTIAL: bytes = getIrateInfoSize((pFunc->hasPk) ? pFunc->pkBytes : 0) + VARSTR_HEADER_SIZE; diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index aaa66441ee..a406b23c59 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -412,6 +412,27 @@ int32_t createFunction(const char* pName, SNodeList* pParameterList, SFunctionNo return code; } +int32_t createFunctionWithSrcFunc(const char* pName, const SFunctionNode* pSrcFunc, SNodeList* pParameterList, SFunctionNode** ppFunc) { + int32_t code = nodesMakeNode(QUERY_NODE_FUNCTION, (SNode**)ppFunc); + if (NULL == *ppFunc) { + return code; + } + + (*ppFunc)->hasPk = pSrcFunc->hasPk; + (*ppFunc)->pkBytes = pSrcFunc->pkBytes; + + (void)snprintf((*ppFunc)->functionName, sizeof((*ppFunc)->functionName), "%s", pName); + (*ppFunc)->pParameterList = pParameterList; + code = getFuncInfo((*ppFunc)); + if (TSDB_CODE_SUCCESS != code) { + (*ppFunc)->pParameterList = NULL; + nodesDestroyNode((SNode*)*ppFunc); + *ppFunc = NULL; + return code; + } + return code; +} + static int32_t createColumnByFunc(const SFunctionNode* pFunc, SColumnNode** ppCol) { int32_t code = nodesMakeNode(QUERY_NODE_COLUMN, (SNode**)ppCol); if (NULL == *ppCol) { @@ -438,7 +459,8 @@ static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNod if (NULL == pParameterList) { return code; } - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pParameterList,pPartialFunc ); + code = + createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pPartialFunc, pSrcFunc, pParameterList, pPartialFunc); if (TSDB_CODE_SUCCESS != code) { nodesDestroyList(pParameterList); return code; @@ -452,8 +474,6 @@ static int32_t createPartialFunction(const SFunctionNode* pSrcFunc, SFunctionNod return TSDB_CODE_FAILED; } tstrncpy((*pPartialFunc)->node.aliasName, name, TSDB_COL_NAME_LEN); - (*pPartialFunc)->hasPk = pSrcFunc->hasPk; - 
(*pPartialFunc)->pkBytes = pSrcFunc->pkBytes; return TSDB_CODE_SUCCESS; } @@ -479,9 +499,9 @@ static int32_t createMidFunction(const SFunctionNode* pSrcFunc, const SFunctionN int32_t code = createMergeFuncPara(pSrcFunc, pPartialFunc, &pParameterList); if (TSDB_CODE_SUCCESS == code) { if(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc != NULL){ - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc, pParameterList, &pFunc); + code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMiddleFunc, pSrcFunc, pParameterList, &pFunc); }else{ - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList, &pFunc); + code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pSrcFunc, pParameterList, &pFunc); } } if (TSDB_CODE_SUCCESS == code) { @@ -493,8 +513,6 @@ static int32_t createMidFunction(const SFunctionNode* pSrcFunc, const SFunctionN } else { nodesDestroyList(pParameterList); } - (*pMidFunc)->hasPk = pPartialFunc->hasPk; - (*pMidFunc)->pkBytes = pPartialFunc->pkBytes; return code; } @@ -505,7 +523,7 @@ static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctio int32_t code = createMergeFuncPara(pSrcFunc, pPartialFunc, &pParameterList); if (TSDB_CODE_SUCCESS == code) { - code = createFunction(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pParameterList, &pFunc); + code = createFunctionWithSrcFunc(funcMgtBuiltins[pSrcFunc->funcId].pMergeFunc, pSrcFunc, pParameterList, &pFunc); } if (TSDB_CODE_SUCCESS == code) { pFunc->hasOriginalFunc = true; @@ -522,8 +540,6 @@ static int32_t createMergeFunction(const SFunctionNode* pSrcFunc, const SFunctio } else { nodesDestroyList(pParameterList); } - (*pMergeFunc)->hasPk = pPartialFunc->hasPk; - (*pMergeFunc)->pkBytes = pPartialFunc->pkBytes; return code; } From b02ad5c8e28ede23bbf29f7ab2bed07713657802 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Sun, 27 Oct 2024 23:35:21 +0800 Subject: [PATCH 038/127] fix to_char length --- source/libs/scalar/src/sclfunc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/scalar/src/sclfunc.c b/source/libs/scalar/src/sclfunc.c index 9aa67c441b..da4a39a551 100644 --- a/source/libs/scalar/src/sclfunc.c +++ b/source/libs/scalar/src/sclfunc.c @@ -2413,7 +2413,7 @@ int32_t toCharFunction(SScalarParam* pInput, int32_t inputNum, SScalarParam* pOu char *ts = colDataGetData(pInput[0].columnData, i); char *formatData = colDataGetData(pInput[1].columnData, pInput[1].numOfRows > 1 ? i : 0); - len = TMIN(TS_FORMAT_MAX_LEN - 1, varDataLen(formatData)); + len = TMIN(TS_FORMAT_MAX_LEN - VARSTR_HEADER_SIZE, varDataLen(formatData)); if (pInput[1].numOfRows > 1 || i == 0) { (void)strncpy(format, varDataVal(formatData), len); format[len] = '\0'; From 0bc5f8cb884496bc47d4598016cee31c26e31224 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 28 Oct 2024 10:20:08 +0800 Subject: [PATCH 039/127] fix(stream): relase the task. 
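The missing releases are on early-exit paths: checkpointTriggerMonitorFn now calls doCleanup() before re-arming its timer and returning, and streamMetaStartOneTask releases the acquired task before bailing out on the downstream-ready sanity check, so every successful acquire is paired with exactly one release no matter which branch leaves the function. A minimal sketch of that discipline, with stub types in place of SStreamTask and the acquire/release helpers (assumptions, not the real API):

    #include <stdio.h>

    typedef struct { int refs; } Task;

    static int  acquireTask(Task *t) { t->refs++; return 0; }
    static void releaseTask(Task *t) { t->refs--; }

    /* Every branch that leaves the function releases the task it acquired. */
    static int startOneTask(Task *t, int alreadyStarted) {
      if (acquireTask(t) != 0) return -1;

      if (alreadyStarted) {          /* early-return path: release before leaving */
        releaseTask(t);
        return 0;
      }

      /* ... normal start-up work ... */
      releaseTask(t);                /* normal path: the same single release */
      return 0;
    }

    int main(void) {
      Task t = {0};
      (void)startOneTask(&t, 1);
      (void)startOneTask(&t, 0);
      printf("refs after both paths: %d\n", t.refs);  /* prints 0: balanced */
      return 0;
    }
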
--- source/dnode/mnode/impl/src/mndScheduler.c | 19 +++++++++++-------- source/libs/stream/src/streamCheckpoint.c | 1 + source/libs/stream/src/streamStartTask.c | 6 ++++-- 3 files changed, 16 insertions(+), 10 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 4f72b26a5e..8d33e61733 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -248,7 +248,7 @@ static int32_t doAddSinkTask(SStreamObj* pStream, SMnode* pMnode, SVgObj* pVgrou return code; } - mDebug("doAddSinkTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory); + mDebug("doAddSinkTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId, isFillhistory); pTask->info.nodeId = pVgroup->vgId; pTask->info.epSet = mndGetVgroupEpset(pMnode, pVgroup); @@ -364,12 +364,13 @@ static int32_t buildSourceTask(SStreamObj* pStream, SEpSet* pEpset, bool isFillh static void addNewTaskList(SStreamObj* pStream) { SArray* pTaskList = taosArrayInit(0, POINTER_BYTES); if (taosArrayPush(pStream->tasks, &pTaskList) == NULL) { - mError("failed to put array"); + mError("failed to put into array"); } + if (pStream->conf.fillHistory) { pTaskList = taosArrayInit(0, POINTER_BYTES); if (taosArrayPush(pStream->pHTasksList, &pTaskList) == NULL) { - mError("failed to put array"); + mError("failed to put into array"); } } } @@ -402,7 +403,8 @@ static int32_t doAddSourceTask(SMnode* pMnode, SSubplan* plan, SStreamObj* pStre return code; } - mDebug("doAddSourceTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory); + mDebug("doAddSourceTask taskId:%s, %p vgId:%d, isFillHistory:%d", pTask->id.idStr, pTask, pVgroup->vgId, + isFillhistory); if (pStream->conf.fillHistory) { haltInitialTaskStatus(pTask, plan, isFillhistory); @@ -512,19 +514,20 @@ static int32_t doAddAggTask(SStreamObj* pStream, SMnode* pMnode, SSubplan* plan, SSnodeObj* pSnode, bool isFillhistory, bool useTriggerParam) { int32_t code = 0; SStreamTask* pTask = NULL; + const char* id = NULL; code = buildAggTask(pStream, pEpset, isFillhistory, useTriggerParam, &pTask); if (code != TSDB_CODE_SUCCESS) { return code; } + id = pTask->id.idStr; if (pSnode != NULL) { code = mndAssignStreamTaskToSnode(pMnode, pTask, plan, pSnode); - mDebug("doAddAggTask taskId:%s, snode id:%d, isFillHistory:%d", pTask->id.idStr, pSnode->id, isFillhistory); - + mDebug("doAddAggTask taskId:%s, %p snode id:%d, isFillHistory:%d", id, pTask, pSnode->id, isFillhistory); } else { code = mndAssignStreamTaskToVgroup(pMnode, pTask, plan, pVgroup); - mDebug("doAddAggTask taskId:%s, vgId:%d, isFillHistory:%d", pTask->id.idStr, pVgroup->vgId, isFillhistory); + mDebug("doAddAggTask taskId:%s, %p vgId:%d, isFillHistory:%d", id, pTask, pVgroup->vgId, isFillhistory); } return code; } @@ -678,7 +681,7 @@ static int32_t doScheduleStream(SStreamObj* pStream, SMnode* pMnode, SQueryPlan* if (numOfPlanLevel > 1 || externalTargetDB || multiTarget || pStream->fixedSinkVgId) { // add extra sink hasExtraSink = true; - int32_t code = addSinkTask(pMnode, pStream, pEpset); + code = addSinkTask(pMnode, pStream, pEpset); if (code != TSDB_CODE_SUCCESS) { return code; } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index af2d8b559b..d1b57c32b9 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -1037,6 +1037,7 @@ void 
checkpointTriggerMonitorFn(void* param, void* tmrId) { if (++pTmrInfo->activeCounter < 50) { streamTmrStart(checkpointTriggerMonitorFn, 200, param, streamTimer, &pTmrInfo->tmrHandle, vgId, "trigger-recv-monitor"); + doCleanup(pTask, pNotSendList); return; } diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c index ee9117ddc8..3518b8681d 100644 --- a/source/libs/stream/src/streamStartTask.c +++ b/source/libs/stream/src/streamStartTask.c @@ -355,7 +355,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas // fill-history task can only be launched by related stream tasks. STaskExecStatisInfo* pInfo = &pTask->execInfo; if (pTask->info.fillHistory == 1) { - stError("s-task:0x%x vgId:%d fill-histroy task, not start here", taskId, vgId); + stError("s-task:0x%x vgId:%d fill-history task, not start here", taskId, vgId); streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_SUCCESS; } @@ -363,6 +363,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas // the start all tasks procedure may happen to start the newly deployed stream task, and results in the // concurrently start this task by two threads. streamMutexLock(&pTask->lock); + SStreamTaskState status = streamTaskGetStatus(pTask); if (status.state != TASK_STATUS__UNINIT) { stError("s-task:0x%x vgId:%d status:%s not uninit status, not start stream task", taskId, vgId, status.name); @@ -379,6 +380,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas if(pTask->status.downstreamReady != 0) { stFatal("s-task:0x%x downstream should be not ready, but it ready here, internal error happens", taskId); + streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_STREAM_INTERNAL_ERROR; } @@ -395,7 +397,7 @@ int32_t streamMetaStartOneTask(SStreamMeta* pMeta, int64_t streamId, int32_t tas streamMutexUnlock(&pTask->lock); } - // concurrently start task may cause the later started task be failed, and also failed to added into meta result. + // concurrently start task may cause the latter started task be failed, and also failed to added into meta result. 
if (code == TSDB_CODE_SUCCESS) { code = streamTaskHandleEvent(pTask->status.pSM, TASK_EVENT_INIT); if (code != TSDB_CODE_SUCCESS) { From fb34549ae76d58bd930fc5fca13ce9059fd5b644 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Mon, 28 Oct 2024 11:50:25 +0800 Subject: [PATCH 040/127] fix: json tag length check --- source/common/src/tdatablock.c | 13 ++++++++++--- source/libs/function/src/builtins.c | 2 +- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index d59c9252f3..5ac5ffdd9b 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3569,11 +3569,18 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { } else { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] == nextPos); } - - colLen = varDataTLen(pCol->pData + pCol->varmeta.offset[r]); + + char* pColData = pCol->pData + pCol->varmeta.offset[r]; + int32_t colSize = 0; + if (pCol->info.type == TSDB_DATA_TYPE_JSON) { + colLen = getJsonValueLen(pColData); + } else { + colLen = varDataTLen(pColData); + } + BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE); BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes); - + if (pCol->reassigned) { BLOCK_DATA_CHECK_TRESSA((pCol->varmeta.offset[r] + colLen) <= pCol->varmeta.length); } else { diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index bd72498508..e66ddf5197 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1606,7 +1606,7 @@ static int32_t translateOutVarchar(SFunctionNode* pFunc, char* pErrBuf, int32_t break; case FUNCTION_TYPE_BLOCK_DIST: case FUNCTION_TYPE_BLOCK_DIST_INFO: - bytes = 128; + bytes = sizeof(STableBlockDistInfo); break; case FUNCTION_TYPE_TO_CHAR: bytes = 4096; From 85989212c64e8cdeb16ef865a177a569bbb0be61 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Mon, 28 Oct 2024 19:38:56 +0800 Subject: [PATCH 041/127] fix: set scolumnInfo nodata flag where table scan notload --- include/common/tmsg.h | 2 +- source/common/src/tdatablock.c | 8 ++++++-- source/libs/executor/src/executil.c | 2 +- source/libs/executor/src/scanoperator.c | 13 +++++++++++++ source/libs/planner/src/planPhysiCreater.c | 8 ++++---- 5 files changed, 25 insertions(+), 8 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 14e3f9b0eb..d6aad934e0 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -1215,7 +1215,7 @@ typedef struct { int32_t bytes; int8_t type; uint8_t pk; - bool reserve; + bool noData; } SColumnInfo; typedef struct STimeWindow { diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 5ac5ffdd9b..4724a3d241 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -3544,7 +3544,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { BLOCK_DATA_CHECK_TRESSA(pCol != NULL); isVarType = IS_VAR_DATA_TYPE(pCol->info.type); checkRows = pDataBlock->info.rows; - if (pCol->info.reserve == false) continue; + if (pCol->info.noData == true) continue; if (isVarType) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset); @@ -3578,7 +3578,11 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { colLen = varDataTLen(pColData); } - BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE); + if (pCol->info.type == TSDB_DATA_TYPE_JSON) { + BLOCK_DATA_CHECK_TRESSA(colLen >= CHAR_BYTES); + } else { + BLOCK_DATA_CHECK_TRESSA(colLen >= VARSTR_HEADER_SIZE); + } BLOCK_DATA_CHECK_TRESSA(colLen <= pCol->info.bytes); 
if (pCol->reassigned) { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index 1a43d42348..8cd573e971 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -390,7 +390,7 @@ SSDataBlock* createDataBlockFromDescNode(SDataBlockDescNode* pNode) { createColumnInfoData(pDescNode->dataType.type, pDescNode->dataType.bytes, pDescNode->slotId); idata.info.scale = pDescNode->dataType.scale; idata.info.precision = pDescNode->dataType.precision; - idata.info.reserve = pDescNode->reserve; + idata.info.noData = pDescNode->reserve; code = blockDataAppendColInfo(pBlock, &idata); if (code != TSDB_CODE_SUCCESS) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index bae9926f63..f936e95005 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1461,6 +1461,18 @@ static void destroyTableScanOperatorInfo(void* param) { taosMemoryFreeClear(param); } +static void resetClolumnReserve(SSDataBlock* pBlock, int32_t dataRequireFlag) { + if (pBlock && dataRequireFlag == FUNC_DATA_REQUIRED_NOT_LOAD) { + int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pCol = (SColumnInfoData*)taosArrayGet(pBlock->pDataBlock, i); + if (pCol) { + pCol->info.noData = true; + } + } + } +} + int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* readHandle, STableListInfo* pTableListInfo, SExecTaskInfo* pTaskInfo, SOperatorInfo** pOptrInfo) { @@ -1511,6 +1523,7 @@ int32_t createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SReadHa pInfo->base.readerAPI = pTaskInfo->storageAPI.tsdReader; initResultSizeInfo(&pOperator->resultInfo, 4096); pInfo->pResBlock = createDataBlockFromDescNode(pDescNode); + resetClolumnReserve(pInfo->pResBlock, pInfo->base.dataBlockLoadFlag); QUERY_CHECK_NULL(pInfo->pResBlock, code, lino, _error, terrno); code = prepareDataBlockBuf(pInfo->pResBlock, &pInfo->base.matchInfo); diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index a48504b79d..a67071516d 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -233,7 +233,7 @@ static int32_t buildDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SD int32_t len = 0; code = getSlotKey(pNode, NULL, &name, &len, 16); if (TSDB_CODE_SUCCESS == code) { - code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, true)); + code = nodesListStrictAppend(pDataBlockDesc->pSlots, createSlotDesc(pCxt, name, pNode, slotId, true, false)); } code = putSlotToHash(name, len, pDataBlockDesc->dataBlockId, slotId, pNode, pHash); if (TSDB_CODE_SUCCESS == code) { @@ -341,7 +341,7 @@ static int32_t addDataBlockSlotsImpl(SPhysiPlanContext* pCxt, SNodeList* pList, } static int32_t addDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SDataBlockDescNode* pDataBlockDesc) { - return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, false, false); + return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, false, true); } static int32_t addDataBlockSlot(SPhysiPlanContext* pCxt, SNode** pNode, SDataBlockDescNode* pDataBlockDesc) { @@ -363,11 +363,11 @@ static int32_t addDataBlockSlot(SPhysiPlanContext* pCxt, SNode** pNode, SDataBlo static int32_t addDataBlockSlotsForProject(SPhysiPlanContext* pCxt, const char* pStmtName, SNodeList* pList, 
SDataBlockDescNode* pDataBlockDesc) { - return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, pStmtName, false, false); + return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, pStmtName, false, true); } static int32_t pushdownDataBlockSlots(SPhysiPlanContext* pCxt, SNodeList* pList, SDataBlockDescNode* pDataBlockDesc) { - return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, true, false); + return addDataBlockSlotsImpl(pCxt, pList, pDataBlockDesc, NULL, true, true); } typedef struct SSetSlotIdCxt { From 1463bd3efdcfc2fa3d5333ad44fb327c45b17804 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 29 Oct 2024 00:03:40 +0800 Subject: [PATCH 042/127] fix: show streams --- source/common/src/systable.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/common/src/systable.c b/source/common/src/systable.c index eef38bf18e..211aa20645 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -165,8 +165,8 @@ static const SSysDbTableSchema userStbsSchema[] = { static const SSysDbTableSchema streamSchema[] = { {.name = "stream_name", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP, .sysInfo = false}, - {.name = "stream_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "history_id", .bytes = 16 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "stream_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "history_id", .bytes = 19 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "sql", .bytes = TSDB_SHOW_SQL_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "source_db", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, From 097d3caa902beab27bd2b0ba4dbefe6fa85cef8c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 29 Oct 2024 10:20:27 +0800 Subject: [PATCH 043/127] fix(stream): add refId for meta at the end of init functions --- source/libs/stream/src/streamMeta.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 7e9b60b61a..17883f5fb1 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -434,7 +434,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, pMeta->closeFlag = false; stInfo("vgId:%d open stream meta succ, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId, stage); - pMeta->rid = taosAddRef(streamMetaId, pMeta); // set the attribute when running on Linux OS TdThreadRwlockAttr attr; @@ -468,6 +467,9 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, code = taosThreadMutexInit(&pMeta->backendMutex, NULL); TSDB_CHECK_CODE(code, lino, _err); + // add refId at the end of initialization function + pMeta->rid = taosAddRef(streamMetaId, pMeta); + *p = pMeta; return code; From 281668738acf1b057d78b0cb52c1407f5c280997 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 29 Oct 2024 10:20:27 +0800 Subject: [PATCH 044/127] fix(stream): add refId for meta at the end of init functions --- source/libs/stream/src/streamMeta.c | 4 +++- 1 file changed, 3 insertions(+), 
1 deletion(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 7e9b60b61a..17883f5fb1 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -434,7 +434,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, pMeta->closeFlag = false; stInfo("vgId:%d open stream meta succ, latest checkpoint:%" PRId64 ", stage:%" PRId64, vgId, pMeta->chkpId, stage); - pMeta->rid = taosAddRef(streamMetaId, pMeta); // set the attribute when running on Linux OS TdThreadRwlockAttr attr; @@ -468,6 +467,9 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, code = taosThreadMutexInit(&pMeta->backendMutex, NULL); TSDB_CHECK_CODE(code, lino, _err); + // add refId at the end of initialization function + pMeta->rid = taosAddRef(streamMetaId, pMeta); + *p = pMeta; return code; From 36e115d2cd0799c5e577981bfde1e313e9420361 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 29 Oct 2024 15:49:16 +0800 Subject: [PATCH 045/127] fix: colDataSetVal repeatedly sets the same row --- source/common/src/systable.c | 6 +++--- source/common/src/tdatablock.c | 12 ++++++++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/source/common/src/systable.c b/source/common/src/systable.c index 211aa20645..cb11e4a24e 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -190,9 +190,9 @@ static const SSysDbTableSchema streamTaskSchema[] = { {.name = "stage", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT, .sysInfo = false}, {.name = "in_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, {.name = "process_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "process_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "out_total", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, - {.name = "out_throughput", .bytes = 14, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "process_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "out_total", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, + {.name = "out_throughput", .bytes = 15, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "dispatch_throughput", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "dispatch_total", .bytes = 12, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, // {.name = "out_queue", .bytes = 20, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false}, diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 4724a3d241..3187d2315a 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -109,6 +109,14 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const dataLen = varDataTLen(pData); } + if(rowIndex == 0 && pColumnInfoData->varmeta.length > 0) { + return 0; + } + if (pColumnInfoData->varmeta.offset[rowIndex] > 0 && + pColumnInfoData->varmeta.offset[rowIndex] < pColumnInfoData->varmeta.length) { + return 0; + } + SVarColAttr* pAttr = &pColumnInfoData->varmeta; if (pAttr->allocLen < pAttr->length + dataLen) { uint32_t newSize = pAttr->allocLen; @@ -3552,7 +3560,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { BLOCK_DATA_CHECK_TRESSA(pCol->nullbitmap); } - nextPos = 0; + nextPos = -1; for (int64_t r = 0; r < checkRows; ++r) { if (tsSafetyCheckLevel <= TSDB_SAFETY_CHECK_LEVELL_NORMAL) break; if 
(!colDataIsNull_s(pCol, r)) { @@ -3564,7 +3572,7 @@ int32_t blockDataCheck(const SSDataBlock* pDataBlock) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] <= pCol->varmeta.length); if (pCol->reassigned) { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] >= 0); - } else if (0 == r) { + } else if (0 == r || nextPos == -1) { nextPos = pCol->varmeta.offset[r]; } else { BLOCK_DATA_CHECK_TRESSA(pCol->varmeta.offset[r] == nextPos); From 946b3efd46a26ad6aeb232e499208efbe43364ed Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 29 Oct 2024 18:02:02 +0800 Subject: [PATCH 046/127] Update test_case_when.py --- tests/army/query/test_case_when.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/army/query/test_case_when.py b/tests/army/query/test_case_when.py index 9edc9daad6..dea5a5f01f 100644 --- a/tests/army/query/test_case_when.py +++ b/tests/army/query/test_case_when.py @@ -348,6 +348,18 @@ class TDTestCase(TBase): # tdSql.query("select case c_float when 2.2 then 9.2233720e+18 when 3.3 then -9.2233720e+18 else 'aa' end from st1;") # tdSql.query("select case t1.c_int when 2 then 'run' when t1.c_int is null then 'other' else t2.c_varchar end from st1 t1, st2 t2 where t1.ts=t2.ts;") + tdSql.query("select avg(case when c_tinyint>=2 then c_tinyint else c_null end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == [(6.0,)]) + + tdSql.query("select sum(case when c_tinyint>=2 then c_tinyint else c_null end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == [(54,)]) + + tdSql.query("select first(case when c_int >=2 then 'abc' else 0 end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == [('abc',)]) + + tdSql.query("select last(case when c_int >=2 then c_int else 0 end) from st1;") + assert(tdSql.checkRows(1) and tdSql.res == [(0,)]) + def run(self): self.prepare_data() self.test_case_when_statements() From 69f6c3eb5149033e3d3af4438f9ec452e90f8178 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 29 Oct 2024 18:45:30 +0800 Subject: [PATCH 047/127] fix(stream): adjust init ref position. --- source/libs/stream/src/streamMeta.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 17883f5fb1..064098ed35 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -458,9 +458,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, code = metaRefMgtAdd(pMeta->vgId, pRid); TSDB_CHECK_CODE(code, lino, _err); - code = createMetaHbInfo(pRid, &pMeta->pHbInfo); - TSDB_CHECK_CODE(code, lino, _err); - code = bkdMgtCreate(tpath, (SBkdMgt**)&pMeta->bkdChkptMgt); TSDB_CHECK_CODE(code, lino, _err); @@ -469,6 +466,9 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, // add refId at the end of initialization function pMeta->rid = taosAddRef(streamMetaId, pMeta); + code = createMetaHbInfo(pRid, &pMeta->pHbInfo); + + TSDB_CHECK_CODE(code, lino, _err); *p = pMeta; return code; From c149449717079441ead616a0aca7197a8e307983 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 29 Oct 2024 18:45:30 +0800 Subject: [PATCH 048/127] fix(stream): adjust init ref position. 
--- source/libs/stream/src/streamMeta.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 17883f5fb1..064098ed35 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -458,9 +458,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, code = metaRefMgtAdd(pMeta->vgId, pRid); TSDB_CHECK_CODE(code, lino, _err); - code = createMetaHbInfo(pRid, &pMeta->pHbInfo); - TSDB_CHECK_CODE(code, lino, _err); - code = bkdMgtCreate(tpath, (SBkdMgt**)&pMeta->bkdChkptMgt); TSDB_CHECK_CODE(code, lino, _err); @@ -469,6 +466,9 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, // add refId at the end of initialization function pMeta->rid = taosAddRef(streamMetaId, pMeta); + code = createMetaHbInfo(pRid, &pMeta->pHbInfo); + + TSDB_CHECK_CODE(code, lino, _err); *p = pMeta; return code; From 9bae0adba6a56cefdad9da1965e949b433eee49e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 29 Oct 2024 19:14:37 +0800 Subject: [PATCH 049/127] fix(stream): start scheduler task after set the refId. --- source/dnode/snode/src/snode.c | 1 - source/dnode/vnode/src/tq/tq.c | 1 - source/libs/stream/src/streamCheckStatus.c | 2 +- source/libs/stream/src/streamCheckpoint.c | 2 +- source/libs/stream/src/streamDispatch.c | 4 ++-- source/libs/stream/src/streamMeta.c | 6 ++++++ source/libs/stream/src/streamSched.c | 23 +++++++++++++++------ source/libs/stream/src/streamStartHistory.c | 2 +- 8 files changed, 28 insertions(+), 13 deletions(-) diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index e8d4663bbb..6eee8c510b 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -38,7 +38,6 @@ int32_t sndBuildStreamTask(SSnode *pSnode, SStreamTask *pTask, int64_t nextProce streamTaskOpenAllUpstreamInput(pTask); streamTaskResetUpstreamStageInfo(pTask); - streamSetupScheduleTrigger(pTask); SCheckpointInfo *pChkInfo = &pTask->chkInfo; tqSetRestoreVersionInfo(pTask); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 2929121029..ec7ac1054c 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -765,7 +765,6 @@ int32_t tqBuildStreamTask(void* pTqObj, SStreamTask* pTask, int64_t nextProcessV } streamTaskResetUpstreamStageInfo(pTask); - streamSetupScheduleTrigger(pTask); SCheckpointInfo* pChkInfo = &pTask->chkInfo; tqSetRestoreVersionInfo(pTask); diff --git a/source/libs/stream/src/streamCheckStatus.c b/source/libs/stream/src/streamCheckStatus.c index 60f8744448..64b19e4ed9 100644 --- a/source/libs/stream/src/streamCheckStatus.c +++ b/source/libs/stream/src/streamCheckStatus.c @@ -750,7 +750,7 @@ void rspMonitorFn(void* param, void* tmrId) { SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); if (pTask == NULL) { - stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); streamTaskFreeRefId(param); return; } diff --git a/source/libs/stream/src/streamCheckpoint.c b/source/libs/stream/src/streamCheckpoint.c index d1b57c32b9..7724d1c5ff 100644 --- a/source/libs/stream/src/streamCheckpoint.c +++ b/source/libs/stream/src/streamCheckpoint.c @@ -1008,7 +1008,7 @@ void checkpointTriggerMonitorFn(void* param, void* tmrId) { SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); if (pTask == 
NULL) { - stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); streamTaskFreeRefId(param); return; } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index ff41008759..5f31364e76 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -539,7 +539,7 @@ static void doMonitorDispatchData(void* param, void* tmrId) { pTask = taosAcquireRef(streamTaskRefPool, taskRefId); if (pTask == NULL) { - stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); streamTaskFreeRefId(param); return; } @@ -1082,7 +1082,7 @@ static void chkptReadyMsgSendMonitorFn(void* param, void* tmrId) { SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); if (pTask == NULL) { - stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); streamTaskFreeRefId(param); return; } diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index d414b02d69..598f809c21 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -745,6 +745,9 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa int32_t val = atomic_add_fetch_32(&pMeta->numOfStreamTasks, 1); } + // enable the scheduler for stream tasks + streamSetupScheduleTrigger(pTask); + *pAdded = true; return code; } @@ -1147,6 +1150,9 @@ void streamMetaLoadAllTasks(SStreamMeta* pMeta) { continue; } + // enable the scheduler for stream tasks after acquire the task RefId. 
+ streamSetupScheduleTrigger(pTask); + stInfo("s-task:0x%x vgId:%d set refId:%"PRId64, (int32_t) id.taskId, vgId, pTask->id.refId); if (pTask->info.fillHistory == 0) { int32_t val = atomic_add_fetch_32(&pMeta->numOfStreamTasks, 1); diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 7c77797ef9..25d6161aba 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -20,12 +20,15 @@ static void streamTaskResumeHelper(void* param, void* tmrId); static void streamTaskSchedHelper(void* param, void* tmrId); void streamSetupScheduleTrigger(SStreamTask* pTask) { - int64_t delaySchema = pTask->info.delaySchedParam; - if (delaySchema != 0 && pTask->info.fillHistory == 0) { + int64_t delayParam = pTask->info.delaySchedParam; + if (delayParam != 0 && pTask->info.fillHistory == 0) { int64_t* pTaskRefId = NULL; int32_t code = streamTaskAllocRefId(pTask, &pTaskRefId); if (code == 0) { - streamTmrStart(streamTaskSchedHelper, (int32_t)delaySchema, pTaskRefId, streamTimer, + stDebug("s-task:%s refId:%" PRId64 " enable the scheduler trigger, delay:%" PRId64, pTask->id.idStr, + pTask->id.refId, delayParam); + + streamTmrStart(streamTaskSchedHelper, (int32_t)delayParam, pTaskRefId, streamTimer, &pTask->schedInfo.pDelayTimer, pTask->pMeta->vgId, "sched-tmr"); pTask->schedInfo.status = TASK_TRIGGER_STATUS__INACTIVE; } @@ -93,7 +96,7 @@ void streamTaskResumeHelper(void* param, void* tmrId) { int64_t taskRefId = *(int64_t*)param; SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); if (pTask == NULL) { - stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); streamTaskFreeRefId(param); return; } @@ -129,7 +132,7 @@ void streamTaskSchedHelper(void* param, void* tmrId) { int64_t taskRefId = *(int64_t*)param; SStreamTask* pTask = taosAcquireRef(streamTaskRefPool, taskRefId); if (pTask == NULL) { - stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); streamTaskFreeRefId(param); return; } @@ -141,13 +144,21 @@ void streamTaskSchedHelper(void* param, void* tmrId) { int8_t status = atomic_load_8(&pTask->schedInfo.status); stTrace("s-task:%s in scheduler, trigger status:%d, next:%dms", id, status, nextTrigger); - if (streamTaskShouldStop(pTask) || streamTaskShouldPause(pTask)) { + if (streamTaskShouldStop(pTask)) { stDebug("s-task:%s should stop, jump out of schedTimer", id); streamMetaReleaseTask(pTask->pMeta, pTask); streamTaskFreeRefId(param); return; } + if (streamTaskShouldPause(pTask)) { + stDebug("s-task:%s is paused, recheck in %.2fs", id, nextTrigger/1000.0); + streamTmrStart(streamTaskSchedHelper, nextTrigger, param, streamTimer, &pTask->schedInfo.pDelayTimer, vgId, + "sched-run-tmr"); + streamMetaReleaseTask(pTask->pMeta, pTask); + return; + } + if (streamTaskGetStatus(pTask).state == TASK_STATUS__CK) { stDebug("s-task:%s in checkpoint procedure, not retrieve result, next:%dms", id, nextTrigger); } else { diff --git a/source/libs/stream/src/streamStartHistory.c b/source/libs/stream/src/streamStartHistory.c index 54026f5db2..4d38c48fc5 100644 --- a/source/libs/stream/src/streamStartHistory.c +++ b/source/libs/stream/src/streamStartHistory.c @@ -583,7 +583,7 @@ void doExecScanhistoryInFuture(void* param, void* tmrId) { SStreamTask* pTask = 
taosAcquireRef(streamTaskRefPool, taskRefId); if (pTask == NULL) { - stError("invalid task rid:%" PRId64 " failed to acquired stream-task", taskRefId); + stError("invalid task rid:%" PRId64 " failed to acquired stream-task at %s", taskRefId, __func__); streamTaskFreeRefId(param); return; } From 71f4ae695720e108c20bcf6513d15e5199f7aa9c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 29 Oct 2024 19:48:17 +0800 Subject: [PATCH 050/127] fix(stream): adjust refId init position. --- source/libs/stream/src/streamMeta.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 064098ed35..bc7878f761 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -451,13 +451,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, code = taosThreadRwlockAttrDestroy(&attr); TSDB_CHECK_CODE(code, lino, _err); - int64_t* pRid = taosMemoryMalloc(sizeof(int64_t)); - TSDB_CHECK_NULL(pRid, code, lino, _err, terrno); - - memcpy(pRid, &pMeta->rid, sizeof(pMeta->rid)); - code = metaRefMgtAdd(pMeta->vgId, pRid); - TSDB_CHECK_CODE(code, lino, _err); - code = bkdMgtCreate(tpath, (SBkdMgt**)&pMeta->bkdChkptMgt); TSDB_CHECK_CODE(code, lino, _err); @@ -466,6 +459,15 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, // add refId at the end of initialization function pMeta->rid = taosAddRef(streamMetaId, pMeta); + + int64_t* pRid = taosMemoryMalloc(sizeof(int64_t)); + TSDB_CHECK_NULL(pRid, code, lino, _err, terrno); + + memcpy(pRid, &pMeta->rid, sizeof(pMeta->rid)); + + code = metaRefMgtAdd(pMeta->vgId, pRid); + TSDB_CHECK_CODE(code, lino, _err); + code = createMetaHbInfo(pRid, &pMeta->pHbInfo); TSDB_CHECK_CODE(code, lino, _err); From c6c23271c73d908d8d8b2e3feadf51770d2d85da Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 29 Oct 2024 19:48:17 +0800 Subject: [PATCH 051/127] fix(stream): adjust refId init position. 
--- source/libs/stream/src/streamMeta.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 064098ed35..bc7878f761 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -451,13 +451,6 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, code = taosThreadRwlockAttrDestroy(&attr); TSDB_CHECK_CODE(code, lino, _err); - int64_t* pRid = taosMemoryMalloc(sizeof(int64_t)); - TSDB_CHECK_NULL(pRid, code, lino, _err, terrno); - - memcpy(pRid, &pMeta->rid, sizeof(pMeta->rid)); - code = metaRefMgtAdd(pMeta->vgId, pRid); - TSDB_CHECK_CODE(code, lino, _err); - code = bkdMgtCreate(tpath, (SBkdMgt**)&pMeta->bkdChkptMgt); TSDB_CHECK_CODE(code, lino, _err); @@ -466,6 +459,15 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, // add refId at the end of initialization function pMeta->rid = taosAddRef(streamMetaId, pMeta); + + int64_t* pRid = taosMemoryMalloc(sizeof(int64_t)); + TSDB_CHECK_NULL(pRid, code, lino, _err, terrno); + + memcpy(pRid, &pMeta->rid, sizeof(pMeta->rid)); + + code = metaRefMgtAdd(pMeta->vgId, pRid); + TSDB_CHECK_CODE(code, lino, _err); + code = createMetaHbInfo(pRid, &pMeta->pHbInfo); TSDB_CHECK_CODE(code, lino, _err); From 694e7577703a5d5e23c1d2f3ddcef09dc48c86a6 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 29 Oct 2024 19:50:21 +0800 Subject: [PATCH 052/127] fix: coldata set val --- source/common/src/tdatablock.c | 35 +++++++++++++++++++++------------- 1 file changed, 22 insertions(+), 13 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 3187d2315a..c185ac9d86 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -87,6 +87,16 @@ int32_t getJsonValueLen(const char* data) { return dataLen; } +static int32_t getDataLen(int32_t type, const char* pData) { + int32_t dataLen = 0; + if (type == TSDB_DATA_TYPE_JSON) { + dataLen = getJsonValueLen(pData); + } else { + dataLen = varDataTLen(pData); + } + return dataLen; +} + int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { if (isNull || pData == NULL) { // There is a placehold for each NULL value of binary or nchar type. 
@@ -102,19 +112,18 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const int32_t type = pColumnInfoData->info.type; if (IS_VAR_DATA_TYPE(type)) { - int32_t dataLen = 0; - if (type == TSDB_DATA_TYPE_JSON) { - dataLen = getJsonValueLen(pData); - } else { - dataLen = varDataTLen(pData); - } - - if(rowIndex == 0 && pColumnInfoData->varmeta.length > 0) { - return 0; - } - if (pColumnInfoData->varmeta.offset[rowIndex] > 0 && - pColumnInfoData->varmeta.offset[rowIndex] < pColumnInfoData->varmeta.length) { - return 0; + int32_t dataLen = getDataLen(type, pData); + if (pColumnInfoData->varmeta.offset[rowIndex] > 0) { + if (rowIndex == 0) { + pColumnInfoData->varmeta.length = 0; + } else { + int32_t start = pColumnInfoData->varmeta.offset[rowIndex - 1]; + int32_t lastDataLen = getDataLen(type, pColumnInfoData->pData + start); + if (start + lastDataLen < pColumnInfoData->varmeta.length) { + uInfo("column data is reassigned, row:%d, offset:%d, length:%d", rowIndex, start, lastDataLen); + pColumnInfoData->varmeta.length = start + lastDataLen; + } + } } SVarColAttr* pAttr = &pColumnInfoData->varmeta; From 62fbb62b9ce036ab47c148f5a162df8c0e482567 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Tue, 29 Oct 2024 23:06:57 +0800 Subject: [PATCH 053/127] fix: merge conflict --- source/libs/command/src/command.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c index 2f44da6c4f..6272ac7049 100644 --- a/source/libs/command/src/command.c +++ b/source/libs/command/src/command.c @@ -52,8 +52,6 @@ static int32_t buildRetrieveTableRsp(SSDataBlock* pBlock, int32_t numOfCols, SRe int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols); if(len < 0) { - int32_t len = blockEncode(pBlock, (*pRsp)->data + PAYLOAD_PREFIX_LEN, dataEncodeBufSize, numOfCols); - if (len < 0) { taosMemoryFree(*pRsp); return terrno; } From 6b5e64dc8e8d7a900703a4d0cfe80acb7e1fa54d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 30 Oct 2024 09:13:07 +0800 Subject: [PATCH 054/127] fix(stream): adjust start timer position. --- source/libs/stream/src/streamHb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index 19391bf7a0..65102edc24 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -316,12 +316,13 @@ int32_t createMetaHbInfo(int64_t* pRid, SMetaHbInfo** pRes) { return terrno; } - streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, pRid, streamTimer, &pInfo->hbTmr, 0, "stream-hb"); pInfo->tickCounter = 0; pInfo->msgSendTs = -1; pInfo->hbCount = 0; *pRes = pInfo; + + streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, pRid, streamTimer, &pInfo->hbTmr, 0, "stream-hb"); return TSDB_CODE_SUCCESS; } From e618a04e510e2b83f0cb929d3fac85656d7a2f9b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 30 Oct 2024 09:13:07 +0800 Subject: [PATCH 055/127] fix(stream): adjust start timer position. 
--- source/libs/stream/src/streamHb.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamHb.c b/source/libs/stream/src/streamHb.c index 19391bf7a0..65102edc24 100644 --- a/source/libs/stream/src/streamHb.c +++ b/source/libs/stream/src/streamHb.c @@ -316,12 +316,13 @@ int32_t createMetaHbInfo(int64_t* pRid, SMetaHbInfo** pRes) { return terrno; } - streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, pRid, streamTimer, &pInfo->hbTmr, 0, "stream-hb"); pInfo->tickCounter = 0; pInfo->msgSendTs = -1; pInfo->hbCount = 0; *pRes = pInfo; + + streamTmrStart(streamMetaHbToMnode, META_HB_CHECK_INTERVAL, pRid, streamTimer, &pInfo->hbTmr, 0, "stream-hb"); return TSDB_CODE_SUCCESS; } From 0b5f2ec57b8d8bcf8c8632527eccabc185f76285 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 30 Oct 2024 11:11:29 +0800 Subject: [PATCH 056/127] test(blob): testing & fixes for blob --- tests/army/storage/blob/s3Basic.json | 2 +- tests/army/storage/blob/s3Basic1.json | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/army/storage/blob/s3Basic.json b/tests/army/storage/blob/s3Basic.json index ee341b2096..2b911a989f 100644 --- a/tests/army/storage/blob/s3Basic.json +++ b/tests/army/storage/blob/s3Basic.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", diff --git a/tests/army/storage/blob/s3Basic1.json b/tests/army/storage/blob/s3Basic1.json index 02be308443..087f89edec 100644 --- a/tests/army/storage/blob/s3Basic1.json +++ b/tests/army/storage/blob/s3Basic1.json @@ -20,7 +20,7 @@ "replica": 1, "duration":"10d", "s3_keeplocal":"30d", - "s3_chunksize":"131072", + "s3_chunkpages":"131072", "tsdb_pagesize":"1", "s3_compact":"1", "wal_retention_size":"1", From 6d4d79a4d3c5724fbcfcc19d392c697196bac7a1 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 30 Oct 2024 14:04:12 +0800 Subject: [PATCH 057/127] fix: geometry ST_AsText --- source/libs/function/src/builtins.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index e66ddf5197..d91060fb6e 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -1533,14 +1533,16 @@ static int32_t translateToJson(SFunctionNode* pFunc, char* pErrBuf, int32_t len) static int32_t translateOutGeom(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_GEOMETRY].bytes, .type = TSDB_DATA_TYPE_GEOMETRY}; + SDataType dt = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + pFunc->node.resType = (SDataType){.bytes = dt.bytes, .type = TSDB_DATA_TYPE_GEOMETRY}; return TSDB_CODE_SUCCESS; } static int32_t translateInGeomOutStr(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { FUNC_ERR_RET(validateParam(pFunc, pErrBuf, len)); - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_VARCHAR].bytes, .type = TSDB_DATA_TYPE_VARCHAR}; + SDataType dt = *getSDataTypeFromNode(nodesListGetNode(pFunc->pParameterList, 0)); + pFunc->node.resType = (SDataType){.bytes = dt.bytes, .type = TSDB_DATA_TYPE_VARCHAR}; return TSDB_CODE_SUCCESS; } From e9ef7a85744d7a84c6008fe4ea7cb192a03297ac Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Wed, 30 Oct 2024 14:29:26 +0800 Subject: [PATCH 058/127] rename 
s3_chunksize to s3_chunkpages --- tests/army/storage/blob/ablob.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/army/storage/blob/ablob.py b/tests/army/storage/blob/ablob.py index fae492a3df..f6e783d6f3 100644 --- a/tests/army/storage/blob/ablob.py +++ b/tests/army/storage/blob/ablob.py @@ -152,13 +152,13 @@ class TDTestCase(TBase): if keepLocal is not None: kw1 = f"s3_keeplocal {keepLocal}" if chunkSize is not None: - kw2 = f"s3_chunksize {chunkSize}" + kw2 = f"s3_chunkpages {chunkSize}" if compact is not None: kw3 = f"s3_compact {compact}" sql = f" create database db1 vgroups 1 duration 1h {kw1} {kw2} {kw3}" tdSql.execute(sql, show=True) - #sql = f"select name,s3_keeplocal,s3_chunksize,s3_compact from information_schema.ins_databases where name='db1';" + #sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" sql = f"select * from information_schema.ins_databases where name='db1';" tdSql.query(sql) # 29 30 31 -> chunksize keeplocal compact @@ -178,9 +178,9 @@ class TDTestCase(TBase): f"create database db2 s3_keeplocal -1", f"create database db2 s3_keeplocal 0", f"create database db2 s3_keeplocal 365001", - f"create database db2 s3_chunksize -1", - f"create database db2 s3_chunksize 0", - f"create database db2 s3_chunksize 900000000", + f"create database db2 s3_chunkpages -1", + f"create database db2 s3_chunkpages 0", + f"create database db2 s3_chunkpages 900000000", f"create database db2 s3_compact -1", f"create database db2 s3_compact 100", f"create database db2 duration 1d s3_keeplocal 1d" From 73be7ddbc68ef247ff293af7506113d027fe402c Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Wed, 30 Oct 2024 19:38:08 +0800 Subject: [PATCH 059/127] fix: set tablefield of stream block to NULL --- source/dnode/vnode/src/tq/tqUtil.c | 3 +++ source/libs/executor/src/scanoperator.c | 13 ++++++++++++- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tq/tqUtil.c b/source/dnode/vnode/src/tq/tqUtil.c index b4866b8c65..1edc3d42ab 100644 --- a/source/dnode/vnode/src/tq/tqUtil.c +++ b/source/dnode/vnode/src/tq/tqUtil.c @@ -626,6 +626,9 @@ int32_t tqExtractDelDataBlock(const void* pData, int32_t len, int64_t ver, void* tmp = taosArrayGet(pDelBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); TSDB_CHECK_NULL(tmp, code, line, END, terrno) colDataSetNULL(tmp, i); + tmp = taosArrayGet(pDelBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); + TSDB_CHECK_NULL(tmp, code, line, END, terrno) + colDataSetNULL(tmp, i); } if (type == 0) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index f936e95005..918be0465f 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2115,6 +2115,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); for (int32_t i = 0; i < pSrcBlock->info.rows; i++) { uint64_t groupId = pSrcGp[i]; if (groupId == 0) { @@ -2152,7 +2153,7 @@ static int32_t generateSessionScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSr 
QUERY_CHECK_CODE(code, lino, _end); colDataSetNULL(pDestCalStartTsCol, i); - colDataSetNULL(pDestCalEndTsCol, i); + colDataSetNULL(pDestTableNameInxCol, i); pDestBlock->info.rows++; } @@ -2205,6 +2206,7 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB SColumnInfoData* pDestGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); SColumnInfoData* pDestCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); SColumnInfoData* pDestCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); for (int32_t i = 0; i < pSrcBlock->info.rows; i++) { uint64_t groupId = pSrcGp[i]; if (groupId == 0) { @@ -2233,6 +2235,8 @@ static int32_t generateCountScanRange(SStreamScanInfo* pInfo, SSDataBlock* pSrcB code = colDataSetVal(pDestCalEndTsCol, i, (const char*)&range.win.ekey, false); QUERY_CHECK_CODE(code, lino, _end); + colDataSetNULL(pDestTableNameInxCol, i); + pDestBlock->info.rows++; } @@ -2287,6 +2291,7 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS SColumnInfoData* pGpCol = taosArrayGet(pDestBlock->pDataBlock, GROUPID_COLUMN_INDEX); SColumnInfoData* pCalStartTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX); SColumnInfoData* pCalEndTsCol = taosArrayGet(pDestBlock->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX); + SColumnInfoData* pDestTableNameInxCol = taosArrayGet(pDestBlock->pDataBlock, TABLE_NAME_COLUMN_INDEX); for (int32_t i = 0; i < pSrcBlock->info.rows;) { uint64_t srcUid = srcUidData[i]; uint64_t groupId = srcGp[i]; @@ -2319,6 +2324,8 @@ static int32_t generateIntervalScanRange(SStreamScanInfo* pInfo, SSDataBlock* pS code = colDataSetVal(pGpCol, pDestBlock->info.rows, (const char*)(&groupId), false); QUERY_CHECK_CODE(code, lino, _end); + colDataSetNULL(pDestTableNameInxCol, pDestBlock->info.rows); + pDestBlock->info.rows++; } @@ -3074,6 +3081,7 @@ static int32_t filterDelBlockByUid(SSDataBlock* pDst, const SSDataBlock* pSrc, S colDataSetNULL(taosArrayGet(pDst->pDataBlock, GROUPID_COLUMN_INDEX), j); colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_START_TS_COLUMN_INDEX), j); colDataSetNULL(taosArrayGet(pDst->pDataBlock, CALCULATE_END_TS_COLUMN_INDEX), j); + colDataSetNULL(taosArrayGet(pDst->pDataBlock, TABLE_NAME_COLUMN_INDEX), j); j++; } } @@ -3670,6 +3678,9 @@ FETCH_NEXT_BLOCK: // printDataBlock(pInfo->pCheckpointRes, "stream scan ck", GET_TASKID(pTaskInfo)); (*ppRes) = pInfo->pCheckpointRes; return code; + } else { + qError("stream scan error, invalid block type %d, %s", pInfo->blockType, id); + code = TSDB_CODE_QRY_EXECUTOR_INTERNAL_ERROR; } _end: From 01748085cca30ff398a81c151ca519965caef9fe Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 31 Oct 2024 00:25:26 +0800 Subject: [PATCH 060/127] fix(stream): not remove task when trying to stop it. 
--- source/libs/stream/src/streamStartTask.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/source/libs/stream/src/streamStartTask.c b/source/libs/stream/src/streamStartTask.c index 3518b8681d..ed12687e41 100644 --- a/source/libs/stream/src/streamStartTask.c +++ b/source/libs/stream/src/streamStartTask.c @@ -453,10 +453,6 @@ int32_t streamMetaStopAllTasks(SStreamMeta* pMeta) { } streamMetaReleaseTask(pMeta, pTask); - ret = taosRemoveRef(streamTaskRefPool, refId); - if (ret) { - stError("vgId:%d failed to remove task:0x%x, refId:%"PRId64, pMeta->vgId, pTaskId->taskId, refId); - } } taosArrayDestroy(pTaskList); From be82bbf90ca87b1cb6a80cb78810994e354baac0 Mon Sep 17 00:00:00 2001 From: yanyuxing Date: Thu, 31 Oct 2024 10:46:39 +0800 Subject: [PATCH 061/127] feat(packaging): add client install summary information --- packaging/tools/mac_install_summary_client.txt | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 packaging/tools/mac_install_summary_client.txt diff --git a/packaging/tools/mac_install_summary_client.txt b/packaging/tools/mac_install_summary_client.txt new file mode 100644 index 0000000000..f49703c555 --- /dev/null +++ b/packaging/tools/mac_install_summary_client.txt @@ -0,0 +1,4 @@ +TDengine client is installed successfully. Please open a terminal and execute the commands below: + +To configure TDengine client, sudo vi /etc/taos/taos.cfg +To access TDengine command line interface, taos -h YouServerName \ No newline at end of file From ad83a9a6509244e1c04ec53de5a4eb2d2d5f9e45 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 31 Oct 2024 11:16:39 +0800 Subject: [PATCH 062/127] fix: reset offset when trimdatablock --- source/common/src/tdatablock.c | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index c185ac9d86..49dd9948f2 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -114,16 +114,7 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const if (IS_VAR_DATA_TYPE(type)) { int32_t dataLen = getDataLen(type, pData); if (pColumnInfoData->varmeta.offset[rowIndex] > 0) { - if (rowIndex == 0) { - pColumnInfoData->varmeta.length = 0; - } else { - int32_t start = pColumnInfoData->varmeta.offset[rowIndex - 1]; - int32_t lastDataLen = getDataLen(type, pColumnInfoData->pData + start); - if (start + lastDataLen < pColumnInfoData->varmeta.length) { - uInfo("column data is reassigned, row:%d, offset:%d, length:%d", rowIndex, start, lastDataLen); - pColumnInfoData->varmeta.length = start + lastDataLen; - } - } + pColumnInfoData->varmeta.length = pColumnInfoData->varmeta.offset[rowIndex]; } SVarColAttr* pAttr = &pColumnInfoData->varmeta; @@ -3391,6 +3382,7 @@ int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolL } memcpy(p2, p1, len); + pDst->varmeta.offset[numOfRows] = -1; code = colDataSetVal(pDst, numOfRows, p2, false); taosMemoryFree(p2); if (code) { From 930dc4abf7d7907ffaa6cdef64b1ec82084b36e4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 31 Oct 2024 13:15:01 +0800 Subject: [PATCH 063/127] fix(stream): remove invalid acquire. 
--- source/libs/stream/src/streamStartHistory.c | 9 --------- source/libs/stream/src/streamTask.c | 2 +- 2 files changed, 1 insertion(+), 10 deletions(-) diff --git a/source/libs/stream/src/streamStartHistory.c b/source/libs/stream/src/streamStartHistory.c index 4d38c48fc5..54a8929123 100644 --- a/source/libs/stream/src/streamStartHistory.c +++ b/source/libs/stream/src/streamStartHistory.c @@ -88,15 +88,6 @@ void streamExecScanHistoryInFuture(SStreamTask* pTask, int32_t idleDuration) { numOfTicks = SCANHISTORY_IDLE_TICK; } - // add ref for task - SStreamTask* p = NULL; - int32_t code = streamMetaAcquireTask(pTask->pMeta, pTask->id.streamId, pTask->id.taskId, &p); - if (p == NULL || code != 0) { - stError("s-task:0x%x failed to acquire task, status:%s, not exec scan-history data", pTask->id.taskId, - streamTaskGetStatus(pTask).name); - return; - } - pTask->schedHistoryInfo.numOfTicks = numOfTicks; stDebug("s-task:%s scan-history resumed in %.2fs", pTask->id.idStr, numOfTicks * 0.1); diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 67d76ba2ef..297dd6e72f 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -227,7 +227,7 @@ void tFreeStreamTask(void* pParam) { } streamMutexUnlock(&pTask->lock); - stDebug("start to free s-task:0x%x %p, state:%s", taskId, pTask, p); + stDebug("start to free s-task:0x%x %p, state:%s, refId:%" PRId64, taskId, pTask, p, pTask->id.refId); SCheckpointInfo* pCkInfo = &pTask->chkInfo; stDebug("s-task:0x%x task exec summary: create:%" PRId64 ", init:%" PRId64 ", start:%" PRId64 From 871dd7104003bd945131f45a8609d3a1fafd8f43 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 31 Oct 2024 14:08:38 +0800 Subject: [PATCH 064/127] fix(stream): add some logs. 
--- source/libs/stream/src/streamSched.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/stream/src/streamSched.c b/source/libs/stream/src/streamSched.c index 25d6161aba..c5c4a8cc34 100644 --- a/source/libs/stream/src/streamSched.c +++ b/source/libs/stream/src/streamSched.c @@ -137,6 +137,8 @@ void streamTaskSchedHelper(void* param, void* tmrId) { return; } + stDebug("s-task:%s acquire task, refId:%"PRId64, pTask->id.idStr, pTask->id.refId); + const char* id = pTask->id.idStr; int32_t nextTrigger = (int32_t)pTask->info.delaySchedParam; int32_t vgId = pTask->pMeta->vgId; From 974591f2658ce8b53cf360db386ce038df24c208 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 31 Oct 2024 14:23:28 +0800 Subject: [PATCH 065/127] fix: colDataSetOrCover --- include/common/tdatablock.h | 1 + source/common/src/tdatablock.c | 19 +++++++++++++++---- source/libs/executor/src/executorInt.c | 2 +- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 3b24ef9490..6578999db4 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -190,6 +190,7 @@ static FORCE_INLINE void colDataSetDouble(SColumnInfoData* pColumnInfoData, uint int32_t getJsonValueLen(const char* data); int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); +int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData); int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, bool trimValue); diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 49dd9948f2..94ebcc7033 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -97,8 +97,8 @@ static int32_t getDataLen(int32_t type, const char* pData) { return dataLen; } -int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { - if (isNull || pData == NULL) { +static int32_t colDataSetValHelp(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { + if (isNull || pData == NULL) { // There is a placehold for each NULL value of binary or nchar type. if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { pColumnInfoData->varmeta.offset[rowIndex] = -1; // it is a null value of VAR type. 
@@ -143,7 +143,7 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const uint32_t len = pColumnInfoData->varmeta.length; pColumnInfoData->varmeta.offset[rowIndex] = len; - (void) memmove(pColumnInfoData->pData + len, pData, dataLen); + (void)memmove(pColumnInfoData->pData + len, pData, dataLen); pColumnInfoData->varmeta.length += dataLen; } else { memcpy(pColumnInfoData->pData + pColumnInfoData->info.bytes * rowIndex, pData, pColumnInfoData->info.bytes); @@ -153,6 +153,18 @@ int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const return 0; } +int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { + if (IS_VAR_DATA_TYPE(pColumnInfoData->info.type)) { + pColumnInfoData->varmeta.offset[rowIndex] = -1; + } + + return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull); +} + +int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull) { + return colDataSetValHelp(pColumnInfoData, rowIndex, pData, isNull); +} + int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData) { int32_t type = pColumnInfoData->info.type; @@ -3382,7 +3394,6 @@ int32_t trimDataBlock(SSDataBlock* pBlock, int32_t totalRows, const bool* pBoolL } memcpy(p2, p1, len); - pDst->varmeta.offset[numOfRows] = -1; code = colDataSetVal(pDst, numOfRows, p2, false); taosMemoryFree(p2); if (code) { diff --git a/source/libs/executor/src/executorInt.c b/source/libs/executor/src/executorInt.c index b39cf4014d..1b823bf69d 100644 --- a/source/libs/executor/src/executorInt.c +++ b/source/libs/executor/src/executorInt.c @@ -702,7 +702,7 @@ int32_t copyResultrowToDataBlock(SExprInfo* pExprInfo, int32_t numOfExprs, SResu QUERY_CHECK_NULL(pColInfoData, code, lino, _end, terrno); char* in = GET_ROWCELL_INTERBUF(pCtx[j].resultInfo); for (int32_t k = 0; k < pRow->numOfRows; ++k) { - code = colDataSetVal(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); + code = colDataSetValOrCover(pColInfoData, pBlock->info.rows + k, in, pCtx[j].resultInfo->isNullRes); QUERY_CHECK_CODE(code, lino, _end); } } From b1ad121bae8cf31956efcbca9e777ea9b5e53038 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 31 Oct 2024 14:49:16 +0800 Subject: [PATCH 066/127] fix(stream): release task before returning. 
--- source/dnode/vnode/src/tqCommon/tqCommon.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/dnode/vnode/src/tqCommon/tqCommon.c b/source/dnode/vnode/src/tqCommon/tqCommon.c index e8d929e4aa..59e739d1ff 100644 --- a/source/dnode/vnode/src/tqCommon/tqCommon.c +++ b/source/dnode/vnode/src/tqCommon/tqCommon.c @@ -200,6 +200,8 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM bool update = streamMetaInitUpdateTaskList(pMeta, req.transId); if (!update) { rsp.code = TSDB_CODE_SUCCESS; + + streamMetaReleaseTask(pMeta, pTask); streamMetaWUnLock(pMeta); taosArrayDestroy(req.pNodeList); @@ -214,7 +216,10 @@ int32_t tqStreamTaskProcessUpdateReq(SStreamMeta* pMeta, SMsgCb* cb, SRpcMsg* pM tqDebug("s-task:%s (vgId:%d) already update in transId:%d, discard the nodeEp update msg", idstr, vgId, req.transId); rsp.code = TSDB_CODE_SUCCESS; + + streamMetaReleaseTask(pMeta, pTask); streamMetaWUnLock(pMeta); + taosArrayDestroy(req.pNodeList); return rsp.code; } From 8b596ec4c41690ed42b7e60931b8a0eeba5c4e2b Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 31 Oct 2024 15:14:26 +0800 Subject: [PATCH 067/127] fix(stream): release task after acquire. --- source/libs/stream/src/streamMeta.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 598f809c21..2a271b9960 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -538,6 +538,11 @@ void streamMetaClear(SStreamMeta* pMeta) { if (code) { stError("vgId:%d remove task refId failed, refId:%" PRId64, pMeta->vgId, refId); } + + code = taosReleaseRef(streamTaskRefPool, refId); + if (code) { + stError("vgId:%d failed to release refId:%" PRId64, pMeta->vgId, refId); + } } if (pMeta->streamBackendRid != 0) { From a685c1394a4316fe0c1dfc9a472c4f6be14f6ee9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 31 Oct 2024 15:20:18 +0800 Subject: [PATCH 068/127] refactor: do some internal refactor. 
--- include/libs/stream/tstream.h | 1 - source/libs/stream/src/streamMeta.c | 4 ++-- source/libs/stream/src/streamTask.c | 2 -- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 449df5207f..81be70e35f 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -455,7 +455,6 @@ struct SStreamTask { // the followings attributes don't be serialized SScanhistorySchedInfo schedHistoryInfo; - int32_t refCnt; int32_t transferStateAlignCnt; struct SStreamMeta* pMeta; SSHashObj* pNameMap; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 2a271b9960..93c205005f 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -529,7 +529,7 @@ void streamMetaClear(SStreamMeta* pMeta) { // release the ref by timer if (p->info.delaySchedParam != 0 && p->info.fillHistory == 0) { // one more ref in timer - stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", p->id.idStr, p->refCnt); + stDebug("s-task:%s stop schedTimer", p->id.idStr); streamTmrStop(p->schedInfo.pDelayTimer); p->info.delaySchedParam = 0; } @@ -929,7 +929,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t } if (pTask->info.delaySchedParam != 0 && pTask->info.fillHistory == 0) { - stDebug("s-task:%s stop schedTimer, and (before) desc ref:%d", pTask->id.idStr, pTask->refCnt); + stDebug("s-task:%s stop schedTimer", pTask->id.idStr); streamTmrStop(pTask->schedInfo.pDelayTimer); pTask->info.delaySchedParam = 0; } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 297dd6e72f..08870a5443 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -423,9 +423,7 @@ int32_t streamTaskInit(SStreamTask* pTask, SStreamMeta* pMeta, SMsgCb* pMsgCb, i return code; } - pTask->refCnt = 1; pTask->id.refId = 0; - pTask->inputq.status = TASK_INPUT_STATUS__NORMAL; pTask->outputq.status = TASK_OUTPUT_STATUS__NORMAL; From b663adf946bf6d247c5ebd9ee0e363a7ecc2a144 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Thu, 31 Oct 2024 17:33:08 +0800 Subject: [PATCH 069/127] feat: delete report --- include/libs/monitor/clientMonitor.h | 3 ++ source/client/src/clientImpl.c | 2 + source/client/src/clientMonitor.c | 71 ++++++++++++++++++++++++++-- 3 files changed, 73 insertions(+), 3 deletions(-) diff --git a/include/libs/monitor/clientMonitor.h b/include/libs/monitor/clientMonitor.h index 0085173ecd..b09a1ac11c 100644 --- a/include/libs/monitor/clientMonitor.h +++ b/include/libs/monitor/clientMonitor.h @@ -24,6 +24,7 @@ extern "C" { #include "thash.h" #include "query.h" #include "tqueue.h" +#include "clientInt.h" typedef enum { SQL_RESULT_SUCCESS = 0, @@ -81,6 +82,8 @@ void monitorCreateClientCounter(int64_t clusterId, const char* name, void monitorCounterInc(int64_t clusterId, const char* counterName, const char** label_values); const char* monitorResultStr(SQL_RESULT_CODE code); int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data); + +void clientOperateReport(SRequestObj* pRequest); #ifdef __cplusplus } #endif diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 2c67cafdf5..13ca874c47 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2945,6 +2945,8 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s tscDebug("taos_query end with sql:%s", sql); + 
clientOperateReport(pRequest); + return pRequest; } diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index 595c871953..0624d8d94d 100644 --- a/source/client/src/clientMonitor.c +++ b/source/client/src/clientMonitor.c @@ -2,8 +2,6 @@ #include "cJSON.h" #include "clientInt.h" #include "clientLog.h" -#include "os.h" -#include "tglobal.h" #include "tmisce.h" #include "tqueue.h" #include "ttime.h" @@ -932,4 +930,71 @@ int32_t monitorPutData2MonitorQueue(MonitorSlowLogData data) { taosFreeQitem(slowLogData); } return 0; -} \ No newline at end of file +} + +int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) { + taosMemoryFree(pMsg->pData); + taosMemoryFree(pMsg->pEpSet); + tscDebug("[del report]delete reportCB code:%d", code); + return 0; +} + +void clientOperateReport(SRequestObj* pRequest) { + if (pRequest == NULL || pRequest->pQuery == NULL) { + tscError("[del report]invalid request"); + return; + } + + if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) { + SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot; + STscObj* pTscObj = pRequest->pTscObj; + + if(nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) { + tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable)); + return; + } + SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable; + SAuditReq req; + req.pSql = pRequest->sqlstr; + req.sqlLen = pRequest->sqlLen; + tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName); + tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName); + tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"); + int32_t tlen = tSerializeSAuditReq(NULL, 0, &req); + void* pReq = taosMemoryCalloc(1, tlen); + if (pReq == NULL) { + tscError("[del report]failed to allocate memory for req"); + return; + } + + if (tSerializeSAuditReq(pReq, tlen, &req) < 0) { + tscError("[del report]failed to serialize req"); + taosMemoryFree(pReq); + return; + } + + SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (sendInfo == NULL) { + tscError("[del report]failed to allocate memory for sendInfo"); + taosMemoryFree(pReq); + return; + } + + sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = tlen, .handle = NULL}; + + sendInfo->requestId = generateRequestId(); + sendInfo->requestObjRefId = 0; + sendInfo->param = NULL; + sendInfo->fp = reportCB; + sendInfo->msgType = TDMT_MND_AUDIT; + + SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); + + int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo); + if (code != 0) { + tscError("[del report]failed to send msg to server, code:%d", code); + taosMemoryFree(sendInfo); + } + tscDebug("[del report]delete data, sql:%s", req.pSql); + } +} From 6010e52a2389a77897a0bf474066973baf0f40ac Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Thu, 31 Oct 2024 17:42:06 +0800 Subject: [PATCH 070/127] blob/param: new cases for default value checking --- tests/army/storage/blob/ablob.py | 39 ++++++++++++++++---------------- 1 file changed, 19 insertions(+), 20 deletions(-) diff --git a/tests/army/storage/blob/ablob.py b/tests/army/storage/blob/ablob.py index f6e783d6f3..d3e00f3424 100644 --- a/tests/army/storage/blob/ablob.py +++ b/tests/army/storage/blob/ablob.py @@ -31,8 +31,6 @@ from frame.eos import * class TDTestCase(TBase): - index = eutil.cpuRand(20) + 1 - bucketName = f"ci-bucket{index}" updatecfgDict = { "supportVnodes":"1000", 's3EndPoint': 'https://.blob.core.windows.net', @@ -44,7 +42,6 @@ class TDTestCase(TBase): 
's3MigrateEnabled': '1' } - tdLog.info(f"assign bucketName is {bucketName}\n") maxFileSize = (128 + 10) * 1014 * 1024 # add 10M buffer def insertData(self): @@ -172,6 +169,23 @@ class TDTestCase(TBase): sql = "drop database db1" tdSql.execute(sql) + def checkDefault(self, keepLocal, chunkSize, compact): + sql = f" create database db1 vgroups 1" + tdSql.execute(sql, show=True) + #sql = f"select name,s3_keeplocal,s3_chunkpages,s3_compact from information_schema.ins_databases where name='db1';" + sql = f"select * from information_schema.ins_databases where name='db1';" + tdSql.query(sql) + # 29 30 31 -> chunksize keeplocal compact + if chunkSize is not None: + tdSql.checkData(0, 29, chunkSize) + if keepLocal is not None: + keepLocalm = keepLocal * 24 * 60 + tdSql.checkData(0, 30, f"{keepLocalm}m") + if compact is not None: + tdSql.checkData(0, 31, compact) + sql = "drop database db1" + tdSql.execute(sql) + def checkExcept(self): # errors sqls = [ @@ -226,16 +240,7 @@ class TDTestCase(TBase): # except self.checkExcept() - - # - def preDb(self, vgroups): - cnt = int(time.time())%2 + 1 - for i in range(cnt): - vg = eutil.cpuRand(9) + 1 - sql = f"create database predb vgroups {vg}" - tdSql.execute(sql, show=True) - sql = "drop database predb" - tdSql.execute(sql, show=True) + self.checkDefault(365, 131072, 1) # history def insertHistory(self): @@ -287,9 +292,6 @@ class TDTestCase(TBase): if eos.isArm64Cpu(): tdLog.success(f"{__file__} arm64 ignore executed") else: - - self.preDb(10) - # insert data self.insertData() @@ -311,7 +313,6 @@ class TDTestCase(TBase): # check insert correct again self.checkInsertCorrect() - # check stream correct and drop stream #self.checkStreamCorrect() @@ -321,7 +322,7 @@ class TDTestCase(TBase): # insert history disorder data self.insertHistory() - # checkBasic + # check db params self.checkBasic() #self.checkInsertCorrect() @@ -335,10 +336,8 @@ class TDTestCase(TBase): # drop database and free s3 file self.dropDb() - tdLog.success(f"{__file__} successfully executed") - tdCases.addLinux(__file__, TDTestCase()) tdCases.addWindows(__file__, TDTestCase()) From c18b26ff9c06a04d8ee06ff381ed99073c9ce37d Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Wed, 30 Oct 2024 10:21:58 +0800 Subject: [PATCH 071/127] enh: add more log for arbtrans --- source/dnode/mnode/impl/src/mndArbGroup.c | 6 ++++++ source/libs/sync/src/syncUtil.c | 4 ++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index 1dd21900e3..77ba6d65dd 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -250,6 +250,12 @@ static int32_t mndArbGroupActionUpdate(SSdb *pSdb, SArbGroup *pOld, SArbGroup *p pOld->assignedLeader.acked = pNew->assignedLeader.acked; pOld->version++; + mInfo( + "arbgroup:%d, perform update action. 
members[0].token:%s, members[1].token:%s, isSync:%d, as-dnodeid:%d, " + "as-token:%s, as-acked:%d, version:%" PRId64, + pOld->vgId, pOld->members[0].state.token, pOld->members[1].state.token, pOld->isSync, + pOld->assignedLeader.dnodeId, pOld->assignedLeader.token, pOld->assignedLeader.acked, pOld->version); + _OVER: (void)taosThreadMutexUnlock(&pOld->mutex); diff --git a/source/libs/sync/src/syncUtil.c b/source/libs/sync/src/syncUtil.c index 212a75c2ae..996d8531e2 100644 --- a/source/libs/sync/src/syncUtil.c +++ b/source/libs/sync/src/syncUtil.c @@ -234,14 +234,14 @@ void syncPrintNodeLog(const char* flags, ELogLevel level, int32_t dflag, SSyncNo ", elect-times:%d, as-leader-times:%d, as-assigned-leader-times:%d, cfg-ch-times:%d, hb-slow:%d, hbr-slow:%d, " "aq-items:%d, snaping:%" PRId64 ", replicas:%d, last-cfg:%" PRId64 ", chging:%d, restore:%d, quorum:%d, elect-lc-timer:%" PRId64 ", hb:%" PRId64 - ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s", + ", buffer:%s, repl-mgrs:%s, members:%s, hb:%s, hb-reply:%s, arb-token:%s", pNode->vgId, eventLog, syncStr(pNode->state), currentTerm, pNode->commitIndex, pNode->assignedCommitIndex, appliedIndex, logBeginIndex, logLastIndex, pNode->minMatchIndex, snapshot.lastApplyIndex, snapshot.lastApplyTerm, pNode->electNum, pNode->becomeLeaderNum, pNode->becomeAssignedLeaderNum, pNode->configChangeNum, pNode->hbSlowNum, pNode->hbrSlowNum, aqItems, pNode->snapshottingIndex, pNode->replicaNum, pNode->raftCfg.lastConfigIndex, pNode->changing, pNode->restoreFinish, syncNodeDynamicQuorum(pNode), pNode->electTimerLogicClock, pNode->heartbeatTimerLogicClockUser, bufferStatesStr, - replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr); + replMgrStatesStr, cfgStr, hbTimeStr, hbrTimeStr, pNode->arbToken); } } From ff565f0f39b4b01925b6375382b7c00f89b7716f Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Thu, 31 Oct 2024 18:58:55 +0800 Subject: [PATCH 072/127] fix: mndProcessArbCheckSyncTimer no check for sync --- source/dnode/mnode/impl/src/mndArbGroup.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index 77ba6d65dd..e6e8294cd0 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -629,8 +629,8 @@ static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) { SArbAssignedLeader *pAssignedLeader = &arbGroupDup.assignedLeader; int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId; - // 1. has assigned && is sync && no response => send req - if (currentAssignedDnodeId != 0 && arbGroupDup.isSync == true && pAssignedLeader->acked == false) { + // 1. 
has assigned && no response => send req + if (currentAssignedDnodeId != 0 && pAssignedLeader->acked == false) { (void)mndSendArbSetAssignedLeaderReq(pMnode, currentAssignedDnodeId, vgId, arbToken, term, pAssignedLeader->token); mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, currentAssignedDnodeId); From 8c3e4ce63daac2a6140d05f56a97a16a197423a0 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 1 Nov 2024 09:48:11 +0800 Subject: [PATCH 073/127] enh: set default safetyCheckLevel to normal --- source/common/src/tglobal.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 1aef08bc7e..b57db2a84a 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -139,7 +139,7 @@ bool tsEnableCrashReport = true; #endif char *tsClientCrashReportUri = "/ccrashreport"; char *tsSvrCrashReportUri = "/dcrashreport"; -int8_t tsSafetyCheckLevel = TSDB_SAFETY_CHECK_LEVELL_NEVER; +int8_t tsSafetyCheckLevel = TSDB_SAFETY_CHECK_LEVELL_NORMAL; // schemaless bool tsSmlDot2Underline = true; From 08267fec5ceb1a7149b81912f6669220c52119f0 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 1 Nov 2024 11:40:53 +0800 Subject: [PATCH 074/127] feat: delete report --- source/client/src/clientImpl.c | 3 +- source/client/src/clientMonitor.c | 120 +++++++++++++++++------------- 2 files changed, 71 insertions(+), 52 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 13ca874c47..04c536a2d0 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -2836,6 +2836,7 @@ void syncQueryFn(void* param, void* res, int32_t code) { if (pParam->pRequest) { pParam->pRequest->code = code; + clientOperateReport(pParam->pRequest); } if (TSDB_CODE_SUCCESS != tsem_post(&pParam->sem)) { @@ -2945,8 +2946,6 @@ TAOS_RES* taosQueryImpl(TAOS* taos, const char* sql, bool validateOnly, int8_t s tscDebug("taos_query end with sql:%s", sql); - clientOperateReport(pRequest); - return pRequest; } diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index 0624d8d94d..3469543e4d 100644 --- a/source/client/src/clientMonitor.c +++ b/source/client/src/clientMonitor.c @@ -939,6 +939,75 @@ int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) { return 0; } +int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len) { + SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); + if (sendInfo == NULL) { + tscError("[del report]failed to allocate memory for sendInfo"); + return terrno; + } + + sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = len, .handle = NULL}; + + sendInfo->requestId = generateRequestId(); + sendInfo->requestObjRefId = 0; + sendInfo->param = NULL; + sendInfo->fp = reportCB; + sendInfo->msgType = TDMT_MND_AUDIT; + + SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); + + int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo); + if (code != 0) { + tscError("[del report]failed to send msg to server, code:%d", code); + taosMemoryFree(sendInfo); + return code; + } + return TSDB_CODE_SUCCESS; +} + +static void reportDeleteSql(SRequestObj* pRequest) { + SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot; + STscObj* pTscObj = pRequest->pTscObj; + + if (pRequest->code != TSDB_CODE_SUCCESS) { + tscDebug("[del report]delete request result code:%d", pRequest->code); + return; + } + + if (nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) { + 
tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable)); + return; + } + + SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable; + SAuditReq req; + req.pSql = pRequest->sqlstr; + req.sqlLen = pRequest->sqlLen; + tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName); + tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName); + tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"); + int32_t tlen = tSerializeSAuditReq(NULL, 0, &req); + void* pReq = taosMemoryCalloc(1, tlen); + if (pReq == NULL) { + tscError("[del report]failed to allocate memory for req"); + return; + } + + if (tSerializeSAuditReq(pReq, tlen, &req) < 0) { + tscError("[del report]failed to serialize req"); + taosMemoryFree(pReq); + return; + } + + int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen); + if (code != 0) { + tscError("[del report]failed to send audit info, code:%d", code); + taosMemoryFree(pReq); + return; + } + tscDebug("[del report]delete data, sql:%s", req.pSql); +} + void clientOperateReport(SRequestObj* pRequest) { if (pRequest == NULL || pRequest->pQuery == NULL) { tscError("[del report]invalid request"); @@ -946,55 +1015,6 @@ void clientOperateReport(SRequestObj* pRequest) { } if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) { - SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot; - STscObj* pTscObj = pRequest->pTscObj; - - if(nodeType(pStmt->pFromTable) != QUERY_NODE_REAL_TABLE) { - tscError("[del report]invalid from table node type:%d", nodeType(pStmt->pFromTable)); - return; - } - SRealTableNode* pTable = (SRealTableNode*)pStmt->pFromTable; - SAuditReq req; - req.pSql = pRequest->sqlstr; - req.sqlLen = pRequest->sqlLen; - tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName); - tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName); - tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"); - int32_t tlen = tSerializeSAuditReq(NULL, 0, &req); - void* pReq = taosMemoryCalloc(1, tlen); - if (pReq == NULL) { - tscError("[del report]failed to allocate memory for req"); - return; - } - - if (tSerializeSAuditReq(pReq, tlen, &req) < 0) { - tscError("[del report]failed to serialize req"); - taosMemoryFree(pReq); - return; - } - - SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); - if (sendInfo == NULL) { - tscError("[del report]failed to allocate memory for sendInfo"); - taosMemoryFree(pReq); - return; - } - - sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = tlen, .handle = NULL}; - - sendInfo->requestId = generateRequestId(); - sendInfo->requestObjRefId = 0; - sendInfo->param = NULL; - sendInfo->fp = reportCB; - sendInfo->msgType = TDMT_MND_AUDIT; - - SEpSet epSet = getEpSet_s(&pTscObj->pAppInfo->mgmtEp); - - int32_t code = asyncSendMsgToServer(pTscObj->pAppInfo->pTransporter, &epSet, NULL, sendInfo); - if (code != 0) { - tscError("[del report]failed to send msg to server, code:%d", code); - taosMemoryFree(sendInfo); - } - tscDebug("[del report]delete data, sql:%s", req.pSql); + reportDeleteSql(pRequest); } } From 6f68677b7ea8dee9cf3e818ec407d05c59a244b4 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Fri, 1 Nov 2024 13:40:00 +0800 Subject: [PATCH 075/127] enh: delete report switch --- source/client/src/clientMonitor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index 3469543e4d..44ed262880 100644 --- a/source/client/src/clientMonitor.c +++ 
b/source/client/src/clientMonitor.c @@ -17,6 +17,7 @@ STaosQueue* monitorQueue; SHashObj* monitorSlowLogHash; char tmpSlowLogPath[PATH_MAX] = {0}; TdThread monitorThread; +extern bool tsEnableAuditDelete; static int32_t getSlowLogTmpDir(char* tmpPath, int32_t size) { int ret = tsnprintf(tmpPath, size, "%s/tdengine_slow_log/", tsTempDir); @@ -1014,7 +1015,7 @@ void clientOperateReport(SRequestObj* pRequest) { return; } - if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) { + if (tsEnableAuditDelete && QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) { reportDeleteSql(pRequest); } } From 39a0916b803444155742c4a764acaaf5dba6c675 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Fri, 1 Nov 2024 15:38:33 +0800 Subject: [PATCH 076/127] fix: return value --- source/client/src/clientMonitor.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index 44ed262880..3b19e070b8 100644 --- a/source/client/src/clientMonitor.c +++ b/source/client/src/clientMonitor.c @@ -984,9 +984,9 @@ static void reportDeleteSql(SRequestObj* pRequest) { SAuditReq req; req.pSql = pRequest->sqlstr; req.sqlLen = pRequest->sqlLen; - tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName); - tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName); - tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete"); + TAOS_UNUSED(tsnprintf(req.table, TSDB_TABLE_NAME_LEN, "%s", pTable->table.tableName)); + TAOS_UNUSED(tsnprintf(req.db, TSDB_DB_FNAME_LEN, "%s", pTable->table.dbName)); + TAOS_UNUSED(tsnprintf(req.operation, AUDIT_OPERATION_LEN, "delete")); int32_t tlen = tSerializeSAuditReq(NULL, 0, &req); void* pReq = taosMemoryCalloc(1, tlen); if (pReq == NULL) { From 362aa5f7cb64aa3ff9ec7bdeaa44da77e1f02d4f Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Fri, 1 Nov 2024 15:57:55 +0800 Subject: [PATCH 077/127] enh: refactor mndProcessArbCheckSyncTimer --- source/dnode/mnode/impl/src/mndArbGroup.c | 193 +++++++++++++--------- 1 file changed, 117 insertions(+), 76 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index e6e8294cd0..1faee3744c 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -583,19 +583,84 @@ static int32_t mndSendArbSetAssignedLeaderReq(SMnode *pMnode, int32_t dnodeId, i return code; } +typedef enum { + CHECK_SYNC_NONE = 0, + CHECK_SYNC_SET_ASSIGNED_LEADER = 1, + CHECK_SYNC_CHECK_SYNC = 2, + CHECK_SYNC_UPDATE = 3 +} ECheckSyncOp; + +static void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup) { + *pOp = CHECK_SYNC_NONE; + int32_t code = 0; + + int32_t vgId = pArbGroup->vgId; + + bool member0IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 0, nowMs); + bool member1IsTimeout = mndCheckArbMemberHbTimeout(pArbGroup, 1, nowMs); + SArbAssignedLeader *pAssignedLeader = &pArbGroup->assignedLeader; + int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId; + + // 1. has assigned && no response => send req + if (currentAssignedDnodeId != 0 && pAssignedLeader->acked == false) { + *pOp = CHECK_SYNC_SET_ASSIGNED_LEADER; + return; + } + + // 2. both of the two members are timeout => skip + if (member0IsTimeout && member1IsTimeout) { + return; + } + + // 3. 
no member is timeout => check sync + if (member0IsTimeout == false && member1IsTimeout == false) { + // no assigned leader and not sync + if (currentAssignedDnodeId == 0 && !pArbGroup->isSync) { + *pOp = CHECK_SYNC_CHECK_SYNC; + } + return; + } + + // 4. one of the members is timeout => set assigned leader + int32_t candidateIndex = member0IsTimeout ? 1 : 0; + SArbGroupMember *pMember = &pArbGroup->members[candidateIndex]; + + // has assigned leader and dnodeId not match => skip + if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) { + mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId, + pMember->info.dnodeId, currentAssignedDnodeId); + return; + } + + // not sync => skip + if (pArbGroup->isSync == false) { + if (currentAssignedDnodeId == pMember->info.dnodeId) { + mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, + pMember->info.dnodeId); + } else { + mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, + pMember->info.dnodeId); + } + return; + } + + // is sync && no assigned leader => write to sdb + mndArbGroupDupObj(pArbGroup, pNewGroup); + mndArbGroupSetAssignedLeader(pNewGroup, candidateIndex); + *pOp = CHECK_SYNC_UPDATE; +} + static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) { - int32_t code = 0; + int32_t code = 0, lino = 0; SMnode *pMnode = pReq->info.node; SSdb *pSdb = pMnode->pSdb; SArbGroup *pArbGroup = NULL; - SArbGroup arbGroupDup = {0}; void *pIter = NULL; + SArray *pUpdateArray = NULL; char arbToken[TSDB_ARB_TOKEN_SIZE]; - if ((code = mndGetArbToken(pMnode, arbToken)) != 0) { - mError("failed to get arb token for arb-check-sync timer"); - TAOS_RETURN(code); - } + TAOS_CHECK_EXIT(mndGetArbToken(pMnode, arbToken)); + int64_t term = mndGetTerm(pMnode); if (term < 0) { mError("arb failed to get term since %s", terrstr()); @@ -612,87 +677,63 @@ static int32_t mndProcessArbCheckSyncTimer(SRpcMsg *pReq) { return 0; } - SArray *pUpdateArray = taosArrayInit(16, sizeof(SArbGroup)); - while (1) { pIter = sdbFetch(pSdb, SDB_ARBGROUP, pIter, (void **)&pArbGroup); if (pIter == NULL) break; + SArbGroup arbGroupDup = {0}; + (void)taosThreadMutexLock(&pArbGroup->mutex); mndArbGroupDupObj(pArbGroup, &arbGroupDup); (void)taosThreadMutexUnlock(&pArbGroup->mutex); - int32_t vgId = arbGroupDup.vgId; - - bool member0IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 0, nowMs); - bool member1IsTimeout = mndCheckArbMemberHbTimeout(&arbGroupDup, 1, nowMs); - SArbAssignedLeader *pAssignedLeader = &arbGroupDup.assignedLeader; - int32_t currentAssignedDnodeId = pAssignedLeader->dnodeId; - - // 1. has assigned && no response => send req - if (currentAssignedDnodeId != 0 && pAssignedLeader->acked == false) { - (void)mndSendArbSetAssignedLeaderReq(pMnode, currentAssignedDnodeId, vgId, arbToken, term, - pAssignedLeader->token); - mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, currentAssignedDnodeId); - sdbRelease(pSdb, pArbGroup); - continue; - } - - // 2. both of the two members are timeout => skip - if (member0IsTimeout && member1IsTimeout) { - sdbRelease(pSdb, pArbGroup); - continue; - } - - // 3. 
no member is timeout => check sync - if (member0IsTimeout == false && member1IsTimeout == false) { - // no assigned leader and not sync - if (currentAssignedDnodeId == 0 && !arbGroupDup.isSync) { - (void)mndSendArbCheckSyncReq(pMnode, arbGroupDup.vgId, arbToken, term, arbGroupDup.members[0].state.token, - arbGroupDup.members[1].state.token); - } - sdbRelease(pSdb, pArbGroup); - continue; - } - - // 4. one of the members is timeout => set assigned leader - int32_t candidateIndex = member0IsTimeout ? 1 : 0; - SArbGroupMember *pMember = &arbGroupDup.members[candidateIndex]; - - // has assigned leader and dnodeId not match => skip - if (currentAssignedDnodeId != 0 && currentAssignedDnodeId != pMember->info.dnodeId) { - mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, assigned leader has been set to dnodeId:%d", vgId, - pMember->info.dnodeId, currentAssignedDnodeId); - sdbRelease(pSdb, pArbGroup); - continue; - } - - // not sync => skip - if (arbGroupDup.isSync == false) { - if (currentAssignedDnodeId == pMember->info.dnodeId) { - mDebug("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, - pMember->info.dnodeId); - } else { - mInfo("arb skip to set assigned leader to vgId:%d dnodeId:%d, arb group is not sync", vgId, - pMember->info.dnodeId); - } - sdbRelease(pSdb, pArbGroup); - continue; - } - - // is sync && no assigned leader => write to sdb - SArbGroup newGroup = {0}; - mndArbGroupDupObj(&arbGroupDup, &newGroup); - mndArbGroupSetAssignedLeader(&newGroup, candidateIndex); - if (taosArrayPush(pUpdateArray, &newGroup) == NULL) { - taosArrayDestroy(pUpdateArray); - return terrno; - } - sdbRelease(pSdb, pArbGroup); + + ECheckSyncOp op = CHECK_SYNC_NONE; + SArbGroup newGroup = {0}; + mndArbCheckSync(&arbGroupDup, nowMs, &op, &newGroup); + + int32_t vgId = arbGroupDup.vgId; + SArbAssignedLeader *pAssgndLeader = &arbGroupDup.assignedLeader; + int32_t assgndDnodeId = pAssgndLeader->dnodeId; + + switch (op) { + case CHECK_SYNC_NONE: + mTrace("vgId:%d, arb skip to send msg by check sync", vgId); + break; + case CHECK_SYNC_SET_ASSIGNED_LEADER: + (void)mndSendArbSetAssignedLeaderReq(pMnode, assgndDnodeId, vgId, arbToken, term, pAssgndLeader->token); + mInfo("vgId:%d, arb send set assigned leader to dnodeId:%d", vgId, assgndDnodeId); + break; + case CHECK_SYNC_CHECK_SYNC: + (void)mndSendArbCheckSyncReq(pMnode, vgId, arbToken, term, arbGroupDup.members[0].state.token, + arbGroupDup.members[1].state.token); + mInfo("vgId:%d, arb send check sync request", vgId); + break; + case CHECK_SYNC_UPDATE: + if (!pUpdateArray) { + pUpdateArray = taosArrayInit(16, sizeof(SArbGroup)); + if (!pUpdateArray) { + TAOS_CHECK_EXIT(TSDB_CODE_OUT_OF_MEMORY); + } + } + + if (taosArrayPush(pUpdateArray, &newGroup) == NULL) { + TAOS_CHECK_EXIT(terrno); + } + break; + default: + mError("vgId:%d, arb unknown check sync op:%d", vgId, op); + break; + } } - TAOS_CHECK_RETURN(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray)); + TAOS_CHECK_EXIT(mndPullupArbUpdateGroupBatch(pMnode, pUpdateArray)); + +_exit: + if (code != 0) { + mError("failed to check sync at line %d since %s", lino, terrstr()); + } taosArrayDestroy(pUpdateArray); return 0; From f0760a3bb03404253161bd559c60e7365da33230 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 1 Nov 2024 16:17:10 +0800 Subject: [PATCH 078/127] fix: msg preprocess fail issue --- source/libs/qworker/src/qwMsg.c | 2 +- source/libs/qworker/src/qworker.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git 
a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 69014d5b1c..20b81bfc14 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -429,7 +429,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran tFreeSSubQueryMsg(&msg); - return TSDB_CODE_SUCCESS; + return code; } int32_t qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg) { diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index ddc4812b55..9b96c1e519 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -751,7 +751,7 @@ _return: qwReleaseTaskCtx(mgmt, ctx); } - return TSDB_CODE_SUCCESS; + return code; } int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { From 2ebc84d02af09c2ec7e64926371f49021ee8092c Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 1 Nov 2024 16:17:10 +0800 Subject: [PATCH 079/127] fix: msg preprocess fail issue --- source/libs/qworker/src/qwMsg.c | 2 +- source/libs/qworker/src/qworker.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 69014d5b1c..20b81bfc14 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -429,7 +429,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran tFreeSSubQueryMsg(&msg); - return TSDB_CODE_SUCCESS; + return code; } int32_t qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg) { diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index ddc4812b55..9b96c1e519 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -751,7 +751,7 @@ _return: qwReleaseTaskCtx(mgmt, ctx); } - return TSDB_CODE_SUCCESS; + return code; } int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { From 2551b32d1df19c1a17133a0850934a571c9100e7 Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Fri, 1 Nov 2024 17:52:10 +0800 Subject: [PATCH 080/127] enh: add unit test for mndArbCheckSync --- source/dnode/mnode/impl/inc/mndArbGroup.h | 9 ++ source/dnode/mnode/impl/src/mndArbGroup.c | 9 +- .../mnode/impl/test/arbgroup/arbgroup.cpp | 126 +++++++++++++----- 3 files changed, 106 insertions(+), 38 deletions(-) diff --git a/source/dnode/mnode/impl/inc/mndArbGroup.h b/source/dnode/mnode/impl/inc/mndArbGroup.h index 779d64c7e2..66ef3f766b 100644 --- a/source/dnode/mnode/impl/inc/mndArbGroup.h +++ b/source/dnode/mnode/impl/inc/mndArbGroup.h @@ -47,6 +47,15 @@ bool mndUpdateArbGroupBySetAssignedLeader(SArbGroup *pGroup, int32_t vgId, char int32_t mndGetArbGroupSize(SMnode *pMnode); +typedef enum { + CHECK_SYNC_NONE = 0, + CHECK_SYNC_SET_ASSIGNED_LEADER = 1, + CHECK_SYNC_CHECK_SYNC = 2, + CHECK_SYNC_UPDATE = 3 +} ECheckSyncOp; + +void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndArbGroup.c b/source/dnode/mnode/impl/src/mndArbGroup.c index 1faee3744c..0192044e67 100644 --- a/source/dnode/mnode/impl/src/mndArbGroup.c +++ b/source/dnode/mnode/impl/src/mndArbGroup.c @@ -583,14 +583,7 @@ static int32_t mndSendArbSetAssignedLeaderReq(SMnode *pMnode, int32_t dnodeId, i return code; } -typedef enum { - CHECK_SYNC_NONE = 0, - CHECK_SYNC_SET_ASSIGNED_LEADER = 1, - CHECK_SYNC_CHECK_SYNC = 2, - CHECK_SYNC_UPDATE = 3 -} ECheckSyncOp; - -static void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, 
ECheckSyncOp *pOp, SArbGroup *pNewGroup) { +void mndArbCheckSync(SArbGroup *pArbGroup, int64_t nowMs, ECheckSyncOp *pOp, SArbGroup *pNewGroup) { *pOp = CHECK_SYNC_NONE; int32_t code = 0; diff --git a/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp b/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp index fdfc560d54..11fba524fe 100644 --- a/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp +++ b/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp @@ -80,17 +80,17 @@ TEST_F(ArbgroupTest, 01_encode_decode_sdb) { SArbGroup* pNewGroup = (SArbGroup*)sdbGetRowObj(pRow); - EXPECT_EQ(group.vgId, pNewGroup->vgId); - EXPECT_EQ(group.dbUid, pNewGroup->dbUid); - EXPECT_EQ(group.members[0].info.dnodeId, pNewGroup->members[0].info.dnodeId); - EXPECT_EQ(group.members[1].info.dnodeId, pNewGroup->members[1].info.dnodeId); - EXPECT_EQ(group.isSync, pNewGroup->isSync); - EXPECT_EQ(group.assignedLeader.dnodeId, pNewGroup->assignedLeader.dnodeId); + ASSERT_EQ(group.vgId, pNewGroup->vgId); + ASSERT_EQ(group.dbUid, pNewGroup->dbUid); + ASSERT_EQ(group.members[0].info.dnodeId, pNewGroup->members[0].info.dnodeId); + ASSERT_EQ(group.members[1].info.dnodeId, pNewGroup->members[1].info.dnodeId); + ASSERT_EQ(group.isSync, pNewGroup->isSync); + ASSERT_EQ(group.assignedLeader.dnodeId, pNewGroup->assignedLeader.dnodeId); - EXPECT_EQ(std::string(group.members[0].state.token), std::string(pNewGroup->members[0].state.token)); - EXPECT_EQ(std::string(group.members[1].state.token), std::string(pNewGroup->members[1].state.token)); - EXPECT_EQ(std::string(group.assignedLeader.token), std::string(pNewGroup->assignedLeader.token)); - EXPECT_EQ(group.version, pNewGroup->version); + ASSERT_EQ(std::string(group.members[0].state.token), std::string(pNewGroup->members[0].state.token)); + ASSERT_EQ(std::string(group.members[1].state.token), std::string(pNewGroup->members[1].state.token)); + ASSERT_EQ(std::string(group.assignedLeader.token), std::string(pNewGroup->assignedLeader.token)); + ASSERT_EQ(group.version, pNewGroup->version); taosMemoryFree(pRow); taosMemoryFree(pRaw); @@ -129,9 +129,9 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) { SArbGroup newGroup = {0}; bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup); - EXPECT_FALSE(updateToken); - EXPECT_NE(group.members[0].state.responsedHbSeq, rspMember.hbSeq); - EXPECT_NE(group.members[0].state.lastHbMs, nowMs); + ASSERT_EQ(updateToken, false); + ASSERT_NE(group.members[0].state.responsedHbSeq, rspMember.hbSeq); + ASSERT_NE(group.members[0].state.lastHbMs, nowMs); } { // old token @@ -144,9 +144,9 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) { SArbGroup newGroup = {0}; bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup); - EXPECT_FALSE(updateToken); - EXPECT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); - EXPECT_EQ(group.members[0].state.lastHbMs, nowMs); + ASSERT_EQ(updateToken, false); + ASSERT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); + ASSERT_EQ(group.members[0].state.lastHbMs, nowMs); } { // new token @@ -159,14 +159,14 @@ TEST_F(ArbgroupTest, 02_process_heart_beat_rsp) { SArbGroup newGroup = {0}; bool updateToken = mndUpdateArbGroupByHeartBeat(&group, &rspMember, nowMs, dnodeId, &newGroup); - EXPECT_TRUE(updateToken); - EXPECT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); - EXPECT_EQ(group.members[0].state.lastHbMs, nowMs); + ASSERT_EQ(updateToken, true); + ASSERT_EQ(group.members[0].state.responsedHbSeq, rspMember.hbSeq); + 
ASSERT_EQ(group.members[0].state.lastHbMs, nowMs); - EXPECT_EQ(std::string(newGroup.members[0].state.token), std::string(rspMember.memberToken)); - EXPECT_FALSE(newGroup.isSync); - EXPECT_EQ(newGroup.assignedLeader.dnodeId, 0); - EXPECT_EQ(std::string(newGroup.assignedLeader.token).size(), 0); + ASSERT_EQ(std::string(newGroup.members[0].state.token), std::string(rspMember.memberToken)); + ASSERT_EQ(newGroup.isSync, false); + ASSERT_EQ(newGroup.assignedLeader.dnodeId, 0); + ASSERT_EQ(std::string(newGroup.assignedLeader.token).size(), 0); } taosThreadMutexDestroy(&group.mutex); @@ -203,7 +203,7 @@ TEST_F(ArbgroupTest, 03_process_check_sync_rsp) { SArbGroup newGroup = {0}; bool updateIsSync = mndUpdateArbGroupByCheckSync(&group, vgId, member0Token, member1Token, newIsSync, &newGroup); - EXPECT_FALSE(updateIsSync); + ASSERT_EQ(updateIsSync, false); } { // newIsSync @@ -216,8 +216,8 @@ TEST_F(ArbgroupTest, 03_process_check_sync_rsp) { SArbGroup newGroup = {0}; bool updateIsSync = mndUpdateArbGroupByCheckSync(&group, vgId, member0Token, member1Token, newIsSync, &newGroup); - EXPECT_TRUE(updateIsSync); - EXPECT_TRUE(newGroup.isSync); + ASSERT_EQ(updateIsSync, true); + ASSERT_EQ(newGroup.isSync, true); } taosThreadMutexDestroy(&group.mutex); @@ -254,7 +254,7 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){ SArbGroup newGroup = {0}; bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup); - EXPECT_FALSE(updateAssigned); + ASSERT_EQ(updateAssigned, false); } { // errcode != TSDB_CODE_SUCCESS @@ -265,7 +265,7 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){ SArbGroup newGroup = {0}; bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup); - EXPECT_FALSE(updateAssigned); + ASSERT_EQ(updateAssigned, false); } { // errcode == TSDB_CODE_SUCCESS @@ -276,11 +276,77 @@ TEST_F(ArbgroupTest, 04_process_set_assigned_leader){ SArbGroup newGroup = {0}; bool updateAssigned = mndUpdateArbGroupBySetAssignedLeader(&group, vgId, memberToken, errcode, &newGroup); - EXPECT_TRUE(updateAssigned); - EXPECT_FALSE(newGroup.isSync); + ASSERT_EQ(updateAssigned, true); + ASSERT_EQ(newGroup.isSync, false); } taosThreadMutexDestroy(&group.mutex); } +TEST_F(ArbgroupTest, 05_check_sync_timer) { + const int32_t assgndDnodeId = 1; + const int32_t vgId = 5; + const int64_t nowMs = 173044838300; + + SArbGroup group = {0}; + group.vgId = vgId; + group.dbUid = 1234; + group.members[0].info.dnodeId = assgndDnodeId; + group.members[0].state.lastHbMs = nowMs - 10; + + group.members[1].info.dnodeId = 2; + group.members[1].state.lastHbMs = nowMs - 10; + + group.isSync = 1; + taosThreadMutexInit(&group.mutex, NULL); + + SArbAssignedLeader assgnedLeader = {.dnodeId = assgndDnodeId, .acked = false}; + strncpy(assgnedLeader.token, group.members[0].state.token, TSDB_ARB_TOKEN_SIZE); + + SArbAssignedLeader nonoAsgndLeader = {.dnodeId = 0, .acked = false}; + + ECheckSyncOp op = CHECK_SYNC_NONE; + SArbGroup newGroup = {0}; + + // 1. asgnd,sync,noAck --> send set assigned + group.assignedLeader = assgnedLeader; + group.assignedLeader.acked = false; + group.isSync = true; + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_SET_ASSIGNED_LEADER); + + // 2. 
asgnd,notSync,noAck --> send set assgnd + newGroup = {0}; + group.assignedLeader = assgnedLeader; + group.isSync = false; + group.assignedLeader.acked = false; + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_SET_ASSIGNED_LEADER); + + // 3. noAsgnd,notSync,noAck(init) --> check sync + newGroup = {0}; + group.assignedLeader = nonoAsgndLeader; + group.isSync = false; + group.assignedLeader.acked = false; + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_CHECK_SYNC); + + // 4. noAsgnd,sync,noAck,one timeout--> update arbgroup (asgnd,sync,noAck) + newGroup = {0}; + group.assignedLeader = nonoAsgndLeader; + group.isSync = true; + group.assignedLeader.acked = false; + group.members[1].state.lastHbMs = nowMs - 2 * tsArbSetAssignedTimeoutSec * 1000; // member1 timeout + mndArbCheckSync(&group, nowMs, &op, &newGroup); + + ASSERT_EQ(op, CHECK_SYNC_UPDATE); + ASSERT_EQ(newGroup.assignedLeader.dnodeId, assgndDnodeId); + ASSERT_EQ(std::string(newGroup.assignedLeader.token), std::string(group.members[0].state.token)); + ASSERT_EQ(newGroup.isSync, true); + ASSERT_EQ(newGroup.assignedLeader.acked, false); +} + #pragma GCC diagnostic pop From 17077d1a3244d461e1e05440c64a9a13013b2fa5 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 1 Nov 2024 17:53:06 +0800 Subject: [PATCH 081/127] handle mem leak failure --- source/client/src/clientImpl.c | 44 +++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 19 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 74fd4e13a7..8d86745566 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1770,19 +1770,15 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, } } -int32_t doProcessMsgFromServer(void* param) { - AsyncArg* arg = (AsyncArg*)param; - SRpcMsg* pMsg = &arg->msg; - SEpSet* pEpSet = arg->pEpset; - +int32_t doProcessMsFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) { SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; if (pMsg->info.ahandle == NULL) { tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL"); - taosMemoryFree(arg->pEpset); rpcFreeCont(pMsg->pCont); - taosMemoryFree(arg); + taosMemoryFree(pEpSet); return TSDB_CODE_TSC_INTERNAL_ERROR; } + STscObj* pTscObj = NULL; STraceId* trace = &pMsg->info.traceId; @@ -1802,10 +1798,9 @@ int32_t doProcessMsgFromServer(void* param) { if (TSDB_CODE_SUCCESS != taosReleaseRef(clientReqRefPool, pSendInfo->requestObjRefId)) { tscError("doProcessMsgFromServer taosReleaseRef failed"); } - taosMemoryFree(arg->pEpset); rpcFreeCont(pMsg->pCont); + taosMemoryFree(pEpSet); destroySendMsgInfo(pSendInfo); - taosMemoryFree(arg); return TSDB_CODE_TSC_INTERNAL_ERROR; } pTscObj = pRequest->pTscObj; @@ -1844,20 +1839,24 @@ int32_t doProcessMsgFromServer(void* param) { rpcFreeCont(pMsg->pCont); destroySendMsgInfo(pSendInfo); - - taosMemoryFree(arg); return TSDB_CODE_SUCCESS; } +int32_t doProcessMsgFromServer(void* param) { + AsyncArg* arg = (AsyncArg*)param; + int32_t code = doProcessMsFromServerImpl(&arg->msg, arg->pEpset); + taosMemoryFree(arg); + return code; +} void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { + int32_t code = 0; SEpSet* tEpSet = NULL; if (pEpSet != NULL) { tEpSet = taosMemoryCalloc(1, sizeof(SEpSet)); if (NULL == tEpSet) { - pMsg->code = TSDB_CODE_OUT_OF_MEMORY; - rpcFreeCont(pMsg->pCont); - destroySendMsgInfo(pMsg->info.ahandle); - return; + code = terrno; + pMsg->code = terrno; + goto _exit; } 
(void)memcpy((void*)tEpSet, (void*)pEpSet, sizeof(SEpSet)); } @@ -1879,12 +1878,12 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { AsyncArg* arg = taosMemoryCalloc(1, sizeof(AsyncArg)); if (NULL == arg) { - pMsg->code = TSDB_CODE_OUT_OF_MEMORY; - taosMemoryFree(tEpSet); - rpcFreeCont(pMsg->pCont); - destroySendMsgInfo(pMsg->info.ahandle); + code = terrno; + pMsg->code = code; + goto _exit; return; } + arg->msg = *pMsg; arg->pEpset = tEpSet; @@ -1895,6 +1894,13 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { destroySendMsgInfo(pMsg->info.ahandle); taosMemoryFree(arg); } + return; +_exit: + tscError("failed to sched msg to tsc since %s", tstrerror(code)); + code = doProcessMsFromServerImpl(pMsg, tEpSet); + if (code != 0) { + tscError("failed to sched msg to tsc, tsc ready quit"); + } } TAOS* taos_connect_auth(const char* ip, const char* user, const char* auth, const char* db, uint16_t port) { From 7041e6474493a184a2abb5c56a6ce5719adcd44d Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Sun, 3 Nov 2024 23:00:31 +0800 Subject: [PATCH 082/127] add colDataSetVal func desc --- include/common/tdatablock.h | 4 +++ source/libs/executor/src/dataDispatcher.c | 39 +++++++++++++++++++---- 2 files changed, 37 insertions(+), 6 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 6578999db4..1103b89ccb 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -189,7 +189,11 @@ static FORCE_INLINE void colDataSetDouble(SColumnInfoData* pColumnInfoData, uint int32_t getJsonValueLen(const char* data); +// For the VAR_DATA_TYPE type, new data is inserted strictly according to the position of SVarColAttr.length. +// If the same row is inserted repeatedly, data holes will result. int32_t colDataSetVal(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); +// For the VAR_DATA_TYPE type, if a row already has data before inserting it (judged by offset != -1), +// it will be inserted at the original position and the old data will be overwritten. 
int32_t colDataSetValOrCover(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, bool isNull); int32_t colDataReassignVal(SColumnInfoData* pColumnInfoData, uint32_t dstRowIdx, uint32_t srcRowIdx, const char* pData); int32_t colDataSetNItems(SColumnInfoData* pColumnInfoData, uint32_t rowIndex, const char* pData, uint32_t numOfRows, diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 236d6a4d3e..48f4ed3ed1 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -70,21 +70,15 @@ static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* SNode* pNode; int32_t numOfCols = 0; - int32_t realOutputRowSize = 0; FOREACH(pNode, pHandle->pSchema->pSlots) { SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; if (pSlotDesc->output) { - realOutputRowSize += pSlotDesc->dataType.bytes; ++numOfCols; } else { // Slots must be sorted, and slots with 'output' set to true must come first break; } } - if (realOutputRowSize != pSchema->outputRowSize) { - qError("invalid schema, realOutputRowSize:%d, outputRowSize:%d", realOutputRowSize, pSchema->outputRowSize); - return TSDB_CODE_QRY_INVALID_INPUT; - } if (numOfCols > taosArrayGetSize(pInput->pData->pDataBlock)) { qError("invalid column number, schema:%d, input:%zu", numOfCols, taosArrayGetSize(pInput->pData->pDataBlock)); @@ -397,8 +391,41 @@ static int32_t getCacheSize(struct SDataSinkHandle* pHandle, uint64_t* size) { return TSDB_CODE_SUCCESS; } +static int32_t blockDescNodeCheck(SDataBlockDescNode* pInputDataBlockDesc) { + if(tsSafetyCheckLevel == TSDB_SAFETY_CHECK_LEVELL_NEVER) { + return TSDB_CODE_SUCCESS; + } + + if (pInputDataBlockDesc == NULL) { + qError("invalid schema"); + return TSDB_CODE_QRY_INVALID_INPUT; + } + + SNode* pNode; + int32_t realOutputRowSize = 0; + FOREACH(pNode, pInputDataBlockDesc->pSlots) { + SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; + if (pSlotDesc->output) { + realOutputRowSize += pSlotDesc->dataType.bytes; + } else { + // Slots must be sorted, and slots with 'output' set to true must come first + break; + } + } + if (realOutputRowSize != pInputDataBlockDesc->outputRowSize) { + qError("invalid schema, realOutputRowSize:%d, outputRowSize:%d", realOutputRowSize, pInputDataBlockDesc->outputRowSize); + return TSDB_CODE_QRY_INVALID_INPUT; + } + return TSDB_CODE_SUCCESS; +} + int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle) { int32_t code; + code = blockDescNodeCheck(pDataSink->pInputDataBlockDesc); + if (code) { + qError("failed to check input data block desc, code:%d", code); + return code; + } SDataDispatchHandle* dispatcher = taosMemoryCalloc(1, sizeof(SDataDispatchHandle)); if (NULL == dispatcher) { From 6286b28a32b942c3cf8657c8c16ad655beb7bd1e Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Mon, 4 Nov 2024 09:30:20 +0800 Subject: [PATCH 083/127] fix: avoid compile error on win --- source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp b/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp index 11fba524fe..d5ca019420 100644 --- a/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp +++ b/source/dnode/mnode/impl/test/arbgroup/arbgroup.cpp @@ -300,10 +300,14 @@ TEST_F(ArbgroupTest, 05_check_sync_timer) { group.isSync = 1; taosThreadMutexInit(&group.mutex, NULL); - SArbAssignedLeader assgnedLeader = 
{.dnodeId = assgndDnodeId, .acked = false}; + SArbAssignedLeader assgnedLeader = {0}; + assgnedLeader.dnodeId = assgndDnodeId; + assgnedLeader.acked = false; strncpy(assgnedLeader.token, group.members[0].state.token, TSDB_ARB_TOKEN_SIZE); - SArbAssignedLeader nonoAsgndLeader = {.dnodeId = 0, .acked = false}; + SArbAssignedLeader noneAsgndLeader = {0}; + noneAsgndLeader.dnodeId = 0; + noneAsgndLeader.acked = false; ECheckSyncOp op = CHECK_SYNC_NONE; SArbGroup newGroup = {0}; @@ -327,7 +331,7 @@ TEST_F(ArbgroupTest, 05_check_sync_timer) { // 3. noAsgnd,notSync,noAck(init) --> check sync newGroup = {0}; - group.assignedLeader = nonoAsgndLeader; + group.assignedLeader = noneAsgndLeader; group.isSync = false; group.assignedLeader.acked = false; mndArbCheckSync(&group, nowMs, &op, &newGroup); @@ -336,7 +340,7 @@ TEST_F(ArbgroupTest, 05_check_sync_timer) { // 4. noAsgnd,sync,noAck,one timeout--> update arbgroup (asgnd,sync,noAck) newGroup = {0}; - group.assignedLeader = nonoAsgndLeader; + group.assignedLeader = noneAsgndLeader; group.isSync = true; group.assignedLeader.acked = false; group.members[1].state.lastHbMs = nowMs - 2 * tsArbSetAssignedTimeoutSec * 1000; // member1 timeout From 355aa885de6885ac537ed38f0b5f27f5920a19db Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Thu, 31 Oct 2024 17:11:34 +0800 Subject: [PATCH 084/127] fix:[TD-32506] fix mem leak in percentile function. --- source/libs/executor/inc/executorInt.h | 7 ++++-- source/libs/executor/src/aggregateoperator.c | 20 ++++++++++++---- source/libs/executor/src/groupoperator.c | 4 ++-- .../executor/src/streamtimewindowoperator.c | 4 ++-- source/libs/executor/src/timewindowoperator.c | 24 +++++++++++++------ 5 files changed, 42 insertions(+), 17 deletions(-) diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 572ff88be9..98d3fefada 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -553,6 +553,7 @@ typedef struct SIntervalAggOperatorInfo { EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] STimeWindowAggSupp twAggSup; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. 
+ bool cleanGroupResInfo; struct SOperatorInfo* pOperator; // for limit optimization bool limited; @@ -831,8 +832,10 @@ void cleanupExprSupp(SExprSupp* pSup); void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp* pSup, SGroupResInfo* pGroupResInfo); -void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, - SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap); +void cleanupResultInfoInHashMap(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, + SGroupResInfo* pGroupResInfo, SSHashObj* pHashmap); +void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SGroupResInfo* pGroupResInfo, + SAggSupporter *pAggSup, bool cleanHashmap); void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo); diff --git a/source/libs/executor/src/aggregateoperator.c b/source/libs/executor/src/aggregateoperator.c index 91b435fbec..829ca6da50 100644 --- a/source/libs/executor/src/aggregateoperator.c +++ b/source/libs/executor/src/aggregateoperator.c @@ -49,6 +49,7 @@ typedef struct SAggOperatorInfo { SSDataBlock* pNewGroupBlock; bool hasCountFunc; SOperatorInfo* pOperator; + bool cleanGroupResInfo; } SAggOperatorInfo; static void destroyAggOperatorInfo(void* param); @@ -121,6 +122,7 @@ int32_t createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiNode* pA pInfo->binfo.outputTsOrder = pAggNode->node.outputTsOrder; pInfo->hasCountFunc = pAggNode->hasCountLikeFunc; pInfo->pOperator = pOperator; + pInfo->cleanGroupResInfo = false; setOperatorInfo(pOperator, "TableAggregate", QUERY_NODE_PHYSICAL_PLAN_HASH_AGG, !pAggNode->node.forceCreateNonBlockingOptr, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -159,8 +161,8 @@ void destroyAggOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } cleanupAggSup(&pInfo->aggSup); @@ -191,6 +193,7 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) { int32_t order = pAggInfo->binfo.inputTsOrder; SSDataBlock* pBlock = pAggInfo->pNewGroupBlock; + pAggInfo->cleanGroupResInfo = false; if (pBlock) { pAggInfo->pNewGroupBlock = NULL; tSimpleHashClear(pAggInfo->aggSup.pResultRowHashTable); @@ -263,6 +266,7 @@ static bool nextGroupedResult(SOperatorInfo* pOperator) { code = initGroupedResultInfo(&pAggInfo->groupResInfo, pAggInfo->aggSup.pResultRowHashTable, 0); QUERY_CHECK_CODE(code, lino, _end); + pAggInfo->cleanGroupResInfo = true; _end: if (code != TSDB_CODE_SUCCESS) { @@ -627,7 +631,7 @@ void cleanupResultInfoInStream(SExecTaskInfo* pTaskInfo, void* pState, SExprSupp } } -void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, +void cleanupResultInfoInGroupResInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo) { int32_t numOfExprs = pSup->numOfExprs; int32_t* rowEntryOffset = pSup->rowEntryInfoOffset; @@ -663,7 +667,7 @@ void cleanupResultInfoWithoutHash(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDi } } -void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, +void cleanupResultInfoInHashMap(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* pBuf, SGroupResInfo* pGroupResInfo, 
SSHashObj* pHashmap) { int32_t numOfExprs = pSup->numOfExprs; int32_t* rowEntryOffset = pSup->rowEntryInfoOffset; @@ -701,6 +705,14 @@ void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SDiskbasedBuf* } } +void cleanupResultInfo(SExecTaskInfo* pTaskInfo, SExprSupp* pSup, SGroupResInfo* pGroupResInfo, + SAggSupporter *pAggSup, bool cleanGroupResInfo) { + if (cleanGroupResInfo) { + cleanupResultInfoInGroupResInfo(pTaskInfo, pSup, pAggSup->pResultBuf, pGroupResInfo); + } else { + cleanupResultInfoInHashMap(pTaskInfo, pSup, pAggSup->pResultBuf, pGroupResInfo, pAggSup->pResultRowHashTable); + } +} void cleanupAggSup(SAggSupporter* pAggSup) { taosMemoryFreeClear(pAggSup->keyBuf); tSimpleHashCleanup(pAggSup->pResultRowHashTable); diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 0f85c3346d..e4db766a6a 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -88,8 +88,8 @@ static void destroyGroupOperatorInfo(void* param) { cleanupExprSupp(&pInfo->scalarSup); if (pInfo->pOperator != NULL) { - cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + false); pInfo->pOperator = NULL; } diff --git a/source/libs/executor/src/streamtimewindowoperator.c b/source/libs/executor/src/streamtimewindowoperator.c index 67ec976ee9..a7f8934c03 100644 --- a/source/libs/executor/src/streamtimewindowoperator.c +++ b/source/libs/executor/src/streamtimewindowoperator.c @@ -474,8 +474,8 @@ void destroyStreamFinalIntervalOperatorInfo(void* param) { SStreamIntervalOperatorInfo* pInfo = (SStreamIntervalOperatorInfo*)param; cleanupBasicInfo(&pInfo->binfo); if (pInfo->pOperator) { - cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + false); pInfo->pOperator = NULL; } cleanupAggSup(&pInfo->aggSup); diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 34ecda6ce7..538d2a8a2e 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -38,6 +38,7 @@ typedef struct SSessionAggOperatorInfo { int32_t tsSlotId; // primary timestamp slot id STimeWindowAggSupp twAggSup; SOperatorInfo* pOperator; + bool cleanGroupResInfo; } SSessionAggOperatorInfo; typedef struct SStateWindowOperatorInfo { @@ -52,6 +53,7 @@ typedef struct SStateWindowOperatorInfo { int32_t tsSlotId; // primary timestamp column slot id STimeWindowAggSupp twAggSup; SOperatorInfo* pOperator; + bool cleanGroupResInfo; } SStateWindowOperatorInfo; typedef enum SResultTsInterpType { @@ -943,6 +945,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { int32_t scanFlag = MAIN_SCAN; int64_t st = taosGetTimestampUs(); + pInfo->cleanGroupResInfo = false; while (1) { SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0); if (pBlock == NULL) { @@ -965,6 +968,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder); QUERY_CHECK_CODE(code, lino, _end); + 
pInfo->cleanGroupResInfo = true; OPTR_SET_OPENED(pOperator); @@ -1092,6 +1096,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) { int64_t st = taosGetTimestampUs(); SOperatorInfo* downstream = pOperator->pDownstream[0]; + pInfo->cleanGroupResInfo = false; while (1) { SSDataBlock* pBlock = getNextBlockFromDownstream(pOperator, 0); if (pBlock == NULL) { @@ -1120,7 +1125,7 @@ static int32_t openStateWindowAggOptr(SOperatorInfo* pOperator) { pOperator->cost.openCost = (taosGetTimestampUs() - st) / 1000.0; code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC); QUERY_CHECK_CODE(code, lino, _end); - + pInfo->cleanGroupResInfo = true; pOperator->status = OP_RES_TO_RETURN; _end: @@ -1230,8 +1235,8 @@ static void destroyStateWindowOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); taosMemoryFreeClear(pInfo->stateKey.pData); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } @@ -1257,8 +1262,8 @@ void destroyIntervalOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - &pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } @@ -1452,6 +1457,7 @@ int32_t createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPhysiNode } pInfo->pOperator = pOperator; + pInfo->cleanGroupResInfo = false; initResultRowInfo(&pInfo->binfo.resultRowInfo); setOperatorInfo(pOperator, "TimeIntervalAggOperator", QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL, true, OP_NOT_OPENED, pInfo, pTaskInfo); @@ -1573,6 +1579,7 @@ static int32_t doSessionWindowAggNext(SOperatorInfo* pOperator, SSDataBlock** pp SOptrBasicInfo* pBInfo = &pInfo->binfo; SExprSupp* pSup = &pOperator->exprSupp; + pInfo->cleanGroupResInfo = false; if (pOperator->status == OP_RES_TO_RETURN) { while (1) { doBuildResultDatablock(pOperator, &pInfo->binfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf); @@ -1628,6 +1635,7 @@ static int32_t doSessionWindowAggNext(SOperatorInfo* pOperator, SSDataBlock** pp code = initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, TSDB_ORDER_ASC); QUERY_CHECK_CODE(code, lino, _end); + pInfo->cleanGroupResInfo = true; code = blockDataEnsureCapacity(pBInfo->pRes, pOperator->resultInfo.capacity); QUERY_CHECK_CODE(code, lino, _end); @@ -1731,6 +1739,7 @@ int32_t createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWinodwPhy pInfo->tsSlotId = tsSlotId; pInfo->pOperator = pOperator; + pInfo->cleanGroupResInfo = false; setOperatorInfo(pOperator, "StateWindowOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_STATE, true, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(openStateWindowAggOptr, doStateWindowAggNext, NULL, destroyStateWindowOperatorInfo, @@ -1763,8 +1772,8 @@ void destroySWindowOperatorInfo(void* param) { cleanupBasicInfo(&pInfo->binfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); if (pInfo->pOperator) { - cleanupResultInfoWithoutHash(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, pInfo->aggSup.pResultBuf, - 
&pInfo->groupResInfo); + cleanupResultInfo(pInfo->pOperator->pTaskInfo, &pInfo->pOperator->exprSupp, &pInfo->groupResInfo, &pInfo->aggSup, + pInfo->cleanGroupResInfo); pInfo->pOperator = NULL; } @@ -1835,6 +1844,7 @@ int32_t createSessionAggOperatorInfo(SOperatorInfo* downstream, SSessionWinodwPh QUERY_CHECK_CODE(code, lino, _error); pInfo->pOperator = pOperator; + pInfo->cleanGroupResInfo = false; setOperatorInfo(pOperator, "SessionWindowAggOperator", QUERY_NODE_PHYSICAL_PLAN_MERGE_SESSION, true, OP_NOT_OPENED, pInfo, pTaskInfo); pOperator->fpSet = createOperatorFpSet(optrDummyOpenFn, doSessionWindowAggNext, NULL, destroySWindowOperatorInfo, From 9df427c0cde0dbc9c3fe65b08ef1989a84d62bd4 Mon Sep 17 00:00:00 2001 From: Jing Sima Date: Fri, 1 Nov 2024 13:42:27 +0800 Subject: [PATCH 085/127] fix:[TD-32506] Add test case for TD-32506 --- source/libs/function/src/tpercentile.c | 4 +- tests/army/query/function/test_percentile.py | 194 +++++++++++++++++++ tests/parallel_test/cases.task | 1 + 3 files changed, 198 insertions(+), 1 deletion(-) create mode 100644 tests/army/query/function/test_percentile.py diff --git a/source/libs/function/src/tpercentile.c b/source/libs/function/src/tpercentile.c index 429ab52a8d..78c16ec7cb 100644 --- a/source/libs/function/src/tpercentile.c +++ b/source/libs/function/src/tpercentile.c @@ -224,7 +224,7 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index *index = -1; - if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v)) { + if (v > pBucket->range.dMaxVal || v < pBucket->range.dMinVal || isnan(v) || isinf(v)) { return TSDB_CODE_SUCCESS; } @@ -232,6 +232,8 @@ int32_t tBucketDoubleHash(tMemBucket *pBucket, const void *value, int32_t *index double span = pBucket->range.dMaxVal - pBucket->range.dMinVal; if (fabs(span) < DBL_EPSILON) { *index = 0; + } else if (isinf(span)) { + *index = -1; } else { double slotSpan = span / pBucket->numOfSlots; *index = (int32_t)((v - pBucket->range.dMinVal) / slotSpan); diff --git a/tests/army/query/function/test_percentile.py b/tests/army/query/function/test_percentile.py new file mode 100644 index 0000000000..004cad54c9 --- /dev/null +++ b/tests/army/query/function/test_percentile.py @@ -0,0 +1,194 @@ +################################################################### +# Copyright (c) 2016 by TAOS Technologies, Inc. +# All rights reserved. +# +# This file is proprietary and confidential to TAOS Technologies. 
+# No part of this file may be reproduced, stored, transmitted, +# disclosed or used in any form or by any means other than as +# expressly provided by the written permission from Jianhui Tao +# +################################################################### + +# -*- coding: utf-8 -*- + +from frame import etool +from frame.etool import * +from frame.log import * +from frame.cases import * +from frame.sql import * +from frame.caseBase import * +from frame.common import * + +class TDTestCase(TBase): + updatecfgDict = { + "keepColumnName": "1", + "ttlChangeOnWrite": "1", + "querySmaOptimize": "1", + "slowLogScope": "none", + "queryBufferSize": 10240 + } + + def insert_data(self): + tdLog.printNoPrefix("==========step1:create table") + + tdSql.execute(f'create database if not exists td32506') + tdSql.execute(f'use td32506') + tdSql.execute(f'CREATE STABLE fs_table (ts TIMESTAMP, speed INT, color VARCHAR(16), tinyint_type_col_name TINYINT, smallint_type_col_name SMALLINT, bigint_type_col_name BIGINT, ' + f'utinyint_type_col_name TINYINT, usmallint_type_col_name SMALLINT, uint_type_col_name INT, ubigint_type_col_name BIGINT, float_type_col_name FLOAT, ' + f'double_type_col_name DOUBLE, bool_type_col_name BOOL, nchar_type_col_name NCHAR(16), varchar_type_col_name VARCHAR(16), ' + f'varbinary_type_col_name VARBINARY(16),geometry_type_col_name GEOMETRY(32)) TAGS (b VARCHAR(200), f FLOAT, tinyint_type_tag_name TINYINT, ' + f'smallint_type_tag_name SMALLINT, int_type_tag_name INT, bigint_type_tag_name BIGINT, utinyint_type_tag_name TINYINT, ' + f'usmallint_type_tag_name SMALLINT UNSIGNED, uint_type_tag_name INT UNSIGNED, ubigint_type_tag_name BIGINT, double_type_tag_name DOUBLE, ' + f'bool_type_tag_name BOOL, nchar_type_tag_name NCHAR(16), varchar_type_tag_name VARCHAR(16), varbinary_type_tag_name VARBINARY(64), ' + f'geometry_type_tag_name GEOMETRY(32), extratag INT)') + tdSql.execute(f'CREATE TABLE reg_table_159 USING fs_table ' + f'(b, f, tinyint_type_tag_name, smallint_type_tag_name, int_type_tag_name, bigint_type_tag_name, ' + f'utinyint_type_tag_name, usmallint_type_tag_name, uint_type_tag_name, ubigint_type_tag_name, ' + f'double_type_tag_name, bool_type_tag_name, nchar_type_tag_name, varchar_type_tag_name, varbinary_type_tag_name) ' + f'TAGS ("fgiaaopuphardlom", -3.302167e+38, 40, 18667, 1116729408, -6426992149481917950, 55, 4674, 1756351183, ' + f'7228005179153159914, -3.428740e+307, false, "emvhqjcixroitxiw", "fixwxdovhhbizqdm", "\x786565787775656D6F667A666A646463")') + + tdLog.printNoPrefix("==========step2:insert data") + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,double_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name,geometry_type_tag_name,int_type_tag_name,varbinary_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name,utinyint_type_tag_name,uint_type_tag_name,f,tinyint_type_tag_name,bool_type_tag_name,b) TAGS("emvhqjcixroitxiw", -3.4287401579952453e+307, -6426992149481917950, 4674, "fixwxdovhhbizqdm", "point(1.0 1.0)", 1116729408, "xeexwuemofzfjddc", 7228005179153159914, 18667, 55, 1756351183, -3.302167385734522e+38, 40, False, "fgiaaopuphardlom") (ts,varchar_type_col_name,uint_type_col_name,speed,smallint_type_col_name,nchar_type_col_name,ubigint_type_col_name,varbinary_type_col_name,float_type_col_name,bigint_type_col_name,double_type_col_name,geometry_type_col_name,color,bool_type_col_name,usmallint_type_col_name,utinyint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 00:49:27", 
"jvudhjbmixxuubhl", 1327384783, 215895363, 16025, "llosyvhgzqpixdru", -3772449087838215561, "jvludkxlqobiigip", -2.978105332100778e+37, -5559805599911459602, -4.028726372555818e+307, "point(1.0 1.0)", "bdencejzdarqaeef", True, 19468, 35, -30);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,bool_type_tag_name) TAGS(1653079398, True) (ts,bigint_type_col_name,ubigint_type_col_name,float_type_col_name,color,double_type_col_name,bool_type_col_name,smallint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 00:58:36", 1083391961316260438, 3613986442426750782, -1.0453149756996617e+38, "tvaiakmmcxzbepra", -1.4689107839018581e+308, True, -18675, 138061020);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,bigint_type_tag_name,ubigint_type_tag_name,int_type_tag_name,varchar_type_tag_name,b,f,smallint_type_tag_name,tinyint_type_tag_name,bool_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name) TAGS(-7.061730755263099e+307, 2107452935481758830, 2258834966577471147, -952230254, "vhnwshrapagnalqu", "levvmtztgprzatat", 6.737169619795772e+37, 8872, 2, True, 1075287886, -60, "mpdmcvwntqfusvhm", 36270, "point(1.0 1.0)", "ctnxatxiaymaekvj") (ts) VALUES ("2016-12-16 01:23:36");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,f,usmallint_type_tag_name,smallint_type_tag_name,double_type_tag_name) TAGS("point(1.0 1.0)", -8.285007090644336e+37, 50936, -28943, 3.4910133200480327e+307) (ts,tinyint_type_col_name,geometry_type_col_name,ubigint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 01:37:37", 20, "point(1.0 1.0)", -8279051334405446366, -11586);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,f,ubigint_type_tag_name,nchar_type_tag_name,uint_type_tag_name,int_type_tag_name,utinyint_type_tag_name,smallint_type_tag_name) TAGS("qbwmnupomqkllhdf", -1.731243531476578e+38, -3044376275988311477, "onwtdayawxuoayuh", 3923375490, -122362890, -50, -25842) (ts,tinyint_type_col_name,uint_type_col_name,bigint_type_col_name,varbinary_type_col_name,color) VALUES ("2016-12-16 01:47:01", 38, -912910938, -7421282029380796738, "zqqrmdatsixdjwmv", "qensugigfedpokag");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,nchar_type_tag_name,bigint_type_tag_name,b,tinyint_type_tag_name,smallint_type_tag_name) TAGS(3.0006598369272955e+38, "qgmecuzexdlbjoen", 2548336842131148813, "kvulvyjmnsujbygx", 54, 12017) (ts,ubigint_type_col_name,double_type_col_name,geometry_type_col_name,color,bool_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 02:41:15", -8037368176580035774, 1.8094617142119061e+307, "point(1.0 1.0)", "uriutisredzfnels", False, -50);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,usmallint_type_tag_name,uint_type_tag_name,geometry_type_tag_name) TAGS(-18163, 36635, 3511596470, "point(1.0 1.0)") (ts,utinyint_type_col_name,varchar_type_col_name,uint_type_col_name,bigint_type_col_name,bool_type_col_name,varbinary_type_col_name,ubigint_type_col_name,smallint_type_col_name,geometry_type_col_name,float_type_col_name,color,tinyint_type_col_name,usmallint_type_col_name,speed,double_type_col_name) VALUES ("2016-12-16 02:41:32", 53, "izpzycfgyyljiafe", 748493885, 1182091325231355552, True, "wirugruhofvuzvfq", -6622830592002864757, 2403, 
"point(1.0 1.0)", -2.0533976172415304e+38, "ypyrkfwkzsvykvbq", -98, 7975, 1661529650, 1.4776735328584477e+308);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,varchar_type_tag_name,bool_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,double_type_tag_name,f) TAGS(58, "mxcovlujiaipbrxe", False, 3023725445, -8300199672256037241, 3.493291803670194e+307, -2.0809024953240414e+38) (ts,utinyint_type_col_name,varbinary_type_col_name,bigint_type_col_name,speed,color,uint_type_col_name,usmallint_type_col_name,bool_type_col_name,geometry_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 03:00:08", -47, "achpeufmryizqrmv", -3195561950072510641, -674332102, "ocyfdyeztjbdajyj", 1620966159, 3256, True, "point(1.0 1.0)", "ujaddbbtrpfhirhk");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,bool_type_tag_name,b,smallint_type_tag_name,geometry_type_tag_name,double_type_tag_name) TAGS(22844, False, "mtfrbktpnjmdaazm", 26985, "point(1.0 1.0)", 1.6179238326982066e+308) (ts) VALUES ("2016-12-16 03:02:29");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,f,uint_type_tag_name,smallint_type_tag_name,tinyint_type_tag_name) TAGS("point(1.0 1.0)", -3.0320850510538104e+38, 3560125456, 11119, 100) (ts,varbinary_type_col_name,double_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 03:04:06", "sujwqvsfertzzcuk", -1.4040026870544444e+308, 2475978411998036438);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,usmallint_type_tag_name) TAGS("orgrtrdbgldlyogq", 28231) (ts,varchar_type_col_name,double_type_col_name,bool_type_col_name,usmallint_type_col_name,geometry_type_col_name,bigint_type_col_name,color,speed,uint_type_col_name,ubigint_type_col_name) VALUES ("2016-12-16 03:04:22", "xdwjwlgxgpkaqnnb", 9.723011865690129e+307, True, -13258, "point(1.0 1.0)", 7895225088661281332, "srsjscgbnyrhrpmo", -1908698583, -777280648, -562221736344996425);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name) TAGS(-86) (ts,nchar_type_col_name) VALUES ("2016-12-16 03:12:18", "vcjkutzmjnmwreep");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,tinyint_type_tag_name) TAGS(-6.142189587223866e+37, -107) (ts,uint_type_col_name,usmallint_type_col_name,bigint_type_col_name,geometry_type_col_name,double_type_col_name,nchar_type_col_name,utinyint_type_col_name,speed,tinyint_type_col_name,ubigint_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 03:16:08", -1334666808, 16860, 3563429449831894323, "point(1.0 1.0)", -6.706661740752272e+307, "vmfhrazqvgrsyjbi", 106, -2118429478, -24, -5510629554223761040, "hcpvgxxsqivxahrs");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,varbinary_type_tag_name,f,b,tinyint_type_tag_name,utinyint_type_tag_name) TAGS("point(1.0 1.0)", "ycquvwnecivxvdkq", 1.0805517348297708e+38, "qvdfdhvjokfwimfb", -16, -28) (ts,color,varbinary_type_col_name) VALUES ("2016-12-16 03:24:44", "khyzooeyfjsndqbl", "mxeaotkheqyjkwfe");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,double_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,uint_type_tag_name) TAGS(-2.6250101215841932e+38, -1.4224845739873728e+308, "vitarwbjdtoaouwk", -21, 2835005149249208489, False, -2853745842969962537, 
2818492304) (ts,float_type_col_name,varbinary_type_col_name,speed,utinyint_type_col_name,double_type_col_name,uint_type_col_name) VALUES ("2016-12-16 03:33:59", 4.143242721974893e+37, "tkcfmjxczscjjbfw", -126722956, 32, 1.5620105176463347e+308, -403334517);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,bool_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name,uint_type_tag_name) TAGS(1.1219685887309601e+38, True, 38172, "yjkrksyfhkqpxulw", 3797495177) (ts,double_type_col_name,ubigint_type_col_name,speed,nchar_type_col_name,smallint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 03:37:00", 4.3397415421427257e+307, 2658580196646742125, 1848731552, "dauismisbvpldvbh", -8915, -10205);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,nchar_type_tag_name,f,bool_type_tag_name,varchar_type_tag_name,bigint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name,b,utinyint_type_tag_name) TAGS(1.8933544224270275e+307, "xeuwzufexkviruji", -1.0719023172814157e+38, False, "etafmdbercjranpw", 8783154341984945645, 1620912647, 35459, "amdmlvxpympytkxq", 27136, 4977907901152915273, "point(1.0 1.0)", "sndpqcboosvhuzuq", -27) (ts) VALUES ("2016-12-16 03:38:29");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,f,int_type_tag_name,varbinary_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name,smallint_type_tag_name,b,geometry_type_tag_name,uint_type_tag_name) TAGS(-1374459770575094448, 2.693293747482498e+38, 650971033, "pmviadhwiouunffs", 44961, "nxvgxzcwabujtppn", 17943, "pcctjatkdlbevdrn", "point(1.0 1.0)", 1568392342) (ts,utinyint_type_col_name,varbinary_type_col_name,nchar_type_col_name,float_type_col_name) VALUES ("2016-12-16 03:50:53", 37, "tqdxypikpbpopiid", "evjbmpbxsscpssei", 2.890872457201385e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,nchar_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,b,utinyint_type_tag_name,bool_type_tag_name,geometry_type_tag_name,bigint_type_tag_name,ubigint_type_tag_name,tinyint_type_tag_name,usmallint_type_tag_name,int_type_tag_name,smallint_type_tag_name) TAGS(3.1967188798436023e+307, "dlqosxrxmrdlexvu", 490088010, "abfyijmjhtypgmjo", "vrjjkvfezfqjsjxn", "jguyfszmyqtoaiuy", 15, False, "point(1.0 1.0)", 7656873837277040486, -2879397104283241297, 79, 1548, 474297665, 27763) (ts,usmallint_type_col_name,tinyint_type_col_name,float_type_col_name,ubigint_type_col_name,varchar_type_col_name,speed,uint_type_col_name,geometry_type_col_name,double_type_col_name,utinyint_type_col_name,color,varbinary_type_col_name) VALUES ("2016-12-16 04:02:41", -22725, 121, -1.7363002143786742e+38, -5956529834331945772, "fdebpzbdywcxygxq", -1037829517, -344092943, "point(1.0 1.0)", 1.687974407803454e+308, -15, "njduhepaglkeckdd", "svrtpquvktqimuab");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,smallint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,int_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,bigint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,uint_type_tag_name) TAGS(2.0855939441502307e+38, 31096, "point(1.0 1.0)", 1835, -1486483188, -63, -123, "dhfkbddofbppzizu", -8035802647163979457, True, -7483826213921795749, 795995100) 
(ts,geometry_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 04:10:21", "point(1.0 1.0)", 1969408246531214471);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,tinyint_type_tag_name,ubigint_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,int_type_tag_name,double_type_tag_name,nchar_type_tag_name,b,f,geometry_type_tag_name,smallint_type_tag_name) TAGS(25593, 43, 7818398761945137262, -55, "vpwzpopkwmijjyqx", 826644318, -8.222056376363248e+307, "jjxxlxujrdkmkqez", "hkzvxvlmluxfhiao", 9.217736393675792e+37, "point(1.0 1.0)", 13064) (ts,float_type_col_name,geometry_type_col_name,nchar_type_col_name,tinyint_type_col_name,usmallint_type_col_name,ubigint_type_col_name,smallint_type_col_name,bool_type_col_name,utinyint_type_col_name,varchar_type_col_name,double_type_col_name,uint_type_col_name,bigint_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 04:12:51", -1.1155033313780145e+38, "point(1.0 1.0)", "vwwljlxnuieozvfo", 5, -21906, 166234169597926020, -26423, True, -124, "johlaoepdvtqhimi", 1.6733911742274567e+308, 2100308092, 5485398075914940612, "goqmambtlodgwgxq");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,bigint_type_tag_name,tinyint_type_tag_name,int_type_tag_name,double_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,varchar_type_tag_name,b,uint_type_tag_name,f) TAGS("point(1.0 1.0)", -303959115421074377, 55, 58404549, 1.1317582542682038e+308, 8228, False, "bctnlrzfozjhicdk", "tgtjnkhoxjrqmnmu", 2346764198, -1.431762906013641e+38) (ts,float_type_col_name,speed,bigint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 04:13:42", 9.989635187087936e+37, -1325033823, 6439771353766292866, -8002);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name) TAGS("zsmwyzfqmozmyniw") (ts,bool_type_col_name,speed,varbinary_type_col_name,uint_type_col_name,varchar_type_col_name,geometry_type_col_name,double_type_col_name,utinyint_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 04:21:19", True, -1595645133, "fcmpqbuxdduvfwrq", -1256972424, "doijzwclrxdbzwwq", "point(1.0 1.0)", 6.027483417919201e+307, 107, 5847005732723402961);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,smallint_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,b,geometry_type_tag_name,uint_type_tag_name,ubigint_type_tag_name) TAGS(1.9234061768725975e+306, 27143, "dlsvyehikpxuzggw", 18465, "xhwupqgazckmiobv", "point(1.0 1.0)", 1009963833, 2202148978430704618) (ts,nchar_type_col_name,color,tinyint_type_col_name,ubigint_type_col_name) VALUES ("2016-12-16 04:25:17", "qshpnutukcvqxtaj", "zryqatfkrrhxravq", -123, 5614613512735569542);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,bigint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,double_type_tag_name,varchar_type_tag_name,bool_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,b,f,utinyint_type_tag_name) TAGS(-25502, -4694006038921375834, 4273176734, -7092260276769281916, "point(1.0 1.0)", 3016, 1.6354692626546982e+308, "hjbzklszxdzdoxqn", False, "dexhfkdiumjzdbtu", 55, 1628959, "caeogfirfkzuqrgm", "kklniqcfjgtnpaat", 1.2381441388115072e+38, 34) 
(ts,smallint_type_col_name,float_type_col_name,uint_type_col_name,nchar_type_col_name,geometry_type_col_name,varchar_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 04:30:12", -15424, -9.958486706164754e+37, 1543951155, "uonnnwqaxjnzcktw", "point(1.0 1.0)", "sbnhooyzwvvvgdsb", "iguafwamsgzdtcid");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,bigint_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,b,smallint_type_tag_name,f,ubigint_type_tag_name,int_type_tag_name,uint_type_tag_name,varchar_type_tag_name) TAGS(125, 7128483423223572360, "wknmbdmxriwytuzs", "point(1.0 1.0)", 23719, "cehohszybqisbcsr", 29052, -1.2266502524670797e+38, -5190174475276867332, -1054122176, 3316814729, "zeympzallufrrwwk") (ts,float_type_col_name) VALUES ("2016-12-16 04:42:07", -3.0086436232306867e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,smallint_type_tag_name) TAGS(24922, 19120) (ts,ubigint_type_col_name,varbinary_type_col_name,bool_type_col_name,bigint_type_col_name,usmallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 04:44:37", 2119708740431033246, "tfscgthihrecwcca", True, 3858755530043058209, 16911, 49);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,int_type_tag_name,nchar_type_tag_name,uint_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,smallint_type_tag_name,double_type_tag_name,usmallint_type_tag_name) TAGS(False, "point(1.0 1.0)", "lgomrzeuveghuavk", 698597980, "fibwfbupmoxwxwhu", 2299571086, 88, 26, -784, 5.836820071314546e+307, 44181) (ts,color,nchar_type_col_name,ubigint_type_col_name,float_type_col_name,utinyint_type_col_name,usmallint_type_col_name,geometry_type_col_name,varchar_type_col_name,tinyint_type_col_name,double_type_col_name,speed,uint_type_col_name,bigint_type_col_name,varbinary_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 04:48:53", "fdrpbbuwolrgigwu", "ovkkzehagcozavag", -859824747452770489, 2.668460352935784e+38, -36, -4146, "point(1.0 1.0)", "zpnfvkdabqhvvugr", 122, -1.6659109789494941e+308, -339715491, -744820813, 4922622614018944659, "drckbshqwqdgjywk", 26922);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varchar_type_tag_name,nchar_type_tag_name,bool_type_tag_name,varbinary_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,utinyint_type_tag_name,b,bigint_type_tag_name,uint_type_tag_name,tinyint_type_tag_name,double_type_tag_name) TAGS("fnudpzattpadznjh", "curlvpnayxqyhgnj", True, "qujeqlinronfmnra", 26904, "point(1.0 1.0)", 103, "rmdyvbxvfscrklat", 1772757837581587144, 3755231946, -28, 1.392881389803201e+308) (ts) VALUES ("2016-12-16 04:49:43");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,smallint_type_tag_name,usmallint_type_tag_name,f,uint_type_tag_name,b,varbinary_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name) TAGS("point(1.0 1.0)", 23766, 34510, -9.391278476138455e+37, 1424312679, "usmzlgendwxwjzkd", "gfsuserzmdovezbh", -3630795755071364737, 33) (ts,ubigint_type_col_name,speed,bool_type_col_name,smallint_type_col_name,geometry_type_col_name,varbinary_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 05:08:42", 9043564226598764196, -1058070689, True, -27689, "point(1.0 1.0)", "jkcbtfqphclecaiw", -2968);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(nchar_type_tag_name,bigint_type_tag_name,b) TAGS("dtpxqkpvzwqhqtlf", 8497835948198860590, "labufkbmgoaihoke") (ts,speed,uint_type_col_name,ubigint_type_col_name,utinyint_type_col_name,usmallint_type_col_name,varchar_type_col_name,color,varbinary_type_col_name,tinyint_type_col_name,smallint_type_col_name,nchar_type_col_name,bigint_type_col_name,float_type_col_name,bool_type_col_name,double_type_col_name) VALUES ("2016-12-16 05:32:32", -1915664223, 1413499274, -5989223773653851128, -116, 26921, "ownicmwaptorttec", "ygzvhoyputddyowk", "wrkogplcbpemkxdn", -33, 18817, "clrhqokhsspzbxgc", -859502208102825417, -1.5128753403587181e+38, True, -1.3508466034943649e+308);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,varbinary_type_tag_name,int_type_tag_name,b,tinyint_type_tag_name,double_type_tag_name,ubigint_type_tag_name,bigint_type_tag_name,uint_type_tag_name) TAGS("point(1.0 1.0)", "uvvthorbwmsnfjoz", 1030009117, "tyysttboblgpkypz", -69, 1.23842724503325e+308, 9192485125431744596, 29000826379644817, 3538861351) (ts) VALUES ("2016-12-16 05:41:58");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,bool_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name,ubigint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,f,varbinary_type_tag_name,b,uint_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,smallint_type_tag_name) TAGS(859129059, True, "uxbtbeicqswhoomz", -62, 3734511001108851286, 34, -1.5815188393505786e+308, 4.745631034394835e+37, "qoxpifitbedupend", "ppnqdxusgmlszoqw", 1004378621, 3705733919632701068, "point(1.0 1.0)", 38489, 29461) (ts,tinyint_type_col_name,usmallint_type_col_name,varchar_type_col_name,color,geometry_type_col_name,float_type_col_name,bool_type_col_name,speed,double_type_col_name,bigint_type_col_name,varbinary_type_col_name,uint_type_col_name) VALUES ("2016-12-16 06:08:50", 76, 12466, "nujbmtudnrleyvdy", "bgzmcovdcpghdlzi", "point(1.0 1.0)", -1.7192977879525407e+38, False, -910281174, 1.5781580952881898e+308, -3488933529074665133, "tfrgpkdcgqesmvbt", -1938221877);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,f,double_type_tag_name,bigint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,b,ubigint_type_tag_name,varbinary_type_tag_name) TAGS("zfsuqkilwplvrebi", "ccqtsosvtnlbnkig", 103, 78, 1.3362517686508279e+38, -9.563002829076875e+307, -6627606384484374568, 1126761657, 27844, True, 30861, "point(1.0 1.0)", "hmnpllitztvocynw", 8180438212890952155, "qfekhbflxpldbddz") (ts,varchar_type_col_name,geometry_type_col_name,uint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 06:12:48", "kavbtvnlqfmeyfgh", "point(1.0 1.0)", 86018588, 3835);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,geometry_type_tag_name,int_type_tag_name) TAGS("rfnbxcuzeizhglks", 7283103569704428551, 94, "dudcfczmlnwyrkvp", "geivplrozxtocipa", "point(1.0 1.0)", -1224429691) (ts,float_type_col_name,varbinary_type_col_name,double_type_col_name,tinyint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 06:26:38", -1.3097826665873164e+37, "xwfgtqoddvxyhcre", 1.5229330656778251e+308, 118, 1385681942);') + 
tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,nchar_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name) TAGS(9169294385094804151, "rdibiqdizmdrkfrf", 3962954747786936314, 54) (ts,geometry_type_col_name,varbinary_type_col_name,utinyint_type_col_name,smallint_type_col_name,float_type_col_name,tinyint_type_col_name,bigint_type_col_name,speed) VALUES ("2016-12-16 06:32:37", "point(1.0 1.0)", "ciymbzfmbfqqsxkv", -76, 32317, -9.09960741209682e+37, 77, 3789368028357457617, -432822270);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,f,tinyint_type_tag_name,varchar_type_tag_name,bool_type_tag_name,utinyint_type_tag_name,bigint_type_tag_name,smallint_type_tag_name,int_type_tag_name,b,geometry_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name) TAGS(-1.2944016628561449e+308, 7.983554080975201e+37, 53, "fcbmysknvqvehfiz", True, -6, -8922838578018838847, -20254, 1722340321, "wrrcjsdfwrihjnqy", "point(1.0 1.0)", "yflqloxmpchzkdts", 57539, "kvvzxzerqnmnzxwk") (ts,nchar_type_col_name,tinyint_type_col_name,ubigint_type_col_name,geometry_type_col_name,usmallint_type_col_name,double_type_col_name,varchar_type_col_name,uint_type_col_name,bool_type_col_name,speed,bigint_type_col_name,varbinary_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 06:34:00", "mrzcfbzniotxzbdi", 58, 7707860227701048488, "point(1.0 1.0)", 31402, -5.6095749182351896e+306, "ibzewxdivctecfot", -1652235605, False, -933263235, 384543174410180162, "woixarpkcucglfey", 124);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,tinyint_type_tag_name,nchar_type_tag_name,bool_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,uint_type_tag_name,varchar_type_tag_name,int_type_tag_name,varbinary_type_tag_name,utinyint_type_tag_name) TAGS(-5.930942323411467e+307, -121, "ckbowkjukztkjqoi", True, -2618046353021226711, "point(1.0 1.0)", 2166418040, "yxhuoimpnuqhlhgw", -1659085223, "zeyylgulwwdnwsii", -3) (ts,bigint_type_col_name) VALUES ("2016-12-16 06:34:14", -4941469222634542296);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,b,tinyint_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,double_type_tag_name,nchar_type_tag_name,bool_type_tag_name,utinyint_type_tag_name,f,uint_type_tag_name) TAGS(5489251136620324287, "igrbjdomcwywiekb", -25, "xhhaerxfxfobwcvn", 14001, -1.796912338313077e+307, "umtnvilvgmbxbxsq", True, 43, -1.8528500716419152e+38, 1497513541) (ts) VALUES ("2016-12-16 06:35:34");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,ubigint_type_tag_name,f,b,bool_type_tag_name,geometry_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,smallint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name) TAGS(2431406493780788447, 4861403703027821860, 1.99259675649246e+38, "yzdneqzibyuwrypn", True, "point(1.0 1.0)", -127, "llytlpdymtuetvfm", -25050, -543736632, 11798, "dgznnsdqgrnrmazq") (ts,bool_type_col_name) VALUES ("2016-12-16 06:36:04", False);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,geometry_type_tag_name) TAGS(-5334, -79, 1.2315866993517746e+308, "mywxqnmbjqsxdsff", "dahqwtcwipwwbdye", "point(1.0 1.0)") 
(ts,uint_type_col_name,bigint_type_col_name,geometry_type_col_name,varbinary_type_col_name,tinyint_type_col_name,speed,bool_type_col_name,color,ubigint_type_col_name,float_type_col_name) VALUES ("2016-12-16 07:08:57", -703937341, 508746925742892883, "point(1.0 1.0)", "yymznbjazhklgjxb", 12, 1587775618, True, "ozzyhsagmrpiyrjq", 5401400824616407615, -1.7440186980309764e+37);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,varchar_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name,bool_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,ubigint_type_tag_name,b,utinyint_type_tag_name,f,smallint_type_tag_name) TAGS(-7.999887918520793e+307, "qwbmvcvwhlkxwwld", 21, 468530456171689843, True, "point(1.0 1.0)", 10723, 4059334937310534108, "wgqsiptkcwhvmjwe", 99, -5.310912588143673e+37, -17363) (ts,float_type_col_name,geometry_type_col_name,smallint_type_col_name,varbinary_type_col_name,uint_type_col_name,speed,nchar_type_col_name,color,ubigint_type_col_name,bigint_type_col_name,usmallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 07:14:40", 2.8869331003756176e+38, "point(1.0 1.0)", -29659, "tbhqqyiqgtgckyyg", -471326270, 944696481, "ysahvyevqkwulqqv", "lzhzxslfmdzwsxpf", -9113216317948681955, 3545622045223678570, -25303, -80);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name) TAGS("auxtouxzloojhqtf") (ts,varbinary_type_col_name,nchar_type_col_name,double_type_col_name,color) VALUES ("2016-12-16 07:15:30", "ddluzbitxrtbkjbj", "ljgnuemqzlbotqwi", -5.352451641324967e+307, "suwegzsujiqfyvqt");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,geometry_type_tag_name,utinyint_type_tag_name,double_type_tag_name,nchar_type_tag_name,uint_type_tag_name,bigint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,tinyint_type_tag_name,f) TAGS("pizyvqqjzxhkofpm", "point(1.0 1.0)", -22, 1.5081448005935855e+308, "ufwxcfxfunjhxxzm", 282211280, 4861090276718143202, -2091585085, "lbxpmtsvtpucnlzy", -30781, False, 124484168740646, "yjuefabvljbxrgsx", 33815, -72, -1.128908264468181e+38) (ts,bigint_type_col_name,ubigint_type_col_name,bool_type_col_name,varchar_type_col_name,double_type_col_name,smallint_type_col_name,tinyint_type_col_name,uint_type_col_name,speed,geometry_type_col_name) VALUES ("2016-12-16 07:17:15", 1001689612302252183, -105401984359440882, False, "wbbktwsethudmtja", -4.557626601447439e+307, 21541, -98, -2080051974, -1861023150, "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,varchar_type_tag_name,varbinary_type_tag_name,int_type_tag_name,bigint_type_tag_name,geometry_type_tag_name) TAGS(2320968032, "dhquczttiqbnfyjr", "cixjeekgvvixcbud", -484475030, -4789196290580581091, "point(1.0 1.0)") (ts,bigint_type_col_name,varbinary_type_col_name,nchar_type_col_name,double_type_col_name,speed,color,varchar_type_col_name,bool_type_col_name,uint_type_col_name,ubigint_type_col_name,smallint_type_col_name,utinyint_type_col_name,float_type_col_name) VALUES ("2016-12-16 07:18:05", -1206873661752154164, "pjkbldrvpglbnulx", "dztdpobuyksgdhgt", -1.1951640769154985e+308, 542907991, "ekubcyxbjiciigss", "xnxufupmzhskjmpq", False, 267042413, 3084280261011428030, 26039, 31, 2.539995004940438e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(ubigint_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,tinyint_type_tag_name,nchar_type_tag_name,int_type_tag_name,double_type_tag_name) TAGS(-4414306501424620178, 4231079052, "mthgdnjdhnjraffo", False, -106, "hwcjdivkccrlukjw", 1762002945, -1.0781154156730552e+307) (ts,float_type_col_name,varbinary_type_col_name,usmallint_type_col_name,double_type_col_name,speed,varchar_type_col_name,nchar_type_col_name,utinyint_type_col_name,bigint_type_col_name,bool_type_col_name,geometry_type_col_name,uint_type_col_name,smallint_type_col_name,ubigint_type_col_name,color,tinyint_type_col_name) VALUES ("2016-12-16 07:24:51", -8.579854162437403e+37, "izylbbcyhuxvmohl", -11832, -1.238446106977585e+308, 1414832325, "nukonaftikjqqbdj", "cykvszvrxguiajwg", -28, -4542731759437382057, False, "point(1.0 1.0)", -250287824, -22672, 2059749290085408427, "wvojjnanujjbrjbk", 37);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,uint_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,b,nchar_type_tag_name,int_type_tag_name) TAGS(-87, 3054590850, 10240, False, 7471097903390818886, "point(1.0 1.0)", "gvmesmuwkboibbwm", "qnkzvaetvvsoliqw", "tsaezwjdobewrsgg", 718941504) (ts,uint_type_col_name,bool_type_col_name,color,varchar_type_col_name,speed,utinyint_type_col_name,tinyint_type_col_name,bigint_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 07:47:31", -1418250282, True, "cxyfgdrwrdryoxdv", "urqrvjnzpghqycjl", 1139755242, -53, -40, -3750115493851220318, "yvystiojfnkeinwv");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name) TAGS(11662) (ts,float_type_col_name,smallint_type_col_name,nchar_type_col_name,bigint_type_col_name,bool_type_col_name,tinyint_type_col_name,uint_type_col_name,speed,varchar_type_col_name,varbinary_type_col_name,usmallint_type_col_name,color,double_type_col_name,utinyint_type_col_name,geometry_type_col_name) VALUES ("2016-12-16 07:47:42", 1.5652814126254586e+38, 31965, "jzglkmapiqvmkcgt", -456718528158805320, True, 13, 1550395191, -468850482, "qsuieurxcdhljsyg", "pimzexqqdusasasq", -25133, "heaqwaulngjummmj", 1.4472853493781175e+307, -100, "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,tinyint_type_tag_name,b,varbinary_type_tag_name,int_type_tag_name,bool_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name) TAGS("point(1.0 1.0)", 71, "egjrjdgnsdelmvoi", "nnvaiqrgsmsgulsf", -1764160281, False, -3170988760692140923, 59567, "vvwezjglauxztbkw", -28, 3876664873, 2077268665321637137) (ts,geometry_type_col_name,ubigint_type_col_name,utinyint_type_col_name,speed,varbinary_type_col_name,float_type_col_name,bigint_type_col_name,color) VALUES ("2016-12-16 07:53:43", "point(1.0 1.0)", -8518449815289332525, -90, -1716688898, "llphooyquheusfis", 1.8971999924428717e+38, 6265618875544853883, "mhnrsjknurfeejin");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,int_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,uint_type_tag_name,bigint_type_tag_name,varbinary_type_tag_name,geometry_type_tag_name,f,double_type_tag_name,smallint_type_tag_name,nchar_type_tag_name,varchar_type_tag_name,ubigint_type_tag_name) TAGS(-107, -1135898492, 7359, True, 2054944069, 
-5204892746217048371, "dtbkgxohffiiqvex", "point(1.0 1.0)", -1.6139634795645272e+38, -3.4474223981039393e+307, -4519, "geosetpzbiyvrxsl", "owahukwcrwnazqmg", -6817825300706607165) (ts,nchar_type_col_name) VALUES ("2016-12-16 07:57:23", "yfefrbnkzlnvszku");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,varbinary_type_tag_name,utinyint_type_tag_name,tinyint_type_tag_name,geometry_type_tag_name,uint_type_tag_name) TAGS("gujkmscmzrumlzgr", "kyjkxjmmiujedyoc", -63, -30, "point(1.0 1.0)", 3040843900) (ts,varbinary_type_col_name,uint_type_col_name,usmallint_type_col_name,tinyint_type_col_name,bigint_type_col_name,nchar_type_col_name,float_type_col_name,bool_type_col_name,utinyint_type_col_name,speed,varchar_type_col_name,smallint_type_col_name,ubigint_type_col_name,color) VALUES ("2016-12-16 08:00:58", "tzthqrttxnhexwie", -1719097053, 12664, 92, 8111449057800298881, "ckhlhqkuqbbkkqbc", -1.2378307380230564e+38, True, -77, 832149914, "zhaatkireunhqjmr", 10552, -1885112753329295315, "etiwbiqspffpftuy");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,uint_type_tag_name,usmallint_type_tag_name,int_type_tag_name,bool_type_tag_name,tinyint_type_tag_name,smallint_type_tag_name,f,b,nchar_type_tag_name,varbinary_type_tag_name,double_type_tag_name,geometry_type_tag_name,ubigint_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name) TAGS(6936877712832826111, 379134044, 5584, -873981273, True, -36, 23332, 3.0590465783887826e+38, "doyrcpkmqdmkvisu", "ebrzlgpzjdhwgbkg", "favnzcgrgbqhjykc", -5.409211271125824e+306, "point(1.0 1.0)", 7020490078397766201, "dfmaignpkotpzfzp", 0) (ts,utinyint_type_col_name,color,tinyint_type_col_name,usmallint_type_col_name,speed,float_type_col_name,smallint_type_col_name,varbinary_type_col_name,uint_type_col_name,bool_type_col_name,double_type_col_name) VALUES ("2016-12-16 08:08:05", 100, "embwiippvzztzfsx", -29, 30679, 398157649, -2.8271578721561e+38, 16481, "eapyuftmoemhlous", 1346563710, False, 1.2527960914103297e+308);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,varchar_type_tag_name,f,int_type_tag_name) TAGS(14445, "frbgwoxdfgvomhij", 3.112685005605257e+38, 919563343) (ts,smallint_type_col_name,uint_type_col_name,nchar_type_col_name,color,tinyint_type_col_name,utinyint_type_col_name,double_type_col_name,float_type_col_name,bigint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 08:10:32", 30534, 287062448, "radqyprkqzicegcm", "digmvwqxesuzblxo", -45, 57, 4.2367818689200055e+307, -1.2630859264197575e+38, 5777026179863424498, -18041);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,geometry_type_tag_name,b) TAGS(-125, "point(1.0 1.0)", "sknwbldiaotpguoh") (ts,geometry_type_col_name,speed,bool_type_col_name,uint_type_col_name,nchar_type_col_name,tinyint_type_col_name,smallint_type_col_name,bigint_type_col_name,usmallint_type_col_name,varchar_type_col_name,float_type_col_name,varbinary_type_col_name,double_type_col_name) VALUES ("2016-12-16 08:21:15", "point(1.0 1.0)", -533077180, True, 125980213, "rzpvyyiafbdgsrlt", -18, 516, 1836336637894618829, -32398, "qyolcbkiphumcyni", -1.7088547654566307e+38, "uhfuttyjdsiwfpge", 9.045738084106129e+307);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name) TAGS("eibgfpzivbkqipxh", "bajctkjmycezvfjy") 
(ts,uint_type_col_name,nchar_type_col_name,geometry_type_col_name,speed,varbinary_type_col_name,varchar_type_col_name,ubigint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 08:27:17", 182114551, "bqoiqgcelwawaxoh", "point(1.0 1.0)", -2048616567, "ngeyahwjhojgfrji", "ajndkftrjrhebhkh", 7449846270987100591, 21014);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,double_type_tag_name) TAGS(-4625726455522573231, 2.8073063831603936e+307) (ts,float_type_col_name,bigint_type_col_name,uint_type_col_name,usmallint_type_col_name,color,varchar_type_col_name,tinyint_type_col_name,nchar_type_col_name,bool_type_col_name,varbinary_type_col_name,geometry_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 08:28:17", 2.389718468668489e+38, 232575111591325522, 1038517632, 22294, "gkuqyywzvitscnmf", "pzotmkbymjkajqxh", 84, "zwaqqoiitsaldvxd", False, "lutscwlzfncvtbzh", "point(1.0 1.0)", 104);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,varchar_type_tag_name,utinyint_type_tag_name,geometry_type_tag_name,smallint_type_tag_name,usmallint_type_tag_name,int_type_tag_name,b,uint_type_tag_name,f,varbinary_type_tag_name,nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,bool_type_tag_name,bigint_type_tag_name) TAGS(3789358482002489003, "cbbfcxtbficxjubk", 87, "point(1.0 1.0)", 16290, 65141, 1107108066, "dbvdcpsxhwxkfcyz", 1995515016, 2.7409223337049425e+38, "qngmplwikoseyylt", "tgzwjjwlcizxxwnr", -1.6889546962063194e+308, -114, False, -3482122891531832648) (ts,nchar_type_col_name,tinyint_type_col_name,geometry_type_col_name,double_type_col_name,color,smallint_type_col_name,speed,usmallint_type_col_name,ubigint_type_col_name,varbinary_type_col_name,bigint_type_col_name,varchar_type_col_name,utinyint_type_col_name,float_type_col_name,bool_type_col_name,uint_type_col_name) VALUES ("2016-12-16 08:28:56", "ofdxnlygqcwvsdcj", -29, "point(1.0 1.0)", -1.8130271598650515e+307, "nefvavoukqtdzkgt", -17717, 1471102951, -22978, 6641200189088106550, "tzjbozbjlczkgwxa", -6094740512181599472, "etogqzsgcqipiztv", 83, 3.26367598847478e+38, False, -748583096);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,uint_type_tag_name,nchar_type_tag_name,bool_type_tag_name,double_type_tag_name,int_type_tag_name,tinyint_type_tag_name) TAGS(-3.3072135298943572e+38, 1159943179, "kswqkzvltgixlmfs", True, -4.306722847726777e+307, 232669674, 110) (ts,double_type_col_name,smallint_type_col_name,utinyint_type_col_name,bool_type_col_name,tinyint_type_col_name,color,float_type_col_name,bigint_type_col_name,uint_type_col_name,geometry_type_col_name,ubigint_type_col_name,usmallint_type_col_name,nchar_type_col_name,varbinary_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 08:32:49", 6.47061313170663e+307, 10319, -39, False, 11, "zrdnjfphcwpeinqc", -1.9983066068141309e+37, -7934490781005501591, 254843018, "point(1.0 1.0)", 3715382894868674189, 15127, "czgcvwrtlnhrftfw", "mmsahbdykxugoszh", "oarqmzgnlqfgoetb");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varchar_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,double_type_tag_name,bigint_type_tag_name,b,f) TAGS("eqpvdmknfwrubsuz", 25062, "viftdmeaoemnzlte", -8.206973826119586e+307, -3370842340933526422, "laotfjfckrruwnso", 1.6151693420601788e+37) 
(ts,ubigint_type_col_name,usmallint_type_col_name,bool_type_col_name,varchar_type_col_name,uint_type_col_name,color,float_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 08:39:16", 3961511905025407799, 17678, False, "donavljkknjvmamp", 1568179614, "dgtldyeuhhgqsrhz", 4.974589437659843e+37, 4642);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,varbinary_type_tag_name,utinyint_type_tag_name,smallint_type_tag_name,int_type_tag_name,geometry_type_tag_name,ubigint_type_tag_name,nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,usmallint_type_tag_name,bigint_type_tag_name,uint_type_tag_name,bool_type_tag_name) TAGS("hsbqzsfvkllngzft", "oshuiqqteddxsxhg", 12, -6023, -1854137419, "point(1.0 1.0)", -7820787410908786398, "ktlafrcpckbnasok", -1.1140330045511274e+308, -121, 30336, -5235830165957712064, 1885913417, True) (ts,bigint_type_col_name,varbinary_type_col_name,utinyint_type_col_name,usmallint_type_col_name,tinyint_type_col_name,color) VALUES ("2016-12-16 08:47:05", 1101333002744137125, "ztteslebezjpmdic", 9, -16849, -109, "otxajtxgosmycgwf");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,tinyint_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name) TAGS(12811, 115, 11564, "point(1.0 1.0)", "fvllsyeldqjvxwvh") (ts,nchar_type_col_name,color,float_type_col_name,ubigint_type_col_name,tinyint_type_col_name,geometry_type_col_name,double_type_col_name,varbinary_type_col_name,bigint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 08:52:23", "kzlebaiflssmhrez", "rmgygwigqpcedjfd", -1.0528922331364947e+38, 2053899259005385975, -4, "point(1.0 1.0)", 1.2949613471672877e+307, "xpnwhkobylhldlja", -2081901725973275838, 1084220081);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,int_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,bigint_type_tag_name,geometry_type_tag_name,b,double_type_tag_name,nchar_type_tag_name,smallint_type_tag_name,f,varchar_type_tag_name,ubigint_type_tag_name) TAGS(False, 81017693, -59, 3106871640, 6594200175307481979, "point(1.0 1.0)", "cxlcaxjqbottjwbt", 1.0810790118515493e+307, "mzpfnozkhzccymcy", -23268, -2.5710992840734416e+38, "bzcemqcjmfensoam", -6857344180628167939) (ts,varchar_type_col_name,tinyint_type_col_name,speed,bool_type_col_name,color,nchar_type_col_name,float_type_col_name,bigint_type_col_name,double_type_col_name,utinyint_type_col_name,ubigint_type_col_name,usmallint_type_col_name,geometry_type_col_name,uint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 08:52:40", "goxjebyqxffhytny", 42, 956707019, False, "mwhbccpdjxrnlfmj", "hsswrkdrgvqzrqvc", -3.1923783000520714e+37, 6439233177980554597, 1.5462567361109987e+308, -104, 5670859655382432947, -13629, "point(1.0 1.0)", -291035521, -28198);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,tinyint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,smallint_type_tag_name,f,varbinary_type_tag_name,nchar_type_tag_name,double_type_tag_name,bigint_type_tag_name) TAGS(-1951062887, -15, False, 8992786638768246075, "point(1.0 1.0)", "zkbchpvyaborestv", 14319, -2.9661092483352556e+38, "sgmkfslmxakjfqna", "hujharmlkobjdcnn", -1.3558306099409006e+308, 6268021739102999331) (ts) VALUES ("2016-12-16 09:57:37");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(usmallint_type_tag_name,f,b,bigint_type_tag_name,smallint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name) TAGS(1359, 3.0441073077150407e+38, "hdozqqjsswenceob", 4323504650099562462, -468, False, 2127413600006157512) (ts) VALUES ("2016-12-16 09:57:50");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,geometry_type_tag_name,ubigint_type_tag_name,usmallint_type_tag_name,uint_type_tag_name) TAGS("jrfnpvmonxehrxqh", 5.481850803776123e+307, -72, "point(1.0 1.0)", -6843236102647158686, 12864, 3757105281) (ts) VALUES ("2016-12-16 10:04:07");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,tinyint_type_tag_name,f,ubigint_type_tag_name,int_type_tag_name,double_type_tag_name,varchar_type_tag_name,varbinary_type_tag_name,bigint_type_tag_name,geometry_type_tag_name) TAGS(38, "gdjxupnqalfsczwf", 6556, True, 16, 2.3326392387567076e+38, -3052440068987461367, 1379207868, 9.408456780510208e+307, "yfoiebrvvxepulkf", "gyldwqexpqwtymqb", -8612949720672024630, "point(1.0 1.0)") (ts,nchar_type_col_name,tinyint_type_col_name,uint_type_col_name,varchar_type_col_name,float_type_col_name,smallint_type_col_name,speed) VALUES ("2016-12-16 10:07:23", "xhzhyldsilkilnhj", -111, 29265820, "hfayzyyeielxuajo", -1.9673771091182076e+38, -16871, -687056858);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,f,ubigint_type_tag_name,double_type_tag_name) TAGS("ojupiiqggvxiyyeg", 1.828613068144597e+38, -3546696963839193043, 1.063690235483227e+308) (ts,bigint_type_col_name,geometry_type_col_name,color,speed,tinyint_type_col_name,usmallint_type_col_name,double_type_col_name,utinyint_type_col_name,ubigint_type_col_name,uint_type_col_name,float_type_col_name,bool_type_col_name,varbinary_type_col_name,smallint_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 10:08:10", -8093382424883326599, "point(1.0 1.0)", "cqihlukuoejlpfry", 979506951, 21, -22385, -1.5427105800265006e+308, 27, -8297437264602062727, -1746384009, 2.252643841383272e+38, True, "lyyysgtqcxnmrtup", -12771, "lfzvhkaxvgdtprxe");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,tinyint_type_tag_name,smallint_type_tag_name) TAGS(-3588987228330855806, -26, 21365) (ts,tinyint_type_col_name) VALUES ("2016-12-16 10:09:18", -32);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,nchar_type_tag_name,double_type_tag_name,utinyint_type_tag_name) TAGS(245950801, "yvkkiivzhbqnnmsk", -1.658703905525468e+308, 28) (ts,smallint_type_col_name) VALUES ("2016-12-16 10:13:19", 26054);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,varchar_type_tag_name,double_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name,b,utinyint_type_tag_name) TAGS(1694485723, False, -474604515817697594, "rtgthkbkwlbutbgv", 2.3887324912266175e+307, "point(1.0 1.0)", "gopcjcoagmnblahg", "jwnrdxiishgvobmd", 48) (ts,nchar_type_col_name,double_type_col_name,bigint_type_col_name,uint_type_col_name,smallint_type_col_name,tinyint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 10:19:33", "xeinqeysquavrsyz", -8.949042643309358e+305, -5607735618216794531, -1459791787, -19075, -55, -127);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(f,uint_type_tag_name,utinyint_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,varchar_type_tag_name,bool_type_tag_name,b,tinyint_type_tag_name,extratag,varbinary_type_tag_name) TAGS(-2.4047245829078376e+38, 2561944294, 9, -8008616984432962570, -13634, "point(1.0 1.0)", 40692, "wnbqrneelpvqwlja", "jvqvobbxkeelzunr", True, "fmdtxmzdhmejwdtg", 34, 935844453, "pejduzquhxddarph") (ts,varbinary_type_col_name,varchar_type_col_name,double_type_col_name,bool_type_col_name,nchar_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 11:27:47", "sgusymsmfcwjnpcy", "npntezujzhtxahzl", -2.8300806976493463e+307, True, "vozigylgtwuwqyxd", -29259);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,geometry_type_tag_name) TAGS(2, "point(1.0 1.0)") (ts,tinyint_type_col_name,color,double_type_col_name,smallint_type_col_name,utinyint_type_col_name,float_type_col_name,varbinary_type_col_name,bool_type_col_name,speed,nchar_type_col_name,ubigint_type_col_name,uint_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 11:38:41", 65, "xbkejkdziselkysy", -1.3741827310590848e+308, 31519, -36, 5.743820075715186e+37, "gnqhbldkghybjgsu", True, 1371169555, "xhfnbxeeaaniflfh", -479840166843776187, 1922629809, -8139122071465810064);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name,nchar_type_tag_name,varbinary_type_tag_name,smallint_type_tag_name,f,uint_type_tag_name,bigint_type_tag_name,double_type_tag_name,utinyint_type_tag_name,tinyint_type_tag_name,extratag,bool_type_tag_name) TAGS(-270631341, 46930, "point(1.0 1.0)", "gnbznssgvnzwrwxw", "cesmnjhqapqjvrja", 23088, -3.3164207581144432e+38, 1700967625, -1030316662035835968, 1.2011743206213233e+308, -92, 121, 367729172, True) (ts,color,utinyint_type_col_name,usmallint_type_col_name,tinyint_type_col_name,bigint_type_col_name,ubigint_type_col_name,nchar_type_col_name,varchar_type_col_name,speed,double_type_col_name,smallint_type_col_name,float_type_col_name) VALUES ("2016-12-16 11:42:49", "tldfcpsuvkwvvcxt", 27, 23508, 56, 7459772708834616479, 4287277222975526137, "irlpvwywkyfduafy", "fthdcsvpfasrorxm", 2014476868, 6.679126531840444e+307, 24227, 7.882713831816527e+36);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name) TAGS("ouvptxbwrpffphkp") (ts,color,double_type_col_name,float_type_col_name,nchar_type_col_name,bigint_type_col_name,varchar_type_col_name,ubigint_type_col_name,bool_type_col_name,tinyint_type_col_name,uint_type_col_name,geometry_type_col_name,speed,usmallint_type_col_name,smallint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 12:34:44", "wnyagxmugtlzloqs", 2.5224403678140916e+307, -2.9593116477640043e+38, "wpatbvwqhdetaoko", 7131704094598121396, "cenguwerksdlwthz", -3084446013548702169, False, 121, -53423625, "point(1.0 1.0)", 1601558648, 23715, -10339, 45);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,varbinary_type_tag_name,int_type_tag_name,varchar_type_tag_name,bool_type_tag_name,smallint_type_tag_name,utinyint_type_tag_name,double_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name) TAGS("ebiacunkvnicuhlq", "fqqxcyidlzmeaqqn", 1865880042, "fqyixbagtpigrvkm", True, -15973, 52, -1.2233588646642921e+308, "kjegzlhpztxcjjjt", 49, 3661613052, -3064287245566411855) 
(ts,tinyint_type_col_name,speed,usmallint_type_col_name,uint_type_col_name,bool_type_col_name,double_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 12:35:29", -54, -1777243683, -5292, -240610904, False, -1.4017848527201252e+308, "diubdgptppxueerk");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,tinyint_type_tag_name,varbinary_type_tag_name,int_type_tag_name,utinyint_type_tag_name,varchar_type_tag_name,b,double_type_tag_name,smallint_type_tag_name,uint_type_tag_name) TAGS(7218832446436978154, 34, "jqvhmixowianctmg", -169644439, -111, "yulvetumzeimvdew", "bsbqhjdbykaxhkah", -4.3917327843321925e+307, -12324, 3993659508) (ts,geometry_type_col_name,usmallint_type_col_name,color,tinyint_type_col_name,smallint_type_col_name,varchar_type_col_name,varbinary_type_col_name,double_type_col_name,utinyint_type_col_name,nchar_type_col_name,bigint_type_col_name,bool_type_col_name,uint_type_col_name,ubigint_type_col_name,float_type_col_name,speed) VALUES ("2016-12-16 12:55:41", "point(1.0 1.0)", 631, "nmddklgtjxrugcig", -88, -8868, "miajgamwlraattqs", "kffhsedlnafobisv", 1.4423313487130354e+308, -116, "kxoycofwhmsaeohq", -1136894476255564565, True, -939744415, -6653064581027995564, -1.9832809127199404e+38, -745235635);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,varchar_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,double_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,geometry_type_tag_name,uint_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,b,varbinary_type_tag_name,int_type_tag_name,extratag,tinyint_type_tag_name) TAGS(6.311858336249026e+37, "jgyxmmpsdxqtdwrb", 109, "ziwccnmvbdvxjqvt", -8.093949887150045e+307, 50753, True, "point(1.0 1.0)", 1312277084, -22696, -6774059684436011400, "cfvnaoytrwsqfgik", "scoopbahexobzuzi", 260884614, 2013977267, -98) (ts,geometry_type_col_name,bigint_type_col_name,float_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 12:58:13", "point(1.0 1.0)", 222826911129858937, 1.4161786490709517e+38, "pwyyqxegncyrgybp");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(extratag,tinyint_type_tag_name,bool_type_tag_name,utinyint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name) TAGS(-350969142, -27, True, 95, -2014323156, "ytaraprsrowqgtya", 2187064438, 7657067162517860811, -11739) (ts,ubigint_type_col_name,speed,float_type_col_name,geometry_type_col_name,utinyint_type_col_name,smallint_type_col_name,uint_type_col_name,usmallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 12:58:20", 1695470437837518753, -248107853, -2.308796864773065e+38, "point(1.0 1.0)", 96, 27392, -317204398, -28969, 27);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,varchar_type_tag_name,extratag,geometry_type_tag_name,f,bool_type_tag_name,double_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,nchar_type_tag_name,utinyint_type_tag_name,b,varbinary_type_tag_name,int_type_tag_name) TAGS(9315, "zcptltyqaoeubpam", -411172808, "point(1.0 1.0)", -1.274336621267979e+38, True, -1.574639717196322e+308, 3207202020, 8037317073978039576, 14431, 1953592773870071856, "qwzkcwunsuoqokru", -83, "crpiwmvdmhhxtnon", "rzvcgilgehavrorx", 1770221516) (ts,float_type_col_name,color,uint_type_col_name,speed,varchar_type_col_name,bigint_type_col_name,smallint_type_col_name) VALUES 
("2016-12-16 12:58:25", 2.203270740971935e+38, "gclrvxcyluoaqkul", -1757420543, -1667540040, "fhcuwffqunwujhnu", -6952698129894047939, 31135);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(extratag,geometry_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,int_type_tag_name,b,bool_type_tag_name,varchar_type_tag_name,varbinary_type_tag_name,tinyint_type_tag_name,ubigint_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,utinyint_type_tag_name) TAGS(928439163, "point(1.0 1.0)", -5510, -6111546345505423465, 1619259430, "zqnekxkjgjysmuiz", False, "uzacypoysstbvzjr", "rzhyionncrfhslvk", -95, 2084839207513824909, "xbakjryihqmzoyxb", 1124, -51) (ts) VALUES ("2016-12-16 13:10:00");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,f,tinyint_type_tag_name,smallint_type_tag_name,varbinary_type_tag_name,uint_type_tag_name,utinyint_type_tag_name) TAGS(-7654723116287337126, 7.0413764481621105e+37, 28, 14707, "ohmuqnoqockmmvyr", 503417135, -1) (ts,varchar_type_col_name,color,geometry_type_col_name,usmallint_type_col_name,float_type_col_name,uint_type_col_name,ubigint_type_col_name,bigint_type_col_name,nchar_type_col_name,bool_type_col_name) VALUES ("2016-12-16 13:11:00", "vvnkffiwzkwchxku", "ettaavibyrfbkwnd", "point(1.0 1.0)", -14161, -1.8652679722611485e+38, -32936243, -5932829237196271077, 1036650175201440454, "gnkdvttswywmpmqr", False);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,utinyint_type_tag_name,b,extratag) TAGS(-1.6449590526148958e+308, 5, "oyqjdboasjbcgkoa", 769124366) (ts,smallint_type_col_name,utinyint_type_col_name,varbinary_type_col_name,float_type_col_name,bool_type_col_name) VALUES ("2016-12-16 13:12:21", -13042, 36, "ioqlptmacnphnudi", -2.8818733348179174e+38, True);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,geometry_type_tag_name,extratag,usmallint_type_tag_name,varchar_type_tag_name,bigint_type_tag_name,utinyint_type_tag_name,b,tinyint_type_tag_name) TAGS(-3.002723197204508e+38, "point(1.0 1.0)", 673924760, 10423, "zqbdmmvwolrmxtdy", 9067788071941407006, 58, "xfzkhhjsdslfocok", -6) (ts,bigint_type_col_name,float_type_col_name,usmallint_type_col_name,bool_type_col_name,uint_type_col_name,color,double_type_col_name) VALUES ("2016-12-16 13:18:58", -8128143152489083329, 3.1863841600401516e+38, -16841, False, -1439274645, "ayaagfubotrfelgd", 3.940540502421231e+307);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,geometry_type_tag_name,uint_type_tag_name,double_type_tag_name,extratag,ubigint_type_tag_name,int_type_tag_name,varchar_type_tag_name,bigint_type_tag_name,varbinary_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,f) TAGS(72, "point(1.0 1.0)", 664119737, 8.636577697672091e+307, -1956689850, 5442726894170414063, -620118845, "atdkvjdrsslbkqwk", -1730258878203616660, "jetzkpnswyrblizo", 62924, "yubdmiswgttrzlud", -2.6952802203722693e+38) (ts,tinyint_type_col_name,varbinary_type_col_name,bigint_type_col_name,utinyint_type_col_name,nchar_type_col_name,usmallint_type_col_name,geometry_type_col_name,double_type_col_name,color,uint_type_col_name,float_type_col_name,ubigint_type_col_name,varchar_type_col_name,bool_type_col_name,speed) VALUES ("2016-12-16 13:22:31", -101, "pezwjcdegugdqlmj", 2660651858547046010, -106, "xdhkuoxnpbrxzikz", 32513, "point(1.0 1.0)", 1.0237067068787619e+308, "qipropwsssqtdswq", -918947945, -3.2678520762372033e+38, 
407015107737783176, "vvigrjnmxmzasgqt", False, -1322323386);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,varchar_type_tag_name,geometry_type_tag_name,double_type_tag_name,extratag,bool_type_tag_name,usmallint_type_tag_name,tinyint_type_tag_name,f,int_type_tag_name,ubigint_type_tag_name) TAGS(667307468, "lykdtqsuiuxbdejt", "point(1.0 1.0)", -3.233371151222957e+307, 780193507, False, 6454, -104, -1.6348292668856767e+38, -332964092, 6842647149944893582) (ts,bigint_type_col_name,nchar_type_col_name,double_type_col_name,smallint_type_col_name,varbinary_type_col_name,bool_type_col_name,speed,ubigint_type_col_name,utinyint_type_col_name,color,geometry_type_col_name,float_type_col_name,tinyint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 13:23:55", -2751819826451411816, "fwuvslqvozjdkybl", -1.4309368487421518e+308, -29059, "ncbtjyvhgdmrtwyb", False, 476112352, 5847161143554335168, 103, "rkxynqftsspdyohe", "point(1.0 1.0)", -2.7303867859897275e+38, 120, -15572);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(extratag,int_type_tag_name) TAGS(1467247951, -27058643) (ts,smallint_type_col_name) VALUES ("2016-12-16 13:28:54", 11705);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,double_type_tag_name,bool_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,f,int_type_tag_name,extratag) TAGS(-7641570078560113172, -1.5720182754543317e+308, False, 3978085195, "otsbskgvmxxpqsuv", 1.761118847494028e+38, -798787056, 1618027135) (ts,tinyint_type_col_name,usmallint_type_col_name,float_type_col_name,varchar_type_col_name,color) VALUES ("2016-12-16 13:30:00", 34, -27037, 8.233875034151866e+37, "klvhnzbiagpaygjr", "azavalppgglodkpt");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,smallint_type_tag_name,nchar_type_tag_name,b,extratag,usmallint_type_tag_name,tinyint_type_tag_name,varchar_type_tag_name,f,varbinary_type_tag_name,geometry_type_tag_name) TAGS(-113, 26916, "hiabnhvkskqdxcft", "rilirkzfvvcsoxvu", 1985911354, 63961, -49, "tkqjhnbsfqlkrmen", 2.4436889435795517e+38, "ljxwjovyuimnwfyn", "point(1.0 1.0)") (ts,float_type_col_name,speed,double_type_col_name,nchar_type_col_name,tinyint_type_col_name,smallint_type_col_name,geometry_type_col_name,utinyint_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 13:32:10", -2.8391080467349558e+38, -334011151, -6.231241486564915e+307, "yikfspsjjzvjmiig", 65, 32623, "point(1.0 1.0)", -79, "vqcenjbphvbzuyec");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,bool_type_tag_name,b,nchar_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,varchar_type_tag_name) TAGS(7.628181285293382e+37, False, "xasbwapuqiwnuapm", "wgbkfpitzqilvgam", 1495376777, -22, "cyihglhaaqdhsbrt") (ts,bigint_type_col_name,ubigint_type_col_name,color) VALUES ("2016-12-16 13:33:30", 6511707951588009422, 2723033375589066235, "gxikhegxccxagvge");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,bool_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,uint_type_tag_name,varchar_type_tag_name) TAGS(106, 142, 1896023978688432555, 64760, True, "kbfjnghakyfiaydm", "point(1.0 1.0)", 1202839796, "hmpxrvvhazkoxktl") 
(ts,color,double_type_col_name,geometry_type_col_name,float_type_col_name,ubigint_type_col_name,varbinary_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 13:34:57", "rnlnepdxpkshnoon", -9.753180446674174e+307, "point(1.0 1.0)", 7.136745784580883e+37, -5844625605864726019, "cjivuqrqtdkhbiwa", 2274288423212647405);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,geometry_type_tag_name) TAGS("nebcguavuaezyyde", 1.2787498546480036e+308, -102, -1, "point(1.0 1.0)") (ts,bool_type_col_name,geometry_type_col_name,utinyint_type_col_name,usmallint_type_col_name) VALUES ("2016-12-16 13:37:33", True, "point(1.0 1.0)", 60, 3057);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(f,bigint_type_tag_name,utinyint_type_tag_name,double_type_tag_name,usmallint_type_tag_name,int_type_tag_name,b) TAGS(-3.3009424548590525e+38, 7360584785531994343, 20, 1.426103197404768e+308, 47478, -1878972741, "nscbvzswvnmgwucm") (ts,smallint_type_col_name,geometry_type_col_name,utinyint_type_col_name,bool_type_col_name,uint_type_col_name) VALUES ("2016-12-16 13:37:39", -27464, "point(1.0 1.0)", 112, True, 1250310797);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,double_type_tag_name,smallint_type_tag_name,usmallint_type_tag_name,varbinary_type_tag_name) TAGS(-1589972917066394459, 8.923822004171481e+307, 15758, 5488, "ruuxldxnhfwxpstv") (ts,uint_type_col_name,color,bool_type_col_name,ubigint_type_col_name,smallint_type_col_name,varchar_type_col_name,varbinary_type_col_name,float_type_col_name,usmallint_type_col_name,double_type_col_name,nchar_type_col_name,geometry_type_col_name) VALUES ("2016-12-16 13:44:22", 963762484, "wlcggiilxpahshwd", True, 3728201935390266604, -2468, "edlyrnopkadknbig", "odaximfsofanwsiq", 1.2948243726798647e+38, 22342, 1.6685972624971541e+308, "ebxjnldwzyxugscc", "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,usmallint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,bigint_type_tag_name,uint_type_tag_name,b,int_type_tag_name,ubigint_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name) TAGS(-88, 51955, 29505, "point(1.0 1.0)", -6962148594230058647, 2678324900, "kygvkaanownlmuny", -1846762302, -785520152137980973, 49, "oxptoddkmvlzukes") (ts,tinyint_type_col_name,varchar_type_col_name,bigint_type_col_name) VALUES ("2016-12-16 13:49:22", 31, "nicxhhisdvunqmyq", 8945862635913752172);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,b,varchar_type_tag_name,int_type_tag_name,bigint_type_tag_name) TAGS(23202, "imrskuasopukglor", "bvizzkkpezerxhqu", 317353908, 754217902580887681) (ts,geometry_type_col_name) VALUES ("2016-12-16 13:53:17", "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,double_type_tag_name,ubigint_type_tag_name,bigint_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,usmallint_type_tag_name,b,extratag,f,varchar_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,smallint_type_tag_name) TAGS(-1011316156, 122, 3910130646, 43, 8.512388268116912e+307, 1669456457022538791, 2195079875842603602, "erjqjbgtlgvbwwja", True, 42205, "wovnwgvyflnrbkjr", 1404754906, 2.2368721201787224e+38, "yjisfzjurskvjdck", "vmjcbkwgfouvenyq", 
"point(1.0 1.0)", -6290) (ts,bool_type_col_name,ubigint_type_col_name,speed,tinyint_type_col_name,usmallint_type_col_name,utinyint_type_col_name,nchar_type_col_name,double_type_col_name,varbinary_type_col_name,smallint_type_col_name,uint_type_col_name,bigint_type_col_name,geometry_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 13:55:15", False, -8830864698607791783, 200778707, 80, -12482, -108, "qlzsnerbfpxtrhsu", 7.918325930212065e+307, "bkeuitzjnwepwsoz", -24175, 1796806861, 8456809260387320755, "point(1.0 1.0)", "bfhekhbyzfnbfvbc");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,b,nchar_type_tag_name,extratag,double_type_tag_name,smallint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name,bool_type_tag_name,f,usmallint_type_tag_name,ubigint_type_tag_name,geometry_type_tag_name) TAGS(-109100963, "fkktjbsjdjozysqb", "kvckvuuvjkhrwvum", "svohobpldtonpiqs", "bgrvrzilrbtkzaim", 1058428584, 7.196238081264505e+306, 1884, 2991872141, -109, 117, 1510579019662137511, False, 1.2245033771852769e+38, 19364, -4972876200109100193, "point(1.0 1.0)") (ts,nchar_type_col_name,utinyint_type_col_name,varbinary_type_col_name,bigint_type_col_name,geometry_type_col_name,ubigint_type_col_name,smallint_type_col_name,bool_type_col_name,speed,color,uint_type_col_name) VALUES ("2016-12-16 13:58:17", "fhofoqbrebyjlnkk", 78, "jbttouyukxktabti", 715764060196867324, "point(1.0 1.0)", -3965830703103714728, 6591, True, -2043004502, "ipjtaiujqrieidut", -1209525645);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,bool_type_tag_name,smallint_type_tag_name,f,bigint_type_tag_name) TAGS("rqnninzykiejsiaq", 2744710939, 125, True, -3120, 2.294739380866984e+38, 3223998974927684831) (ts,nchar_type_col_name,smallint_type_col_name,bigint_type_col_name,geometry_type_col_name,color,ubigint_type_col_name,float_type_col_name,varchar_type_col_name,double_type_col_name,varbinary_type_col_name,speed,utinyint_type_col_name) VALUES ("2016-12-16 13:58:26", "dpuonivwjikfanqj", 28149, -8363753204688150611, "point(1.0 1.0)", "deqhimtcneshajxu", -7721659794023728322, 3.1823124307599837e+38, "lmykqmofvaeejays", 1.3450730693544848e+308, "cpvchycerzkkriye", -935361460, -32);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varbinary_type_tag_name,b) TAGS("gzexbtkchcrciore", "xzsaapvhaldvnsee", "luhcuujwpvsqcydj") (ts,varbinary_type_col_name,uint_type_col_name,smallint_type_col_name) VALUES ("2016-12-16 14:17:37", "vegoocllfotccxme", 1106951626, 22765);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,nchar_type_tag_name,b,varchar_type_tag_name,bigint_type_tag_name,tinyint_type_tag_name) TAGS(16570, "silgmziwpdxxoyek", "yqvguhekzckruinc", "aajlxcknjrbyhkha", -8045441217037238680, 107) (ts,varchar_type_col_name,float_type_col_name,speed,utinyint_type_col_name,bool_type_col_name,color) VALUES ("2016-12-16 14:19:46", "heftclalnprpfawp", 6.0294286136178425e+37, 302462364, -8, False, "zdadyskvxrgyaqsm");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,varbinary_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,nchar_type_tag_name) TAGS(10191, "lhxvlfkfbtrhsjkt", 124, 77770857, "dmsjeojxddyuxink") 
(ts,tinyint_type_col_name,speed,uint_type_col_name,bool_type_col_name,varbinary_type_col_name,double_type_col_name,color) VALUES ("2016-12-16 14:32:40", 21, 379371801, 608762533, False, "rjgbwgnnkkemctro", 1.4188655662190125e+308, "sbwlxrlpgnkmvadl");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,int_type_tag_name) TAGS(False, 2060765790) (ts) VALUES ("2016-12-16 14:59:59");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,b) TAGS("ieyyywpqpqushibb", "dkvjhgnakkgqzijs") (ts) VALUES ("2016-12-16 15:16:49");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,bigint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,int_type_tag_name,bool_type_tag_name,extratag,b,geometry_type_tag_name,f) TAGS("mgatrnymxrqqmdyx", "oljnaeurozmvfqhp", 48759, 38, 3.403325685451397e+307, -6460116231033920128, 1576382718, 104, -603843450, True, 309025710, "tcbaxjfsqakxpvji", "point(1.0 1.0)", 2.8892847575707817e+38) (ts,usmallint_type_col_name,ubigint_type_col_name,nchar_type_col_name,uint_type_col_name,double_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 15:20:44", -17861, 7575197484024879527, "txwlduparjczcbnx", -2111059045, 4.196810591621617e+307, "asimawbydzjggkce");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,bigint_type_tag_name,double_type_tag_name) TAGS(58633, -7506966660525849170, 6.430296151375471e+307) (ts,smallint_type_col_name,color,bigint_type_col_name,geometry_type_col_name,speed,nchar_type_col_name,float_type_col_name,double_type_col_name,varchar_type_col_name,usmallint_type_col_name,uint_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 15:51:23", -16594, "twlhrkfaqrwnjumx", -6411319415230721292, "point(1.0 1.0)", 1147845389, "dkuebijphjfmniyc", -2.276532994058693e+38, -4.871085226210322e+307, "ttundubdmhuwtaaa", -29548, -424312121, "zgvfwozyxijapwho");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(uint_type_tag_name,int_type_tag_name) TAGS(4147856182, 710788816) (ts,uint_type_col_name,double_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 16:03:05", -391718933, 9.11108254173214e+306, "mtfdzaczermguiog");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varbinary_type_tag_name,int_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,bool_type_tag_name,nchar_type_tag_name,bigint_type_tag_name,varchar_type_tag_name,extratag,b,smallint_type_tag_name,ubigint_type_tag_name,usmallint_type_tag_name) TAGS("abhddtiwgwsawqif", -326574137, 3092694575, 117, True, "elqkqieyrmvxdfal", 513325224677025259, "coxihpudvcdmvavx", 949926470, "jtbtsxdafkcutiyq", 18084, -3755127971745692605, 15049) (ts,color,uint_type_col_name,bigint_type_col_name,ubigint_type_col_name,bool_type_col_name,geometry_type_col_name,tinyint_type_col_name,varchar_type_col_name,speed,usmallint_type_col_name,varbinary_type_col_name,double_type_col_name,smallint_type_col_name,nchar_type_col_name,float_type_col_name) VALUES ("2016-12-16 16:10:12", "geiqxfxzxvcisxvq", 404296035, 6018544511539939806, -2723124593683508517, False, "point(1.0 1.0)", -35, "vpgmwoviojtaksyc", -1590408139, -21520, "yivgshgarquqsmkx", -5.091863294679578e+307, -6162, "hwxnlpshligbgdwr", 2.8760534412729593e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(geometry_type_tag_name,f,nchar_type_tag_name,double_type_tag_name,ubigint_type_tag_name,extratag,b,varbinary_type_tag_name) TAGS("point(1.0 1.0)", -8.890969508566344e+37, "olgmpbkzjdyaaxkw", 1.1670001553372841e+308, -6483556380230416046, -1489326043, "kavalxtywfupgxae", "nimnqvcrgffqrnry") (ts,geometry_type_col_name,double_type_col_name,varchar_type_col_name,tinyint_type_col_name,nchar_type_col_name,bool_type_col_name,float_type_col_name,utinyint_type_col_name,usmallint_type_col_name,bigint_type_col_name,speed,uint_type_col_name,smallint_type_col_name,color) VALUES ("2016-12-16 16:31:17", "point(1.0 1.0)", -1.6228106351276176e+308, "mrbiqastjhshphqy", -31, "sldcopievsrcpmdn", True, 1.2788940507924604e+38, -53, 1934, -93937490102293875, 251909968, -1180421292, 2009, "fvcuvlqkjdivmbhf");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(nchar_type_tag_name,varchar_type_tag_name,uint_type_tag_name,geometry_type_tag_name,extratag,bigint_type_tag_name,double_type_tag_name,usmallint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,ubigint_type_tag_name,f) TAGS("hcwwkenozisgewdo", "ftzlnzgbqcyttqkz", 557595811, "point(1.0 1.0)", -1874674248, -2833874801024332821, 1.106348478672925e+308, 10205, -377441795, "nlqfaejgqdmixvco", True, -8116329742748132558, 2.0010693036315465e+37) (ts,varbinary_type_col_name,smallint_type_col_name,geometry_type_col_name,tinyint_type_col_name,usmallint_type_col_name,color,nchar_type_col_name,bigint_type_col_name,utinyint_type_col_name,bool_type_col_name,ubigint_type_col_name,speed,double_type_col_name,float_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 16:59:27", "kjukvijrgrplxrqt", 15581, "point(1.0 1.0)", 94, 25505, "btgdgtpeqlczgeua", "sstxtgzlyuxqmpla", -8232897057478421459, -38, False, 1902106109546699459, -2035112348, -6.609585770864842e+307, -5.473155208537856e+37, "wkymamybnhbkbwod");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(b,extratag,varchar_type_tag_name,double_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name,geometry_type_tag_name,varbinary_type_tag_name,bool_type_tag_name) TAGS("obtjobsephyqxyky", 1926001254, "scxtotlkrgfzsqej", 8.631070766476075e+307, 116, -122, "point(1.0 1.0)", "venokzvoaqfytamy", False) (ts,tinyint_type_col_name,varchar_type_col_name,utinyint_type_col_name,geometry_type_col_name,varbinary_type_col_name,ubigint_type_col_name,nchar_type_col_name,bool_type_col_name,double_type_col_name,bigint_type_col_name,float_type_col_name,smallint_type_col_name,uint_type_col_name) VALUES ("2016-12-16 17:09:37", 38, "wihymoxehtmockaj", 119, "point(1.0 1.0)", "lamqyqtuicaumtur", 228827014834750467, "wjeizsgkbechyzya", True, -4.942776530531653e+307, -7607643892481897470, -8.442862379478159e+36, -19143, -2117225342);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(utinyint_type_tag_name,varchar_type_tag_name,f,smallint_type_tag_name,tinyint_type_tag_name,double_type_tag_name,varbinary_type_tag_name,extratag,nchar_type_tag_name,bool_type_tag_name,bigint_type_tag_name,usmallint_type_tag_name,geometry_type_tag_name) TAGS(-103, "rlxmlphalsiwfxkb", -3.256319143371585e+38, -23823, -50, -4.530638219225517e+307, "ycpjvlrnkgdcxbxl", -1782343000, "eibzodmwkyfectbm", False, -3662763902289367996, 11348, "point(1.0 1.0)") (ts,color,bool_type_col_name,uint_type_col_name) VALUES ("2016-12-16 17:13:24", "bpczdwxzdhdlacff", True, 853512564);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using 
td32506.fs_table(f,geometry_type_tag_name,double_type_tag_name,usmallint_type_tag_name,b,uint_type_tag_name,varbinary_type_tag_name,varchar_type_tag_name,bool_type_tag_name,bigint_type_tag_name,extratag,smallint_type_tag_name,int_type_tag_name,tinyint_type_tag_name,nchar_type_tag_name) TAGS(2.8559644361518244e+38, "point(1.0 1.0)", -8.660488863866903e+307, 51580, "mfdzkxvqcmvlwcab", 1370077655, "gsocrcrzmrbagyvu", "gwtxcyfmdzlvijkk", False, 4749716129372507648, -1343610171, 3648, 67398117, 55, "onuoalimpzxphmrb") (ts,varchar_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 17:19:38", "lbdzxuiflnytuwcm", "qyiblgcvjolvnfxd");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bool_type_tag_name,utinyint_type_tag_name,varchar_type_tag_name,b,nchar_type_tag_name,double_type_tag_name,uint_type_tag_name,varbinary_type_tag_name,bigint_type_tag_name,ubigint_type_tag_name,f,int_type_tag_name,usmallint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name) TAGS(True, -128, "ghxlfvebdngntlsy", "gyshejexhflthjyt", "csoufooqhjixarep", -3.560079945924916e+307, 3796289973, "xrwzaykbqpnpowzg", -5254959553491612400, 6959026343484215526, -2.426597157147208e+38, 1953534210, 56530, -27622, "point(1.0 1.0)") (ts,nchar_type_col_name,varbinary_type_col_name,varchar_type_col_name,uint_type_col_name,speed,ubigint_type_col_name,double_type_col_name,usmallint_type_col_name,bool_type_col_name,bigint_type_col_name,tinyint_type_col_name,color,float_type_col_name,geometry_type_col_name,smallint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 17:28:43", "bxarvuznezbovivi", "ljmaaewnxbwquqbv", "vmttcfyjpofkfxdz", -160343511, -1153710436, -6078325270497694207, -1.6708621643614452e+308, -18667, False, -5283782545471513173, 34, "xmbhflvmvnjsjukp", 1.7453583209432804e+38, "point(1.0 1.0)", 21766, 96);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(varchar_type_tag_name,ubigint_type_tag_name,bool_type_tag_name,nchar_type_tag_name,varbinary_type_tag_name,int_type_tag_name,b,utinyint_type_tag_name) TAGS("xzxnwavsfkqexqst", 6248701313661488236, False, "tfzyiigcdgdviljn", "pyaziteptbnjwetv", 1550386893, "hvgiajgmbrfjbhzv", -50) (ts,uint_type_col_name,ubigint_type_col_name,utinyint_type_col_name,nchar_type_col_name) VALUES ("2016-12-16 17:29:35", -2140028488, 9126620526143931960, 48, "vzlvftkxtgejntwx");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(int_type_tag_name,f,varchar_type_tag_name) TAGS(962788507, 1.835407119522404e+38, "dcugcviilgfzpjup") (ts,usmallint_type_col_name,smallint_type_col_name,bigint_type_col_name,geometry_type_col_name,ubigint_type_col_name,speed,nchar_type_col_name,double_type_col_name,float_type_col_name) VALUES ("2016-12-16 17:33:20", 2254, 28827, -8316728271968247646, "point(1.0 1.0)", 6630573292051824500, -1180210336, "bvbksqzbwgmpufzu", 2.591615616226201e+307, 1.8393414054114685e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,b,f,double_type_tag_name,ubigint_type_tag_name,int_type_tag_name,varbinary_type_tag_name,bool_type_tag_name,smallint_type_tag_name,tinyint_type_tag_name,uint_type_tag_name,varchar_type_tag_name,usmallint_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,extratag) TAGS(-1652523720828862491, "rshcsvsqtejnnyzt", -2.8033588099999885e+38, 1.1267740923697902e+308, 6134475623749100063, 275423891, "symfelluxyjghegv", True, 28636, 15, 2850861607, "dluujqzooplynbpi", 11747, "xkepcycandqyqhng", "point(1.0 1.0)", 28000589) 
(ts,float_type_col_name) VALUES ("2016-12-16 17:35:20", -2.768468628882595e+38);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(usmallint_type_tag_name,utinyint_type_tag_name,b,varbinary_type_tag_name,varchar_type_tag_name,smallint_type_tag_name,nchar_type_tag_name,tinyint_type_tag_name,bigint_type_tag_name) TAGS(40614, -17, "xwadkvwyxpirlkuv", "tfgddotpqkdjbtbh", "weehvoxpqcioyipx", -25537, "xtrykrvmibasrfpk", 64, 2611368116480765912) (ts,color,geometry_type_col_name,usmallint_type_col_name,float_type_col_name,smallint_type_col_name,bigint_type_col_name,utinyint_type_col_name) VALUES ("2016-12-16 17:37:51", "jdyzckzuwxieaecb", "point(1.0 1.0)", -27205, -1.711911078586609e+38, 31375, -6123067329853056480, -90);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,usmallint_type_tag_name) TAGS(1819937892824962656, 8171) (ts,ubigint_type_col_name,float_type_col_name,bool_type_col_name,uint_type_col_name,color,double_type_col_name) VALUES ("2016-12-16 17:43:42", -4609243792627697209, 2.006334657982245e+38, False, 1603774652, "czkpwgrrldvfefpq", -5.301467469348041e+307);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(bigint_type_tag_name,b,usmallint_type_tag_name,int_type_tag_name,uint_type_tag_name,double_type_tag_name,tinyint_type_tag_name,utinyint_type_tag_name) TAGS(1581454524159189903, "aauchczywgtewzec", 10564, -1624200982, 1778830750, 1.007077574465233e+308, -117, -8) (ts,double_type_col_name,color,nchar_type_col_name,utinyint_type_col_name,bigint_type_col_name,uint_type_col_name,tinyint_type_col_name,varchar_type_col_name) VALUES ("2016-12-16 17:47:52", 3.178641188947165e+307, "plewvtmwmncovgwi", "afkkkizxcmpvjrof", 97, -4082154569696236500, 785036327, 36, "tnjnpkqduaiplcbr");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name) TAGS(-9259) (ts,ubigint_type_col_name,utinyint_type_col_name,varbinary_type_col_name,bool_type_col_name,varchar_type_col_name,nchar_type_col_name,geometry_type_col_name,usmallint_type_col_name,speed) VALUES ("2016-12-16 17:50:22", 6575849309295898287, -110, "ebiistxhnphfulox", False, "djlqhenlzryusbkd", "ltldluztvsgqjeku", "point(1.0 1.0)", 8278, 1651340708);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(double_type_tag_name,nchar_type_tag_name,geometry_type_tag_name,smallint_type_tag_name,utinyint_type_tag_name,extratag) TAGS(1.0764964917532312e+308, "ykuhegoyikhmcgre", "point(1.0 1.0)", 9094, 74, 1323341877) (ts,varbinary_type_col_name) VALUES ("2016-12-16 17:51:12", "nafbgufapnohskpj");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(smallint_type_tag_name,double_type_tag_name) TAGS(28583, 1.953720446700205e+307) (ts,nchar_type_col_name,varbinary_type_col_name,utinyint_type_col_name,geometry_type_col_name,varchar_type_col_name,speed) VALUES ("2016-12-16 18:11:59", "qwqpnycixypxdgkx", "sqdczulupdskltin", -2, "point(1.0 1.0)", "gbqmwfvaobmucwws", -1295442484);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(ubigint_type_tag_name,uint_type_tag_name,utinyint_type_tag_name,nchar_type_tag_name,bigint_type_tag_name,tinyint_type_tag_name,geometry_type_tag_name,f,int_type_tag_name,double_type_tag_name,varchar_type_tag_name,extratag,bool_type_tag_name,smallint_type_tag_name,b,usmallint_type_tag_name) TAGS(-1899740088608098071, 3000382785, -90, "lyiqgjrtcmygsulu", 3674173029313761846, 24, "point(1.0 1.0)", -2.0614150673404532e+38, 
-83384014, 8.266327304822514e+307, "hmriafyufcvjdmxp", -1458878109, True, 2751, "ldrlpabgmvlmmvpg", 14211) (ts,color,ubigint_type_col_name,uint_type_col_name,smallint_type_col_name,varbinary_type_col_name,speed,tinyint_type_col_name,double_type_col_name,float_type_col_name,varchar_type_col_name,usmallint_type_col_name,geometry_type_col_name,bool_type_col_name) VALUES ("2016-12-16 18:14:29", "hlfayalksfvnguaw", 7446576754933889227, -200330992, 28598, "bqqqhufmncpjhsuv", 218558271, -85, -9.741398605743586e+307, 7.376357821892906e+37, "tpvxybkpxgmhowjr", 25868, "point(1.0 1.0)", False);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,nchar_type_tag_name,usmallint_type_tag_name,uint_type_tag_name,varchar_type_tag_name,b,int_type_tag_name,utinyint_type_tag_name,varbinary_type_tag_name,double_type_tag_name,bigint_type_tag_name,smallint_type_tag_name,f,ubigint_type_tag_name,bool_type_tag_name) TAGS("point(1.0 1.0)", "sjzthkwujsqenwih", 48703, 4087817577, "xmqagtnxequrrylr", "ijtbyhilxioxzuah", -352262545, 1, "sckpgzmooxqjheoe", 1.1198965017615236e+308, 6916146561643120258, 32531, -1.6419733589465474e+38, -4935693961629680589, False) (ts,nchar_type_col_name,geometry_type_col_name,double_type_col_name,varbinary_type_col_name,varchar_type_col_name,bigint_type_col_name,smallint_type_col_name,tinyint_type_col_name) VALUES ("2016-12-16 18:16:09", "jssdvwbxvsbmilue", "point(1.0 1.0)", 6.391478347681934e+307, "zdtetttfsnleoyet", "ppoptsqilikhsino", -840467639559539794, -31830, -103);') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(geometry_type_tag_name,smallint_type_tag_name,bigint_type_tag_name,int_type_tag_name,utinyint_type_tag_name,f,uint_type_tag_name,tinyint_type_tag_name) TAGS("point(1.0 1.0)", 24091, 2578286530814614745, 1008675772, -8, 7.362335563770809e+37, 1819796668, 44) (ts,utinyint_type_col_name,bigint_type_col_name,tinyint_type_col_name,ubigint_type_col_name,smallint_type_col_name,varbinary_type_col_name,float_type_col_name,speed,varchar_type_col_name,bool_type_col_name,usmallint_type_col_name,color,geometry_type_col_name) VALUES ("2016-12-16 18:16:59", 93, -2924293332667446092, 18, -1926645562871095772, 25986, "xjlujcacaynnnmub", -9.456838052905681e+37, 529848418, "nxkirdhlqqloitkl", True, 4620, "acujdfhatzkugcrk", "point(1.0 1.0)");') + tdSql.execute(f'INSERT INTO td32506.reg_table_159 using td32506.fs_table(tinyint_type_tag_name,usmallint_type_tag_name,b,f,varbinary_type_tag_name,bigint_type_tag_name,smallint_type_tag_name,geometry_type_tag_name,varchar_type_tag_name,extratag,utinyint_type_tag_name,uint_type_tag_name,ubigint_type_tag_name,double_type_tag_name,bool_type_tag_name,int_type_tag_name) TAGS(-98, 2946, "qgvkaujyrikpmgxp", 3.2784411370027565e+38, "dspxtsyqnqdyciab", -794888918858663169, -1378, "point(1.0 1.0)", "diaerlydilfkqveu", -1591170700, -9, 88084273, -8326438069324931384, -6.790019844508353e+307, False, 1326851510) (ts,speed,double_type_col_name,geometry_type_col_name,ubigint_type_col_name,bool_type_col_name,utinyint_type_col_name,bigint_type_col_name,usmallint_type_col_name,color,smallint_type_col_name,varchar_type_col_name,nchar_type_col_name,float_type_col_name,varbinary_type_col_name) VALUES ("2016-12-16 18:17:51", -1374792144, -1.149850424042433e+308, "point(1.0 1.0)", 8907011909602006795, False, 104, 1588514034331429385, -18984, "namrvnonxcrzziic", -26616, "ueddmjfyoclvjwfx", "mkkexxinfededdfr", -2.403953587086065e+38, "kadxrwlomevmukfr");') + + def test_percentile(self): + 
tdSql.error('SELECT SPREAD(bigint_type_tag_name),LEASTSQUARES(bigint_type_tag_name, 9, 2),COUNT(bigint_type_tag_name),HYPERLOGLOG(bigint_type_tag_name),PERCENTILE(bigint_type_tag_name,89,27,20,19,87,99,26,17,45),AVG(bigint_type_tag_name), HYPERLOGLOG(tinyint_type_tag_name),AVG(tinyint_type_tag_name),SPREAD(tinyint_type_tag_name),STDDEV(tinyint_type_tag_name),SUM(tinyint_type_tag_name),COUNT(tinyint_type_tag_name),APERCENTILE(tinyint_type_tag_name, 1, "t-digest"),LEASTSQUARES(tinyint_type_tag_name, 8, 1), STDDEV(usmallint_type_tag_name), COUNT(nchar_type_tag_name),HYPERLOGLOG(nchar_type_tag_name), PERCENTILE(double_type_col_name,6,23,52,24,1,53,95,51,38),HYPERLOGLOG(double_type_col_name),SPREAD(double_type_col_name) FROM td32506.reg_table_159 STATE_WINDOW(1);') + + def run(self): + tdLog.debug(f"start to excute {__file__}") + + self.insert_data() + + # math function + self.test_percentile() + + tdLog.success(f"{__file__} successfully executed") + + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 216be82868..d389c27929 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -15,6 +15,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f cluster/snapshot.py -N 3 -L 3 -D 2 ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_func_elapsed.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_function.py +,,y,army,./pytest.sh python3 ./test.py -f query/function/test_percentile.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_resinfo.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/test_interp.py ,,y,army,./pytest.sh python3 ./test.py -f query/function/concat.py From 6bb0d6dff3c8d213a8a44e740bb31a40923c9273 Mon Sep 17 00:00:00 2001 From: haoranchen Date: Mon, 4 Nov 2024 10:17:07 +0800 Subject: [PATCH 086/127] Update 01-taosd.md --- docs/zh/14-reference/01-components/01-taosd.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/14-reference/01-components/01-taosd.md b/docs/zh/14-reference/01-components/01-taosd.md index 091f9416eb..89a97b108d 100644 --- a/docs/zh/14-reference/01-components/01-taosd.md +++ b/docs/zh/14-reference/01-components/01-taosd.md @@ -156,8 +156,8 @@ charset 的有效值是 UTF-8。 ### 内存相关 | 参数名称 | 参数说明 | | :----------------: | :---------------------------------------------: | -| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围:10485760-INT64_MAX,缺省值:服务器内存的 1/10 | -| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围:10485760-INT64_MAX,缺省值:服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 | +| rpcQueueMemoryAllowed | 一个 dnode 允许的 rpc 消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10 | +| syncLogBufferMemoryAllowed | 一个 dnode 允许的 sync 日志缓存消息占用的内存最大值,单位 bytes,取值范围:104857600-INT64_MAX,缺省值:服务器内存的 1/10,3.1.3.2/3.3.2.13 版本开始生效 | ### 性能调优 From 5e19ac7485ef099c137040b5cac39bd8a78cbe50 Mon Sep 17 00:00:00 2001 From: yinheli Date: Wed, 8 May 2024 19:06:35 +0800 Subject: [PATCH 087/127] fix: typo --- docs/zh/14-reference/03-taos-sql/03-table.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/zh/14-reference/03-taos-sql/03-table.md b/docs/zh/14-reference/03-taos-sql/03-table.md index 9e4cc66eaf..81ad60e3d2 100644 --- a/docs/zh/14-reference/03-taos-sql/03-table.md +++ b/docs/zh/14-reference/03-taos-sql/03-table.md @@ -42,12 +42,12 @@ table_option: { **使用说明** 1. 表(列)名命名规则参见[名称命名规则](./19-limit.md#名称命名规则)。 -1. 
表名最大长度为 192。 -1. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键。 -1. 除时间戳主键列之外,还可以通过 PRIMARY KEY 关键字指定第二列为额外的主键列。被指定为主键列的第二列必须为整型或字符串类型(varchar)。 -1. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 BINARY/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。 -1. 使用数据类型 BINARY/NCHAR/GEOMETRY,需指定其最长的字节数,如 BINARY(20),表示 20 字节。 -1. 关于 `ENCODE` 和 `COMPRESS` 的使用,请参考[按列压缩](../compress) +2. 表名最大长度为 192。 +3. 表的第一个字段必须是 TIMESTAMP,并且系统自动将其设为主键。 +4. 除时间戳主键列之外,还可以通过 PRIMARY KEY 关键字指定第二列为额外的主键列。被指定为主键列的第二列必须为整型或字符串类型(VARCHAR)。 +5. 表的每行长度不能超过 48KB(从 3.0.5.0 版本开始为 64KB);(注意:每个 VARCHAR/NCHAR/GEOMETRY 类型的列还会额外占用 2 个字节的存储位置)。 +6. 使用数据类型 VARCHAR/NCHAR/GEOMETRY,需指定其最长的字节数,如 VARCHAR(20),表示 20 字节。 +7. 关于 `ENCODE` 和 `COMPRESS` 的使用,请参考[按列压缩](../compress) **参数说明** From 0f535e06aaec3b3f97ed8e7113a3f46e475cf562 Mon Sep 17 00:00:00 2001 From: dmchen Date: Mon, 4 Nov 2024 11:12:18 +0800 Subject: [PATCH 088/127] feat/TS-5484-audit-delete-fix-review --- source/common/src/tmsg.c | 13 +++++++++++++ source/dnode/mnode/impl/src/mndDnode.c | 3 +++ 2 files changed, 16 insertions(+) diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c index 8b55f2c4e9..458badc764 100644 --- a/source/common/src/tmsg.c +++ b/source/common/src/tmsg.c @@ -567,6 +567,7 @@ int32_t tSerializeSClientHbBatchRsp(void *buf, int32_t bufLen, const SClientHbBa TAOS_CHECK_EXIT(tSerializeSClientHbRsp(&encoder, pRsp)); } TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pBatchRsp->monitorParas)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pBatchRsp->enableAuditDelete)); tEndEncode(&encoder); _exit: @@ -609,6 +610,12 @@ int32_t tDeserializeSClientHbBatchRsp(void *buf, int32_t bufLen, SClientHbBatchR TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pBatchRsp->monitorParas)); } + if (!tDecodeIsEnd(&decoder)) { + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pBatchRsp->enableAuditDelete)); + } else { + pBatchRsp->enableAuditDelete = 0; + } + tEndDecode(&decoder); _exit: @@ -6348,6 +6355,7 @@ int32_t tSerializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { TAOS_CHECK_EXIT(tEncodeI32(&encoder, pRsp->authVer)); TAOS_CHECK_EXIT(tEncodeI64(&encoder, pRsp->whiteListVer)); TAOS_CHECK_EXIT(tSerializeSMonitorParas(&encoder, &pRsp->monitorParas)); + TAOS_CHECK_EXIT(tEncodeI8(&encoder, pRsp->enableAuditDelete)); tEndEncode(&encoder); _exit: @@ -6399,6 +6407,11 @@ int32_t tDeserializeSConnectRsp(void *buf, int32_t bufLen, SConnectRsp *pRsp) { if (!tDecodeIsEnd(&decoder)) { TAOS_CHECK_EXIT(tDeserializeSMonitorParas(&decoder, &pRsp->monitorParas)); } + if (!tDecodeIsEnd(&decoder)) { + TAOS_CHECK_EXIT(tDecodeI8(&decoder, &pRsp->enableAuditDelete)); + } else { + pRsp->enableAuditDelete = 0; + } tEndDecode(&decoder); _exit: diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 08e9886df4..24ae8382f9 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c +++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -607,12 +607,15 @@ static int32_t mndProcessStatisReq(SRpcMsg *pReq) { } static int32_t mndProcessAuditReq(SRpcMsg *pReq) { + mTrace("process audit req:%p", pReq); if (tsEnableAudit && tsEnableAuditDelete) { SMnode *pMnode = pReq->info.node; SAuditReq auditReq = {0}; TAOS_CHECK_RETURN(tDeserializeSAuditReq(pReq->pCont, pReq->contLen, &auditReq)); + mDebug("received audit req:%s, %s, %s, %s", auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql); + auditAddRecord(pReq, pMnode->clusterId, auditReq.operation, auditReq.db, auditReq.table, auditReq.pSql, auditReq.sqlLen); From 031146b3b5316ba203b8802c20212b64e7219163 Mon Sep 17 00:00:00 2001 From: 
Shengliang Guan Date: Mon, 4 Nov 2024 11:53:24 +0800 Subject: [PATCH 089/127] fix: check memory calloc --- source/libs/function/src/tudf.c | 34 ++++++++++++++++++++++++++------- 1 file changed, 27 insertions(+), 7 deletions(-) diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c index b19320d27f..a8198a804d 100644 --- a/source/libs/function/src/tudf.c +++ b/source/libs/function/src/tudf.c @@ -77,6 +77,8 @@ void udfUdfdExit(uv_process_t *process, int64_t exitStatus, int32_t termSignal) static int32_t udfSpawnUdfd(SUdfdData *pData) { fnInfo("start to init udfd"); + + int32_t err = 0; uv_process_options_t options = {0}; char path[PATH_MAX] = {0}; @@ -177,22 +179,39 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { char **envUdfdWithPEnv = NULL; if (environ != NULL) { + int32_t lenEnvUdfd = ARRAY_SIZE(envUdfd); int32_t numEnviron = 0; while (environ[numEnviron] != NULL) { numEnviron++; } - int32_t lenEnvUdfd = ARRAY_SIZE(envUdfd); - envUdfdWithPEnv = (char **)taosMemoryMalloc((numEnviron + lenEnvUdfd) * sizeof(char *)); + envUdfdWithPEnv = (char **)taosMemoryCalloc(numEnviron + lenEnvUdfd, sizeof(char *)); + if (envUdfdWithPEnv == NULL) { + err = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } for (int32_t i = 0; i < numEnviron; i++) { - envUdfdWithPEnv[i] = (char *)taosMemoryMalloc(strlen(environ[i]) + 1); - strcpy(envUdfdWithPEnv[i], environ[i]); + int32_t len = strlen(environ[i]) + 1; + envUdfdWithPEnv[i] = (char *)taosMemoryCalloc(len, 1); + if (envUdfdWithPEnv[i] == NULL) { + err = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + tstrncpy(envUdfdWithPEnv[i], environ[i], len); } + for (int32_t i = 0; i < lenEnvUdfd; i++) { if (envUdfd[i] != NULL) { - envUdfdWithPEnv[numEnviron + i] = (char *)taosMemoryMalloc(strlen(envUdfd[i]) + 1); - strcpy(envUdfdWithPEnv[numEnviron + i], envUdfd[i]); + int32_t len = strlen(envUdfd[i]) + 1; + envUdfdWithPEnv[numEnviron + i] = (char *)taosMemoryCalloc(len, 1); + if (envUdfdWithPEnv[numEnviron + i] == NULL) { + err = TSDB_CODE_OUT_OF_MEMORY; + goto _OVER; + } + + tstrncpy(envUdfdWithPEnv[numEnviron + i], envUdfd[i], len); } } envUdfdWithPEnv[numEnviron + lenEnvUdfd - 1] = NULL; @@ -202,7 +221,7 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { options.env = envUdfd; } - int32_t err = uv_spawn(&pData->loop, &pData->process, &options); + err = uv_spawn(&pData->loop, &pData->process, &options); pData->process.data = (void *)pData; #ifdef WINDOWS @@ -230,6 +249,7 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) { fnInfo("udfd is initialized"); } +_OVER: if (taosFqdnEnvItem) { taosMemoryFree(taosFqdnEnvItem); } From 83f96706c7594ab7e0a850a58101020dc06454fa Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 4 Nov 2024 11:58:25 +0800 Subject: [PATCH 090/127] fix: compile errors --- include/os/osString.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/os/osString.h b/include/os/osString.h index 995aa4a591..09a697d834 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -22,7 +22,7 @@ extern "C" { typedef wchar_t TdWchar; typedef int32_t TdUcs4; -#if !defined(DISALLOW_NCHAR_WITHOUT_ICONV) && defined(DARWIN) +#ifndef DISALLOW_NCHAR_WITHOUT_ICONV #include "iconv.h" #else typedef void *iconv_t; From 296b2a7b8e9f586ed509f5f54b1b16d291274ac6 Mon Sep 17 00:00:00 2001 From: xsren <285808407@qq.com> Date: Mon, 4 Nov 2024 12:56:12 +0800 Subject: [PATCH 091/127] cfg: enableAuditDelete --- source/client/inc/clientInt.h | 6 +++++- source/client/src/clientEnv.c | 20 ++++++++++---------- 
source/client/src/clientHb.c | 3 ++- source/client/src/clientMonitor.c | 20 +++++++++++++++----- source/client/src/clientMsgHandler.c | 3 ++- source/client/src/clientSml.c | 2 +- 6 files changed, 35 insertions(+), 19 deletions(-) diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h index 114bc00125..7aefdf5435 100644 --- a/source/client/inc/clientInt.h +++ b/source/client/inc/clientInt.h @@ -108,6 +108,10 @@ typedef struct SQueryExecMetric { int64_t execCostUs; } SQueryExecMetric; +typedef struct { + SMonitorParas monitorParas; + int8_t enableAuditDelete; +} SAppInstServerCFG; struct SAppInstInfo { int64_t numOfConns; SCorEpSet mgmtEp; @@ -121,7 +125,7 @@ struct SAppInstInfo { void* pTransporter; SAppHbMgr* pAppHbMgr; char* instKey; - SMonitorParas monitorParas; + SAppInstServerCFG serverCfg; }; typedef struct SAppInfo { diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index f892575f0a..d759abe293 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -166,11 +166,11 @@ static int32_t generateWriteSlowLog(STscObj *pTscObj, SRequestObj *pRequest, int ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "type", cJSON_CreateNumber(reqType))); ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject( json, "rows_num", cJSON_CreateNumber(pRequest->body.resInfo.numOfRows + pRequest->body.resInfo.totalRows))); - if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen) { - char tmp = pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen]; - pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = '\0'; + if (pRequest->sqlstr != NULL && strlen(pRequest->sqlstr) > pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen) { + char tmp = pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen]; + pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = '\0'; ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr))); - pRequest->sqlstr[pTscObj->pAppInfo->monitorParas.tsSlowLogMaxLen] = tmp; + pRequest->sqlstr[pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogMaxLen] = tmp; } else { ENV_JSON_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", cJSON_CreateString(pRequest->sqlstr))); } @@ -284,7 +284,7 @@ static void deregisterRequest(SRequestObj *pRequest) { } } - if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) { + if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) { if (QUERY_NODE_VNODE_MODIFY_STMT == pRequest->stmtType || QUERY_NODE_INSERT_STMT == pRequest->stmtType) { sqlReqLog(pTscObj->id, pRequest->killed, pRequest->code, MONITORSQLTYPEINSERT); } else if (QUERY_NODE_SELECT_STMT == pRequest->stmtType) { @@ -294,15 +294,15 @@ static void deregisterRequest(SRequestObj *pRequest) { } } - if ((duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThreshold * 1000000UL || - duration >= pTscObj->pAppInfo->monitorParas.tsSlowLogThresholdTest * 1000000UL) && - checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->monitorParas.tsSlowLogExceptDb)) { + if ((duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThreshold * 1000000UL || + duration >= pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogThresholdTest * 1000000UL) && + checkSlowLogExceptDb(pRequest, pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogExceptDb)) { (void)atomic_add_fetch_64((int64_t *)&pActivity->numOfSlowQueries, 1); - if (pTscObj->pAppInfo->monitorParas.tsSlowLogScope & reqType) { + if 
(pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & reqType) { taosPrintSlowLog("PID:%d, Conn:%u,QID:0x%" PRIx64 ", Start:%" PRId64 " us, Duration:%" PRId64 "us, SQL:%s", taosGetPId(), pTscObj->connId, pRequest->requestId, pRequest->metric.start, duration, pRequest->sqlstr); - if (pTscObj->pAppInfo->monitorParas.tsEnableMonitor) { + if (pTscObj->pAppInfo->serverCfg.monitorParas.tsEnableMonitor) { slowQueryLog(pTscObj->id, pRequest->killed, pRequest->code, duration); if (TSDB_CODE_SUCCESS != generateWriteSlowLog(pTscObj, pRequest, reqType, duration)) { tscError("failed to generate write slow log"); diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c index 75716d0bd2..07be4bb596 100644 --- a/source/client/src/clientHb.c +++ b/source/client/src/clientHb.c @@ -605,7 +605,8 @@ static int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) { return code; } - pInst->monitorParas = pRsp.monitorParas; + pInst->serverCfg.monitorParas = pRsp.monitorParas; + pInst->serverCfg.enableAuditDelete = pRsp.enableAuditDelete; tscDebug("[monitor] paras from hb, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", pInst->clusterId, pRsp.monitorParas.tsSlowLogThreshold, pRsp.monitorParas.tsSlowLogScope); diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index 3b19e070b8..901f4ceef8 100644 --- a/source/client/src/clientMonitor.c +++ b/source/client/src/clientMonitor.c @@ -215,7 +215,7 @@ static void reportSendProcess(void* param, void* tmrId) { SEpSet ep = getEpSet_s(&pInst->mgmtEp); generateClusterReport(pMonitor->registry, pInst->pTransporter, &ep); bool reset = - taosTmrReset(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId); + taosTmrReset(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, param, monitorTimer, &tmrId); tscDebug("reset timer, pMonitor:%p, %d", pMonitor, reset); taosRUnLockLatch(&monitorLock); } @@ -288,7 +288,7 @@ void monitorCreateClient(int64_t clusterId) { goto fail; } pMonitor->timer = - taosTmrStart(reportSendProcess, pInst->monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer); + taosTmrStart(reportSendProcess, pInst->serverCfg.monitorParas.tsMonitorInterval * 1000, (void*)pMonitor, monitorTimer); if (pMonitor->timer == NULL) { tscError("failed to start timer"); goto fail; @@ -659,7 +659,7 @@ static void monitorSendAllSlowLog() { taosHashCancelIterate(monitorSlowLogHash, pIter); return; } - if (t - pClient->lastCheckTime > pInst->monitorParas.tsMonitorInterval * 1000) { + if (t - pClient->lastCheckTime > pInst->serverCfg.monitorParas.tsMonitorInterval * 1000) { pClient->lastCheckTime = t; } else { continue; @@ -685,7 +685,7 @@ static void monitorSendAllSlowLog() { static void monitorSendAllSlowLogFromTempDir(int64_t clusterId) { SAppInstInfo* pInst = getAppInstByClusterId((int64_t)clusterId); - if (pInst == NULL || !pInst->monitorParas.tsEnableMonitor) { + if (pInst == NULL || !pInst->serverCfg.monitorParas.tsEnableMonitor) { tscInfo("[monitor] monitor is disabled, skip send slow log"); return; } @@ -970,6 +970,16 @@ static void reportDeleteSql(SRequestObj* pRequest) { SDeleteStmt* pStmt = (SDeleteStmt*)pRequest->pQuery->pRoot; STscObj* pTscObj = pRequest->pTscObj; + if (pTscObj == NULL || pTscObj->pAppInfo == NULL) { + tscError("[del report]invalid tsc obj"); + return; + } + + if(pTscObj->pAppInfo->serverCfg.enableAuditDelete == 0) { + tscDebug("[del report]audit delete is disabled"); + return; + } + if (pRequest->code != 
TSDB_CODE_SUCCESS) { tscDebug("[del report]delete request result code:%d", pRequest->code); return; @@ -1015,7 +1025,7 @@ void clientOperateReport(SRequestObj* pRequest) { return; } - if (tsEnableAuditDelete && QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) { + if (QUERY_NODE_DELETE_STMT == nodeType(pRequest->pQuery->pRoot)) { reportDeleteSql(pRequest); } } diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index aef3cef1c5..34007c2be7 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -135,7 +135,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { // update the appInstInfo pTscObj->pAppInfo->clusterId = connectRsp.clusterId; - pTscObj->pAppInfo->monitorParas = connectRsp.monitorParas; + pTscObj->pAppInfo->serverCfg.monitorParas = connectRsp.monitorParas; + pTscObj->pAppInfo->serverCfg.enableAuditDelete = connectRsp.enableAuditDelete; tscDebug("[monitor] paras from connect rsp, clusterId:%" PRIx64 " monitorParas threshold:%d scope:%d", connectRsp.clusterId, connectRsp.monitorParas.tsSlowLogThreshold, connectRsp.monitorParas.tsSlowLogScope); lastClusterId = connectRsp.clusterId; diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index e8221c8b8d..0f649f99ff 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1695,7 +1695,7 @@ END: } void smlSetReqSQL(SRequestObj *request, char *lines[], char *rawLine, char *rawLineEnd) { - if (request->pTscObj->pAppInfo->monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) { + if (request->pTscObj->pAppInfo->serverCfg.monitorParas.tsSlowLogScope & SLOW_LOG_TYPE_INSERT) { int32_t len = 0; int32_t rlen = 0; char *p = NULL; From 9221bfb51e559cf5ab74d566df3131db5617ec7a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 4 Nov 2024 13:18:11 +0800 Subject: [PATCH 092/127] handle mem leak failure --- source/client/src/clientImpl.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 8d86745566..0473a025f9 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1770,7 +1770,7 @@ void updateTargetEpSet(SMsgSendInfo* pSendInfo, STscObj* pTscObj, SRpcMsg* pMsg, } } -int32_t doProcessMsFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) { +int32_t doProcessMsgFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) { SMsgSendInfo* pSendInfo = (SMsgSendInfo*)pMsg->info.ahandle; if (pMsg->info.ahandle == NULL) { tscError("doProcessMsgFromServer pMsg->info.ahandle == NULL"); @@ -1843,7 +1843,7 @@ int32_t doProcessMsFromServerImpl(SRpcMsg* pMsg, SEpSet* pEpSet) { } int32_t doProcessMsgFromServer(void* param) { AsyncArg* arg = (AsyncArg*)param; - int32_t code = doProcessMsFromServerImpl(&arg->msg, arg->pEpset); + int32_t code = doProcessMsgFromServerImpl(&arg->msg, arg->pEpset); taosMemoryFree(arg); return code; } @@ -1881,7 +1881,6 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { code = terrno; pMsg->code = code; goto _exit; - return; } arg->msg = *pMsg; @@ -1897,7 +1896,7 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { return; _exit: tscError("failed to sched msg to tsc since %s", tstrerror(code)); - code = doProcessMsFromServerImpl(pMsg, tEpSet); + code = doProcessMsgFromServerImpl(pMsg, tEpSet); if (code != 0) { tscError("failed to sched msg to tsc, tsc ready quit"); } From 44958e3c76765f894746123fd9c20cd8a45e48a0 Mon Sep 17 00:00:00 2001 
From: yihaoDeng Date: Mon, 4 Nov 2024 13:21:15 +0800 Subject: [PATCH 093/127] handle mem leak failure --- source/client/src/clientImpl.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 0473a025f9..e2f9b4a85c 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1886,12 +1886,9 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { arg->msg = *pMsg; arg->pEpset = tEpSet; - if (0 != taosAsyncExec(doProcessMsgFromServer, arg, NULL)) { - tscError("failed to sched msg to tsc, tsc ready to quit"); - rpcFreeCont(pMsg->pCont); - taosMemoryFree(arg->pEpset); - destroySendMsgInfo(pMsg->info.ahandle); + if ((code = taosAsyncExec(doProcessMsgFromServer, arg, NULL)) != 0) { taosMemoryFree(arg); + goto _exit; } return; _exit: From b762aec1613fffce64d1e9e182c085ca53937122 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 4 Nov 2024 13:37:23 +0800 Subject: [PATCH 094/127] fix: complie errors --- include/os/osString.h | 2 +- source/os/src/osString.c | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/include/os/osString.h b/include/os/osString.h index 09a697d834..995aa4a591 100644 --- a/include/os/osString.h +++ b/include/os/osString.h @@ -22,7 +22,7 @@ extern "C" { typedef wchar_t TdWchar; typedef int32_t TdUcs4; -#ifndef DISALLOW_NCHAR_WITHOUT_ICONV +#if !defined(DISALLOW_NCHAR_WITHOUT_ICONV) && defined(DARWIN) #include "iconv.h" #else typedef void *iconv_t; diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 7960f84369..18da778227 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -17,6 +17,10 @@ #define _DEFAULT_SOURCE #include "os.h" +#ifndef DISALLOW_NCHAR_WITHOUT_ICONV +#include "iconv.h" +#endif + extern int wcwidth(wchar_t c); extern int wcswidth(const wchar_t *s, size_t n); From 14843014f963e8cac327694813cd20bb51ce2de1 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 4 Nov 2024 13:57:08 +0800 Subject: [PATCH 095/127] handle mem leak failure --- source/client/src/clientImpl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index e2f9b4a85c..30a85d72ad 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1887,6 +1887,7 @@ void processMsgFromServer(void* parent, SRpcMsg* pMsg, SEpSet* pEpSet) { arg->pEpset = tEpSet; if ((code = taosAsyncExec(doProcessMsgFromServer, arg, NULL)) != 0) { + pMsg->code = code; taosMemoryFree(arg); goto _exit; } From 71e27240cb3be095f5e6abf9f7e2d00722cfa7f3 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 4 Nov 2024 14:00:27 +0800 Subject: [PATCH 096/127] stream/rocks: fix meta file dir --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index b82d06b6c7..5923efbac9 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -5065,7 +5065,7 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { goto _ERROR; } memset(dstBuf, 0, cap); - nBytes = snprintf(dstDir, cap, "%s%s%s", dstDir, TD_DIRSEP, chkpMeta); + nBytes = snprintf(dstBuf, cap, "%s%s%s", dstDir, TD_DIRSEP, chkpMeta); if (nBytes <= 0 || nBytes >= cap) { code = TSDB_CODE_OUT_OF_RANGE; goto _ERROR; From 175a3f7d57b887ab414f4108577fec6e619ad017 Mon Sep 17 00:00:00 2001 From: Minglei Jin 
Date: Mon, 4 Nov 2024 14:22:28 +0800 Subject: [PATCH 097/127] strea/checkpoint: fix meta file path --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 5923efbac9..b69e191059 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -5071,7 +5071,7 @@ int32_t dbChkpDumpTo(SDbChkp* p, char* dname, SArray* list) { goto _ERROR; } - TdFilePtr pFile = taosOpenFile(dstDir, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); + TdFilePtr pFile = taosOpenFile(dstBuf, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC); if (pFile == NULL) { code = terrno; stError("chkp failed to create meta file: %s, reason:%s", dstDir, tstrerror(code)); From 03814cfe5af5ee5b8f1c8307acb8cd37fa2094ff Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 4 Nov 2024 15:16:15 +0800 Subject: [PATCH 098/127] refact: rename version to td_version --- include/common/tglobal.h | 8 ++++---- include/util/version.h | 10 +++++----- source/client/src/clientEnv.c | 2 +- source/client/src/clientImpl.c | 4 ++-- source/client/src/clientMain.c | 4 ++-- source/client/src/clientMsgHandler.c | 4 ++-- source/common/src/tglobal.c | 8 ++++---- source/common/src/tmisce.c | 7 +++---- source/dnode/mgmt/exe/dmMain.c | 9 +++++---- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 20 +++++++++---------- source/dnode/mnode/impl/src/mndMain.c | 2 +- source/dnode/mnode/impl/src/mndProfile.c | 12 +++++------ source/dnode/mnode/impl/src/mndTelem.c | 6 +++--- source/libs/function/src/udfd.c | 8 ++++---- source/libs/parser/src/parTranslater.c | 2 +- source/util/src/version.c.in | 10 +++++----- tools/shell/src/shellArguments.c | 6 +++--- tools/shell/src/shellNettest.c | 4 ++-- 18 files changed, 63 insertions(+), 63 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 41fb692e42..fc47166a60 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -194,10 +194,10 @@ extern int32_t tsMinIntervalTime; extern int32_t tsMaxInsertBatchRows; // build info -extern char version[]; -extern char compatible_version[]; -extern char gitinfo[]; -extern char buildinfo[]; +extern char td_version[]; +extern char td_compatible_version[]; +extern char td_gitinfo[]; +extern char td_buildinfo[]; // lossy extern char tsLossyColumns[]; diff --git a/include/util/version.h b/include/util/version.h index b241dd248b..7b62914a33 100644 --- a/include/util/version.h +++ b/include/util/version.h @@ -20,11 +20,11 @@ extern "C" { #endif -extern char version[]; -extern char compatible_version[]; -extern char gitinfo[]; -extern char gitinfoOfInternal[]; -extern char buildinfo[]; +extern char td_version[]; +extern char td_compatible_version[]; +extern char td_gitinfo[]; +extern char td_gitinfoOfInternal[]; +extern char td_buildinfo[]; #ifdef __cplusplus } diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index f892575f0a..dae6a93d86 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -375,7 +375,7 @@ int32_t openTransporter(const char *user, const char *auth, int32_t numOfThread, rpcInit.startReadTimer = 1; rpcInit.readTimeout = tsReadTimeout; - int32_t code = taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + int32_t code = taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); if (TSDB_CODE_SUCCESS != code) { tscError("invalid version string."); return code; diff 
--git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index 74fd4e13a7..2968b4867e 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -1700,7 +1700,7 @@ static int32_t buildConnectMsg(SRequestObj* pRequest, SMsgSendInfo** pMsgSendInf tstrncpy(connectReq.app, appInfo.appName, sizeof(connectReq.app)); tstrncpy(connectReq.user, pObj->user, sizeof(connectReq.user)); tstrncpy(connectReq.passwd, pObj->pass, sizeof(connectReq.passwd)); - tstrncpy(connectReq.sVer, version, sizeof(connectReq.sVer)); + tstrncpy(connectReq.sVer, td_version, sizeof(connectReq.sVer)); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = taosMemoryMalloc(contLen); @@ -2573,7 +2573,7 @@ TSDB_SERVER_STATUS taos_check_server_status(const char* fqdn, int port, char* de rpcInit.connLimitNum = connLimitNum; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; rpcInit.readTimeout = tsReadTimeout; - if (TSDB_CODE_SUCCESS != taosVersionStrToInt(version, &(rpcInit.compatibilityVer))) { + if (TSDB_CODE_SUCCESS != taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)) { tscError("faild to convert taos version from str to int, errcode:%s", terrstr()); goto _OVER; } diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 64631fd754..9899bf52f0 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -670,7 +670,7 @@ const char *taos_data_type(int type) { } } -const char *taos_get_client_info() { return version; } +const char *taos_get_client_info() { return td_version; } // return int32_t int taos_affected_rows(TAOS_RES *res) { @@ -2144,4 +2144,4 @@ int taos_set_conn_mode(TAOS *taos, int mode, int value) { return 0; } -char *getBuildInfo() { return buildinfo; } +char *getBuildInfo() { return td_buildinfo; } diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index aef3cef1c5..c3d11b7a40 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -80,8 +80,8 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) { goto End; } - if ((code = taosCheckVersionCompatibleFromStr(version, connectRsp.sVer, 3)) != 0) { - tscError("version not compatible. client version: %s, server version: %s", version, connectRsp.sVer); + if ((code = taosCheckVersionCompatibleFromStr(td_version, connectRsp.sVer, 3)) != 0) { + tscError("version not compatible. 
client version: %s, server version: %s", td_version, connectRsp.sVer); goto End; } diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 834615bdaa..36e36752c0 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -675,10 +675,10 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) { TAOS_CHECK_RETURN(cfgAddString(pCfg, "os version", info.version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_CHECK_RETURN(cfgAddString(pCfg, "os machine", info.machine, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); - TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "version", td_version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "compatible_version", td_compatible_version, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "gitinfo", td_gitinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); + TAOS_CHECK_RETURN(cfgAddString(pCfg, "buildinfo", td_buildinfo, CFG_SCOPE_BOTH, CFG_DYN_NONE)); TAOS_RETURN(TSDB_CODE_SUCCESS); } diff --git a/source/common/src/tmisce.c b/source/common/src/tmisce.c index 10375ba857..4df458c2bb 100644 --- a/source/common/src/tmisce.c +++ b/source/common/src/tmisce.c @@ -221,10 +221,9 @@ int32_t taosGenCrashJsonMsg(int signum, char** pMsg, int64_t clusterId, int64_t } TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), NULL, _exit); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), NULL, _exit); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), NULL, _exit); - - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), NULL, _exit); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), NULL, _exit); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), NULL, _exit); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), NULL, _exit); TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashSig", signum), NULL, _exit); TAOS_CHECK_GOTO(tjsonAddIntegerToObject(pJson, "crashTs", taosGetTimestampUs()), NULL, _exit); diff --git a/source/dnode/mgmt/exe/dmMain.c b/source/dnode/mgmt/exe/dmMain.c index f94b9e2d73..ade5e16894 100644 --- a/source/dnode/mgmt/exe/dmMain.c +++ b/source/dnode/mgmt/exe/dmMain.c @@ -297,12 +297,13 @@ static void dmPrintArgs(int32_t argc, char const *argv[]) { static void dmGenerateGrant() { mndGenerateMachineCode(); } static void dmPrintVersion() { - printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, version, compatible_version); - printf("git: %s\n", gitinfo); + printf("%s\n%sd version: %s compatible_version: %s\n", TD_PRODUCT_NAME, CUS_PROMPT, td_version, + td_compatible_version); + printf("git: %s\n", td_gitinfo); #ifdef TD_ENTERPRISE - printf("gitOfInternal: %s\n", gitinfoOfInternal); + printf("gitOfInternal: %s\n", td_gitinfoOfInternal); #endif - printf("build: %s\n", buildinfo); + printf("build: %s\n", td_buildinfo); } static void dmPrintHelp() { diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index c151529aea..5a276de251 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ 
-138,9 +138,9 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) { pRpc->info.handle, pRpc->contLen, pRpc->code, pRpc->info.ahandle, pRpc->info.refId); int32_t svrVer = 0; - code = taosVersionStrToInt(version, &svrVer); + code = taosVersionStrToInt(td_version, &svrVer); if (code != 0) { - dError("failed to convert version string:%s to int, code:%d", version, code); + dError("failed to convert version string:%s to int, code:%d", td_version, code); goto _OVER; } if ((code = taosCheckVersionCompatible(pRpc->info.cliVer, svrVer, 3)) != 0) { @@ -434,8 +434,8 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.startReadTimer = 1; rpcInit.readTimeout = tsReadTimeout; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->clientRpc = rpcOpen(&rpcInit); @@ -483,8 +483,8 @@ int32_t dmInitStatusClient(SDnode *pDnode) { rpcInit.startReadTimer = 0; rpcInit.readTimeout = 0; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->statusRpc = rpcOpen(&rpcInit); @@ -533,8 +533,8 @@ int32_t dmInitSyncClient(SDnode *pDnode) { rpcInit.startReadTimer = 1; rpcInit.readTimeout = tsReadTimeout; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->syncRpc = rpcOpen(&rpcInit); @@ -588,8 +588,8 @@ int32_t dmInitServer(SDnode *pDnode) { rpcInit.compressSize = tsCompressMsgSize; rpcInit.shareConnLimit = tsShareConnLimit * 16; - if (taosVersionStrToInt(version, &(rpcInit.compatibilityVer)) != 0) { - dError("failed to convert version string:%s to int", version); + if (taosVersionStrToInt(td_version, &rpcInit.compatibilityVer) != 0) { + dError("failed to convert version string:%s to int", td_version); } pTrans->serverRpc = rpcOpen(&rpcInit); diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c index 08ebf52ec6..6c30193ea7 100644 --- a/source/dnode/mnode/impl/src/mndMain.c +++ b/source/dnode/mnode/impl/src/mndMain.c @@ -1021,7 +1021,7 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr } // cluster info - tstrncpy(pClusterInfo->version, version, sizeof(pClusterInfo->version)); + tstrncpy(pClusterInfo->version, td_version, sizeof(pClusterInfo->version)); pClusterInfo->monitor_interval = tsMonitorInterval; pClusterInfo->connections_total = mndGetNumOfConnections(pMnode); pClusterInfo->dbs_total = sdbGetSize(pSdb, SDB_DB); diff --git a/source/dnode/mnode/impl/src/mndProfile.c b/source/dnode/mnode/impl/src/mndProfile.c index a1ffee9b06..91df640bce 100644 --- a/source/dnode/mnode/impl/src/mndProfile.c +++ b/source/dnode/mnode/impl/src/mndProfile.c @@ -239,8 +239,8 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { goto _OVER; } - if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, version, 3)) != 0) { - mGError("version not compatible. 
client version: %s, server version: %s", connReq.sVer, version); + if ((code = taosCheckVersionCompatibleFromStr(connReq.sVer, td_version, 3)) != 0) { + mGError("version not compatible. client version: %s, server version: %s", connReq.sVer, td_version); goto _OVER; } @@ -308,9 +308,9 @@ static int32_t mndProcessConnectReq(SRpcMsg *pReq) { tstrncpy(connectRsp.monitorParas.tsSlowLogExceptDb, tsSlowLogExceptDb, TSDB_DB_NAME_LEN); connectRsp.whiteListVer = pUser->ipWhiteListVer; - (void)strcpy(connectRsp.sVer, version); - (void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", version, - buildinfo, gitinfo); + tstrncpy(connectRsp.sVer, td_version, sizeof(connectRsp.sVer)); + (void)snprintf(connectRsp.sDetailVer, sizeof(connectRsp.sDetailVer), "ver:%s\nbuild:%s\ngitinfo:%s", td_version, + td_buildinfo, td_gitinfo); mndGetMnodeEpSet(pMnode, &connectRsp.epSet); int32_t contLen = tSerializeSConnectRsp(NULL, 0, &connectRsp); @@ -813,7 +813,7 @@ static int32_t mndProcessSvrVerReq(SRpcMsg *pReq) { int32_t code = 0; int32_t lino = 0; SServerVerRsp rsp = {0}; - tstrncpy(rsp.ver, version, sizeof(rsp.ver)); + tstrncpy(rsp.ver, td_version, sizeof(rsp.ver)); int32_t contLen = tSerializeSServerVerRsp(NULL, 0, &rsp); if (contLen < 0) { diff --git a/source/dnode/mnode/impl/src/mndTelem.c b/source/dnode/mnode/impl/src/mndTelem.c index 0022aee619..810c71b7c5 100644 --- a/source/dnode/mnode/impl/src/mndTelem.c +++ b/source/dnode/mnode/impl/src/mndTelem.c @@ -115,9 +115,9 @@ static char* mndBuildTelemetryReport(SMnode* pMnode) { snprintf(tmp, sizeof(tmp), "%" PRId64 " kB", tsTotalMemoryKB); TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "memory", tmp), &lino, _OVER); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", version), &lino, _OVER); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", buildinfo), &lino, _OVER); - TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", gitinfo), &lino, _OVER); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "version", td_version), &lino, _OVER); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "buildInfo", td_buildinfo), &lino, _OVER); + TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "gitInfo", td_gitinfo), &lino, _OVER); TAOS_CHECK_GOTO(tjsonAddStringToObject(pJson, "email", pMgmt->email), &lino, _OVER); mndBuildRuntimeInfo(pMnode, pJson); diff --git a/source/libs/function/src/udfd.c b/source/libs/function/src/udfd.c index c360cf6894..6eef99e1f8 100644 --- a/source/libs/function/src/udfd.c +++ b/source/libs/function/src/udfd.c @@ -1217,7 +1217,7 @@ int32_t udfdOpenClientRpc() { connLimitNum = TMIN(connLimitNum, 500); rpcInit.connLimitNum = connLimitNum; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; - TAOS_CHECK_RETURN(taosVersionStrToInt(version, &(rpcInit.compatibilityVer))); + TAOS_CHECK_RETURN(taosVersionStrToInt(td_version, &rpcInit.compatibilityVer)); global.clientRpc = rpcOpen(&rpcInit); if (global.clientRpc == NULL) { fnError("failed to init dnode rpc client"); @@ -1470,9 +1470,9 @@ static int32_t udfdParseArgs(int32_t argc, char *argv[]) { } static void udfdPrintVersion() { - (void)printf("udfd version: %s compatible_version: %s\n", version, compatible_version); - (void)printf("git: %s\n", gitinfo); - (void)printf("build: %s\n", buildinfo); + (void)printf("udfd version: %s compatible_version: %s\n", td_version, td_compatible_version); + (void)printf("git: %s\n", td_gitinfo); + (void)printf("build: %s\n", td_buildinfo); } static int32_t udfdInitLog() { diff --git 
a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index e475d34055..05372ac2dc 100755 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2902,7 +2902,7 @@ static int32_t rewriteDatabaseFunc(STranslateContext* pCxt, SNode** pNode) { } static int32_t rewriteClentVersionFunc(STranslateContext* pCxt, SNode** pNode) { - char* pVer = taosStrdup((void*)version); + char* pVer = taosStrdup((void*)td_version); if (NULL == pVer) { return terrno; } diff --git a/source/util/src/version.c.in b/source/util/src/version.c.in index c91b46e18d..c5123e6f21 100644 --- a/source/util/src/version.c.in +++ b/source/util/src/version.c.in @@ -1,7 +1,7 @@ -char version[64] = "${TD_VER_NUMBER}"; -char compatible_version[12] = "${TD_VER_COMPATIBLE}"; -char gitinfo[48] = "${TD_VER_GIT}"; -char gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}"; -char buildinfo[64] = "${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}"; +char td_version[64] = "${TD_VER_NUMBER}"; +char td_compatible_version[12] = "${TD_VER_COMPATIBLE}"; +char td_gitinfo[48] = "${TD_VER_GIT}"; +char td_gitinfoOfInternal[48] = "${TD_VER_GIT_INTERNAL}"; +char td_buildinfo[64] = "${TD_VER_OSTYPE}-${TD_VER_CPUTYPE} ${TD_VER_DATE}"; void libtaos_${TD_LIB_VER_NUMBER}_${TD_VER_OSTYPE}_${TD_VER_CPUTYPE}_${TD_VER_VERTYPE}() {}; \ No newline at end of file diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 4638f2ad74..5dd24dc396 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -440,11 +440,11 @@ int32_t shellParseArgs(int32_t argc, char *argv[]) { #ifdef TD_ENTERPRISE snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), "%s\n%s version: %s compatible_version: %s\ngit: %s\ngitOfInternal: %s\nbuild: %s", TD_PRODUCT_NAME, - CUS_PROMPT, version, compatible_version, gitinfo, gitinfoOfInternal, buildinfo); + CUS_PROMPT, td_version, td_compatible_version, td_gitinfo, td_gitinfoOfInternal, td_buildinfo); #else snprintf(shell.info.programVersion, sizeof(shell.info.programVersion), - "%s\n%s version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, CUS_PROMPT, version, - compatible_version, gitinfo, buildinfo); + "%s\n%s version: %s compatible_version: %s\ngit: %s\nbuild: %s", TD_PRODUCT_NAME, CUS_PROMPT, td_version, + td_compatible_version, td_gitinfo, td_buildinfo); #endif #if defined(_TD_WINDOWS_64) || defined(_TD_WINDOWS_32) diff --git a/tools/shell/src/shellNettest.c b/tools/shell/src/shellNettest.c index 2e5ec7bc24..d1ecf503d2 100644 --- a/tools/shell/src/shellNettest.c +++ b/tools/shell/src/shellNettest.c @@ -34,7 +34,7 @@ static void shellWorkAsClient() { rpcInit.user = "_dnd"; rpcInit.timeToGetConn = tsTimeToGetAvailableConn; - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); clientRpc = rpcOpen(&rpcInit); if (clientRpc == NULL) { printf("failed to init net test client since %s\r\n", terrstr()); @@ -125,7 +125,7 @@ static void shellWorkAsServer() { rpcInit.connType = TAOS_CONN_SERVER; rpcInit.idleTime = tsShellActivityTimer * 1000; - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); void *serverRpc = rpcOpen(&rpcInit); if (serverRpc == NULL) { From b011ef49e02b31cc3039e964f6db5d9ce9d34b2b Mon Sep 17 00:00:00 2001 From: Pengrongkun Date: Sun, 27 Oct 2024 11:04:39 +0800 Subject: [PATCH 099/127] TD-32120:add api taos_stmt2_get_all_fields fix some 
format convert TAOS_FIELD_E to TAOS_FIELD_ALL, to prevent modifications to the original API rename taos_stmt2_get_stb_fields refact parse for ctbname parameterized refactor some code, fix some core problem. --- include/client/taos.h | 47 ++++--- include/libs/parser/parser.h | 3 +- source/client/inc/clientStmt2.h | 1 + source/client/src/clientMain.c | 25 +++- source/client/src/clientStmt2.c | 50 +++++++- source/libs/parser/src/parInsertSql.c | 168 ++++++++++++++++--------- source/libs/parser/src/parInsertStmt.c | 67 +++++++++- tests/script/api/makefile | 2 + tests/script/api/stmt2-get-fields.c | 129 +++++++++++++++++++ 9 files changed, 404 insertions(+), 88 deletions(-) create mode 100644 tests/script/api/stmt2-get-fields.c diff --git a/include/client/taos.h b/include/client/taos.h index 80dbe27c47..924d0ff66e 100644 --- a/include/client/taos.h +++ b/include/client/taos.h @@ -81,6 +81,13 @@ typedef enum { TSDB_SML_TIMESTAMP_NANO_SECONDS, } TSDB_SML_TIMESTAMP_TYPE; +typedef enum TAOS_FIELD_T { + TAOS_FIELD_COL = 1, + TAOS_FIELD_TAG, + TAOS_FIELD_QUERY, + TAOS_FIELD_TBNAME, +} TAOS_FIELD_T; + typedef struct taosField { char name[65]; int8_t type; @@ -95,6 +102,15 @@ typedef struct TAOS_FIELD_E { int32_t bytes; } TAOS_FIELD_E; +typedef struct TAOS_FIELD_STB { + char name[65]; + int8_t type; + uint8_t precision; + uint8_t scale; + int32_t bytes; + TAOS_FIELD_T field_type; +} TAOS_FIELD_STB; + #ifdef WINDOWS #define DLL_EXPORT __declspec(dllexport) #else @@ -195,13 +211,6 @@ DLL_EXPORT int taos_stmt_affected_rows_once(TAOS_STMT *stmt); typedef void TAOS_STMT2; -typedef enum TAOS_FIELD_T { - TAOS_FIELD_COL = 1, - TAOS_FIELD_TAG, - TAOS_FIELD_QUERY, - TAOS_FIELD_TBNAME, -} TAOS_FIELD_T; - typedef struct TAOS_STMT2_OPTION { int64_t reqid; bool singleStbInsert; @@ -232,7 +241,9 @@ DLL_EXPORT int taos_stmt2_exec(TAOS_STMT2 *stmt, int *affected_rows); DLL_EXPORT int taos_stmt2_close(TAOS_STMT2 *stmt); DLL_EXPORT int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert); DLL_EXPORT int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields); +DLL_EXPORT int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields); DLL_EXPORT void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields); +DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields); DLL_EXPORT TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt); DLL_EXPORT char *taos_stmt2_error(TAOS_STMT2 *stmt); @@ -251,17 +262,17 @@ DLL_EXPORT int64_t taos_affected_rows64(TAOS_RES *res); DLL_EXPORT TAOS_FIELD *taos_fetch_fields(TAOS_RES *res); DLL_EXPORT int taos_select_db(TAOS *taos, const char *db); DLL_EXPORT int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); -DLL_EXPORT void taos_stop_query(TAOS_RES *res); -DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); -DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); -DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); -DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); -DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); -DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); -DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); -DLL_EXPORT void 
taos_reset_current_db(TAOS *taos); +DLL_EXPORT int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields); +DLL_EXPORT void taos_stop_query(TAOS_RES *res); +DLL_EXPORT bool taos_is_null(TAOS_RES *res, int32_t row, int32_t col); +DLL_EXPORT int taos_is_null_by_column(TAOS_RES *res, int columnIndex, bool result[], int *rows); +DLL_EXPORT bool taos_is_update_query(TAOS_RES *res); +DLL_EXPORT int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_block_s(TAOS_RES *res, int *numOfRows, TAOS_ROW *rows); +DLL_EXPORT int taos_fetch_raw_block(TAOS_RES *res, int *numOfRows, void **pData); +DLL_EXPORT int *taos_get_column_data_offset(TAOS_RES *res, int columnIndex); +DLL_EXPORT int taos_validate_sql(TAOS *taos, const char *sql); +DLL_EXPORT void taos_reset_current_db(TAOS *taos); DLL_EXPORT int *taos_fetch_lengths(TAOS_RES *res); DLL_EXPORT TAOS_ROW *taos_result_block(TAOS_RES *res); diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index 832e4f8863..0fb6261ac8 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -65,7 +65,7 @@ typedef struct SParseCsvCxt { const char* pLastSqlPos; // the location of the last parsed sql } SParseCsvCxt; -typedef void(*setQueryFn)(int64_t); +typedef void (*setQueryFn)(int64_t); typedef struct SParseContext { uint64_t requestId; @@ -147,6 +147,7 @@ int32_t qBindStmtColsValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, c int32_t qBindStmtSingleColValue(void* pBlock, SArray* pCols, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen, int32_t colIdx, int32_t rowNum); int32_t qBuildStmtColFields(void* pDataBlock, int32_t* fieldNum, TAOS_FIELD_E** fields); +int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields); int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields); int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const char* sTableName, char* tName, TAOS_MULTI_BIND* bind, char* msgBuf, int32_t msgBufLen); diff --git a/source/client/inc/clientStmt2.h b/source/client/inc/clientStmt2.h index 4e9a09c082..64abf31bc1 100644 --- a/source/client/inc/clientStmt2.h +++ b/source/client/inc/clientStmt2.h @@ -222,6 +222,7 @@ int stmtSetTbTags2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *tags); int stmtBindBatch2(TAOS_STMT2 *stmt, TAOS_STMT2_BIND *bind, int32_t colIdx); int stmtGetTagFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); int stmtGetColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_E **fields); +int stmtGetStbColFields2(TAOS_STMT2 *stmt, int *nums, TAOS_FIELD_STB **fields); int stmtGetParamNum2(TAOS_STMT2 *stmt, int *nums); int stmtGetParamTbName(TAOS_STMT2 *stmt, int *nums); int stmtIsInsert2(TAOS_STMT2 *stmt, int *insert); diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 64631fd754..306af194ab 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -84,7 +84,7 @@ void taos_cleanup(void) { taosCloseRef(id); nodesDestroyAllocatorSet(); -// cleanupAppInfo(); + // cleanupAppInfo(); rpcCleanup(); tscDebug("rpc cleanup"); @@ -388,7 +388,6 @@ void taos_free_result(TAOS_RES *res) { tDeleteMqBatchMetaRsp(&pRsp->batchMetaRsp); } taosMemoryFree(pRsp); - } void taos_kill_query(TAOS *taos) { @@ -484,7 +483,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { return taos_print_row_with_size(str, INT32_MAX, row, fields, 
num_fields); } -int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields){ +int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) { int32_t len = 0; for (int i = 0; i < num_fields; ++i) { if (i > 0 && len < size - 1) { @@ -589,7 +588,7 @@ int taos_print_row_with_size(char *str, uint32_t size, TAOS_ROW row, TAOS_FIELD break; } } - if (len < size){ + if (len < size) { str[len] = 0; } @@ -2082,7 +2081,7 @@ int taos_stmt2_is_insert(TAOS_STMT2 *stmt, int *insert) { } int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, TAOS_FIELD_E **fields) { - if (stmt == NULL || NULL == count) { + if (stmt == NULL || count == NULL) { tscError("NULL parameter for %s", __FUNCTION__); terrno = TSDB_CODE_INVALID_PARA; return terrno; @@ -2103,12 +2102,28 @@ int taos_stmt2_get_fields(TAOS_STMT2 *stmt, TAOS_FIELD_T field_type, int *count, } } +int taos_stmt2_get_stb_fields(TAOS_STMT2 *stmt, int *count, TAOS_FIELD_STB **fields) { + if (stmt == NULL || count == NULL) { + tscError("NULL parameter for %s", __FUNCTION__); + terrno = TSDB_CODE_INVALID_PARA; + return terrno; + } + + return stmtGetStbColFields2(stmt, count, fields); +} + void taos_stmt2_free_fields(TAOS_STMT2 *stmt, TAOS_FIELD_E *fields) { (void)stmt; if (!fields) return; taosMemoryFree(fields); } +DLL_EXPORT void taos_stmt2_free_stb_fields(TAOS_STMT2 *stmt, TAOS_FIELD_STB *fields) { + (void)stmt; + if (!fields) return; + taosMemoryFree(fields); +} + TAOS_RES *taos_stmt2_result(TAOS_STMT2 *stmt) { if (stmt == NULL) { tscError("NULL parameter for %s", __FUNCTION__); diff --git a/source/client/src/clientStmt2.c b/source/client/src/clientStmt2.c index 2f046b61d6..4bbfc6afaa 100644 --- a/source/client/src/clientStmt2.c +++ b/source/client/src/clientStmt2.c @@ -1068,6 +1068,34 @@ static int stmtFetchColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_E return TSDB_CODE_SUCCESS; } + +static int stmtFetchStbColFields2(STscStmt2* pStmt, int32_t* fieldNum, TAOS_FIELD_STB** fields) { + if (pStmt->errCode != TSDB_CODE_SUCCESS) { + return pStmt->errCode; + } + + if (STMT_TYPE_QUERY == pStmt->sql.type) { + tscError("invalid operation to get query column fileds"); + STMT_ERR_RET(TSDB_CODE_TSC_STMT_API_ERROR); + } + + STableDataCxt** pDataBlock = NULL; + + if (pStmt->sql.stbInterlaceMode) { + pDataBlock = &pStmt->sql.siInfo.pDataCtx; + } else { + pDataBlock = + (STableDataCxt**)taosHashGet(pStmt->exec.pBlockHash, pStmt->bInfo.tbFName, strlen(pStmt->bInfo.tbFName)); + if (NULL == pDataBlock) { + tscError("table %s not found in exec blockHash", pStmt->bInfo.tbFName); + STMT_ERR_RET(TSDB_CODE_APP_ERROR); + } + } + + STMT_ERR_RET(qBuildStmtStbColFields(*pDataBlock, fieldNum, fields)); + + return TSDB_CODE_SUCCESS; +} /* SArray* stmtGetFreeCol(STscStmt2* pStmt, int32_t* idx) { while (true) { @@ -1808,7 +1836,7 @@ _return: return code; } -int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { +int stmtParseColFields2(TAOS_STMT2* stmt) { int32_t code = 0; STscStmt2* pStmt = (STscStmt2*)stmt; int32_t preCode = pStmt->errCode; @@ -1842,8 +1870,6 @@ int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { STMT_ERRI_JRET(stmtParseSql(pStmt)); } - STMT_ERRI_JRET(stmtFetchColFields2(stmt, nums, fields)); - _return: pStmt->errCode = preCode; @@ -1851,6 +1877,24 @@ _return: return code; } +int stmtGetColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_E** fields) { + int32_t code = stmtParseColFields2(stmt); + if 
(code != TSDB_CODE_SUCCESS) { + return code; + } + + return stmtFetchColFields2(stmt, nums, fields); +} + +int stmtGetStbColFields2(TAOS_STMT2* stmt, int* nums, TAOS_FIELD_STB** fields) { + int32_t code = stmtParseColFields2(stmt); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + return stmtFetchStbColFields2(stmt, nums, fields); +} + int stmtGetParamNum2(TAOS_STMT2* stmt, int* nums) { STscStmt2* pStmt = (STscStmt2*)stmt; diff --git a/source/libs/parser/src/parInsertSql.c b/source/libs/parser/src/parInsertSql.c index 1c26a7c70e..4b91f01a8c 100644 --- a/source/libs/parser/src/parInsertSql.c +++ b/source/libs/parser/src/parInsertSql.c @@ -30,7 +30,7 @@ typedef struct SInsertParseContext { bool forceUpdate; bool needTableTagVal; bool needRequest; // whether or not request server - bool isStmtBind; // whether is stmt bind + bool isStmtBind; // whether is stmt bind } SInsertParseContext; typedef int32_t (*_row_append_fn_t)(SMsgBuf* pMsgBuf, const void* value, int32_t len, void* param); @@ -757,7 +757,7 @@ int32_t parseTagValue(SMsgBuf* pMsgBuf, const char** pSql, uint8_t precision, SS STagVal val = {0}; int32_t code = parseTagToken(pSql, pToken, pTagSchema, precision, &val, pMsgBuf); if (TSDB_CODE_SUCCESS == code) { - if (NULL == taosArrayPush(pTagVals, &val)){ + if (NULL == taosArrayPush(pTagVals, &val)) { code = terrno; } } @@ -775,11 +775,14 @@ static int32_t buildCreateTbReq(SVnodeModifyOpStmt* pStmt, STag* pTag, SArray* p return terrno; } return insBuildCreateTbReq(pStmt->pCreateTblReq, pStmt->targetTableName.tname, pTag, pStmt->pTableMeta->suid, - pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, - TSDB_DEFAULT_TABLE_TTL); + pStmt->usingTableName.tname, pTagName, pStmt->pTableMeta->tableInfo.numOfTags, + TSDB_DEFAULT_TABLE_TTL); } int32_t checkAndTrimValue(SToken* pToken, char* tmpTokenBuf, SMsgBuf* pMsgBuf, int8_t type) { + if (pToken->type == TK_NK_QUESTION) { + return buildInvalidOperationMsg(pMsgBuf, "insert into super table syntax is not supported for stmt"); + } if ((pToken->type != TK_NOW && pToken->type != TK_TODAY && pToken->type != TK_NK_INTEGER && pToken->type != TK_NK_STRING && pToken->type != TK_NK_FLOAT && pToken->type != TK_NK_BOOL && pToken->type != TK_NULL && pToken->type != TK_NK_HEX && pToken->type != TK_NK_OCT && pToken->type != TK_NK_BIN && @@ -810,7 +813,7 @@ typedef struct SRewriteTagCondCxt { static int32_t rewriteTagCondColumnImpl(STagVal* pVal, SNode** pNode) { SValueNode* pValue = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); + int32_t code = nodesMakeNode(QUERY_NODE_VALUE, (SNode**)&pValue); if (NULL == pValue) { return code; } @@ -1041,7 +1044,7 @@ static int32_t storeChildTableMeta(SInsertParseContext* pCxt, SVnodeModifyOpStmt return TSDB_CODE_OUT_OF_MEMORY; } - char tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { taosMemoryFree(pBackup); @@ -1236,7 +1239,7 @@ static int32_t getTargetTableMetaAndVgroup(SInsertParseContext* pCxt, SVnodeModi } static int32_t collectUseTable(const SName* pName, SHashObj* pTable) { - char fullName[TSDB_TABLE_FNAME_LEN]; + char fullName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(pName, fullName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1382,7 +1385,7 @@ static int32_t getTableDataCxt(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS pStmt->pTableMeta, &pStmt->pCreateTblReq, pTableCxt, false, false); } - char 
tbFName[TSDB_TABLE_FNAME_LEN]; + char tbFName[TSDB_TABLE_FNAME_LEN]; int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); if (TSDB_CODE_SUCCESS != code) { return code; @@ -1824,37 +1827,49 @@ static int32_t doGetStbRowValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt* code = generateSyntaxErrMsg(&pCxt->msg, TSDB_CODE_PAR_INVALID_COLUMNS_NUM); break; } - if (pCols->pColIndex[i] < numOfCols) { - const SSchema* pSchema = &pSchemas[pCols->pColIndex[i]]; - SColVal* pVal = taosArrayGet(pStbRowsCxt->aColVals, pCols->pColIndex[i]); - code = parseValueToken(pCxt, ppSql, pToken, (SSchema*)pSchema, precision, pVal); - if (TK_NK_VARIABLE == pToken->type) { - code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value"); + + if (TK_NK_QUESTION == pToken->type) { + pCxt->isStmtBind = true; + if (pCols->pColIndex[i] == tbnameIdx) { + *bFoundTbName = true; } - } else if (pCols->pColIndex[i] < tbnameIdx) { - const SSchema* pTagSchema = &pSchemas[pCols->pColIndex[i]]; - if (canParseTagsAfter) { - tagTokens[(*pNumOfTagTokens)] = *pToken; - tagSchemas[(*pNumOfTagTokens)] = (SSchema*)pTagSchema; - ++(*pNumOfTagTokens); - } else { - code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type); - if (code == TSDB_CODE_SUCCESS && TK_NK_VARIABLE == pToken->type) { + if (NULL == pCxt->pComCxt->pStmtCb) { + code = buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pToken->z); + break; + } + } else { + if (pCols->pColIndex[i] < numOfCols) { + const SSchema* pSchema = &pSchemas[pCols->pColIndex[i]]; + SColVal* pVal = taosArrayGet(pStbRowsCxt->aColVals, pCols->pColIndex[i]); + code = parseValueToken(pCxt, ppSql, pToken, (SSchema*)pSchema, precision, pVal); + if (TK_NK_VARIABLE == pToken->type) { code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value"); } - if (code == TSDB_CODE_SUCCESS) { - code = parseTagValue(&pCxt->msg, ppSql, precision, (SSchema*)pTagSchema, pToken, pTagNames, pTagVals, - &pStbRowsCxt->pTag); + } else if (pCols->pColIndex[i] < tbnameIdx) { + const SSchema* pTagSchema = &pSchemas[pCols->pColIndex[i]]; + if (canParseTagsAfter) { + tagTokens[(*pNumOfTagTokens)] = *pToken; + tagSchemas[(*pNumOfTagTokens)] = (SSchema*)pTagSchema; + ++(*pNumOfTagTokens); + } else { + code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, pTagSchema->type); + if (code == TSDB_CODE_SUCCESS && TK_NK_VARIABLE == pToken->type) { + code = buildInvalidOperationMsg(&pCxt->msg, "not expected row value"); + } + if (code == TSDB_CODE_SUCCESS) { + code = parseTagValue(&pCxt->msg, ppSql, precision, (SSchema*)pTagSchema, pToken, pTagNames, pTagVals, + &pStbRowsCxt->pTag); + } + } + } else if (pCols->pColIndex[i] == tbnameIdx) { + code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, TSDB_DATA_TYPE_BINARY); + if (TK_NK_VARIABLE == pToken->type) { + code = buildInvalidOperationMsg(&pCxt->msg, "not expected tbname"); } - } - } else if (pCols->pColIndex[i] == tbnameIdx) { - code = checkAndTrimValue(pToken, pCxt->tmpTokenBuf, &pCxt->msg, TSDB_DATA_TYPE_BINARY); - if (TK_NK_VARIABLE == pToken->type) { - code = buildInvalidOperationMsg(&pCxt->msg, "not expected tbname"); - } - if (code == TSDB_CODE_SUCCESS) { - code = parseTbnameToken(&pCxt->msg, pStbRowsCxt->ctbName.tname, pToken, bFoundTbName); + if (code == TSDB_CODE_SUCCESS) { + code = parseTbnameToken(&pCxt->msg, pStbRowsCxt->ctbName.tname, pToken, bFoundTbName); + } } } @@ -1888,6 +1903,11 @@ static int32_t getStbRowValues(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pS code = 
buildSyntaxErrMsg(&pCxt->msg, "tbname value expected", pOrigSql); } + if (code == TSDB_CODE_SUCCESS && pStbRowsCxt->ctbName.tname[0] == '\0') { + *pGotRow = true; + return TSDB_CODE_TSC_STMT_TBNAME_ERROR; + } + bool ctbFirst = true; char ctbFName[TSDB_TABLE_FNAME_LEN]; if (code == TSDB_CODE_SUCCESS) { @@ -1923,8 +1943,8 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod } if (code == TSDB_CODE_SUCCESS) { code = insBuildCreateTbReq(pStbRowsCxt->pCreateCtbReq, pStbRowsCxt->ctbName.tname, pStbRowsCxt->pTag, - pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, - getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); + pStbRowsCxt->pStbMeta->uid, pStbRowsCxt->stbName.tname, pStbRowsCxt->aTagNames, + getNumOfTags(pStbRowsCxt->pStbMeta), TSDB_DEFAULT_TABLE_TTL); pStbRowsCxt->pTag = NULL; } @@ -1933,9 +1953,9 @@ static int32_t processCtbAutoCreationAndCtbMeta(SInsertParseContext* pCxt, SVnod code = tNameExtractFullName(&pStbRowsCxt->ctbName, ctbFName); SVgroupInfo vg; SRequestConnInfo conn = {.pTrans = pCxt->pComCxt->pTransporter, - .requestId = pCxt->pComCxt->requestId, - .requestObjRefId = pCxt->pComCxt->requestRid, - .mgmtEps = pCxt->pComCxt->mgmtEpSet}; + .requestId = pCxt->pComCxt->requestId, + .requestObjRefId = pCxt->pComCxt->requestRid, + .mgmtEps = pCxt->pComCxt->mgmtEpSet}; if (TSDB_CODE_SUCCESS == code) { code = catalogGetTableHashVgroup(pCxt->pComCxt->pCatalog, &conn, &pStbRowsCxt->ctbName, &vg); } @@ -1979,11 +1999,47 @@ static void clearStbRowsDataContext(SStbRowsDataContext* pStbRowsCxt) { taosMemoryFreeClear(pStbRowsCxt->pCreateCtbReq); } +static int32_t parseStbBoundInfo(SVnodeModifyOpStmt* pStmt, SStbRowsDataContext* pStbRowsCxt, + STableDataCxt** ppTableDataCxt) { + char tbFName[TSDB_TABLE_FNAME_LEN]; + int32_t code = tNameExtractFullName(&pStmt->targetTableName, tbFName); + if (TSDB_CODE_SUCCESS != code) { + return code; + } + if (pStmt->usingTableProcessing) { + pStmt->pTableMeta->uid = 0; + } + + code = insGetTableDataCxt(pStmt->pTableBlockHashObj, tbFName, strlen(tbFName), pStmt->pTableMeta, + &pStmt->pCreateTblReq, ppTableDataCxt, false, true); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + insDestroyBoundColInfo(&((*ppTableDataCxt)->boundColsInfo)); + (*ppTableDataCxt)->boundColsInfo = pStbRowsCxt->boundColsInfo; + (*ppTableDataCxt)->boundColsInfo.numOfCols = pStbRowsCxt->boundColsInfo.numOfBound; + (*ppTableDataCxt)->boundColsInfo.numOfBound = pStbRowsCxt->boundColsInfo.numOfBound; + (*ppTableDataCxt)->boundColsInfo.hasBoundCols = pStbRowsCxt->boundColsInfo.hasBoundCols; + (*ppTableDataCxt)->boundColsInfo.pColIndex = taosMemoryCalloc(pStbRowsCxt->boundColsInfo.numOfBound, sizeof(int16_t)); + if (NULL == (*ppTableDataCxt)->boundColsInfo.pColIndex) { + return terrno; + } + (void)memcpy((*ppTableDataCxt)->boundColsInfo.pColIndex, pStbRowsCxt->boundColsInfo.pColIndex, + sizeof(int16_t) * pStmt->pStbRowsCxt->boundColsInfo.numOfBound); + return TSDB_CODE_SUCCESS; +} + static int32_t parseOneStbRow(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt, const char** ppSql, SStbRowsDataContext* pStbRowsCxt, bool* pGotRow, SToken* pToken, STableDataCxt** ppTableDataCxt) { bool bFirstTable = false; int32_t code = getStbRowValues(pCxt, pStmt, ppSql, pStbRowsCxt, pGotRow, pToken, &bFirstTable); + + if (code == TSDB_CODE_TSC_STMT_TBNAME_ERROR && *pGotRow) { + return parseStbBoundInfo(pStmt, pStbRowsCxt, ppTableDataCxt); + } + if (code != TSDB_CODE_SUCCESS || !*pGotRow) { return code; } @@ -2176,8 +2232,8 @@ 
static int32_t parseCsvFile(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt if (code == TSDB_CODE_SUCCESS) { SStbRowsDataContext* pStbRowsCxt = rowsDataCxt.pStbRowsCxt; void* pData = pTableDataCxt; - code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), &pData, - POINTER_BYTES); + code = taosHashPut(pStmt->pTableCxtHashObj, &pStbRowsCxt->pCtbMeta->uid, sizeof(pStbRowsCxt->pCtbMeta->uid), + &pData, POINTER_BYTES); if (TSDB_CODE_SUCCESS != code) { break; } @@ -2249,7 +2305,7 @@ static int32_t parseDataFromFileImpl(SInsertParseContext* pCxt, SVnodeModifyOpSt if (!pStmt->stbSyntax && numOfRows > 0) { void* pData = rowsDataCxt.pTableDataCxt; code = taosHashPut(pStmt->pTableCxtHashObj, &pStmt->pTableMeta->uid, sizeof(pStmt->pTableMeta->uid), &pData, - POINTER_BYTES); + POINTER_BYTES); } return code; @@ -2363,8 +2419,7 @@ static int32_t constructStbRowsDataContext(SVnodeModifyOpStmt* pStmt, SStbRowsDa if (TSDB_CODE_SUCCESS == code) { // col values and bound cols info of STableDataContext is not used pStbRowsCxt->aColVals = taosArrayInit(getNumOfColumns(pStbRowsCxt->pStbMeta), sizeof(SColVal)); - if (!pStbRowsCxt->aColVals) - code = terrno; + if (!pStbRowsCxt->aColVals) code = terrno; } if (TSDB_CODE_SUCCESS == code) { code = insInitColValues(pStbRowsCxt->pStbMeta, pStbRowsCxt->aColVals); @@ -2422,9 +2477,6 @@ static int32_t parseInsertStbClauseBottom(SInsertParseContext* pCxt, SVnodeModif // 1. [(tag1_name, ...)] ... // 2. VALUES ... | FILE ... static int32_t parseInsertTableClauseBottom(SInsertParseContext* pCxt, SVnodeModifyOpStmt* pStmt) { - if (pStmt->stbSyntax && TSDB_QUERY_HAS_TYPE(pStmt->insertType, TSDB_QUERY_TYPE_STMT_INSERT)) { - return buildInvalidOperationMsg(&pCxt->msg, "insert into super table syntax is not supported for stmt"); - } if (!pStmt->stbSyntax) { STableDataCxt* pTableCxt = NULL; int32_t code = parseSchemaClauseBottom(pCxt, pStmt, &pTableCxt); @@ -2511,9 +2563,9 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif } // db.? situation,ensure that the only thing following the '.' mark is '?' - char *tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); + char* tbNameAfterDbName = strnchr(pTbName->z, '.', pTbName->n, true); if ((tbNameAfterDbName != NULL) && (*(tbNameAfterDbName + 1) == '?')) { - char *tbName = NULL; + char* tbName = NULL; if (NULL == pCxt->pComCxt->pStmtCb) { return buildSyntaxErrMsg(&pCxt->msg, "? only used in stmt", pTbName->z); } @@ -2528,7 +2580,8 @@ static int32_t checkTableClauseFirstToken(SInsertParseContext* pCxt, SVnodeModif if (pCxt->isStmtBind) { if (TK_NK_ID == pTbName->type || (tbNameAfterDbName != NULL && *(tbNameAfterDbName + 1) != '?')) { // In SQL statements, the table name has already been specified. 
- parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", pCxt->pComCxt->requestId); + parserWarn("0x%" PRIx64 " table name is specified in sql, ignore the table name in bind param", + pCxt->pComCxt->requestId); } } @@ -2614,7 +2667,7 @@ static void destroySubTableHashElem(void* p) { taosMemoryFree(*(STableMeta**)p); static int32_t createVnodeModifOpStmt(SInsertParseContext* pCxt, bool reentry, SNode** pOutput) { SVnodeModifyOpStmt* pStmt = NULL; - int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); + int32_t code = nodesMakeNode(QUERY_NODE_VNODE_MODIFY_STMT, (SNode**)&pStmt); if (NULL == pStmt) { return code; } @@ -2729,7 +2782,7 @@ static int32_t buildTagNameFromMeta(STableMeta* pMeta, SArray** pTagName) { return terrno; } SSchema* pSchema = getTableTagSchema(pMeta); - int32_t code = 0; + int32_t code = 0; for (int32_t i = 0; i < pMeta->tableInfo.numOfTags; ++i) { if (NULL == taosArrayPush(*pTagName, pSchema[i].name)) { code = terrno; @@ -2834,7 +2887,7 @@ static int32_t resetVnodeModifOpStmt(SInsertParseContext* pCxt, SQuery* pQuery) } if (NULL == pStmt->pTableBlockHashObj) { pStmt->pTableBlockHashObj = - taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); } if (NULL == pStmt->pVgroupsHashObj || NULL == pStmt->pTableBlockHashObj) { code = TSDB_CODE_OUT_OF_MEMORY; @@ -2866,7 +2919,7 @@ static int32_t initInsertQuery(SInsertParseContext* pCxt, SCatalogReq* pCatalogR static int32_t setRefreshMeta(SQuery* pQuery) { SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)pQuery->pRoot; - int32_t code = 0; + int32_t code = 0; if (taosHashGetSize(pStmt->pTableNameHashObj) > 0) { taosArrayDestroy(pQuery->pTableList); @@ -3065,9 +3118,10 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal .forceUpdate = (NULL != pCatalogReq ? 
pCatalogReq->forceUpdate : false), .isStmtBind = pCxt->isStmtBind}; - int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); + int32_t code = initInsertQuery(&context, pCatalogReq, pMetaData, pQuery); + SVnodeModifyOpStmt* pStmt = (SVnodeModifyOpStmt*)((*pQuery)->pRoot); if (TSDB_CODE_SUCCESS == code) { - code = parseInsertSqlImpl(&context, (SVnodeModifyOpStmt*)(*pQuery)->pRoot); + code = parseInsertSqlImpl(&context, pStmt); } if (TSDB_CODE_SUCCESS == code) { code = setNextStageInfo(&context, *pQuery, pCatalogReq); @@ -3076,8 +3130,8 @@ int32_t parseInsertSql(SParseContext* pCxt, SQuery** pQuery, SCatalogReq* pCatal QUERY_EXEC_STAGE_SCHEDULE == (*pQuery)->execStage) { code = setRefreshMeta(*pQuery); } - insDestroyBoundColInfo(&context.tags); + insDestroyBoundColInfo(&context.tags); // if no data to insert, set emptyMode to avoid request server if (!context.needRequest) { (*pQuery)->execMode = QUERY_EXEC_MODE_EMPTY_RESULT; diff --git a/source/libs/parser/src/parInsertStmt.c b/source/libs/parser/src/parInsertStmt.c index ee61611bf2..0979028e6d 100644 --- a/source/libs/parser/src/parInsertStmt.c +++ b/source/libs/parser/src/parInsertStmt.c @@ -242,7 +242,7 @@ int32_t qBindStmtTagsValue(void* pBlock, void* boundTags, int64_t suid, const ch } code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -594,7 +594,7 @@ int32_t qBindStmtTagsValue2(void* pBlock, void* boundTags, int64_t suid, const c } code = insBuildCreateTbReq(pDataBlock->pData->pCreateTbReq, tName, pTag, suid, sTableName, tagName, - pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); + pDataBlock->pMeta->tableInfo.numOfTags, TSDB_DEFAULT_TABLE_TTL); pTag = NULL; end: @@ -797,6 +797,10 @@ int32_t qBindStmtColsValue2(void* pBlock, SArray* pCols, TAOS_STMT2_BIND* bind, for (int c = 0; c < boundInfo->numOfBound; ++c) { SSchema* pColSchema = &pSchema[boundInfo->pColIndex[c]]; SColData* pCol = taosArrayGet(pCols, c); + if (pCol == NULL || pColSchema == NULL) { + code = buildInvalidOperationMsg(&pBuf, "get column schema or column data failed"); + goto _return; + } if (bind[c].num != rowNum) { code = buildInvalidOperationMsg(&pBuf, "row number in each bind param should be the same"); @@ -886,7 +890,7 @@ _return: int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_E** fields, uint8_t timePrec) { - if (fields) { + if (fields != NULL) { *fields = taosMemoryCalloc(numOfBound, sizeof(TAOS_FIELD_E)); if (NULL == *fields) { return terrno; @@ -910,6 +914,44 @@ int32_t buildBoundFields(int32_t numOfBound, int16_t* boundColumns, SSchema* pSc return TSDB_CODE_SUCCESS; } +int32_t buildStbBoundFields(SBoundColInfo boundColsInfo, SSchema* pSchema, int32_t* fieldNum, TAOS_FIELD_STB** fields, + STableMeta* pMeta) { + if (fields != NULL) { + *fields = taosMemoryCalloc(boundColsInfo.numOfBound, sizeof(TAOS_FIELD_STB)); + if (NULL == *fields) { + return terrno; + } + + SSchema* schema = &pSchema[boundColsInfo.pColIndex[0]]; + if (TSDB_DATA_TYPE_TIMESTAMP == schema->type) { + (*fields)[0].precision = pMeta->tableInfo.precision; + } + + for (int32_t i = 0; i < boundColsInfo.numOfBound; ++i) { + int16_t idx = boundColsInfo.pColIndex[i]; + + if (idx == pMeta->tableInfo.numOfColumns + pMeta->tableInfo.numOfTags) { + (*fields)[i].field_type = TAOS_FIELD_TBNAME; + 
tstrncpy((*fields)[i].name, "tbname", sizeof((*fields)[i].name)); + continue; + } else if (idx < pMeta->tableInfo.numOfColumns) { + (*fields)[i].field_type = TAOS_FIELD_COL; + } else { + (*fields)[i].field_type = TAOS_FIELD_TAG; + } + + schema = &pSchema[idx]; + tstrncpy((*fields)[i].name, schema->name, sizeof((*fields)[i].name)); + (*fields)[i].type = schema->type; + (*fields)[i].bytes = schema->bytes; + } + } + + *fieldNum = boundColsInfo.numOfBound; + + return TSDB_CODE_SUCCESS; +} + int32_t qBuildStmtTagFields(void* pBlock, void* boundTags, int32_t* fieldNum, TAOS_FIELD_E** fields) { STableDataCxt* pDataBlock = (STableDataCxt*)pBlock; SBoundColInfo* tags = (SBoundColInfo*)boundTags; @@ -939,7 +981,7 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta); if (pDataBlock->boundColsInfo.numOfBound <= 0) { *fieldNum = 0; - if (fields) { + if (fields != NULL) { *fields = NULL; } @@ -952,6 +994,23 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_E** fiel return TSDB_CODE_SUCCESS; } +int32_t qBuildStmtStbColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD_STB** fields) { + STableDataCxt* pDataBlock = (STableDataCxt*)pBlock; + SSchema* pSchema = getTableColumnSchema(pDataBlock->pMeta); + if (pDataBlock->boundColsInfo.numOfBound <= 0) { + *fieldNum = 0; + if (fields != NULL) { + *fields = NULL; + } + + return TSDB_CODE_SUCCESS; + } + + CHECK_CODE(buildStbBoundFields(pDataBlock->boundColsInfo, pSchema, fieldNum, fields, pDataBlock->pMeta)); + + return TSDB_CODE_SUCCESS; +} + int32_t qResetStmtColumns(SArray* pCols, bool deepClear) { int32_t colNum = taosArrayGetSize(pCols); diff --git a/tests/script/api/makefile b/tests/script/api/makefile index d8a4e19218..9c2bb6be3d 100644 --- a/tests/script/api/makefile +++ b/tests/script/api/makefile @@ -25,6 +25,7 @@ exe: gcc $(CFLAGS) ./stmt.c -o $(ROOT)stmt $(LFLAGS) gcc $(CFLAGS) ./stmt2.c -o $(ROOT)stmt2 $(LFLAGS) gcc $(CFLAGS) ./stmt2-example.c -o $(ROOT)stmt2-example $(LFLAGS) + gcc $(CFLAGS) ./stmt2-get-fields.c -o $(ROOT)stmt2-get-fields $(LFLAGS) gcc $(CFLAGS) ./stmt2-nohole.c -o $(ROOT)stmt2-nohole $(LFLAGS) gcc $(CFLAGS) ./stmt-crash.c -o $(ROOT)stmt-crash $(LFLAGS) @@ -42,5 +43,6 @@ clean: rm $(ROOT)stmt rm $(ROOT)stmt2 rm $(ROOT)stmt2-example + rm $(ROOT)stmt2-get-fields rm $(ROOT)stmt2-nohole rm $(ROOT)stmt-crash diff --git a/tests/script/api/stmt2-get-fields.c b/tests/script/api/stmt2-get-fields.c new file mode 100644 index 0000000000..befde39f8a --- /dev/null +++ b/tests/script/api/stmt2-get-fields.c @@ -0,0 +1,129 @@ +// TAOS standard API example. The same syntax as MySQL, but only a subet +// to compile: gcc -o stmt2-get-fields stmt2-get-fields.c -ltaos + +#include +#include +#include +#include "taos.h" + +void getFields(TAOS *taos, const char *sql) { + TAOS_STMT2_OPTION option = {0}; + TAOS_STMT2 *stmt = taos_stmt2_init(taos, &option); + int code = taos_stmt2_prepare(stmt, sql, 0); + if (code != 0) { + printf("failed to execute taos_stmt2_prepare. 
error:%s\n", taos_stmt2_error(stmt)); + taos_stmt2_close(stmt); + return; + } + int fieldNum = 0; + TAOS_FIELD_STB *pFields = NULL; + code = taos_stmt2_get_stb_fields(stmt, &fieldNum, &pFields); + if (code != 0) { + printf("failed get col,ErrCode: 0x%x, ErrMessage: %s.\n", code, taos_stmt2_error(stmt)); + } else { + printf("col nums:%d\n", fieldNum); + for (int i = 0; i < fieldNum; i++) { + printf("field[%d]: %s, data_type:%d, field_type:%d\n", i, pFields[i].name, pFields[i].type, + pFields[i].field_type); + } + } + printf("====================================\n"); + taos_stmt2_free_stb_fields(stmt, pFields); + taos_stmt2_close(stmt); +} + +void do_query(TAOS *taos, const char *sql) { + TAOS_RES *result = taos_query(taos, sql); + int code = taos_errno(result); + if (code) { + printf("failed to query: %s, reason:%s\n", sql, taos_errstr(result)); + taos_free_result(result); + return; + } + taos_free_result(result); +} + +void do_stmt(TAOS *taos) { + do_query(taos, "drop database if exists db"); + do_query(taos, "create database db"); + do_query(taos, + "create table db.stb (ts timestamp, b binary(10)) tags(t1 " + "int, t2 binary(10))"); + do_query(taos, "CREATE TABLE db.d0 USING db.stb (t1,t2) TAGS (7,'Cali');"); + do_query(taos, "CREATE TABLE db.ntb(nts timestamp, nb binary(10),nvc varchar(16),ni int);"); + + printf("field_type: TAOS_FIELD_COL = 1, TAOS_FIELD_TAG=2, TAOS_FIELD_QUERY=3, TAOS_FIELD_TBNAME=4\n"); + + // case 1 : INSERT INTO db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?) + // test super table + const char *sql = "insert into db.stb(t1,t2,ts,b,tbname) values(?,?,?,?,?)"; + printf("====================================\n"); + printf("case 1 : %s\n", sql); + getFields(taos, sql); + + // case 2 : INSERT INTO db.d0 VALUES (?,?) + // test child table + sql = "INSERT INTO db.d0(ts,b) VALUES (?,?)"; + printf("case 2 : %s\n", sql); + getFields(taos, sql); + + // case 3 : INSERT INTO db.ntb VALUES(?,?,?,?) + // test normal table + sql = "INSERT INTO db.ntb VALUES(?,?,?,?)"; + printf("case 3 : %s\n", sql); + getFields(taos, sql); + + // case 4 : INSERT INTO db.? using db.stb TAGS(?,?) VALUES(?,?) + // not support this clause + sql = "insert into db.? using db.stb tags(?, ?) values(?,?)"; + printf("case 4 (not support): %s\n", sql); + getFields(taos, sql); + + // case 5 : INSERT INTO db.stb(t1,t2,ts,b) values(?,?,?,?) + // no tbname error + sql = "INSERT INTO db.stb(t1,t2,ts,b) values(?,?,?,?)"; + printf("case 5 (no tbname error): %s\n", sql); + getFields(taos, sql); + + // case 6 : INSERT INTO db.d0 using db.stb values(?,?) + // none para for ctbname + sql = "INSERT INTO db.d0 using db.stb values(?,?)"; + printf("case 6 (no tags error): %s\n", sql); + getFields(taos, sql); + + // case 7 : insert into db.stb(t1,t2,tbname) values(?,?,?) + // no value + sql = "insert into db.stb(t1,t2,tbname) values(?,?,?)"; + printf("case 7 (no PK error): %s\n", sql); + getFields(taos, sql); + + // case 8 : insert into db.stb(ts,b,tbname) values(?,?,?) + // no tag + sql = "insert into db.stb(ts,b,tbname) values(?,?,?)"; + printf("case 8 : %s\n", sql); + getFields(taos, sql); + + // case 9 : insert into db.stb(ts,b,tbname) values(?,?,?,?,?) + // wrong para nums + sql = "insert into db.stb(ts,b,tbname) values(?,?,?,?,?)"; + printf("case 9 (wrong para nums): %s\n", sql); + getFields(taos, sql); + + // case 10 : insert into db.ntb(nts,ni) values(?,?,?,?,?) 
+ // wrong para nums + sql = "insert into db.ntb(nts,ni) values(?,?)"; + printf("case 10 : %s\n", sql); + getFields(taos, sql); +} + +int main() { + TAOS *taos = taos_connect("localhost", "root", "taosdata", "", 0); + if (!taos) { + printf("failed to connect to db, reason:%s\n", taos_errstr(taos)); + exit(1); + } + + do_stmt(taos); + taos_close(taos); + taos_cleanup(); +} \ No newline at end of file From 097174f609a0b8fd8a73da79bd381b3b9919b462 Mon Sep 17 00:00:00 2001 From: yingzhao Date: Mon, 4 Nov 2024 17:24:16 +0800 Subject: [PATCH 100/127] fix(docs): compile error for using curly brace --- docs/zh/06-advanced/06-data-analysis/addins.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/06-advanced/06-data-analysis/addins.md b/docs/zh/06-advanced/06-data-analysis/addins.md index 867bd9ff23..8b50c93d85 100644 --- a/docs/zh/06-advanced/06-data-analysis/addins.md +++ b/docs/zh/06-advanced/06-data-analysis/addins.md @@ -143,7 +143,7 @@ def test_iqr(self): 并在 model 目录下建立对应的文件夹存放该模型。 -保存模型的调用,可参考 encoder.py 的方式,用户通过调用 set_params 方法,并指定参数 {"model": "ad_encoder_keras"} 的方式,可以调用该模型进行计算。 +保存模型的调用,可参考 encoder.py 的方式,用户通过调用 set_params 方法,并指定参数 `{"model": "ad_encoder_keras"}` 的方式,可以调用该模型进行计算。 具体的调用方式如下: From 51e9f93ae68ebfbfd0f758ec16c9bce2131d0041 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Mon, 4 Nov 2024 18:44:50 +0800 Subject: [PATCH 101/127] requestid --- source/client/src/clientMonitor.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/client/src/clientMonitor.c b/source/client/src/clientMonitor.c index 901f4ceef8..5f7e11b6a3 100644 --- a/source/client/src/clientMonitor.c +++ b/source/client/src/clientMonitor.c @@ -940,7 +940,7 @@ int32_t reportCB(void* param, SDataBuf* pMsg, int32_t code) { return 0; } -int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len) { +int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len, uint64_t requestId) { SMsgSendInfo* sendInfo = taosMemoryCalloc(1, sizeof(SMsgSendInfo)); if (sendInfo == NULL) { tscError("[del report]failed to allocate memory for sendInfo"); @@ -949,7 +949,7 @@ int32_t senAuditInfo(STscObj* pTscObj, void* pReq, int32_t len) { sendInfo->msgInfo = (SDataBuf){.pData = pReq, .len = len, .handle = NULL}; - sendInfo->requestId = generateRequestId(); + sendInfo->requestId = requestId; sendInfo->requestObjRefId = 0; sendInfo->param = NULL; sendInfo->fp = reportCB; @@ -1010,7 +1010,7 @@ static void reportDeleteSql(SRequestObj* pRequest) { return; } - int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen); + int32_t code = senAuditInfo(pRequest->pTscObj, pReq, tlen, pRequest->requestId); if (code != 0) { tscError("[del report]failed to send audit info, code:%d", code); taosMemoryFree(pReq); @@ -1020,7 +1020,7 @@ static void reportDeleteSql(SRequestObj* pRequest) { } void clientOperateReport(SRequestObj* pRequest) { - if (pRequest == NULL || pRequest->pQuery == NULL) { + if (pRequest == NULL || pRequest->pQuery == NULL || pRequest->pQuery->pRoot == NULL) { tscError("[del report]invalid request"); return; } From 109c1a799f5a58f16582f783fca7ee4573c70a1f Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 4 Nov 2024 18:49:36 +0800 Subject: [PATCH 102/127] test/blob: perf json file for taos benchmark --- tests/army/storage/blob/perf.json | 67 +++++++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) create mode 100644 tests/army/storage/blob/perf.json diff --git a/tests/army/storage/blob/perf.json b/tests/army/storage/blob/perf.json new file mode 100644 
index 0000000000..002515873e --- /dev/null +++ b/tests/army/storage/blob/perf.json @@ -0,0 +1,67 @@ +{ + "filetype": "insert", + "cfgdir": "/etc/taos", + "host": "127.0.0.1", + "port": 6030, + "user": "root", + "password": "taosdata", + "connection_pool_size": 8, + "num_of_records_per_req": 4000, + "prepared_rand": 500, + "thread_count": 4, + "create_table_thread_count": 1, + "confirm_parameter_prompt": "no", + "databases": [ + { + "dbinfo": { + "name": "db", + "drop": "yes", + "vgroups": 3, + "replica": 3, + "duration":"10d", + "s3_keeplocal":"30d", + "s3_chunkpages":"131072", + "tsdb_pagesize":"1", + "s3_compact":"1", + "wal_retention_size":"1", + "wal_retention_period":"1", + "flush_each_batch":"no", + "keep": "3650d" + }, + "super_tables": [ + { + "name": "stb", + "child_table_exists": "no", + "childtable_count": 500, + "insert_rows": 200000, + "childtable_prefix": "d", + "insert_mode": "taosc", + "timestamp_step": 100, + "start_timestamp": 1600000000000, + "columns": [ + { "type": "bool", "name": "bc"}, + { "type": "float", "name": "fc" }, + { "type": "double", "name": "dc"}, + { "type": "tinyint", "name": "ti"}, + { "type": "smallint", "name": "si" }, + { "type": "int", "name": "ic" ,"max": 1,"min": 1}, + { "type": "bigint", "name": "bi" }, + { "type": "utinyint", "name": "uti"}, + { "type": "usmallint", "name": "usi"}, + { "type": "uint", "name": "ui" }, + { "type": "ubigint", "name": "ubi"}, + { "type": "binary", "name": "bin", "len": 50}, + { "type": "nchar", "name": "nch", "len": 100} + ], + "tags": [ + {"type": "tinyint", "name": "groupid","max": 10,"min": 1}, + {"name": "location","type": "binary", "len": 16, "values": + ["San Francisco", "Los Angles", "San Diego", "San Jose", "Palo Alto", "Campbell", "Mountain View","Sunnyvale", "Santa Clara", "Cupertino"] + } + ] + } + ] + } + ] +} + From d572f9e38779047463ebb35bdf1d4f45b9541af8 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 4 Nov 2024 19:21:27 +0800 Subject: [PATCH 103/127] blob/doc: connect blob directly without flexify --- docs/zh/08-operation/12-multi.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/docs/zh/08-operation/12-multi.md b/docs/zh/08-operation/12-multi.md index 5489226ce1..1e81a7ff1e 100644 --- a/docs/zh/08-operation/12-multi.md +++ b/docs/zh/08-operation/12-multi.md @@ -163,3 +163,15 @@ s3BucketName td-test - 认为全部 S3 服务均指向同一数据源,对各个 S3 服务操作完全等价 - 在某一 S3 服务上操作失败后会切换至其他服务,全部服务都失败后将返回最后产生的错误码 - 最大支持的 S3 服务配置数为 10 + +### 不依赖 Flexify 服务 + +用户界面同 S3,不同的地方在于下面三个参数的配置: + +| # | 参数 | 示例值 | 描述 | +| :--- | :----------- | :--------------------------------------- | :----------------------------------------------------------- | +| 1 | s3EndPoint | https://fd2d01c73.blob.core.windows.net | Blob URL | +| 2 | s3AccessKey | fd2d01c73:veUy/iRBeWaI2YAerl+AStw6PPqg== | 冒号分隔的用户 accountId:accountKey | +| 3 | s3BucketName | test-container | Container name | + +其中 fd2d01c73 是账户 ID;微软 Blob 存储服务只支持 Https 协议,不支持 Http。 From d75d22eb3c707d26a7c30f6cef602144d8d2ade7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Nov 2024 19:38:18 +0800 Subject: [PATCH 104/127] fix(stream): check the right return code for concurrent checkpoint trans. 
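
Before this change, the launcher loop in mndProcessStreamCheckpoint counted a stream
against the concurrent-checkpoint launch quota only when mndProcessStreamCheckpointTrans
failed (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS), so a failed launch consumed
the quota while successful starts were never counted. The check is inverted so that only
transactions that actually start, or are already in progress, are counted, and failures
are logged instead. A simplified sketch of the corrected loop body, excerpted from the
mndStream.c hunk below (surrounding sdb iteration elided):

    code = mndProcessStreamCheckpointTrans(pMnode, p, checkpointId, 1, true);
    sdbRelease(pSdb, p);

    if (code == 0 || code == TSDB_CODE_ACTION_IN_PROGRESS) {
      started += 1;                 // count only checkpoint trans that really started
      if (started >= capacity) {    // stop once this round's launch quota is used up
        break;
      }
    } else {
      mError("failed to start checkpoint trans, code:%s", tstrerror(code));
    }

The limit itself comes from the concurrentCheckpoint server option, which is now loaded
into tsMaxConcurrentCheckpoint in taosSetServerCfg; the new concurrentcheckpt.sim case
exercises it with "system sh/cfg.sh -n dnode1 -c concurrentCheckpoint -v 2".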
--- source/common/src/tglobal.c | 3 + source/dnode/mnode/impl/src/mndStream.c | 11 ++- .../script/tsim/stream/concurrentcheckpt.sim | 79 +++++++++++++++++++ 3 files changed, 89 insertions(+), 4 deletions(-) create mode 100644 tests/script/tsim/stream/concurrentcheckpt.sim diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 00052cc810..2104562c0b 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1646,6 +1646,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval"); tsStreamCheckpointInterval = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "concurrentCheckpoint"); + tsMaxConcurrentCheckpoint = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate"); tsSinkDataRate = pItem->fval; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a4327b777f..88b43d497f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1284,9 +1284,10 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { void* p = taosArrayPush(pList, &in); if (p) { int32_t currentSize = taosArrayGetSize(pList); - mDebug("stream:%s (uid:0x%" PRIx64 ") checkpoint interval beyond threshold: %ds(%" PRId64 - "s) beyond concurrently launch threshold:%d", - pStream->name, pStream->uid, tsStreamCheckpointInterval, duration / 1000, currentSize); + mDebug("stream:%s (uid:0x%" PRIx64 ") total %d stream(s) beyond chpt interval threshold: %ds(%" PRId64 + "s), concurrently launch threshold:%d", + pStream->name, pStream->uid, currentSize, tsStreamCheckpointInterval, duration / 1000, + tsMaxConcurrentCheckpoint); } else { mError("failed to record the checkpoint interval info, stream:0x%" PRIx64, pStream->uid); } @@ -1338,7 +1339,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { code = mndProcessStreamCheckpointTrans(pMnode, p, checkpointId, 1, true); sdbRelease(pSdb, p); - if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { + if (code == 0 || code == TSDB_CODE_ACTION_IN_PROGRESS) { started += 1; if (started >= capacity) { @@ -1346,6 +1347,8 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { (started + numOfCheckpointTrans)); break; } + } else { + mError("failed to start checkpoint trans, code:%s", tstrerror(code)); } } } diff --git a/tests/script/tsim/stream/concurrentcheckpt.sim b/tests/script/tsim/stream/concurrentcheckpt.sim new file mode 100644 index 0000000000..4162617deb --- /dev/null +++ b/tests/script/tsim/stream/concurrentcheckpt.sim @@ -0,0 +1,79 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c supportVnodes -v 1 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select task_id, checkpoint_id from information_schema.ins_stream_tasks order by checkpoint_id; + +print $data01 $data11 $data21 +if $data01 == 
$data11 then + print not allowed 2 checkpoint start completed + return -1 +endi + +if $data11 == $data21 then + print not allowed 2 checkpoints start concurrently + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + +print ========== concurrent checkpoint is set 2 + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c concurrentCheckpoint -v 2 + +system sh/exec.sh -n dnode1 -s start + +print ========== step2 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select count(*) a, checkpoint_id from information_schema.ins_stream_tasks group by checkpoint_id order by a; +print $data00 $data01 +print $data10 $data11 + +if $data00 != 1 then + print expect 1, actual $data00 + return -1 +endi + +if $data10 != 2 then + print expect 2, actual $data10 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 0614efdffd0c776f9a1d461f62d9db84fa03da3e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Nov 2024 19:38:18 +0800 Subject: [PATCH 105/127] fix(stream): check the right return code for concurrent checkpoint trans. --- source/common/src/tglobal.c | 3 + source/dnode/mnode/impl/src/mndStream.c | 11 ++- .../script/tsim/stream/concurrentcheckpt.sim | 79 +++++++++++++++++++ 3 files changed, 89 insertions(+), 4 deletions(-) create mode 100644 tests/script/tsim/stream/concurrentcheckpt.sim diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 834615bdaa..6caddf4703 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1646,6 +1646,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval"); tsStreamCheckpointInterval = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "concurrentCheckpoint"); + tsMaxConcurrentCheckpoint = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate"); tsSinkDataRate = pItem->fval; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index e3f9adf033..8abe739f25 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1294,9 +1294,10 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { void* p = taosArrayPush(pList, &in); if (p) { int32_t currentSize = taosArrayGetSize(pList); - mDebug("stream:%s (uid:0x%" PRIx64 ") checkpoint interval beyond threshold: %ds(%" PRId64 - "s) beyond concurrently launch threshold:%d", - pStream->name, pStream->uid, tsStreamCheckpointInterval, duration / 1000, currentSize); + mDebug("stream:%s (uid:0x%" PRIx64 ") total %d stream(s) beyond chpt interval threshold: %ds(%" PRId64 + "s), concurrently launch threshold:%d", + pStream->name, pStream->uid, currentSize, tsStreamCheckpointInterval, duration / 1000, + tsMaxConcurrentCheckpoint); } else { mError("failed to record the checkpoint interval info, stream:0x%" PRIx64, pStream->uid); } @@ -1348,7 +1349,7 @@ 
static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { code = mndProcessStreamCheckpointTrans(pMnode, p, checkpointId, 1, true); sdbRelease(pSdb, p); - if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { + if (code == 0 || code == TSDB_CODE_ACTION_IN_PROGRESS) { started += 1; if (started >= capacity) { @@ -1356,6 +1357,8 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { (started + numOfCheckpointTrans)); break; } + } else { + mError("failed to start checkpoint trans, code:%s", tstrerror(code)); } } } diff --git a/tests/script/tsim/stream/concurrentcheckpt.sim b/tests/script/tsim/stream/concurrentcheckpt.sim new file mode 100644 index 0000000000..4162617deb --- /dev/null +++ b/tests/script/tsim/stream/concurrentcheckpt.sim @@ -0,0 +1,79 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c supportVnodes -v 1 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select task_id, checkpoint_id from information_schema.ins_stream_tasks order by checkpoint_id; + +print $data01 $data11 $data21 +if $data01 == $data11 then + print not allowed 2 checkpoint start completed + return -1 +endi + +if $data11 == $data21 then + print not allowed 2 checkpoints start concurrently + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + +print ========== concurrent checkpoint is set 2 + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c concurrentCheckpoint -v 2 + +system sh/exec.sh -n dnode1 -s start + +print ========== step2 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select count(*) a, checkpoint_id from information_schema.ins_stream_tasks group by checkpoint_id order by a; +print $data00 $data01 +print $data10 $data11 + +if $data00 != 1 then + print expect 1, actual $data00 + return -1 +endi + +if $data10 != 2 then + print expect 2, actual $data10 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 26334df0905d61e1950527e081964ffbc9411008 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Nov 2024 20:13:49 +0800 Subject: [PATCH 106/127] test: add a new test case. 
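
Register the new stream concurrent-checkpoint case so that it runs in CI: it is added to
tests/parallel_test/cases.task and to tsim/testsuit.sim. The case can also be run on its
own from tests/script with ./test.sh -f tsim/stream/concurrentcheckpt.sim (assuming the
standard sim test harness).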
--- tests/parallel_test/cases.task | 1 + tests/script/tsim/testsuit.sim | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index d389c27929..4769c5b67a 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1319,6 +1319,7 @@ ,,y,script,./test.sh -f tsim/stream/basic2.sim ,,y,script,./test.sh -f tsim/stream/basic3.sim ,,y,script,./test.sh -f tsim/stream/basic4.sim +,,y,script,./test.sh -f tsim/stream/concurrentcheckpt.sim ,,y,script,./test.sh -f tsim/stream/checkpointInterval0.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable1.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable.sim diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim index c208a07488..fbc89b196b 100644 --- a/tests/script/tsim/testsuit.sim +++ b/tests/script/tsim/testsuit.sim @@ -110,6 +110,7 @@ run tsim/stream/distributeInterval0.sim run tsim/stream/distributeSession0.sim run tsim/stream/state0.sim run tsim/stream/basic2.sim +run tsim/stream/concurrentcheckpt.sim run tsim/insert/basic1.sim run tsim/insert/commit-merge0.sim run tsim/insert/basic0.sim From fea19d5ad11e46a037157aa40a9793efa9562eb3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Nov 2024 20:13:49 +0800 Subject: [PATCH 107/127] test: add a new test case. --- tests/parallel_test/cases.task | 2 +- tests/script/tsim/testsuit.sim | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 4923858a3e..77e4cbdbe5 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -48,7 +48,7 @@ ,,y,army,./pytest.sh python3 ./test.py -f query/window/base.py ,,y,army,./pytest.sh python3 ./test.py -f query/sys/tb_perf_queries_exist_test.py -N 3 ,,y,army,./pytest.sh python3 ./test.py -f query/test_having.py -,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py +,,n,army,python3 ./test.py -f tmq/drop_lost_comsumers.py # # system test # diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim index ff3bb82aaf..ec52b8c234 100644 --- a/tests/script/tsim/testsuit.sim +++ b/tests/script/tsim/testsuit.sim @@ -111,6 +111,7 @@ run tsim/stream/distributeInterval0.sim run tsim/stream/distributeSession0.sim run tsim/stream/state0.sim run tsim/stream/basic2.sim +run tsim/stream/concurrentcheckpt.sim run tsim/insert/basic1.sim run tsim/insert/commit-merge0.sim run tsim/insert/basic0.sim From 2942542e8517a4da187982cf9d0827a2c3a1a305 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 4 Nov 2024 20:17:41 +0800 Subject: [PATCH 108/127] fix: compile errors --- source/libs/transport/test/cliBench.c | 2 +- source/libs/transport/test/svrBench.c | 4 ++-- source/libs/transport/test/transUT.cpp | 6 +++--- source/libs/transport/test/transUT2.cpp | 6 +++--- tools/shell/src/shellArguments.c | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/source/libs/transport/test/cliBench.c b/source/libs/transport/test/cliBench.c index 6330b2d2c6..0a5cb5d1bb 100644 --- a/source/libs/transport/test/cliBench.c +++ b/source/libs/transport/test/cliBench.c @@ -158,7 +158,7 @@ int main(int argc, char *argv[]) { } initLogEnv(); - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer)); void *pRpc = rpcOpen(&rpcInit); if (pRpc == NULL) { tError("failed to initialize RPC"); diff --git a/source/libs/transport/test/svrBench.c b/source/libs/transport/test/svrBench.c index 
dacde982ed..44299d86a3 100644 --- a/source/libs/transport/test/svrBench.c +++ b/source/libs/transport/test/svrBench.c @@ -127,7 +127,7 @@ int main(int argc, char *argv[]) { rpcInit.cfp = processRequestMsg; rpcInit.idleTime = 2 * 1500; - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer)); rpcDebugFlag = 131; rpcInit.compressSize = -1; @@ -165,7 +165,7 @@ int main(int argc, char *argv[]) { rpcInit.connType = TAOS_CONN_SERVER; initLogEnv(); - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit.compatibilityVer)); void *pRpc = rpcOpen(&rpcInit); if (pRpc == NULL) { tError("failed to start RPC server"); diff --git a/source/libs/transport/test/transUT.cpp b/source/libs/transport/test/transUT.cpp index cac8abf857..8e396d59d7 100644 --- a/source/libs/transport/test/transUT.cpp +++ b/source/libs/transport/test/transUT.cpp @@ -56,7 +56,7 @@ class Client { rpcInit_.connType = TAOS_CONN_CLIENT; rpcInit_.shareConnLimit = 200; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); tsem_init(&this->sem, 0, 0); } @@ -69,7 +69,7 @@ class Client { void Restart(CB cb) { rpcClose(this->transCli); rpcInit_.cfp = cb; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); } void Stop() { @@ -129,7 +129,7 @@ class Server { rpcInit_.cfp = processReq; rpcInit_.user = (char *)user; rpcInit_.connType = TAOS_CONN_SERVER; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); } void Start() { this->transSrv = rpcOpen(&this->rpcInit_); diff --git a/source/libs/transport/test/transUT2.cpp b/source/libs/transport/test/transUT2.cpp index f69151813e..54d23b1f64 100644 --- a/source/libs/transport/test/transUT2.cpp +++ b/source/libs/transport/test/transUT2.cpp @@ -56,7 +56,7 @@ class Client { rpcInit_.connType = TAOS_CONN_CLIENT; rpcInit_.shareConnLimit = 200; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); //tsem_init(&this->sem, 0, 0); } @@ -69,7 +69,7 @@ class Client { void Restart(CB cb) { rpcClose(this->transCli); rpcInit_.cfp = cb; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); this->transCli = rpcOpen(&rpcInit_); } void Stop() { @@ -139,7 +139,7 @@ class Server { rpcInit_.cfp = processReq; rpcInit_.user = (char *)user; rpcInit_.connType = TAOS_CONN_SERVER; - taosVersionStrToInt(version, &(rpcInit_.compatibilityVer)); + taosVersionStrToInt(td_version, &(rpcInit_.compatibilityVer)); } void Start() { this->transSrv = rpcOpen(&this->rpcInit_); diff --git a/tools/shell/src/shellArguments.c b/tools/shell/src/shellArguments.c index 5dd24dc396..442329674d 100644 --- a/tools/shell/src/shellArguments.c +++ b/tools/shell/src/shellArguments.c @@ -102,7 +102,7 @@ void shellPrintHelp() { #include #endif -const char *argp_program_version = version; +const char *argp_program_version = td_version; #ifdef CUS_EMAIL const char *argp_program_bug_address = CUS_EMAIL; #else From 635f16dd53bce83da9d00c819d1524baff2476dd Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Nov 2024 20:18:17 +0800 Subject: [PATCH 109/127] test: add a new test 
case. --- tests/parallel_test/cases.task | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 77e4cbdbe5..1182fdb93c 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1320,6 +1320,7 @@ ,,y,script,./test.sh -f tsim/stream/basic3.sim ,,y,script,./test.sh -f tsim/stream/basic4.sim ,,y,script,./test.sh -f tsim/stream/snodeCheck.sim +,,y,script,./test.sh -f tsim/stream/concurrentcheckpt.sim ,,y,script,./test.sh -f tsim/stream/checkpointInterval0.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable1.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable.sim From 47f2c60b2581ace7869908fcf8abb33b6324cd9e Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 4 Nov 2024 20:19:41 +0800 Subject: [PATCH 110/127] fix: compile errors --- source/dnode/mgmt/test/sut/src/client.cpp | 2 +- source/dnode/mnode/impl/test/profile/profile.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/mgmt/test/sut/src/client.cpp b/source/dnode/mgmt/test/sut/src/client.cpp index 95eea2359d..6f8b1eb2b4 100644 --- a/source/dnode/mgmt/test/sut/src/client.cpp +++ b/source/dnode/mgmt/test/sut/src/client.cpp @@ -54,7 +54,7 @@ void TestClient::DoInit() { rpcInit.parent = this; // rpcInit.secret = (char*)secretEncrypt; // rpcInit.spi = 1; - taosVersionStrToInt(version, &(rpcInit.compatibilityVer)); + taosVersionStrToInt(td_version, &rpcInit.compatibilityVer); clientRpc = rpcOpen(&rpcInit); ASSERT(clientRpc); diff --git a/source/dnode/mnode/impl/test/profile/profile.cpp b/source/dnode/mnode/impl/test/profile/profile.cpp index b1b94c65fb..2abe6b7ec9 100644 --- a/source/dnode/mnode/impl/test/profile/profile.cpp +++ b/source/dnode/mnode/impl/test/profile/profile.cpp @@ -76,7 +76,7 @@ TEST_F(MndTestProfile, 02_ConnectMsg_NotExistDB) { strcpy(connectReq.db, "not_exist_db"); strcpy(connectReq.user, "root"); strcpy(connectReq.passwd, secretEncrypt); - strcpy(connectReq.sVer, version); + strcpy(connectReq.sVer, td_version); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = rpcMallocCont(contLen); From 74e514dc879cf7cc0c551d2b4f5625704774080d Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 4 Nov 2024 20:22:14 +0800 Subject: [PATCH 111/127] fix: compile errors --- source/dnode/mnode/impl/test/profile/profile.cpp | 2 +- source/dnode/mnode/impl/test/show/show.cpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/test/profile/profile.cpp b/source/dnode/mnode/impl/test/profile/profile.cpp index 2abe6b7ec9..bfd8909a76 100644 --- a/source/dnode/mnode/impl/test/profile/profile.cpp +++ b/source/dnode/mnode/impl/test/profile/profile.cpp @@ -39,7 +39,7 @@ TEST_F(MndTestProfile, 01_ConnectMsg) { strcpy(connectReq.db, ""); strcpy(connectReq.user, "root"); strcpy(connectReq.passwd, secretEncrypt); - strcpy(connectReq.sVer, version); + strcpy(connectReq.sVer, td_version); int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = rpcMallocCont(contLen); diff --git a/source/dnode/mnode/impl/test/show/show.cpp b/source/dnode/mnode/impl/test/show/show.cpp index 2e67ffa946..92b914a8dc 100644 --- a/source/dnode/mnode/impl/test/show/show.cpp +++ b/source/dnode/mnode/impl/test/show/show.cpp @@ -64,7 +64,7 @@ TEST_F(MndTestShow, 03_ShowMsg_Conn) { strcpy(connectReq.db, ""); strcpy(connectReq.user, "root"); strcpy(connectReq.passwd, secretEncrypt); - strcpy(connectReq.sVer, version); + strcpy(connectReq.sVer, td_version); 
int32_t contLen = tSerializeSConnectReq(NULL, 0, &connectReq); void* pReq = rpcMallocCont(contLen); From 6857c6fd1772470adac8709e769e6a3738d2dba9 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Mon, 4 Nov 2024 22:52:51 +0800 Subject: [PATCH 112/127] fix: ci errors --- tests/system-test/0-others/sysinfo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/0-others/sysinfo.py b/tests/system-test/0-others/sysinfo.py index 43a0400f18..35e574739b 100644 --- a/tests/system-test/0-others/sysinfo.py +++ b/tests/system-test/0-others/sysinfo.py @@ -39,7 +39,7 @@ class TDTestCase: taos_list = ['server','client'] for i in taos_list: tdSql.query(f'select {i}_version()') - version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1] + version_info = str(subprocess.run('cat ../../source/util/src/version.c |grep "char td_version"', shell=True,capture_output=True).stdout.decode('utf8')).split('"')[1] tdSql.checkData(0,0,version_info) def get_server_status(self): From 1a20ae8013ad351b485c50dafd8dc1c863869611 Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Mon, 4 Nov 2024 17:47:33 +0800 Subject: [PATCH 113/127] docs: update port information --- docs/zh/04-get-started/01-docker.md | 7 ++++--- docs/zh/08-operation/02-planning.md | 24 +++++++++++++++--------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/docs/zh/04-get-started/01-docker.md b/docs/zh/04-get-started/01-docker.md index 848a7fd499..e58e7bff71 100644 --- a/docs/zh/04-get-started/01-docker.md +++ b/docs/zh/04-get-started/01-docker.md @@ -23,17 +23,18 @@ docker pull tdengine/tdengine:3.3.3.0 然后只需执行下面的命令: ```shell -docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine +docker run -d -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine ``` -注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043 为 taosKeeper 使用端口。6044-6049 TCP 端口为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +6044 和 6045 UDP 端口为 statsd 和 collectd 格式写入接口,可根据需要选择是否打开。6060 为 taosExplorer 使用端口。具体端口使用情况请参考[网络端口要求](../../operation/planning#网络端口要求)。 如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令: ```shell docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ -v ~/data/taos/dnode/log:/var/log/taos \ - -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine + -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine ``` :::note diff --git a/docs/zh/08-operation/02-planning.md b/docs/zh/08-operation/02-planning.md index 66da1df8bf..04957ed26c 100644 --- a/docs/zh/08-operation/02-planning.md +++ b/docs/zh/08-operation/02-planning.md @@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100) 与 WebSocket 连接方式相比,RESTful 连接方式在内存占用上更大,除了缓冲区所需的内存以外,还需要考虑每个连接响应结果的内存开销。这种内存开销与响应结果的JSON 数据大小密切相关,特别是在查询数据量很大时,会占用大量内存。 -由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议打开batchfetch=true 选项,以启用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险 +由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议使用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险 **注意** - 建议采用 RESTful/WebSocket 连接方式来访问 TDengine 集群,而不采用taosc 原生连接方式。 @@ -146,11 +146,17 @@ TDengine 的多级存储功能在使用上还具备以下优点。 下表列出了 TDengine 
的一些接口或组件的常用端口,这些端口均可以通过配置文件中的参数进行修改。 -|接口或组件 | 端口 | -|:---------------------------:|:---------:| -|原生接口(taosc) | 6030 | -|RESTful 接口 | 6041 | -|WebSocket 接口 |6041 | -|taosKeeper | 6043 | -|taosX | 6050, 6055 | -|taosExplorer | 6060 | \ No newline at end of file +| 接口或组件名称 | 端口 | 协议 | +|:------------------------------------:|:----------:|:-------:| +| 原生接口(taosc) | 6030 | TCP | +| RESTful 接口 | 6041 | TCP | +| WebSocket 接口 | 6041 | TCP | +| statsd 格式写入接口 | 6044 | TCP/UDP | +| collectd 格式写入接口 | 6045 | TCP/UDP | +| openTSDB Telnet 格式写入接口 | 6046 | TCP | +| collectd 使用 openTSDB Telnet 格式写入接口 | 6047 | TCP | +| icinga2 使用 openTSDB Telnet 格式写入接口 | 6048 | TCP | +| tcollector 使用 openTSDB Telnet 格式写入接口 | 6049 | TCP | +| taosKeeper | 6043 | TCP | +| taosX | 6050, 6055 | TCP | +| taosExplorer | 6060 | TCP | From 17300c8b792861c7142fe433e11e211155b402ce Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Mon, 4 Nov 2024 18:01:25 +0800 Subject: [PATCH 114/127] docs: update port information --- docs/zh/08-operation/02-planning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/08-operation/02-planning.md b/docs/zh/08-operation/02-planning.md index 04957ed26c..83f98af0bb 100644 --- a/docs/zh/08-operation/02-planning.md +++ b/docs/zh/08-operation/02-planning.md @@ -151,12 +151,12 @@ TDengine 的多级存储功能在使用上还具备以下优点。 | 原生接口(taosc) | 6030 | TCP | | RESTful 接口 | 6041 | TCP | | WebSocket 接口 | 6041 | TCP | +| taosKeeper | 6043 | TCP | | statsd 格式写入接口 | 6044 | TCP/UDP | | collectd 格式写入接口 | 6045 | TCP/UDP | | openTSDB Telnet 格式写入接口 | 6046 | TCP | | collectd 使用 openTSDB Telnet 格式写入接口 | 6047 | TCP | | icinga2 使用 openTSDB Telnet 格式写入接口 | 6048 | TCP | | tcollector 使用 openTSDB Telnet 格式写入接口 | 6049 | TCP | -| taosKeeper | 6043 | TCP | | taosX | 6050, 6055 | TCP | | taosExplorer | 6060 | TCP | From 6f7bca4d803f492dcb1aad30cde4c36d6eec061b Mon Sep 17 00:00:00 2001 From: Feng Chao Date: Tue, 5 Nov 2024 10:33:01 +0800 Subject: [PATCH 115/127] Update test_case_when.py --- tests/army/query/test_case_when.py | 34 ++++++++++++++++-------------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/tests/army/query/test_case_when.py b/tests/army/query/test_case_when.py index dea5a5f01f..e772914183 100644 --- a/tests/army/query/test_case_when.py +++ b/tests/army/query/test_case_when.py @@ -324,19 +324,15 @@ class TDTestCase(TBase): tdSql.query("select case when ts='2024-10-01 00:00:04.000' then 456646546 when ts>'2024-10-01 00:00:04.000' then 'after today' else 'before today or unknow date' end from st1;") assert(tdSql.checkRows(10) and tdSql.res == [('before today or unknow date',), ('before today or unknow date',), ('before today or unknow date',), ('before today or unknow date',), ('456646546',), ('after today',), ('after today',), ('after today',), ('after today',), ('after today',)]) - tdSql.query("select case when c_geometry is null then c_geometry else c_null end from st1;") - assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) - - # tdSql.error("select case when c_geometry is not null then c_geometry else c_null end from st1;") - # tdSql.error("select case when c_geometry='POINT(2 2)' then c_geometry else c_bool end from st1;") - # tdSql.error("select case when c_geometry!='POINT(2 2)' then c_geometry else c_bool end from st1;") + tdSql.error("select case when c_geometry is null then c_geometry else c_null end from st1;") + tdSql.error("select case when c_geometry is not null then c_geometry else c_null end from st1;") + tdSql.error("select case when 
c_geometry='POINT(2 2)' then c_geometry else c_bool end from st1;") + tdSql.error("select case when c_geometry!='POINT(2 2)' then c_geometry else c_bool end from st1;") - tdSql.query("select case when t is null then t else c_null end from st2;") - assert(tdSql.checkRows(10) and all([item[0] is None for item in tdSql.res])) - - # tdSql.error("select case when t is not null then t else c_null end from st2;") - # tdSql.error("select case when t->'location'='beijing' then t->'location' else c_bool end from st2;") - # tdSql.error("select case when t->'location'!='beijing' then t->'location' else c_bool end from st1;") + tdSql.error("select case when t is null then t else c_null end from st2;") + tdSql.error("select case when t is not null then t else c_null end from st2;") + tdSql.error("select case when t->'location'='beijing' then t->'location' else c_bool end from st2;") + tdSql.error("select case when t->'location'!='beijing' then t->'location' else c_bool end from st1;") tdSql.query("select case when c_float!=2.2 then 387897 else 'test message' end from st1;") assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)]) @@ -344,9 +340,15 @@ class TDTestCase(TBase): tdSql.query("select case when c_double!=2.22 then 387897 else 'test message' end from st1;") assert(tdSql.checkRows(10) and tdSql.res == [('test message',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('387897',), ('test message',)]) - # tdSql.query("select case c_tinyint when 2 then -2147483648 when 3 then 'three' else '4294967295' end from st1;") - # tdSql.query("select case c_float when 2.2 then 9.2233720e+18 when 3.3 then -9.2233720e+18 else 'aa' end from st1;") - # tdSql.query("select case t1.c_int when 2 then 'run' when t1.c_int is null then 'other' else t2.c_varchar end from st1 t1, st2 t2 where t1.ts=t2.ts;") + tdSql.query("select case c_tinyint when 2 then -2147483648 when 3 then 'three' else '4294967295' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('-2147483648',), ('three',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',), ('4294967295',)]) + + tdSql.query("select case c_float when 2.2 then 9.2233720e+18 when 3.3 then -9.2233720e+18 else 'aa' end from st1;") + assert(tdSql.checkRows(10) and tdSql.res == [('9223372000000000000.000000',), ('-9223372000000000000.000000',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',), ('aa',)]) + + tdSql.query("select case t1.c_int when 2 then 'run' when t1.c_int is null then 'other' else t2.c_varchar end from st1 t1, st2 t2 where t1.ts=t2.ts;") + print(tdSql.res) + assert(tdSql.checkRows(10) and tdSql.res == [('run',), ('cccccccc',), ('dddddddd',), ('eeeeeeee',), ('ffffffff',), ('gggggggg',), ('hhhhhhhh',), ('iiiiiiii',), ('jjjjjjjj',), (None,)]) tdSql.query("select avg(case when c_tinyint>=2 then c_tinyint else c_null end) from st1;") assert(tdSql.checkRows(1) and tdSql.res == [(6.0,)]) @@ -365,7 +367,7 @@ class TDTestCase(TBase): self.test_case_when_statements() def stop(self): - # tdSql.execute("drop database test_case_when;") + tdSql.execute("drop database test_case_when;") tdSql.close() tdLog.success("%s successfully executed" % __file__) From 75f838c4b49672b2523309ad7a0080ef3ac5a81c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 5 Nov 2024 11:02:41 +0800 Subject: [PATCH 116/127] fix(stream): fix 
syntax error. --- source/libs/stream/src/streamMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 90ef5fd643..f5de719848 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -452,7 +452,7 @@ int32_t streamMetaOpen(const char* path, void* ahandle, FTaskBuild buildTaskFn, TSDB_CHECK_CODE(code, lino, _err); // add refId at the end of initialization function - pMeta->rid = taosAddRef(streamMetaId, pMeta); + pMeta->rid = taosAddRef(streamMetaRefPool, pMeta); int64_t* pRid = taosMemoryMalloc(sizeof(int64_t)); TSDB_CHECK_NULL(pRid, code, lino, _err, terrno); From fa90a34dbecbcc481576742739a30b7cfd6c3374 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 5 Nov 2024 11:14:54 +0800 Subject: [PATCH 117/127] doc: simple changes --- docs/examples/JDBC/taosdemo/readme.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/docs/examples/JDBC/taosdemo/readme.md b/docs/examples/JDBC/taosdemo/readme.md index 141391d1f6..82aba4d9c1 100644 --- a/docs/examples/JDBC/taosdemo/readme.md +++ b/docs/examples/JDBC/taosdemo/readme.md @@ -7,8 +7,4 @@ java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host -data java -jar target/taosdemo-2.0.1-jar-with-dependencies.jar -host -database -doCreateTable false -superTableSQL "create table weather(ts timestamp, f1 int) tags(t1 nchar(4))" -numOfTables 1000 -numOfRowsPerTable 100000000 -numOfThreadsForInsert 10 -numOfTablesPerSQL 10 -numOfValuesPerSQL 100 ``` -如果发生错误 Exception in thread "main" java.lang.UnsatisfiedLinkError: no taos in java.library.path -请检查是否安装 TDengine 客户端安装包或编译 TDengine 安装。如果确定已经安装过还出现这个错误,可以在命令行 java 后加 -Djava.library.path=/usr/lib 来指定寻找共享库的路径。 - - If you encounter the error Exception in thread "main" `java.lang.UnsatisfiedLinkError: no taos in java.library.path`, please check whether the TDengine client package is installed or TDengine is compiled and installed. If you are sure it is installed and still encounter this error, you can add `-Djava.library.path=/usr/lib` after the `java` command to specify the path to the shared library. From 90d1e014db7dbcbddfa0d6d303dbbede7f266638 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 5 Nov 2024 14:25:24 +0800 Subject: [PATCH 118/127] az/begin: remove duplicate begin & end of empty impl. --- source/libs/azure/src/az.cpp | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/source/libs/azure/src/az.cpp b/source/libs/azure/src/az.cpp index 5f95624c94..9a1f95b142 100644 --- a/source/libs/azure/src/az.cpp +++ b/source/libs/azure/src/az.cpp @@ -22,6 +22,10 @@ #include "taoserror.h" #include "tglobal.h" +int32_t azBegin() { return TSDB_CODE_SUCCESS; } + +void azEnd() {} + #if defined(USE_S3) #include @@ -40,10 +44,6 @@ extern char tsS3BucketName[TSDB_FQDN_LEN]; extern int8_t tsS3Enabled; extern int8_t tsS3EpNum; -int32_t azBegin() { return TSDB_CODE_SUCCESS; } - -void azEnd() {} - static void checkPrint(const char *fmt, ...) 
{ va_list arg_ptr; va_start(arg_ptr, fmt); @@ -524,10 +524,6 @@ int32_t azDeleteObjects(const char *object_name[], int nobject) { #else -int32_t azBegin() { return TSDB_CODE_SUCCESS; } - -void azEnd() {} - int32_t azCheckCfg() { return TSDB_CODE_SUCCESS; } int32_t azPutObjectFromFileOffset(const char *file, const char *object_name, int64_t offset, int64_t size) { From 7316b339b6fa3b17a45120402ad12cb551b03f8d Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 5 Nov 2024 14:34:20 +0800 Subject: [PATCH 119/127] az/get object by prefix: catch all cpp eceptions --- source/libs/azure/src/az.cpp | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/source/libs/azure/src/az.cpp b/source/libs/azure/src/az.cpp index 9a1f95b142..1380b58bbd 100644 --- a/source/libs/azure/src/az.cpp +++ b/source/libs/azure/src/az.cpp @@ -32,7 +32,6 @@ void azEnd() {} #include #include "td_block_blob_client.hpp" -// Add appropriate using namespace directives using namespace Azure::Storage; using namespace Azure::Storage::Blobs; @@ -223,7 +222,6 @@ static int32_t azPutObjectFromFileOffsetImpl(const char *file, const char *objec uint8_t blobContent[] = "Hello Azure!"; // Create the block blob client // BlockBlobClient blobClient = containerClient.GetBlockBlobClient(blobName); - // TDBlockBlobClient blobClient(containerClient.GetBlobClient(blobName)); TDBlockBlobClient blobClient(containerClient.GetBlobClient(object_name)); blobClient.UploadFrom(file, offset, size); @@ -467,7 +465,7 @@ int32_t azGetObjectToFile(const char *object_name, const char *fileName) { TAOS_RETURN(code); } -int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { +static int32_t azGetObjectsByPrefixImpl(const char *prefix, const char *path) { const std::string delimiter = "/"; std::string accountName = tsS3AccessKeyId[0]; std::string accountKey = tsS3AccessKeySecret[0]; @@ -514,6 +512,23 @@ int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { return 0; } +int32_t azGetObjectsByPrefix(const char *prefix, const char *path) { + int32_t code = 0; + + try { + code = azGetObjectsByPrefixImpl(prefix, path); + } catch (const std::exception &e) { + azError("%s: Reason Phrase: %s", __func__, e.what()); + + code = TAOS_SYSTEM_ERROR(EIO); + azError("%s failed at line %d since %s", __func__, __LINE__, tstrerror(code)); + + TAOS_RETURN(code); + } + + TAOS_RETURN(code); +} + int32_t azDeleteObjects(const char *object_name[], int nobject) { for (int i = 0; i < nobject; ++i) { azDeleteObjectsByPrefix(object_name[i]); From a51bd9a24ff5661615cdf13487de53d9fd6da697 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Tue, 5 Nov 2024 15:42:57 +0800 Subject: [PATCH 120/127] enh: cal output col counts --- source/libs/executor/src/dataDispatcher.c | 38 ++++++++++++++--------- 1 file changed, 24 insertions(+), 14 deletions(-) diff --git a/source/libs/executor/src/dataDispatcher.c b/source/libs/executor/src/dataDispatcher.c index 48f4ed3ed1..f255d0b95c 100644 --- a/source/libs/executor/src/dataDispatcher.c +++ b/source/libs/executor/src/dataDispatcher.c @@ -45,6 +45,7 @@ typedef struct SDataDispatchHandle { SDataBlockDescNode* pSchema; STaosQueue* pDataBlocks; SDataDispatchBuf nextOutput; + int32_t outPutColCounts; int32_t status; bool queryEnd; uint64_t useconds; @@ -68,23 +69,12 @@ static int32_t inputSafetyCheck(SDataDispatchHandle* pHandle, const SInputData* return TSDB_CODE_QRY_INVALID_INPUT; } - SNode* pNode; - int32_t numOfCols = 0; - FOREACH(pNode, pHandle->pSchema->pSlots) { - 
SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; - if (pSlotDesc->output) { - ++numOfCols; - } else { - // Slots must be sorted, and slots with 'output' set to true must come first - break; - } - } - - if (numOfCols > taosArrayGetSize(pInput->pData->pDataBlock)) { - qError("invalid column number, schema:%d, input:%zu", numOfCols, taosArrayGetSize(pInput->pData->pDataBlock)); + if (pHandle->outPutColCounts > taosArrayGetSize(pInput->pData->pDataBlock)) { + qError("invalid column number, schema:%d, input:%zu", pHandle->outPutColCounts, taosArrayGetSize(pInput->pData->pDataBlock)); return TSDB_CODE_QRY_INVALID_INPUT; } + SNode* pNode; int32_t colNum = 0; FOREACH(pNode, pHandle->pSchema->pSlots) { SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; @@ -419,6 +409,25 @@ static int32_t blockDescNodeCheck(SDataBlockDescNode* pInputDataBlockDesc) { return TSDB_CODE_SUCCESS; } +int32_t getOutputColCounts(SDataBlockDescNode* pInputDataBlockDesc) { + if (pInputDataBlockDesc == NULL) { + qError("invalid schema"); + return 0; + } + SNode* pNode; + int32_t numOfCols = 0; + FOREACH(pNode, pInputDataBlockDesc->pSlots) { + SSlotDescNode* pSlotDesc = (SSlotDescNode*)pNode; + if (pSlotDesc->output) { + ++numOfCols; + } else { + // Slots must be sorted, and slots with 'output' set to true must come first + break; + } + } + return numOfCols; +} + int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pDataSink, DataSinkHandle* pHandle) { int32_t code; code = blockDescNodeCheck(pDataSink->pInputDataBlockDesc); @@ -443,6 +452,7 @@ int32_t createDataDispatcher(SDataSinkManager* pManager, const SDataSinkNode* pD dispatcher->pManager = pManager; pManager = NULL; dispatcher->pSchema = pDataSink->pInputDataBlockDesc; + dispatcher->outPutColCounts = getOutputColCounts(dispatcher->pSchema); dispatcher->status = DS_BUF_EMPTY; dispatcher->queryEnd = false; code = taosOpenQueue(&dispatcher->pDataBlocks); From 688b159bec22791b93218fa03d412016e636b809 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 5 Nov 2024 17:09:52 +0800 Subject: [PATCH 121/127] fix:[TD-32166] change createRequest to buildRequest to avoid printing error because request not in pRequests hash --- source/client/src/clientEnv.c | 2 +- source/client/src/clientRawBlockWrite.c | 8 ++++---- source/client/src/clientSml.c | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 21f7c93036..b1eb4a683f 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -686,7 +686,7 @@ void doDestroyRequest(void *p) { int32_t code = taosHashRemove(pRequest->pTscObj->pRequests, &pRequest->self, sizeof(pRequest->self)); if (TSDB_CODE_SUCCESS != code) { - tscError("failed to remove request from hash, code:%s", tstrerror(code)); + tscWarn("failed to remove request from hash, code:%s", tstrerror(code)); } schedulerFreeJob(&pRequest->body.queryJob, 0); diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index acba8117c6..213c18ac8a 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -1590,7 +1590,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat SHashObj* pVgHash = NULL; SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest)); + RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid)); uDebug(LOG_ID_TAG " write raw block with field, 
rows:%d, pData:%p, tbname:%s, fields:%p, numFields:%d", LOG_ID_VALUE, rows, pData, tbname, fields, numFields); @@ -1651,7 +1651,7 @@ int taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha SHashObj* pVgHash = NULL; SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, reqid, &pRequest)); + RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, reqid)); uDebug(LOG_ID_TAG " write raw block, rows:%d, pData:%p, tbname:%s", LOG_ID_VALUE, rows, pData, tbname); @@ -1721,7 +1721,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { STableMeta* pTableMeta = NULL; SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); + RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, 0)); uDebug(LOG_ID_TAG " write raw data, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); pRequest->syncQuery = true; @@ -1869,7 +1869,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) SHashObj* pCreateTbHash = NULL; SRequestObj* pRequest = NULL; - RAW_RETURN_CHECK(createRequest(*(int64_t*)taos, TSDB_SQL_INSERT, 0, &pRequest)); + RAW_RETURN_CHECK(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &pRequest, 0)); uDebug(LOG_ID_TAG " write raw metadata, data:%p, dataLen:%d", LOG_ID_VALUE, data, dataLen); pRequest->syncQuery = true; diff --git a/source/client/src/clientSml.c b/source/client/src/clientSml.c index e8221c8b8d..90c7895d94 100644 --- a/source/client/src/clientSml.c +++ b/source/client/src/clientSml.c @@ -1740,7 +1740,7 @@ TAOS_RES *taos_schemaless_insert_inner(TAOS *taos, char *lines[], char *rawLine, SSmlHandle *info = NULL; int cnt = 0; while (1) { - SML_CHECK_CODE(createRequest(*(int64_t *)taos, TSDB_SQL_INSERT, reqid, &request)); + SML_CHECK_CODE(buildRequest(*(int64_t*)taos, "", 0, NULL, false, &request, reqid)); SSmlMsgBuf msg = {request->msgBufLen, request->msgBuf}; request->code = smlBuildSmlInfo(taos, &info); SML_CHECK_CODE(request->code); From 693aacb8cbeaead3664a02adbe40783b4fb2b252 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 5 Nov 2024 17:13:16 +0800 Subject: [PATCH 122/127] refactor: always successfully put the retrieve msg even though the input queue is full already. --- source/libs/stream/src/streamQueue.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamQueue.c b/source/libs/stream/src/streamQueue.c index 6af6ebd044..bafd354360 100644 --- a/source/libs/stream/src/streamQueue.c +++ b/source/libs/stream/src/streamQueue.c @@ -304,8 +304,7 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem) // use the local variable to avoid the pItem be freed by other threads, since it has been put into queue already. 
stDebug("s-task:%s submit enqueue msgLen:%d ver:%" PRId64 ", total in queue:%d, size:%.2fMiB", pTask->id.idStr, msgLen, ver, total, size + SIZE_IN_MiB(msgLen)); - } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__DATA_RETRIEVE || - type == STREAM_INPUT__REF_DATA_BLOCK) { + } else if (type == STREAM_INPUT__DATA_BLOCK || type == STREAM_INPUT__REF_DATA_BLOCK) { if (streamQueueIsFull(pTask->inputq.queue)) { double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue)); @@ -324,7 +323,7 @@ int32_t streamTaskPutDataIntoInputQ(SStreamTask* pTask, SStreamQueueItem* pItem) double size = SIZE_IN_MiB(taosQueueMemorySize(pQueue)); stDebug("s-task:%s blockdata enqueue, total in queue:%d, size:%.2fMiB", pTask->id.idStr, total, size); } else if (type == STREAM_INPUT__CHECKPOINT || type == STREAM_INPUT__CHECKPOINT_TRIGGER || - type == STREAM_INPUT__TRANS_STATE) { + type == STREAM_INPUT__TRANS_STATE || type == STREAM_INPUT__DATA_RETRIEVE) { int32_t code = taosWriteQitem(pQueue, pItem); if (code != TSDB_CODE_SUCCESS) { streamFreeQitem(pItem); From 16570822e8bce1de77b59c0d56318fd309faffac Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Tue, 5 Nov 2024 17:16:20 +0800 Subject: [PATCH 123/127] blob/config: check dnode s3 params --- tests/system-test/2-query/db.py | 20 +++++++++++++++++++- 1 file changed, 19 insertions(+), 1 deletion(-) diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py index d4e5f89aa3..ee6b517061 100644 --- a/tests/system-test/2-query/db.py +++ b/tests/system-test/2-query/db.py @@ -62,12 +62,30 @@ class TDTestCase: tdSql.query("show dnode 1 variables like '____debugFlag'") tdSql.checkRows(2) - tdSql.query("show dnode 1 variables like 's3MigrateEna%'") + tdSql.query("show dnode 1 variables like 's3MigrateEnab%'") tdSql.checkRows(1) tdSql.checkData(0, 0, 1) tdSql.checkData(0, 1, 's3MigrateEnabled') tdSql.checkData(0, 2, 0) + tdSql.query("show dnode 1 variables like 's3MigrateIntervalSec%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3MigrateIntervalSec') + tdSql.checkData(0, 2, 3600) + + tdSql.query("show dnode 1 variables like 's3PageCacheSize%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3PageCacheSize') + tdSql.checkData(0, 2, 4096) + + tdSql.query("show dnode 1 variables like 's3UploadDelaySec%'") + tdSql.checkRows(1) + tdSql.checkData(0, 0, 1) + tdSql.checkData(0, 1, 's3UploadDelaySec') + tdSql.checkData(0, 2, 60) + def threadTest(self, threadID): print(f"Thread {threadID} starting...") tdsqln = tdCom.newTdSql() From c54855cdd995da7f6f40936afd9f9e107c0a4d03 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Tue, 5 Nov 2024 17:18:20 +0800 Subject: [PATCH 124/127] fix:[TD-32166] change createRequest to buildRequest to avoid printing error because request not in pRequests hash --- source/client/src/clientRawBlockWrite.c | 84 ++++++++++++------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/source/client/src/clientRawBlockWrite.c b/source/client/src/clientRawBlockWrite.c index 229e137a21..1799f29eb4 100644 --- a/source/client/src/clientRawBlockWrite.c +++ b/source/client/src/clientRawBlockWrite.c @@ -163,7 +163,7 @@ static void buildCreateTableJson(SSchemaWrapper* schemaRow, SSchemaWrapper* sche } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); - end: +end: *pJson = json; } @@ -197,7 +197,7 @@ static int32_t setCompressOption(cJSON* json, uint32_t para) { return code; } - end: +end: return code; } static void buildAlterSTableJson(void* alterData, int32_t 
alterDataLen, cJSON** pJson) { @@ -338,7 +338,7 @@ static void buildAlterSTableJson(void* alterData, int32_t alterDataLen, cJSON** break; } - end: +end: tFreeSMAltertbReq(&req); *pJson = json; } @@ -358,7 +358,7 @@ static void processCreateStb(SMqMetaRsp* metaRsp, cJSON** pJson) { } buildCreateTableJson(&req.schemaRow, &req.schemaTag, req.name, req.suid, TSDB_SUPER_TABLE, &req.colCmpr, pJson); - end: +end: uDebug("create stable return"); tDecoderClear(&coder); } @@ -378,7 +378,7 @@ static void processAlterStb(SMqMetaRsp* metaRsp, cJSON** pJson) { } buildAlterSTableJson(req.alterOriData, req.alterOriDataLen, pJson); - end: +end: uDebug("alter stable return"); tDecoderClear(&coder); } @@ -485,7 +485,7 @@ static void buildChildElement(cJSON* json, SVCreateTbReq* pCreateReq) { RAW_FALSE_CHECK(cJSON_AddItemToArray(tags, tag)); } - end: +end: RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tags", tags)); taosArrayDestroy(pTagVals); } @@ -514,7 +514,7 @@ static void buildCreateCTableJson(SVCreateTbReq* pCreateReq, int32_t nReqs, cJSO } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "createList", createList)); - end: +end: *pJson = json; } @@ -542,7 +542,7 @@ static void processCreateTable(SMqMetaRsp* metaRsp, cJSON** pJson) { } } - end: +end: uDebug("create table return"); tDeleteSVCreateTbBatchReq(&req); tDecoderClear(&decoder); @@ -585,7 +585,7 @@ static void processAutoCreateTable(SMqDataRsp* rsp, char** string) { *string = cJSON_PrintUnformatted(pJson); cJSON_Delete(pJson); - end: +end: uDebug("auto created table return, sql json:%s", *string); for (int i = 0; decoder && pCreateReq && i < rsp->createTableNum; i++) { tDecoderClear(&decoder[i]); @@ -773,7 +773,7 @@ static void processAlterTable(SMqMetaRsp* metaRsp, cJSON** pJson) { break; } - end: +end: uDebug("alter table return"); tDecoderClear(&decoder); *pJson = json; @@ -808,7 +808,7 @@ static void processDropSTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(tableName); RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableName", tableName)); - end: +end: uDebug("processDropSTable return"); tDecoderClear(&decoder); *pJson = json; @@ -844,7 +844,7 @@ static void processDeleteTable(SMqMetaRsp* metaRsp, cJSON** pJson) { RAW_NULL_CHECK(sqlJson); RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "sql", sqlJson)); - end: +end: uDebug("processDeleteTable return"); tDecoderClear(&coder); *pJson = json; @@ -881,7 +881,7 @@ static void processDropTable(SMqMetaRsp* metaRsp, cJSON** pJson) { } RAW_FALSE_CHECK(cJSON_AddItemToObject(json, "tableNameList", tableNameList)); - end: +end: uDebug("processDropTable return"); tDecoderClear(&decoder); *pJson = json; @@ -989,7 +989,7 @@ static int32_t taosCreateStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " create stable return, msg:%s", LOG_ID_VALUE, tstrerror(code)); destroyRequest(pRequest); tFreeSMCreateStbReq(&pReq); @@ -1023,9 +1023,9 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pRequest->pTscObj->pAppInfo->clusterId, &pCatalog)); SRequestConnInfo conn = {.pTrans = pRequest->pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pRequest->pTscObj->pAppInfo->mgmtEp)}; SName pName = {0}; toName(pRequest->pTscObj->acctId, pRequest->pDb, req.name, &pName); 
STableMeta* pTableMeta = NULL; @@ -1088,7 +1088,7 @@ static int32_t taosDropStb(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " drop stable return, msg:%s", LOG_ID_VALUE, tstrerror(code)); destroyRequest(pRequest); tDecoderClear(&coder); @@ -1142,9 +1142,9 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyCreateTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); RAW_NULL_CHECK(pRequest->tableList); @@ -1269,7 +1269,7 @@ static int32_t taosCreateTable(TAOS* taos, void* meta, int32_t metaLen) { code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " create table return, msg:%s", LOG_ID_VALUE, tstrerror(code)); tDeleteSVCreateTbBatchReq(&req); @@ -1328,9 +1328,9 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { taosHashSetFreeFp(pVgroupHashmap, destroyDropTbReqBatch); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; pRequest->tableList = taosArrayInit(req.nReqs, sizeof(SName)); RAW_NULL_CHECK(pRequest->tableList); // loop to create table @@ -1395,7 +1395,7 @@ static int32_t taosDropTable(TAOS* taos, void* meta, int32_t metaLen) { } code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " drop table return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosHashCleanup(pVgroupHashmap); destroyRequest(pRequest); @@ -1433,7 +1433,7 @@ static int32_t taosDeleteData(TAOS* taos, void* meta, int32_t metaLen) { } taos_free_result(res); - end: +end: uDebug("connId:0x%" PRIx64 " delete data sql:%s, code:%s", *(int64_t*)taos, sql, tstrerror(code)); tDecoderClear(&coder); return code; @@ -1473,9 +1473,9 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { SCatalog* pCatalog = NULL; RAW_RETURN_CHECK(catalogGetHandle(pTscObj->pAppInfo->clusterId, &pCatalog)); SRequestConnInfo conn = {.pTrans = pTscObj->pAppInfo->pTransporter, - .requestId = pRequest->requestId, - .requestObjRefId = pRequest->self, - .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; + .requestId = pRequest->requestId, + .requestObjRefId = pRequest->self, + .mgmtEps = getEpSet_s(&pTscObj->pAppInfo->mgmtEp)}; SVgroupInfo pInfo = {0}; SName pName = {0}; @@ -1543,7 +1543,7 @@ static int32_t taosAlterTable(TAOS* taos, void* meta, int32_t metaLen) { code = handleAlterTbExecRes(pRes->res, pCatalog); } } - end: +end: uDebug(LOG_ID_TAG " alter table return, meta:%p, len:%d, msg:%s", LOG_ID_VALUE, meta, metaLen, tstrerror(code)); taosArrayDestroy(pArray); if (pVgData) taosMemoryFreeClear(pVgData->pData); @@ -1608,7 +1608,7 @@ int taos_write_raw_block_with_fields_with_reqid(TAOS* taos, int rows, char* pDat launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " write raw block with field return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosMemoryFreeClear(pTableMeta); qDestroyQuery(pQuery); @@ -1668,7 +1668,7 @@ int 
taos_write_raw_block_with_reqid(TAOS* taos, int rows, char* pData, const cha launchQueryImpl(pRequest, pQuery, true, NULL); code = pRequest->code; - end: +end: uDebug(LOG_ID_TAG " write raw block return, msg:%s", LOG_ID_VALUE, tstrerror(code)); taosMemoryFreeClear(pTableMeta); qDestroyQuery(pQuery); @@ -1719,7 +1719,7 @@ static int32_t buildCreateTbMap(SMqDataRsp* rsp, SHashObj* pHashObj) { } return 0; - end: +end: tDecoderClear(&decoderTmp); tDestroySVCreateTbReq(&pCreateReq, TSDB_MSG_FLG_DECODE); return code; @@ -1826,7 +1826,7 @@ static int32_t getRawCache(SHashObj** pVgHash, SHashObj** pNameHash, SHashObj** } return 0; - end: +end: taosHashCleanup(*pMetaHash); taosHashCleanup(*pNameHash); taosHashCleanup(*pVgHash); @@ -1848,7 +1848,7 @@ static int32_t buildRawRequest(TAOS* taos, SRequestObj** pRequest, SCatalog** pC conn->requestObjRefId = (*pRequest)->self; conn->mgmtEps = getEpSet_s(&(*pRequest)->pTscObj->pAppInfo->mgmtEp); - end: +end: return code; } @@ -1925,7 +1925,7 @@ static int32_t processCacheMeta(SHashObj* pVgHash, SHashObj* pNameHash, SHashObj } *pMeta = pTableMeta; - end: +end: return code; } @@ -1993,7 +1993,7 @@ static int32_t tmqWriteRawDataImpl(TAOS* taos, void* data, int32_t dataLen) { break; } - end: +end: uDebug(LOG_ID_TAG " write raw data return, msg:%s", LOG_ID_VALUE, tstrerror(code)); tDeleteMqDataRsp(&rspObj.dataRsp); tDecoderClear(&decoder); @@ -2075,7 +2075,7 @@ static int32_t tmqWriteRawMetaDataImpl(TAOS* taos, void* data, int32_t dataLen) break; } - end: +end: uDebug(LOG_ID_TAG " write raw metadata return, msg:%s", LOG_ID_VALUE, tstrerror(code)); tDeleteSTaosxRsp(&rspObj.dataRsp); void* pIter = taosHashIterate(pCreateTbHash, NULL); @@ -2150,7 +2150,7 @@ static void processBatchMetaToJson(SMqBatchMetaRsp* pMsgRsp, char** string) { *string = fullStr; return; - end: +end: cJSON_Delete(pJson); tDeleteMqBatchMetaRsp(&rsp); } @@ -2233,7 +2233,7 @@ static int32_t encodeMqDataRsp(__encode_func__* encodeFunc, SMqDataRsp* rspObj, raw->raw = buf; raw->raw_len = len; return code; - FAILED: +FAILED: tEncoderClear(&encoder); taosMemoryFree(buf); return code; @@ -2377,7 +2377,7 @@ static int32_t tmqWriteBatchMetaDataImpl(TAOS* taos, void* meta, int32_t metaLen } } - end: +end: tDeleteMqBatchMetaRsp(&rsp); return code; } \ No newline at end of file From 082c6b1c509351e7b96c196526d4f0f303bd12e2 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Tue, 5 Nov 2024 18:17:41 +0800 Subject: [PATCH 125/127] enh: compatible with taosanode --- packaging/tools/install.sh | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 8a6b159a22..a6fd69d16f 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -185,7 +185,14 @@ function kill_process() { function install_main_path() { #create install main dir and all sub dir - ${csudo}rm -rf ${install_main_dir} || : + ${csudo}rm -rf ${install_main_dir}/cfg || : + ${csudo}rm -rf ${install_main_dir}/bin || : + ${csudo}rm -rf ${install_main_dir}/driver || : + ${csudo}rm -rf ${install_main_dir}/examples || : + ${csudo}rm -rf ${install_main_dir}/include || : + ${csudo}rm -rf ${install_main_dir}/share || : + ${csudo}rm -rf ${install_main_dir}/log || : + ${csudo}mkdir -p ${install_main_dir} ${csudo}mkdir -p ${install_main_dir}/cfg ${csudo}mkdir -p ${install_main_dir}/bin From 04cf61b8d84d9d5a38af24978aeb08ef4aecf621 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Wed, 6 Nov 2024 09:03:56 +0800 Subject: [PATCH 126/127] docs: data analysis --- 
.../06-advanced/06-data-analysis/01-arima.md | 22 ++--- .../06-data-analysis/02-holtwinters.md | 20 ++--- .../06-data-analysis/03-anomaly-detection.md | 8 +- .../zh/06-advanced/06-data-analysis/addins.md | 23 ++--- docs/zh/06-advanced/06-data-analysis/index.md | 84 ++++++++++--------- 5 files changed, 81 insertions(+), 76 deletions(-) diff --git a/docs/zh/06-advanced/06-data-analysis/01-arima.md b/docs/zh/06-advanced/06-data-analysis/01-arima.md index 6799d48887..56383b7e86 100644 --- a/docs/zh/06-advanced/06-data-analysis/01-arima.md +++ b/docs/zh/06-advanced/06-data-analysis/01-arima.md @@ -7,12 +7,12 @@ sidebar_label: "ARIMA" ## 功能概述 -ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。 -ARIMA模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA模型要求时序数据是**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。 +ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。 +ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA 模型要求时序数据**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。 >平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。 -以下参数可以动态输入控制预测过程中生成 合适的 ARIMA 的模型。 +以下参数可以动态输入控制预测过程中生成 合适的 ARIMA 的模型。 - p= 自回归模型阶数 - d= 差分阶数 @@ -23,19 +23,19 @@ ARIMA模型是一种自回归模型,只需要自变量即可预测后续的值 分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。 |参数名称|说明|必填项| |---|---|---| -|period|输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或则该参数设置为 0, 将使用非季节性/周期性的 ARIMA 模型预测。|选填| -|start_p| 自回归模型阶数的 起始值,0 开始的整数,不推荐大于 10 |选填| -|max_p| 自回归模型阶数的 结束值,0 开始的整数,不推荐大于 10 |选填| -|start_q| 移动平均模型阶数的起始值, 0 开始的整数,不推荐大于 10 |选填| -|max_q| 移动平均模型阶数的结束值, 0 开始的整数,不推荐大于 10 |选填| -|d| 差分阶数|选填| +|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0,将使用非季节性/周期性的 ARIMA 模型预测。|选填| +|start_p|自回归模型阶数的起始值,0 开始的整数,不推荐大于 10 |选填| +|max_p|自回归模型阶数的结束值,0 开始的整数,不推荐大于 10 |选填| +|start_q|移动平均模型阶数的起始值,0 开始的整数,不推荐大于 10 |选填| +|max_q|移动平均模型阶数的结束值,0 开始的整数,不推荐大于 10 |选填| +|d|差分阶数|选填| `start_p`、`max_p` `start_q` `max_q` 四个参数约束了模型在多大的范围内去搜寻合适的最优解。相同输入数据的条件下,参数范围越大,消耗的资源越多,系统响应的时间越长。 ### 示例及结果 -针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,start_p 起始是 1, 最大拟合是 5,start_q是1,最大值是5,预测结果中返回 95% 置信区间范围边界。 +针对 i32 列进行数据预测,输入列 i32 每 10 个点是一个周期,start_p 起始是 1, 最大拟合是 5,start_q 是 1,最大值是 5,预测结果中返回 95% 置信区间范围边界。 ``` -FORECAST(i32, "algo=arima,alpha=95,period=10, start_p=1, max_p=5, start_q=1, max_q=5") +FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5") ``` ```json5 diff --git a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md b/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md index 470ef46253..6fb4bffdc9 100644 --- a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md +++ b/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md @@ -6,8 +6,8 @@ sidebar_label: "HoltWinters" 本节讲述 HoltWinters 算法模型的使用方法。 ## 功能概述 -HoltWinters模型又称为多次指数平滑模型(EMA)。对含有线性趋势和周期波动的非平稳序列适用,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。 -HoltWinters有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。 +HoltWinters 模型又称为多次指数平滑模型(EMA)。适用于含有线性趋势和周期波动的非平稳序列,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。 +HoltWinters 有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。 该模型对于返回数据也不提供计算的置信区间范围结果。在 95% 置信区间的上下界结果与预测结果相同。 @@ -16,9 +16,9 @@ HoltWinters有两种不同的季节性组成部分,当季节变化在该时间 分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。 |参数名称|说明|必填项| |---|---|---| -|period| 输入时间序列数据每个周期包含的数据点个数。如果不设置该参数或则该参数设置为 0, 将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填| -|trend| 趋势模型使用加法模型还是乘法模型|选填| -|seasonal| 季节性采用加法模型还是乘法模型|选填| +|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0, 
将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填| +|trend|趋势模型使用加法模型还是乘法模型|选填| +|seasonal|季节性采用加法模型还是乘法模型|选填| 参数 `trend` 和 `seasonal`的均可以选择 `add` (加法模型)或 `mul`(乘法模型)。 @@ -30,11 +30,11 @@ FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul") ```json5 { -"rows": rows, // 结果的行数 -"period": period, // 返回结果的周期性, 该结果与输入的周期性相同,如果没有周期性,该值为 0 -"algo": 'holtwinters' // 返回结果使用的计算模型 -"mse":mse, // 最小均方误差(minmum square error) -"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了 两列[timestamp][fc_results]。 +"rows": rows, // 结果的行数 +"period": period, // 返回结果的周期性, 该结果与输入的周期性相同,如果没有周期性,该值为 0 +"algo": 'holtwinters' // 返回结果使用的计算模型 +"mse":mse, // 最小均方误差(minmum square error) +"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了 两列 [timestamp][fc_results]。 } ``` diff --git a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md b/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md index 8f1e1f064a..d0da330ab3 100644 --- a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md +++ b/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md @@ -3,7 +3,7 @@ title: "Anomaly-detection" sidebar_label: "Anomaly-detection" --- -本节讲述 异常检测 算法模型的使用方法。 +本节讲述 异常检测算法模型的使用方法。 ## 概述 分析平台提供了 6 种异常检查模型,6 种异常检查模型分为 3 个类别,分别属于基于统计的异常检测模型、基于数据密度的检测模型、基于深度学习的异常检测模型。在不指定异常检测使用的方法的情况下,默认调用 iqr 的方法进行计算。 @@ -11,7 +11,7 @@ sidebar_label: "Anomaly-detection" ### 统计学异常检测方法 -- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为3, 即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值K倍标准差,则该点被视为异常点. +- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为 3, 即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点. |参数名称|说明|是否必选|默认值| |---|---|---|---| @@ -22,7 +22,7 @@ sidebar_label: "Anomaly-detection" - Grubbs[3]: 又称为 Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,该单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。 -- SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多49.9%。数据集的异常比例一般不超过5% +- SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5% |参数名称|说明|是否必选|默认值| |---|---|---|---| @@ -30,7 +30,7 @@ sidebar_label: "Anomaly-detection" ### 基于数据密度的检测方法 -LOF[5]: 局部离群因子(LOF,又叫局部异常因子)算法是Breunig于2000年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的top(n)个点。 +LOF[5]: 局部离群因子(LOF,又叫局部异常因子)算法是 Breunig 于 2000 年提出的一种基于密度的局部离群点检测算法,该方法适用于不同类簇密度分散情况迥异的数据。根据数据点周围的数据密集情况,首先计算每个数据点的一个局部可达密度,然后通过局部可达密度进一步计算得到每个数据点的一个离群因子,该离群因子即标识了一个数据点的离群程度,因子值越大,表示离群程度越高,因子值越小,表示离群程度越低。最后,输出离群程度最大的 top(n) 个点。 ### 基于深度学习的检测方法 diff --git a/docs/zh/06-advanced/06-data-analysis/addins.md b/docs/zh/06-advanced/06-data-analysis/addins.md index 8b50c93d85..aeaf08d490 100644 --- a/docs/zh/06-advanced/06-data-analysis/addins.md +++ b/docs/zh/06-advanced/06-data-analysis/addins.md @@ -11,10 +11,10 @@ sidebar_label: "addins" |目录|说明| |---|---| -|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc, 单元测试和集成测试目录 test。 algo目录下 ad 放置异常检测算法代码, fc 放置预测算法代码| +|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc, 单元测试和集成测试目录 test。 algo 目录下 ad 放置异常检测算法代码, fc 放置预测算法代码| |script|是安装脚本和发布脚本放置目录| |model|放置针对数据集完成的训练模型| -|cfg| 配置文件目录| +|cfg|配置文件目录| ## 约定与限制 @@ -23,7 +23,7 @@ sidebar_label: "addins" ### 类命名规范 -算法类的名称需要 以下划线开始,以 Service 结尾。例如:_KsigmaService 是 KSigma 异常检测算法的实现类。 +算法类的名称需要以下划线开始,以 Service 结尾。例如:_KsigmaService 是 KSigma 异常检测算法的实现类。 ### 类继承约定 @@ -38,22 +38,22 @@ sidebar_label: "addins" ### 核心方法输入与输出约定 -`execute` 是算法处理的核心方法。调用该方法的时候, `self.list` 已经设置好输入数组。 +`execute` 是算法处理的核心方法。调用该方法的时候,`self.list` 
已经设置好输入数组。 异常检测输出结果 -`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。 +`execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。 预测输出结果 -对于预测算法, `AbstractForecastService` 的对象属性说明如下: +对于预测算法,`AbstractForecastService` 的对象属性说明如下: |属性名称|说明|默认值| |---|---|---| -|period|输入时序数据的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可。| 0| +|period|输入时序数据的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0| |start_ts|预测数据的开始时间| 0| |time_step|预测结果的两个数据点之间时间间隔|0 | |fc_rows|预测结果数量| 0 | |return_conf|返回结果中是否包含执行区间范围,如果算法计算结果不包含置信区间,那么上界和下界与自身相同| 1| -|conf|执行区间分位数 0.05| +|conf|执行区间分位数 0.05| 预测返回结果如下: @@ -76,7 +76,7 @@ from service import AbstractAnomalyDetectionService # 算法实现类名称 需要以下划线 "_" 开始,并以 Service 结束,如下 _IqrService 是 IQR 异常检测算法的实现类。 class _IqrService(AbstractAnomalyDetectionService): - """ IQR algorithm 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService类的抽象函数 """ + """ IQR algorithm 定义类,从 AbstractAnomalyDetectionService 继承,并实现 AbstractAnomalyDetectionService 类的抽象函数 """ # 定义算法调用关键词,全小写ASCII码(必须添加) name = 'iqr' @@ -111,6 +111,7 @@ class _IqrService(AbstractAnomalyDetectionService): ## 单元测试 在测试文件目录中的 anomaly_test.py 中增加单元测试用例。 + ```python def test_iqr(self): """ 测试 _IqrService 类 """ @@ -137,9 +138,9 @@ def test_iqr(self): ## 需要模型的算法 -针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立了, autoencoder的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。 +针对特定数据集,进行模型训练的算法,在训练完成后。需要将训练得到的模型保存在 model 目录中。需要注意的是,针对每个算法,需要建立独立的文件夹。例如 auto_encoder 的训练算法在 model 目录下建立 autoencoder 的目录,使用该算法针对不同数据集训练得到的模型,均需要放置在该目录下。 -训练完成后的模型,使用 joblib 进行保存。 +训练完成后的模型,使用 joblib 进行保存。 并在 model 目录下建立对应的文件夹存放该模型。 diff --git a/docs/zh/06-advanced/06-data-analysis/index.md b/docs/zh/06-advanced/06-data-analysis/index.md index e3bf2f4158..df04c91d28 100644 --- a/docs/zh/06-advanced/06-data-analysis/index.md +++ b/docs/zh/06-advanced/06-data-analysis/index.md @@ -5,18 +5,18 @@ title: 数据分析功能 ## 概述 -TDengine 通过 ANode(AnalysisNode) 是提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,从而拓展 TDengine 的功能,支持时间序列高级分析功能。 -ANode 是无状态的数据分析节点,集群中可以存在多个 ANode节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。 +ANode(Analysis Node)是 TDengine 提供数据分析功能的扩展组件,通过 Restful 接口提供分析服务,拓展 TDengine 的功能,支持时间序列高级分析。 +ANode 是无状态的数据分析节点,集群中可以存在多个 ANode 节点,相互之间没有关联。将 ANode 注册到 TDengine 集群以后,通过 SQL 语句即可调用并完成时序分析任务。 下图是数据分析的技术架构示意图。 ![数据分析功能架构图](./pic/data-analysis.png) ## 安装部署 ### 环境准备 -ANode 的要求节点上准备有 Python 3.10 及以上版本以及相应的Python包自动安装组件 Pip ,同时请确保能够正常连接互联网。 +ANode 要求节点上准备有 Python 3.10 及以上版本,以及相应的 Python 包自动安装组件 Pip,同时请确保能够正常连接互联网。 ### 安装及卸载 -使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengien 的安装流程一致。 +使用专门的 ANode 安装包 TDengine-enterprise-anode-1.x.x.tar.gz 进行 ANode 的安装部署工作,安装过程与 TDengine 的安装流程一致。 ```bash tar -xzvf TDengine-enterprise-anode-1.0.0.tar.gz @@ -30,7 +30,7 @@ sudo ./install.sh 为了避免 ANode 安装后影响目标节点现有的 Python 库。 ANode 使用 Python 虚拟环境运行,安装后的默认 Python 目录处于 `/var/lib/taos/taosanode/venv/`。为了避免反复安装虚拟环境带来的开销,卸载 ANode 并不会自动删除该虚拟环境,如果您确认不需要 Python 的虚拟环境,可以手动删除。 ## 启动及停止服务 -安装 ANode 以后,可以使用`systemctl`来管理 ANode 的服务。使用如下命令可以启动/停止/检查状态。 +安装 ANode 以后,可以使用 `systemctl` 来管理 ANode 的服务。使用如下命令可以启动/停止/检查状态。 ```bash systemctl start taosanoded @@ -50,27 +50,27 @@ systemctl status taosanoded ### 配置说明 -Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 的配置和 uWSGI 的配置在同一个配置文件中,具体如下: +Anode 提供的 RestFul 服务使用 uWSGI 驱动,因此 ANode 和 uWSGI 的配置信息存放在同一个配置文件中,具体如下: ```ini [uwsgi] # charset 
-env=LC_ALL=en_US.UTF-8 +env = LC_ALL = en_US.UTF-8 # ip:port http = 127.0.0.1:6050 # the local unix socket file than communicate to Nginx #socket = 127.0.0.1:8001 -#socket-timeout=10 +#socket-timeout = 10 # base directory chdir = /usr/local/taos/taosanode/lib # initialize python file -wsgi-file = /usr/local/taos/taosanode/lib/app.py +wsgi-file = /usr/local/taos/taosanode/lib/taos/app.py -# invoke app model +# call module of uWSGI callable = app # auto remove unix Socket and pid file when stopping @@ -80,10 +80,10 @@ vacuum = true #chmod-socket = 664 # uWSGI pid -uid=root +uid = root # uWSGI gid -gid=root +gid = root # main process master = true @@ -92,25 +92,27 @@ master = true processes = 2 # pid file -pidfile = /usr/local/taos/taosanode/uwsgi.pid +pidfile = /usr/local/taos/taosanode/taosanode.pid # enable threads -enable-threads=true +enable-threads = true # the number of threads for each process -threads=2 +threads = 4 # memory useage report memory-report = true + +# smooth restart reload-mercy = 10 # conflict with systemctl, so do NOT uncomment this # daemonize = /var/log/taos/taosanode/taosanode.log -# set log +# log directory logto = /var/log/taos/taosanode/taosanode.log -# monitor server +# wWSGI monitor port stats = 127.0.0.1:8387 # python virtual environment directory @@ -121,10 +123,13 @@ virtualenv = /usr/local/taos/taosanode/venv/ app-log = /var/log/taos/taosanode/taosanode.app.log # model storage directory -model-dir=/usr/local/taos/taosanode/model/ +model-dir = /usr/local/taos/taosanode/model/ # default log level log-level = DEBUG + +# draw the query results +draw-result = 0 ``` **提示** @@ -137,7 +142,7 @@ log-level = DEBUG ```sql CREATE ANODE {node_url} ``` -node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。 +node_url 是提供服务的 ANode 的 IP 和 PORT, 例如:`create anode 'http://localhost:6050'`。启动 ANode 以后如果不注册到 TDengine 集群中,则无法提供正常的服务。不建议 ANode 注册到两个或多个集群中。 #### 查看 ANode 列出集群中所有的数据分析节点,包括其 `FQDN`, `PORT`, `STATUS`。 @@ -167,24 +172,23 @@ DROP ANODE {anode_id} #### 白噪声检查 -平台提供 Restful的服务检测输入时间序列是否是白噪声时间序列(White Noise Data, WND),白噪声时间序列及随机数序列。 -此外,分析平台要求输入的数据不能是 , 因此针对的所有数据均默认进行 白噪声检查。当前白噪声检查采用通行的 `Ljung-Box`检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。 +分析平台提供的 Restful 服务要求输入的时间序列不能是白噪声时间序列(White Noise Data, WND)和随机数序列 , 因此针对所有数据均默认进行白噪声检查。当前白噪声检查采用通行的 `Ljung-Box` 检验,`Ljung-Box` 统计量检查过程需要遍历整个输入序列并进行计算。 如果用户能够明确输入序列一定不是白噪声序列,那么可以通过输入参数,指定预测之前忽略该检查,从而节省分析过程的 CPU 计算资源。 同时支持独立地针对输入序列进行白噪声检测(该检测功能暂不独立对外开放)。 #### 数据重采样和时间戳对齐 -数据分析平台支持将输入的数据进行重采样的预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别: +分析平台支持将输入数据进行重采样预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别: -- 数据时间戳对齐。由于真实数据时间可能并非严格按照查询指定的时间戳输入。此时数据平台将自动将数据的时间间隔按照指定的时间间隔进行对齐。例如有输入时间序列:[11, 22, 29, 41],用户指定时间间隔为 10,那么该序列将被对齐重整为以下序列 [10, 20, 30, 40]。 -- 数据时间重采样。用户输入的时间序列其采样频率超过了指定的查询需要获得结果的时间间隔,例如输入原始数据是 5, 但是输出结果的频率是 10. 
 #### 数据重采样和时间戳对齐

-数据分析平台支持将输入的数据进行重采样的预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:
+分析平台支持将输入数据进行重采样预处理,从而确保输出结果按照用户指定的等间隔进行处理。处理过程分为两种类别:

-- 数据时间戳对齐。由于真实数据时间可能并非严格按照查询指定的时间戳输入。此时数据平台将自动将数据的时间间隔按照指定的时间间隔进行对齐。例如有输入时间序列:[11, 22, 29, 41],用户指定时间间隔为 10,那么该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
-- 数据时间重采样。用户输入的时间序列其采样频率超过了指定的查询需要获得结果的时间间隔,例如输入原始数据是 5, 但是输出结果的频率是 10. [0, 5, 10, 15, 20, 25, 30],那么该输入数据列将重采用为间隔 为 10 的输入序列,其结果如下 [0, 10, 20,30]。[5, 15, 25] 处的数据将被丢弃。
+- 数据时间戳对齐。由于真实数据可能并非严格按照查询指定的时间戳输入。此时分析平台会自动将数据的时间间隔按照指定的时间间隔进行对齐。例如输入时间序列 [11, 22, 29, 41],用户指定时间间隔为 10,该序列将被对齐重整为以下序列 [10, 20, 30, 40]。
+- 数据时间重采样。用户输入时间序列的采样频率超过了输出结果的频率,例如输入时间序列的采样频率是 5,输出结果的频率是 10,输入时间序列 [0, 5, 10, 15, 20, 25, 30] 将被重采样为间隔为 10 的序列 [0, 10, 20, 30],[5, 15, 25] 处的数据将被丢弃。

-需要注意的是,数据输入平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据[11, 22, 29, 49],并且用户要求的时间间隔为 10, 重整对齐后的序列是 [10, 20, 30, 50] 那么该序列进行预测分析将返回错误。
+需要注意的是,数据分析平台不支持缺失数据补齐后进行的预测分析,如果输入时间序列数据 [11, 22, 29, 49],并且用户要求的时间间隔为 10,重整对齐后的序列是 [10, 20, 30, 50],那么该序列进行预测分析将返回错误。
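A small sketch of the two preprocessing steps described above, reproducing the [11, 22, 29, 41] → [10, 20, 30, 40] alignment and the [0, 5, …, 30] → [0, 10, 20, 30] downsampling examples. This is purely illustrative; the platform's own resampling code is not part of this patch:

```python
def align_timestamps(ts, interval):
    """Snap each timestamp to the nearest multiple of `interval`."""
    return [round(t / interval) * interval for t in ts]


def downsample(ts, interval):
    """Keep only timestamps that fall on the output interval; the rest are dropped."""
    return [t for t in ts if t % interval == 0]


print(align_timestamps([11, 22, 29, 41], 10))      # [10, 20, 30, 40]
print(downsample([0, 5, 10, 15, 20, 25, 30], 10))  # [0, 10, 20, 30]
```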
 #### 时序数据异常检测

-异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口窗口退化成为一个起始时间和结束时间相同的点。
+异常检测是针对输入的时序数据,使用预设或用户指定的算法确定时间序列中**可能**出现异常的时间序列点,对于时间序列中若干个连续的异常点,将自动合并成为一个连续的(闭区间)异常窗口。对于只有单个点的场景,异常窗口退化成为一个起始时间和结束时间相同的点。
 异常检测生成的异常窗口受检测算法和算法参数的共同影响,对于异常窗口范围内的数据,可以应用 TDengine 提供的聚合和标量函数进行查询或变换处理。
 对于输入时间序列 (1, 20), (2, 22), (3, 91), (4, 120), (5, 18), (6, 19)。系统检测到 (3, 91), (4, 120) 为异常点,那么返回的异常窗口是闭区间 [3, 4]。
@@ -201,18 +205,18 @@ algo=expr1
 "}
 ```

-1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列输入,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
-2. `options`:字符串。其中使用 K/V 调用异常检测的算法,及与算法相关的参数。采用 逗号分隔的K/V字符串表示,其中的字符串不需要使用单引号、双引号、或转意号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma, k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。
+1. `column`:进行时序数据异常检测的输入数据列,当前只支持单列,且只能是数值类型,不能是字符类型(例如:`NCHAR` `VARCHAR` `VARBINARY`等类型),**不支持函数表达式**。
+2. `options`:字符串。其中使用 K=V 调用异常检测算法及与算法相关的参数。采用逗号分隔的 K=V 字符串表示,其中的字符串不需要使用单引号、双引号、或转义号等符号,不能使用中文及其他宽字符。例如:`algo=ksigma,k=2` 表示进行异常检测的算法是 ksigma,该算法接受的输入参数是 2。
 3. 异常检测的结果可以作为外层查询的子查询输入,在 `SELECT` 子句中使用的聚合函数或标量函数与其他类型的窗口查询相同。
-4. 输入数据默认进行白噪声检查,如果检查结果是输入数据是白噪声,将不会有任何(异常)窗口信息返回。
+4. 输入数据默认进行白噪声检查,如果输入数据是白噪声,将不会有任何(异常)窗口信息返回。

 **参数说明**

 |参数|含义|默认值|
 |---|---|---|
 |algo|异常检测调用的算法|iqr|
-|wncheck|对输入数据列是否进行白噪声检查|取值为0或者1,默认值为 1,表示进行白噪声检查|
+|wncheck|对输入数据列是否进行白噪声检查|取值为 0 或者 1,默认值为 1,表示进行白噪声检查|

-异常检测的返回结果以窗口的形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
+异常检测的返回结果以窗口形式呈现,因此窗口查询相关的伪列在这种场景下仍然可用。可以使用的伪列如下:
 1. `_WSTART`: 异常窗口开始时间戳
 2. `_WEND`:异常窗口结束时间戳
 3. `_WDURATION`:异常窗口持续时间
@@ -232,9 +236,9 @@ ANOMALY_WINDOW(i32, "algo=ksigma,k=2");
 ```
 taos> SELECT _wstart, _wend, count(*) FROM ai.atb ANOMAYL_WINDOW(i32);
-          _wstart          |           _wend          |   count(*)    |
-============================================================================
-  2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 |             1 |
+          _wstart          |          _wend           |   count(*)   |
+====================================================================
+ 2020-01-01 00:00:16.000 | 2020-01-01 00:00:16.001 |            1 |
 Query OK, 1 row(s) in set (0.028946s)
 ```
@@ -267,7 +271,7 @@ algo=expr1
 ```

 1. `column_expr`:预测的时序数据列。与异常检测相同,只支持数值类型输入。
-2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测还支持`conf`, `every`, `rows`, `start`, `rows` 几个参数,其含义如下:
+2. `options`:异常检测函数的参数,使用规则与 anomaly_window 相同。预测还支持 `conf`, `every`, `rows`, `start`, `rows` 几个参数,其含义如下:

 **参数说明**

 |参数|含义|默认值|
 |---|---|---|
 |algo|预测分析使用的算法|holtwinters|
 |wncheck|白噪声(white noise data)检查|默认值为 1,0 表示不进行检查|
-|conf|预测数据的置信区间范围 ,取值范围[0, 100]|95|
+|conf|预测数据的置信区间范围 ,取值范围 [0, 100]|95|
 |every|预测数据的采样间隔|输入数据的采样间隔|
 |start|预测结果的开始时间戳|输入数据最后一个时间戳加上一个采样时间段|
 |rows|预测结果的记录数|10|

-1. 预测查询结果新增了三个伪列,具体如下: `_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果
-2. 更改参数 `START`:返回预测结果的起始时间,改变这个起始时间不会影响返回的预测数值,只影响起始时间。
+1. 预测查询结果新增了三个伪列,具体如下:`_FROWTS`:预测结果的时间戳、`_FLOW`:置信区间下界、`_FHIGH`:置信区间上界, 对于没有置信区间的预测算法,其置信区间同预测结果
+2. 更改参数 `START`:返回预测结果的起始时间,改变起始时间不会影响返回的预测数值,只影响起始时间。
 3. `EVERY`:可以与输入数据的采样频率不同。采样频率只能低于或等于输入数据采样频率,不能**高于**输入数据的采样频率。
 4. 对于某些不需要计算置信区间的算法,即使指定了置信区间,返回的结果中其上下界退化成为一个点。
@@ -292,12 +296,12 @@ algo=expr1
 SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima") FROM ai.ftb;

---- 使用 arima 算法进行预测,输入数据的是周期数据,每10个采样点是一个周期。返回置信区间是 95%.
+--- 使用 arima 算法进行预测,输入数据的是周期数据,每 10 个采样点是一个周期。返回置信区间是 95%.
 SELECT _flow, _fhigh, _frowts, FORECAST(i32, "algo=arima,alpha=95,period=10") FROM ai.ftb;
 ```
 ```
-taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
+taos> select _flow, _fhigh, _frowts, forecast(i32) from ai.ftb;
        _flow |      _fhigh |                 _frowts | forecast(i32) |
========================================================================================
   10.5286684 |  41.8038254 | 2020-01-01 00:01:35.001 |            26 |

From 1790f2929e5749c0a63d38a695f07dcb2106e701 Mon Sep 17 00:00:00 2001
From: Shengliang Guan
Date: Wed, 6 Nov 2024 09:41:36 +0800
Subject: [PATCH 127/127] docs: data analysis

---
 .../06-advanced/06-data-analysis/01-arima.md  | 22 +++++++--------
 .../06-data-analysis/02-holtwinters.md        | 16 +++++------
 .../06-data-analysis/03-anomaly-detection.md  | 10 +++----
 .../zh/06-advanced/06-data-analysis/addins.md | 28 ++++++++++---------
 docs/zh/06-advanced/06-data-analysis/index.md |  2 +-
 5 files changed, 40 insertions(+), 38 deletions(-)

diff --git a/docs/zh/06-advanced/06-data-analysis/01-arima.md b/docs/zh/06-advanced/06-data-analysis/01-arima.md
index 56383b7e86..b9d63e924f 100644
--- a/docs/zh/06-advanced/06-data-analysis/01-arima.md
+++ b/docs/zh/06-advanced/06-data-analysis/01-arima.md
@@ -8,11 +8,11 @@ sidebar_label: "ARIMA"

 ## 功能概述

 ARIMA 即自回归移动平均模型(Autoregressive Integrated Moving Average, ARIMA),也记作 ARIMA(p,d,q),是统计模型中最常见的一种用来进行时间序列预测的模型。
-ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA 模型要求时序数据**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。
+ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的值。ARIMA 模型要求时间序列**平稳**,或经过差分处理后平稳,如果是不平稳的数据,**无法**获得正确的结果。

 >平稳的时间序列:其性质不随观测时间的变化而变化。具有趋势或季节性的时间序列不是平稳时间序列——趋势和季节性使得时间序列在不同时段呈现不同性质。

-以下参数可以动态输入控制预测过程中生成 合适的 ARIMA 的模型。
+以下参数可以动态输入,控制预测过程中生成合适的 ARIMA 模型。

 - p= 自回归模型阶数
 - d= 差分阶数
@@ -21,13 +21,13 @@ ARIMA 模型是一种自回归模型,只需要自变量即可预测后续的
 ### 参数
 分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
-|参数名称|说明|必填项|
-|---|---|---|
-|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0,将使用非季节性/周期性的 ARIMA 模型预测。|选填|
-|start_p|自回归模型阶数的起始值,0 开始的整数,不推荐大于 10 |选填|
-|max_p|自回归模型阶数的结束值,0 开始的整数,不推荐大于 10 |选填|
-|start_q|移动平均模型阶数的起始值,0 开始的整数,不推荐大于 10 |选填|
-|max_q|移动平均模型阶数的结束值,0 开始的整数,不推荐大于 10 |选填|
+|参数|说明|必填项|
+|---|---|-----|
+|period|输入时间序列每个周期包含的数据点个数,如果不设置该参数或该参数设置为 0,将使用非季节性/周期性的 ARIMA 模型预测|选填|
+|start_p|自回归模型阶数的起始值,0 开始的整数,不推荐大于 10|选填|
+|max_p|自回归模型阶数的结束值,0 开始的整数,不推荐大于 10|选填|
+|start_q|移动平均模型阶数的起始值,0 开始的整数,不推荐大于 10|选填|
+|max_q|移动平均模型阶数的结束值,0 开始的整数,不推荐大于 10|选填|
 |d|差分阶数|选填|

 `start_p`、`max_p` `start_q` `max_q` 四个参数约束了模型在多大的范围内去搜寻合适的最优解。相同输入数据的条件下,参数范围越大,消耗的资源越多,系统响应的时间越长。
@@ -40,11 +40,11 @@ FORECAST(i32, "algo=arima,alpha=95,period=10,start_p=1,max_p=5,start_q=1,max_q=5
 ```json5
 {
-"rows": fc_rows, // 预测结果的行数
+"rows": fc_rows, // 返回结果的行数
 "period": period, // 返回结果的周期性,同输入
 "alpha": alpha, // 返回结果的置信区间,同输入
 "algo": "arima", // 返回结果使用的算法
-"mse":mse, // 拟合输入时序数据时候生成模型的最小均方误差(MSE)
+"mse": mse, // 拟合输入时间序列时候生成模型的最小均方误差(MSE)
 "res": res // 列模式的结果
 }
 ```
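The automatic order search that the ARIMA page describes (period, start_p/max_p, start_q/max_q, d) maps naturally onto a pmdarima-style `auto_arima` call. A hedged sketch under the assumption that such a library backs the algorithm; the patch does not state which implementation is actually used:

```python
import numpy as np
import pmdarima as pm


def arima_forecast(series, fc_rows=10, period=0,
                   start_p=1, max_p=5, start_q=1, max_q=5, d=None):
    """Search for a suitable (p, d, q) order and forecast fc_rows points with a 95% interval."""
    model = pm.auto_arima(
        np.asarray(series, dtype=float),
        start_p=start_p, max_p=max_p,
        start_q=start_q, max_q=max_q,
        d=d,                              # None lets the search pick the differencing order
        seasonal=period > 0,
        m=period if period > 0 else 1,    # m is the seasonal period
    )
    forecast, conf = model.predict(n_periods=fc_rows, return_conf_int=True, alpha=0.05)
    return forecast, conf                 # point forecasts plus lower/upper bounds
```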
diff --git a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md b/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md
index 6fb4bffdc9..38662ca2b3 100644
--- a/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md
+++ b/docs/zh/06-advanced/06-data-analysis/02-holtwinters.md
@@ -8,15 +8,15 @@ sidebar_label: "HoltWinters"

 ## 功能概述

 HoltWinters 模型又称为多次指数平滑模型(EMA)。适用于含有线性趋势和周期波动的非平稳序列,利用指数平滑法让模型参数不断适应非平稳序列的变化,并对未来趋势进行**短期**预测。
 HoltWinters 有两种不同的季节性组成部分,当季节变化在该时间序列中大致保持不变时,通常选择**加法模型**;而当季节变化与时间序列的水平成比例变化时,通常选择**乘法模型**。
-该模型对于返回数据也不提供计算的置信区间范围结果。在 95% 置信区间的上下界结果与预测结果相同。
+该模型对于返回数据不提供计算的置信区间范围结果,在 95% 置信区间的上下界结果与预测结果相同。

 ### 参数

-分析平台中使用自动化的 ARIMA 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
-|参数名称|说明|必填项|
+分析平台中使用自动化的 HoltWinters 模型进行计算,因此每次计算的时候会根据输入的数据自动拟合最合适的模型,然后根据该模型进行预测输出结果。
+|参数|说明|必填项|
 |---|---|---|
-|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0, 将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填|
+|period|输入时间序列每个周期包含的数据点个数。如果不设置该参数或该参数设置为 0,将使用一次(简单)指数平滑方式进行数据拟合,并据此进行未来数据的预测|选填|
 |trend|趋势模型使用加法模型还是乘法模型|选填|
 |seasonal|季节性采用加法模型还是乘法模型|选填|
@@ -30,11 +30,11 @@ FORECAST(i32, "algo=holtwinters,period=10,trend=mul,seasonal=mul")

 ```json5
 {
-"rows": rows, // 结果的行数
-"period": period, // 返回结果的周期性, 该结果与输入的周期性相同,如果没有周期性,该值为 0
+"rows": rows, // 返回结果的行数
+"period": period, // 返回结果的周期性,该结果与输入的周期性相同,如果没有周期性,该值为 0
 "algo": 'holtwinters' // 返回结果使用的计算模型
-"mse":mse, // 最小均方误差(minmum square error)
-"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了 两列 [timestamp][fc_results]。
+"mse": mse, // 最小均方误差(minimum square error)
+"res": res // 具体的结果,按照列形式返回的结果。一般意义上包含了两列 [timestamp][fc_results]。
 }
 ```
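The add/mul choices for trend and seasonality described above correspond directly to statsmodels' `ExponentialSmoothing`. A sketch assuming that backend — the actual ANode implementation is not shown in this patch — including the fallback to simple exponential smoothing when `period` is 0:

```python
import numpy as np
from statsmodels.tsa.holtwinters import ExponentialSmoothing


def holtwinters_forecast(series, fc_rows=10, period=0, trend="add", seasonal="add"):
    """Fit Holt-Winters and forecast fc_rows points; trend/seasonal accept 'add' or 'mul'."""
    y = np.asarray(series, dtype=float)
    if period > 0:
        model = ExponentialSmoothing(y, trend=trend, seasonal=seasonal, seasonal_periods=period)
    else:
        # no periodicity: fall back to simple exponential smoothing, as the doc describes
        model = ExponentialSmoothing(y)
    return model.fit().forecast(fc_rows)
```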
diff --git a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md b/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md
index d0da330ab3..bdfa455ae3 100644
--- a/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md
+++ b/docs/zh/06-advanced/06-data-analysis/03-anomaly-detection.md
@@ -3,7 +3,7 @@ title: "Anomaly-detection"
 sidebar_label: "Anomaly-detection"
 ---

-本节讲述 异常检测算法模型的使用方法。
+本节讲述异常检测算法模型的使用方法。

 ## 概述
 分析平台提供了 6 种异常检查模型,6 种异常检查模型分为 3 个类别,分别属于基于统计的异常检测模型、基于数据密度的检测模型、基于深度学习的异常检测模型。在不指定异常检测使用的方法的情况下,默认调用 iqr 的方法进行计算。
@@ -11,20 +11,20 @@ sidebar_label: "Anomaly-detection"

 ### 统计学异常检测方法

-- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为 3, 即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点.
+- k-sigma[1]: 即 ***68–95–99.7 rule*** 。***k***值默认为 3,即序列均值的 3 倍标准差范围为边界,超过边界的是异常值。KSigma 要求数据整体上服从正态分布,如果一个点偏离均值 K 倍标准差,则该点被视为异常点.

-|参数名称|说明|是否必选|默认值|
+|参数|说明|是否必选|默认值|
 |---|---|---|---|
 |k|标准差倍数|选填|3|

-- IQR[2]:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 Q3–Q1,位于 Q3+1.5。无输入参数。
+- IQR[2]:四分位距 (Interquartile range, IQR) 是一种衡量变异性的方法. 四分位数将一个按等级排序的数据集划分为四个相等的部分。即 Q1(第 1 个四分位数)、Q2(第 2 个四分位数)和 Q3(第 3 个四分位数)。IQR 定义为 Q3–Q1,位于 Q3+1.5。无输入参数。

 - Grubbs[3]: 又称为 Grubbs' test,即最大标准残差测试。Grubbs 通常用作检验最大值、最小值偏离均值的程度是否为异常,该单变量数据集遵循近似标准正态分布。非正态分布数据集不能使用该方法。无输入参数。

 - SHESD[4]: 带有季节性的 ESD 检测算法。ESD 可以检测时间序列数据的多异常点。需要指定异常点比例的上界***k***,最差的情况是至多 49.9%。数据集的异常比例一般不超过 5%

-|参数名称|说明|是否必选|默认值|
+|参数|说明|是否必选|默认值|
 |---|---|---|---|
 |k|异常点在输入数据集中占比,范围是$`1\le K \le 49.9`$ |选填|5|

diff --git a/docs/zh/06-advanced/06-data-analysis/addins.md b/docs/zh/06-advanced/06-data-analysis/addins.md
index aeaf08d490..c0b8921718 100644
--- a/docs/zh/06-advanced/06-data-analysis/addins.md
+++ b/docs/zh/06-advanced/06-data-analysis/addins.md
@@ -3,7 +3,7 @@ title: "addins"
 sidebar_label: "addins"
 ---

-本节说明如何将自己开发的新预测算法和异常检测算法整合到 TDengine 分析平台, 并能够通过 SQL 语句进行调用。
+本节说明如何将自己开发的预测算法和异常检测算法整合到 TDengine 分析平台,并能够通过 SQL 语句进行调用。

 ## 目录结构

 |目录|说明|
 |---|---|
-|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc, 单元测试和集成测试目录 test。 algo 目录下 ad 放置异常检测算法代码, fc 放置预测算法代码|
+|taos|Python 源代码目录,其下包含了算法具体保存目录 algo,放置杂项目录 misc,单元测试和集成测试目录 test。 algo 目录下 ad 放置异常检测算法代码,fc 放置预测算法代码|
 |script|是安装脚本和发布脚本放置目录|
 |model|放置针对数据集完成的训练模型|
 |cfg|配置文件目录|

 ## 约定与限制

-定义异常检测算法的 Python 代码文件 需放在 /taos/algo/ad 目录中,预测算法 Python 代码文件需要放在 /taos/algo/fc 目录中,以确保系统启动的时候能够正常加载对应目录下的 Python 文件。
+定义异常检测算法的 Python 代码文件需放在 /taos/algo/ad 目录中,预测算法 Python 代码文件需要放在 /taos/algo/fc 目录中,以确保系统启动的时候能够正常加载对应目录下的 Python 文件。

 ### 类命名规范

 ### 类继承约定

-异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`.
-预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`。
+- 异常检测算法需要从 `AbstractAnomalyDetectionService` 继承,并实现其核心抽象方法 `execute`
+- 预测算法需要从 `AbstractForecastService` 继承,同样需要实现其核心抽象方法 `execute`

 ### 类属性初始化
-每个算法实现的类需要静态初始化两个类属性,分别是
+每个算法实现的类需要静态初始化两个类属性,分别是:

-`name`: 的触发调用关键词,全小写英文字母。
-`desc`:该算法的描述信息。
+- `name`:触发调用的关键词,全小写英文字母
+- `desc`:算法的描述信息

 ### 核心方法输入与输出约定

 `execute` 是算法处理的核心方法。调用该方法的时候,`self.list` 已经设置好输入数组。
+
 异常检测输出结果
 `execute` 的返回值是长度与 `self.list` 相同的数组,数组位置为 -1 的即为异常值点。例如:输入数组是 [2, 2, 2, 2, 100], 如果 100 是异常点,那么返回值是 [1, 1, 1, 1, -1]。
+
 预测输出结果
 对于预测算法,`AbstractForecastService` 的对象属性说明如下:

 |属性名称|说明|默认值|
 |---|---|---|
-|period|输入时序数据的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0|
-|start_ts|预测数据的开始时间| 0|
+|period|输入时间序列的周期性,多少个数据点表示一个完整的周期。如果没有周期性,那么设置为 0 即可| 0|
+|start_ts|预测结果的开始时间| 0|
 |time_step|预测结果的两个数据点之间时间间隔|0 |
-|fc_rows|预测结果数量| 0 |
-|return_conf|返回结果中是否包含执行区间范围,如果算法计算结果不包含置信区间,那么上界和下界与自身相同| 1|
-|conf|执行区间分位数 0.05|
+|fc_rows|预测结果的数量| 0 |
+|return_conf|预测结果中是否包含置信区间范围,如果不包含置信区间,那么上界和下界与自身相同| 1|
+|conf|置信区间分位数 0.05|

 预测返回结果如下:
diff --git a/docs/zh/06-advanced/06-data-analysis/index.md b/docs/zh/06-advanced/06-data-analysis/index.md
index df04c91d28..2cbea1caba 100644
--- a/docs/zh/06-advanced/06-data-analysis/index.md
+++ b/docs/zh/06-advanced/06-data-analysis/index.md
@@ -156,7 +156,7 @@ SHOW ANODES;
 SHOW ANODES FULL;
 ```

-#### 强制刷新 TDengine 集群中分析算法缓存
+#### 强制刷新集群中的分析算法缓存
 ```SQL
 UPDATE ANODE {node_id}
 UPDATE ALL ANODES