diff --git a/README-CN.md b/README-CN.md index 39d979c123..b2ae777837 100644 --- a/README-CN.md +++ b/README-CN.md @@ -50,19 +50,12 @@ TDengine 目前 2.0 版服务器仅能在 Linux 系统上安装和运行,后 ## 安装工具 -### Ubuntu 16.04 及以上版本 & Debian: +### Ubuntu 18.04 及以上版本 & Debian: ```bash sudo apt-get install -y gcc cmake build-essential git libssl-dev ``` -### Ubuntu 14.04: - -```bash -sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26 -export PATH=/usr/lib/binutils-2.26/bin:$PATH -``` - 编译或打包 JDBC 驱动源码,需安装 Java JDK 8 或以上版本和 Apache Maven 2.7 或以上版本。 安装 OpenJDK 8: @@ -89,7 +82,7 @@ taosTools 是用于 TDengine 的辅助工具软件集合。目前它包含 taosB sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config ``` -### CentOS 7: +### CentOS 7.9: ```bash sudo yum install -y gcc gcc-c++ make cmake git openssl-devel @@ -161,12 +154,7 @@ git clone https://github.com/taosdata/TDengine.git cd TDengine ``` -Go 连接器和 Grafana 插件在其他独立仓库,如果安装它们的话,需要在 TDengine 目录下通过此命令安装: - -```bash -git submodule update --init --recursive -``` - +Go 连接器和 Grafana 插件已移到其他独立仓库。 如果使用 https 协议下载比较慢,可以通过修改 ~/.gitconfig 文件添加以下两行设置使用 ssh 协议下载。需要首先上传 ssh 密钥到 GitHub,详细方法请参考 GitHub 官方文档。 ``` @@ -187,7 +175,6 @@ git submodule update --init --recursive 这个脚本等价于执行如下命令: ```bash -git submodule update --init --recursive mkdir debug cd debug cmake .. -DBUILD_TOOLS=true diff --git a/README.md b/README.md index d4b8321813..2f38120b34 100644 --- a/README.md +++ b/README.md @@ -53,19 +53,12 @@ To build TDengine, use [CMake](https://cmake.org/) 3.0.2 or higher versions in t ## Install build dependencies -### Ubuntu 16.04 and above or Debian +### Ubuntu 18.04 and above or Debian ```bash sudo apt-get install -y gcc cmake build-essential git libssl-dev ``` -### Ubuntu 14.04 - -```bash -sudo apt-get install -y gcc cmake3 build-essential git binutils-2.26 -export PATH=/usr/lib/binutils-2.26/bin:$PATH -``` - To compile and package the JDBC driver source code, you should have a Java jdk-8 or higher and Apache Maven 2.7 or higher installed. To install openjdk-8: @@ -91,7 +84,7 @@ To build the [taosTools](https://github.com/taosdata/taos-tools) on Ubuntu/Debia sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev pkg-config ``` -### CentOS 7 +### CentOS 7.9 ```bash sudo yum install epel-release @@ -164,12 +157,7 @@ git clone https://github.com/taosdata/TDengine.git cd TDengine ``` -The connectors for go & Grafana and some tools have been moved to separated repositories, -so you should run this command in the TDengine directory to install them: - -```bash -git submodule update --init --recursive -``` +The connectors for go & Grafana and some tools have been moved to separated repositories. You can modify the file ~/.gitconfig to use ssh protocol instead of https for better download speed. You need to upload ssh public key to GitHub first. Please refer to GitHub official documentation for detail. @@ -191,7 +179,6 @@ You can run the bash script `build.sh` to build both TDengine and taosTools incl It equals to execute following commands: ```bash -git submodule update --init --recursive mkdir debug cd debug cmake .. -DBUILD_TOOLS=true diff --git a/build.sh b/build.sh new file mode 100755 index 0000000000..78f08afa7a --- /dev/null +++ b/build.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +if [ ! -d debug ]; then + mkdir debug || echo -e "failed to make directory for build" +fi + +cd debug && cmake .. 
-DBUILD_TOOLS=true && make + diff --git a/cmake/cmake.define b/cmake/cmake.define index a33db902ca..5639d212d7 100644 --- a/cmake/cmake.define +++ b/cmake/cmake.define @@ -115,9 +115,7 @@ ELSE () ENDIF () MESSAGE("System processor ID: ${CMAKE_SYSTEM_PROCESSOR}") - IF (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm64" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64") - ADD_DEFINITIONS("-D_TD_ARM_") - ELSE () + IF (TD_INTEL_64 OR TD_INTEL_32) ADD_DEFINITIONS("-msse4.2") IF("${FMA_SUPPORT}" MATCHES "true") MESSAGE(STATUS "turn fma function support on") diff --git a/cmake/cmake.install b/cmake/cmake.install index 27edd8e27a..4b1ccbf6d5 100644 --- a/cmake/cmake.install +++ b/cmake/cmake.install @@ -10,7 +10,9 @@ ELSEIF (TD_WINDOWS) # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/python DESTINATION connector) # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/src/connector/C\# DESTINATION connector) # INSTALL(DIRECTORY ${TD_SOURCE_DIR}/examples DESTINATION .) - INSTALL(FILES ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg DESTINATION cfg) + INSTALL(CODE "IF (NOT EXISTS ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg) + execute_process(COMMAND ${CMAKE_COMMAND} -E copy ${TD_SOURCE_DIR}/packaging/cfg/taos.cfg ${CMAKE_INSTALL_PREFIX}/cfg/taos.cfg) + ENDIF ()") INSTALL(FILES ${TD_SOURCE_DIR}/include/client/taos.h DESTINATION include) INSTALL(FILES ${TD_SOURCE_DIR}/include/util/taoserror.h DESTINATION include) INSTALL(FILES ${TD_SOURCE_DIR}/include/libs/function/taosudf.h DESTINATION include) @@ -20,6 +22,7 @@ ELSEIF (TD_WINDOWS) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taos.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosd.exe DESTINATION .) INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/udfd.exe DESTINATION .) + INSTALL(FILES ${EXECUTABLE_OUTPUT_PATH}/taosBenchmark.exe DESTINATION .) IF (TD_MVN_INSTALLED) INSTALL(FILES ${LIBRARY_OUTPUT_PATH}/taos-jdbcdriver-2.0.38-dist.jar DESTINATION connector/jdbc) diff --git a/cmake/cmake.platform b/cmake/cmake.platform index 5c6ffd4b10..887fbd86d5 100644 --- a/cmake/cmake.platform +++ b/cmake/cmake.platform @@ -85,26 +85,36 @@ IF ("${CPUTYPE}" STREQUAL "") MESSAGE(STATUS "The current platform is aarch32") SET(PLATFORM_ARCH_STR "arm") SET(TD_ARM_32 TRUE) + ADD_DEFINITIONS("-D_TD_ARM_") + ADD_DEFINITIONS("-D_TD_ARM_32") ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64") MESSAGE(STATUS "The current platform is aarch64") SET(PLATFORM_ARCH_STR "arm64") SET(TD_ARM_64 TRUE) + ADD_DEFINITIONS("-D_TD_ARM_") + ADD_DEFINITIONS("-D_TD_ARM_64") ENDIF () ELSE () # if generate ARM version: # cmake -DCPUTYPE=aarch32 .. 
or cmake -DCPUTYPE=aarch64 - IF (${CPUTYPE} MATCHES "aarch32") + IF (${CPUTYPE} MATCHES "aarch32" OR ${CPUTYPE} MATCHES "arm32") SET(PLATFORM_ARCH_STR "arm") MESSAGE(STATUS "input cpuType: aarch32") + ADD_DEFINITIONS("-D_TD_ARM_") + ADD_DEFINITIONS("-D_TD_ARM_32") SET(TD_ARM_32 TRUE) - ELSEIF (${CPUTYPE} MATCHES "aarch64") + ELSEIF (${CPUTYPE} MATCHES "aarch64" OR ${CPUTYPE} MATCHES "arm64") SET(PLATFORM_ARCH_STR "arm64") MESSAGE(STATUS "input cpuType: aarch64") + ADD_DEFINITIONS("-D_TD_ARM_") + ADD_DEFINITIONS("-D_TD_ARM_64") SET(TD_ARM_64 TRUE) ELSEIF (${CPUTYPE} MATCHES "mips64") SET(PLATFORM_ARCH_STR "mips") MESSAGE(STATUS "input cpuType: mips64") SET(TD_MIPS_64 TRUE) + ADD_DEFINITIONS("-D_TD_MIPS_") + ADD_DEFINITIONS("-D_TD_MIPS_64") ELSEIF (${CPUTYPE} MATCHES "x64") SET(PLATFORM_ARCH_STR "amd64") MESSAGE(STATUS "input cpuType: x64") diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in index 9ee0ea526c..4f08656ac8 100644 --- a/cmake/taostools_CMakeLists.txt.in +++ b/cmake/taostools_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taos-tools ExternalProject_Add(taos-tools GIT_REPOSITORY https://github.com/taosdata/taos-tools.git - GIT_TAG 3c7dafe + GIT_TAG 2d68404 SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/cmake/taosws_CMakeLists.txt.in b/cmake/taosws_CMakeLists.txt.in index de6409a8c6..c6d42b686c 100644 --- a/cmake/taosws_CMakeLists.txt.in +++ b/cmake/taosws_CMakeLists.txt.in @@ -1,8 +1,8 @@ # taosws-rs ExternalProject_Add(taosws-rs - GIT_REPOSITORY https://github.com/taosdata/taosws-rs.git - GIT_TAG 29424d5 + GIT_REPOSITORY https://github.com/taosdata/taos-connector-rust.git + GIT_TAG 97c4bac SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosws-rs" BINARY_DIR "" #BUILD_IN_SOURCE TRUE diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index b4e8825431..de7b75a245 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -27,6 +27,10 @@ else () cat("${TD_SUPPORT_DIR}/taosadapter_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) endif() +if(TD_LINUX_64 AND JEMALLOC_ENABLED) + cat("${TD_SUPPORT_DIR}/jemalloc_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) +endif() + # pthread if(${BUILD_PTHREAD}) cat("${TD_SUPPORT_DIR}/pthread_CMakeLists.txt.in" ${CONTRIB_TMP_FILE}) @@ -392,6 +396,19 @@ if(${BUILD_WITH_SQLITE}) endif(NOT TD_WINDOWS) endif(${BUILD_WITH_SQLITE}) +# jemalloc +IF (TD_LINUX_64 AND JEMALLOC_ENABLED) + include(ExternalProject) + ExternalProject_Add(jemalloc + PREFIX "jemalloc" + SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}/jemalloc + BUILD_IN_SOURCE 1 + CONFIGURE_COMMAND ./autogen.sh COMMAND ./configure --prefix=${CMAKE_BINARY_DIR}/build/ + BUILD_COMMAND ${MAKE} + ) + INCLUDE_DIRECTORIES(${CMAKE_BINARY_DIR}/build/include) +ENDIF () + # addr2line if(${BUILD_ADDR2LINE}) if(NOT ${TD_WINDOWS}) diff --git a/docs/assets/tdengine.svg b/docs/assets/tdengine.svg new file mode 100644 index 0000000000..fd3f5955b3 --- /dev/null +++ b/docs/assets/tdengine.svg @@ -0,0 +1,44 @@ [44 added lines of SVG logo markup; stripped in extraction and not recoverable] diff --git a/docs/examples/go/query/sync/main.go b/docs/examples/go/query/sync/main.go index e37164f47f..8156eea46a 100644 --- a/docs/examples/go/query/sync/main.go +++ b/docs/examples/go/query/sync/main.go @@ -31,6 +31,6 @@ func main() { log.Fatalln("scan error:\n", err) return } - log.Fatalln(r.ts, r.current) + log.Println(r.ts, r.current) } } diff --git a/docs/examples/node/nativeexample/async_query_example.js b/docs/examples/node/nativeexample/async_query_example.js index 25b78bc48a..432d8b8f6c 100644 ---
a/docs/examples/node/nativeexample/async_query_example.js +++ b/docs/examples/node/nativeexample/async_query_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", database: "power" }); const cursor = conn.cursor(); @@ -18,4 +18,3 @@ try { conn.close(); }, 2000); } -// bug here: jira 14506 diff --git a/docs/examples/node/nativeexample/connect.js b/docs/examples/node/nativeexample/connect.js index da791c566e..bb027d4fe8 100644 --- a/docs/examples/node/nativeexample/connect.js +++ b/docs/examples/node/nativeexample/connect.js @@ -1,13 +1,20 @@ -const taos = require("td2.0-connector"); +const { options, connect } = require("@tdengine/rest"); -var conn = taos.connect({ - host: "localhost", - port: 6030, - user: "root", - password: "taosdata", -}); -conn.close(); +async function test() { + options.path = "/rest/sql"; + options.host = "localhost"; + let conn = connect(options); + let cursor = conn.cursor(); + try { + let res = await cursor.query("SELECT server_version()"); + res.toString(); + } catch (err) { + console.log(err); + } +} +test(); -// run with: node connect.js // output: -// Successfully connected to TDengine +// server_version() | +// =================== +// 3.0.0.0 | diff --git a/docs/examples/node/nativeexample/influxdb_line_example.js b/docs/examples/node/nativeexample/influxdb_line_example.js index 2050bee545..57170770d8 100644 --- a/docs/examples/node/nativeexample/influxdb_line_example.js +++ b/docs/examples/node/nativeexample/influxdb_line_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", diff --git a/docs/examples/node/nativeexample/insert_example.js b/docs/examples/node/nativeexample/insert_example.js index ade9d83158..42050fa251 100644 --- a/docs/examples/node/nativeexample/insert_example.js +++ b/docs/examples/node/nativeexample/insert_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", @@ -11,11 +11,11 @@ try { cursor.execute( "CREATE STABLE meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (location BINARY(64), groupId INT)" ); - var sql = `INSERT INTO power.d1001 USING power.meters TAGS(California.SanFrancisco, 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) -power.d1002 USING power.meters TAGS(California.SanFrancisco, 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) -power.d1003 USING power.meters TAGS(California.LosAngeles, 2) VALUES ('2018-10-03 14:38:05.500', 11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) -power.d1004 USING power.meters TAGS(California.LosAngeles, 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; - cursor.execute(sql); + var sql = `INSERT INTO power.d1001 USING power.meters TAGS('California.SanFrancisco', 2) VALUES ('2018-10-03 14:38:05.000', 10.30000, 219, 0.31000) ('2018-10-03 14:38:15.000', 12.60000, 218, 0.33000) ('2018-10-03 14:38:16.800', 12.30000, 221, 0.31000) +power.d1002 USING power.meters TAGS('California.SanFrancisco', 3) VALUES ('2018-10-03 14:38:16.650', 10.30000, 218, 0.25000) +power.d1003 USING power.meters TAGS('California.LosAngeles', 2) VALUES ('2018-10-03 14:38:05.500', 
11.80000, 221, 0.28000) ('2018-10-03 14:38:16.600', 13.40000, 223, 0.29000) +power.d1004 USING power.meters TAGS('California.LosAngeles', 3) VALUES ('2018-10-03 14:38:05.000', 10.80000, 223, 0.29000) ('2018-10-03 14:38:06.500', 11.50000, 221, 0.35000)`; + cursor.execute(sql,{'quiet':false}); } finally { cursor.close(); conn.close(); diff --git a/docs/examples/node/nativeexample/multi_bind_example.js b/docs/examples/node/nativeexample/multi_bind_example.js index 6ef8b30c09..6cc3993397 100644 --- a/docs/examples/node/nativeexample/multi_bind_example.js +++ b/docs/examples/node/nativeexample/multi_bind_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", @@ -24,10 +24,10 @@ function insertData() { ); // bind table name and tags - let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("California.SanFrancisco"); - tagBind.bindInt(2); - cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); + let tagBind = new taos.TaosMultiBindArr(2); + tagBind.multiBindBinary(["California.SanFrancisco"]); + tagBind.multiBindInt([2]); + cursor.stmtSetTbnameTags("d1001", tagBind.getMultiBindArr()); // bind values let valueBind = new taos.TaosMultiBindArr(4); diff --git a/docs/examples/node/nativeexample/opentsdb_json_example.js b/docs/examples/node/nativeexample/opentsdb_json_example.js index 2d78444a3f..4ea36ca68b 100644 --- a/docs/examples/node/nativeexample/opentsdb_json_example.js +++ b/docs/examples/node/nativeexample/opentsdb_json_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", diff --git a/docs/examples/node/nativeexample/opentsdb_telnet_example.js b/docs/examples/node/nativeexample/opentsdb_telnet_example.js index 7f80f55883..8f5d17822b 100644 --- a/docs/examples/node/nativeexample/opentsdb_telnet_example.js +++ b/docs/examples/node/nativeexample/opentsdb_telnet_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", diff --git a/docs/examples/node/nativeexample/param_bind_example.js b/docs/examples/node/nativeexample/param_bind_example.js index c7e04c71a0..efe422586f 100644 --- a/docs/examples/node/nativeexample/param_bind_example.js +++ b/docs/examples/node/nativeexample/param_bind_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", @@ -23,25 +23,22 @@ function insertData() { ); // bind table name and tags - let tagBind = new taos.TaosBind(2); - tagBind.bindBinary("California.SanFrancisco"); - tagBind.bindInt(2); - cursor.stmtSetTbnameTags("d1001", tagBind.getBind()); + let tagBind = new taos.TaosMultiBindArr(2); + tagBind.multiBindBinary(["California.SanFrancisco"]); + tagBind.multiBindInt([2]); + cursor.stmtSetTbnameTags("d1001", tagBind.getMultiBindArr()); // bind values - let rows = [ - [1648432611249, 10.3, 219, 0.31], - [1648432611749, 12.6, 218, 0.33], - ]; - for (let row of rows) { - let valueBind = new taos.TaosBind(4); - valueBind.bindTimestamp(row[0]); - valueBind.bindFloat(row[1]); - valueBind.bindInt(row[2]); - valueBind.bindFloat(row[3]); - cursor.stmtBindParam(valueBind.getBind()); - cursor.stmtAddBatch(); - } + let rows = [[1648432611249, 1648432611749], [10.3, 12.6], [219, 218], [0.31, 0.33]]; + + let valueBind = new taos.TaosMultiBindArr(4); + 
valueBind.multiBindTimestamp(rows[0]); + valueBind.multiBindFloat(rows[1]); + valueBind.multiBindInt(rows[2]); + valueBind.multiBindFloat(rows[3]); + cursor.stmtBindParamBatch(valueBind.getMultiBindArr()); + cursor.stmtAddBatch(); + // execute cursor.stmtExecute(); diff --git a/docs/examples/node/nativeexample/query_example.js b/docs/examples/node/nativeexample/query_example.js index a51bc939ae..11cfd9a605 100644 --- a/docs/examples/node/nativeexample/query_example.js +++ b/docs/examples/node/nativeexample/query_example.js @@ -1,4 +1,4 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", database: "power" }); const cursor = conn.cursor(); @@ -9,8 +9,6 @@ query.execute().then(function (result) { // output: // Successfully connected to TDengine -// Query OK, 2 row(s) in set (0.00317767s) - // ts | current | // ======================================================= // 2018-10-03 14:38:05.000 | 10.3 | diff --git a/docs/examples/node/nativeexample/subscribe_demo.js b/docs/examples/node/nativeexample/subscribe_demo.js index bdf0c98687..c4f7e6df84 100644 --- a/docs/examples/node/nativeexample/subscribe_demo.js +++ b/docs/examples/node/nativeexample/subscribe_demo.js @@ -1,4 +1,51 @@ -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); const conn = taos.connect({ host: "localhost", database: "power" }); -// 未完成 \ No newline at end of file +var cursor = conn.cursor(); + +function runConsumer() { + + // create topic + cursor.execute("create topic topic_name_example as select * from meters"); + + let consumer = taos.consumer({ + 'group.id': 'tg2', + 'td.connect.user': 'root', + 'td.connect.pass': 'taosdata', + 'msg.with.table.name': 'true', + 'enable.auto.commit': 'true' + }); + + // subscribe the topic just created. 
+ consumer.subscribe("topic_name_example"); + + // get subscribe topic list + let topicList = consumer.subscription(); + console.log(topicList); + + for (let i = 0; i < 5; i++) { + let msg = consumer.consume(100); + console.log(msg.topicPartition); + console.log(msg.block); + console.log(msg.fields) + consumer.commit(msg); + console.log(`=======consumer ${i} done`) + } + + consumer.unsubscribe(); + consumer.close(); + + // drop topic + cursor.execute("drop topic topic_name_example"); +} + + +try { + runConsumer(); +} finally { + + setTimeout(() => { + cursor.close(); + conn.close(); + }, 2000); +} \ No newline at end of file diff --git a/docs/examples/node/package.json b/docs/examples/node/package.json index f56196d2e5..36d3f016b5 100644 --- a/docs/examples/node/package.json +++ b/docs/examples/node/package.json @@ -4,7 +4,7 @@ "main": "index.js", "license": "MIT", "dependencies": { - "td2.0-connector": "^2.0.12", - "td2.0-rest-connector": "^1.0.0" + "@tdengine/client": "^3.0.0", + "@tdengine/rest": "^3.0.0" } } diff --git a/docs/examples/node/restexample/connect.js b/docs/examples/node/restexample/connect.js index b84ce2fadf..132e284ce9 100644 --- a/docs/examples/node/restexample/connect.js +++ b/docs/examples/node/restexample/connect.js @@ -1,4 +1,4 @@ -const { options, connect } = require("td2.0-rest-connector"); +const { options, connect } = require("@tdengine/rest"); async function test() { options.path = "/rest/sqlt"; @@ -17,4 +17,4 @@ test(); // output: // server_version() | // =================== -// 2.4.0.12 | +// 3.0.0.0 | diff --git a/docs/examples/python/native_insert_example.py b/docs/examples/python/native_insert_example.py index 3b6b73cb22..94fd00a6e9 100644 --- a/docs/examples/python/native_insert_example.py +++ b/docs/examples/python/native_insert_example.py @@ -1,13 +1,13 @@ import taos -lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,California.SanFrancisco,2", - "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,California.LosAngeles,3", - "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,California.LosAngeles,2", - "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,California.LosAngeles,3", - "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,California.SanFrancisco,3", - "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,California.SanFrancisco,2", - "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,California.SanFrancisco,2", - "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,California.LosAngeles,2"] +lines = ["d1001,2018-10-03 14:38:05.000,10.30000,219,0.31000,'California.SanFrancisco',2", + "d1004,2018-10-03 14:38:05.000,10.80000,223,0.29000,'California.LosAngeles',3", + "d1003,2018-10-03 14:38:05.500,11.80000,221,0.28000,'California.LosAngeles',2", + "d1004,2018-10-03 14:38:06.500,11.50000,221,0.35000,'California.LosAngeles',3", + "d1002,2018-10-03 14:38:16.650,10.30000,218,0.25000,'California.SanFrancisco',3", + "d1001,2018-10-03 14:38:15.000,12.60000,218,0.33000,'California.SanFrancisco',2", + "d1001,2018-10-03 14:38:16.800,12.30000,221,0.31000,'California.SanFrancisco',2", + "d1003,2018-10-03 14:38:16.600,13.40000,223,0.29000,'California.LosAngeles',2"] def get_connection() -> taos.TaosConnection: diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index 2edabad3c9..741f6dfeab 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -10,9 +10,11 @@ title: 通过 Docker 快速体验 TDengine 如果已经安装了 docker, 只需执行下面的命令。 ```shell -docker run -d -p 6030-6049:6030-6049 -p 
6030-6049:6030-6049/udp tdengine/tdengine +docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine ``` +注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 提供 REST 服务所使用的端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用的端口,可根据需要选择是否打开。 + 确定该容器已经启动并且在正常运行 ```shell $ taos Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. -Server is Enterprise trial Edition, ver:3.0.0.0 and will expire at 2022-09-24 15:29:46. +Server is Community Edition. taos> ``` - -## 启动 REST 服务 - -taosAdapter 是 TDengine 中提供 REST 服务的组件。下面这条命令会在容器中同时启动 `taosd` 和 `taosadapter` 两个服务组件。 - -```bash -docker run -d --name tdengine-all -p 6030-6049:6030-6049 -p 6030-6049:6030-6049/udp tdengine/tdengine -``` - -如果想只启动 `taosadapter`: - -```bash -docker run -d --name tdengine-taosa -p 6041-6049:6041-6049 -p 6041-6049:6041-6049/udp -e TAOS_FIRST_EP=tdengine-all tdengine/tdengine:3.0.0.0 taosadapter -``` - -如果想只启动 `taosd`: - -```bash -docker run -d --name tdengine-taosd -p 6030-6042:6030-6042 -p 6030-6042:6030-6042/udp -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:3.0.0.0 -``` - ## 访问 REST 接口 +taosAdapter 是 TDengine 中提供 REST 服务的组件。默认 Docker 镜像会同时启动 TDengine 后台服务 taosd 和 taosAdapter。 + 可以在宿主机使用 curl 通过 RESTful 端口访问 Docker 容器内的 TDengine server。 ``` @@ -96,6 +79,22 @@ curl -L -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql TDengine REST API 详情请参考[官方文档](/reference/rest-api/)。 +## 单独启动 REST 服务 + +如果想只启动 `taosadapter`: + +```bash +docker run -d --network=host --name tdengine-taosa -e TAOS_FIRST_EP=tdengine-taosd tdengine/tdengine:3.0.0.0 taosadapter +``` + +只启动 `taosd`: + +```bash +docker run -d --network=host --name tdengine-taosd -e TAOS_DISABLE_ADAPTER=true tdengine/tdengine:3.0.0.0 +``` + +注意:以上为使用 host 网络方式单独部署 taosAdapter 时的命令行参数。如使用其他网络访问方式,请自行设置 hostname、DNS 等必要的网络配置。 + ## 写入数据 可以使用 TDengine 的自带工具 taosBenchmark 快速体验 TDengine 的写入。 @@ -107,7 +106,7 @@ TDengine REST API 详情请参考[官方文档](/reference/rest-api/)。 ``` - 该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "California.SanFrancisco" 或者 "California.LosAngeles"。 + 该命令将在数据库 test 下面自动创建一张超级表 meters,该超级表下有 1 万张表,表名为 "d0" 到 "d9999",每张表有 1 万条记录,每条记录有 (ts, current, voltage, phase) 四个字段,时间戳从 "2017-07-14 10:40:00 000" 到 "2017-07-14 10:40:09 999",每张表带有标签 location 和 groupId,groupId 被设置为 1 到 10, location 被设置为 "San Francisco" 或者 "Los Angeles" 等城市名称。 这条命令很快完成 1 亿条记录的插入。具体时间取决于硬件性能。 @@ -129,10 +128,10 @@ taos> select count(*) from test.meters; taos> select avg(current), max(voltage), min(phase) from test.meters; ``` -查询 location="California.SanFrancisco" 的记录总条数: +查询 location="San Francisco" 的记录总条数: ```sql -taos> select count(*) from test.meters where location="California.SanFrancisco"; +taos> select count(*) from test.meters where location="San Francisco"; ``` 查询 groupId=10 的所有记录的平均值、最大值、最小值等: ```sql taos> select avg(current), max(voltage), min(phase) from test.meters where groupId=10; ``` @@ -145,4 +144,4 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group 对表 d10 按 10s 进行平均值、最大值和最小值聚合统计: ```sql taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); -``` \ No newline at end of file +``` diff --git a/docs/zh/05-get-started/03-package.md index 6dbf74f8bc..096a89ccb4 100644 --- a/docs/zh/05-get-started/03-package.md +++ b/docs/zh/05-get-started/03-package.md
@@ -11,7 +11,7 @@ import TabItem from "@theme/TabItem"; ::: -TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包。 +TDengine 开源版本提供 deb 和 rpm 格式安装包,用户可以根据自己的运行环境选择合适的安装包。其中 deb 支持 Debian/Ubuntu 及衍生系统,rpm 支持 CentOS/RHEL/SUSE 及衍生系统。同时我们也为企业用户提供 tar.gz 格式安装包。也支持通过 `apt-get` 工具从线上进行安装。 ## 安装 @@ -46,114 +46,122 @@ apt-get 方式只适用于 Debian 或 Ubuntu 系统 -1、从官网下载获得 deb 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.deb; -2、进入到 TDengine-server-2.4.0.7-Linux-x64.deb 安装包所在目录,执行如下的安装命令: +1、从官网下载获得 deb 安装包,例如 TDengine-server-3.0.0.10002-Linux-x64.deb; +2、进入到 TDengine-server-3.0.0.10002-Linux-x64.deb 安装包所在目录,执行如下的安装命令: ``` -$ sudo dpkg -i TDengine-server-2.4.0.7-Linux-x64.deb -(Reading database ... 137504 files and directories currently installed.) -Preparing to unpack TDengine-server-2.4.0.7-Linux-x64.deb ... -TDengine is removed successfully! -Unpacking tdengine (2.4.0.7) over (2.4.0.7) ... -Setting up tdengine (2.4.0.7) ... +$ sudo dpkg -i TDengine-server-3.0.0.10002-Linux-x64.deb +Selecting previously unselected package tdengine. +(Reading database ... 119653 files and directories currently installed.) +Preparing to unpack TDengine-server-3.0.0.10002-Linux-x64.deb ... +Unpacking tdengine (3.0.0.10002) ... +Setting up tdengine (3.0.0.10002) ... Start to install TDengine... -System hostname is: ubuntu-1804 +System hostname is: v3cluster-0002 Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one: -Enter your email address for priority support or enter empty to skip: +Enter your email address for priority support or enter empty to skip: Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service. To configure TDengine : edit /etc/taos/taos.cfg To start TDengine : sudo systemctl start taosd -To access TDengine : taos -h ubuntu-1804 to login into TDengine server +To access TDengine : taos -h v3cluster-0002 to login into TDengine server TDengine is installed successfully! + ``` -1、从官网下载获得 rpm 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.rpm; -2、进入到 TDengine-server-2.4.0.7-Linux-x64.rpm 安装包所在目录,执行如下的安装命令: +1、从官网下载获得 rpm 安装包,例如 TDengine-server-3.0.0.10002-Linux-x64.rpm; +2、进入到 TDengine-server-3.0.0.10002-Linux-x64.rpm 安装包所在目录,执行如下的安装命令: ``` -$ sudo rpm -ivh TDengine-server-2.4.0.7-Linux-x64.rpm +$ sudo rpm -ivh TDengine-server-3.0.0.10002-Linux-x64.rpm Preparing... ################################# [100%] +Stop taosd service success! Updating / installing... - 1:tdengine-2.4.0.7-3 ################################# [100%] + 1:tdengine-3.0.0.10002-3 ################################# [100%] Start to install TDengine... -System hostname is: centos7 +System hostname is: chenhaoran01 Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join OR leave it blank to build one: -Enter your email address for priority support or enter empty to skip: - +Enter your email address for priority support or enter empty to skip: Created symlink from /etc/systemd/system/multi-user.target.wants/taosd.service to /etc/systemd/system/taosd.service. To configure TDengine : edit /etc/taos/taos.cfg To start TDengine : sudo systemctl start taosd -To access TDengine : taos -h centos7 to login into TDengine server +To access TDengine : taos -h chenhaoran01 to login into TDengine server TDengine is installed successfully! 
+ ``` -1、从官网下载获得 tar.gz 安装包,例如 TDengine-server-2.4.0.7-Linux-x64.tar.gz; -2、进入到 TDengine-server-2.4.0.7-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本: +1、从官网下载获得 tar.gz 安装包,例如 TDengine-server-3.0.0.10002-Linux-x64.tar.gz; +2、进入到 TDengine-server-3.0.0.10002-Linux-x64.tar.gz 安装包所在目录,先解压文件后,进入子目录,执行其中的 install.sh 安装脚本: ``` -$ tar xvzf TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz -TDengine-enterprise-server-2.4.0.7/ -TDengine-enterprise-server-2.4.0.7/driver/ -TDengine-enterprise-server-2.4.0.7/driver/vercomp.txt -TDengine-enterprise-server-2.4.0.7/driver/libtaos.so.2.4.0.7 -TDengine-enterprise-server-2.4.0.7/install.sh -TDengine-enterprise-server-2.4.0.7/examples/ +$ tar -zxvf TDengine-server-3.0.0.10002-Linux-x64.tar.gz +TDengine-server-3.0.0.10002/ +TDengine-server-3.0.0.10002/driver/ +TDengine-server-3.0.0.10002/driver/libtaos.so.3.0.0.10002 +TDengine-server-3.0.0.10002/driver/vercomp.txt +TDengine-server-3.0.0.10002/release_note +TDengine-server-3.0.0.10002/taos.tar.gz +TDengine-server-3.0.0.10002/install.sh ... $ ll -total 43816 -drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ./ -drwxr-xr-x 20 ubuntu ubuntu 4096 Feb 22 09:30 ../ -drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 TDengine-enterprise-server-2.4.0.7/ --rw-rw-r-- 1 ubuntu ubuntu 44852544 Feb 22 09:31 TDengine-enterprise-server-2.4.0.7-Linux-x64.tar.gz +total 56832 +drwxr-xr-x 3 root root 4096 Aug 8 10:29 ./ +drwxrwxrwx 6 root root 4096 Aug 5 16:45 ../ +drwxr-xr-x 4 root root 4096 Aug 4 18:03 TDengine-server-3.0.0.10002/ +-rwxr-xr-x 1 root root 58183066 Aug 8 10:28 TDengine-server-3.0.0.10002-Linux-x64.tar.gz* -$ cd TDengine-enterprise-server-2.4.0.7/ +$ cd TDengine-server-3.0.0.10002/ $ ll -total 40784 -drwxrwxr-x 4 ubuntu ubuntu 4096 Feb 22 09:30 ./ -drwxrwxr-x 3 ubuntu ubuntu 4096 Feb 22 09:31 ../ -drwxrwxr-x 2 ubuntu ubuntu 4096 Feb 22 09:30 driver/ -drwxrwxr-x 10 ubuntu ubuntu 4096 Feb 22 09:30 examples/ --rwxrwxr-x 1 ubuntu ubuntu 33294 Feb 22 09:30 install.sh* --rw-rw-r-- 1 ubuntu ubuntu 41704288 Feb 22 09:30 taos.tar.gz +total 51612 +drwxr-xr-x 4 root root 4096 Aug 4 18:03 ./ +drwxr-xr-x 3 root root 4096 Aug 8 10:29 ../ +drwxr-xr-x 2 root root 4096 Aug 4 18:03 driver/ +drwxr-xr-x 11 root root 4096 Aug 4 18:03 examples/ +-rwxr-xr-x 1 root root 30980 Aug 4 18:03 install.sh* +-rw-r--r-- 1 root root 6724 Aug 4 18:03 release_note +-rw-r--r-- 1 root root 52793079 Aug 4 18:03 taos.tar.gz $ sudo ./install.sh -Start to update TDengine... +Start to install TDengine... Created symlink /etc/systemd/system/multi-user.target.wants/taosd.service → /etc/systemd/system/taosd.service. -Nginx for TDengine is updated successfully! + +System hostname is: v3cluster-0002 + +Enter FQDN:port (like h1.taosdata.com:6030) of an existing TDengine cluster node to join +OR leave it blank to build one: + +Enter your email address for priority support or enter empty to skip: To configure TDengine : edit /etc/taos/taos.cfg -To configure Taos Adapter (if has) : edit /etc/taos/taosadapter.toml +To configure taosadapter (if has) : edit /etc/taos/taosadapter.toml To start TDengine : sudo systemctl start taosd -To access TDengine : use taos -h ubuntu-1804 in shell OR from http://127.0.0.1:6060 +To access TDengine : taos -h v3cluster-0002 to login into TDengine server -TDengine is updated successfully! -Install taoskeeper as a standalone service -taoskeeper is installed, enable it by `systemctl enable taoskeeper` +TDengine is installed successfully! 
``` :::info @@ -293,4 +301,4 @@ taos> select avg(current), max(voltage), min(phase) from test.meters where group ```sql taos> select avg(current), max(voltage), min(phase) from test.d10 interval(10s); -``` \ No newline at end of file +``` diff --git a/docs/zh/07-develop/04-query-data/index.mdx b/docs/zh/07-develop/04-query-data/index.mdx index 824f36ef2f..eecda92744 100644 --- a/docs/zh/07-develop/04-query-data/index.mdx +++ b/docs/zh/07-develop/04-query-data/index.mdx @@ -25,6 +25,7 @@ TDengine 采用 SQL 作为查询语言。应用程序可以通过 REST API 或 - 单列、多列数据查询 - 标签和数值的多种过滤条件:>, <, =, <\>, like 等 - 聚合结果的分组(Group by)、排序(Order by)、约束输出(Limit/Offset) +- 时间窗口(Interval)、会话窗口(Session)和状态窗口(State_window)等窗口切分聚合查询 - 数值列及聚合结果的四则运算 - 时间戳对齐的连接查询(Join Query: 隐式连接)操作 - 多种聚合/计算函数: count, max, min, avg, sum, twa, stddev, leastsquares, top, bottom, first, last, percentile, apercentile, last_row, spread, diff 等 @@ -40,7 +41,7 @@ taos> select * from d1001 where voltage > 215 order by ts desc limit 2; Query OK, 2 row(s) in set (0.001100s) ``` -为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。TDengine 还支持连续查询。 +为满足物联网场景的需求,TDengine 支持几个特殊的函数,比如 twa(时间加权平均),spread (最大值与最小值的差),last_row(最后一条记录)等,更多与物联网场景相关的函数将添加进来。 具体的查询语法请看 [TAOS SQL 的数据查询](/taos-sql/select) 章节。 @@ -73,7 +74,7 @@ taos> SELECT count(*), max(current) FROM meters where groupId = 2 and ts > now - Query OK, 1 row(s) in set (0.002136s) ``` -TDengine 仅容许对属于同一个超级表的表之间进行聚合查询,不同超级表之间的聚合查询不支持。在 [TAOS SQL 的数据查询](/taos-sql/select) 一章,查询类操作都会注明是否支持超级表。 +在 [TAOS SQL 的数据查询](/taos-sql/select) 一章,查询类操作都会注明是否支持超级表。 ## 降采样查询、插值 diff --git a/docs/zh/07-develop/09-udf.md b/docs/zh/07-develop/09-udf.md index b8ae618105..61b4974451 100644 --- a/docs/zh/07-develop/09-udf.md +++ b/docs/zh/07-develop/09-udf.md @@ -6,56 +6,144 @@ description: "支持用户编码的聚合函数和标量函数,在查询中嵌 在有些应用场景中,应用逻辑需要的查询无法直接使用系统内置的函数来表示。利用 UDF 功能,TDengine 可以插入用户编写的处理代码并在查询中使用它们,就能够很方便地解决特殊应用场景中的使用需求。 UDF 通常以数据表中的一列数据做为输入,同时支持以嵌套子查询的结果作为输入。 -从 2.2.0.0 版本开始,TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。 +TDengine 支持通过 C/C++ 语言进行 UDF 定义。接下来结合示例讲解 UDF 的使用方法。 -用户可以通过 UDF 实现两类函数: 标量函数 和 聚合函数。 +用户可以通过 UDF 实现两类函数: 标量函数和聚合函数。标量函数对每行数据返回一个值,如求绝对值 abs,正弦函数 sin,字符串拼接函数 concat 等。聚合函数对多行数据进行返回一个值,如求平均数 avg,最大值 max 等。 -## 用 C/C++ 语言来定义 UDF +实现 UDF 时,需要实现规定的接口函数 +- 标量函数需要实现标量接口函数 scalarfn 。 +- 聚合函数需要实现聚合接口函数 aggfn_start , aggfn , aggfn_finish。 +- 如果需要初始化,实现 udf_init;如果需要清理工作,实现udf_destroy。 -### 标量函数 +接口函数的名称是 UDF 名称,或者是 UDF 名称和特定后缀(_start, _finish, _init, _destroy)的连接。列表中的scalarfn,aggfn, udf需要替换成udf函数名。 -用户可以按照下列函数模板定义自己的标量计算函数 +## 实现标量函数 +标量函数实现模板如下 +```c +#include "taos.h" +#include "taoserror.h" +#include "taosudf.h" - `int32_t udf(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)` +// initialization function. if no initialization, we can skip definition of it. The initialization function shall be concatenation of the udf name and _init suffix +// @return error number defined in taoserror.h +int32_t scalarfn_init() { + // initialization. + return TSDB_CODE_SUCCESS; +} + +// scalar function main computation function +// @param inputDataBlock, input data block composed of multiple columns with each column defined by SUdfColumn +// @param resultColumn, output column +// @return error number defined in taoserror.h +int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn* resultColumn) { + // read data from inputDataBlock and process, then output to resultColumn. + return TSDB_CODE_SUCCESS; +} + +// cleanup function. 
if no cleanup related processing, we can skip definition of it. The destroy function shall be concatenation of the udf name and _destroy suffix. +// @return error number defined in taoserror.h +int32_t scalarfn_destroy() { + // clean up + return TSDB_CODE_SUCCESS; +} +``` +scalarfn 为函数名的占位符,需要替换成函数名,如 bit_and。 + +## 实现聚合函数 + +聚合函数的实现模板如下 +```c +#include "taos.h" +#include "taoserror.h" +#include "taosudf.h" + +// Initialization function. if no initialization, we can skip definition of it. The initialization function shall be concatenation of the udf name and _init suffix +// @return error number defined in taoserror.h +int32_t aggfn_init() { + // initialization. + return TSDB_CODE_SUCCESS; +} + +// aggregate start function. The intermediate value or the state(@interBuf) is initialized in this function. The function name shall be concatenation of udf name and _start suffix +// @param interBuf intermediate value to initialize +// @return error number defined in taoserror.h +int32_t aggfn_start(SUdfInterBuf* interBuf) { + // initialize intermediate value in interBuf + return TSDB_CODE_SUCCESS; +} + +// aggregate reduce function. This function aggregates the old state(@interBuf) and one data block(@inputBlock) into a new state(@newInterBuf). +// @param inputBlock input data block +// @param interBuf old state +// @param newInterBuf new state +// @return error number defined in taoserror.h +int32_t aggfn(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { + // read from inputBlock and interBuf and output to newInterBuf + return TSDB_CODE_SUCCESS; +} + +// aggregate finish function. This function transforms the intermediate value(@interBuf) into the final output(@result). The function name must be concatenation of aggfn and _finish suffix. +// @param interBuf intermediate value +// @param result final result +// @return error number defined in taoserror.h +int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result) { + // read data from interBuf and write the final result to result + return TSDB_CODE_SUCCESS; +} + +// cleanup function. if no cleanup related processing, we can skip definition of it. The destroy function shall be concatenation of the udf name and _destroy suffix.
+// @return error number defined in taoserror.h +int32_t aggfn_destroy() { + // clean up + return TSDB_CODE_SUCCESS; +} +``` +aggfn为函数名的占位符,需要修改为自己的函数名,如l2norm。 + +## 接口函数定义 + +接口函数的名称是 udf 名称,或者是 udf 名称和特定后缀(_start, _finish, _init, _destroy)的连接。以下描述中函数名称中的 scalarfn,aggfn, udf 需要替换成udf函数名。 + +接口函数返回值表示是否成功,如果错误返回错误代码,错误代码见taoserror.h。 + +接口函数参数类型见数据结构定义。 + +### 标量接口函数 + + `int32_t scalarfn(SUdfDataBlock* inputDataBlock, SUdfColumn *resultColumn)` - 其中 udf 是函数名的占位符,以上述模板实现的函数对行数据块进行标量计算。 + 其中 scalarFn 是函数名的占位符。这个函数对数据块进行标量计算,通过设置resultColumn结构体中的变量设置值 -- scalarFunction 中各参数的具体含义是: +参数的具体含义是: - inputDataBlock: 输入的数据块 - - resultColumn: 输出列 + - resultColumn: 输出列。输出列 -### 聚合函数 +### 聚合接口函数 -用户可以按照如下函数模板定义自己的聚合函数。 +`int32_t aggfn_start(SUdfInterBuf *interBuf)` -`int32_t udf_start(SUdfInterBuf *interBuf)` +`int32_t aggfn(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)` -`int32_t udf(SUdfDataBlock* inputBlock, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf)` +`int32_t aggfn_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)` -`int32_t udf_finish(SUdfInterBuf* interBuf, SUdfInterBuf *result)` -其中 udf 是函数名的占位符。其中各参数的具体含义是: +其中 aggfn 是函数名的占位符。首先调用aggfn_start生成结果buffer,然后相关的数据会被分为多个行数据块,对每个数据块调用 aggfn 用数据块更新中间结果,最后再调用 aggfn_finish 从中间结果产生最终结果,最终结果只能含 0 或 1 条结果数据。 +参数的具体含义是: - interBuf:中间结果 buffer。 - inputBlock:输入的数据块。 - newInterBuf:新的中间结果buffer。 - result:最终结果。 -其计算过程为:首先调用udf_start生成结果buffer,然后相关的数据会被分为多个行数据块,对每个行数据块调用 udf 用数据块更新中间结果,最后再调用 udf_finish 从中间结果产生最终结果,最终结果只能含 0 或 1 条结果数据。 - ### UDF 初始化和销毁 `int32_t udf_init()` `int32_t udf_destroy()` -其中 udf 是函数名的占位符。udf_init 完成初始化工作。 udf_destroy 完成清理工作。 +其中 udf 是函数名的占位符。udf_init 完成初始化工作。 udf_destroy 完成清理工作。如果没有初始化工作,无需定义udf_init函数。如果没有清理工作,无需定义udf_destroy函数。 -:::note -如果对应的函数不需要具体的功能,也需要实现一个空函数。 -::: - -### UDF 数据结构 +## UDF 数据结构 ```c typedef struct SUdfColumnMeta { int16_t type; @@ -103,6 +191,13 @@ typedef struct SUdfInterBuf { int8_t numOfResult; //zero or one } SUdfInterBuf; ``` +数据结构说明如下: + +- SUdfDataBlock 数据块包含行数 numOfRows 和列数 numCols。udfCols[i] (0 <= i <= numCols-1)表示每一列数据,类型为SUdfColumn*。 +- SUdfColumn 包含列的数据类型定义 colMeta 和列的数据 colData。 +- SUdfColumnMeta 成员定义同 taos.h 数据类型定义。 +- SUdfColumnData 数据可以变长,varLenCol 定义变长数据,fixLenCol 定义定长数据。 +- SUdfInterBuf 定义中间结构 buffer,以及 buffer 中结果个数 numOfResult 为了更好的操作以上数据结构,提供了一些便利函数,定义在 taosudf.h。 @@ -118,75 +213,15 @@ gcc -g -O0 -fPIC -shared add_one.c -o add_one.so 这样就准备好了动态链接库 add_one.so 文件,可以供后文创建 UDF 时使用了。为了保证可靠的系统运行,编译器 GCC 推荐使用 7.5 及以上版本。 -## 在系统中管理和使用 UDF - -### 创建 UDF - -用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 - -在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。 - -- 创建标量函数 -```sql -CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type; -``` - - - function_name:标量函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udf 的实际名称一致; - - library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; - - output_type:此函数计算结果的数据类型名称; - - 例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF: - - ```sql - CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT; - ``` - -- 创建聚合函数: -```sql -CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ]; -``` - - - function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; - - library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 
文件),这个路径需要用英文单引号或英文双引号括起来; - output_type:此函数计算结果的数据类型,与上文中 udfNormalFunc 的 itype 参数不同,这里不是使用数字表示法,而是直接写类型名称即可; - buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。 - 例如,如下语句可以把 libsqrsum.so 创建为系统中可用的 UDF: - ```sql - CREATE AGGREGATE FUNCTION sqr_sum AS "/home/taos/udf_example/libsqrsum.so" OUTPUTTYPE DOUBLE bufsize 8; - ``` -### 管理 UDF - -- 删除指定名称的用户定义函数: -``` -DROP FUNCTION function_name; -``` - -- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如 -```sql -DROP FUNCTION bit_and; -``` -- 显示系统中当前可用的所有 UDF: -```sql -SHOW FUNCTIONS; -``` - -### 调用 UDF - -在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如: -```sql -SELECT X(c1,c2) FROM table/stable; -``` - -表示对名为 c1, c2 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。 - +## 管理和使用 UDF +关于如何管理和使用 UDF,参见[UDF使用说明](../12-taos-sql/26-udf.md)。 ## 示例代码 ### 标量函数示例 [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c) +bit_and 实现多列的按位与功能。如果只有一列,返回这一列。bit_and 忽略空值。 +
bit_and.c @@ -196,13 +231,15 @@ SELECT X(c1,c2) FROM table/stable;
-### 聚合函数示例 [sqr_sum](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/sqr_sum.c) +### 聚合函数示例 [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c) + +l2norm 实现了输入列的所有数据的二阶范数,即对每个数据先平方,再累加求和,最后开方。
-sqr_sum.c +l2norm.c ```c -{{#include tests/script/sh/sqr_sum.c}} +{{#include tests/script/sh/l2norm.c}} ```
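For readers trying the two examples above, the build-and-register flow is the same as for add_one.c earlier in this page. A minimal sketch, assuming the example sources sit under /home/taos/udf_example (the gcc flags and both CREATE statements are taken from these docs; the paths are illustrative):

```bash
# Build each example into a shared library (GCC 7.5 or above recommended, per the docs above).
gcc -g -O0 -fPIC -shared bit_and.c -o libbitand.so
gcc -g -O0 -fPIC -shared l2norm.c -o libl2norm.so

# Register the scalar and the aggregate function from the TDengine CLI.
# The aggregate declares bufsize 8: eight bytes, enough for one double of
# intermediate state (the running sum of squares).
taos -s 'CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT;'
taos -s 'CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;'
```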
diff --git a/docs/zh/10-deployment/03-k8s.md b/docs/zh/10-deployment/03-k8s.md index d45a3b8030..ab1bff4449 100644 --- a/docs/zh/10-deployment/03-k8s.md +++ b/docs/zh/10-deployment/03-k8s.md @@ -3,27 +3,11 @@ sidebar_label: Kubernetes title: 在 Kubernetes 上部署 TDengine 集群 --- -## 配置 ConfigMap +以下配置文件可以从 [GitHub 仓库](https://github.com/taosdata/TDengine-Operator/tree/3.0/src/tdengine) 下载。 -为 TDengine 创建 `taoscfg.yaml`,此文件中的配置将作为环境变量传入 TDengine 镜像,更新此配置将导致所有 TDengine POD 重启。 +## 配置 Service 服务 -```yaml ---- -apiVersion: v1 -kind: ConfigMap -metadata: - name: taoscfg - labels: - app: tdengine -data: - CLUSTER: "1" - TAOS_KEEP: "3650" - TAOS_DEBUG_FLAG: "135" -``` - -## 配置服务 - -创建一个 service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口: +创建一个 Service 配置文件:`taosd-service.yaml`,服务名称 `metadata.name` (此处为 "taosd") 将在下一步中使用到。添加 TDengine 所用到的所有端口: ```yaml --- @@ -38,45 +22,9 @@ spec: - name: tcp6030 protocol: "TCP" port: 6030 - - name: tcp6035 - protocol: "TCP" - port: 6035 - name: tcp6041 protocol: "TCP" port: 6041 - - name: udp6030 - protocol: "UDP" - port: 6030 - - name: udp6031 - protocol: "UDP" - port: 6031 - - name: udp6032 - protocol: "UDP" - port: 6032 - - name: udp6033 - protocol: "UDP" - port: 6033 - - name: udp6034 - protocol: "UDP" - port: 6034 - - name: udp6035 - protocol: "UDP" - port: 6035 - - name: udp6036 - protocol: "UDP" - port: 6036 - - name: udp6037 - protocol: "UDP" - port: 6037 - - name: udp6038 - protocol: "UDP" - port: 6038 - - name: udp6039 - protocol: "UDP" - port: 6039 - - name: udp6040 - protocol: "UDP" - port: 6040 selector: app: "tdengine" ``` @@ -109,7 +57,7 @@ spec: spec: containers: - name: "tdengine" - image: "zitsen/taosd:develop" + image: "tdengine/tdengine:3.0.0.0" imagePullPolicy: "Always" envFrom: - configMapRef: @@ -118,45 +66,9 @@ spec: - name: tcp6030 protocol: "TCP" containerPort: 6030 - - name: tcp6035 - protocol: "TCP" - containerPort: 6035 - name: tcp6041 protocol: "TCP" containerPort: 6041 - - name: udp6030 - protocol: "UDP" - containerPort: 6030 - - name: udp6031 - protocol: "UDP" - containerPort: 6031 - - name: udp6032 - protocol: "UDP" - containerPort: 6032 - - name: udp6033 - protocol: "UDP" - containerPort: 6033 - - name: udp6034 - protocol: "UDP" - containerPort: 6034 - - name: udp6035 - protocol: "UDP" - containerPort: 6035 - - name: udp6036 - protocol: "UDP" - containerPort: 6036 - - name: udp6037 - protocol: "UDP" - containerPort: 6037 - - name: udp6038 - protocol: "UDP" - containerPort: 6038 - - name: udp6039 - protocol: "UDP" - containerPort: 6039 - - name: udp6040 - protocol: "UDP" - containerPort: 6040 env: # POD_NAME for FQDN config - name: POD_NAME @@ -190,14 +102,13 @@ spec: readinessProbe: exec: command: - - taos - - -s - - "show mnodes" + - taos-check initialDelaySeconds: 5 timeoutSeconds: 5000 livenessProbe: - tcpSocket: - port: 6030 + exec: + command: + - taos-check initialDelaySeconds: 15 periodSeconds: 20 volumeClaimTemplates: @@ -206,44 +117,78 @@ spec: spec: accessModes: - "ReadWriteOnce" - storageClassName: "csi-rbd-sc" + storageClassName: "standard" resources: requests: storage: "10Gi" ``` -## 启动集群 +## 使用 kubectl 命令部署 TDengine 集群 -将前述三个文件添加到 Kubernetes 集群中: +顺序执行以下命令。 ```bash -kubectl apply -f taoscfg.yaml kubectl apply -f taosd-service.yaml kubectl apply -f tdengine.yaml ``` -上面的配置将生成一个两节点的 TDengine 集群,dnode 是自动配置的,可以使用 `show dnodes` 命令查看当前集群的节点: +上面的配置将生成一个三节点的 TDengine 集群,dnode 是自动配置的,可以使用 show dnodes 命令查看当前集群的节点: ```bash kubectl exec -i -t tdengine-0 -- taos -s "show dnodes" 
kubectl exec -i -t tdengine-1 -- taos -s "show dnodes" - +kubectl exec -i -t tdengine-2 -- taos -s "show dnodes" ``` 输出如下: ``` -Welcome to the TDengine shell from Linux, Client Version:2.1.1.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. +Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 17:13:24.181 | | - 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 17:14:09.257 | | -Query OK, 2 row(s) in set (0.000997s) + id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ + 1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | | + 2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | | + 3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | | +Query OK, 3 rows in database (0.003655s) +``` +## 使能端口转发 + +利用 kubectl 端口转发功能可以使应用可以访问 Kubernetes 环境运行的 TDengine 集群。 + +``` +kubectl port-forward tdengine-0 6041:6041 & +``` + +使用 curl 命令验证 TDengine REST API 使用的 6041 接口。 + +``` +$ curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql +Handling connection for 6041 +{"code":0,"column_meta":[["name","VARCHAR",64],["create_time","TIMESTAMP",8],["vgroups","SMALLINT",2],["ntables","BIGINT",8],["replica","TINYINT",1],["strict","VARCHAR",4],["duration","VARCHAR",10],["keep","VARCHAR",32],["buffer","INT",4],["pagesize","INT",4],["pages","INT",4],["minrows","INT",4],["maxrows","INT",4],["comp","TINYINT",1],["precision","VARCHAR",2],["status","VARCHAR",10],["retention","VARCHAR",60],["single_stable","BOOL",1],["cachemodel","VARCHAR",11],["cachesize","INT",4],["wal_level","TINYINT",1],["wal_fsync_period","INT",4],["wal_retention_period","INT",4],["wal_retention_size","BIGINT",8],["wal_roll_period","INT",4],["wal_segment_size","BIGINT",8]],"data":[["information_schema",null,null,16,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null],["performance_schema",null,null,10,null,null,null,null,null,null,null,null,null,null,null,"ready",null,null,null,null,null,null,null,null,null,null]],"rows":2} +``` + +## 使用 dashboard 进行图形化管理 + + minikube 提供 dashboard 命令支持图形化管理界面。 + +``` +$ minikube dashboard +* Verifying dashboard health ... +* Launching proxy ... +* Verifying proxy health ... +* Opening http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ in your default browser... 
+http://127.0.0.1:46617/api/v1/namespaces/kubernetes-dashboard/services/http:kubernetes-dashboard:/proxy/ +``` + +对于某些公有云环境,minikube 绑定在 127.0.0.1 IP 地址上无法通过远程访问,需要使用 kubectl proxy 命令将端口映射到 0.0.0.0 IP 地址上,再通过浏览器访问虚拟机公网 IP 和端口以及相同的 dashboard URL 路径即可远程访问 dashboard。 + +``` +$ kubectl proxy --accept-hosts='^.*$' --address='0.0.0.0' ``` ## 集群扩容 @@ -252,14 +197,12 @@ TDengine 集群支持自动扩容: ```bash kubectl scale statefulsets tdengine --replicas=4 - ``` 上面命令行中参数 `--replica=4` 表示要将 TDengine 集群扩容到 4 个节点,执行后首先检查 POD 的状态: ```bash kubectl get pods -l app=tdengine - ``` 输出如下: @@ -270,102 +213,112 @@ tdengine-0 1/1 Running 0 161m tdengine-1 1/1 Running 0 161m tdengine-2 1/1 Running 0 32m tdengine-3 1/1 Running 0 32m - ``` 此时 POD 的状态仍然是 Running,TDengine 集群中的 dnode 状态要等 POD 状态为 `ready` 之后才能看到: ```bash -kubectl exec -i -t tdengine-0 -- taos -s "show dnodes" - +kubectl exec -i -t tdengine-3 -- taos -s "show dnodes" ``` 扩容后的四节点 TDengine 集群的 dnode 列表: ``` -Welcome to the TDengine shell from Linux, Client Version:2.1.1.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. +Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | | - 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | | - 3 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 14:07:27.078 | | - 4 | tdengine-3.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 14:07:48.362 | | -Query OK, 4 row(s) in set (0.001293s) - + id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ + 1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | | + 2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | | + 3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | | + 4 | tdengine-3.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:33:16.039 | | +Query OK, 4 rows in database (0.008377s) ``` ## 集群缩容 -TDengine 的缩容并没有自动化,我们尝试将一个三节点集群缩容到两节点。 +由于 TDengine 集群在扩缩容时会对数据进行节点间迁移,使用 kubectl 命令进行缩容需要首先使用 "drop dnodes" 命令,节点删除完成后再进行 Kubernetes 集群缩容。 -首先,确认一个三节点 TDengine 集群正常工作,在 TDengine CLI 中查看 dnode 的状态: +注意:由于 Kubernetes Statefulset 中 Pod 的只能按创建顺序逆序移除,所以 TDengine drop dnode 也需要按照创建顺序逆序移除,否则会导致 Pod 处于错误状态。 + +``` +$ kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 4" +``` ```bash +$ kubectl exec -it tdengine-0 -- taos -s "show dnodes" + +Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. + taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | -====================================================================================================================================== - 1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | | - 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | | - 3 | tdengine-2.taosd.default.sv... 
| 0 | 40 | ready | any | 2021-06-01 16:28:49.787 | | -Query OK, 3 row(s) in set (0.001101s) - + id | endpoint | vnodes | support_vnodes | status | create_time | note | +============================================================================================================================================ + 1 | tdengine-0.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:14:57.285 | | + 2 | tdengine-1.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:11.302 | | + 3 | tdengine-2.taosd.default.sv... | 0 | 256 | ready | 2022-08-10 13:15:23.290 | | +Query OK, 3 rows in database (0.004861s) ``` -想要安全的缩容,首先需要将节点从 dnode 列表中移除,也即从集群中移除: +确认移除成功后(使用 kubectl exec -i -t tdengine-0 -- taos -s "show dnodes" 查看和确认 dnode 列表),使用 kubectl 命令移除 POD: + +``` +kubectl scale statefulsets tdengine --replicas=3 +``` + +最后一个 POD 将会被删除。使用命令 kubectl get pods -l app=tdengine 查看POD状态: + +``` +$ kubectl get pods -l app=tdengine +NAME READY STATUS RESTARTS AGE +tdengine-0 1/1 Running 0 4m7s +tdengine-1 1/1 Running 0 3m55s +tdengine-2 1/1 Running 0 2m28s +``` + +POD删除后,需要手动删除PVC,否则下次扩容时会继续使用以前的数据导致无法正常加入集群。 ```bash -kubectl exec -i -t tdengine-0 -- taos -s "drop dnode 'tdengine-2.taosd.default.svc.cluster.local:6030'" - -``` - -通过 `show dondes` 命令确认移除成功后,移除相应的 POD: - -```bash -kubectl scale statefulsets tdengine --replicas=2 - -``` - -最后一个 POD 会被删除,使用 `kubectl get pods -l app=tdengine` 查看集群状态: - -``` -NAME READY STATUS RESTARTS AGE -tdengine-0 1/1 Running 0 3h40m -tdengine-1 1/1 Running 0 3h40m - -``` - -POD 删除后,需要手动删除 PVC,否则下次扩容时会继续使用以前的数据导致无法正常加入集群。 - -```bash -kubectl delete pvc taosdata-tdengine-2 - +$ kubectl delete pvc taosdata-tdengine-3 ``` 此时的集群状态是安全的,需要时还可以再次进行扩容: ```bash -kubectl scale statefulsets tdengine --replicas=3 +$ kubectl scale statefulsets tdengine --replicas=4 +statefulset.apps/tdengine scaled +it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine +NAME READY STATUS RESTARTS AGE +tdengine-0 1/1 Running 0 35m +tdengine-1 1/1 Running 0 34m +tdengine-2 1/1 Running 0 12m +tdengine-3 0/1 ContainerCreating 0 4s +it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl get pods -l app=tdengine +NAME READY STATUS RESTARTS AGE +tdengine-0 1/1 Running 0 35m +tdengine-1 1/1 Running 0 34m +tdengine-2 1/1 Running 0 12m +tdengine-3 0/1 Running 0 7s +it@k8s-2:~/TDengine-Operator/src/tdengine$ kubectl exec -it tdengine-0 -- taos -s "show dnodes" + +Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. + +Server is Community Edition. - -``` - -`show dnodes` 输出如下: - -``` taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | +id | endpoint | vnodes | support_vnodes | status | create_time | offline reason | ====================================================================================================================================== - 1 | tdengine-0.taosd.default.sv... | 1 | 40 | ready | any | 2021-06-01 16:27:24.852 | | - 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:27:53.339 | | - 4 | tdengine-2.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 16:40:49.177 | | - - +1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | | +2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | | +5 | tdengine-2.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 18:01:36.479 | | +6 | tdengine-3.taosd.default.sv... 
| 0 | 4 | ready | 2022-07-25 18:13:54.411 | | +Query OK, 4 row(s) in set (0.001348s) ``` -## 删除集群 +## 清理 TDengine 集群 完整移除 TDengine 集群,需要分别清理 statefulset、svc、configmap、pvc。 @@ -381,26 +334,26 @@ kubectl delete configmap taoscfg ### 错误一 -扩容到四节点之后缩容到两节点,删除的 POD 会进入 offline 状态: +未进行 "drop dnode" 直接进行缩容,由于 TDengine 尚未删除节点,缩容 pod 导致 TDengine 集群中部分节点处于 offline 状态。 ``` -Welcome to the TDengine shell from Linux, Client Version:2.1.1.0 -Copyright (c) 2020 by TAOS Data, Inc. All rights reserved. +$ kubectl exec -it tdengine-0 -- taos -s "show dnodes" + +Welcome to the TDengine shell from Linux, Client Version:3.0.0.0 +Copyright (c) 2022 by TAOS Data, Inc. All rights reserved. + +Server is Community Edition. taos> show dnodes - id | end_point | vnodes | cores | status | role | create_time | offline reason | +id | endpoint | vnodes | support_vnodes | status | create_time | offline reason | ====================================================================================================================================== - 1 | tdengine-0.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:12.915 | | - 2 | tdengine-1.taosd.default.sv... | 0 | 40 | ready | any | 2021-06-01 11:58:33.127 | | - 3 | tdengine-2.taosd.default.sv... | 0 | 40 | offline | any | 2021-06-01 14:07:27.078 | status msg timeout | - 4 | tdengine-3.taosd.default.sv... | 1 | 40 | offline | any | 2021-06-01 14:07:48.362 | status msg timeout | -Query OK, 4 row(s) in set (0.001236s) - - +1 | tdengine-0.taosd.default.sv... | 0 | 4 | ready | 2022-07-25 17:38:49.012 | | +2 | tdengine-1.taosd.default.sv... | 1 | 4 | ready | 2022-07-25 17:39:01.517 | | +5 | tdengine-2.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:01:36.479 | status msg timeout | +6 | tdengine-3.taosd.default.sv... | 0 | 4 | offline | 2022-07-25 18:13:54.411 | status msg timeout | +Query OK, 4 row(s) in set (0.001323s) ``` -但 `drop dnode` 的行为按不会按照预期进行,且下次集群重启后,所有的 dnode 节点将无法启动 dropping 状态无法退出。 - ### 错误二 TDengine 集群会持有 replica 参数,如果缩容后的节点数小于这个值,集群将无法使用: diff --git a/docs/zh/12-taos-sql/26-udf.md b/docs/zh/12-taos-sql/26-udf.md index 1292206311..7ddcad298b 100644 --- a/docs/zh/12-taos-sql/26-udf.md +++ b/docs/zh/12-taos-sql/26-udf.md @@ -4,34 +4,65 @@ title: 用户自定义函数 --- 除了 TDengine 的内置函数以外,用户还可以编写自己的函数逻辑并加入TDengine系统中。 +## 创建 UDF -## 创建函数 +用户可以通过 SQL 指令在系统中加载客户端所在主机上的 UDF 函数库(不能通过 RESTful 接口或 HTTP 管理界面来进行这一过程)。一旦创建成功,则当前 TDengine 集群的所有用户都可以在 SQL 指令中使用这些函数。UDF 存储在系统的 MNode 节点上,因此即使重启 TDengine 系统,已经创建的 UDF 也仍然可用。 +在创建 UDF 时,需要区分标量函数和聚合函数。如果创建时声明了错误的函数类别,则可能导致通过 SQL 指令调用函数时出错。此外,用户需要保证输入数据类型与 UDF 程序匹配,UDF 输出数据类型与 OUTPUTTYPE 匹配。 + +- 创建标量函数 ```sql -CREATE [AGGREGATE] FUNCTION func_name AS library_path OUTPUTTYPE type_name [BUFSIZE buffer_size] +CREATE FUNCTION function_name AS library_path OUTPUTTYPE output_type; ``` -语法说明: + - function_name:标量函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udf 的实际名称一致; + - library_path:包含 UDF 函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来; + - output_type:此函数计算结果的数据类型名称; -AGGREGATE:标识此函数是标量函数还是聚集函数。 -func_name:函数名,必须与函数实现中 udf 的实际名称一致。 -library_path:包含UDF函数实现的动态链接库的绝对路径,是在客户端侧主机上的绝对路径。 -type_name:标识此函数的返回类型。 -buffer_size:中间结果的缓冲区大小,单位是字节。不设置则默认为0。 + 例如,如下语句可以把 libbitand.so 创建为系统中可用的 UDF: + ```sql + CREATE FUNCTION bit_and AS "/home/taos/udf_example/libbitand.so" OUTPUTTYPE INT; + ``` + +- 创建聚合函数: +```sql +CREATE AGGREGATE FUNCTION function_name AS library_path OUTPUTTYPE output_type [ BUFSIZE buffer_size ]; +``` + + - function_name:聚合函数未来在 SQL 中被调用时的函数名,必须与函数实现中 udfNormalFunc 的实际名称一致; + - library_path:包含 UDF 
函数实现的动态链接库的库文件绝对路径(指的是库文件在当前客户端所在主机上的保存路径,通常是指向一个 .so 文件),这个路径需要用英文单引号或英文双引号括起来;
+  - output_type:此函数计算结果的数据类型,直接写类型名称即可;
+  - buffer_size:中间计算结果的缓冲区大小,单位是字节。如果不使用可以不设置。
+
+  例如,如下语句可以把 libl2norm.so 创建为系统中可用的 UDF:
+
+  ```sql
+  CREATE AGGREGATE FUNCTION l2norm AS "/home/taos/udf_example/libl2norm.so" OUTPUTTYPE DOUBLE bufsize 8;
+  ```

关于如何开发自定义函数,请参考 [UDF使用说明](../../develop/udf)。

-## 删除自定义函数
+## 管理 UDF
+- 删除指定名称的用户定义函数:
```
DROP FUNCTION function_name;
```

-- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如
-
-
-## 显示 UDF
-
+- function_name:此参数的含义与 CREATE 指令中的 function_name 参数一致,也即要删除的函数的名字,例如 bit_and、l2norm
```sql
-SHOW FUNCTION;
+DROP FUNCTION bit_and;
```
+- 显示系统中当前可用的所有 UDF:
+```sql
+SHOW FUNCTIONS;
+```
+
+## 调用 UDF
+
+在 SQL 指令中,可以直接以在系统中创建 UDF 时赋予的函数名来调用用户定义函数。例如:
+```sql
+SELECT X(c1,c2) FROM table/stable;
+```
+
+表示对名为 c1, c2 的数据列调用名为 X 的用户定义函数。SQL 指令中用户定义函数可以配合 WHERE 等查询特性来使用。
diff --git a/docs/zh/12-taos-sql/index.md b/docs/zh/12-taos-sql/index.md
index cb01b3a918..900fff1ba2 100644
--- a/docs/zh/12-taos-sql/index.md
+++ b/docs/zh/12-taos-sql/index.md
@@ -5,11 +5,12 @@ description: "TAOS SQL 支持的语法规则、主要查询功能、支持的 SQ

 本文档说明 TAOS SQL 支持的语法规则、主要查询功能、支持的 SQL 查询函数,以及常用技巧等内容。阅读本文档需要读者具有基本的 SQL 语言的基础。

-TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 为了便于用户快速上手,在一定程度上提供与标准 SQL 类似的风格和模式。严格意义上,TAOS SQL 并不是也不试图提供标准的 SQL 语法。此外,由于 TDengine 针对的时序性结构化数据不提供删除功能,因此在 TAO SQL 中不提供数据删除的相关功能。
+TAOS SQL 是用户对 TDengine 进行数据写入和查询的主要工具。TAOS SQL 提供标准的 SQL 语法,并针对时序数据和业务的特点优化和新增了许多语法和功能。TAOS SQL 语句的最大长度为 1M。TAOS SQL 不支持关键字的缩写,例如 DELETE 不能缩写为 DEL。

 本章节 SQL 语法遵循如下约定:

-- <\> 里的内容是用户需要输入的,但不要输入 <\> 本身
+- 用大写字母表示关键字,但 SQL 本身并不区分关键字和标识符的大小写
+- 用小写字母表示需要用户输入的内容
 - \[ \] 表示内容为可选项,但不能输入 [] 本身
 - | 表示多选一,选择其中一个即可,但不能输入 | 本身
 - … 表示前面的项可重复多个
diff --git a/docs/zh/14-reference/03-connector/cpp.mdx b/docs/zh/14-reference/03-connector/cpp.mdx
index aecf9fde12..3a8367ef33 100644
--- a/docs/zh/14-reference/03-connector/cpp.mdx
+++ b/docs/zh/14-reference/03-connector/cpp.mdx
@@ -279,7 +279,7 @@ TDengine 的异步 API 均采用非阻塞调用模式。应用程序可以用多
 2. 调用 `taos_stmt_prepare()` 解析 INSERT 语句;
 3. 如果 INSERT 语句中预留了表名但没有预留 TAGS,那么调用 `taos_stmt_set_tbname()` 来设置表名;
 4. 如果 INSERT 语句中既预留了表名又预留了 TAGS(例如 INSERT 语句采取的是自动建表的方式),那么调用 `taos_stmt_set_tbname_tags()` 来设置表名和 TAGS 的值;
-5. 调用 `taos_stmt_bind_param_batch()` 以多列的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值;
+5. 调用 `taos_stmt_bind_param_batch()` 以多行的方式设置 VALUES 的值,或者调用 `taos_stmt_bind_param()` 以单行的方式设置 VALUES 的值;
 6. 调用 `taos_stmt_add_batch()` 把当前绑定的参数加入批处理;
 7. 可以重复第 3 ~ 6 步,为批处理加入更多的数据行;
 8. 
调用 `taos_stmt_execute()` 执行已经准备好的批处理指令; diff --git a/docs/zh/14-reference/03-connector/java.mdx b/docs/zh/14-reference/03-connector/java.mdx index 88a3674671..7a107bd04d 100644 --- a/docs/zh/14-reference/03-connector/java.mdx +++ b/docs/zh/14-reference/03-connector/java.mdx @@ -9,7 +9,7 @@ description: TDengine Java 连接器基于标准 JDBC API 实现, 并提供原 import Tabs from '@theme/Tabs'; import TabItem from '@theme/TabItem'; -`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例(2.4.0.0 及更高版本)。REST 连接实现的功能集合和原生连接有少量不同。 +`taos-jdbcdriver` 是 TDengine 的官方 Java 语言连接器,Java 开发人员可以通过它开发存取 TDengine 数据库的应用软件。`taos-jdbcdriver` 实现了 JDBC driver 标准的接口,并提供两种形式的连接器。一种是通过 TDengine 客户端驱动程序(taosc)原生连接 TDengine 实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能,一种是通过 taosAdapter 提供的 REST 接口连接 TDengine 实例。REST 连接实现的功能集合和原生连接有少量不同。 ![TDengine Database Connector Java](tdengine-jdbc-connector.webp) @@ -41,19 +41,19 @@ REST 连接支持所有能运行 Java 的平台。 TDengine 目前支持时间戳、数字、字符、布尔类型,与 Java 对应类型转换如下: -| TDengine DataType | JDBCType (driver 版本 < 2.0.24) | JDBCType (driver 版本 >= 2.0.24) | -| ----------------- | --------------------------------- | ---------------------------------- | -| TIMESTAMP | java.lang.Long | java.sql.Timestamp | -| INT | java.lang.Integer | java.lang.Integer | -| BIGINT | java.lang.Long | java.lang.Long | -| FLOAT | java.lang.Float | java.lang.Float | -| DOUBLE | java.lang.Double | java.lang.Double | -| SMALLINT | java.lang.Short | java.lang.Short | -| TINYINT | java.lang.Byte | java.lang.Byte | -| BOOL | java.lang.Boolean | java.lang.Boolean | -| BINARY | java.lang.String | byte array | -| NCHAR | java.lang.String | java.lang.String | -| JSON | - | java.lang.String | +| TDengine DataType | JDBCType | +| ----------------- | ---------------------------------- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT | java.lang.Short | +| TINYINT | java.lang.Byte | +| BOOL | java.lang.Boolean | +| BINARY | byte array | +| NCHAR | java.lang.String | +| JSON | java.lang.String | **注意**:JSON 类型仅在 tag 中支持。 @@ -198,7 +198,7 @@ url 中的配置参数如下: - user:登录 TDengine 用户名,默认值 'root'。 - password:用户登录密码,默认值 'taosdata'。 -- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 和 TDengine 2.4.0.12 版本开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 +- batchfetch: true:在执行查询时批量拉取结果集;false:逐行拉取结果集。默认值为:false。逐行拉取结果集使用 HTTP 方式进行数据传输。从 taos-jdbcdriver-2.0.38 开始,JDBC REST 连接增加批量拉取数据功能。taos-jdbcdriver 与 TDengine 之间通过 WebSocket 连接进行数据传输。相较于 HTTP,WebSocket 可以使 JDBC REST 连接支持大数据量查询,并提升查询性能。 - charset: 当开启批量拉取数据时,指定解析字符串数据的字符集。 - batchErrorIgnore:true:在执行 Statement 的 executeBatch 时,如果中间有一条 SQL 执行失败,继续执行下面的 SQL 了。false:不再执行失败 SQL 后的任何语句。默认值为:false。 - httpConnectTimeout: 连接超时时间,单位 ms, 默认值为 5000。 @@ -216,7 +216,7 @@ url 中的配置参数如下: INSERT INTO test.t1 USING test.weather (ts, temperature) TAGS('California.SanFrancisco') VALUES(now, 24.6); ``` -- 从 taos-jdbcdriver-2.0.36 和 TDengine 2.2.0.0 版本开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') 
values(now, 24.6);
+- 从 taos-jdbcdriver-2.0.36 开始,如果在 url 中指定了 dbname,那么,JDBC REST 连接会默认使用/rest/sql/dbname 作为 restful 请求的 url,在 SQL 中不需要指定 dbname。例如:url 为 jdbc:TAOS-RS://127.0.0.1:6041/test,那么,可以执行 sql:insert into t1 using weather(ts, temperature) tags('California.SanFrancisco') values(now, 24.6);

:::

@@ -358,11 +358,11 @@ JDBC 连接器可能报错的错误码包括 3 种:JDBC driver 本身的报错

具体的错误码请参考:

- [TDengine Java Connector](https://github.com/taosdata/taos-connector-jdbc/blob/main/src/main/java/com/taosdata/jdbc/TSDBErrorNumbers.java)
-- [TDengine_ERROR_CODE](https://github.com/taosdata/TDengine/blob/develop/src/inc/taoserror.h)
+- [TDengine_ERROR_CODE](../error-code)

### 通过参数绑定写入数据

-从 2.1.2.0 版本开始,TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。
+TDengine 的 JDBC 原生连接实现大幅改进了参数绑定方式对数据写入(INSERT)场景的支持。采用这种方式写入数据时,能避免 SQL 语法解析的资源消耗,从而在很多情况下显著提升写入性能。

**注意**:

@@ -630,7 +630,7 @@ public void setNString(int columnIndex, ArrayList list, int size) throws

### 无模式写入

-从 2.2.0.0 版本开始,TDengine 增加了对无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](/reference/schemaless/)。
+TDengine 支持无模式写入功能。无模式写入兼容 InfluxDB 的 行协议(Line Protocol)、OpenTSDB 的 telnet 行协议和 OpenTSDB 的 JSON 格式协议。详情请参见[无模式写入](../../schemaless)。

**注意**:

@@ -670,55 +670,127 @@ public class SchemalessInsertTest {

TDengine Java 连接器支持订阅功能,应用 API 如下:

-#### 创建订阅
+#### 创建 Topic

```java
-TSDBSubscribe sub = ((TSDBConnection)conn).subscribe("topic", "select * from meters", false);
+Connection connection = DriverManager.getConnection(url, properties);
+Statement statement = connection.createStatement();
+statement.executeUpdate("create topic if not exists topic_speed as select ts, speed from speed_table");
```

上面 `create topic` 语句中两个关键部分的含义如下:

-- topic:订阅的主题(即名称),此参数是订阅的唯一标识
-- sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据
-- restart:如果订阅已经存在,是重新开始,还是继续之前的订阅
+- topic_speed:订阅的主题(即名称),此参数是订阅的唯一标识。
+- sql:订阅的查询语句,此语句只能是 `select` 语句,只应查询原始数据,只能按时间正序查询数据。

-如上面的例子将使用 SQL 语句 `select * from meters` 创建一个名为 `topic` 的订阅,如果这个订阅已经存在,将继续之前的查询进度,而不是从头开始消费所有的数据。
+如上面的例子将使用 SQL 语句 `select ts, speed from speed_table` 创建一个名为 `topic_speed` 的订阅。

+#### 创建 Consumer
+
+```java
+Properties config = new Properties();
+config.setProperty("enable.auto.commit", "true");
+config.setProperty("group.id", "group1");
+config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
+
+TaosConsumer<ResultBean> consumer = new TaosConsumer<>(config);
+```
+
+- enable.auto.commit: 是否允许自动提交。
+- group.id: consumer 所在的 group。
+- value.deserializer: 结果集反序列化方法,可以继承 `com.taosdata.jdbc.tmq.ReferenceDeserializer`,并指定结果集 bean,实现反序列化。也可以继承 `com.taosdata.jdbc.tmq.Deserializer`,根据 SQL 的 resultSet 自定义反序列化方式。
+- 其他参数请参考:[Consumer 参数列表](../../../develop/tmq#创建-consumer-以及consumer-group)

#### 订阅消费数据

```java
-int total = 0;
while(true) {
-    TSDBResultSet rs = sub.consume();
-    int count = 0;
-    while(rs.next()) {
-        count++;
-    }
-    total += count;
-    System.out.printf("%d rows consumed, total %d\n", count, total);
-    Thread.sleep(1000);
+    ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
+    for (ResultBean record : records) {
+        process(record);
+    }
}
```

-`consume` 方法返回一个结果集,其中包含从上次 `consume` 到目前为止的所有新数据。请务必按需选择合理的调用 `consume` 的频率(如例子中的 `Thread.sleep(1000)`),否则会给服务端造成不必要的压力。
+`poll` 方法返回一个结果集,其中包含从上次 `poll` 到目前为止的所有新数据。请务必按需选择合理的调用 `poll` 的频率(如例子中的 `Duration.ofMillis(100)`),否则会给服务端造成不必要的压力。

#### 关闭订阅

```java
-sub.close(true);
+consumer.close();
```

-`close` 方法关闭一个订阅。如果其参数为 `true` 表示保留订阅进度信息,后续可以创建同名订阅继续消费数据;如为 `false` 则不保留订阅进度。
-
-### 关闭资源
+### 使用示例

```java
-resultSet.close();
-stmt.close();
-conn.close();
-```
+public abstract class ConsumerLoop {
+    private final TaosConsumer<ResultBean> consumer;
+    private final List<String> topics;
+    private final AtomicBoolean shutdown;
+    private final CountDownLatch shutdownLatch;

-> `注意务必要将 connection 进行关闭`,否则会出现连接泄露。
+    public ConsumerLoop() throws SQLException {
+        Properties config = new Properties();
+        config.setProperty("msg.with.table.name", "true");
+        config.setProperty("enable.auto.commit", "true");
+        config.setProperty("group.id", "group1");
+        config.setProperty("value.deserializer", "com.taosdata.jdbc.tmq.ConsumerTest.ResultDeserializer");
+
+        this.consumer = new TaosConsumer<>(config);
+        this.topics = Collections.singletonList("topic_speed");
+        this.shutdown = new AtomicBoolean(false);
+        this.shutdownLatch = new CountDownLatch(1);
+    }
+
+    public abstract void process(ResultBean result);
+
+    public void pollData() throws SQLException {
+        try {
+            consumer.subscribe(topics);
+
+            while (!shutdown.get()) {
+                ConsumerRecords<ResultBean> records = consumer.poll(Duration.ofMillis(100));
+                for (ResultBean record : records) {
+                    process(record);
+                }
+            }
+        } finally {
+            consumer.close();
+            shutdownLatch.countDown();
+        }
+    }
+
+    public void shutdown() throws InterruptedException {
+        shutdown.set(true);
+        shutdownLatch.await();
+    }
+
+    static class ResultDeserializer extends ReferenceDeserializer<ResultBean> {
+
+    }
+
+    static class ResultBean {
+        private Timestamp ts;
+        private int speed;
+
+        public Timestamp getTs() {
+            return ts;
+        }
+
+        public void setTs(Timestamp ts) {
+            this.ts = ts;
+        }
+
+        public int getSpeed() {
+            return speed;
+        }
+
+        public void setSpeed(int speed) {
+            this.speed = speed;
+        }
+    }
+}
+```

### 与连接池使用

@@ -787,20 +859,6 @@ public static void main(String[] args) throws Exception {

> 更多 druid 使用问题请查看[官方说明](https://github.com/alibaba/druid)。

-**注意事项:**
-
-- TDengine `v1.6.4.1` 版本开始提供了一个专门用于心跳检测的函数 `select server_status()`,所以在使用连接池时推荐使用 `select server_status()` 进行 Validation Query。
-
-如下所示,`select server_status()` 执行成功会返回 `1`。
-
-```sql
-taos> select server_status();
-server_status()|
-================
-1 |
-Query OK, 1 row(s) in set (0.000141s)
-```
-
### 更多示例程序

示例程序源码位于 `TDengine/examples/JDBC` 下:

- SpringJdbcTemplate:Spring JdbcTemplate 中使用 taos-jdbcdriver。
- mybatisplus-demo:Springboot + Mybatis 中使用 taos-jdbcdriver。

-请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/develop/examples/JDBC)
+请参考:[JDBC example](https://github.com/taosdata/TDengine/tree/3.0/examples/JDBC)

## 最近更新记录

@@ -842,7 +900,7 @@ Query OK, 1 row(s) in set (0.000141s)

**解决方法**:重新安装 64 位 JDK。

-4. 其它问题请参考 [FAQ](/train-faq/faq)
+4. 其它问题请参考 [FAQ](../../../train-faq/faq)

## API 参考

diff --git a/docs/zh/14-reference/03-connector/node.mdx b/docs/zh/14-reference/03-connector/node.mdx
index 9f2bed9e97..b089da99d2 100644
--- a/docs/zh/14-reference/03-connector/node.mdx
+++ b/docs/zh/14-reference/03-connector/node.mdx
@@ -15,11 +15,11 @@ import NodeOpenTSDBTelnet from "../../07-develop/03-insert-data/_js_opts_telnet.mdx";
import NodeOpenTSDBJson from "../../07-develop/03-insert-data/_js_opts_json.mdx"; import NodeQuery from "../../07-develop/04-query-data/_js.mdx"; -`td2.0-connector` 和 `td2.0-rest-connector` 是 TDengine 的官方 Node.js 语言连接器。Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。 +`@tdengine/client` 和 `@tdengine/rest` 是 TDengine 的官方 Node.js 语言连接器。 Node.js 开发人员可以通过它开发可以存取 TDengine 集群数据的应用软件。注意:从 TDengine 3.0 开始 Node.js 原生连接器的包名由 `td2.0-connector` 改名为 `@tdengine/client` 而 rest 连接器的包名由 `td2.0-rest-connector` 改为 `@tdengine/rest`。并且不与 TDengine 2.x 兼容。 -`td2.0-connector` 是**原生连接器**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。`td2.0-rest-connector` 是 **REST 连接器**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 的运行实例。REST 连接器可以在任何平台运行,但性能略为下降,接口实现的功能特性集合和原生接口有少量不同。 +`@tdengine/client` 是**原生连接器**,它通过 TDengine 客户端驱动程序(taosc)连接 TDengine 运行实例,支持数据写入、查询、订阅、schemaless 接口和参数绑定接口等功能。`@tdengine/rest` 是 **REST 连接器**,它通过 taosAdapter 提供的 REST 接口连接 TDengine 的运行实例。REST 连接器可以在任何平台运行,但性能略为下降,接口实现的功能特性集合和原生接口有少量不同。 -Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-connector-node)。 +Node.js 连接器源码托管在 [GitHub](https://github.com/taosdata/taos-connector-node/tree/3.0)。 ## 支持的平台 @@ -58,7 +58,7 @@ REST 连接器支持所有能运行 Node.js 的平台。 - `python` (建议`v2.7` , `v3.x.x` 目前还不支持) -- `td2.0-connector` 2.0.6 支持 Node.js LTS v10.9.0 或更高版本, Node.js LTS v12.8.0 或更高版本;2.0.5 及更早版本支持 Node.js LTS v10.x 版本。其他版本可能存在包兼容性的问题 +- `@tdengine/client` 3.0.0 支持 Node.js LTS v10.9.0 或更高版本, Node.js LTS v12.8.0 或更高版本;其他版本可能存在包兼容性的问题 - `make` - C 语言编译器,[GCC](https://gcc.gnu.org) v4.8.5 或更高版本 @@ -90,14 +90,14 @@ REST 连接器支持所有能运行 Node.js 的平台。 ```bash -npm install td2.0-connector +npm install @tdengine/client ``` ```bash -npm i td2.0-rest-connector +npm install @tdengine/rest ``` @@ -109,13 +109,13 @@ npm i td2.0-rest-connector 验证方法: -- 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/TDengine/tree/develop/examples/nodejs/nodejsChecker.js)到本地。 +- 新建安装验证目录,例如:`~/tdengine-test`,下载 GitHub 上 [nodejsChecker.js 源代码](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/nodejsChecker.js)到本地。 - 在命令行中执行以下命令。 ```bash npm init -y -npm install td2.0-connector +npm install @tdengine/client node nodejsChecker.js host=localhost ``` @@ -128,11 +128,11 @@ node nodejsChecker.js host=localhost -安装并引用 `td2.0-connector` 包。 +安装并引用 `@tdengine/client` 包。 ```javascript //A cursor also needs to be initialized in order to interact with TDengine from Node.js. -const taos = require("td2.0-connector"); +const taos = require("@tdengine/client"); var conn = taos.connect({ host: "127.0.0.1", user: "root", @@ -149,12 +149,12 @@ conn.close(); -安装并引用 `td2.0-rest-connector` 包。 +安装并引用 `@tdengine/rest` 包。 ```javascript //A cursor also needs to be initialized in order to interact with TDengine from Node.js. 
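// The REST connector sends SQL over HTTP to a running taosAdapter instance;
// options.path ("/rest/sql") and options.host below must point to that taosAdapter.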
-import { options, connect } from "td2.0-rest-connector";
-options.path = "/rest/sqlt";
+import { options, connect } from "@tdengine/rest";
+options.path = "/rest/sql";
// set host
options.host = "localhost";
// set other options like user/passwd

@@ -190,26 +190,23 @@ let cursor = conn.cursor();

+
## 更多示例程序

| 示例程序 | 示例程序描述 |
| ------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------- |
-| [connection](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/cursorClose.js) | 建立连接的示例。 |
-| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamBatchSample.js) | 绑定多行参数插入的示例。 |
-| [stmtBind](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindParamSample.js) | 一行一行绑定参数插入的示例。 |
-| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtBindSingleParamBatchSample.js) | 按列绑定参数插入的示例。 |
-| [stmtUseResult](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/stmtUseResultSample.js) | 绑定参数查询的示例。 |
-| [json tag](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testJsonTag.js) | Json tag 的使用示例。 |
-| [Nanosecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testNanoseconds.js) | 时间戳为纳秒精度的使用的示例。 |
-| [Microsecond](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testMicroseconds.js) | 时间戳为微秒精度的使用的示例。 |
-| [schemless insert](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSchemalessInsert.js) | schemless 插入的示例。 |
-| [subscribe](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/testSubscribe.js) | 订阅的使用示例。 |
-| [asyncQuery](https://github.com/taosdata/taos-connector-node/tree/develop/nodejs/examples/tset.js) | 异步查询的使用示例。 |
-| [REST](https://github.com/taosdata/taos-connector-node/blob/develop/typescript-rest/example/example.ts) | 使用 REST 连接的 TypeScript 使用示例。 |
+| [basicUse](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/queryExample.js) | 基本的使用,如建立连接、执行 SQL 等操作。 |
+| [stmtBindBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindParamBatch.js) | 绑定多行参数插入的示例。 |
+| [stmtBindSingleParamBatch](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/bindSingleParamBatch.js) | 按列绑定参数插入的示例。 |
+| [stmtQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/stmtQuery.js) | 绑定参数查询的示例。 |
+| [schemaless insert](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/schemaless.js) | schemaless 插入的示例。 |
+| [TMQ](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/tmq.js) | 订阅的使用示例。 |
+| [asyncQuery](https://github.com/taosdata/taos-connector-node/blob/3.0/nodejs/examples/asyncQueryExample.js) | 异步查询的使用示例。 |
+| [REST](https://github.com/taosdata/taos-connector-node/blob/3.0/typescript-rest/example/example.ts) | 使用 REST 连接的 TypeScript 使用示例。 |

## 使用限制

-Node.js 连接器 >= v2.0.6 目前支持 node 的版本为:支持 >=v12.8.0 <= v12.9.1 || >=v10.20.0 <= v10.9.0 ;2.0.5 及更早版本支持 v10.x 版本,其他版本可能存在包兼容性的问题。
+native 连接器(`@tdengine/client`)>= v3.0.0 目前支持的 node 版本为:>=v12.8.0 <= v12.9.1 || >=v10.20.0 <= v10.9.0;2.0.5 及更早版本支持 v10.x 版本,其他版本可能存在包兼容性的问题。

## 其他说明

@@ -225,7 +222,7 @@ Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/202

2. Node.js 版本

-   连接器 >v2.0.6 目前兼容的 Node.js 版本为:>=v10.20.0 <= v10.9.0 || >=v12.8.0 <= v12.9.1
+   原生连接器 `@tdengine/client` 目前兼容的 Node.js 版本为:>=v10.20.0 <= v10.9.0 || >=v12.8.0 <= v12.9.1

3. "Unable to establish connection","Unable to resolve FQDN"

@@ -235,18 +232,22 @@ Node.js 连接器的使用参见[视频教程](https://www.taosdata.com/blog/202

### 原生连接器

-| td2.0-connector 版本 | 说明 |
-| -------------------- | ---------------------------------------------------------------- |
-| 2.0.12 | 修复 cursor.close() 报错的 bug。 |
-| 2.0.11 | 支持绑定参数、json tag、schemaless 接口等功能。 |
-| 2.0.10 | 支持连接管理,普通查询、连续查询、获取系统信息、订阅功能等功能。 |
-
+| package name | version | TDengine version | 说明 |
+|------------------|---------|---------------------|------------------------------------------------------------------|
+| @tdengine/client | 3.0.0 | 3.0.0 | 支持 TDengine 3.0,且不与 2.x 兼容。 |
+| td2.0-connector | 2.0.12 | 2.4.x;2.5.x;2.6.x | 修复 cursor.close() 报错的 bug。 |
+| td2.0-connector | 2.0.11 | 2.4.x;2.5.x;2.6.x | 支持绑定参数、json tag、schemaless 接口等功能。 |
+| td2.0-connector | 2.0.10 | 2.4.x;2.5.x;2.6.x | 支持连接管理,普通查询、连续查询、获取系统信息、订阅功能等功能。 |

### REST 连接器

-| td2.0-rest-connector 版本 | 说明 |
-| ------------------------- | ---------------------------------------------------------------- |
-| 1.0.3 | 支持连接管理、普通查询、获取系统信息、错误信息、连续查询等功能。 |
+| package name | version | TDengine version | 说明 |
+|----------------------|---------|---------------------|---------------------------------------------------------------------------|
+| @tdengine/rest | 3.0.0 | 3.0.0 | 支持 TDengine 3.0,且不与 2.x 兼容。 |
+| td2.0-rest-connector | 1.0.7 | 2.4.x;2.5.x;2.6.x | 移除默认端口 6041。 |
+| td2.0-rest-connector | 1.0.6 | 2.4.x;2.5.x;2.6.x | 修复 create、insert、update、alter 等 SQL 执行返回的 affectRows 错误的 bug。 |
+| td2.0-rest-connector | 1.0.5 | 2.4.x;2.5.x;2.6.x | 支持云服务 cloud Token。 |
+| td2.0-rest-connector | 1.0.3 | 2.4.x;2.5.x;2.6.x | 支持连接管理、普通查询、获取系统信息、错误信息、连续查询等功能。 |

## API 参考

-[API 参考](https://docs.taosdata.com/api/td2.0-connector/)
+[API 参考](https://docs.taosdata.com/api/td2.0-connector/)
\ No newline at end of file
diff --git a/docs/zh/14-reference/03-connector/python.mdx b/docs/zh/14-reference/03-connector/python.mdx
index a77dc71db7..d7b17dc74a 100644
--- a/docs/zh/14-reference/03-connector/python.mdx
+++ b/docs/zh/14-reference/03-connector/python.mdx
@@ -306,8 +306,7 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线
| [bind_row.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/bind-row.py) | 参数绑定,一次绑定一行 |
| [insert_lines.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/insert-lines.py) | InfluxDB 行协议写入 |
| [json_tag.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/json-tag.py) | 使用 JSON 类型的标签 |
-| [subscribe-async.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-async.py) | 异步订阅 |
-| [subscribe-sync.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/subscribe-sync.py) | 同步订阅 |
+| [tmq.py](https://github.com/taosdata/taos-connector-python/blob/main/examples/tmq.py) | tmq 订阅 |

## 其它说明

@@ -326,23 +325,15 @@ TaosCursor 类使用原生连接进行写入、查询操作。在客户端多线

1. https://stackoverflow.com/questions/10611328/parsing-datetime-strings-containing-nanoseconds
2. https://www.python.org/dev/peps/pep-0564/

-
-## 常见问题
-
-欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。
-
## 重要更新

-| 连接器版本 | 重要更新 | 发布日期 |
-| ---------- | --------------------------------------------------------------------------------- | ---------- |
-| 2.3.1 | 1. support TDengine REST API
2. remove support for Python version below 3.6 | 2022-04-28 | -| 2.2.5 | support timezone option when connect | 2022-04-13 | -| 2.2.2 | support sqlalchemy dialect plugin | 2022-03-28 | - - [**Release Notes**](https://github.com/taosdata/taos-connector-python/releases) ## API 参考 - [taos](https://docs.taosdata.com/api/taospy/taos/) - [taosrest](https://docs.taosdata.com/api/taospy/taosrest) + +## 常见问题 + +欢迎[提问或报告问题](https://github.com/taosdata/taos-connector-python/issues)。 diff --git a/docs/zh/17-operation/01-pkg-install.md b/docs/zh/17-operation/01-pkg-install.md index 0680f76095..959510b670 100644 --- a/docs/zh/17-operation/01-pkg-install.md +++ b/docs/zh/17-operation/01-pkg-install.md @@ -56,8 +56,8 @@ lrwxrwxrwx 1 root root 13 Feb 22 09:34 log -> /var/log/taos/ ``` $ sudo dpkg -r tdengine -(Reading database ... 137504 files and directories currently installed.) -Removing tdengine (2.4.0.7) ... +(Reading database ... 120119 files and directories currently installed.) +Removing tdengine (3.0.0.10002) ... TDengine is removed successfully! ``` @@ -81,10 +81,7 @@ TDengine is removed successfully! ``` $ rmtaos -Nginx for TDengine is running, stopping it... TDengine is removed successfully! - -taosKeeper is removed successfully! ```
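卸载完成后,可以用类似下面的脚本做一个简单的自查(仅为示意:假设使用默认安装路径与 systemd 环境;TDengine 卸载时默认不会删除 /var/lib/taos 中的数据文件):

```bash
#!/bin/bash
# 卸载结果自查示意脚本:服务名与路径均基于默认安装,如有自定义请相应调整
if systemctl is-active --quiet taosd; then
  echo "taosd 仍在运行,卸载可能未完成"
else
  echo "taosd 已停止或已被移除"
fi
# 数据目录默认会被保留,确认不再需要后方可手动清理
[ -d /var/lib/taos ] && echo "/var/lib/taos 仍保留(包含历史数据)"
```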
diff --git a/docs/zh/25-application/03-immigrate.md b/docs/zh/25-application/_03-immigrate.md similarity index 100% rename from docs/zh/25-application/03-immigrate.md rename to docs/zh/25-application/_03-immigrate.md diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md index 5b57e345c8..59e0d7cae0 100644 --- a/docs/zh/27-train-faq/01-faq.md +++ b/docs/zh/27-train-faq/01-faq.md @@ -1,241 +1,200 @@ ---- -title: 常见问题及反馈 ---- - -## 问题反馈 - -如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包: - -1. /var/log/taos (如果没有修改过默认路径) -2. /etc/taos - -附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。 - -为了保证有足够的 debug 信息,如果问题能够重复,请修改/etc/taos/taos.cfg 文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启 taosd, 重复问题,然后再递交。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。 - -``` - alter dnode debugFlag 135; -``` - -但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。 - -## 常见问题列表 - -### 1. TDengine2.0 之前的版本升级到 2.0 及以上的版本应该注意什么?☆☆☆ - -2.0 版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作: - -1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg` -2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/` -3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/` -4. 安装最新稳定版本的 TDengine -5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 - -### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。 - -### 3. 创建数据表时提示 more dnodes are needed - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/965.html)。 - -### 4. 如何让 TDengine crash 时生成 core 文件? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。 - -### 5. 遇到错误“Unable to establish connection” 怎么办? - -客户端遇到连接故障,请按照下面的步骤进行检查: - -1. 检查网络环境 - - - 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030-6042 的访问权限 - - 本地虚拟机:检查网络能否 ping 通,尽量避免使用`localhost` 作为 hostname - - 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回值客户端 - -2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用 - -3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* - -4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 - -5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。 - -6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030-6042 上的 TCP/UDP 协议能够互通。 - -7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里 - -8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_) - -9. 如果仍不能排除连接故障 - - - Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅 - 检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port} ` - 检查服务器侧 TCP 端口连接是否工作:`nc -l {port}` - 检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}` - - - Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问 - -10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅(包括 TCP 和 UDP):[TDengine 内嵌网络检测工具使用指南](https://www.taosdata.com/blog/2020/09/08/1816.html)。 - -### 6. 遇到错误 “Unexpected generic error in RPC”或者“Unable to resolve FQDN” 怎么办? - -产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查: - -1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) -2. 如果网络配置有 DNS server,请检查是否正常工作 -3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否配置,并是否有正确的 IP 地址 -4. 
如果网络配置 OK,从客户端所在机器,你需要能 Ping 该连接的 FQDN,否则客户端是无法连接服务器的 -5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录的 dnodeEps.json 是否符合当前配置的 EP,路径默认为/var/lib/taos/dnode。正常情况下,建议更换新的数据目录或者备份后删除以前的数据目录,这样可以避免该问题。 -6. 检查/etc/hosts 和/etc/hostname 是否是预配置的 FQDN - -### 7. 虽然语法正确,为什么我还是得到 "Invalid SQL" 错误? - -如果你确认语法正确,2.0 之前版本,请检查 SQL 语句长度是否超过 64K。如果超过,也会返回这个错误。 - -### 8. 是否支持 validation queries? - -TDengine 还没有一组专用的 validation queries。然而建议你使用系统监测的数据库”log"来做。 - - - -### 9. 我可以删除或更新一条记录吗? - -TDengine 目前尚不支持删除功能,未来根据用户需求可能会支持。 - -从 2.0.8.0 开始,TDengine 支持更新已经写入数据的功能。使用更新功能需要在创建数据库时使用 UPDATE 1 参数,之后可以使用 INSERT INTO 命令更新已经写入的相同时间戳数据。UPDATE 参数不支持 ALTER DATABASE 命令修改。没有使用 UPDATE 1 参数创建的数据库,写入相同时间戳的数据不会修改之前的数据,也不会报错。 - -另需注意,在 UPDATE 设置为 0 时,后发送的相同时间戳的数据会被直接丢弃,但并不会报错,而且仍然会被计入 affected rows (所以不能利用 INSERT 指令的返回信息进行时间戳查重)。这样设计的主要原因是,TDengine 把写入的数据看做一个数据流,无论时间戳是否出现冲突,TDengine 都认为产生数据的原始设备真实地产生了这样的数据。UPDATE 参数只是控制这样的流数据在进行持久化时要怎样处理——UPDATE 为 0 时,表示先写入的数据覆盖后写入的数据;而 UPDATE 为 1 时,表示后写入的数据覆盖先写入的数据。这种覆盖关系如何选择,取决于对数据的后续使用和统计中,希望以先还是后生成的数据为准。 - -此外,从 2.1.7.0 版本开始,支持将 UPDATE 参数设为 2,表示“支持部分列更新”。也即,当 UPDATE 设为 1 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会被设为 NULL;而当 UPDATE 设为 2 时,如果更新一个数据行,其中某些列没有提供取值,那么这些列会保持原有数据行中的对应值。 - -### 10. 我怎么创建超过 1024 列的表? - -使用 2.0 及其以上版本,默认支持 1024 列;2.0 之前的版本,TDengine 最大允许创建 250 列的表。但是如果确实超过限值,建议按照数据特性,逻辑地将这个宽表分解成几个小表。(从 2.1.7.0 版本开始,表的最大列数增加到了 4096 列。) - -### 11. 最有效的写入数据的方法是什么? - -批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。 - -### 12. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决? - -Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区设置成了中国(在 Control Panel 里可以设置),这时 cmd 中的`taos`客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse, IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下: - -```JAVA -Class.forName("com.taosdata.jdbc.TSDBDriver"); -Properties properties = new Properties(); -properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8"); -Connection = DriverManager.getConnection(url, properties); -``` - -### 13. Windows 系统下客户端无法正常显示中文字符? - -Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。 - -【 v2.2.1.5以后版本 】在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置: - -``` -locale C -charset UTF-8 -``` - -### 14. JDBC 报错: the executed SQL is not a DML or a DDL? - -请更新至最新的 JDBC 驱动,参考 [Java 连接器](/reference/connector/java) - -### 15. taos connect failed, reason: invalid timestamp - -常见原因是服务器和客户端时间没有校准,可以通过和时间服务器同步的方式(Linux 下使用 ntpdate 命令,Windows 在系统时间设置中选择自动同步)校准。 - -### 16. 表名显示不全 - -由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。 - -### 17. 如何进行数据迁移? - -TDengine 是根据 hostname 唯一标志一台机器的,在数据文件从机器 A 移动机器 B 时,注意如下两件事: - - - 2.0.0.0 至 2.0.6.x 的版本,重新配置机器 B 的 hostname 为机器 A 的 hostname。 - - 2.0.7.0 及以后的版本,到/var/lib/taos/dnode 下,修复 dnodeEps.json 的 dnodeId 对应的 FQDN,重启。确保机器内所有机器的此文件是完全相同的。 - - 1.x 和 2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 - -### 18. 如何在命令行程序 taos 中临时调整日志级别 - -为了调试方便,从 2.0.16 版本开始,命令行程序 taos 新增了与日志记录相关的两条指令: - -```sql -ALTER LOCAL flag_name flag_value; -``` - -其含义是,在当前的命令行程序下,修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置): - - - flag_name 的取值可以是:debugFlag,cDebugFlag,tmrDebugFlag,uDebugFlag,rpcDebugFlag - - flag_value 的取值可以是:131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志) - -```sql -ALTER LOCAL RESETLOG; -``` - -其含义是,清空本机所有由客户端生成的日志文件。 - - - -### 19. go 语言编写组件编译失败怎样解决? 
- -TDengine 2.3.0.0 及之后的版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,取代之前 taosd 内置的 httpd ,提供包含原 httpd 功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。 -使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 - -目前编译方式默认自动编译 taosAdapter。go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: - -```sh -go env -w GO111MODULE=on -go env -w GOPROXY=https://goproxy.cn,direct -``` - -如果希望继续使用之前的内置 httpd,可以关闭 taosAdapter 编译,使用 -`cmake .. -DBUILD_HTTP=true` 使用原来内置的 httpd。 - -### 20. 如何查询数据占用的存储空间大小? - -默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。 - -若想查看所有数据文件占用的具体大小,可以执行 Shell 指令:`du -sh /var/lib/taos/vnode --exclude='wal'` 来查看。此处排除了 WAL 目录,因为在持续写入的情况下,这里大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。 - -若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。 - -若仅仅想查看指定(超级)表的数据块分布及大小,可查看[_block_dist 函数](https://docs.taosdata.com/taos-sql/select/#_block_dist-%E5%87%BD%E6%95%B0) - -### 21. 客户端连接串如何保证高可用? - -请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html) - -### 22. 时间戳的时区信息是怎样处理的? - -TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 - -客户端在处理时间戳字符串时,会采取如下逻辑: - -1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 -2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 -3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 -4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 - -### 23. TDengine 2.0 都会用到哪些网络端口? - -使用到的网络端口请看文档:[serverport](/reference/config/#serverport) - -需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。 - -### 24. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功?? - -taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。 - -需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。 - -有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](https://docs.taosdata.com/reference/taosadapter/) - -### 25. 发生了 OOM 怎么办? - -OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。 - -TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 maxVgroupsPerDb 影响,每个 VNode 占用的内存大小受 Blocks 和 Cache 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。 +--- +title: 常见问题及反馈 +--- + +## 问题反馈 + +如果 FAQ 中的信息不能够帮到您,需要 TDengine 技术团队的技术支持与协助,请将以下两个目录中内容打包: + +1. /var/log/taos (如果没有修改过默认路径) +2. /etc/taos(如果没有指定其他配置文件路径) + +附上必要的问题描述,包括使用的 TDengine 版本信息、平台环境信息、发生该问题的执行操作、出现问题的表征及大概的时间,在 [GitHub](https://github.com/taosdata/TDengine) 提交 issue。 + +为了保证有足够的 debug 信息,如果问题能够重复,请修改/etc/taos/taos.cfg 文件,最后面添加一行“debugFlag 135"(不带引号本身),然后重启 taosd, 重复问题,然后再递交。也可以通过如下 SQL 语句,临时设置 taosd 的日志级别。 + +``` + alter dnode 'debugFlag' '135'; +``` + +其中 dnode_id 请从 show dnodes; 命令输出中获取。 + +但系统正常运行时,请一定将 debugFlag 设置为 131,否则会产生大量的日志信息,降低系统效率。 + +## 常见问题列表 + +### 1. 
TDengine3.0 之前的版本升级到 3.0 及以上的版本应该注意什么? + +3.0 版在之前版本的基础上,进行了完全的重构,配置文件和数据文件是不兼容的。在升级之前务必进行如下操作: + +1. 删除配置文件,执行 `sudo rm -rf /etc/taos/taos.cfg` +2. 删除日志文件,执行 `sudo rm -rf /var/log/taos/` +3. 确保数据已经不再需要的前提下,删除数据文件,执行 `sudo rm -rf /var/lib/taos/` +4. 安装最新3.0稳定版本的 TDengine +5. 如果需要迁移数据或者数据文件损坏,请联系涛思数据官方技术支持团队,进行协助解决 + +### 2. Windows 平台下 JDBCDriver 找不到动态链接库,怎么办? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/03/950.html)。 + +### 3. 如何让 TDengine crash 时生成 core 文件? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2019/12/06/974.html)。 + +### 4. 遇到错误“Unable to establish connection” 怎么办? + +客户端遇到连接故障,请按照下面的步骤进行检查: + +1. 检查网络环境 + + - 云服务器:检查云服务器的安全组是否打开 TCP/UDP 端口 6030/6041 的访问权限 + - 本地虚拟机:检查网络能否 ping 通,尽量避免使用`localhost` 作为 hostname + - 公司服务器:如果为 NAT 网络环境,请务必检查服务器能否将消息返回值客户端 + +2. 确保客户端与服务端版本号是完全一致的,开源社区版和企业版也不能混用 + +3. 在服务器,执行 `systemctl status taosd` 检查*taosd*运行状态。如果没有运行,启动*taosd* + +4. 确认客户端连接时指定了正确的服务器 FQDN (Fully Qualified Domain Name —— 可在服务器上执行 Linux 命令 hostname -f 获得),FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)。 + +5. ping 服务器 FQDN,如果没有反应,请检查你的网络,DNS 设置,或客户端所在计算机的系统 hosts 文件。如果部署的是 TDengine 集群,客户端需要能 ping 通所有集群节点的 FQDN。 + +6. 检查防火墙设置(Ubuntu 使用 ufw status,CentOS 使用 firewall-cmd --list-port),确保集群中所有主机在端口 6030/6041 上的 TCP/UDP 协议能够互通。 + +7. 对于 Linux 上的 JDBC(ODBC, Python, Go 等接口类似)连接, 确保*libtaos.so*在目录*/usr/local/taos/driver*里, 并且*/usr/local/taos/driver*在系统库函数搜索路径*LD_LIBRARY_PATH*里 + +8. 对于 Windows 上的 JDBC, ODBC, Python, Go 等连接,确保*C:\TDengine\driver\taos.dll*在你的系统库函数搜索目录里 (建议*taos.dll*放在目录 _C:\Windows\System32_) + +9. 如果仍不能排除连接故障 + + - Linux 系统请使用命令行工具 nc 来分别判断指定端口的 TCP 和 UDP 连接是否通畅 + 检查 UDP 端口连接是否工作:`nc -vuz {hostIP} {port} ` + 检查服务器侧 TCP 端口连接是否工作:`nc -l {port}` + 检查客户端侧 TCP 端口连接是否工作:`nc {hostIP} {port}` + + - Windows 系统请使用 PowerShell 命令 Test-NetConnection -ComputerName {fqdn} -Port {port} 检测服务段端口是否访问 + +10. 也可以使用 taos 程序内嵌的网络连通检测功能,来验证服务器和客户端之间指定的端口连接是否通畅:[诊断及其他](https://docs.taosdata.com/3.0-preview/operation/diagnose/)。 + +### 5. 遇到错误 Unable to resolve FQDN” 怎么办? + +产生这个错误,是由于客户端或数据节点无法解析 FQDN(Fully Qualified Domain Name)导致。对于 TAOS Shell 或客户端应用,请做如下检查: + +1. 请检查连接的服务器的 FQDN 是否正确,FQDN 配置参考:[一篇文章说清楚 TDengine 的 FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html) +2. 如果网络配置有 DNS server,请检查是否正常工作 +3. 如果网络没有配置 DNS server,请检查客户端所在机器的 hosts 文件,查看该 FQDN 是否配置,并是否有正确的 IP 地址 +4. 如果网络配置 OK,从客户端所在机器,你需要能 Ping 该连接的 FQDN,否则客户端是无法连接服务器的 +5. 如果服务器曾经使用过 TDengine,且更改过 hostname,建议检查 data 目录的 dnode.json 是否符合当前配置的 EP,路径默认为/var/lib/taos/dnode。正常情况下,建议更换新的数据目录或者备份后删除以前的数据目录,这样可以避免该问题。 +6. 检查/etc/hosts 和/etc/hostname 是否是预配置的 FQDN + +### 6. 最有效的写入数据的方法是什么? + +批量插入。每条写入语句可以一张表同时插入多条记录,也可以同时插入多张表的多条记录。 + +### 7. Windows 系统下插入的 nchar 类数据中的汉字被解析成了乱码如何解决? + +Windows 下插入 nchar 类的数据中如果有中文,请先确认系统的地区设置成了中国(在 Control Panel 里可以设置),这时 cmd 中的`taos`客户端应该已经可以正常工作了;如果是在 IDE 里开发 Java 应用,比如 Eclipse, IntelliJ,请确认 IDE 里的文件编码为 GBK(这是 Java 默认的编码类型),然后在生成 Connection 时,初始化客户端的配置,具体语句如下: + +```JAVA +Class.forName("com.taosdata.jdbc.TSDBDriver"); +Properties properties = new Properties(); +properties.setProperty(TSDBDriver.LOCALE_KEY, "UTF-8"); +Connection = DriverManager.getConnection(url, properties); +``` + +### 8. Windows 系统下客户端无法正常显示中文字符? + +Windows 系统中一般是采用 GBK/GB18030 存储中文字符,而 TDengine 的默认字符集为 UTF-8 ,在 Windows 系统中使用 TDengine 客户端时,客户端驱动会将字符统一转换为 UTF-8 编码后发送到服务端存储,因此在应用开发过程中,调用接口时正确配置当前的中文字符集即可。 + +在 Windows 10 环境下运行 TDengine 客户端命令行工具 taos 时,若无法正常输入、显示中文,可以对客户端 taos.cfg 做如下配置: + +``` +locale C +charset UTF-8 +``` + +### 9. 
表名显示不全 + +由于 taos shell 在终端中显示宽度有限,有可能比较长的表名显示不全,如果按照显示的不全的表名进行相关操作会发生 Table does not exist 错误。解决方法可以是通过修改 taos.cfg 文件中的设置项 maxBinaryDisplayWidth, 或者直接输入命令 set max_binary_display_width 100。或者在命令结尾使用 \G 参数来调整结果的显示方式。 + +### 10. 如何进行数据迁移? + +TDengine 是根据 hostname 唯一标志一台机器的,对于3.0版本,将数据文件从机器 A 移动机器 B 时,需要重新配置机器 B 的 hostname 为机器 A 的 hostname。 + +注:3.x 和 之前的1.x、2.x 版本的存储结构不兼容,需要使用迁移工具或者自己开发应用导出导入数据。 + +### 11. 如何在命令行程序 taos 中临时调整日志级别 + +为了调试方便,命令行程序 taos 新增了与日志记录相关的指令: + +```sql +ALTER LOCAL local_option + +local_option: { + 'resetLog' + | 'rpcDebugFlag' value + | 'tmrDebugFlag' value + | 'cDebugFlag' value + | 'uDebugFlag' value + | 'debugFlag' value +} +``` + +其含义是,在当前的命令行程序下,清空本机所有客户端生成的日志文件(resetLog),或修改一个特定模块的日志记录级别(只对当前命令行程序有效,如果 taos 命令行程序重启,则需要重新设置): + + - value 的取值可以是:131(输出错误和警告日志),135( 输出错误、警告和调试日志),143( 输出错误、警告、调试和跟踪日志)。 + +### 12. go 语言编写组件编译失败怎样解决? + +TDengine 3.0版本包含一个使用 go 语言开发的 taosAdapter 独立组件,需要单独运行,提供restful接入功能以及支持多种其他软件(Prometheus、Telegraf、collectd、StatsD 等)的数据接入功能。 +使用最新 develop 分支代码编译需要先 `git submodule update --init --recursive` 下载 taosAdapter 仓库代码后再编译。 + +go 语言版本要求 1.14 以上,如果发生 go 编译错误,往往是国内访问 go mod 问题,可以通过设置 go 环境变量来解决: + +```sh +go env -w GO111MODULE=on +go env -w GOPROXY=https://goproxy.cn,direct +``` + +### 13. 如何查询数据占用的存储空间大小? + +默认情况下,TDengine 的数据文件存储在 /var/lib/taos ,日志文件存储在 /var/log/taos 。 + +若想查看所有数据文件占用的具体大小,可以执行 Shell 指令:`du -sh /var/lib/taos/vnode --exclude='wal'` 来查看。此处排除了 WAL 目录,因为在持续写入的情况下,这里大小几乎是固定的,并且每当正常关闭 TDengine 让数据落盘后,WAL 目录都会清空。 + +若想查看单个数据库占用的大小,可在命令行程序 taos 内指定要查看的数据库后执行 `show vgroups;` ,通过得到的 VGroup id 去 /var/lib/taos/vnode 下查看包含的文件夹大小。 + +### 14. 客户端连接串如何保证高可用? + +请看为此问题撰写的 [技术博客](https://www.taosdata.com/blog/2021/04/16/2287.html) + +### 15. 时间戳的时区信息是怎样处理的? + +TDengine 中时间戳的时区总是由客户端进行处理,而与服务端无关。具体来说,客户端会对 SQL 语句中的时间戳进行时区转换,转为 UTC 时区(即 Unix 时间戳——Unix Timestamp)再交由服务端进行写入和查询;在读取数据时,服务端也是采用 UTC 时区提供原始数据,客户端收到后再根据本地设置,把时间戳转换为本地系统所要求的时区进行显示。 + +客户端在处理时间戳字符串时,会采取如下逻辑: + +1. 在未做特殊设置的情况下,客户端默认使用所在操作系统的时区设置。 +2. 如果在 taos.cfg 中设置了 timezone 参数,则客户端会以这个配置文件中的设置为准。 +3. 如果在 C/C++/Java/Python 等各种编程语言的 Connector Driver 中,在建立数据库连接时显式指定了 timezone,那么会以这个指定的时区设置为准。例如 Java Connector 的 JDBC URL 中就有 timezone 参数。 +4. 在书写 SQL 语句时,也可以直接使用 Unix 时间戳(例如 `1554984068000`)或带有时区的时间戳字符串,也即以 RFC 3339 格式(例如 `2013-04-12T15:52:01.123+08:00`)或 ISO-8601 格式(例如 `2013-04-12T15:52:01.123+0800`)来书写时间戳,此时这些时间戳的取值将不再受其他时区设置的影响。 + +### 16. TDengine 3.0 都会用到哪些网络端口? + +使用到的网络端口请看文档:[serverport](../../reference/config/#serverport) + +需要注意,文档上列举的端口号都是以默认端口 6030 为前提进行说明,如果修改了配置文件中的设置,那么列举的端口都会随之出现变化,管理员可以参考上述的信息调整防火墙设置。 + +### 17. 为什么 RESTful 接口无响应、Grafana 无法添加 TDengine 为数据源、TDengineGUI 选了 6041 端口还是无法连接成功? + +taosAdapter 从 TDengine 2.4.0.0 版本开始成为 TDengine 服务端软件的组成部分,是 TDengine 集群和应用程序之间的桥梁和适配器。在此之前 RESTful 接口等功能是由 taosd 内置的 HTTP 服务提供的,而如今要实现上述功能需要执行:```systemctl start taosadapter``` 命令来启动 taosAdapter 服务。 + +需要说明的是,taosAdapter 的日志路径 path 需要单独配置,默认路径是 /var/log/taos ;日志等级 logLevel 有 8 个等级,默认等级是 info ,配置成 panic 可关闭日志输出。请注意操作系统 / 目录的空间大小,可通过命令行参数、环境变量或配置文件来修改配置,默认配置文件是 /etc/taos/taosadapter.toml 。 + +有关 taosAdapter 组件的详细介绍请看文档:[taosAdapter](../../reference/taosadapter/) + +### 18. 发生了 OOM 怎么办? 
+ +OOM 是操作系统的保护机制,当操作系统内存(包括 SWAP )不足时,会杀掉某些进程,从而保证操作系统的稳定运行。通常内存不足主要是如下两个原因导致,一是剩余内存小于 vm.min_free_kbytes ;二是程序请求的内存大于剩余内存。还有一种情况是内存充足但程序占用了特殊的内存地址,也会触发 OOM 。 + +TDengine 会预先为每个 VNode 分配好内存,每个 Database 的 VNode 个数受 建库时的vgroups参数影响,每个 VNode 占用的内存大小受 buffer参数 影响。要防止 OOM,需要在项目建设之初合理规划内存,并合理设置 SWAP ,除此之外查询过量的数据也有可能导致内存暴涨,这取决于具体的查询语句。TDengine 企业版对内存管理做了优化,采用了新的内存分配器,对稳定性有更高要求的用户可以考虑选择企业版。 diff --git a/examples/rust b/examples/rust new file mode 160000 index 0000000000..7ed7a97715 --- /dev/null +++ b/examples/rust @@ -0,0 +1 @@ +Subproject commit 7ed7a97715388fa144718764d6bf20f9bfc29a12 diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 7839859e8b..9ca67056c6 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -246,7 +246,7 @@ void blockDebugShowDataBlocks(const SArray* dataBlocks, const char* flag); // for debug char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** dumpBuf); -int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, +int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlocks, STSchema* pTSchema, int32_t vgId, tb_uid_t suid); char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId); diff --git a/include/common/tmsg.h b/include/common/tmsg.h index 9eabeb4f58..d26e99f423 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2657,6 +2657,31 @@ typedef struct { SEpSet epSet; } SVgEpSet; +typedef struct { + int64_t suid; + int8_t level; +} SRSmaFetchMsg; + +static FORCE_INLINE int32_t tEncodeSRSmaFetchMsg(SEncoder* pCoder, const SRSmaFetchMsg* pReq) { + if (tStartEncode(pCoder) < 0) return -1; + + if (tEncodeI64(pCoder, pReq->suid) < 0) return -1; + if (tEncodeI8(pCoder, pReq->level) < 0) return -1; + + tEndEncode(pCoder); + return 0; +} + +static FORCE_INLINE int32_t tDecodeSRSmaFetchMsg(SDecoder* pCoder, SRSmaFetchMsg* pReq) { + if (tStartDecode(pCoder) < 0) return -1; + + if (tDecodeI64(pCoder, &pReq->suid) < 0) return -1; + if (tDecodeI8(pCoder, &pReq->level) < 0) return -1; + + tEndDecode(pCoder); + return 0; +} + typedef struct { int8_t version; // for compatibility(default 0) int8_t intervalUnit; // MACRO: TIME_UNIT_XXX diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h index 20dc04631e..9ddb872007 100644 --- a/include/common/tmsgdef.h +++ b/include/common/tmsgdef.h @@ -200,6 +200,7 @@ enum { TD_DEF_MSG_TYPE(TDMT_VND_CANCEL_SMA, "vnode-cancel-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DROP_SMA, "vnode-drop-sma", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_SUBMIT_RSMA, "vnode-submit-rsma", SSubmitReq, SSubmitRsp) + TD_DEF_MSG_TYPE(TDMT_VND_FETCH_RSMA, "vnode-fetch-rsma", SRSmaFetchMsg, NULL) TD_DEF_MSG_TYPE(TDMT_VND_DELETE, "delete-data", SVDeleteReq, SVDeleteRsp) TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIG, "alter-config", NULL, NULL) TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL) diff --git a/include/common/ttypes.h b/include/common/ttypes.h index 4b3e11f947..ceb3eae033 100644 --- a/include/common/ttypes.h +++ b/include/common/ttypes.h @@ -354,8 +354,6 @@ void operateVal(void *dst, void *s1, void *s2, int32_t optr, int32_t type); void *getDataMin(int32_t type); void *getDataMax(int32_t type); -#define SET_DOUBLE_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_DOUBLE_NULL) -#define SET_BIGINT_NULL(v) (*(uint64_t *)(v) = TSDB_DATA_BIGINT_NULL) #ifdef __cplusplus } diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 8fa63bbd45..72732ee198 100644 --- 
a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -67,7 +67,7 @@ typedef struct SResultRowEntryInfo { bool initialized:1; // output buffer has been initialized bool complete:1; // query has completed uint8_t isNullRes:6; // the result is null - uint16_t numOfRes; // num of output result in current buffer + uint16_t numOfRes; // num of output result in current buffer. NOT NULL RESULT } SResultRowEntryInfo; // determine the real data need to calculated the result diff --git a/include/libs/function/functionMgt.h b/include/libs/function/functionMgt.h index e2846c5974..479432eebe 100644 --- a/include/libs/function/functionMgt.h +++ b/include/libs/function/functionMgt.h @@ -202,7 +202,7 @@ bool fmIsForbidStreamFunc(int32_t funcId); bool fmIsIntervalInterpoFunc(int32_t funcId); bool fmIsInterpFunc(int32_t funcId); bool fmIsLastRowFunc(int32_t funcId); -bool fmIsReturnNotNullFunc(int32_t funcId); +bool fmIsNotNullOutputFunc(int32_t funcId); bool fmIsSelectValueFunc(int32_t funcId); bool fmIsSystemInfoFunc(int32_t funcId); bool fmIsImplicitTsFunc(int32_t funcId); diff --git a/include/libs/nodes/plannodes.h b/include/libs/nodes/plannodes.h index 40879de3bc..384b4a829a 100644 --- a/include/libs/nodes/plannodes.h +++ b/include/libs/nodes/plannodes.h @@ -121,6 +121,7 @@ typedef struct SProjectLogicNode { SLogicNode node; SNodeList* pProjections; char stmtName[TSDB_TABLE_NAME_LEN]; + bool ignoreGroupId; } SProjectLogicNode; typedef struct SIndefRowsFuncLogicNode { @@ -344,6 +345,7 @@ typedef struct SProjectPhysiNode { SPhysiNode node; SNodeList* pProjections; bool mergeDataBlock; + bool ignoreGroupId; } SProjectPhysiNode; typedef struct SIndefRowsFuncPhysiNode { diff --git a/include/libs/stream/tstreamUpdate.h b/include/libs/stream/tstreamUpdate.h index a4728e6382..78543118da 100644 --- a/include/libs/stream/tstreamUpdate.h +++ b/include/libs/stream/tstreamUpdate.h @@ -47,6 +47,8 @@ bool updateInfoIgnore(SUpdateInfo *pInfo, STimeWindow* pWin, uint64_t groupId, u void updateInfoDestroy(SUpdateInfo *pInfo); void updateInfoAddCloseWindowSBF(SUpdateInfo *pInfo); void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo); +int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo); +int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo); #ifdef __cplusplus } diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index d96a55c74c..35e79cf45d 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -26,7 +26,10 @@ extern "C" { extern bool gRaftDetailLog; -#define SYNC_RESP_TTL_MS 10000000 +#define SYNC_RESP_TTL_MS 10000000 +#define SYNC_SPEED_UP_HB_TIMER 400 +#define SYNC_SPEED_UP_AFTER_MS (1000 * 20) +#define SYNC_SLOW_DOWN_RANGE 100 #define SYNC_MAX_BATCH_SIZE 1 #define SYNC_INDEX_BEGIN 0 @@ -205,6 +208,7 @@ int32_t syncSetStandby(int64_t rid); ESyncState syncGetMyRole(int64_t rid); bool syncIsReady(int64_t rid); const char* syncGetMyRoleStr(int64_t rid); +bool syncRestoreFinish(int64_t rid); SyncTerm syncGetMyTerm(int64_t rid); SyncGroupId syncGetVgId(int64_t rid); void syncGetEpSet(int64_t rid, SEpSet* pEpSet); diff --git a/include/libs/transport/trpc.h b/include/libs/transport/trpc.h index 467f0a9ff0..655c903c0b 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -47,8 +47,6 @@ typedef struct SRpcHandleInfo { int8_t persistHandle; // persist handle or not int8_t hasEpSet; - STraceId traceId; - // app info void *ahandle; // app handle set by client void *wrapper; // wrapper handle @@ 
-58,7 +56,8 @@ typedef struct SRpcHandleInfo { void *rsp; int32_t rspLen; - // conn info + STraceId traceId; + SRpcConnInfo conn; } SRpcHandleInfo; diff --git a/include/os/osDef.h b/include/os/osDef.h index 14f38eb7ff..be8689398d 100644 --- a/include/os/osDef.h +++ b/include/os/osDef.h @@ -57,7 +57,7 @@ extern "C" { #if defined(WINDOWS) char *stpcpy (char *dest, const char *src); - char *stpncpy (char *dest, const char *src, size_t n); + char *stpncpy (char *dest, const char *src, int n); // specific #ifndef __COMPAR_FN_T @@ -77,7 +77,7 @@ extern "C" { char * strsep(char **stringp, const char *delim); char * getpass(const char *prefix); - char * strndup(const char *s, size_t n); + char * strndup(const char *s, int n); // for send function in tsocket.c #define MSG_NOSIGNAL 0 diff --git a/include/os/osSystem.h b/include/os/osSystem.h index 6770be6e46..c5b8fc7be7 100644 --- a/include/os/osSystem.h +++ b/include/os/osSystem.h @@ -32,6 +32,7 @@ extern "C" { typedef struct TdCmd *TdCmdPtr; TdCmdPtr taosOpenCmd(const char* cmd); +int64_t taosGetsCmd(TdCmdPtr pCmd, int32_t maxSize, char *__restrict buf); int64_t taosGetLineCmd(TdCmdPtr pCmd, char** __restrict ptrBuf); int32_t taosEOFCmd(TdCmdPtr pCmd); int64_t taosCloseCmd(TdCmdPtr* ppCmd); diff --git a/include/util/taoserror.h b/include/util/taoserror.h index 145e2f7aad..ce73dceec2 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -610,6 +610,8 @@ int32_t* taosGetErrno(); #define TSDB_CODE_RSMA_QTASKINFO_CREATE TAOS_DEF_ERROR_CODE(0, 0x3152) #define TSDB_CODE_RSMA_FILE_CORRUPTED TAOS_DEF_ERROR_CODE(0, 0x3153) #define TSDB_CODE_RSMA_REMOVE_EXISTS TAOS_DEF_ERROR_CODE(0, 0x3154) +#define TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP TAOS_DEF_ERROR_CODE(0, 0x3155) +#define TSDB_CODE_RSMA_EMPTY_INFO TAOS_DEF_ERROR_CODE(0, 0x3156) //index #define TSDB_CODE_INDEX_REBUILDING TAOS_DEF_ERROR_CODE(0, 0x3200) diff --git a/include/util/tbloomfilter.h b/include/util/tbloomfilter.h index b168da594a..c9ca905f82 100644 --- a/include/util/tbloomfilter.h +++ b/include/util/tbloomfilter.h @@ -17,6 +17,7 @@ #define _TD_UTIL_BLOOMFILTER_H_ #include "os.h" +#include "tencode.h" #include "thash.h" #ifdef __cplusplus @@ -42,6 +43,8 @@ int32_t tBloomFilterNoContain(const SBloomFilter *pBF, const void *keyBuf, void tBloomFilterDestroy(SBloomFilter *pBF); void tBloomFilterDump(const SBloomFilter *pBF); bool tBloomFilterIsFull(const SBloomFilter *pBF); +int32_t tBloomFilterEncode(const SBloomFilter *pBF, SEncoder* pEncoder); +SBloomFilter* tBloomFilterDecode(SDecoder* pDecoder); #ifdef __cplusplus } diff --git a/include/util/tdef.h b/include/util/tdef.h index 6d893765fc..6fbec4bc67 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -359,11 +359,11 @@ typedef enum ELogicConditionType { #define TSDB_DEFAULT_DB_SCHEMALESS TSDB_DB_SCHEMALESS_OFF #define TSDB_DB_MIN_WAL_RETENTION_PERIOD -1 -#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD 0 +#define TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD (24 * 60 * 60 * 2) #define TSDB_DB_MIN_WAL_RETENTION_SIZE -1 -#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE 0 +#define TSDB_DEFAULT_DB_WAL_RETENTION_SIZE -1 #define TSDB_DB_MIN_WAL_ROLL_PERIOD 0 -#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD 0 +#define TSDB_DEFAULT_DB_WAL_ROLL_PERIOD (24 * 60 * 60 * 1) #define TSDB_DB_MIN_WAL_SEGMENT_SIZE 0 #define TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE 0 @@ -396,7 +396,7 @@ typedef enum ELogicConditionType { #ifdef WINDOWS #define TSDB_MAX_RPC_THREADS 4 // windows pipe only support 4 connections. 
#else -#define TSDB_MAX_RPC_THREADS 5 +#define TSDB_MAX_RPC_THREADS 10 #endif #define TSDB_QUERY_TYPE_NON_TYPE 0x00u // none type diff --git a/include/util/tlockfree.h b/include/util/tlockfree.h index 8db6be8860..82dd5a0e8b 100644 --- a/include/util/tlockfree.h +++ b/include/util/tlockfree.h @@ -25,9 +25,9 @@ extern "C" { // reference counting typedef void (*_ref_fn_t)(const void *pObj); -#define T_REF_DECLARE() \ - struct { \ - int32_t val; \ +#define T_REF_DECLARE() \ + struct { \ + volatile int32_t val; \ } _ref; #define T_REF_REGISTER_FUNC(s, e) \ diff --git a/include/util/tscalablebf.h b/include/util/tscalablebf.h index 8f88f65048..1386f840a8 100644 --- a/include/util/tscalablebf.h +++ b/include/util/tscalablebf.h @@ -33,7 +33,8 @@ int32_t tScalableBfPut(SScalableBf *pSBf, const void *keyBuf, uint32_t len); int32_t tScalableBfNoContain(const SScalableBf *pSBf, const void *keyBuf, uint32_t len); void tScalableBfDestroy(SScalableBf *pSBf); -void tScalableBfDump(const SScalableBf *pSBf); +int32_t tScalableBfEncode(const SScalableBf *pSBf, SEncoder* pEncoder); +SScalableBf* tScalableBfDecode(SDecoder* pDecoder); #ifdef __cplusplus } diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 3e46a1aa55..6de475a4c0 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -60,7 +60,7 @@ cp ${compile_dir}/../packaging/tools/set_core.sh ${pkg_dir}${install_home_pat cp ${compile_dir}/../packaging/tools/taosd-dump-cfg.gdb ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/bin/taosd ${pkg_dir}${install_home_path}/bin -#cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin +cp ${compile_dir}/build/bin/taosBenchmark ${pkg_dir}${install_home_path}/bin if [ -f "${compile_dir}/build/bin/taosadapter" ]; then cp ${compile_dir}/build/bin/taosadapter ${pkg_dir}${install_home_path}/bin ||: diff --git a/packaging/release.bat b/packaging/release.bat index c1cf7875a5..ffd3a68048 100644 --- a/packaging/release.bat +++ b/packaging/release.bat @@ -2,61 +2,78 @@ set internal_dir=%~dp0\..\..\ set community_dir=%~dp0\.. -cd %community_dir% -git checkout -- . -cd %community_dir%\packaging +set package_dir=%cd% :: %1 name %2 version if !%1==! GOTO USAGE if !%2==! 
GOTO USAGE -if %1 == taos GOTO TAOS -if %1 == power GOTO POWER -if %1 == tq GOTO TQ -if %1 == pro GOTO PRO -if %1 == kh GOTO KH -if %1 == jh GOTO JH -GOTO USAGE -:TAOS -goto RELEASE - -:POWER -call sed_power.bat %community_dir% -goto RELEASE - -:TQ -call sed_tq.bat %community_dir% -goto RELEASE - -:PRO -call sed_pro.bat %community_dir% -goto RELEASE - -:KH -call sed_kh.bat %community_dir% -goto RELEASE - -:JH -call sed_jh.bat %community_dir% -goto RELEASE - -:RELEASE -echo release windows-client-64 for %1, version: %2 -if not exist %internal_dir%\debug\ver-%2-64bit-%1 ( - md %internal_dir%\debug\ver-%2-64bit-%1 +if "%1" == "cluster" ( + set work_dir=%internal_dir% + set packagServerName_x64=TDengine-enterprise-server-%2-beta-Windows-x64 + @REM set packagServerName_x86=TDengine-enterprise-server-%2-beta-Windows-x86 + set packagClientName_x64=TDengine-enterprise-client-%2-beta-Windows-x64 + set packagClientName_x86=TDengine-enterprise-client-%2-beta-Windows-x86 ) else ( - rd /S /Q %internal_dir%\debug\ver-%2-64bit-%1 - md %internal_dir%\debug\ver-%2-64bit-%1 + set work_dir=%community_dir% + set packagServerName_x64=TDengine-server-%2-Windows-x64 + @REM set packagServerName_x86=TDengine-server-%2-Windows-x86 + set packagClientName_x64=TDengine-client-%2-Windows-x64 + set packagClientName_x86=TDengine-client-%2-Windows-x86 ) -cd %internal_dir%\debug\ver-%2-64bit-%1 -call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" amd64 -cmake ../../ -G "NMake Makefiles" -DVERNUMBER=%2 -DCPUTYPE=x64 -set CL=/MP4 -nmake install + +echo release windows-client for %1, version: %2 +if not exist %work_dir%\debug ( + md %work_dir%\debug +) +if not exist %work_dir%\debug\ver-%2-x64 ( + md %work_dir%\debug\ver-%2-x64 +) else ( + rd /S /Q %work_dir%\debug\ver-%2-x64 + md %work_dir%\debug\ver-%2-x64 +) +if not exist %work_dir%\debug\ver-%2-x86 ( + md %work_dir%\debug\ver-%2-x86 +) else ( + rd /S /Q %work_dir%\debug\ver-%2-x86 + md %work_dir%\debug\ver-%2-x86 +) +cd %work_dir%\debug\ver-%2-x64 +call vcvarsall.bat x64 +cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x64 +cmake --build . +rd /s /Q C:\TDengine +cmake --install . +if not %errorlevel% == 0 ( call :RUNFAILED build x64 failed & exit /b 1) +cd %package_dir% +iscc /DMyAppInstallName="%packagServerName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release +if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x64% failed & exit /b 1) +iscc /DMyAppInstallName="%packagClientName_x64%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release +if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x64% failed & exit /b 1) + +cd %work_dir%\debug\ver-%2-x86 +call vcvarsall.bat x86 +cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DVERNUMBER=%2 -DCPUTYPE=x86 +cmake --build . +rd /s /Q C:\TDengine +cmake --install . 
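+@REM The install step above stages the x86 build into C:\TDengine (the rd command
+@REM just cleared it); the errorlevel check below aborts packaging if it failed.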
+if not %errorlevel% == 0 ( call :RUNFAILED build x86 failed & exit /b 1) +cd %package_dir% +@REM iscc /DMyAppInstallName="%packagServerName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="" tools\tdengine.iss /O..\release +@REM if not %errorlevel% == 0 ( call :RUNFAILED package %packagServerName_x86% failed & exit /b 1) +iscc /DMyAppInstallName="%packagClientName_x86%" /DMyAppVersion="%2" /DMyAppExcludeSource="taosd.exe" tools\tdengine.iss /O..\release +if not %errorlevel% == 0 ( call :RUNFAILED package %packagClientName_x86% failed & exit /b 1) + goto EXIT0 :USAGE -echo Usage: release.bat $productName $version +echo Usage: release.bat $verMode $version goto EXIT0 -:EXIT0 \ No newline at end of file +:EXIT0 +exit /b + +:RUNFAILED +echo %* +cd %package_dir% +goto :eof \ No newline at end of file diff --git a/packaging/release.sh b/packaging/release.sh index 3426c2856d..2452ee1813 100755 --- a/packaging/release.sh +++ b/packaging/release.sh @@ -26,7 +26,7 @@ soMode=dynamic # [static | dynamic] dbName=taos # [taos | ...] allocator=glibc # [glibc | jemalloc] verNumber="" -verNumberComp="2.0.0.0" +verNumberComp="3.0.0.0" httpdBuild=false while getopts "hv:V:c:o:l:s:d:a:n:m:H:" arg; do @@ -216,7 +216,7 @@ else fi # check support cpu type -if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "mips64" ]]; then +if [[ "$cpuType" == "x64" ]] || [[ "$cpuType" == "aarch64" ]] || [[ "$cpuType" == "aarch32" ]] || [[ "$cpuType" == "arm64" ]] || [[ "$cpuType" == "arm32" ]] || [[ "$cpuType" == "mips64" ]]; then if [ "$verMode" != "cluster" ]; then # community-version compile cmake ../ -DCPUTYPE=${cpuType} -DOSTYPE=${osType} -DSOMODE=${soMode} -DDBNAME=${dbName} -DVERTYPE=${verType} -DVERDATE="${build_time}" -DGITINFO=${gitinfo} -DGITINFOI=${gitinfoOfInternal} -DVERNUMBER=${verNumber} -DVERCOMPATIBLE=${verNumberComp} -DPAGMODE=${pagMode} -DBUILD_HTTP=${BUILD_HTTP} -DBUILD_TOOLS=${BUILD_TOOLS} ${allocator_macro} diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 79e4b2edb8..d3d4bab0e6 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -69,7 +69,7 @@ cp %{_compiledir}/../packaging/tools/set_core.sh %{buildroot}%{homepath}/bin cp %{_compiledir}/../packaging/tools/taosd-dump-cfg.gdb %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taos %{buildroot}%{homepath}/bin cp %{_compiledir}/build/bin/taosd %{buildroot}%{homepath}/bin -#cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin +cp %{_compiledir}/build/bin/taosBenchmark %{buildroot}%{homepath}/bin if [ -f %{_compiledir}/build/bin/taosadapter ]; then cp %{_compiledir}/build/bin/taosadapter %{buildroot}%{homepath}/bin ||: diff --git a/packaging/tools/favicon.ico b/packaging/tools/favicon.ico new file mode 100644 index 0000000000..20b8026d1d Binary files /dev/null and b/packaging/tools/favicon.ico differ diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 2c3b70f5ed..beb0718987 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -30,7 +30,6 @@ configDir="/etc/taos" installDir="/usr/local/taos" adapterName="taosadapter" benchmarkName="taosBenchmark" -tmqName="tmq_sim" dumpName="taosdump" demoName="taosdemo" diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index a3f4c35842..6103ce170c 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -60,7 +60,7 @@ if [ "$pagMode" == "lite" ]; then strip ${build_dir}/bin/${serverName} strip 
${build_dir}/bin/${clientName} # lite version doesn't include taosadapter, which will lead to no restful interface - bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark ${build_dir}/bin/tmq_sim" + bin_files="${build_dir}/bin/${serverName} ${build_dir}/bin/${clientName} ${script_dir}/remove.sh ${script_dir}/startPre.sh ${build_dir}/bin/taosBenchmark " taostools_bin_files="" else @@ -78,7 +78,6 @@ else taostools_bin_files=" ${build_dir}/bin/taosdump \ ${build_dir}/bin/taosBenchmark \ - ${build_dir}/bin/tmq_sim \ ${build_dir}/bin/TDinsight.sh \ $tdinsight_caches" diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index d637ab8d5a..aa80cfb86c 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -132,6 +132,7 @@ function install_bin() { [ -x ${bin_dir}/taosd ] && ${csudo}ln -s ${bin_dir}/taosd ${bin_link_dir}/taosd || : [ -x ${bin_dir}/taosadapter ] && ${csudo}ln -s ${bin_dir}/taosadapter ${bin_link_dir}/taosadapter || : [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosdemo || : + [ -x ${bin_dir}/taosBenchmark ] && ${csudo}ln -sf ${bin_dir}/taosBenchmark ${bin_link_dir}/taosBenchmark || : [ -x ${bin_dir}/TDinsight.sh ] && ${csudo}ln -sf ${bin_dir}/TDinsight.sh ${bin_link_dir}/TDinsight.sh || : [ -x ${bin_dir}/taosdump ] && ${csudo}ln -s ${bin_dir}/taosdump ${bin_link_dir}/taosdump || : [ -x ${bin_dir}/set_core.sh ] && ${csudo}ln -s ${bin_dir}/set_core.sh ${bin_link_dir}/set_core || : diff --git a/packaging/tools/taos.bat b/packaging/tools/taos.bat new file mode 100644 index 0000000000..c3fe625e16 --- /dev/null +++ b/packaging/tools/taos.bat @@ -0,0 +1,6 @@ +@echo off +cd C:\TDengine +if not "%1" == "" ( + %1 --help + @cmd /k +) \ No newline at end of file diff --git a/packaging/tools/tdengine.iss b/packaging/tools/tdengine.iss new file mode 100644 index 0000000000..7310201815 --- /dev/null +++ b/packaging/tools/tdengine.iss @@ -0,0 +1,81 @@ +#define MyAppName "TDengine" +#define MyAppPublisher "taosdata" +#define MyAppURL "http://www.taosdata.com/" +#define MyAppBeforeInstallTxt "windows_before_install.txt" +#define MyAppIco "favicon.ico" +#define MyAppInstallDir "C:\TDengine" +#define MyAppOutputDir "./" +#define MyAppSourceDir "C:\TDengine" +;#define MyAppAllFile "\*" +#define MyAppCfgName "\cfg\*" +#define MyAppDriverName "\driver\*" +#define MyAppConnectorName "\connector\*" +#define MyAppExamplesName "\examples\*" +#define MyAppIncludeName "\include\*" +#define MyAppExeName "\*.exe" +#define MyAppTaosExeName "\taos.bat" +#define MyAppTaosdemoExeName "\taosBenchmark.exe" +#define MyAppDLLName "\driver\taos.dll" +;#define MyAppVersion "3.0" +;#define MyAppInstallName "TDengine" + +[Setup] +VersionInfoVersion={#MyAppVersion} +AppId={{A0F7A93C-79C4-485D-B2B8-F0D03DF42FAB} +AppName={#MyAppName} +AppVersion={#MyAppVersion} +;AppVerName={#MyAppName} {#MyAppVersion} +AppPublisher={#MyAppPublisher} +AppPublisherURL={#MyAppURL} +AppSupportURL={#MyAppURL} +AppUpdatesURL={#MyAppURL} +DefaultDirName={#MyAppInstallDir} +DefaultGroupName={#MyAppName} +DisableProgramGroupPage=yes +InfoBeforeFile={#MyAppBeforeInstallTxt} +OutputDir={#MyAppOutputDir} +OutputBaseFilename={#MyAppInstallName} +SetupIconFile={#MyAppIco} +Compression=lzma +SolidCompression=yes +DisableDirPage=yes +Uninstallable=yes + +[Languages] +Name: "chinesesimp"; MessagesFile: "compiler:Default.isl" +;Name: "english"; MessagesFile: "compiler:Languages\English.isl" + 
+[Files]
+;Source: {#MyAppSourceDir}{#MyAppAllFile}; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
+Source: taos.bat; DestDir: "{app}\include"; Flags: ignoreversion;
+;Source: taosdemo.png; DestDir: "{app}\include"; Flags: ignoreversion;
+;Source: taosShell.png; DestDir: "{app}\include"; Flags: ignoreversion;
+Source: favicon.ico; DestDir: "{app}\include"; Flags: ignoreversion;
+Source: {#MyAppSourceDir}{#MyAppDLLName}; DestDir: "{win}\System32"; Flags: ignoreversion;
+Source: {#MyAppSourceDir}{#MyAppCfgName}; DestDir: "{app}\cfg"; Flags: ignoreversion recursesubdirs createallsubdirs onlyifdoesntexist uninsneveruninstall
+Source: {#MyAppSourceDir}{#MyAppDriverName}; DestDir: "{app}\driver"; Flags: ignoreversion recursesubdirs createallsubdirs
+;Source: {#MyAppSourceDir}{#MyAppConnectorName}; DestDir: "{app}\connector"; Flags: ignoreversion recursesubdirs createallsubdirs
+;Source: {#MyAppSourceDir}{#MyAppExamplesName}; DestDir: "{app}\examples"; Flags: ignoreversion recursesubdirs createallsubdirs
+Source: {#MyAppSourceDir}{#MyAppIncludeName}; DestDir: "{app}\include"; Flags: ignoreversion recursesubdirs createallsubdirs
+Source: {#MyAppSourceDir}{#MyAppExeName}; DestDir: "{app}"; Excludes: {#MyAppExcludeSource}; Flags: ignoreversion recursesubdirs createallsubdirs
+Source: {#MyAppSourceDir}{#MyAppTaosdemoExeName}; DestDir: "{app}"; Flags: ignoreversion recursesubdirs createallsubdirs
+
+[UninstallDelete]
+Name: {app}\driver; Type: filesandordirs
+Name: {app}\connector; Type: filesandordirs
+Name: {app}\examples; Type: filesandordirs
+Name: {app}\include; Type: filesandordirs
+
+[Tasks]
+Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: checkablealone
+
+[Icons]
+Name: "{group}\Taos Shell"; Filename: "{app}\include\{#MyAppTaosExeName}"; Parameters: "taos.exe"; IconFilename: "{app}\include\{#MyAppIco}"
+Name: "{group}\Open TDengine Directory"; Filename: "{app}\"
+Name: "{group}\Taosdemo"; Filename: "{app}\include\{#MyAppTaosExeName}"; Parameters: "taosdemo.exe"; IconFilename: "{app}\include\{#MyAppIco}"
+Name: "{group}\{cm:UninstallProgram,{#MyAppName}}"; Filename: "{uninstallexe}"; IconFilename: "{app}\include\{#MyAppIco}"
+Name: "{commondesktop}\Taos Shell"; Filename: "{app}\include\{#MyAppTaosExeName}"; Parameters: "taos.exe"; Tasks: desktopicon; WorkingDir: "{app}"; IconFilename: "{app}\include\{#MyAppIco}"
+
+[Messages]
+ConfirmUninstall=Do you really want to uninstall TDengine from your computer?%n%nPress [Y] to completely delete %1 and all its components;%nPress [N] to keep the software on your computer.
diff --git a/packaging/tools/windows_before_install.txt b/packaging/tools/windows_before_install.txt
new file mode 100644
index 0000000000..b793a3e801
--- /dev/null
+++ b/packaging/tools/windows_before_install.txt
@@ -0,0 +1,3 @@
+TDengine is a highly efficient, scalable, and highly available distributed time-series database. It is heavily optimized for data ingestion and queries, which makes it far more efficient than general-purpose databases, so it can meet the demanding storage and query requirements of IoT and other fields that work with large amounts of data.
+
+TDengine will be installed under C:\TDengine. Users can modify the configuration file C:\TDengine\cfg\taos.cfg to set the log file path and other parameters.
\ No newline at end of file
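For context on the install note above: taos.cfg is a plain `option value` file with `#` comments. A minimal sketch of the kind of edits the note refers to follows; logDir, dataDir, and debugFlag are standard taos.cfg options (131 enables error and warning logs), but the paths and values here are illustrative only, not part of this changeset:

```
# C:\TDengine\cfg\taos.cfg -- illustrative values only
# directory where server and client logs are written
logDir     C:/TDengine/log
# directory where data files are stored
dataDir    C:/TDengine/data
# log verbosity: 131 = errors and warnings
debugFlag  131
```

Note that the installer copies the cfg directory with onlyifdoesntexist/uninsneveruninstall above, so a hand-edited taos.cfg survives reinstallation and uninstall.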
diff --git a/source/client/src/TSDBJNIConnector.c b/source/client/src/TSDBJNIConnector.c
index 227c2fff18..38bbec24ce 100644
--- a/source/client/src/TSDBJNIConnector.c
+++ b/source/client/src/TSDBJNIConnector.c
@@ -757,7 +757,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_prepareStmtImp(J
   int32_t code = taos_stmt_prepare(pStmt, str, len);
   taosMemoryFreeClear(str);
   if (code != TSDB_CODE_SUCCESS) {
-    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+    jniError("prepareStmt jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
     return JNI_TDENGINE_ERROR;
   }
 
@@ -785,7 +785,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setBindTableNameI
 
   if (code != TSDB_CODE_SUCCESS) {
     (*env)->ReleaseStringUTFChars(env, jname, name);
-    jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
+    jniError("bindTableName jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
     return JNI_TDENGINE_ERROR;
   }
 
@@ -860,7 +860,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_setTableNameTagsI
   (*env)->ReleaseStringUTFChars(env, tableName, name);
 
   if (code != TSDB_CODE_SUCCESS) {
-    jniError("jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
+    jniError("tableNameTags jobj:%p, conn:%p, code:%s", jobj, tsconn, tstrerror(code));
     return JNI_TDENGINE_ERROR;
   }
   return JNI_SUCCESS;
@@ -926,7 +926,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_bindColDataImp(
   taosMemoryFreeClear(b);
 
   if (code != TSDB_CODE_SUCCESS) {
-    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+    jniError("bindColData jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
     return JNI_TDENGINE_ERROR;
   }
 
@@ -949,7 +949,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_addBatchImp(JNIEn
   int32_t code = taos_stmt_add_batch(pStmt);
   if (code != TSDB_CODE_SUCCESS) {
-    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+    jniError("add batch jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
     return JNI_TDENGINE_ERROR;
   }
 
@@ -973,7 +973,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeBatchImp(J
   int32_t code = taos_stmt_execute(pStmt);
   if (code != TSDB_CODE_SUCCESS) {
-    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+    jniError("execute batch jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
     return JNI_TDENGINE_ERROR;
   }
 
@@ -997,7 +997,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeStmt(JNIEnv
   int32_t code = taos_stmt_close(pStmt);
   if (code != TSDB_CODE_SUCCESS) {
-    jniError("jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
+    jniError("close stmt jobj:%p, conn:%p, code:%s", jobj, tscon, tstrerror(code));
     return JNI_TDENGINE_ERROR;
   }
 
diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c
index 812351e208..ff1b9322c9 100644
--- a/source/client/src/clientEnv.c
+++ b/source/client/src/clientEnv.c
@@ -126,7 +126,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) {
   rpcInit.numOfThreads = numOfThread;
   rpcInit.cfp = processMsgFromServer;
   rpcInit.rfp = clientRpcRfp;
-  rpcInit.tfp = clientRpcTfp;
+  // rpcInit.tfp = clientRpcTfp;
   rpcInit.sessions = 1024;
   rpcInit.connType = TAOS_CONN_CLIENT;
   rpcInit.user = (char *)user;
diff --git a/source/client/src/clientHb.c b/source/client/src/clientHb.c
index 06bd3f3887..9475d1b51e 100644
--- a/source/client/src/clientHb.c
+++ b/source/client/src/clientHb.c
@@ -286,6 +286,7 @@ static
int32_t hbAsyncCallBack(void *param, SDataBuf *pMsg, int32_t code) { if (pInst == NULL || NULL == *pInst) { taosThreadMutexUnlock(&appInfo.mutex); tscError("cluster not exist, key:%s", key); + taosMemoryFree(pMsg->pData); tFreeClientHbBatchRsp(&pRsp); return -1; } @@ -326,7 +327,13 @@ int32_t hbBuildQueryDesc(SQueryHbReqBasic *hbBasic, STscObj *pObj) { while (pIter != NULL) { int64_t *rid = pIter; SRequestObj *pRequest = acquireRequest(*rid); - if (NULL == pRequest || pRequest->killed) { + if (NULL == pRequest) { + pIter = taosHashIterate(pObj->pRequests, pIter); + continue; + } + + if (pRequest->killed) { + releaseRequest(*rid); pIter = taosHashIterate(pObj->pRequests, pIter); continue; } diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c index b85d2a67ac..4917a01c55 100644 --- a/source/client/src/clientImpl.c +++ b/source/client/src/clientImpl.c @@ -283,7 +283,7 @@ void asyncExecLocalCmd(SRequestObj* pRequest, SQuery* pQuery) { int32_t code = qExecCommand(pQuery->pRoot, &pRsp); if (TSDB_CODE_SUCCESS == code && NULL != pRsp) { - code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, false); + code = setQueryResultFromRsp(&pRequest->body.resInfo, pRsp, false, true); } SReqResultInfo* pResultInfo = &pRequest->body.resInfo; diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c index 6785390952..0d41d35721 100644 --- a/source/client/src/clientMain.c +++ b/source/client/src/clientMain.c @@ -182,6 +182,7 @@ void taos_free_result(TAOS_RES *res) { if (TD_RES_QUERY(res)) { SRequestObj *pRequest = (SRequestObj *)res; + tscDebug("0x%" PRIx64 " taos_free_result start to free query", pRequest->requestId); destroyRequest(pRequest); } else if (TD_RES_TMQ(res)) { SMqRspObj *pRsp = (SMqRspObj *)res; @@ -482,7 +483,7 @@ void taos_stop_query(TAOS_RES *res) { int32_t numOfFields = taos_num_fields(pRequest); // It is not a query, no need to stop. 
if (numOfFields == 0) { - tscDebug("request %" PRIx64 " no need to be killed since not query", pRequest->requestId); + tscDebug("request 0x%" PRIx64 " no need to be killed since not query", pRequest->requestId); return; } @@ -657,12 +658,17 @@ typedef struct SqlParseWrapper { SQuery *pQuery; } SqlParseWrapper; +static void destoryTablesReq(void *p) { + STablesReq *pRes = (STablesReq *)p; + taosArrayDestroy(pRes->pTables); +} + static void destorySqlParseWrapper(SqlParseWrapper *pWrapper) { taosArrayDestroy(pWrapper->catalogReq.pDbVgroup); taosArrayDestroy(pWrapper->catalogReq.pDbCfg); taosArrayDestroy(pWrapper->catalogReq.pDbInfo); - taosArrayDestroy(pWrapper->catalogReq.pTableMeta); - taosArrayDestroy(pWrapper->catalogReq.pTableHash); + taosArrayDestroyEx(pWrapper->catalogReq.pTableMeta, destoryTablesReq); + taosArrayDestroyEx(pWrapper->catalogReq.pTableHash, destoryTablesReq); taosArrayDestroy(pWrapper->catalogReq.pUdf); taosArrayDestroy(pWrapper->catalogReq.pIndex); taosArrayDestroy(pWrapper->catalogReq.pUser); @@ -847,7 +853,7 @@ static void fetchCallback(void *pResult, void *param, int32_t code) { } pRequest->code = - setQueryResultFromRsp(pResultInfo, (SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, false); + setQueryResultFromRsp(pResultInfo, (SRetrieveTableRsp *)pResultInfo->pData, pResultInfo->convertUcs4, true); if (pRequest->code != TSDB_CODE_SUCCESS) { pResultInfo->numOfRows = 0; pRequest->code = code; diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c index 4217cf08b3..0c4cf23c4e 100644 --- a/source/client/src/clientMsgHandler.c +++ b/source/client/src/clientMsgHandler.c @@ -389,7 +389,7 @@ int32_t processShowVariablesRsp(void* param, SDataBuf* pMsg, int32_t code) { code = buildShowVariablesRsp(rsp.variables, &pRes); } if (TSDB_CODE_SUCCESS == code) { - code = setQueryResultFromRsp(&pRequest->body.resInfo, pRes, false, false); + code = setQueryResultFromRsp(&pRequest->body.resInfo, pRes, false, true); } tFreeSShowVariablesRsp(&rsp); diff --git a/source/common/src/systable.c b/source/common/src/systable.c index edd718090f..be76a1b453 100644 --- a/source/common/src/systable.c +++ b/source/common/src/systable.c @@ -24,7 +24,7 @@ #define SYSTABLE_SCH_COL_NAME_LEN ((TSDB_COL_NAME_LEN - 1) + VARSTR_HEADER_SIZE) static const SSysDbTableSchema dnodesSchema[] = { - {.name = "id", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "endpoint", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, {.name = "support_vnodes", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, @@ -66,7 +66,7 @@ static const SSysDbTableSchema bnodesSchema[] = { }; static const SSysDbTableSchema clusterSchema[] = { - {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, }; @@ -74,7 +74,7 @@ static const SSysDbTableSchema clusterSchema[] = { static const SSysDbTableSchema userDBSchema[] = { {.name = "name", .bytes = SYSTABLE_SCH_DB_NAME_LEN, .type = TSDB_DATA_TYPE_VARCHAR}, {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, - {.name = "vgroups", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT}, + {.name = "vgroups", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = 
"ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, {.name = "replica", .bytes = 1, .type = TSDB_DATA_TYPE_TINYINT}, {.name = "strict", .bytes = TSDB_DB_STRICT_STR_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR}, @@ -97,7 +97,7 @@ static const SSysDbTableSchema userDBSchema[] = { {.name = "wal_retention_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, {.name = "wal_retention_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, {.name = "wal_roll_period", .bytes = 4, .type = TSDB_DATA_TYPE_INT}, - {.name = "wal_seg_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, + {.name = "wal_segment_size", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT}, }; static const SSysDbTableSchema userFuncSchema[] = { @@ -284,7 +284,7 @@ static const SSysDbTableSchema consumerSchema[] = { {.name = "client_id", .bytes = SYSTABLE_SCH_TABLE_NAME_LEN, .type = TSDB_DATA_TYPE_BINARY}, {.name = "status", .bytes = 20 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, {.name = "topics", .bytes = TSDB_TOPIC_FNAME_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, - {.name = "end_point", .bytes = TSDB_EP_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY}, + /*{.name = "end_point", .bytes = TSDB_IPv4ADDR_LEN + 6 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},*/ {.name = "up_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "subscribe_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, {.name = "rebalance_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP}, @@ -353,11 +353,19 @@ static const SSysTableMeta perfsMeta[] = { {TSDB_PERFS_TABLE_APPS, appSchema, tListLen(appSchema)}}; void getInfosDbMeta(const SSysTableMeta** pInfosTableMeta, size_t* size) { - *pInfosTableMeta = infosMeta; - *size = tListLen(infosMeta); + if (pInfosTableMeta) { + *pInfosTableMeta = infosMeta; + } + if (size) { + *size = tListLen(infosMeta); + } } void getPerfDbMeta(const SSysTableMeta** pPerfsTableMeta, size_t* size) { - *pPerfsTableMeta = perfsMeta; - *size = tListLen(perfsMeta); + if (pPerfsTableMeta) { + *pPerfsTableMeta = perfsMeta; + } + if (size) { + *size = tListLen(perfsMeta); + } } diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 302874962e..bf33976c08 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -1874,21 +1874,20 @@ char* dumpBlockData(SSDataBlock* pDataBlock, const char* flag, char** pDataBuf) * @brief TODO: Assume that the final generated result it less than 3M * * @param pReq - * @param pDataBlocks + * @param pDataBlock * @param vgId - * @param suid // TODO: check with Liao whether suid response is reasonable + * @param suid * - * TODO: colId should be set */ -int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks, STSchema* pTSchema, int32_t vgId, +int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SSDataBlock* pDataBlock, STSchema* pTSchema, int32_t vgId, tb_uid_t suid) { - int32_t sz = taosArrayGetSize(pDataBlocks); int32_t bufSize = sizeof(SSubmitReq); + int32_t sz = 1; for (int32_t i = 0; i < sz; ++i) { - SDataBlockInfo* pBlkInfo = &((SSDataBlock*)taosArrayGet(pDataBlocks, i))->info; + const SDataBlockInfo* pBlkInfo = &pDataBlock->info; - int32_t numOfCols = taosArrayGetSize(pDataBlocks); - bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(numOfCols)); + int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); + bufSize += pBlkInfo->rows * (TD_ROW_HEAD_LEN + pBlkInfo->rowSize + BitmapLen(colNum)); bufSize += sizeof(SSubmitBlk); } @@ -1905,7 +1904,6 
@@ int32_t buildSubmitReqFromDataBlock(SSubmitReq** pReq, const SArray* pDataBlocks tdSRowInit(&rb, pTSchema->version); for (int32_t i = 0; i < sz; ++i) { - SSDataBlock* pDataBlock = taosArrayGet(pDataBlocks, i); int32_t colNum = taosArrayGetSize(pDataBlock->pDataBlock); int32_t rows = pDataBlock->info.rows; // int32_t rowSize = pDataBlock->info.rowSize; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index f836cd76ac..8d3deff495 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -355,7 +355,11 @@ static int32_t taosAddSystemCfg(SConfig *pCfg) { static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddDir(pCfg, "dataDir", tsDataDir, 0) != 0) return -1; if (cfgAddFloat(pCfg, "minimalDataDirGB", 2.0f, 0.001f, 10000000, 0) != 0) return -1; + + tsNumOfSupportVnodes = tsNumOfCores * 2; + tsNumOfSupportVnodes = TMAX(tsNumOfSupportVnodes, 2); if (cfgAddInt32(pCfg, "supportVnodes", tsNumOfSupportVnodes, 0, 4096, 0) != 0) return -1; + if (cfgAddInt32(pCfg, "maxShellConns", tsMaxShellConns, 10, 50000000, 0) != 0) return -1; if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, 0) != 0) return -1; if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 10, 1000000, 0) != 0) return -1; @@ -401,7 +405,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1); if (cfgAddInt32(pCfg, "numOfVnodeWriteThreads", tsNumOfVnodeWriteThreads, 1, 1024, 0) != 0) return -1; - tsNumOfVnodeSyncThreads = tsNumOfCores; + // tsNumOfVnodeSyncThreads = tsNumOfCores; + tsNumOfVnodeSyncThreads = 32; tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 1); if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1; @@ -422,7 +427,7 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "numOfSnodeUniqueThreads", tsNumOfSnodeUniqueThreads, 2, 1024, 0) != 0) return -1; tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; - tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, TSDB_MAX_MSG_SIZE * 10000L); + tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, 0) != 0) return -1; diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c index eca61dd960..1b799b1e5e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c @@ -347,6 +347,7 @@ SArray *vmGetMsgHandles() { if (dmSetMgmtHandle(pArray, TDMT_VND_TABLES_META, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_CANCEL_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_SCH_DROP_TASK, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; + if (dmSetMgmtHandle(pArray, TDMT_VND_FETCH_RSMA, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_CREATE_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_DROP_TTL_TABLE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_STB, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER; diff --git a/source/dnode/mnode/impl/inc/mndDb.h b/source/dnode/mnode/impl/inc/mndDb.h index 38a5ecd273..cea0a43b61 100644 --- a/source/dnode/mnode/impl/inc/mndDb.h +++ b/source/dnode/mnode/impl/inc/mndDb.h @@ -28,6 +28,7 @@ SDbObj 
*mndAcquireDb(SMnode *pMnode, const char *db); void mndReleaseDb(SMnode *pMnode, SDbObj *pDb); int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs, void **ppRsp, int32_t *pRspLen); int32_t mndExtractDbInfo(SMnode *pMnode, SDbObj *pDb, SUseDbRsp *pRsp, const SUseDbReq *pReq); +bool mndIsDbReady(SMnode *pMnode, SDbObj *pDb); const char *mndGetDbStr(const char *src); diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h index c4da9b5c3d..455da6a40e 100644 --- a/source/dnode/mnode/impl/inc/mndDef.h +++ b/source/dnode/mnode/impl/inc/mndDef.h @@ -162,8 +162,9 @@ typedef struct { int64_t lastExecTime; int32_t lastAction; int32_t lastErrorNo; - tmsg_t lastMsgType; SEpSet lastEpset; + tmsg_t lastMsgType; + tmsg_t originRpcType; char dbname1[TSDB_TABLE_FNAME_LEN]; char dbname2[TSDB_TABLE_FNAME_LEN]; int32_t startFunc; diff --git a/source/dnode/mnode/impl/src/mndConsumer.c b/source/dnode/mnode/impl/src/mndConsumer.c index 3cdc53cd2c..614348c209 100644 --- a/source/dnode/mnode/impl/src/mndConsumer.c +++ b/source/dnode/mnode/impl/src/mndConsumer.c @@ -942,8 +942,8 @@ static int32_t mndRetrieveConsumer(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock * } // end point - pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->ep, true); + /*pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);*/ + /*colDataAppend(pColInfo, numOfRows, (const char *)&pConsumer->ep, true);*/ // up time pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); diff --git a/source/dnode/mnode/impl/src/mndDb.c b/source/dnode/mnode/impl/src/mndDb.c index 0120e1146d..89ea7abc23 100644 --- a/source/dnode/mnode/impl/src/mndDb.c +++ b/source/dnode/mnode/impl/src/mndDb.c @@ -1495,8 +1495,34 @@ static const char *getCacheModelStr(int8_t cacheModel) { return "unknown"; } -static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, int32_t rows, int64_t numOfTables, - bool sysDb, ESdbStatus objStatus, bool sysinfo) { +bool mndIsDbReady(SMnode *pMnode, SDbObj *pDb) { + if (pDb->cfg.replications == 1) return true; + + SSdb *pSdb = pMnode->pSdb; + void *pIter = NULL; + bool isReady = true; + while (1) { + SVgObj *pVgroup = NULL; + pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup); + if (pIter == NULL) break; + + if (pVgroup->dbUid == pDb->uid && pVgroup->replica > 1) { + bool hasLeader = false; + for (int32_t i = 0; i < pVgroup->replica; ++i) { + if (pVgroup->vnodeGid[i].role == TAOS_SYNC_STATE_LEADER) { + hasLeader = true; + } + } + if (!hasLeader) isReady = false; + } + sdbRelease(pSdb, pVgroup); + } + + return isReady; +} + +static void mndDumpDbInfoData(SMnode *pMnode, SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, int32_t rows, + int64_t numOfTables, bool sysDb, ESdbStatus objStatus, bool sysinfo) { int32_t cols = 0; int32_t bytes = pShow->pMeta->pSchemas[cols].bytes; char *buf = taosMemoryMalloc(bytes); @@ -1509,8 +1535,16 @@ static void dumpDbInfoData(SSDataBlock *pBlock, SDbObj *pDb, SShowObj *pShow, in } char *statusStr = "ready"; - if (objStatus == SDB_STATUS_CREATING) statusStr = "creating"; - if (objStatus == SDB_STATUS_DROPPING) statusStr = "dropping"; + if (objStatus == SDB_STATUS_CREATING) { + statusStr = "creating"; + } else if (objStatus == SDB_STATUS_DROPPING) { + statusStr = "dropping"; + } else { + if (!sysDb && !mndIsDbReady(pMnode, pDb)) { + statusStr = "unsynced"; + } + } + char statusVstr[24] = {0}; STR_WITH_SIZE_TO_VARSTR(statusVstr, statusStr, strlen(statusStr)); @@ 
-1672,7 +1706,10 @@ static void setPerfSchemaDbCfg(SDbObj *pDbObj) { static bool mndGetTablesOfDbFp(SMnode *pMnode, void *pObj, void *p1, void *p2, void *p3) { SVgObj *pVgroup = pObj; int32_t *numOfTables = p1; - *numOfTables += pVgroup->numOfTables; + int64_t uid = *(int64_t*)p2; + if (pVgroup->dbUid == uid) { + *numOfTables += pVgroup->numOfTables; + } return true; } @@ -1691,13 +1728,17 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc if (!pShow->sysDbRsp) { SDbObj infoschemaDb = {0}; setInformationSchemaDbCfg(&infoschemaDb); - dumpDbInfoData(pBlock, &infoschemaDb, pShow, numOfRows, 14, true, 0, 1); + size_t numOfTables = 0; + getInfosDbMeta(NULL, &numOfTables); + mndDumpDbInfoData(pMnode, pBlock, &infoschemaDb, pShow, numOfRows, numOfTables, true, 0, 1); numOfRows += 1; SDbObj perfschemaDb = {0}; setPerfSchemaDbCfg(&perfschemaDb); - dumpDbInfoData(pBlock, &perfschemaDb, pShow, numOfRows, 3, true, 0, 1); + numOfTables = 0; + getPerfDbMeta(NULL, &numOfTables); + mndDumpDbInfoData(pMnode, pBlock, &perfschemaDb, pShow, numOfRows, numOfTables, true, 0, 1); numOfRows += 1; pShow->sysDbRsp = true; @@ -1709,8 +1750,8 @@ static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBloc if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_READ_OR_WRITE_DB, pDb) == 0) { int32_t numOfTables = 0; - sdbTraverse(pSdb, SDB_VGROUP, mndGetTablesOfDbFp, &numOfTables, NULL, NULL); - dumpDbInfoData(pBlock, pDb, pShow, numOfRows, numOfTables, false, objStatus, sysinfo); + sdbTraverse(pSdb, SDB_VGROUP, mndGetTablesOfDbFp, &numOfTables, &pDb->uid, NULL); + mndDumpDbInfoData(pMnode, pBlock, pDb, pShow, numOfRows, numOfTables, false, objStatus, sysinfo); numOfRows++; } diff --git a/source/dnode/mnode/impl/src/mndSma.c b/source/dnode/mnode/impl/src/mndSma.c index 6411d06081..006d9e749c 100644 --- a/source/dnode/mnode/impl/src/mndSma.c +++ b/source/dnode/mnode/impl/src/mndSma.c @@ -489,7 +489,7 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea smaObj.uid = mndGenerateUid(pCreate->name, TSDB_TABLE_FNAME_LEN); ASSERT(smaObj.uid != 0); char resultTbName[TSDB_TABLE_FNAME_LEN + 16] = {0}; - snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "td.tsma.rst.tb.%s", pCreate->name); + snprintf(resultTbName, TSDB_TABLE_FNAME_LEN + 16, "%s_td_tsma_rst_tb",pCreate->name); memcpy(smaObj.dstTbName, resultTbName, TSDB_TABLE_FNAME_LEN); smaObj.dstTbUid = mndGenerateUid(smaObj.dstTbName, TSDB_TABLE_FNAME_LEN); smaObj.stbUid = pStb->uid; @@ -603,6 +603,9 @@ static int32_t mndCreateSma(SMnode *pMnode, SRpcMsg *pReq, SMCreateSmaReq *pCrea if (mndPersistStream(pMnode, pTrans, &streamObj) != 0) goto _OVER; if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER; + mDebug("mndSma: create sma index %s %" PRIi64 " on stb:%" PRIi64 ", dstSuid:%" PRIi64 " dstTb:%s dstVg:%d", + pCreate->name, smaObj.uid, smaObj.stbUid, smaObj.dstTbUid, smaObj.dstTbName, smaObj.dstVgId); + code = 0; _OVER: diff --git a/source/dnode/mnode/impl/src/mndTrans.c b/source/dnode/mnode/impl/src/mndTrans.c index b2a0e6aac8..17b4336465 100644 --- a/source/dnode/mnode/impl/src/mndTrans.c +++ b/source/dnode/mnode/impl/src/mndTrans.c @@ -124,8 +124,7 @@ static SSdbRaw *mndTransActionEncode(STrans *pTrans) { SDB_SET_INT8(pRaw, dataPos, pTrans->exec, _OVER) SDB_SET_INT8(pRaw, dataPos, pTrans->oper, _OVER) SDB_SET_INT8(pRaw, dataPos, 0, _OVER) - SDB_SET_INT8(pRaw, dataPos, 0, _OVER) - SDB_SET_INT8(pRaw, dataPos, 0, _OVER) + SDB_SET_INT16(pRaw, dataPos, pTrans->originRpcType, _OVER) 
SDB_SET_INT64(pRaw, dataPos, pTrans->createdTime, _OVER) SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_TABLE_FNAME_LEN, _OVER) SDB_SET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_TABLE_FNAME_LEN, _OVER) @@ -282,13 +281,12 @@ static SSdbRow *mndTransActionDecode(SSdbRaw *pRaw) { SDB_GET_INT8(pRaw, dataPos, &exec, _OVER) SDB_GET_INT8(pRaw, dataPos, &oper, _OVER) SDB_GET_INT8(pRaw, dataPos, &reserved, _OVER) - SDB_GET_INT8(pRaw, dataPos, &reserved, _OVER) - SDB_GET_INT8(pRaw, dataPos, &reserved, _OVER) pTrans->stage = stage; pTrans->policy = policy; pTrans->conflict = conflict; pTrans->exec = exec; pTrans->oper = oper; + SDB_GET_INT16(pRaw, dataPos, &pTrans->originRpcType, _OVER) SDB_GET_INT64(pRaw, dataPos, &pTrans->createdTime, _OVER) SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname1, TSDB_TABLE_FNAME_LEN, _OVER) SDB_GET_BINARY(pRaw, dataPos, pTrans->dbname2, TSDB_TABLE_FNAME_LEN, _OVER) @@ -611,6 +609,7 @@ STrans *mndTransCreate(SMnode *pMnode, ETrnPolicy policy, ETrnConflct conflict, if (pReq != NULL) { taosArrayPush(pTrans->pRpcArray, &pReq->info); + pTrans->originRpcType = pReq->msgType; } mTrace("trans:%d, local object is created, data:%p", pTrans->id, pTrans); return pTrans; @@ -910,6 +909,23 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) { } } + if (pTrans->originRpcType == TDMT_MND_CREATE_DB) { + mDebug("trans:%d, origin msgtype:%s", pTrans->id, TMSG_INFO(pTrans->originRpcType)); + SDbObj *pDb = mndAcquireDb(pMnode, pTrans->dbname1); + if (pDb != NULL) { + for (int32_t j = 0; j < 12; j++) { + bool ready = mndIsDbReady(pMnode, pDb); + if (!ready) { + mDebug("trans:%d, db:%s not ready yet, wait %d times", pTrans->id, pTrans->dbname1, j); + taosMsleep(1000); + } else { + break; + } + } + } + mndReleaseDb(pMnode, pDb); + } + tmsgSendRsp(&rspMsg); } } diff --git a/source/dnode/vnode/src/inc/sma.h b/source/dnode/vnode/src/inc/sma.h index 1aee08027c..944d7759b2 100644 --- a/source/dnode/vnode/src/inc/sma.h +++ b/source/dnode/vnode/src/inc/sma.h @@ -115,24 +115,29 @@ struct SSmaStat { #define RSMA_FS_LOCK(r) (&(r)->lock) struct SRSmaInfoItem { - void *taskInfo; // qTaskInfo_t - int64_t refId; - tmr_h tmrId; - int32_t maxDelay; int8_t level; int8_t triggerStat; + int32_t maxDelay; + tmr_h tmrId; }; struct SRSmaInfo { STSchema *pTSchema; int64_t suid; + int64_t refId; // refId of SRSmaStat int8_t delFlag; T_REF_DECLARE() SRSmaInfoItem items[TSDB_RETENTION_L2]; + void *taskInfo[TSDB_RETENTION_L2]; // qTaskInfo_t + void *iTaskInfo[TSDB_RETENTION_L2]; // immutable }; -#define RSMA_INFO_HEAD_LEN 24 -#define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1) -#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1) + +#define RSMA_INFO_HEAD_LEN 32 +#define RSMA_INFO_IS_DEL(r) ((r)->delFlag == 1) +#define RSMA_INFO_SET_DEL(r) ((r)->delFlag = 1) +#define RSMA_INFO_QTASK(r, i) ((r)->taskInfo[i]) +#define RSMA_INFO_IQTASK(r, i) ((r)->iTaskInfo[i]) +#define RSMA_INFO_ITEM(r, i) (&(r)->items[i]) enum { TASK_TRIGGER_STAT_INIT = 0, @@ -168,8 +173,8 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat); int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo); int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo); -void *tdAcquireSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln); -int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln); +void *tdAcquireSmaRef(int32_t rsetId, int64_t refId); +int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId); int32_t tdCheckAndInitSmaEnv(SSma *pSma, int8_t smaType); @@ -223,12 +228,11 @@ static FORCE_INLINE void 
tdSmaStatSetDropped(STSmaStat *pTStat) { void tdRSmaQTaskInfoGetFileName(int32_t vid, int64_t version, char *outputName); void tdRSmaQTaskInfoGetFullName(int32_t vid, int64_t version, const char *path, char *outputName); -int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc); +int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc); void tdFreeQTaskInfo(qTaskInfo_t *taskHandle, int32_t vgId, int32_t level); static int32_t tdDestroySmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeSmaState(SSmaStat *pSmaStat, int8_t smaType); void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree); -void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid); int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash); int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, const char *tbName); diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h index bce2b642e8..7b22e061f0 100644 --- a/source/dnode/vnode/src/inc/vnodeInt.h +++ b/source/dnode/vnode/src/inc/vnodeInt.h @@ -97,7 +97,7 @@ typedef struct STbUidStore STbUidStore; int metaOpen(SVnode* pVnode, SMeta** ppMeta); int metaClose(SMeta* pMeta); -int metaBegin(SMeta* pMeta); +int metaBegin(SMeta* pMeta, int8_t fromSys); int metaCommit(SMeta* pMeta); int metaCreateSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); int metaAlterSTable(SMeta* pMeta, int64_t version, SVCreateStbReq* pReq); @@ -187,6 +187,7 @@ int32_t smaAsyncPreCommit(SSma* pSma); int32_t smaAsyncCommit(SSma* pSma); int32_t smaAsyncPostCommit(SSma* pSma); int32_t smaDoRetention(SSma* pSma, int64_t now); +int32_t smaProcessFetch(SSma* pSma, void* pMsg); int32_t tdProcessTSmaCreate(SSma* pSma, int64_t version, const char* msg); int32_t tdProcessTSmaInsert(SSma* pSma, int64_t indexUid, const char* msg); diff --git a/source/dnode/vnode/src/meta/metaCommit.c b/source/dnode/vnode/src/meta/metaCommit.c index b4987aea2b..85ed40970c 100644 --- a/source/dnode/vnode/src/meta/metaCommit.c +++ b/source/dnode/vnode/src/meta/metaCommit.c @@ -19,9 +19,12 @@ static FORCE_INLINE void *metaMalloc(void *pPool, size_t size) { return vnodeBuf static FORCE_INLINE void metaFree(void *pPool, void *p) { vnodeBufPoolFree((SVBufPool *)pPool, p); } // begin a meta txn -int metaBegin(SMeta *pMeta) { - tdbTxnOpen(&pMeta->txn, 0, metaMalloc, metaFree, pMeta->pVnode->inUse, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); - +int metaBegin(SMeta *pMeta, int8_t fromSys) { + if (fromSys) { + tdbTxnOpen(&pMeta->txn, 0, tdbDefaultMalloc, tdbDefaultFree, NULL, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); + } else { + tdbTxnOpen(&pMeta->txn, 0, metaMalloc, metaFree, pMeta->pVnode->inUse, TDB_TXN_WRITE | TDB_TXN_READ_UNCOMMITTED); + } if (tdbBegin(pMeta->pEnv, &pMeta->txn) < 0) { return -1; } diff --git a/source/dnode/vnode/src/meta/metaQuery.c b/source/dnode/vnode/src/meta/metaQuery.c index 797a60baf6..eed0ae5e14 100644 --- a/source/dnode/vnode/src/meta/metaQuery.c +++ b/source/dnode/vnode/src/meta/metaQuery.c @@ -481,7 +481,7 @@ int64_t metaGetTbNum(SMeta *pMeta) { /* int64_t num = 0; */ /* vnodeGetAllCtbNum(pMeta->pVnode, &num); */ - return pMeta->pVnode->config.vndStats.numOfCTables; + return pMeta->pVnode->config.vndStats.numOfCTables + pMeta->pVnode->config.vndStats.numOfNTables; } // N.B. 
Called by statusReq per second diff --git a/source/dnode/vnode/src/meta/metaSnapshot.c b/source/dnode/vnode/src/meta/metaSnapshot.c index e01f0e7c01..973c381407 100644 --- a/source/dnode/vnode/src/meta/metaSnapshot.c +++ b/source/dnode/vnode/src/meta/metaSnapshot.c @@ -145,7 +145,7 @@ int32_t metaSnapWriterOpen(SMeta* pMeta, int64_t sver, int64_t ever, SMetaSnapWr pWriter->sver = sver; pWriter->ever = ever; - metaBegin(pMeta); + metaBegin(pMeta, 1); *ppWriter = pWriter; return code; diff --git a/source/dnode/vnode/src/meta/metaTable.c b/source/dnode/vnode/src/meta/metaTable.c index fdf2da5558..7427f79509 100644 --- a/source/dnode/vnode/src/meta/metaTable.c +++ b/source/dnode/vnode/src/meta/metaTable.c @@ -381,6 +381,8 @@ int metaCreateTable(SMeta *pMeta, int64_t version, SVCreateTbReq *pReq) { terrno = TSDB_CODE_TDB_TABLE_ALREADY_EXIST; metaReaderClear(&mr); return -1; + } else if (terrno == TSDB_CODE_PAR_TABLE_NOT_EXIST) { + terrno = TSDB_CODE_SUCCESS; } metaReaderClear(&mr); diff --git a/source/dnode/vnode/src/sma/smaCommit.c b/source/dnode/vnode/src/sma/smaCommit.c index 3c88b6f5cb..373cfdfb47 100644 --- a/source/dnode/vnode/src/sma/smaCommit.c +++ b/source/dnode/vnode/src/sma/smaCommit.c @@ -308,12 +308,12 @@ static int32_t tdProcessRSmaSyncPostCommitImpl(SSma *pSma) { * @return int32_t */ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { - SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); - if (!pSmaEnv) { + SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); + if (!pEnv) { return TSDB_CODE_SUCCESS; } - SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); + SSmaStat *pStat = SMA_ENV_STAT(pEnv); SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); // step 1: set rsma stat @@ -337,18 +337,26 @@ static int32_t tdProcessRSmaAsyncPreCommitImpl(SSma *pSma) { } // step 3: swap rsmaInfoHash and iRsmaInfoHash - ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat)); + // lock + taosWLockLatch(SMA_ENV_LOCK(pEnv)); + ASSERT(RSMA_INFO_HASH(pRSmaStat)); + ASSERT(!RSMA_IMU_INFO_HASH(pRSmaStat)); RSMA_IMU_INFO_HASH(pRSmaStat) = RSMA_INFO_HASH(pRSmaStat); RSMA_INFO_HASH(pRSmaStat) = taosHashInit(RSMA_TASK_INFO_HASH_SLOT, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK); if (!RSMA_INFO_HASH(pRSmaStat)) { + // unlock + taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); smaError("vgId:%d, rsma async commit failed since %s", SMA_VID(pSma), terrstr()); return TSDB_CODE_FAILED; } + // unlock + taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); + // step 4: others pRSmaStat->commitAppliedVer = pSma->pVnode->state.applied; @@ -383,26 +391,52 @@ static int32_t tdProcessRSmaAsyncCommitImpl(SSma *pSma) { * @return int32_t */ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { - SSmaEnv *pSmaEnv = SMA_RSMA_ENV(pSma); - if (!pSmaEnv) { + SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); + if (!pEnv) { return TSDB_CODE_SUCCESS; } - SSmaStat *pStat = SMA_ENV_STAT(pSmaEnv); + SSmaStat *pStat = SMA_ENV_STAT(pEnv); SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat); // step 1: merge rsmaInfoHash and iRsmaInfoHash - taosWLockLatch(SMA_ENV_LOCK(pSmaEnv)); - + // lock + taosWLockLatch(SMA_ENV_LOCK(pEnv)); +#if 0 if (taosHashGetSize(RSMA_INFO_HASH(pRSmaStat)) <= 0) { - // TODO: optimization - just switch the hash pointer if rsmaInfoHash is empty - } - + // just switch the hash pointer if rsmaInfoHash is empty + if (taosHashGetSize(RSMA_IMU_INFO_HASH(pRSmaStat)) > 0) { + SHashObj *infoHash = RSMA_INFO_HASH(pRSmaStat); + RSMA_INFO_HASH(pRSmaStat) = RSMA_IMU_INFO_HASH(pRSmaStat); + RSMA_IMU_INFO_HASH(pRSmaStat) = infoHash; + } + } else { +#endif +#if 1 void *pIter = 
taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), NULL); while (pIter) { tb_uid_t *pSuid = (tb_uid_t *)taosHashGetKey(pIter, NULL); if (!taosHashGet(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t))) { + SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)pIter; + if (RSMA_INFO_IS_DEL(pRSmaInfo)) { + int32_t refVal = T_REF_VAL_GET(pRSmaInfo); + if (refVal == 0) { + tdFreeRSmaInfo(pSma, pRSmaInfo, true); + smaDebug( + "vgId:%d, rsma async post commit, free rsma info since already deleted and ref is 0 for " + "table:%" PRIi64, + SMA_VID(pSma), *pSuid); + } else { + smaDebug( + "vgId:%d, rsma async post commit, not free rsma info since ref is %d although already deleted for " + "table:%" PRIi64, + SMA_VID(pSma), refVal, *pSuid); + } + + pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter); + continue; + } taosHashPut(RSMA_INFO_HASH(pRSmaStat), pSuid, sizeof(tb_uid_t), pIter, sizeof(pIter)); smaDebug("vgId:%d, rsma async post commit, migrated from iRsmaInfoHash for table:%" PRIi64, SMA_VID(pSma), *pSuid); @@ -416,11 +450,14 @@ static int32_t tdProcessRSmaAsyncPostCommitImpl(SSma *pSma) { pIter = taosHashIterate(RSMA_IMU_INFO_HASH(pRSmaStat), pIter); } +#endif + // } taosHashCleanup(RSMA_IMU_INFO_HASH(pRSmaStat)); RSMA_IMU_INFO_HASH(pRSmaStat) = NULL; - taosWUnLockLatch(SMA_ENV_LOCK(pSmaEnv)); + // unlock + taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); // step 2: cleanup outdated qtaskinfo files tdCleanupQTaskInfoFiles(pSma, pRSmaStat); diff --git a/source/dnode/vnode/src/sma/smaEnv.c b/source/dnode/vnode/src/sma/smaEnv.c index 4b831225bc..ccb6ad3a72 100644 --- a/source/dnode/vnode/src/sma/smaEnv.c +++ b/source/dnode/vnode/src/sma/smaEnv.c @@ -17,7 +17,7 @@ typedef struct SSmaStat SSmaStat; -#define SMA_MGMT_REF_NUM 10240 +#define SMA_MGMT_REF_NUM 10240 extern SSmaMgmt smaMgmt; @@ -171,7 +171,7 @@ int32_t tdUnRefSmaStat(SSma *pSma, SSmaStat *pStat) { int32_t tdRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) { if (!pRSmaInfo) return 0; - + int ref = T_REF_INC(pRSmaInfo); smaDebug("vgId:%d, ref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref); return 0; @@ -183,9 +183,6 @@ int32_t tdUnRefRSmaInfo(SSma *pSma, SRSmaInfo *pRSmaInfo) { int ref = T_REF_DEC(pRSmaInfo); smaDebug("vgId:%d, unref rsma info:%p, val:%d", SMA_VID(pSma), pRSmaInfo, ref); - if (ref == 0) { - tdRemoveRSmaInfoBySuid(pSma, pRSmaInfo->suid); - } return 0; } diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 1c6d65c4d8..662558529d 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ b/source/dnode/vnode/src/sma/smaRollup.c @@ -32,21 +32,21 @@ static int32_t tdUidStorePut(STbUidStore *pStore, tb_uid_t suid, tb_uid_t *ui static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids); static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat *pStat, SRSmaInfo *pRSmaInfo, int8_t idx); -static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfoItem *rsmaItem, - STSchema *pTSchema, tb_uid_t suid, int8_t level); -static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid); -static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid, SRSmaStat *pStat, - int8_t blkType); +static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid, + int8_t level); +static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid); +static void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo); +static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, 
qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema, + int64_t suid, int8_t blkType); static void tdRSmaFetchTrigger(void *param, void *tmrId); - -static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile); -static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish); -static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter); -static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem); - -static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables); -static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTaskFileVer); -static int32_t tdRSmaRestoreTSDataReload(SSma *pSma); +static int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level); +static int32_t tdRSmaQTaskInfoIterInit(SRSmaQTaskInfoIter *pIter, STFile *pTFile); +static int32_t tdRSmaQTaskInfoIterNextBlock(SRSmaQTaskInfoIter *pIter, bool *isFinish); +static int32_t tdRSmaQTaskInfoRestore(SSma *pSma, int8_t type, SRSmaQTaskInfoIter *pIter); +static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem *infoItem); +static int32_t tdRSmaRestoreQTaskInfoInit(SSma *pSma, int64_t *nTables); +static int32_t tdRSmaRestoreQTaskInfoReload(SSma *pSma, int8_t type, int64_t qTaskFileVer); +static int32_t tdRSmaRestoreTSDataReload(SSma *pSma); static SRSmaInfo *tdGetRSmaInfoByItem(SRSmaInfoItem *pItem) { // adapt accordingly if definition of SRSmaInfo update @@ -115,17 +115,26 @@ void *tdFreeRSmaInfo(SSma *pSma, SRSmaInfo *pInfo, bool isDeepFree) { if (pInfo) { for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { SRSmaInfoItem *pItem = &pInfo->items[i]; - if (pItem->taskInfo) { - if (isDeepFree && pItem->tmrId) { - smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pInfo->suid, - pItem->tmrId, i + 1); - taosTmrStopA(&pItem->tmrId); - } - tdFreeQTaskInfo(&pItem->taskInfo, SMA_VID(pSma), i + 1); + + if (isDeepFree && pItem->tmrId) { + smaDebug("vgId:%d, stop fetch timer %p for table %" PRIi64 " level %d", SMA_VID(pSma), pInfo->suid, + pItem->tmrId, i + 1); + taosTmrStopA(&pItem->tmrId); + } + + if (isDeepFree && pInfo->taskInfo[i]) { + tdFreeQTaskInfo(&pInfo->taskInfo[i], SMA_VID(pSma), i + 1); } else { smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty taskInfo", SMA_VID(pSma), pInfo->suid, i + 1); } + + if (pInfo->iTaskInfo[i]) { + tdFreeQTaskInfo(&pInfo->iTaskInfo[i], SMA_VID(pSma), i + 1); + } else { + smaDebug("vgId:%d, table %" PRIi64 " no need to destroy rsma info level %d since empty iTaskInfo", + SMA_VID(pSma), pInfo->suid, i + 1); + } } if (isDeepFree) { taosMemoryFreeClear(pInfo->pTSchema); @@ -155,7 +164,12 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids) return TSDB_CODE_FAILED; } - pRSmaInfo = tdGetRSmaInfoBySuid(pSma, *suid); + if (!taosArrayGetSize(tbUids)) { + smaDebug("vgId:%d, no need to update tbUidList for suid:%" PRIi64 " since Empty tbUids", SMA_VID(pSma), *suid); + return TSDB_CODE_SUCCESS; + } + + pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, *suid); if (!pRSmaInfo) { smaError("vgId:%d, failed to get rsma info for uid:%" PRIi64, SMA_VID(pSma), *suid); @@ -163,26 +177,21 @@ static int32_t tdUpdateTbUidListImpl(SSma *pSma, tb_uid_t *suid, SArray *tbUids) return TSDB_CODE_FAILED; } - if (pRSmaInfo->items[0].taskInfo) { - if ((qUpdateQualifiedTableId(pRSmaInfo->items[0].taskInfo, tbUids, true) < 0)) { - smaError("vgId:%d, update tbUidList failed 
for uid:%" PRIi64 " since %s", SMA_VID(pSma), *suid, terrstr());
-      return TSDB_CODE_FAILED;
-    } else {
-      smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 ", uid:%" PRIi64, SMA_VID(pSma),
-               pRSmaInfo->items[1].taskInfo, *suid, *(int64_t *)taosArrayGet(tbUids, 0));
+  for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+    if (pRSmaInfo->taskInfo[i]) {
+      if ((qUpdateQualifiedTableId(pRSmaInfo->taskInfo[i], tbUids, true) < 0)) {
+        tdReleaseRSmaInfo(pSma, pRSmaInfo);
+        smaError("vgId:%d, update tbUidList failed for uid:%" PRIi64 " level %d since %s", SMA_VID(pSma), *suid, i,
+                 terrstr());
+        return TSDB_CODE_FAILED;
+      } else {
+        smaDebug("vgId:%d, update tbUidList succeed for qTaskInfo:%p with suid:%" PRIi64 " uid:%" PRIi64 " level %d",
+                 SMA_VID(pSma), pRSmaInfo->taskInfo[i], *suid, *(int64_t *)taosArrayGet(tbUids, 0), i);
+      }
     }
   }
 
+  tdReleaseRSmaInfo(pSma, pRSmaInfo);
   return TSDB_CODE_SUCCESS;
 }
 
@@ -267,13 +276,12 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat
       .initTqReader = 1,
   };
 
-  SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
-  pItem->refId = RSMA_REF_ID(pStat);
-  pItem->taskInfo = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
-  if (!pItem->taskInfo) {
+  pRSmaInfo->taskInfo[idx] = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle);
+  if (!pRSmaInfo->taskInfo[idx]) {
     terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE;
     return TSDB_CODE_FAILED;
   }
+  SRSmaInfoItem *pItem = &(pRSmaInfo->items[idx]);
   pItem->triggerStat = TASK_TRIGGER_STAT_INACTIVE;
   if (param->maxdelay[idx] < TSDB_MIN_ROLLUP_MAX_DELAY) {
     int64_t msInterval =
@@ -342,6 +350,7 @@ int32_t tdProcessRSmaCreateImpl(SSma *pSma, SRSmaParam *param, int64_t suid, con
   }
   pRSmaInfo->pTSchema = pTSchema;
   pRSmaInfo->suid = suid;
+  pRSmaInfo->refId = RSMA_REF_ID(pStat);
   T_REF_INIT_VAL(pRSmaInfo, 1);
 
   if (tdSetRSmaInfoItemParams(pSma, param, pStat, pRSmaInfo, 0) < 0) {
@@ -411,7 +420,7 @@ int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) {
   SSmaStat  *pStat = SMA_ENV_STAT(pSmaEnv);
   SRSmaStat *pRSmaStat = SMA_RSMA_STAT(pStat);
 
-  SRSmaInfo *pRSmaInfo = tdGetRSmaInfoBySuid(pSma, pReq->suid);
+  SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pReq->suid);
 
   if (!pRSmaInfo) {
     smaWarn("vgId:%d, drop rsma for stable %s %" PRIi64 " failed no rsma in hash", TD_VID(pVnode), pReq->name,
@@ -423,8 +432,10 @@ int32_t tdProcessRSmaDrop(SSma *pSma, SVDropStbReq *pReq) {
   RSMA_INFO_SET_DEL(pRSmaInfo);
   tdUnRefRSmaInfo(pSma, pRSmaInfo);
 
-  // save to file
+  tdReleaseRSmaInfo(pSma, pRSmaInfo);
 
+  // save to file
+  // TODO
   smaDebug("vgId:%d, drop rsma for table %" PRIi64 " succeed", TD_VID(pVnode), pReq->suid);
   return TSDB_CODE_SUCCESS;
 }
@@ -567,45 +578,56 @@ static void tdDestroySDataBlockArray(SArray *pArray) { taosArrayDestroy(pArray);
 }
 
-static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSchema, int64_t suid, SRSmaStat *pStat,
-                                          int8_t blkType) {
-  SArray *pResult = NULL;
-  SSma   *pSma = pStat->pSma;
+/**
+ * @brief retention of rsma1/rsma2
+ *
+ * @param pSma
+ * @param now
+ * @return int32_t
+ */
+int32_t
smaDoRetention(SSma *pSma, int64_t now) {
+  int32_t code = TSDB_CODE_SUCCESS;
+  if (!VND_IS_RSMA(pSma->pVnode)) {
+    return code;
+  }
+
+  for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) {
+    if (pSma->pRSmaTsdb[i]) {
+      code = tsdbDoRetention(pSma->pRSmaTsdb[i], now);
+      if (code) goto _end;
+    }
+  }
+
+_end:
+  return code;
+}
+
+static int32_t tdRSmaFetchAndSubmitResult(SSma *pSma, qTaskInfo_t taskInfo, SRSmaInfoItem *pItem, STSchema *pTSchema,
+                                          int64_t suid, int8_t blkType) {
   while (1) {
     SSDataBlock *output = NULL;
     uint64_t     ts;
 
-    int32_t code = qExecTask(pItem->taskInfo, &output, &ts);
+    int32_t code = qExecTask(taskInfo, &output, &ts);
     if (code < 0) {
       smaError("vgId:%d, qExecTask for rsma table %" PRIi64 " level %" PRIi8 " failed since %s", SMA_VID(pSma), suid,
               pItem->level, terrstr(code));
       goto _err;
     }
 
-    if (!output) {
-      break;
-    }
-
-    if (!pResult) {
-      pResult = taosArrayInit(1, sizeof(SSDataBlock));
-      if (!pResult) {
-        terrno = TSDB_CODE_OUT_OF_MEMORY;
-        goto _err;
-      }
-    }
-
-    taosArrayPush(pResult, output);
-
-    if (taosArrayGetSize(pResult) > 0) {
-#if 1
+    if (output) {
+#if 0
       char flag[10] = {0};
       snprintf(flag, 10, "level %" PRIi8, pItem->level);
+      SArray *pResult = taosArrayInit(1, sizeof(SSDataBlock));
+      taosArrayPush(pResult, output);
       blockDebugShowDataBlocks(pResult, flag);
+      taosArrayDestroy(pResult);
 #endif
       STsdb      *sinkTsdb = (pItem->level == TSDB_RETENTION_L1 ? pSma->pRSmaTsdb[0] : pSma->pRSmaTsdb[1]);
       SSubmitReq *pReq = NULL;
 
       // TODO: the schema update should be handled later(TD-17965)
-      if (buildSubmitReqFromDataBlock(&pReq, pResult, pTSchema, SMA_VID(pSma), suid) < 0) {
+      if (buildSubmitReqFromDataBlock(&pReq, output, pTSchema, SMA_VID(pSma), suid) < 0) {
        smaError("vgId:%d, build submit req for rsma stable %" PRIi64 " level %" PRIi8 " failed since %s",
                 SMA_VID(pSma), suid, pItem->level, terrstr());
        goto _err;
@@ -622,44 +644,44 @@ static int32_t tdRSmaFetchAndSubmitResult(SRSmaInfoItem *pItem, STSchema *pTSche
                SMA_VID(pSma), suid, pItem->level, output->info.version);
 
       taosMemoryFreeClear(pReq);
-      taosArrayClear(pResult);
     } else if (terrno == 0) {
       smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched yet", SMA_VID(pSma), pItem->level);
+      break;
     } else {
       smaDebug("vgId:%d, no rsma %" PRIi8 " data fetched since %s", SMA_VID(pSma), pItem->level, terrstr());
+      goto _err;
     }
   }
 
-  tdDestroySDataBlockArray(pResult);
   return TSDB_CODE_SUCCESS;
 _err:
-  tdDestroySDataBlockArray(pResult);
   return TSDB_CODE_FAILED;
 }
 
-static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfoItem *pItem,
-                                 STSchema *pTSchema, tb_uid_t suid, int8_t level) {
-  if (!pItem || !pItem->taskInfo) {
+static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType, SRSmaInfo *pInfo, tb_uid_t suid,
+                                 int8_t level) {
+  int32_t idx = level - 1;
+  if (!pInfo || !RSMA_INFO_QTASK(pInfo, idx)) {
    smaDebug("vgId:%d, no qTaskInfo to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
    return TSDB_CODE_SUCCESS;
   }
-  if (!pTSchema) {
+  if (!pInfo->pTSchema) {
    smaWarn("vgId:%d, no schema to execute rsma %" PRIi8 " task for suid:%" PRIu64, SMA_VID(pSma), level, suid);
    return TSDB_CODE_FAILED;
   }
 
   smaDebug("vgId:%d, execute rsma %" PRIi8 " task for qTaskInfo:%p suid:%" PRIu64, SMA_VID(pSma), level,
-           pItem->taskInfo, suid);
+           RSMA_INFO_QTASK(pInfo, idx), suid);
 
-  if (qSetMultiStreamInput(pItem->taskInfo, pMsg, 1, inputType) < 0) {  // INPUT__DATA_SUBMIT
+  if (qSetMultiStreamInput(RSMA_INFO_QTASK(pInfo, idx), pMsg, 1, inputType) < 0) {  // INPUT__DATA_SUBMIT
smaError("vgId:%d, rsma %" PRIi8 " qSetStreamInput failed since %s", SMA_VID(pSma), level, tstrerror(terrno)); return TSDB_CODE_FAILED; } - SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); - SRSmaStat *pStat = SMA_RSMA_STAT(pEnv->pStat); + SRSmaInfoItem *pItem = RSMA_INFO_ITEM(pInfo, idx); - tdRSmaFetchAndSubmitResult(pItem, pTSchema, suid, pStat, STREAM_INPUT__DATA_SUBMIT); + tdRSmaFetchAndSubmitResult(pSma, RSMA_INFO_QTASK(pInfo, idx), pItem, pInfo->pTSchema, suid, + STREAM_INPUT__DATA_SUBMIT); atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); if (smaMgmt.tmrHandle) { @@ -678,108 +700,102 @@ static int32_t tdExecuteRSmaImpl(SSma *pSma, const void *pMsg, int32_t inputType * @param suid * @return SRSmaInfo* */ -static SRSmaInfo *tdGetRSmaInfoBySuid(SSma *pSma, int64_t suid) { +static SRSmaInfo *tdAcquireRSmaInfoBySuid(SSma *pSma, int64_t suid) { SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); SRSmaStat *pStat = NULL; SRSmaInfo *pRSmaInfo = NULL; if (!pEnv) { + terrno = TSDB_CODE_RSMA_INVALID_ENV; return NULL; } pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); if (!pStat || !RSMA_INFO_HASH(pStat)) { + terrno = TSDB_CODE_RSMA_INVALID_STAT; return NULL; } + taosRLockLatch(SMA_ENV_LOCK(pEnv)); pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); if (pRSmaInfo && (pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { + if (RSMA_INFO_IS_DEL(pRSmaInfo)) { + taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); + return NULL; + } + tdRefRSmaInfo(pSma, pRSmaInfo); + taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); return pRSmaInfo; } + taosRUnLockLatch(SMA_ENV_LOCK(pEnv)); - if (RSMA_COMMIT_STAT(pStat) == 0) { + if (RSMA_COMMIT_STAT(pStat) == 0) { // return NULL if not in committing stat return NULL; } + // clone the SRSmaInfo from iRsmaInfoHash to rsmaInfoHash if in committing stat SRSmaInfo *pCowRSmaInfo = NULL; // lock taosWLockLatch(SMA_ENV_LOCK(pEnv)); - if (!taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t))) { // 2-phase lock + if (!(pCowRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)))) { // 2-phase lock void *iRSmaInfo = taosHashGet(RSMA_IMU_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); if (iRSmaInfo) { SRSmaInfo *pIRSmaInfo = *(SRSmaInfo **)iRSmaInfo; - if (pIRSmaInfo) { - if (tdCloneRSmaInfo(pSma, pCowRSmaInfo, pIRSmaInfo) < 0) { + if (pIRSmaInfo && !RSMA_INFO_IS_DEL(pIRSmaInfo)) { + if (tdCloneRSmaInfo(pSma, &pCowRSmaInfo, pIRSmaInfo) < 0) { + // unlock taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr()); return NULL; } smaDebug("vgId:%d, clone rsma info succeed for suid:%" PRIu64, SMA_VID(pSma), suid); if (taosHashPut(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t), &pCowRSmaInfo, sizeof(pCowRSmaInfo)) < 0) { + // unlock taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); + smaError("vgId:%d, clone rsma info failed for suid:%" PRIu64 " since %s", SMA_VID(pSma), suid, terrstr()); return NULL; } } } + } else { + pCowRSmaInfo = *(SRSmaInfo **)pCowRSmaInfo; + ASSERT(!pCowRSmaInfo); + } + + if (pCowRSmaInfo) { + tdRefRSmaInfo(pSma, pCowRSmaInfo); } // unlock taosWUnLockLatch(SMA_ENV_LOCK(pEnv)); return pCowRSmaInfo; } -/** - * @brief During the drop procedure, only need to delete the object in rsmaInfoHash. 
- * - * @param pSma - * @param suid - * @return SRSmaInfo* - */ -void tdRemoveRSmaInfoBySuid(SSma *pSma, int64_t suid) { - SSmaEnv *pEnv = SMA_RSMA_ENV(pSma); - SRSmaStat *pStat = NULL; - SRSmaInfo *pRSmaInfo = NULL; - - if (!pEnv) { - return; - } - - pStat = (SRSmaStat *)SMA_ENV_STAT(pEnv); - if (!pStat || !RSMA_INFO_HASH(pStat)) { - return; - } - - pRSmaInfo = taosHashGet(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); - if (pRSmaInfo) { - if ((pRSmaInfo = *(SRSmaInfo **)pRSmaInfo)) { - tdFreeRSmaInfo(pSma, pRSmaInfo, true); - } - taosHashRemove(RSMA_INFO_HASH(pStat), &suid, sizeof(tb_uid_t)); - smaDebug("vgId:%d, remove from infoHash for table:%" PRIu64 " succeed", SMA_VID(pSma), suid); +static FORCE_INLINE void tdReleaseRSmaInfo(SSma *pSma, SRSmaInfo *pInfo) { + if (pInfo) { + tdUnRefRSmaInfo(pSma, pInfo); } } static int32_t tdExecuteRSma(SSma *pSma, const void *pMsg, int32_t inputType, tb_uid_t suid) { - SRSmaInfo *pRSmaInfo = tdGetRSmaInfoBySuid(pSma, suid); + SRSmaInfo *pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, suid); if (!pRSmaInfo) { smaDebug("vgId:%d, execute rsma, no rsma info for suid:%" PRIu64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; } - if (!pRSmaInfo->items[0].taskInfo) { + if (!RSMA_INFO_QTASK(pRSmaInfo, 0)) { + tdReleaseRSmaInfo(pSma, pRSmaInfo); smaDebug("vgId:%d, execute rsma, no rsma qTaskInfo for suid:%" PRIu64, SMA_VID(pSma), suid); return TSDB_CODE_SUCCESS; } if (inputType == STREAM_INPUT__DATA_SUBMIT) { - tdRefRSmaInfo(pSma, pRSmaInfo); - - tdExecuteRSmaImpl(pSma, pMsg, inputType, &pRSmaInfo->items[0], pRSmaInfo->pTSchema, suid, TSDB_RETENTION_L1); - tdExecuteRSmaImpl(pSma, pMsg, inputType, &pRSmaInfo->items[1], pRSmaInfo->pTSchema, suid, TSDB_RETENTION_L2); - - tdUnRefRSmaInfo(pSma, pRSmaInfo); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L1); + tdExecuteRSmaImpl(pSma, pMsg, inputType, pRSmaInfo, suid, TSDB_RETENTION_L2); } + tdReleaseRSmaInfo(pSma, pRSmaInfo); return TSDB_CODE_SUCCESS; } @@ -990,26 +1006,28 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem * SRSmaInfo *pRSmaInfo = NULL; void *qTaskInfo = NULL; - pRSmaInfo = tdGetRSmaInfoBySuid(pSma, pItem->suid); + pRSmaInfo = tdAcquireRSmaInfoBySuid(pSma, pItem->suid); if (!pRSmaInfo) { smaDebug("vgId:%d, no restore as no rsma info for table:%" PRIu64, SMA_VID(pSma), pItem->suid); return TSDB_CODE_SUCCESS; } if (pItem->type == TSDB_RETENTION_L1) { - qTaskInfo = pRSmaInfo->items[0].taskInfo; + qTaskInfo = RSMA_INFO_QTASK(pRSmaInfo, 0); } else if (pItem->type == TSDB_RETENTION_L2) { - qTaskInfo = pRSmaInfo->items[1].taskInfo; + qTaskInfo = RSMA_INFO_QTASK(pRSmaInfo, 1); } else { ASSERT(0); } if (!qTaskInfo) { + tdReleaseRSmaInfo(pSma, pRSmaInfo); smaDebug("vgId:%d, no restore as NULL rsma qTaskInfo for table:%" PRIu64, SMA_VID(pSma), pItem->suid); return TSDB_CODE_SUCCESS; } if (qDeserializeTaskStatus(qTaskInfo, pItem->qTaskInfo, pItem->len) < 0) { + tdReleaseRSmaInfo(pSma, pRSmaInfo); smaError("vgId:%d, restore rsma task failed for table:%" PRIi64 " level %d since %s", SMA_VID(pSma), pItem->suid, pItem->type, terrstr()); return TSDB_CODE_FAILED; @@ -1017,6 +1035,7 @@ static int32_t tdRSmaQTaskInfoItemRestore(SSma *pSma, const SRSmaQTaskInfoItem * smaDebug("vgId:%d, restore rsma task success for table:%" PRIi64 " level %d", SMA_VID(pSma), pItem->suid, pItem->type); + tdReleaseRSmaInfo(pSma, pRSmaInfo); return TSDB_CODE_SUCCESS; } @@ -1195,8 +1214,14 @@ int32_t tdRSmaPersistExecImpl(SRSmaStat *pRSmaStat, SHashObj *pInfoHash) { while (infoHash) { 
SRSmaInfo *pRSmaInfo = *(SRSmaInfo **)infoHash; + + if (RSMA_INFO_IS_DEL(pRSmaInfo)) { + infoHash = taosHashIterate(pInfoHash, infoHash); + continue; + } + for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { - qTaskInfo_t taskInfo = pRSmaInfo->items[i].taskInfo; + qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pRSmaInfo, i); if (!taskInfo) { smaDebug("vgId:%d, rsma, table %" PRIi64 " level %d qTaskInfo is NULL", vid, pRSmaInfo->suid, i + 1); continue; @@ -1282,7 +1307,7 @@ _err: } /** - * @brief trigger to get rsma result + * @brief trigger to get rsma result in async mode * * @param param * @param tmrId @@ -1290,11 +1315,17 @@ _err: static void tdRSmaFetchTrigger(void *param, void *tmrId) { SRSmaInfoItem *pItem = param; SSma *pSma = NULL; - SRSmaStat *pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pItem->refId, __func__, __LINE__); + SRSmaInfo *pRSmaInfo = tdGetRSmaInfoByItem(pItem); + + if (RSMA_INFO_IS_DEL(pRSmaInfo)) { + return; + } + + SRSmaStat *pStat = (SRSmaStat *)tdAcquireSmaRef(smaMgmt.rsetId, pRSmaInfo->refId); if (!pStat) { smaDebug("rsma fetch task not start since already destroyed, rsetId rsetId:%" PRIi64 " refId:%d)", smaMgmt.rsetId, - pItem->refId); + pRSmaInfo->refId); return; } @@ -1305,13 +1336,12 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { switch (rsmaTriggerStat) { case TASK_TRIGGER_STAT_PAUSED: case TASK_TRIGGER_STAT_CANCELLED: { - tdReleaseSmaRef(smaMgmt.rsetId, pItem->refId, __func__, __LINE__); + tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId); smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data since stat is %" PRIi8 ", rsetId rsetId:%" PRIi64 " refId:%d", - SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pItem->refId); + SMA_VID(pSma), pItem->level, rsmaTriggerStat, smaMgmt.rsetId, pRSmaInfo->refId); if (rsmaTriggerStat == TASK_TRIGGER_STAT_PAUSED) { - taosTmrReset(tdRSmaFetchTrigger, pItem->maxDelay > 5000 ? 
5000 : pItem->maxDelay, pItem, smaMgmt.tmrHandle, - &pItem->tmrId); + taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId); } return; } @@ -1319,29 +1349,14 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { break; } - SRSmaInfo *pRSmaInfo = tdGetRSmaInfoByItem(pItem); - if (RSMA_INFO_IS_DEL(pRSmaInfo)) { - goto _end; - } - int8_t fetchTriggerStat = atomic_val_compare_exchange_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE, TASK_TRIGGER_STAT_INACTIVE); switch (fetchTriggerStat) { case TASK_TRIGGER_STAT_ACTIVE: { smaDebug("vgId:%d, fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is active", SMA_VID(pSma), pItem->level, pRSmaInfo->suid); - - // sync procedure => async process - tdRefRSmaInfo(pSma, pRSmaInfo); - - SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; - qSetMultiStreamInput(pItem->taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK); - tdRSmaFetchAndSubmitResult(pItem, pRSmaInfo->pTSchema, pRSmaInfo->suid, pStat, STREAM_INPUT__DATA_BLOCK); - tdCleanupStreamInputDataBlock(pItem->taskInfo); - - tdUnRefRSmaInfo(pSma, pRSmaInfo); - // atomic_store_8(&pItem->triggerStat, TASK_TRIGGER_STAT_ACTIVE); - // taosTmrReset(tdRSmaFetchTrigger, 5000, pItem, smaMgmt.tmrHandle, &pItem->tmrId); + // async process + tdRSmaFetchSend(pSma, pRSmaInfo, pItem->level); } break; case TASK_TRIGGER_STAT_PAUSED: { smaDebug("vgId:%d, not fetch rsma level %" PRIi8 " data for table:%" PRIi64 " since stat is paused", @@ -1362,22 +1377,120 @@ static void tdRSmaFetchTrigger(void *param, void *tmrId) { } _end: - tdReleaseSmaRef(smaMgmt.rsetId, pItem->refId, __func__, __LINE__); + tdReleaseSmaRef(smaMgmt.rsetId, pRSmaInfo->refId); } -int32_t smaDoRetention(SSma *pSma, int64_t now) { - int32_t code = TSDB_CODE_SUCCESS; - if (VND_IS_RSMA(pSma->pVnode)) { - return code; +/** + * @brief put rsma fetch msg to fetch queue + * + * @param pSma + * @param pInfo + * @param level + * @return int32_t + */ +int32_t tdRSmaFetchSend(SSma *pSma, SRSmaInfo *pInfo, int8_t level) { + SRSmaFetchMsg fetchMsg = { .suid = pInfo->suid, .level = level}; + int32_t ret = 0; + int32_t contLen = 0; + SEncoder encoder = {0}; + tEncodeSize(tEncodeSRSmaFetchMsg, &fetchMsg, contLen, ret); + if (ret < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + tEncoderClear(&encoder); + goto _err; } - for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { - if (pSma->pRSmaTsdb[i]) { - code = tsdbDoRetention(pSma->pRSmaTsdb[i], now); - if (code) goto _end; + void *pBuf = rpcMallocCont(contLen + sizeof(SMsgHead)); + tEncoderInit(&encoder, POINTER_SHIFT(pBuf, sizeof(SMsgHead)), contLen); + if (tEncodeSRSmaFetchMsg(&encoder, &fetchMsg) < 0) { + terrno = TSDB_CODE_OUT_OF_MEMORY; + tEncoderClear(&encoder); + } + tEncoderClear(&encoder); + + ((SMsgHead *)pBuf)->vgId = SMA_VID(pSma); + ((SMsgHead *)pBuf)->contLen = contLen + sizeof(SMsgHead); + + SRpcMsg rpcMsg = { + .code = 0, + .msgType = TDMT_VND_FETCH_RSMA, + .pCont = pBuf, + .contLen = contLen, + }; + + if ((terrno = tmsgPutToQueue(&pSma->pVnode->msgCb, FETCH_QUEUE, &rpcMsg)) != 0) { + smaError("vgId:%d, failed to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8 " since %s", + SMA_VID(pSma), pInfo->suid, level, terrstr()); + goto _err; + } + + smaDebug("vgId:%d, success to put rsma fetch msg into fetch-queue for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), + pInfo->suid, level); + + return TSDB_CODE_SUCCESS; +_err: + return TSDB_CODE_FAILED; +} + +/** + * @brief fetch rsma data of level 2/3 and submit + * + * @param pSma + * @param pMsg + * 
@return int32_t + */ +int32_t smaProcessFetch(SSma *pSma, void *pMsg) { + SRpcMsg *pRpcMsg = (SRpcMsg *)pMsg; + SRSmaFetchMsg req = {0}; + SDecoder decoder = {0}; + void *pBuf = NULL; + SRSmaInfo *pInfo = NULL; + SRSmaInfoItem *pItem = NULL; + + if (!pRpcMsg || pRpcMsg->contLen < sizeof(SMsgHead)) { + terrno = TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP; + return -1; + } + + pBuf = POINTER_SHIFT(pRpcMsg->pCont, sizeof(SMsgHead)); + + tDecoderInit(&decoder, pBuf, pRpcMsg->contLen); + if (tDecodeSRSmaFetchMsg(&decoder, &req) < 0) { + terrno = TSDB_CODE_INVALID_MSG; + goto _err; + } + + pInfo = tdAcquireRSmaInfoBySuid(pSma, req.suid); + if (!pInfo) { + if (terrno == TSDB_CODE_SUCCESS) { + terrno = TSDB_CODE_RSMA_EMPTY_INFO; } + smaWarn("vgId:%d, failed to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8 " since %s", SMA_VID(pSma), + req.suid, req.level, terrstr()); + goto _err; } -_end: - return code; -} \ No newline at end of file + pItem = RSMA_INFO_ITEM(pInfo, req.level - 1); + + SSDataBlock dataBlock = {.info.type = STREAM_GET_ALL}; + qTaskInfo_t taskInfo = RSMA_INFO_QTASK(pInfo, req.level - 1); + if ((terrno = qSetMultiStreamInput(taskInfo, &dataBlock, 1, STREAM_INPUT__DATA_BLOCK)) < 0) { + goto _err; + } + if (tdRSmaFetchAndSubmitResult(pSma, taskInfo, pItem, pInfo->pTSchema, pInfo->suid, STREAM_INPUT__DATA_BLOCK) < 0) { + goto _err; + } + + tdCleanupStreamInputDataBlock(taskInfo); + + tdReleaseRSmaInfo(pSma, pInfo); + tDecoderClear(&decoder); + smaDebug("vgId:%d, success to process rsma fetch msg for suid:%" PRIi64 " level:%" PRIi8, SMA_VID(pSma), req.suid, + req.level); + return TSDB_CODE_SUCCESS; +_err: + tdReleaseRSmaInfo(pSma, pInfo); + tDecoderClear(&decoder); + smaError("vgId:%d, failed to process rsma fetch msg since %s", SMA_VID(pSma), terrstr()); + return TSDB_CODE_FAILED; +} diff --git a/source/dnode/vnode/src/sma/smaTimeRange.c b/source/dnode/vnode/src/sma/smaTimeRange.c index 6d71f3a250..7f69acc889 100644 --- a/source/dnode/vnode/src/sma/smaTimeRange.c +++ b/source/dnode/vnode/src/sma/smaTimeRange.c @@ -116,8 +116,10 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) { } // create stable to save tsma result in dstVgId + SName stbFullName = {0}; + tNameFromString(&stbFullName, pCfg->dstTbName, T_NAME_ACCT | T_NAME_DB | T_NAME_TABLE); SVCreateStbReq pReq = {0}; - pReq.name = pCfg->dstTbName; + pReq.name = (char*)tNameGetTableName(&stbFullName); pReq.suid = pCfg->dstTbUid; pReq.schemaRow = pCfg->schemaRow; pReq.schemaTag = pCfg->schemaTag; @@ -125,6 +127,12 @@ int32_t tdProcessTSmaCreateImpl(SSma *pSma, int64_t version, const char *pMsg) { if (metaCreateSTable(SMA_META(pSma), version, &pReq) < 0) { return -1; } + + smaDebug("vgId:%d, success to create sma index %s %" PRIi64 " on stb:%" PRIi64 ", dstSuid:%" PRIi64 + " dstTb:%s dstVg:%d", + SMA_VID(pSma), pCfg->indexName, pCfg->indexUid, pCfg->tableUid, pCfg->dstTbUid, pReq.name, pCfg->dstVgId); + } else { + ASSERT(0); } return 0; diff --git a/source/dnode/vnode/src/sma/smaUtil.c b/source/dnode/vnode/src/sma/smaUtil.c index 9dce166afa..d9f38ffd09 100644 --- a/source/dnode/vnode/src/sma/smaUtil.c +++ b/source/dnode/vnode/src/sma/smaUtil.c @@ -287,22 +287,22 @@ int32_t tdRemoveTFile(STFile *pTFile) { } // smaXXXUtil ================ -void *tdAcquireSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln) { +void *tdAcquireSmaRef(int32_t rsetId, int64_t refId) { void *pResult = taosAcquireRef(rsetId, refId); if (!pResult) { - smaWarn("%s:%d taosAcquireRef for rsetId:%" PRIi64 " refId:%d 
failed since %s", tags, ln, rsetId, refId, terrstr()); + smaWarn("rsma acquire ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr()); } else { - smaDebug("%s:%d taosAcquireRef for rsetId:%" PRIi64 " refId:%d success", tags, ln, rsetId, refId); + smaDebug("rsma acquire ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId); } return pResult; } -int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId, const char *tags, int32_t ln) { +int32_t tdReleaseSmaRef(int32_t rsetId, int64_t refId) { if (taosReleaseRef(rsetId, refId) < 0) { - smaWarn("%s:%d taosReleaseRef for rsetId:%" PRIi64 " refId:%d failed since %s", tags, ln, rsetId, refId, terrstr()); + smaWarn("rsma release ref for rsetId:%" PRIi64 " refId:%d failed since %s", rsetId, refId, terrstr()); return TSDB_CODE_FAILED; } - smaDebug("%s:%d taosReleaseRef for rsetId:%" PRIi64 " refId:%d success", tags, ln, rsetId, refId); + smaDebug("rsma release ref for rsetId:%" PRIi64 " refId:%d success", rsetId, refId); return TSDB_CODE_SUCCESS; } @@ -313,7 +313,7 @@ static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t char *pOutput = NULL; int32_t len = 0; - if (qSerializeTaskStatus(srcTaskInfo, &pOutput, &len) < 0) { + if ((terrno = qSerializeTaskStatus(srcTaskInfo, &pOutput, &len)) < 0) { smaError("vgId:%d, rsma clone, table %" PRIi64 " serialize qTaskInfo failed since %s", TD_VID(pVnode), suid, terrstr()); goto _err; @@ -337,41 +337,34 @@ static int32_t tdCloneQTaskInfo(SSma *pSma, qTaskInfo_t dstTaskInfo, qTaskInfo_t goto _err; } - smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid); + smaDebug("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " succeed", TD_VID(pVnode), suid); taosMemoryFreeClear(pOutput); return TSDB_CODE_SUCCESS; _err: taosMemoryFreeClear(pOutput); tdFreeQTaskInfo(dstTaskInfo, TD_VID(pVnode), idx + 1); + smaError("vgId:%d, rsma clone, restore rsma task for table:%" PRIi64 " failed since %s", TD_VID(pVnode), suid, + terrstr()); return TSDB_CODE_FAILED; } /** * @brief pTSchema is shared - * - * @param pSma - * @param pDest - * @param pSrc - * @return int32_t + * + * @param pSma + * @param pDest + * @param pSrc + * @return int32_t */ -int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) { +int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo **pDest, SRSmaInfo *pSrc) { SVnode *pVnode = pSma->pVnode; SRSmaParam *param = NULL; if (!pSrc) { + *pDest = NULL; return TSDB_CODE_SUCCESS; } - if (!pDest) { - pDest = taosMemoryCalloc(1, sizeof(SRSmaInfo)); - if (!pDest) { - terrno = TSDB_CODE_OUT_OF_MEMORY; - return TSDB_CODE_FAILED; - } - } - - memcpy(pDest, pSrc, sizeof(SRSmaInfo)); - SMetaReader mr = {0}; metaReaderInit(&mr, SMA_META(pSma), 0); smaDebug("vgId:%d, rsma clone, suid is %" PRIi64, TD_VID(pVnode), pSrc->suid); @@ -384,21 +377,22 @@ int32_t tdCloneRSmaInfo(SSma *pSma, SRSmaInfo *pDest, SRSmaInfo *pSrc) { ASSERT(mr.me.uid == pSrc->suid); if (TABLE_IS_ROLLUP(mr.me.flags)) { param = &mr.me.stbEntry.rsmaParam; - for (int i = 0; i < TSDB_RETENTION_L2; ++i) { - SRSmaInfoItem *pItem = &pSrc->items[i]; - if (pItem->taskInfo) { - tdCloneQTaskInfo(pSma, pDest->items[i].taskInfo, pItem->taskInfo, param, pSrc->suid, i); + for (int32_t i = 0; i < TSDB_RETENTION_L2; ++i) { + if (tdCloneQTaskInfo(pSma, pSrc->iTaskInfo[i], pSrc->taskInfo[i], param, pSrc->suid, i) < 0) { + goto _err; } } smaDebug("vgId:%d, rsma clone env success for %" PRIi64, TD_VID(pVnode), pSrc->suid); } metaReaderClear(&mr); + + *pDest = pSrc; 
// pointer copy + return TSDB_CODE_SUCCESS; _err: + *pDest = NULL; metaReaderClear(&mr); - tdFreeRSmaInfo(pSma, pDest, false); + smaError("vgId:%d, rsma clone env failed for %" PRIi64 " since %s", TD_VID(pVnode), pSrc->suid, terrstr()); return TSDB_CODE_FAILED; -} - -// ... \ No newline at end of file +} \ No newline at end of file diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index a9f07cbf24..c40fb98d62 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -115,12 +115,19 @@ static int32_t tsdbSnapReadData(STsdbSnapReader* pReader, uint8_t** ppData) { TSDBROW row = tsdbRowFromBlockData(&pReader->oBlockData, iRow); int64_t version = TSDBROW_VERSION(&row); + tsdbTrace("vgId:%d, vnode snapshot tsdb read for %s, %" PRId64 "(%" PRId64 " , %" PRId64 ")", + TD_VID(pReader->pTsdb->pVnode), pReader->pTsdb->path, version, pReader->sver, pReader->ever); + if (version < pReader->sver || version > pReader->ever) continue; code = tBlockDataAppendRow(&pReader->nBlockData, &row, NULL); if (code) goto _err; } + if (pReader->nBlockData.nRow <= 0) { + continue; + } + // org data // compress data (todo) int32_t size = sizeof(TABLEID) + tPutBlockData(NULL, &pReader->nBlockData); @@ -808,7 +815,8 @@ static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, TABLEID id) { if (code) goto _err; _exit: - tsdbDebug("vgId:%d, vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode), pWriter->pTsdb->path); + tsdbDebug("vgId:%d, vnode snapshot tsdb write data impl for %s", TD_VID(pWriter->pTsdb->pVnode), + pWriter->pTsdb->path); return code; _err: diff --git a/source/dnode/vnode/src/vnd/vnodeCommit.c b/source/dnode/vnode/src/vnd/vnodeCommit.c index 460d0b6fbb..c8dc07af0a 100644 --- a/source/dnode/vnode/src/vnd/vnodeCommit.c +++ b/source/dnode/vnode/src/vnd/vnodeCommit.c @@ -42,7 +42,7 @@ int vnodeBegin(SVnode *pVnode) { pVnode->state.commitID++; // begin meta - if (metaBegin(pVnode->pMeta) < 0) { + if (metaBegin(pVnode->pMeta, 0) < 0) { vError("vgId:%d, failed to begin meta since %s", TD_VID(pVnode), tstrerror(terrno)); return -1; } diff --git a/source/dnode/vnode/src/vnd/vnodeQuery.c b/source/dnode/vnode/src/vnd/vnodeQuery.c index 707c4bc471..d55f1796ad 100644 --- a/source/dnode/vnode/src/vnd/vnodeQuery.c +++ b/source/dnode/vnode/src/vnd/vnodeQuery.c @@ -473,7 +473,7 @@ int32_t vnodeGetTimeSeriesNum(SVnode *pVnode, int64_t *num) { int numOfCols = 0; vnodeGetStbColumnNum(pVnode, id, &numOfCols); - *num += ctbNum * numOfCols; + *num += ctbNum * (numOfCols - 1); } metaCloseStbCursor(pCur); diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c index 2d181c5458..e4e1e608e5 100644 --- a/source/dnode/vnode/src/vnd/vnodeSvr.c +++ b/source/dnode/vnode/src/vnd/vnodeSvr.c @@ -325,6 +325,8 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { return vnodeGetTableCfg(pVnode, pMsg, true); case TDMT_VND_BATCH_META: return vnodeGetBatchMeta(pVnode, pMsg); + case TDMT_VND_FETCH_RSMA: + return smaProcessFetch(pVnode->pSma, pMsg); case TDMT_VND_CONSUME: return tqProcessPollReq(pVnode->pTq, pMsg); case TDMT_STREAM_TASK_RUN: @@ -351,7 +353,6 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) { // TODO: remove the function void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) { // TODO - blockDebugShowDataBlocks(data, __func__); tdProcessTSmaInsert(((SVnode *)pVnode)->pSma, smaId, (const char 
*)data); } @@ -852,6 +853,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq if (metaCreateTable(pVnode->pMeta, version, &createTbReq) < 0) { if (terrno != TSDB_CODE_TDB_TABLE_ALREADY_EXIST) { submitBlkRsp.code = terrno; + pRsp->code = terrno; tDecoderClear(&decoder); taosArrayDestroy(createTbReq.ctb.tagName); goto _exit; diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c index cea0fc0029..c525d09057 100644 --- a/source/dnode/vnode/src/vnd/vnodeSync.c +++ b/source/dnode/vnode/src/vnd/vnodeSync.c @@ -730,10 +730,13 @@ void vnodeSyncClose(SVnode *pVnode) { syncStop(pVnode->sync); } bool vnodeIsLeader(SVnode *pVnode) { if (!syncIsReady(pVnode->sync)) { + vDebug("vgId:%d, vnode not ready, state:%s, restore:%d", pVnode->config.vgId, syncGetMyRoleStr(pVnode->sync), + syncRestoreFinish(pVnode->sync)); return false; } if (!pVnode->restored) { + vDebug("vgId:%d, vnode not restored", pVnode->config.vgId); terrno = TSDB_CODE_APP_NOT_READY; return false; } diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h index 4ef23860cd..777dcd0592 100644 --- a/source/libs/catalog/inc/catalogInt.h +++ b/source/libs/catalog/inc/catalogInt.h @@ -279,6 +279,7 @@ typedef struct SCtgMsgCtx { void* lastOut; void* out; char* target; + SHashObj* pBatchs; } SCtgMsgCtx; @@ -315,7 +316,6 @@ typedef struct SCtgTask { SRWLatch lock; SArray* pParents; SCtgSubRes subRes; - SHashObj* pBatchs; } SCtgTask; typedef struct SCtgTaskReq { diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c index 63d99cc58b..f279d87815 100644 --- a/source/libs/catalog/src/ctgAsync.c +++ b/source/libs/catalog/src/ctgAsync.c @@ -855,6 +855,7 @@ int32_t ctgCallSubCb(SCtgTask *pTask) { int32_t parentNum = taosArrayGetSize(pTask->pParents); for (int32_t i = 0; i < parentNum; ++i) { + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); SCtgTask* pParent = taosArrayGetP(pTask->pParents, i); pParent->subRes.code = pTask->code; @@ -865,7 +866,9 @@ int32_t ctgCallSubCb(SCtgTask *pTask) { } } - pParent->pBatchs = pTask->pBatchs; + SCtgMsgCtx *pParMsgCtx = CTG_GET_TASK_MSGCTX(pParent, -1); + + pParMsgCtx->pBatchs = pMsgCtx->pBatchs; CTG_ERR_JRET(pParent->subRes.fp(pParent)); } @@ -1082,7 +1085,7 @@ _return: ctgReleaseVgInfoToCache(pCtg, dbCache); } - if (pTask->res) { + if (pTask->res || code) { ctgHandleTaskEnd(pTask, code); } @@ -1625,6 +1628,11 @@ _return: int32_t ctgLaunchGetTbMetaTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, pConn, (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res)); if (pTask->res) { @@ -1645,6 +1653,7 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgTbMetasCtx* pCtx = (SCtgTbMetasCtx*)pTask->taskCtx; + SCtgJob* pJob = pTask->pJob; int32_t dbNum = taosArrayGetSize(pCtx->pNames); int32_t fetchIdx = 0; @@ -1670,7 +1679,11 @@ int32_t ctgLaunchGetTbMetasTask(SCtgTask *pTask) { for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); SName* pName = ctgGetFetchName(pCtx->pNames, pFetch); - + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = 
pJob->pBatchs; + } + SCtgTaskReq tReq; tReq.pTask = pTask; tReq.msgIdx = pFetch->fetchIdx; @@ -1686,6 +1699,11 @@ int32_t ctgLaunchGetDbVgTask(SCtgTask *pTask) { SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgDBCache *dbCache = NULL; SCtgDbVgCtx* pCtx = (SCtgDbVgCtx*)pTask->taskCtx; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); if (NULL != dbCache) { @@ -1722,6 +1740,11 @@ int32_t ctgLaunchGetTbHashTask(SCtgTask *pTask) { SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgDBCache *dbCache = NULL; SCtgTbHashCtx* pCtx = (SCtgTbHashCtx*)pTask->taskCtx; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgAcquireVgInfoFromCache(pCtg, pCtx->dbFName, &dbCache)); if (NULL != dbCache) { @@ -1761,6 +1784,7 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) { SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgTbHashsCtx* pCtx = (SCtgTbHashsCtx*)pTask->taskCtx; SCtgDBCache *dbCache = NULL; + SCtgJob* pJob = pTask->pJob; int32_t dbNum = taosArrayGetSize(pCtx->pNames); int32_t fetchIdx = 0; int32_t baseResIdx = 0; @@ -1803,7 +1827,11 @@ int32_t ctgLaunchGetTbHashsTask(SCtgTask *pTask) { for (int32_t i = 0; i < pCtx->fetchNum; ++i) { SCtgFetch* pFetch = taosArrayGet(pCtx->pFetchs, i); STablesReq* pReq = taosArrayGet(pCtx->pNames, pFetch->dbIdx); - + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, i); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } + SBuildUseDBInput input = {0}; strcpy(input.db, pReq->dbFName); @@ -1831,6 +1859,11 @@ int32_t ctgLaunchGetTbIndexTask(SCtgTask *pTask) { SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgTbIndexCtx* pCtx = (SCtgTbIndexCtx*)pTask->taskCtx; SArray* pRes = NULL; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgReadTbIndexFromCache(pCtg, pCtx->pName, &pRes)); if (pRes) { @@ -1852,6 +1885,11 @@ int32_t ctgLaunchGetTbCfgTask(SCtgTask *pTask) { SArray* pRes = NULL; char dbFName[TSDB_DB_FNAME_LEN]; tNameGetFullDbName(pCtx->pName, dbFName); + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } if (pCtx->tbType <= 0) { CTG_ERR_JRET(ctgReadTbTypeFromCache(pCtg, dbFName, pCtx->pName->tname, &pCtx->tbType)); @@ -1890,6 +1928,11 @@ _return: int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgGetQnodeListFromMnode(pCtg, pConn, NULL, pTask)); return TSDB_CODE_SUCCESS; @@ -1898,6 +1941,11 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) { int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgGetDnodeListFromMnode(pCtg, pConn, NULL, pTask)); return TSDB_CODE_SUCCESS; @@ -1908,6 +1956,11 @@ int32_t 
ctgLaunchGetDbCfgTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgDbCfgCtx* pCtx = (SCtgDbCfgCtx*)pTask->taskCtx; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgGetDBCfgFromMnode(pCtg, pConn, pCtx->dbFName, NULL, pTask)); @@ -1919,6 +1972,11 @@ int32_t ctgLaunchGetDbInfoTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SCtgDBCache *dbCache = NULL; SCtgDbInfoCtx* pCtx = (SCtgDbInfoCtx*)pTask->taskCtx; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } pTask->res = taosMemoryCalloc(1, sizeof(SDbInfo)); if (NULL == pTask->res) { @@ -1953,6 +2011,11 @@ int32_t ctgLaunchGetIndexTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgIndexCtx* pCtx = (SCtgIndexCtx*)pTask->taskCtx; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgGetIndexInfoFromMnode(pCtg, pConn, pCtx->indexFName, NULL, pTask)); @@ -1963,6 +2026,11 @@ int32_t ctgLaunchGetUdfTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; SCtgUdfCtx* pCtx = (SCtgUdfCtx*)pTask->taskCtx; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgGetUdfInfoFromMnode(pCtg, pConn, pCtx->udfName, NULL, pTask)); @@ -1975,6 +2043,11 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) { SCtgUserCtx* pCtx = (SCtgUserCtx*)pTask->taskCtx; bool inCache = false; bool pass = false; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgChkAuthFromCache(pCtg, pCtx->user.user, pCtx->user.dbFName, pCtx->user.type, &inCache, &pass)); if (inCache) { @@ -1996,6 +2069,11 @@ int32_t ctgLaunchGetUserTask(SCtgTask *pTask) { int32_t ctgLaunchGetSvrVerTask(SCtgTask *pTask) { SCatalog* pCtg = pTask->pJob->pCtg; SRequestConnInfo* pConn = &pTask->pJob->conn; + SCtgJob* pJob = pTask->pJob; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + if (NULL == pMsgCtx->pBatchs) { + pMsgCtx->pBatchs = pJob->pBatchs; + } CTG_ERR_RET(ctgGetSvrVerFromMnode(pCtg, pConn, NULL, pTask)); @@ -2129,7 +2207,10 @@ int32_t ctgSetSubTaskCb(SCtgTask *pSub, SCtgTask *pTask) { if (CTG_TASK_DONE == pSub->status) { pTask->subRes.code = pSub->code; CTG_ERR_JRET((*gCtgAsyncFps[pTask->type].cloneFp)(pSub, &pTask->subRes.res)); - pTask->pBatchs = pSub->pBatchs; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1); + pMsgCtx->pBatchs = pSubMsgCtx->pBatchs; + CTG_ERR_JRET(pTask->subRes.fp(pTask)); } else { if (NULL == pSub->pParents) { @@ -2167,7 +2248,10 @@ int32_t ctgLaunchSubTask(SCtgTask *pTask, CTG_TASK_TYPE type, ctgSubTaskCbFp fp, CTG_ERR_RET(ctgSetSubTaskCb(pSub, pTask)); if (newTask) { - pSub->pBatchs = pTask->pBatchs; + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + SCtgMsgCtx *pSubMsgCtx = CTG_GET_TASK_MSGCTX(pSub, -1); + pSubMsgCtx->pBatchs = pMsgCtx->pBatchs; + CTG_ERR_RET((*gCtgAsyncFps[pSub->type].launchFp)(pSub)); pSub->status = CTG_TASK_LAUNCHED; } 
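The hunks above repeat one preamble in every ctgLaunchGetXxxTask function: look up the task's SCtgMsgCtx and, if its pBatchs map is still NULL, point it at the job-level map. This replaces the pBatchs field that this patch removes from SCtgTask with per-message-context state. A minimal sketch of the helper this preamble could be factored into, assuming only the SCtgTask/SCtgJob/SCtgMsgCtx shapes visible in this patch; ctgEnsureTaskBatchs is a hypothetical name, not an existing catalog function:

```c
// Hypothetical helper (not part of this patch): lazily bind a task's
// per-message batch map to the job-level map created with the job.
// msgIdx == -1 selects the task-level SCtgMsgCtx, matching the calls above.
static FORCE_INLINE void ctgEnsureTaskBatchs(SCtgTask *pTask, int32_t msgIdx) {
  SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, msgIdx);
  if (NULL == pMsgCtx->pBatchs) {
    pMsgCtx->pBatchs = pTask->pJob->pBatchs;
  }
}
```

Moving the map from SCtgTask onto SCtgMsgCtx lets each fetch of a multi-fetch task (ctgLaunchGetTbMetasTask, ctgLaunchGetTbHashsTask) carry its own batch context, which is why those loops look the context up with the fetch index i instead of -1.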
@@ -2180,7 +2264,6 @@ int32_t ctgLaunchJob(SCtgJob *pJob) { for (int32_t i = 0; i < taskNum; ++i) { SCtgTask *pTask = taosArrayGet(pJob->pTasks, i); - pTask->pBatchs = pJob->pBatchs; qDebug("QID:0x%" PRIx64 " ctg launch [%dth] task", pJob->queryId, pTask->taskId); CTG_ERR_RET((*gCtgAsyncFps[pTask->type].launchFp)(pTask)); diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c index 8333cb28c0..bd3402dc39 100644 --- a/source/libs/catalog/src/ctgDbg.c +++ b/source/libs/catalog/src/ctgDbg.c @@ -19,7 +19,7 @@ #include "catalogInt.h" extern SCatalogMgmt gCtgMgmt; -SCtgDebug gCTGDebug = {.lockEnable = true}; +SCtgDebug gCTGDebug = {0}; void ctgdUserCallback(SMetaData* pResult, void* param, int32_t code) { ASSERT(*(int32_t*)param == 1); diff --git a/source/libs/catalog/src/ctgRemote.c b/source/libs/catalog/src/ctgRemote.c index a9f2d426bc..1fdf84e120 100644 --- a/source/libs/catalog/src/ctgRemote.c +++ b/source/libs/catalog/src/ctgRemote.c @@ -68,14 +68,14 @@ int32_t ctgHandleBatchRsp(SCtgJob* pJob, SCtgTaskCallbackParam* cbParam, SDataBu taskMsg.pData = NULL; taskMsg.len = 0; } - - pTask->pBatchs = pBatchs; SCtgTaskReq tReq; tReq.pTask = pTask; tReq.msgIdx = rsp.msgIdx; + SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq.msgIdx); + pMsgCtx->pBatchs = pBatchs; - ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s", pJob->queryId, pTask->taskId, rsp.msgIdx, TMSG_INFO(taskMsg.msgType + 1)); + ctgDebug("QID:0x%" PRIx64 " ctg task %d idx %d start to handle rsp %s, pBatchs: %p", pJob->queryId, pTask->taskId, rsp.msgIdx, TMSG_INFO(taskMsg.msgType + 1), pBatchs); (*gCtgAsyncFps[pTask->type].handleRspFp)(&tReq, rsp.reqType, &taskMsg, (rsp.rspCode ? rsp.rspCode : rspCode)); } @@ -343,7 +343,9 @@ int32_t ctgHandleMsgCallback(void* param, SDataBuf* pMsg, int32_t rspCode) { ctgError("taosHashInit %d batch failed", CTG_DEFAULT_BATCH_NUM); CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } - pTask->pBatchs = pBatchs; + + SCtgMsgCtx *pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, -1); + pMsgCtx->pBatchs = pBatchs; #endif SCtgTaskReq tReq; @@ -444,7 +446,8 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT uint32_t msgSize) { int32_t code = 0; SCtgTask* pTask = tReq->pTask; - SHashObj* pBatchs = pTask->pBatchs; + SCtgMsgCtx* pMsgCtx = CTG_GET_TASK_MSGCTX(pTask, tReq->msgIdx); + SHashObj* pBatchs = pMsgCtx->pBatchs; SCtgJob* pJob = pTask->pJob; SCtgBatch* pBatch = taosHashGet(pBatchs, &vgId, sizeof(vgId)); SCtgBatch newBatch = {0}; @@ -467,6 +470,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT if (NULL == taosArrayPush(newBatch.pMsgs, &req)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } + msg = NULL; if (NULL == taosArrayPush(newBatch.pTaskIds, &pTask->taskId)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } @@ -517,6 +521,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT if (NULL == taosArrayPush(pBatch->pMsgs, &req)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } + msg = NULL; if (NULL == taosArrayPush(pBatch->pTaskIds, &pTask->taskId)) { CTG_ERR_JRET(TSDB_CODE_OUT_OF_MEMORY); } @@ -545,7 +550,7 @@ int32_t ctgAddBatch(SCatalog* pCtg, int32_t vgId, SRequestConnInfo* pConn, SCtgT CTG_ERR_JRET(TSDB_CODE_APP_ERROR); } - tNameGetFullDbName(pName, newBatch.dbFName); + tNameGetFullDbName(pName, pBatch->dbFName); } ctgDebug("task %d %s req added to batch %d, target vgId %d", pTask->taskId, TMSG_INFO(msgType), pBatch->batchId, diff --git a/source/libs/catalog/src/ctgUtil.c 
b/source/libs/catalog/src/ctgUtil.c index 8e5fb90f1a..1ca60c89cd 100644 --- a/source/libs/catalog/src/ctgUtil.c +++ b/source/libs/catalog/src/ctgUtil.c @@ -438,6 +438,14 @@ void ctgFreeMsgCtx(SCtgMsgCtx* pCtx) { } } +void ctgFreeTbMetasMsgCtx(SCtgMsgCtx* pCtx) { + ctgFreeMsgCtx(pCtx); + if (pCtx->lastOut) { + ctgFreeSTableMetaOutput((STableMetaOutput*)pCtx->lastOut); + pCtx->lastOut = NULL; + } +} + void ctgFreeSTableMetaOutput(STableMetaOutput* pOutput) { if (NULL == pOutput) { return; @@ -641,7 +649,7 @@ void ctgFreeTaskCtx(SCtgTask* pTask) { taosArrayDestroy(taskCtx->pFetchs); // NO NEED TO FREE pNames - taosArrayDestroyEx(pTask->msgCtxs, (FDelete)ctgFreeMsgCtx); + taosArrayDestroyEx(pTask->msgCtxs, (FDelete)ctgFreeTbMetasMsgCtx); if (pTask->msgCtx.lastOut) { ctgFreeSTableMetaOutput((STableMetaOutput*)pTask->msgCtx.lastOut); diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h index 2ae4666ff6..09d2c9f7e1 100644 --- a/source/libs/command/inc/commandInt.h +++ b/source/libs/command/inc/commandInt.h @@ -58,6 +58,7 @@ extern "C" { #define EXPLAIN_RATIO_TIME_FORMAT "Ratio: %f" #define EXPLAIN_MERGE_FORMAT "Merge" #define EXPLAIN_MERGE_KEYS_FORMAT "Merge Key: " +#define EXPLAIN_IGNORE_GROUPID_FORMAT "Ignore Group Id: %s" #define EXPLAIN_PLANNING_TIME_FORMAT "Planning Time: %.3f ms" #define EXPLAIN_EXEC_TIME_FORMAT "Execution Time: %.3f ms" diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index c080b666cc..8442859627 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -612,6 +612,11 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i EXPLAIN_ROW_END(); QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + EXPLAIN_ROW_NEW(level + 1, EXPLAIN_OUTPUT_FORMAT); + EXPLAIN_ROW_APPEND(EXPLAIN_IGNORE_GROUPID_FORMAT, pPrjNode->ignoreGroupId ? 
"true" : "false"); + EXPLAIN_ROW_END(); + QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1)); + if (pPrjNode->node.pConditions) { EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT); QRY_ERR_RET(nodesNodeToSQL(pPrjNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, diff --git a/source/libs/executor/inc/executorimpl.h b/source/libs/executor/inc/executorimpl.h index 096d2b5b22..8439cf700d 100644 --- a/source/libs/executor/inc/executorimpl.h +++ b/source/libs/executor/inc/executorimpl.h @@ -732,18 +732,19 @@ typedef struct SStreamSessionAggOperatorInfo { } SStreamSessionAggOperatorInfo; typedef struct STimeSliceOperatorInfo { - SSDataBlock* pRes; - STimeWindow win; - SInterval interval; - int64_t current; - SArray* pPrevRow; // SArray - SArray* pNextRow; // SArray - bool isPrevRowSet; - bool isNextRowSet; - int32_t fillType; // fill type - SColumn tsCol; // primary timestamp column - SExprSupp scalarSup; // scalar calculation - struct SFillColInfo* pFillColInfo; // fill column info + SSDataBlock* pRes; + STimeWindow win; + SInterval interval; + int64_t current; + SArray* pPrevRow; // SArray + SArray* pNextRow; // SArray + SArray* pLinearInfo; // SArray + bool isPrevRowSet; + bool isNextRowSet; + int32_t fillType; // fill type + SColumn tsCol; // primary timestamp column + SExprSupp scalarSup; // scalar calculation + struct SFillColInfo* pFillColInfo; // fill column info } STimeSliceOperatorInfo; typedef struct SStateWindowOperatorInfo { diff --git a/source/libs/executor/inc/tfill.h b/source/libs/executor/inc/tfill.h index b604794dad..c2de48d0eb 100644 --- a/source/libs/executor/inc/tfill.h +++ b/source/libs/executor/inc/tfill.h @@ -33,11 +33,20 @@ typedef struct SFillColInfo { SVariant fillVal; } SFillColInfo; +typedef struct SFillLinearInfo { + SPoint start; + SPoint end; + bool hasNull; + bool fillLastPoint; + int16_t type; + int32_t bytes; +} SFillLinearInfo; + typedef struct { SSchema col; char* tagVal; } SFillTagColInfo; - + typedef struct SFillInfo { TSKEY start; // start timestamp TSKEY end; // endKey for fill diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c index cffd38c98a..7e77799702 100644 --- a/source/libs/executor/src/executorimpl.c +++ b/source/libs/executor/src/executorimpl.c @@ -1449,7 +1449,7 @@ static void doUpdateNumOfRows(SqlFunctionCtx* pCtx, SResultRow* pRow, int32_t nu pRow->numOfRows = pResInfo->numOfRes; } - if (fmIsReturnNotNullFunc(pCtx[j].functionId)) { + if (fmIsNotNullOutputFunc(pCtx[j].functionId)) { returnNotNull = true; } } diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index e96844f1e3..2f9eff50b6 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -256,7 +256,7 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t SArray* rightRowLocations = taosArrayInit(8, sizeof(SRowLocation)); SArray* rightCreatedBlocks = taosArrayInit(8, POINTER_BYTES); - + int32_t code = TSDB_CODE_SUCCESS; mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 0, pJoinInfo->leftCol.slotId, pJoinInfo->pLeft, pJoinInfo->leftPos, timestamp, leftRowLocations, leftCreatedBlocks); mergeJoinGetDownStreamRowsEqualTimeStamp(pOperator, 1, pJoinInfo->rightCol.slotId, pJoinInfo->pRight, @@ -264,14 +264,20 @@ static int32_t mergeJoinJoinDownstreamTsRanges(SOperatorInfo* pOperator, int64_t size_t leftNumJoin = taosArrayGetSize(leftRowLocations); size_t rightNumJoin = taosArrayGetSize(rightRowLocations); - for (int32_t i = 
0; i < leftNumJoin; ++i) { - for (int32_t j = 0; j < rightNumJoin; ++j) { - SRowLocation* leftRow = taosArrayGet(leftRowLocations, i); - SRowLocation* rightRow = taosArrayGet(rightRowLocations, j); - mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock, - rightRow->pos); - ++*nRows; - } + code = blockDataEnsureCapacity(pRes, *nRows + leftNumJoin * rightNumJoin); + if (code != TSDB_CODE_SUCCESS) { + qError("%s can not ensure block capacity for join. left: %zu, right: %zu", GET_TASKID(pOperator->pTaskInfo), leftNumJoin, rightNumJoin); + } + if (code == TSDB_CODE_SUCCESS) { + for (int32_t i = 0; i < leftNumJoin; ++i) { + for (int32_t j = 0; j < rightNumJoin; ++j) { + SRowLocation *leftRow = taosArrayGet(leftRowLocations, i); + SRowLocation *rightRow = taosArrayGet(rightRowLocations, j); + mergeJoinJoinLeftRight(pOperator, pRes, *nRows, leftRow->pDataBlock, leftRow->pos, rightRow->pDataBlock, + rightRow->pos); + ++*nRows; + } + } } for (int i = 0; i < taosArrayGetSize(rightCreatedBlocks); ++i) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index c71292733a..b21e937eb9 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2204,10 +2204,10 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { // build message and send to mnode to fetch the content of system tables. SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; SSysTableScanInfo* pInfo = pOperator->info; + char dbName[TSDB_DB_NAME_LEN] = {0}; const char* name = tNameGetTableName(&pInfo->name); if (pInfo->showRewrite) { - char dbName[TSDB_DB_NAME_LEN] = {0}; getDBNameFromCondition(pInfo->pCondition, dbName); sprintf(pInfo->req.db, "%d.%s", pInfo->accountId, dbName); } @@ -2217,7 +2217,7 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) { } else if (strncasecmp(name, TSDB_INS_TABLE_TAGS, TSDB_TABLE_FNAME_LEN) == 0) { return sysTableScanUserTags(pOperator); } else if (strncasecmp(name, TSDB_INS_TABLE_STABLES, TSDB_TABLE_FNAME_LEN) == 0 && - IS_SYS_DBNAME(pInfo->req.db)) { + pInfo->showRewrite && IS_SYS_DBNAME(dbName)) { return sysTableScanUserSTables(pOperator); } else { // load the meta from mnode of the given epset if (pOperator->status == OP_EXEC_DONE) { diff --git a/source/libs/executor/src/tfill.c b/source/libs/executor/src/tfill.c index c5d68676d2..bc266cc33e 100644 --- a/source/libs/executor/src/tfill.c +++ b/source/libs/executor/src/tfill.c @@ -669,4 +669,4 @@ SFillColInfo* createFillColInfo(SExprInfo* pExpr, int32_t numOfOutput, const str } return pFillCol; -} \ No newline at end of file +} diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index e7d9a8d8b2..51fea5ac24 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -2087,6 +2087,34 @@ static void doKeepNextRows(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock pSliceInfo->isNextRowSet = true; } +static void doKeepLinearInfo(STimeSliceOperatorInfo* pSliceInfo, const SSDataBlock* pBlock, int32_t rowIndex) { + int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pColInfoData = taosArrayGet(pBlock->pDataBlock, i); + SColumnInfoData* pTsCol = taosArrayGet(pBlock->pDataBlock, pSliceInfo->tsCol.slotId); + SFillLinearInfo* pLinearInfo = taosArrayGet(pSliceInfo->pLinearInfo, i); + + // null data should not be kept since it can not 
be used to perform interpolation + if (!colDataIsNull_s(pColInfoData, i)) { + int64_t startKey = *(int64_t*)colDataGetData(pTsCol, rowIndex); + int64_t endKey = *(int64_t*)colDataGetData(pTsCol, rowIndex + 1); + pLinearInfo->start.key = startKey; + pLinearInfo->end.key = endKey; + + char* val; + val = colDataGetData(pColInfoData, rowIndex); + memcpy(pLinearInfo->start.val, val, pLinearInfo->bytes); + val = colDataGetData(pColInfoData, rowIndex + 1); + memcpy(pLinearInfo->end.val, val, pLinearInfo->bytes); + + pLinearInfo->hasNull = false; + } else { + pLinearInfo->hasNull = true; + } + } + +} + static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp* pExprSup, SSDataBlock* pBlock, SSDataBlock* pResBlock) { int32_t rows = pResBlock->info.rows; @@ -2115,52 +2143,41 @@ static void genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) { float v = 0; GET_TYPED_DATA(v, float, pVar->nType, &pVar->i); - colDataAppend(pDst, rows, (char*)&v, false); + colDataAppend(pDst, rows, (char *)&v, false); } else if (pDst->info.type == TSDB_DATA_TYPE_DOUBLE) { double v = 0; GET_TYPED_DATA(v, double, pVar->nType, &pVar->i); - colDataAppend(pDst, rows, (char*)&v, false); + colDataAppend(pDst, rows, (char *)&v, false); } else if (IS_SIGNED_NUMERIC_TYPE(pDst->info.type)) { int64_t v = 0; GET_TYPED_DATA(v, int64_t, pVar->nType, &pVar->i); - colDataAppend(pDst, rows, (char*)&v, false); + colDataAppend(pDst, rows, (char *)&v, false); } pResBlock->info.rows += 1; break; } case TSDB_FILL_LINEAR: { -#if 0 - if (pCtx->start.key == INT64_MIN || pCtx->start.key > pCtx->startTs - || pCtx->end.key == INT64_MIN || pCtx->end.key < pCtx->startTs) { -// goto interp_exit; - } + SFillLinearInfo* pLinearInfo = taosArrayGet(pSliceInfo->pLinearInfo, srcSlot); - double v1 = -1, v2 = -1; - GET_TYPED_DATA(v1, double, pCtx->inputType, &pCtx->start.val); - GET_TYPED_DATA(v2, double, pCtx->inputType, &pCtx->end.val); + SPoint start = pLinearInfo->start; + SPoint end = pLinearInfo->end; + SPoint current = {.key = pSliceInfo->current}; + current.val = taosMemoryCalloc(pLinearInfo->bytes, 1); - SPoint point1 = {.key = ts, .val = &v1}; - SPoint point2 = {.key = nextTs, .val = &v2}; - SPoint point = {.key = pCtx->startTs, .val = pCtx->pOutput}; + // before interp range, do not fill + if (start.key == INT64_MIN || end.key == INT64_MAX) { + break; + } - int32_t srcType = pCtx->inputType; - if (isNull((char *)&pCtx->start.val, srcType) || isNull((char *)&pCtx->end.val, srcType)) { - setNull(pCtx->pOutput, srcType, pCtx->inputBytes); - } else { - bool exceedMax = false, exceedMin = false; - taosGetLinearInterpolationVal(&point, pCtx->outputType, &point1, &point2, TSDB_DATA_TYPE_DOUBLE, &exceedMax, &exceedMin); - if (exceedMax || exceedMin) { - __compar_fn_t func = getComparFunc((int32_t)pCtx->inputType, 0); - if (func(&pCtx->start.val, &pCtx->end.val) <= 0) { - COPY_TYPED_DATA(pCtx->pOutput, pCtx->inputType, exceedMax ? &pCtx->start.val : &pCtx->end.val); - } else { - COPY_TYPED_DATA(pCtx->pOutput, pCtx->inputType, exceedMax ? 
&pCtx->end.val : &pCtx->start.val); - } - } - } -#endif - // TODO: pResBlock->info.rows += 1;
+ if (pLinearInfo->hasNull) { + colDataAppendNULL(pDst, rows); + } else { + taosGetLinearInterpolationVal(&current, pLinearInfo->type, &start, &end, pLinearInfo->type); + colDataAppend(pDst, rows, (char *)current.val, false); + } + + pResBlock->info.rows += 1; break; } case TSDB_FILL_PREV: {
@@ -2246,6 +2263,55 @@ static int32_t initNextRowsKeeper(STimeSliceOperatorInfo* pInfo, SSDataBlock* pB return TSDB_CODE_SUCCESS; }
+static int32_t initFillLinearInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) { + if (pInfo->pLinearInfo != NULL) { + return TSDB_CODE_SUCCESS; + } + + pInfo->pLinearInfo = taosArrayInit(4, sizeof(SFillLinearInfo)); + if (pInfo->pLinearInfo == NULL) { + return TSDB_CODE_OUT_OF_MEMORY; + } + + int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); + for (int32_t i = 0; i < numOfCols; ++i) { + SColumnInfoData* pColInfo = taosArrayGet(pBlock->pDataBlock, i); + + SFillLinearInfo linearInfo = {0}; + linearInfo.start.key = INT64_MIN; + linearInfo.end.key = INT64_MAX; + linearInfo.start.val = taosMemoryCalloc(1, pColInfo->info.bytes); + linearInfo.end.val = taosMemoryCalloc(1, pColInfo->info.bytes); + linearInfo.hasNull = false; + linearInfo.fillLastPoint = false; + linearInfo.type = pColInfo->info.type; + linearInfo.bytes = pColInfo->info.bytes; + taosArrayPush(pInfo->pLinearInfo, &linearInfo); + } + + return TSDB_CODE_SUCCESS; +}
+ +static int32_t initKeeperInfo(STimeSliceOperatorInfo* pInfo, SSDataBlock* pBlock) { + int32_t code; + code = initPrevRowsKeeper(pInfo, pBlock); + if (code != TSDB_CODE_SUCCESS) { + return TSDB_CODE_FAILED; + } + + code = initNextRowsKeeper(pInfo, pBlock); + if (code != TSDB_CODE_SUCCESS) { + return TSDB_CODE_FAILED; + } + + code = initFillLinearInfo(pInfo, pBlock); + if (code != TSDB_CODE_SUCCESS) { + return TSDB_CODE_FAILED; + } + + return TSDB_CODE_SUCCESS; +} + static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { if (pOperator->status == OP_EXEC_DONE) { return NULL; }
@@ -2278,13 +2344,7 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { break; } - int32_t code; - code = initPrevRowsKeeper(pSliceInfo, pBlock); - if (code != TSDB_CODE_SUCCESS) { - longjmp(pTaskInfo->env, code); - } - - code = initNextRowsKeeper(pSliceInfo, pBlock); + int32_t code = initKeeperInfo(pSliceInfo, pBlock); if (code != TSDB_CODE_SUCCESS) { longjmp(pTaskInfo->env, code); }
@@ -2312,46 +2372,98 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { pResBlock->info.rows += 1; doKeepPrevRows(pSliceInfo, pBlock, i); - pSliceInfo->current = - taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); - if (pSliceInfo->current > pSliceInfo->win.ekey) { - doSetOperatorCompleted(pOperator); - break; - } + // for linear interpolation, always fill value between this and next points; + // if it's the first point in the data block, also fill values between the previous point (if there is any) and this point; + // if it's the last point in the data block, no need to fill, but reserve this point as the start value for the next data block.
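Before the TSDB_FILL_LINEAR branch that this hunk continues with below, it is worth spelling out the arithmetic the new code delegates to taosGetLinearInterpolationVal: given the two retained points in SFillLinearInfo, the slice value is read off the straight line between them. A self-contained sketch under that assumption; interpLinearF64 is a hypothetical illustration over doubles, while the real routine operates on typed column buffers:

```c
#include <stdint.h>

// Value at `key` on the line through (k0, v0) and (k1, v1); the keys are
// timestamps, and k0 != k1 is assumed (the caller keeps two distinct rows).
static double interpLinearF64(int64_t key, int64_t k0, double v0, int64_t k1, double v1) {
  double ratio = (double)(key - k0) / (double)(k1 - k0);
  return v0 + (v1 - v0) * ratio;
}

// e.g. interpLinearF64(1500, 1000, 10.0, 2000, 20.0) == 15.0
```

This is also why the branch bails out while start.key is still INT64_MIN or end.key is still INT64_MAX (no second point kept yet) and appends NULL when an endpoint was NULL: a line needs two real endpoints.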
+ if (pSliceInfo->fillType == TSDB_FILL_LINEAR) { + doKeepLinearInfo(pSliceInfo, pBlock, i); + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (i < pBlock->info.rows - 1) { + int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); + if (nextTs > pSliceInfo->current) { + while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { + genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock); + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (pResBlock->info.rows >= pResBlock->info.capacity) { + break; + } + } - if (pResBlock->info.rows >= pResBlock->info.capacity) { - break; - } - } else if (ts < pSliceInfo->current) { - // in case interpolation window starts and ends between two datapoints, fill(prev) need to interpolate - doKeepPrevRows(pSliceInfo, pBlock, i); - - if (i < pBlock->info.rows - 1) { - // in case interpolation window starts and ends between two datapoints, fill(next) need to interpolate - doKeepNextRows(pSliceInfo, pBlock, i + 1); - int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); - if (nextTs > pSliceInfo->current) { - while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { - genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock); - pSliceInfo->current = - taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); - if (pResBlock->info.rows >= pResBlock->info.capacity) { + if (pSliceInfo->current > pSliceInfo->win.ekey) { + doSetOperatorCompleted(pOperator); break; } } - - if (pSliceInfo->current > pSliceInfo->win.ekey) { - doSetOperatorCompleted(pOperator); - break; - } - } else { - // ignore current row, and do nothing } - } else { // it is the last row of current block - doKeepPrevRows(pSliceInfo, pBlock, i); + } else { // non-linear interpolation + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (pSliceInfo->current > pSliceInfo->win.ekey) { + doSetOperatorCompleted(pOperator); + break; + } + + if (pResBlock->info.rows >= pResBlock->info.capacity) { + break; + } + } + } else if (ts < pSliceInfo->current) { + // in case of interpolation window starts and ends between two datapoints, fill(prev) need to interpolate + doKeepPrevRows(pSliceInfo, pBlock, i); + + if (pSliceInfo->fillType == TSDB_FILL_LINEAR) { + doKeepLinearInfo(pSliceInfo, pBlock, i); + // no need to increate pSliceInfo->current here + //pSliceInfo->current = + // taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (i < pBlock->info.rows - 1) { + int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); + if (nextTs > pSliceInfo->current) { + while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { + genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock); + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (pResBlock->info.rows >= pResBlock->info.capacity) { + break; + } + } + + if (pSliceInfo->current > pSliceInfo->win.ekey) { + doSetOperatorCompleted(pOperator); + break; + } + } + } + } else { // non-linear interpolation + if (i < pBlock->info.rows - 1) { + // in case of interpolation window starts and ends between two datapoints, 
fill(next) need to interpolate + doKeepNextRows(pSliceInfo, pBlock, i + 1); + int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); + if (nextTs > pSliceInfo->current) { + while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { + genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock); + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (pResBlock->info.rows >= pResBlock->info.capacity) { + break; + } + } + + if (pSliceInfo->current > pSliceInfo->win.ekey) { + doSetOperatorCompleted(pOperator); + break; + } + } else { + // ignore current row, and do nothing + } + } else { // it is the last row of current block + doKeepPrevRows(pSliceInfo, pBlock, i); + } } } else { // ts > pSliceInfo->current - // in case interpolation window starts and ends between two datapoints, fill(next) need to interpolate + // in case of interpolation window starts and ends between two datapoints, fill(next) need to interpolate doKeepNextRows(pSliceInfo, pBlock, i); while (pSliceInfo->current < ts && pSliceInfo->current <= pSliceInfo->win.ekey) { @@ -2380,11 +2492,36 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { pResBlock->info.rows += 1; doKeepPrevRows(pSliceInfo, pBlock, i); - pSliceInfo->current = - taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); - if (pResBlock->info.rows >= pResBlock->info.capacity) { - break; + if (pSliceInfo->fillType == TSDB_FILL_LINEAR) { + doKeepLinearInfo(pSliceInfo, pBlock, i); + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (i < pBlock->info.rows - 1) { + int64_t nextTs = *(int64_t*)colDataGetData(pTsCol, i + 1); + if (nextTs > pSliceInfo->current) { + while (pSliceInfo->current < nextTs && pSliceInfo->current <= pSliceInfo->win.ekey) { + genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock); + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + if (pResBlock->info.rows >= pResBlock->info.capacity) { + break; + } + } + + if (pSliceInfo->current > pSliceInfo->win.ekey) { + doSetOperatorCompleted(pOperator); + break; + } + } + } + } else { // non-linear interpolation + pSliceInfo->current = + taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); + + if (pResBlock->info.rows >= pResBlock->info.capacity) { + break; + } } } @@ -2396,8 +2533,8 @@ static SSDataBlock* doTimeslice(SOperatorInfo* pOperator) { } // check if need to interpolate after ts range - // except for fill(next) - while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT) { + // except for fill(next), fill(linear) + while (pSliceInfo->current <= pSliceInfo->win.ekey && pSliceInfo->fillType != TSDB_FILL_NEXT && pSliceInfo->fillType != TSDB_FILL_LINEAR) { genInterpolationResult(pSliceInfo, &pOperator->exprSupp, pBlock, pResBlock); pSliceInfo->current = taosTimeAdd(pSliceInfo->current, pInterval->interval, pInterval->intervalUnit, pInterval->precision); @@ -2446,6 +2583,9 @@ SOperatorInfo* createTimeSliceOperatorInfo(SOperatorInfo* downstream, SPhysiNode pInfo->fillType = convertFillType(pInterpPhyNode->fillMode); initResultSizeInfo(&pOperator->resultInfo, 4096); + pInfo->pPrevRow = NULL; + pInfo->pNextRow = NULL; + pInfo->pLinearInfo = NULL; pInfo->pFillColInfo = 
createFillColInfo(pExprInfo, numOfExprs, (SNodeListNode*)pInterpPhyNode->pFillValues); pInfo->pRes = createResDataBlock(pPhyNode->pOutputDataBlockDesc); pInfo->win = pInterpPhyNode->timeRange; diff --git a/source/libs/function/src/builtinsimpl.c b/source/libs/function/src/builtinsimpl.c index e3e98a6895..9aa7c11fce 100644 --- a/source/libs/function/src/builtinsimpl.c +++ b/source/libs/function/src/builtinsimpl.c @@ -3845,14 +3845,17 @@ int32_t spreadFunctionMerge(SqlFunctionCtx* pCtx) { SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); int32_t start = pInput->startRowIndex; - for (int32_t i = start; i < start + pInput->numOfRows; ++i) { char* data = colDataGetData(pCol, i); SSpreadInfo* pInputInfo = (SSpreadInfo*)varDataVal(data); - spreadTransferInfo(pInputInfo, pInfo); + if (pInputInfo->hasResult) { + spreadTransferInfo(pInputInfo, pInfo); + } } - SET_VAL(GET_RES_INFO(pCtx), 1, 1); + if (pInfo->hasResult) { + GET_RES_INFO(pCtx)->numOfRes = 1; + } return TSDB_CODE_SUCCESS; } @@ -3861,6 +3864,8 @@ int32_t spreadFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) { SSpreadInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx)); if (pInfo->hasResult == true) { SET_DOUBLE_VAL(&pInfo->result, pInfo->max - pInfo->min); + } else { + GET_RES_INFO(pCtx)->isNullRes = 1; } return functionFinalize(pCtx, pBlock); } diff --git a/source/libs/function/src/functionMgt.c b/source/libs/function/src/functionMgt.c index 88efa7c2ff..0235f22f55 100644 --- a/source/libs/function/src/functionMgt.c +++ b/source/libs/function/src/functionMgt.c @@ -221,7 +221,7 @@ bool fmIsLastRowFunc(int32_t funcId) { return FUNCTION_TYPE_LAST_ROW == funcMgtBuiltins[funcId].type; } -bool fmIsReturnNotNullFunc(int32_t funcId) { +bool fmIsNotNullOutputFunc(int32_t funcId) { if (funcId < 0 || funcId >= funcMgtBuiltinsNum) { return false; } diff --git a/source/libs/index/src/indexFilter.c b/source/libs/index/src/indexFilter.c index 6dfdbf6840..21aeaba70b 100644 --- a/source/libs/index/src/indexFilter.c +++ b/source/libs/index/src/indexFilter.c @@ -358,9 +358,101 @@ static Filter sifGetFilterFunc(EIndexQueryType type, bool *reverse) { } return NULL; } -static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) { - int ret = 0; +typedef union { + uint8_t u8; + uint16_t u16; + uint32_t u32; + uint64_t u64; + int8_t i8; + int16_t i16; + int32_t i32; + int64_t i64; + + double d; + float f; +} SDataTypeBuf; + +#define SIF_DATA_CONVERT(type, val, dst) \ + do { \ + if (type == TSDB_DATA_TYPE_DOUBLE) \ + dst = GET_DOUBLE_VAL(val); \ + else if (type == TSDB_DATA_TYPE_BIGINT) \ + dst = *(int64_t *)val; \ + else if (type == TSDB_DATA_TYPE_INT) \ + dst = *(int32_t *)val; \ + else if (type == TSDB_DATA_TYPE_SMALLINT) \ + dst = *(int16_t *)val; \ + else if (type == TSDB_DATA_TYPE_TINYINT) \ + dst = *(int8_t *)val; \ + else if (type == TSDB_DATA_TYPE_UTINYINT) \ + dst = *(uint8_t *)val; \ + else if (type == TSDB_DATA_TYPE_USMALLINT) \ + dst = *(uint16_t *)val; \ + else if (type == TSDB_DATA_TYPE_UINT) \ + dst = *(uint32_t *)val; \ + else if (type == TSDB_DATA_TYPE_UBIGINT) \ + dst = *(uint64_t *)val; \ + } while (0); + +static void sifSetFltParam(SIFParam *left, SIFParam *right, SDataTypeBuf *typedata, SMetaFltParam *param) { + int8_t ltype = left->colValType, rtype = right->colValType; + if (ltype == TSDB_DATA_TYPE_FLOAT) { + float f; + SIF_DATA_CONVERT(rtype, right->condValue, f); + typedata->f = f; + param->val = &typedata->f; + } else if (ltype == TSDB_DATA_TYPE_DOUBLE) { + double d; + 
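/* SIF_DATA_CONVERT (defined above) widens the right-hand operand from whatever numeric type it carries into the left column's type before the index filter runs */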
SIF_DATA_CONVERT(rtype, right->condValue, d); + typedata->d = d; + param->val = &typedata->d; + } else if (ltype == TSDB_DATA_TYPE_BIGINT) { + int64_t i64; + SIF_DATA_CONVERT(rtype, right->condValue, i64); + typedata->i64 = i64; + param->val = &typedata->i64; + } else if (ltype == TSDB_DATA_TYPE_INT) { + int32_t i32; + SIF_DATA_CONVERT(rtype, right->condValue, i32); + typedata->i32 = i32; + param->val = &typedata->i32; + } else if (ltype == TSDB_DATA_TYPE_SMALLINT) { + int16_t i16; + + SIF_DATA_CONVERT(rtype, right->condValue, i16); + typedata->i16 = i16; + param->val = &typedata->i16; + } else if (ltype == TSDB_DATA_TYPE_TINYINT) { + int8_t i8; + SIF_DATA_CONVERT(rtype, right->condValue, i8) + typedata->i8 = i8; + param->val = &typedata->i8; + } else if (ltype == TSDB_DATA_TYPE_UBIGINT) { + uint64_t u64; + SIF_DATA_CONVERT(rtype, right->condValue, u64); + typedata->u64 = u64; + param->val = &typedata->u64; + + } else if (ltype == TSDB_DATA_TYPE_UINT) { + uint32_t u32; + SIF_DATA_CONVERT(rtype, right->condValue, u32); + typedata->u32 = u32; + param->val = &typedata->u32; + } else if (ltype == TSDB_DATA_TYPE_USMALLINT) { + uint16_t u16; + SIF_DATA_CONVERT(rtype, right->condValue, u16); + typedata->u16 = u16; + param->val = &typedata->u16; + } else if (ltype == TSDB_DATA_TYPE_UTINYINT) { + uint8_t u8; + SIF_DATA_CONVERT(rtype, right->condValue, u8); + typedata->u8 = u8; + param->val = &typedata->u8; + } +} +static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFParam *output) { + int ret = 0; SIndexMetaArg *arg = &output->arg; EIndexQueryType qtype = 0; SIF_ERR_RET(sifGetFuncFromSql(operType, &qtype)); @@ -385,9 +477,10 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP .reverse = reverse, .filterFunc = filterFunc}; - char buf[128] = {0}; - float f = 0.0; - double d = 0.0; + char buf[128] = {0}; + + SDataTypeBuf typedata; + memset(&typedata, 0, sizeof(typedata)); if (IS_VAR_DATA_TYPE(left->colValType)) { if (!IS_VAR_DATA_TYPE(right->colValType)) { NUM_TO_STRING(right->colValType, right->condValue, sizeof(buf) - 2, buf + VARSTR_HEADER_SIZE); @@ -395,26 +488,7 @@ static int32_t sifDoIndex(SIFParam *left, SIFParam *right, int8_t operType, SIFP param.val = buf; } } else { - if (left->colValType == TSDB_DATA_TYPE_FLOAT) { - if (right->colValType == TSDB_DATA_TYPE_DOUBLE) { - f = GET_DOUBLE_VAL(right->condValue); - param.val = &f; - } else if (right->colValType == TSDB_DATA_TYPE_BIGINT) { - f = *(int64_t *)(right->condValue); - param.val = &f; - } else { - f = *(int32_t *)(right->condValue); - param.val = &f; - } - } else if (left->colValType == TSDB_DATA_TYPE_DOUBLE) { - if (right->colValType == TSDB_DATA_TYPE_DOUBLE) { - d = GET_DOUBLE_VAL(right->condValue); - param.val = &d; - } else if (right->colValType == TSDB_DATA_TYPE_BIGINT) { - d = *(int64_t *)(right->condValue); - param.val = &d; - } - } + sifSetFltParam(left, right, &typedata, ¶m); } ret = metaFilterTableIds(arg->metaEx, ¶m, output->result); } diff --git a/source/libs/nodes/src/nodesCloneFuncs.c b/source/libs/nodes/src/nodesCloneFuncs.c index 79ef18eeb6..c4f3dbb9bd 100644 --- a/source/libs/nodes/src/nodesCloneFuncs.c +++ b/source/libs/nodes/src/nodesCloneFuncs.c @@ -390,6 +390,7 @@ static int32_t logicProjectCopy(const SProjectLogicNode* pSrc, SProjectLogicNode COPY_BASE_OBJECT_FIELD(node, logicNodeCopy); CLONE_NODE_LIST_FIELD(pProjections); COPY_CHAR_ARRAY_FIELD(stmtName); + COPY_SCALAR_FIELD(ignoreGroupId); return TSDB_CODE_SUCCESS; } diff --git 
a/source/libs/nodes/src/nodesCodeFuncs.c b/source/libs/nodes/src/nodesCodeFuncs.c index 9d15b01acf..9a6d4f64ca 100644 --- a/source/libs/nodes/src/nodesCodeFuncs.c +++ b/source/libs/nodes/src/nodesCodeFuncs.c @@ -655,6 +655,7 @@ static int32_t jsonToLogicScanNode(const SJson* pJson, void* pObj) { } static const char* jkProjectLogicPlanProjections = "Projections"; +static const char* jkProjectLogicPlanIgnoreGroupId = "IgnoreGroupId"; static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) { const SProjectLogicNode* pNode = (const SProjectLogicNode*)pObj; @@ -663,6 +664,9 @@ static int32_t logicProjectNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = nodeListToJson(pJson, jkProjectLogicPlanProjections, pNode->pProjections); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddIntegerToObject(pJson, jkProjectLogicPlanIgnoreGroupId, pNode->ignoreGroupId); + } return code; } @@ -674,6 +678,9 @@ static int32_t jsonToLogicProjectNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = jsonToNodeList(pJson, jkProjectLogicPlanProjections, &pNode->pProjections); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkProjectLogicPlanIgnoreGroupId, &pNode->ignoreGroupId); + } return code; } @@ -1689,6 +1696,7 @@ static int32_t jsonToPhysiSysTableScanNode(const SJson* pJson, void* pObj) { static const char* jkProjectPhysiPlanProjections = "Projections"; static const char* jkProjectPhysiPlanMergeDataBlock = "MergeDataBlock"; +static const char* jkProjectPhysiPlanIgnoreGroupId = "IgnoreGroupId"; static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) { const SProjectPhysiNode* pNode = (const SProjectPhysiNode*)pObj; @@ -1700,6 +1708,9 @@ static int32_t physiProjectNodeToJson(const void* pObj, SJson* pJson) { if (TSDB_CODE_SUCCESS == code) { code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanMergeDataBlock, pNode->mergeDataBlock); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonAddBoolToObject(pJson, jkProjectPhysiPlanIgnoreGroupId, pNode->ignoreGroupId); + } return code; } @@ -1714,6 +1725,9 @@ static int32_t jsonToPhysiProjectNode(const SJson* pJson, void* pObj) { if (TSDB_CODE_SUCCESS == code) { code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanMergeDataBlock, &pNode->mergeDataBlock); } + if (TSDB_CODE_SUCCESS == code) { + code = tjsonGetBoolValue(pJson, jkProjectPhysiPlanIgnoreGroupId, &pNode->ignoreGroupId); + } return code; } diff --git a/source/libs/nodes/src/nodesUtilFuncs.c b/source/libs/nodes/src/nodesUtilFuncs.c index 6f71b58aef..cbb0e8e59b 100644 --- a/source/libs/nodes/src/nodesUtilFuncs.c +++ b/source/libs/nodes/src/nodesUtilFuncs.c @@ -392,6 +392,9 @@ static void destroyDataSinkNode(SDataSinkNode* pNode) { nodesDestroyNode((SNode* static void destroyExprNode(SExprNode* pExpr) { taosArrayDestroy(pExpr->pAssociation); } static void destroyTableCfg(STableCfg* pCfg) { + if (NULL == pCfg) { + return; + } taosArrayDestroy(pCfg->pFuncs); taosMemoryFree(pCfg->pComment); taosMemoryFree(pCfg->pSchemas); diff --git a/source/libs/parser/src/parAstCreater.c b/source/libs/parser/src/parAstCreater.c index d19c203ffe..aa526dd440 100644 --- a/source/libs/parser/src/parAstCreater.c +++ b/source/libs/parser/src/parAstCreater.c @@ -819,6 +819,10 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) { pOptions->numOfVgroups = TSDB_DEFAULT_VN_PER_DB; pOptions->singleStable = TSDB_DEFAULT_DB_SINGLE_STABLE; pOptions->schemaless = TSDB_DEFAULT_DB_SCHEMALESS; + pOptions->walRetentionPeriod = 
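/* CREATE DATABASE pre-fills the new WAL options with real defaults; the alter path below uses -1 to mean "not specified" */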
TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD; + pOptions->walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE; + pOptions->walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD; + pOptions->walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE; return (SNode*)pOptions; } @@ -846,6 +850,10 @@ SNode* createAlterDatabaseOptions(SAstCreateContext* pCxt) { pOptions->numOfVgroups = -1; pOptions->singleStable = -1; pOptions->schemaless = -1; + pOptions->walRetentionPeriod = -1; + pOptions->walRetentionSize = -1; + pOptions->walRollPeriod = -1; + pOptions->walSegmentSize = -1; return (SNode*)pOptions; } diff --git a/source/libs/parser/src/parAstParser.c b/source/libs/parser/src/parAstParser.c index f5f44da9db..08fcdcb0cb 100644 --- a/source/libs/parser/src/parAstParser.c +++ b/source/libs/parser/src/parAstParser.c @@ -339,6 +339,11 @@ static int32_t collectMetaKeyFromShowBnodes(SCollectMetaKeyCxt* pCxt, SShowStmt* pCxt->pMetaCache); } +static int32_t collectMetaKeyFromShowCluster(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { + return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, + pCxt->pMetaCache); +} + static int32_t collectMetaKeyFromShowDatabases(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) { return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_DATABASES, pCxt->pMetaCache); @@ -547,6 +552,8 @@ static int32_t collectMetaKeyFromQuery(SCollectMetaKeyCxt* pCxt, SNode* pStmt) { return collectMetaKeyFromShowSnodes(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_BNODES_STMT: return collectMetaKeyFromShowBnodes(pCxt, (SShowStmt*)pStmt); + case QUERY_NODE_SHOW_CLUSTER_STMT: + return collectMetaKeyFromShowCluster(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_DATABASES_STMT: return collectMetaKeyFromShowDatabases(pCxt, (SShowStmt*)pStmt); case QUERY_NODE_SHOW_FUNCTIONS_STMT: diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c index 85f73f0663..fa86cfb5b5 100644 --- a/source/libs/parser/src/parInsert.c +++ b/source/libs/parser/src/parInsert.c @@ -1537,6 +1537,13 @@ int32_t parseInsertSql(SParseContext* pContext, SQuery** pQuery, SParseMetaCache if (pContext->pStmtCb && *pQuery) { (*pContext->pStmtCb->getExecInfoFn)(pContext->pStmtCb->pStmt, &context.pVgroupsHashObj, &context.pTableBlockHashObj); + if (NULL == context.pVgroupsHashObj) { + context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); + } + if (NULL == context.pTableBlockHashObj) { + context.pTableBlockHashObj = + taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK); + } } else { context.pVgroupsHashObj = taosHashInit(128, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_NO_LOCK); context.pTableBlockHashObj = diff --git a/source/libs/parser/src/parInsertData.c b/source/libs/parser/src/parInsertData.c index ae123a3563..8fbed3853c 100644 --- a/source/libs/parser/src/parInsertData.c +++ b/source/libs/parser/src/parInsertData.c @@ -525,7 +525,7 @@ static int sortMergeDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* p SBlockRowMerger** ppBlkRowMerger) { SSubmitBlk* pBlocks = (SSubmitBlk*)dataBuf->pData; STableMeta* pTableMeta = dataBuf->pTableMeta; - int16_t nRows = pBlocks->numOfRows; + int32_t nRows = pBlocks->numOfRows; // size is less than the total size, since duplicated rows may be removed. 
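Both row counters in this function are widened for the same reason: the row count copied out of `SSubmitBlk` can exceed `INT16_MAX`, and a 16-bit local silently wraps, leaving the dedup loop in the next hunk with a wrong (possibly negative) bound. A standalone illustration of the truncation (not TDengine code):

```c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int32_t numOfRows = 40000;              /* a block larger than INT16_MAX */
  int16_t nRowsOld = (int16_t)numOfRows;  /* old declaration: wraps to -25536 */
  int32_t nRowsNew = numOfRows;           /* widened declaration: exact */

  /* a while (n < nRows) loop bounded by the wrapped value never runs */
  printf("int16_t bound: %d, int32_t bound: %d\n", nRowsOld, nRowsNew);
  return 0;
}
```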
@@ -546,7 +546,7 @@ static int sortMergeDataBlockDupRows(STableDataBlocks* dataBuf, SBlockKeyInfo* p int32_t extendedRowSize = getExtendedRowSize(dataBuf); SBlockKeyTuple* pBlkKeyTuple = pBlkKeyInfo->pKeyTuple; char* pBlockData = pBlocks->data + pBlocks->schemaLen; - int n = 0; + int32_t n = 0; while (n < nRows) { pBlkKeyTuple->skey = TD_ROW_KEY((STSRow*)pBlockData); pBlkKeyTuple->payloadAddr = pBlockData; diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 9971f20d3d..5c49a6e0ab 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -2515,7 +2515,7 @@ static bool isPartitionByTbname(SNodeList* pPartitionByList) { return false; } SNode* pPartKey = nodesListGetNode(pPartitionByList, 0); - return QUERY_NODE_FUNCTION != nodeType(pPartKey) || FUNCTION_TYPE_TBNAME != ((SFunctionNode*)pPartKey)->funcType; + return QUERY_NODE_FUNCTION == nodeType(pPartKey) && FUNCTION_TYPE_TBNAME == ((SFunctionNode*)pPartKey)->funcType; } static int32_t checkStateWindowForStream(STranslateContext* pCxt, SSelectStmt* pSelect) { @@ -2566,7 +2566,6 @@ static int32_t translateWindow(STranslateContext* pCxt, SSelectStmt* pSelect) { if (NULL == pSelect->pWindow) { return TSDB_CODE_SUCCESS; } - pSelect->isTimeLineResult = true; pCxt->currClause = SQL_CLAUSE_WINDOW; int32_t code = translateExpr(pCxt, &pSelect->pWindow); if (TSDB_CODE_SUCCESS == code) { @@ -2637,7 +2636,6 @@ static int32_t translatePartitionBy(STranslateContext* pCxt, SSelectStmt* pSelec if (NULL == pSelect->pPartitionByList) { return TSDB_CODE_SUCCESS; } - pSelect->isTimeLineResult = false; pCxt->currClause = SQL_CLAUSE_PARTITION_BY; return translateExprList(pCxt, pSelect->pPartitionByList); } @@ -4708,6 +4706,12 @@ static int32_t translateKillTransaction(STranslateContext* pCxt, SKillStmt* pStm return buildCmdMsg(pCxt, TDMT_MND_KILL_TRANS, (FSerializeFunc)tSerializeSKillTransReq, &killReq); } +static bool crossTableWithoutAggOper(SSelectStmt* pSelect) { + return NULL == pSelect->pWindow && !pSelect->hasAggFuncs && !pSelect->hasIndefiniteRowsFunc && + !pSelect->hasInterpFunc && TSDB_SUPER_TABLE == ((SRealTableNode*)pSelect->pFromTable)->pMeta->tableType && + !isPartitionByTbname(pSelect->pPartitionByList); +} + static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pStmt) { if (NULL != pStmt->pOptions->pWatermark && (DEAL_RES_ERROR == translateValue(pCxt, (SValueNode*)pStmt->pOptions->pWatermark))) { @@ -4723,14 +4727,12 @@ static int32_t checkCreateStream(STranslateContext* pCxt, SCreateStreamStmt* pSt return TSDB_CODE_SUCCESS; } - if (QUERY_NODE_SELECT_STMT == nodeType(pStmt->pQuery)) { - SSelectStmt* pSelect = (SSelectStmt*)pStmt->pQuery; - if (QUERY_NODE_REAL_TABLE == nodeType(pSelect->pFromTable)) { - return TSDB_CODE_SUCCESS; - } + if (QUERY_NODE_SELECT_STMT != nodeType(pStmt->pQuery) || + QUERY_NODE_REAL_TABLE != nodeType(((SSelectStmt*)pStmt->pQuery)->pFromTable)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); } - return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); + return TSDB_CODE_SUCCESS; } static void getSourceDatabase(SNode* pStmt, int32_t acctId, char* pDbFName) { @@ -4759,12 +4761,23 @@ static int32_t addWstartTsToCreateStreamQuery(SNode* pStmt) { return code; } +static int32_t checkStreamQuery(STranslateContext* pCxt, SSelectStmt* pSelect) { + if (TSDB_DATA_TYPE_TIMESTAMP != 
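/* the first projection must be a timestamp for the result to be a valid stream */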
((SExprNode*)nodesListGetNode(pSelect->pProjectionList, 0))->resType.type || + !pSelect->isTimeLineResult || crossTableWithoutAggOper(pSelect)) { + return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_STREAM_QUERY, "Unsupported stream query"); + } + return TSDB_CODE_SUCCESS; +} + static int32_t buildCreateStreamQuery(STranslateContext* pCxt, SNode* pStmt, SCMCreateStreamReq* pReq) { pCxt->createStream = true; int32_t code = addWstartTsToCreateStreamQuery(pStmt); if (TSDB_CODE_SUCCESS == code) { code = translateQuery(pCxt, pStmt); } + if (TSDB_CODE_SUCCESS == code) { + code = checkStreamQuery(pCxt, (SSelectStmt*)pStmt); + } if (TSDB_CODE_SUCCESS == code) { getSourceDatabase(pStmt, pCxt->pParseCxt->acctId, pReq->sourceDB); code = nodesNodeToString(pStmt, false, &pReq->ast, NULL); diff --git a/source/libs/parser/test/mockCatalog.cpp b/source/libs/parser/test/mockCatalog.cpp index 6834f9ccca..ad491af105 100644 --- a/source/libs/parser/test/mockCatalog.cpp +++ b/source/libs/parser/test/mockCatalog.cpp @@ -119,6 +119,12 @@ void generateInformationSchema(MockCatalogService* mcs) { .addColumn("dnode_id", TSDB_DATA_TYPE_INT); builder.done(); } + { + ITableBuilder& builder = + mcs->createTableBuilder(TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CLUSTER, TSDB_SYSTEM_TABLE, 1) + .addColumn("id", TSDB_DATA_TYPE_BIGINT); + builder.done(); + } } void generatePerformanceSchema(MockCatalogService* mcs) { diff --git a/source/libs/parser/test/parInitialCTest.cpp b/source/libs/parser/test/parInitialCTest.cpp index b513ff57ed..5cf6c30b31 100644 --- a/source/libs/parser/test/parInitialCTest.cpp +++ b/source/libs/parser/test/parInitialCTest.cpp @@ -111,6 +111,10 @@ TEST_F(ParserInitialCTest, createDatabase) { expect.numOfVgroups = TSDB_DEFAULT_VN_PER_DB; expect.numOfStables = TSDB_DEFAULT_DB_SINGLE_STABLE; expect.schemaless = TSDB_DEFAULT_DB_SCHEMALESS; + expect.walRetentionPeriod = TSDB_DEFAULT_DB_WAL_RETENTION_PERIOD; + expect.walRetentionSize = TSDB_DEFAULT_DB_WAL_RETENTION_SIZE; + expect.walRollPeriod = TSDB_DEFAULT_DB_WAL_ROLL_PERIOD; + expect.walSegmentSize = TSDB_DEFAULT_DB_WAL_SEGMENT_SIZE; }; auto setDbBufferFunc = [&](int32_t buffer) { expect.buffer = buffer; }; diff --git a/source/libs/parser/test/parShowToUse.cpp b/source/libs/parser/test/parShowToUse.cpp index 7af6d8350c..6590378565 100644 --- a/source/libs/parser/test/parShowToUse.cpp +++ b/source/libs/parser/test/parShowToUse.cpp @@ -25,6 +25,15 @@ class ParserShowToUseTest : public ParserDdlTest {}; // todo SHOW apps // todo SHOW connections +TEST_F(ParserShowToUseTest, showCluster) { + useDb("root", "test"); + + setCheckDdlFunc( + [&](const SQuery* pQuery, ParserStage stage) { ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_SELECT_STMT); }); + + run("SHOW CLUSTER"); +} + TEST_F(ParserShowToUseTest, showConsumers) { useDb("root", "test"); diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 0762b28188..e37595f34a 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -865,6 +865,7 @@ static int32_t createProjectLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSel TSWAP(pProject->node.pLimit, pSelect->pLimit); TSWAP(pProject->node.pSlimit, pSelect->pSlimit); + pProject->ignoreGroupId = (NULL == pSelect->pPartitionByList); pProject->node.groupAction = (!pSelect->isSubquery && pCxt->pPlanCxt->streamQuery) ? 
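/* only a top-level stream query keeps group ids through the projection */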
GROUP_ACTION_KEEP : GROUP_ACTION_CLEAR; pProject->node.requireDataOrder = DATA_ORDER_LEVEL_NONE; @@ -1078,6 +1079,7 @@ static int32_t createSetOpProjectLogicNode(SLogicPlanContext* pCxt, SSetOperator if (NULL == pSetOperator->pOrderByList) { TSWAP(pProject->node.pLimit, pSetOperator->pLimit); } + pProject->ignoreGroupId = true; int32_t code = TSDB_CODE_SUCCESS; diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 6d70aa60d5..5eeb0551d3 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -998,6 +998,7 @@ static int32_t createProjectPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pChild } pProject->mergeDataBlock = projectCanMergeDataBlock(pProjectLogicNode); + pProject->ignoreGroupId = pProjectLogicNode->ignoreGroupId; int32_t code = TSDB_CODE_SUCCESS; if (0 == LIST_LENGTH(pChildren)) { diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index e10f0586ca..97977878ad 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -268,7 +268,7 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) { case QUERY_NODE_LOGIC_PLAN_JOIN: return stbSplNeedSplitJoin(streamQuery, (SJoinLogicNode*)pNode); case QUERY_NODE_LOGIC_PLAN_PARTITION: - return stbSplIsMultiTbScanChild(streamQuery, pNode); + return streamQuery ? false : stbSplIsMultiTbScanChild(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_AGG: return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(streamQuery, pNode); case QUERY_NODE_LOGIC_PLAN_WINDOW: diff --git a/source/libs/qcom/src/queryUtil.c b/source/libs/qcom/src/queryUtil.c index 4cad6a078b..41333e7756 100644 --- a/source/libs/qcom/src/queryUtil.c +++ b/source/libs/qcom/src/queryUtil.c @@ -415,7 +415,7 @@ int32_t cloneTableMeta(STableMeta* pSrc, STableMeta** pDst) { return TSDB_CODE_SUCCESS; } - int32_t metaSize = (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema); + int32_t metaSize = sizeof(STableMeta) + (pSrc->tableInfo.numOfColumns + pSrc->tableInfo.numOfTags) * sizeof(SSchema); *pDst = taosMemoryMalloc(metaSize); if (NULL == *pDst) { return TSDB_CODE_TSC_OUT_OF_MEMORY; diff --git a/source/libs/scheduler/inc/schInt.h b/source/libs/scheduler/inc/schInt.h index 02e878f4f8..1b3d75f33b 100644 --- a/source/libs/scheduler/inc/schInt.h +++ b/source/libs/scheduler/inc/schInt.h @@ -61,6 +61,8 @@ typedef enum { #define SCH_MAX_TASK_TIMEOUT_USEC 60000000 #define SCH_DEFAULT_MAX_RETRY_NUM 6 +#define SCH_ASYNC_LAUNCH_TASK 0 + typedef struct SSchDebug { bool lockEnable; bool apiEnable; @@ -281,6 +283,11 @@ typedef struct SSchJob { SQueryProfileSummary summary; } SSchJob; +typedef struct SSchTaskCtx { + SSchJob *pJob; + SSchTask *pTask; +} SSchTaskCtx; + extern SSchedulerMgmt schMgmt; #define SCH_TASK_TIMEOUT(_task) ((taosGetTimestampUs() - *(int64_t*)taosArrayGet((_task)->profile.execTime, (_task)->execId)) > (_task)->timeoutUsec) @@ -428,7 +435,7 @@ int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel); int32_t schDecTaskFlowQuota(SSchJob *pJob, SSchTask *pTask); int32_t schCheckIncTaskFlowQuota(SSchJob *pJob, SSchTask *pTask, bool *enough); int32_t schLaunchTasksInFlowCtrlList(SSchJob *pJob, SSchTask *pTask); -int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask); +int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask); int32_t schLaunchFetchTask(SSchJob *pJob); int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask 
*pTask, int32_t errCode); int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId, SArray* taskAction); diff --git a/source/libs/scheduler/src/schFlowCtrl.c b/source/libs/scheduler/src/schFlowCtrl.c index 6b34a394b6..c5c2bfb2bb 100644 --- a/source/libs/scheduler/src/schFlowCtrl.c +++ b/source/libs/scheduler/src/schFlowCtrl.c @@ -54,7 +54,7 @@ int32_t schChkJobNeedFlowCtrl(SSchJob *pJob, SSchLevel *pLevel) { sum += pTask->plan->execNodeStat.tableNum; } - if (sum < schMgmt.cfg.maxNodeTableNum) { + if (schMgmt.cfg.maxNodeTableNum <= 0 || sum < schMgmt.cfg.maxNodeTableNum) { SCH_JOB_DLOG("job no need flow ctrl, totalTableNum:%d", sum); return TSDB_CODE_SUCCESS; } @@ -230,7 +230,7 @@ int32_t schLaunchTasksInFlowCtrlListImpl(SSchJob *pJob, SSchFlowControl *ctrl) { SCH_TASK_DLOG("task to launch, fqdn:%s, port:%d, tableNum:%d, remainNum:%d, remainExecTaskNum:%d", ep->fqdn, ep->port, pTask->plan->execNodeStat.tableNum, ctrl->tableNumSum, ctrl->execTaskNum); - SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask)); + SCH_ERR_JRET(schAsyncLaunchTaskImpl(pJob, pTask)); remainNum -= pTask->plan->execNodeStat.tableNum; if (remainNum <= 0) { diff --git a/source/libs/scheduler/src/schTask.c b/source/libs/scheduler/src/schTask.c index cabca0dc0c..612b908d41 100644 --- a/source/libs/scheduler/src/schTask.c +++ b/source/libs/scheduler/src/schTask.c @@ -411,7 +411,7 @@ int32_t schHandleRedirect(SSchJob *pJob, SSchTask *pTask, SDataBuf *pData, int32 if (pJob->fetched) { SCH_UNLOCK(SCH_WRITE, &pJob->resLock); SCH_TASK_ELOG("already fetched while got error %s", tstrerror(rspCode)); - SCH_ERR_RET(rspCode); + SCH_ERR_JRET(rspCode); } SCH_UNLOCK(SCH_WRITE, &pJob->resLock); @@ -819,7 +819,10 @@ int32_t schProcessOnTaskStatusRsp(SQueryNodeEpId *pEpId, SArray *pStatusList) { return TSDB_CODE_SUCCESS; } -int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { +int32_t schLaunchTaskImpl(void *param) { + SSchTaskCtx *pCtx = (SSchTaskCtx *)param; + SSchJob *pJob = pCtx->pJob; + SSchTask *pTask = pCtx->pTask; int8_t status = 0; int32_t code = 0; @@ -834,12 +837,12 @@ int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { if (schJobNeedToStop(pJob, &status)) { SCH_TASK_DLOG("no need to launch task cause of job status %s", jobTaskStatusStr(status)); - SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR); + SCH_ERR_JRET(TSDB_CODE_SCH_IGNORE_ERROR); } // NOTE: race condition: the task should be put into the hash table before send msg to server if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXEC) { - SCH_ERR_RET(schPushTaskToExecList(pJob, pTask)); + SCH_ERR_JRET(schPushTaskToExecList(pJob, pTask)); SCH_SET_TASK_STATUS(pTask, JOB_TASK_STATUS_EXEC); } @@ -850,19 +853,51 @@ int32_t schLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { if (TSDB_CODE_SUCCESS != code) { SCH_TASK_ELOG("failed to create physical plan, code:%s, msg:%p, len:%d", tstrerror(code), pTask->msg, pTask->msgLen); - SCH_ERR_RET(code); + SCH_ERR_JRET(code); } else { SCH_TASK_DLOGL("physical plan len:%d, %s", pTask->msgLen, pTask->msg); } } - SCH_ERR_RET(schSetTaskCandidateAddrs(pJob, pTask)); + SCH_ERR_JRET(schSetTaskCandidateAddrs(pJob, pTask)); if (SCH_IS_QUERY_JOB(pJob)) { - SCH_ERR_RET(schEnsureHbConnection(pJob, pTask)); + SCH_ERR_JRET(schEnsureHbConnection(pJob, pTask)); } - SCH_ERR_RET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType)); + SCH_ERR_JRET(schBuildAndSendMsg(pJob, pTask, NULL, plan->msgType)); + +_return: + + taosMemoryFree(param); + +#if SCH_ASYNC_LAUNCH_TASK + if (code) { + code = schProcessOnTaskFailure(pJob, pTask, code); + } + if (code) { + code = 
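/* escalate: if task-level failure handling itself failed, fail the whole job */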
schHandleJobFailure(pJob, code); + } +#endif + + SCH_RET(code); +} + +int32_t schAsyncLaunchTaskImpl(SSchJob *pJob, SSchTask *pTask) { + + SSchTaskCtx *param = taosMemoryCalloc(1, sizeof(SSchTaskCtx)); + if (NULL == param) { + SCH_ERR_RET(TSDB_CODE_OUT_OF_MEMORY); + } + + param->pJob = pJob; + param->pTask = pTask; + +#if SCH_ASYNC_LAUNCH_TASK + taosAsyncExec(schLaunchTaskImpl, param, NULL); +#else + SCH_ERR_RET(schLaunchTaskImpl(param)); +#endif return TSDB_CODE_SUCCESS; } @@ -878,10 +913,10 @@ int32_t schLaunchTask(SSchJob *pJob, SSchTask *pTask) { SCH_ERR_JRET(schCheckIncTaskFlowQuota(pJob, pTask, &enough)); if (enough) { - SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask)); + SCH_ERR_JRET(schAsyncLaunchTaskImpl(pJob, pTask)); } } else { - SCH_ERR_JRET(schLaunchTaskImpl(pJob, pTask)); + SCH_ERR_JRET(schAsyncLaunchTaskImpl(pJob, pTask)); } return TSDB_CODE_SUCCESS; diff --git a/source/libs/scheduler/src/scheduler.c b/source/libs/scheduler/src/scheduler.c index a37cd4fd9e..f1ec7f5e04 100644 --- a/source/libs/scheduler/src/scheduler.c +++ b/source/libs/scheduler/src/scheduler.c @@ -154,6 +154,8 @@ void schedulerFreeJob(int64_t* jobId, int32_t errCode) { return; } + SCH_JOB_DLOG("start to free job 0x%" PRIx64 ", errCode:0x%x", *jobId, errCode); + schHandleJobDrop(pJob, errCode); schReleaseJob(*jobId); diff --git a/source/libs/stream/src/streamUpdate.c b/source/libs/stream/src/streamUpdate.c index ff1ef7b4b9..c686fa05ce 100644 --- a/source/libs/stream/src/streamUpdate.c +++ b/source/libs/stream/src/streamUpdate.c @@ -14,6 +14,7 @@ */ #include "tstreamUpdate.h" +#include "tencode.h" #include "ttime.h" #include "query.h" @@ -250,3 +251,110 @@ void updateInfoDestoryColseWinSBF(SUpdateInfo *pInfo) { tScalableBfDestroy(pInfo->pCloseWinSBF); pInfo->pCloseWinSBF = NULL; } + +int32_t updateInfoSerialize(void *buf, int32_t bufLen, const SUpdateInfo *pInfo) { + ASSERT(pInfo); + SEncoder encoder = {0}; + tEncoderInit(&encoder, buf, bufLen); + if (tStartEncode(&encoder) < 0) return -1; + + int32_t size = taosArrayGetSize(pInfo->pTsBuckets); + if (tEncodeI32(&encoder, size) < 0) return -1; + for (int32_t i = 0; i < size; i++) { + TSKEY* pTs = (TSKEY*)taosArrayGet(pInfo->pTsBuckets, i); + if (tEncodeI64(&encoder, *pTs) < 0) return -1; + } + + if (tEncodeU64(&encoder, pInfo->numBuckets) < 0) return -1; + + int32_t sBfSize = taosArrayGetSize(pInfo->pTsSBFs); + if (tEncodeI32(&encoder, sBfSize) < 0) return -1; + for (int32_t i = 0; i < sBfSize; i++) { + SScalableBf* pSBf = taosArrayGetP(pInfo->pTsSBFs, i); + if (tScalableBfEncode(pSBf, &encoder) < 0) return -1; + } + + if (tEncodeU64(&encoder, pInfo->numSBFs) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->interval) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->watermark) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->minTS) < 0) return -1; + + if (tScalableBfEncode(pInfo->pCloseWinSBF, &encoder) < 0) return -1; + + int32_t mapSize = taosHashGetSize(pInfo->pMap); + if (tEncodeI32(&encoder, mapSize) < 0) return -1; + void* pIte = NULL; + size_t keyLen = 0; + while ((pIte = taosHashIterate(pInfo->pMap, pIte)) != NULL) { + void* key = taosHashGetKey(pIte, &keyLen); + if (tEncodeU64(&encoder, *(uint64_t*)key) < 0) return -1; + if (tEncodeI64(&encoder, *(TSKEY*)pIte) < 0) return -1; + } + + if (tEncodeI64(&encoder, pInfo->scanWindow.skey) < 0) return -1; + if (tEncodeI64(&encoder, pInfo->scanWindow.ekey) < 0) return -1; + if (tEncodeU64(&encoder, pInfo->scanGroupId) < 0) return -1; + if (tEncodeU64(&encoder, pInfo->maxVersion) < 0) return -1; + + 
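/* close the envelope; encoder.pos then holds the exact serialized size */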
tEndEncode(&encoder); + + int32_t tlen = encoder.pos; + tEncoderClear(&encoder); + return tlen; +} + +int32_t updateInfoDeserialize(void *buf, int32_t bufLen, SUpdateInfo *pInfo) { + ASSERT(pInfo); + SDecoder decoder = {0}; + tDecoderInit(&decoder, buf, bufLen); + if (tStartDecode(&decoder) < 0) return -1; + + int32_t size = 0; + if (tDecodeI32(&decoder, &size) < 0) return -1; + pInfo->pTsBuckets = taosArrayInit(size, sizeof(TSKEY)); + TSKEY ts = INT64_MIN; + for (int32_t i = 0; i < size; i++) { + if (tDecodeI64(&decoder, &ts) < 0) return -1; + taosArrayPush(pInfo->pTsBuckets, &ts); + } + + if (tDecodeU64(&decoder, &pInfo->numBuckets) < 0) return -1; + + int32_t sBfSize = 0; + if (tDecodeI32(&decoder, &sBfSize) < 0) return -1; + pInfo->pTsSBFs = taosArrayInit(sBfSize, sizeof(void *)); + for (int32_t i = 0; i < sBfSize; i++) { + SScalableBf* pSBf = tScalableBfDecode(&decoder); + if (!pSBf) return -1; + taosArrayPush(pInfo->pTsSBFs, &pSBf); + } + + if (tDecodeU64(&decoder, &pInfo->numSBFs) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->interval) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->watermark) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->minTS) < 0) return -1; + pInfo->pCloseWinSBF = tScalableBfDecode(&decoder); + + int32_t mapSize = 0; + if (tDecodeI32(&decoder, &mapSize) < 0) return -1; + _hash_fn_t hashFn = taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY); + pInfo->pMap = taosHashInit(mapSize, hashFn, true, HASH_NO_LOCK); + uint64_t uid = 0; + ts = INT64_MIN; + for(int32_t i = 0; i < mapSize; i++) { + if (tDecodeU64(&decoder, &uid) < 0) return -1; + if (tDecodeI64(&decoder, &ts) < 0) return -1; + taosHashPut(pInfo->pMap, &uid, sizeof(uint64_t), &ts, sizeof(TSKEY)); + } + ASSERT(mapSize == taosHashGetSize(pInfo->pMap)); + + if (tDecodeI64(&decoder, &pInfo->scanWindow.skey) < 0) return -1; + if (tDecodeI64(&decoder, &pInfo->scanWindow.ekey) < 0) return -1; + if (tDecodeU64(&decoder, &pInfo->scanGroupId) < 0) return -1; + if (tDecodeU64(&decoder, &pInfo->maxVersion) < 0) return -1; + + tEndDecode(&decoder); + + tDecoderClear(&decoder); + return 0; +} diff --git a/source/libs/stream/test/tstreamUpdateTest.cpp b/source/libs/stream/test/tstreamUpdateTest.cpp index 93e114db02..933549b8a6 100644 --- a/source/libs/stream/test/tstreamUpdateTest.cpp +++ b/source/libs/stream/test/tstreamUpdateTest.cpp @@ -6,11 +6,37 @@ using namespace std; #define MAX_NUM_SCALABLE_BF 100000 +bool equalSBF(SScalableBf* left, SScalableBf* right) { + if (left->growth != right->growth) return false; + if (left->numBits != right->numBits) return false; + int lsize = taosArrayGetSize(left->bfArray); + int rsize = taosArrayGetSize(right->bfArray); + if (lsize != rsize) return false; + for (int32_t i = 0; i < lsize; i++) { + SBloomFilter* pLeftBF = (SBloomFilter*)taosArrayGetP(left->bfArray, i); + SBloomFilter* pRightBF = (SBloomFilter*)taosArrayGetP(right->bfArray, i); + if (pLeftBF->errorRate != pRightBF->errorRate) return false; + if (pLeftBF->expectedEntries != pRightBF->expectedEntries) return false; + if (pLeftBF->hashFn1 != pRightBF->hashFn1) return false; + if (pLeftBF->hashFn2 != pRightBF->hashFn2) return false; + if (pLeftBF->hashFunctions != pRightBF->hashFunctions) return false; + if (pLeftBF->numBits != pRightBF->numBits) return false; + if (pLeftBF->numUnits != pRightBF->numUnits) return false; + if (pLeftBF->size != pRightBF->size) return false; + uint64_t* leftUint = (uint64_t*) pLeftBF->buffer; + uint64_t* rightUint = (uint64_t*) pRightBF->buffer; + for (int32_t j = 0; j < 
pLeftBF->numUnits; j++) { + if (leftUint[j] != rightUint[j]) return false; + } + } + return true; +} + TEST(TD_STREAM_UPDATE_TEST, update) { - int64_t interval = 20 * 1000; - int64_t watermark = 10 * 60 * 1000; + const int64_t interval = 20 * 1000; + const int64_t watermark = 10 * 60 * 1000; SUpdateInfo *pSU = updateInfoInit(interval, TSDB_TIME_PRECISION_MILLI, watermark); - GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU,1, 0), true); + GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU,1, 0), false); GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU,1, -1), true); for(int i=0; i < 1024; i++) { @@ -31,15 +57,16 @@ TEST(TD_STREAM_UPDATE_TEST, update) { GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU,i, 1), true); } + TSKEY uid = 0; for(int i=3; i < 1024; i++) { - GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU,0, i), false); + GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU, uid, i), false); } - GTEST_ASSERT_EQ(*(int64_t*)taosArrayGet(pSU->pTsBuckets,0), 1023); + GTEST_ASSERT_EQ(*(TSKEY*)taosHashGet(pSU->pMap, &uid, sizeof(uint64_t)), 1023); for(int i=3; i < 1024; i++) { - GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU,0, i), true); + GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU, uid, i), true); } - GTEST_ASSERT_EQ(*(int64_t*)taosArrayGet(pSU->pTsBuckets,0), 1023); + GTEST_ASSERT_EQ(*(TSKEY*)taosHashGet(pSU->pMap, &uid, sizeof(uint64_t)), 1023); SUpdateInfo *pSU1 = updateInfoInit(interval, TSDB_TIME_PRECISION_MILLI, watermark); for(int i=1; i <= watermark / interval; i++) { @@ -75,7 +102,8 @@ TEST(TD_STREAM_UPDATE_TEST, update) { GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU2, 1, i * interval + 5), false); GTEST_ASSERT_EQ(pSU2->minTS, (i-(pSU2->numSBFs-1))*interval); GTEST_ASSERT_EQ(pSU2->numSBFs, watermark / interval); - GTEST_ASSERT_EQ(*(int64_t*)taosArrayGet(pSU2->pTsBuckets,1), i * interval + 5); + TSKEY uid2 = 1; + GTEST_ASSERT_EQ(*(TSKEY*)taosHashGet(pSU2->pMap, &uid2, sizeof(uint64_t)), i * interval + 5); } SUpdateInfo *pSU3 = updateInfoInit(interval, TSDB_TIME_PRECISION_MILLI, watermark); @@ -84,7 +112,8 @@ TEST(TD_STREAM_UPDATE_TEST, update) { GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU3, i, i * interval + 5 * j), false); GTEST_ASSERT_EQ(pSU3->minTS, 0); GTEST_ASSERT_EQ(pSU3->numSBFs, watermark / interval); - GTEST_ASSERT_EQ(*(int64_t*)taosArrayGet(pSU3->pTsBuckets, i), i * interval + 5 * j); + uint64_t uid3 = i; + GTEST_ASSERT_EQ(*(TSKEY*)taosHashGet(pSU3->pMap, &uid3, sizeof(uint64_t)), i * interval + 5 * j); SScalableBf *pSBF = (SScalableBf *)taosArrayGetP(pSU3->pTsSBFs, i); SBloomFilter *pBF = (SBloomFilter *)taosArrayGetP(pSBF->bfArray, 0); GTEST_ASSERT_EQ(pBF->size, j); @@ -92,13 +121,66 @@ TEST(TD_STREAM_UPDATE_TEST, update) { } SUpdateInfo *pSU4 = updateInfoInit(-1, TSDB_TIME_PRECISION_MILLI, -1); - GTEST_ASSERT_EQ(pSU4->watermark, MAX_NUM_SCALABLE_BF * pSU4->interval); + GTEST_ASSERT_EQ(pSU4->watermark, pSU4->interval); GTEST_ASSERT_EQ(pSU4->interval, MILLISECOND_PER_MINUTE); SUpdateInfo *pSU5 = updateInfoInit(0, TSDB_TIME_PRECISION_MILLI, 0); - GTEST_ASSERT_EQ(pSU5->watermark, MAX_NUM_SCALABLE_BF * pSU4->interval); + GTEST_ASSERT_EQ(pSU5->watermark, pSU4->interval); GTEST_ASSERT_EQ(pSU5->interval, MILLISECOND_PER_MINUTE); + SUpdateInfo *pSU7 = updateInfoInit(interval, TSDB_TIME_PRECISION_MILLI, watermark); + updateInfoAddCloseWindowSBF(pSU7); + for(int64_t i = 1; i < 2048000; i++) { + GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU7,i, i), false); + } + GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU7,100, 1), true); + GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU7,110, 10), true); + GTEST_ASSERT_EQ(updateInfoIsUpdated(pSU7,200, 20), true); + + int32_t bufLen = 
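/* sizing pass: with a NULL buffer the encoder only counts bytes, so the return value is the allocation size needed for the real pass below */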
updateInfoSerialize(NULL, 0, pSU7); + void* buf = taosMemoryCalloc(1, bufLen); + int32_t resSize = updateInfoSerialize(buf, bufLen, pSU7); + + SUpdateInfo *pSU6 = updateInfoInit(0, TSDB_TIME_PRECISION_MILLI, 0); + int32_t desSize = updateInfoDeserialize(buf, bufLen, pSU6); + GTEST_ASSERT_EQ(desSize, 0); + + GTEST_ASSERT_EQ(pSU7->interval, pSU6->interval); + GTEST_ASSERT_EQ(pSU7->maxVersion, pSU6->maxVersion); + GTEST_ASSERT_EQ(pSU7->minTS, pSU6->minTS); + GTEST_ASSERT_EQ(pSU7->numBuckets, pSU6->numBuckets); + GTEST_ASSERT_EQ(pSU7->numSBFs, pSU6->numSBFs); + GTEST_ASSERT_EQ(pSU7->scanGroupId, pSU6->scanGroupId); + GTEST_ASSERT_EQ(pSU7->scanWindow.ekey, pSU6->scanWindow.ekey); + GTEST_ASSERT_EQ(pSU7->scanWindow.skey, pSU6->scanWindow.skey); + GTEST_ASSERT_EQ(pSU7->watermark, pSU6->watermark); + GTEST_ASSERT_EQ(equalSBF(pSU7->pCloseWinSBF, pSU6->pCloseWinSBF), true); + + int32_t mapSize = taosHashGetSize(pSU7->pMap); + GTEST_ASSERT_EQ(mapSize, taosHashGetSize(pSU6->pMap)); + void* pIte = NULL; + size_t keyLen = 0; + while ((pIte = taosHashIterate(pSU7->pMap, pIte)) != NULL) { + void* key = taosHashGetKey(pIte, &keyLen); + void* value6 = taosHashGet(pSU6->pMap, key, keyLen); + GTEST_ASSERT_EQ(*(TSKEY*)pIte, *(TSKEY*)value6); + } + + int32_t buSize = taosArrayGetSize(pSU7->pTsBuckets); + GTEST_ASSERT_EQ(buSize, taosArrayGetSize(pSU6->pTsBuckets)); + for (int32_t i = 0; i < buSize; i++) { + TSKEY ts1 = *(TSKEY*)taosArrayGet(pSU7->pTsBuckets, i); + TSKEY ts2 = *(TSKEY*)taosArrayGet(pSU6->pTsBuckets, i); + GTEST_ASSERT_EQ(ts1, ts2); + } + int32_t lSize = taosArrayGetSize(pSU7->pTsSBFs); + int32_t rSize = taosArrayGetSize(pSU6->pTsSBFs); + GTEST_ASSERT_EQ(lSize, rSize); + for (int32_t i = 0; i < lSize; i++) { + SScalableBf* pLeftSBF = (SScalableBf*)taosArrayGetP(pSU7->pTsSBFs, i); + SScalableBf* pRightSBF = (SScalableBf*)taosArrayGetP(pSU6->pTsSBFs, i); + GTEST_ASSERT_EQ(equalSBF(pLeftSBF, pRightSBF), true); + } updateInfoDestroy(pSU); updateInfoDestroy(pSU1); @@ -106,6 +188,9 @@ TEST(TD_STREAM_UPDATE_TEST, update) { updateInfoDestroy(pSU3); updateInfoDestroy(pSU4); updateInfoDestroy(pSU5); + updateInfoDestroy(pSU6); + updateInfoDestroy(pSU7); + } int main(int argc, char* argv[]) { diff --git a/source/libs/sync/inc/syncInt.h b/source/libs/sync/inc/syncInt.h index 82399f52b9..856870caba 100644 --- a/source/libs/sync/inc/syncInt.h +++ b/source/libs/sync/inc/syncInt.h @@ -162,6 +162,9 @@ typedef struct SSyncNode { // is config changing bool changing; + int64_t startTime; + int64_t lastReplicateTime; + } SSyncNode; // open/close -------------- @@ -186,15 +189,19 @@ int32_t syncNodePingAll(SSyncNode* pSyncNode); // timer control -------------- int32_t syncNodeStartPingTimer(SSyncNode* pSyncNode); int32_t syncNodeStopPingTimer(SSyncNode* pSyncNode); + int32_t syncNodeStartElectTimer(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeStopElectTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartElectTimer(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode); + int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode); -int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeStartHeartbeatTimerNow(SSyncNode* pSyncNode); +int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms); int32_t syncNodeStopHeartbeatTimer(SSyncNode* pSyncNode); int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode); -int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode); +int32_t syncNodeRestartHeartbeatTimerNow(SSyncNode* pSyncNode); +int32_t 
syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms); // utils -------------- int32_t syncNodeSendMsgById(const SRaftId* destRaftId, SSyncNode* pSyncNode, SRpcMsg* pMsg); diff --git a/source/libs/sync/inc/syncRaftLog.h b/source/libs/sync/inc/syncRaftLog.h index 65ec77e38f..ff59189a9d 100644 --- a/source/libs/sync/inc/syncRaftLog.h +++ b/source/libs/sync/inc/syncRaftLog.h @@ -47,6 +47,8 @@ char* logStoreSimple2Str(SSyncLogStore* pLogStore); SyncIndex logStoreFirstIndex(SSyncLogStore* pLogStore); +SyncIndex logStoreWalCommitVer(SSyncLogStore* pLogStore); + // for debug void logStorePrint(SSyncLogStore* pLogStore); void logStorePrint2(char* s, SSyncLogStore* pLogStore); diff --git a/source/libs/sync/inc/syncReplication.h b/source/libs/sync/inc/syncReplication.h index dad7d9b5ec..21821be6c7 100644 --- a/source/libs/sync/inc/syncReplication.h +++ b/source/libs/sync/inc/syncReplication.h @@ -55,7 +55,7 @@ int32_t syncNodeAppendEntriesPeers(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode); int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode); -int32_t syncNodeReplicate(SSyncNode* pSyncNode); +int32_t syncNodeReplicate(SSyncNode* pSyncNode, bool isTimer); int32_t syncNodeAppendEntries(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntries* pMsg); int32_t syncNodeAppendEntriesBatch(SSyncNode* pSyncNode, const SRaftId* destRaftId, const SyncAppendEntriesBatch* pMsg); diff --git a/source/libs/sync/src/syncAppendEntries.c b/source/libs/sync/src/syncAppendEntries.c index f31f3dd1ae..4f93d8197d 100644 --- a/source/libs/sync/src/syncAppendEntries.c +++ b/source/libs/sync/src/syncAppendEntries.c @@ -357,16 +357,14 @@ static int32_t syncNodeMakeLogSame(SSyncNode* ths, SyncAppendEntries* pMsg) { code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin); ASSERT(code == 0); - char eventLog[128]; - snprintf(eventLog, sizeof(eventLog), "log truncate, from %" PRId64 " to %" PRId64, delBegin, delEnd); - syncNodeEventLog(ths, eventLog); - logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore); - return code; } +// if FromIndex > walCommitVer, return 0 +// else return the number of passed (already committed) entries static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) { - int32_t code; + int32_t code = 0; + int32_t pass = 0; SyncIndex delBegin = FromIndex; SyncIndex delEnd = ths->pLogStore->syncLogLastIndex(ths->pLogStore); @@ -398,16 +396,31 @@ static int32_t syncNodeDoMakeLogSame(SSyncNode* ths, SyncIndex FromIndex) { } } + // update delete begin; count the passed entries before advancing it, otherwise pass is always 0 + SyncIndex walCommitVer = logStoreWalCommitVer(ths->pLogStore); + + if (delBegin <= walCommitVer) { + pass = walCommitVer - delBegin + 1; + delBegin = walCommitVer + 1; + + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "update delete begin to %" PRId64, delBegin); + syncNodeEventLog(ths, logBuf); + } while (0); + } + // delete conflict entries code = ths->pLogStore->syncLogTruncate(ths->pLogStore, delBegin); ASSERT(code == 0); - char eventLog[128]; - snprintf(eventLog, sizeof(eventLog), "log truncate, from %" PRId64 " to %" PRId64, delBegin, delEnd); - syncNodeEventLog(ths, eventLog); - logStoreSimpleLog2("after syncNodeMakeLogSame", ths->pLogStore); + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "make log same from:%" PRId64 ", delbegin:%" PRId64 ", pass:%d", FromIndex, delBegin, pass); + syncNodeEventLog(ths, logBuf); + } while (0); - return code; + return pass; } int32_t syncNodePreCommit(SSyncNode* ths, SSyncRaftEntry* pEntry, int32_t code) { @@ -543,31 +556,34 @@ 
int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc SOffsetAndContLen* metaTableArr = syncAppendEntriesBatchMetaTableArray(pMsg); if (hasAppendEntries && pMsg->prevLogIndex == ths->commitIndex) { + int32_t pass = 0; + SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); + bool hasExtraEntries = logLastIndex > pMsg->prevLogIndex; + // make log same - do { - SyncIndex logLastIndex = ths->pLogStore->syncLogLastIndex(ths->pLogStore); - bool hasExtraEntries = logLastIndex > pMsg->prevLogIndex; - - if (hasExtraEntries) { - // make log same, rollback deleted entries - code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); - ASSERT(code == 0); - } - - } while (0); + if (hasExtraEntries) { + // make log same, rollback deleted entries + pass = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); + ASSERT(pass >= 0); + } // append entry batch - for (int32_t i = 0; i < pMsg->dataCount; ++i) { - SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - return -1; + if (pass == 0) { + // assert! no batch + ASSERT(pMsg->dataCount <= 1); + + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); + code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); + if (code != 0) { + return -1; + } + + code = syncNodePreCommit(ths, pAppendEntry, 0); + ASSERT(code == 0); + + // syncEntryDestory(pAppendEntry); } - - code = syncNodePreCommit(ths, pAppendEntry, 0); - ASSERT(code == 0); - - // syncEntryDestory(pAppendEntry); } // fsync once @@ -670,25 +686,33 @@ int32_t syncNodeOnAppendEntriesSnapshot2Cb(SSyncNode* ths, SyncAppendEntriesBatc syncLogRecvAppendEntriesBatch(ths, pMsg, "really match"); + int32_t pass = 0; + if (hasExtraEntries) { // make log same, rollback deleted entries - code = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); - ASSERT(code == 0); + pass = syncNodeDoMakeLogSame(ths, pMsg->prevLogIndex + 1); + ASSERT(pass >= 0); } if (hasAppendEntries) { // append entry batch - for (int32_t i = 0; i < pMsg->dataCount; ++i) { - SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); - code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); - if (code != 0) { - return -1; + if (pass == 0) { + // assert! 
no batch + ASSERT(pMsg->dataCount <= 1); + + // append entry batch + for (int32_t i = 0; i < pMsg->dataCount; ++i) { + SSyncRaftEntry* pAppendEntry = (SSyncRaftEntry*)(pMsg->data + metaTableArr[i].offset); + code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pAppendEntry); + if (code != 0) { + return -1; + } + + code = syncNodePreCommit(ths, pAppendEntry, 0); + ASSERT(code == 0); + + // syncEntryDestory(pAppendEntry); } - - code = syncNodePreCommit(ths, pAppendEntry, 0); - ASSERT(code == 0); - - // syncEntryDestory(pAppendEntry); } // fsync once diff --git a/source/libs/sync/src/syncCommit.c b/source/libs/sync/src/syncCommit.c index a603cfff27..3a94ed9713 100644 --- a/source/libs/sync/src/syncCommit.c +++ b/source/libs/sync/src/syncCommit.c @@ -92,6 +92,12 @@ void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) { } } + // advance commit index as large as possible + SyncIndex walCommitVer = logStoreWalCommitVer(pSyncNode->pLogStore); + if (walCommitVer > newCommitIndex) { + newCommitIndex = walCommitVer; + } + // maybe execute fsm if (newCommitIndex > pSyncNode->commitIndex) { SyncIndex beginIndex = pSyncNode->commitIndex + 1; diff --git a/source/libs/sync/src/syncMain.c b/source/libs/sync/src/syncMain.c index 78004c0ad6..20b4b23bf7 100644 --- a/source/libs/sync/src/syncMain.c +++ b/source/libs/sync/src/syncMain.c @@ -495,6 +495,18 @@ const char* syncGetMyRoleStr(int64_t rid) { return s; } +bool syncRestoreFinish(int64_t rid) { + SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); + if (pSyncNode == NULL) { + return false; + } + ASSERT(rid == pSyncNode->rid); + bool restoreFinish = pSyncNode->restoreFinish; + + taosReleaseRef(tsNodeRefId, pSyncNode->rid); + return restoreFinish; +} + SyncTerm syncGetMyTerm(int64_t rid) { SSyncNode* pSyncNode = (SSyncNode*)taosAcquireRef(tsNodeRefId, rid); if (pSyncNode == NULL) { @@ -1086,6 +1098,10 @@ SSyncNode* syncNodeOpen(const SSyncInfo* pOldSyncInfo) { // start raft // syncNodeBecomeFollower(pSyncNode); + int64_t timeNow = taosGetTimestampMs(); + pSyncNode->startTime = timeNow; + pSyncNode->lastReplicateTime = timeNow; + syncNodeEventLog(pSyncNode, "sync open"); return pSyncNode; @@ -1303,7 +1319,7 @@ int32_t syncNodeResetElectTimer(SSyncNode* pSyncNode) { return ret; } -int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) { +static int32_t syncNodeDoStartHeartbeatTimer(SSyncNode* pSyncNode) { int32_t ret = 0; if (syncEnvIsStart()) { taosTmrReset(pSyncNode->FpHeartbeatTimerCB, pSyncNode->heartbeatTimerMS, pSyncNode, gSyncEnv->pTimerManager, @@ -1322,21 +1338,21 @@ int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) { return ret; } -int32_t syncNodeStartNowHeartbeatTimer(SSyncNode* pSyncNode) { - int32_t ret = 0; - if (syncEnvIsStart()) { - taosTmrReset(pSyncNode->FpHeartbeatTimerCB, 1, pSyncNode, gSyncEnv->pTimerManager, &pSyncNode->pHeartbeatTimer); - atomic_store_64(&pSyncNode->heartbeatTimerLogicClock, pSyncNode->heartbeatTimerLogicClockUser); - } else { - sError("vgId:%d, start heartbeat timer error, sync env is stop", pSyncNode->vgId); - } +int32_t syncNodeStartHeartbeatTimer(SSyncNode* pSyncNode) { + pSyncNode->heartbeatTimerMS = pSyncNode->hbBaseLine; + int32_t ret = syncNodeDoStartHeartbeatTimer(pSyncNode); + return ret; +} - do { - char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "start heartbeat timer, ms:%d", 1); - syncNodeEventLog(pSyncNode, logBuf); - } while (0); +int32_t syncNodeStartHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) { + pSyncNode->heartbeatTimerMS = ms; + int32_t ret = 
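/* every start variant funnels through syncNodeDoStartHeartbeatTimer; only the interval differs */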
syncNodeDoStartHeartbeatTimer(pSyncNode); + return ret; +} +int32_t syncNodeStartHeartbeatTimerNow(SSyncNode* pSyncNode) { + pSyncNode->heartbeatTimerMS = 1; + int32_t ret = syncNodeDoStartHeartbeatTimer(pSyncNode); return ret; } @@ -1357,9 +1373,15 @@ int32_t syncNodeRestartHeartbeatTimer(SSyncNode* pSyncNode) { return 0; } -int32_t syncNodeRestartNowHeartbeatTimer(SSyncNode* pSyncNode) { +int32_t syncNodeRestartHeartbeatTimerNow(SSyncNode* pSyncNode) { syncNodeStopHeartbeatTimer(pSyncNode); - syncNodeStartNowHeartbeatTimer(pSyncNode); + syncNodeStartHeartbeatTimerNow(pSyncNode); + return 0; +} + +int32_t syncNodeRestartNowHeartbeatTimerMS(SSyncNode* pSyncNode, int32_t ms) { + syncNodeStopHeartbeatTimer(pSyncNode); + syncNodeStartHeartbeatTimerMS(pSyncNode, ms); return 0; } @@ -1931,9 +1953,6 @@ void syncNodeDoConfigChange(SSyncNode* pSyncNode, SSyncCfg* pNewConfig, SyncInde // Raft 3.6.2 Committing entries from previous terms syncNodeAppendNoop(pSyncNode); -#if 0 // simon - syncNodeReplicate(pSyncNode); -#endif syncMaybeAdvanceCommitIndex(pSyncNode); } else { @@ -2115,9 +2134,6 @@ void syncNodeCandidate2Leader(SSyncNode* pSyncNode) { // Raft 3.6.2 Committing entries from previous terms syncNodeAppendNoop(pSyncNode); -#if 0 // simon - syncNodeReplicate(pSyncNode); -#endif syncMaybeAdvanceCommitIndex(pSyncNode); } @@ -2409,6 +2425,9 @@ static void syncNodeEqElectTimer(void* param, void* tmrId) { static void syncNodeEqHeartbeatTimer(void* param, void* tmrId) { SSyncNode* pSyncNode = (SSyncNode*)param; + + syncNodeEventLog(pSyncNode, "eq hb timer"); + if (pSyncNode->replicaNum > 1) { if (atomic_load_64(&pSyncNode->heartbeatTimerLogicClockUser) <= atomic_load_64(&pSyncNode->heartbeatTimerLogicClock)) { @@ -2485,7 +2504,7 @@ static int32_t syncNodeAppendNoop(SSyncNode* ths) { if (ths->state == TAOS_SYNC_STATE_LEADER) { int32_t code = ths->pLogStore->syncLogAppendEntry(ths->pLogStore, pEntry); ASSERT(code == 0); - syncNodeReplicate(ths); + syncNodeReplicate(ths, false); } syncEntryDestory(pEntry); @@ -2558,7 +2577,7 @@ int32_t syncNodeOnClientRequestCb(SSyncNode* ths, SyncClientRequest* pMsg, SyncI // if mulit replica, start replicate right now if (ths->replicaNum > 1) { - syncNodeReplicate(ths); + syncNodeReplicate(ths, false); // pre commit syncNodePreCommit(ths, pEntry, 0); @@ -2627,7 +2646,7 @@ int32_t syncNodeOnClientRequestBatchCb(SSyncNode* ths, SyncClientRequestBatch* p if (ths->replicaNum > 1) { // if multi replica, start replicate right now - syncNodeReplicate(ths); + syncNodeReplicate(ths, false); } else if (ths->replicaNum == 1) { // one replica @@ -2665,7 +2684,22 @@ int32_t syncDoLeaderTransfer(SSyncNode* ths, SRpcMsg* pRpcMsg, SSyncRaftEntry* p syncNodeEventLog(ths, "I am not follower, can not do leader transfer"); return 0; } - syncNodeEventLog(ths, "do leader transfer"); + + if (!ths->restoreFinish) { + syncNodeEventLog(ths, "restore not finish, can not do leader transfer"); + return 0; + } + + if (ths->vgId > 1) { + syncNodeEventLog(ths, "I am vnode, can not do leader transfer"); + return 0; + } + + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "do leader transfer, index:%ld", pEntry->index); + syncNodeEventLog(ths, logBuf); + } while (0); bool sameId = syncUtilSameId(&(pSyncLeaderTransfer->newLeaderId), &(ths->myRaftId)); bool sameNodeInfo = strcmp(pSyncLeaderTransfer->newNodeInfo.nodeFqdn, ths->myNodeInfo.nodeFqdn) == 0 && diff --git a/source/libs/sync/src/syncRaftLog.c b/source/libs/sync/src/syncRaftLog.c index b575e40d86..0649e064e4 100644 --- 
a/source/libs/sync/src/syncRaftLog.c +++ b/source/libs/sync/src/syncRaftLog.c @@ -305,10 +305,18 @@ static int32_t raftLogGetEntry(struct SSyncLogStore* pLogStore, SyncIndex index, return code; } +// truncate semantic static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIndex) { SSyncLogStoreData* pData = pLogStore->data; SWal* pWal = pData->pWal; - int32_t code = walRollback(pWal, fromIndex); + + // need not truncate + SyncIndex wallastVer = walGetLastVer(pWal); + if (fromIndex > wallastVer) { + return 0; + } + + int32_t code = walRollback(pWal, fromIndex); if (code != 0) { int32_t err = terrno; const char* errStr = tstrerror(err); @@ -323,7 +331,7 @@ static int32_t raftLogTruncate(struct SSyncLogStore* pLogStore, SyncIndex fromIn // event log do { char logBuf[128]; - snprintf(logBuf, sizeof(logBuf), "wal truncate, from-index:%" PRId64, fromIndex); + snprintf(logBuf, sizeof(logBuf), "log truncate, from-index:%" PRId64, fromIndex); syncNodeEventLog(pData->pSyncNode, logBuf); } while (0); @@ -637,6 +645,12 @@ SyncIndex logStoreFirstIndex(SSyncLogStore* pLogStore) { return walGetFirstVer(pWal); } +SyncIndex logStoreWalCommitVer(SSyncLogStore* pLogStore) { + SSyncLogStoreData* pData = pLogStore->data; + SWal* pWal = pData->pWal; + return walGetCommittedVer(pWal); +} + // for debug ----------------- void logStorePrint(SSyncLogStore* pLogStore) { char* serialized = logStore2Str(pLogStore); diff --git a/source/libs/sync/src/syncReplication.c b/source/libs/sync/src/syncReplication.c index bc703e519c..24f75de5d3 100644 --- a/source/libs/sync/src/syncReplication.c +++ b/source/libs/sync/src/syncReplication.c @@ -199,10 +199,27 @@ int32_t syncNodeAppendEntriesPeersSnapshot2(SSyncNode* pSyncNode) { // send msg syncNodeAppendEntriesBatch(pSyncNode, pDestId, pMsg); + + // speed up + if (pMsg->dataCount > 0 && pSyncNode->commitIndex - pMsg->prevLogIndex > SYNC_SLOW_DOWN_RANGE) { + ret = 1; + +#if 0 + do { + char logBuf[128]; + char host[64]; + uint16_t port; + syncUtilU642Addr(pDestId->addr, host, sizeof(host), &port); + snprintf(logBuf, sizeof(logBuf), "maybe speed up for %s:%d, pre-index:%ld", host, port, pMsg->prevLogIndex); + syncNodeEventLog(pSyncNode, logBuf); + } while (0); +#endif + } + syncAppendEntriesBatchDestroy(pMsg); } - return 0; + return ret; } int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) { @@ -287,7 +304,7 @@ int32_t syncNodeAppendEntriesPeersSnapshot(SSyncNode* pSyncNode) { return ret; } -int32_t syncNodeReplicate(SSyncNode* pSyncNode) { +int32_t syncNodeReplicate(SSyncNode* pSyncNode, bool isTimer) { // start replicate int32_t ret = 0; @@ -309,7 +326,39 @@ int32_t syncNodeReplicate(SSyncNode* pSyncNode) { break; } - syncNodeRestartHeartbeatTimer(pSyncNode); + // start delay + int64_t timeNow = taosGetTimestampMs(); + int64_t startDelay = timeNow - pSyncNode->startTime; + + // replicate delay + int64_t replicateDelay = timeNow - pSyncNode->lastReplicateTime; + pSyncNode->lastReplicateTime = timeNow; + + if (ret > 0 && isTimer && startDelay > SYNC_SPEED_UP_AFTER_MS) { + // speed up replicate + int32_t ms = + pSyncNode->heartbeatTimerMS < SYNC_SPEED_UP_HB_TIMER ? 
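/* take the smaller of the two intervals, so speed-up never slows the timer down */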
pSyncNode->heartbeatTimerMS : SYNC_SPEED_UP_HB_TIMER; + syncNodeRestartNowHeartbeatTimerMS(pSyncNode, ms); + +#if 0 + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "replicate speed up"); + syncNodeEventLog(pSyncNode, logBuf); + } while (0); +#endif + + } else { + syncNodeRestartHeartbeatTimer(pSyncNode); + +#if 0 + do { + char logBuf[128]; + snprintf(logBuf, sizeof(logBuf), "replicate slow down"); + syncNodeEventLog(pSyncNode, logBuf); + } while (0); +#endif + } return ret; } diff --git a/source/libs/sync/src/syncTimeout.c b/source/libs/sync/src/syncTimeout.c index 15b1054e63..65314b812a 100644 --- a/source/libs/sync/src/syncTimeout.c +++ b/source/libs/sync/src/syncTimeout.c @@ -58,7 +58,7 @@ int32_t syncNodeOnTimeoutCb(SSyncNode* ths, SyncTimeout* pMsg) { ++(ths->heartbeatTimerCounter); sInfo("vgId:%d, sync timeout, type:replicate count:%d, heartbeatTimerLogicClockUser:%ld", ths->vgId, ths->heartbeatTimerCounter, ths->heartbeatTimerLogicClockUser); - syncNodeReplicate(ths); + syncNodeReplicate(ths, true); } } else { sError("vgId:%d, unknown timeout-type:%d", ths->vgId, pMsg->timeoutType); diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index ad0ce4f5e1..04b58da570 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -105,13 +105,13 @@ typedef SRpcCtxVal STransCtxVal; typedef SRpcInfo STrans; typedef SRpcConnInfo STransHandleInfo; -// ref mgt -// handle +// ref mgt handle typedef struct SExHandle { void* handle; int64_t refId; void* pThrd; } SExHandle; + /*convet from fqdn to ip */ typedef struct SCvtAddr { char ip[TSDB_FQDN_LEN]; @@ -128,7 +128,7 @@ typedef struct { int8_t retryCnt; int8_t retryLimit; - // bool setMaxRetry; + STransCtx appCtx; // STransMsg* pRsp; // for synchronous API tsem_t* pSem; // for synchronous API @@ -195,17 +195,7 @@ typedef enum { ConnNormal, ConnAcquire, ConnRelease, ConnBroken, ConnInPool } Co #define transLabel(trans) ((STrans*)trans)->label -// int rpcAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey); -// void rpcBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey); -//// int32_t rpcCompressRpcMsg(char* pCont, int32_t contLen); -// -// int transAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey); -// void transBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey); -// bool transCompressMsg(char* msg, int32_t len, int32_t* flen); -// bool transDecompressMsg(char* msg, int32_t len, int32_t* flen); - void transFreeMsg(void* msg); - // typedef struct SConnBuffer { char* buf; @@ -322,8 +312,8 @@ void* transCtxDumpBrokenlinkVal(STransCtx* ctx, int32_t* msgType); // request list typedef struct STransReq { - queue q; - void* data; + queue q; + uv_write_t wreq; } STransReq; void transReqQueueInit(queue* q); diff --git a/source/libs/transport/src/.transSvr.c.swo b/source/libs/transport/src/.transSvr.c.swo deleted file mode 100644 index c9486c5086..0000000000 Binary files a/source/libs/transport/src/.transSvr.c.swo and /dev/null differ diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index dc7ffa9b13..47d47d3dbe 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -16,7 +16,8 @@ #include "transComm.h" typedef struct SConnList { - queue conn; + queue conn; + int32_t size; } SConnList; typedef struct SCliConn { @@ -339,8 +340,8 @@ void cliHandleResp(SCliConn* conn) { tDebug("%s conn %p stop timer", CONN_GET_INST_LABEL(conn), conn); 
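Putting the pieces of syncNodeReplicate above together: a follower lagging by more than SYNC_SLOW_DOWN_RANGE makes the peers-snapshot pass return 1, and a timer-driven replicate call then shortens the heartbeat interval once the node has been up longer than SYNC_SPEED_UP_AFTER_MS. A hedged recap, with the SYNC_* constants assumed as defined elsewhere in the sync module; demoPickHeartbeatAction is hypothetical:

```c
// Hedged recap of the speed-up decision in syncNodeReplicate above.
// ret > 0 means syncNodeAppendEntriesPeersSnapshot2 saw a peer lagging by
// more than SYNC_SLOW_DOWN_RANGE; isTimer marks calls coming from the
// replicate timeout rather than from client requests.
static void demoPickHeartbeatAction(SSyncNode* pSyncNode, int32_t ret, bool isTimer) {
  int64_t startDelay = taosGetTimestampMs() - pSyncNode->startTime;
  if (ret > 0 && isTimer && startDelay > SYNC_SPEED_UP_AFTER_MS) {
    // shorten the interval, capped at SYNC_SPEED_UP_HB_TIMER unless the
    // configured interval is already shorter
    int32_t ms = pSyncNode->heartbeatTimerMS < SYNC_SPEED_UP_HB_TIMER
                     ? pSyncNode->heartbeatTimerMS
                     : SYNC_SPEED_UP_HB_TIMER;
    syncNodeRestartNowHeartbeatTimerMS(pSyncNode, ms);
  } else {
    syncNodeRestartHeartbeatTimer(pSyncNode);  // normal cadence
  }
}
```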
uv_timer_stop(conn->timer); } - conn->timer->data = NULL; taosArrayPush(pThrd->timerList, &conn->timer); + conn->timer->data = NULL; conn->timer = NULL; } @@ -482,6 +483,7 @@ void cliReadTimeoutCb(uv_timer_t* handle) { // set up timeout cb SCliConn* conn = handle->data; tTrace("%s conn %p timeout, ref:%d", CONN_GET_INST_LABEL(conn), conn, T_REF_VAL_GET(conn)); + uv_read_stop(conn->stream); cliHandleExceptImpl(conn, TSDB_CODE_RPC_TIMEOUT); } @@ -509,7 +511,7 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) { SHashObj* pPool = pool; SConnList* plist = taosHashGet(pPool, key, strlen(key)); if (plist == NULL) { - SConnList list; + SConnList list = {0}; taosHashPut(pPool, key, strlen(key), (void*)&list, sizeof(list)); plist = taosHashGet(pPool, key, strlen(key)); QUEUE_INIT(&plist->conn); @@ -518,15 +520,18 @@ static SCliConn* getConnFromPool(void* pool, char* ip, uint32_t port) { if (QUEUE_IS_EMPTY(&plist->conn)) { return NULL; } + + plist->size -= 1; queue* h = QUEUE_HEAD(&plist->conn); SCliConn* conn = QUEUE_DATA(h, SCliConn, q); conn->status = ConnNormal; QUEUE_REMOVE(&conn->q); QUEUE_INIT(&conn->q); - transDQCancel(((SCliThrd*)conn->hostThrd)->timeoutQueue, conn->task); - conn->task = NULL; - + if (conn->task != NULL) { + transDQCancel(((SCliThrd*)conn->hostThrd)->timeoutQueue, conn->task); + conn->task = NULL; + } return conn; } static void addConnToPool(void* pool, SCliConn* conn) { @@ -538,6 +543,13 @@ static void addConnToPool(void* pool, SCliConn* conn) { allocConnRef(conn, true); + if (conn->timer != NULL) { + uv_timer_stop(conn->timer); + taosArrayPush(thrd->timerList, &conn->timer); + conn->timer->data = NULL; + conn->timer = NULL; + } + STrans* pTransInst = thrd->pTransInst; cliReleaseUnfinishedMsg(conn); transQueueClear(&conn->cliMsgs); @@ -555,13 +567,17 @@ static void addConnToPool(void* pool, SCliConn* conn) { assert(conn->list != NULL); QUEUE_INIT(&conn->q); QUEUE_PUSH(&conn->list->conn, &conn->q); + conn->list->size += 1; + conn->task = NULL; assert(!QUEUE_IS_EMPTY(&conn->list->conn)); - STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg)); - arg->param1 = conn; - arg->param2 = thrd; - conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime)); + if (conn->list->size >= 50) { + STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg)); + arg->param1 = conn; + arg->param2 = thrd; + conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg, CONN_PERSIST_TIME(pTransInst->idleTime)); + } } static int32_t allocConnRef(SCliConn* conn, bool update) { if (update) { @@ -993,6 +1009,8 @@ static void cliAsyncCb(uv_async_t* handle) { if (count >= 2) { tTrace("cli process batch size:%d", count); } + // if (!uv_is_active((uv_handle_t*)pThrd->prepare)) uv_prepare_start(pThrd->prepare, cliPrepareCb); + if (pThrd->stopMsg != NULL) cliHandleQuit(pThrd->stopMsg, pThrd); } static void cliPrepareCb(uv_prepare_t* handle) { @@ -1088,7 +1106,7 @@ static SCliThrd* createThrdObj() { pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t)); uv_prepare_init(pThrd->loop, pThrd->prepare); pThrd->prepare->data = pThrd; - uv_prepare_start(pThrd->prepare, cliPrepareCb); + // uv_prepare_start(pThrd->prepare, cliPrepareCb); int32_t timerSize = 512; pThrd->timerList = taosArrayInit(timerSize, sizeof(void*)); @@ -1125,7 +1143,6 @@ static void destroyThrdObj(SCliThrd* pThrd) { taosMemoryFree(timer); } taosArrayDestroy(pThrd->timerList); - taosMemoryFree(pThrd->prepare); taosMemoryFree(pThrd->loop); taosMemoryFree(pThrd); @@ -1269,6 
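The pool hunks above add an explicit size to SConnList and stop scheduling doCloseIdleConn for every pooled connection; the idle-close task is created only once a list already holds 50 idle connections, so small pools keep their connections warm. A sketch of that policy in isolation, reusing the types and calls from this patch; demoMaybeScheduleIdleClose is hypothetical and the threshold of 50 is the literal from the hunk:

```c
// Hedged sketch of the idle-close policy in addConnToPool above.
// Small pools skip the timeout task entirely; only once a list grows to
// 50 idle connections does an idle-close get scheduled for the newcomer.
static void demoMaybeScheduleIdleClose(SCliThrd* thrd, SCliConn* conn) {
  conn->list->size += 1;
  conn->task = NULL;
  if (conn->list->size >= 50) {
    STaskArg* arg = taosMemoryCalloc(1, sizeof(STaskArg));
    arg->param1 = conn;
    arg->param2 = thrd;
    conn->task = transDQSched(thrd->timeoutQueue, doCloseIdleConn, arg,
                              CONN_PERSIST_TIME(thrd->pTransInst->idleTime));
  }
}
```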
+1286,9 @@ int cliAppCb(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { transFreeMsg(pResp->pCont); cliSchedMsgToNextNode(pMsg, pThrd); return -1; + } else { + // change error code for taos client driver if retryCnt exceeds limit + if (0 == strncmp(pTransInst->label, "TSC", strlen("TSC"))) pResp->code = TSDB_CODE_APP_NOT_READY; } } } @@ -1369,7 +1389,7 @@ int transReleaseCliHandle(void* handle) { } STransMsg tmsg = {.info.handle = handle}; - TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64()); + // TRACE_SET_MSGID(&tmsg.info.traceId, tGenIdPI64()); SCliMsg* cmsg = taosMemoryCalloc(1, sizeof(SCliMsg)); cmsg->msg = tmsg; @@ -1379,6 +1399,7 @@ int transReleaseCliHandle(void* handle) { tGDebug("send release request at thread:%08" PRId64 "", pThrd->pid); if (0 != transAsyncSend(pThrd->asyncPool, &cmsg->q)) { + taosMemoryFree(cmsg); return -1; } return 0; @@ -1409,7 +1430,6 @@ int transSendRequest(void* shandle, const SEpSet* pEpSet, STransMsg* pReq, STran if (ctx != NULL) { pCtx->appCtx = *ctx; } - assert(pTransInst->connType == TAOS_CONN_CLIENT); SCliMsg* cliMsg = taosMemoryCalloc(1, sizeof(SCliMsg)); cliMsg->ctx = pCtx; diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index 8cf525a506..b568163e23 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -23,33 +23,6 @@ static TdThreadOnce transModuleInit = PTHREAD_ONCE_INIT; static int32_t refMgt; static int32_t instMgt; -int transAuthenticateMsg(void* pMsg, int msgLen, void* pAuth, void* pKey) { - T_MD5_CTX context; - int ret = -1; - - tMD5Init(&context); - tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN); - tMD5Update(&context, (uint8_t*)pMsg, msgLen); - tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN); - tMD5Final(&context); - - if (memcmp(context.digest, pAuth, sizeof(context.digest)) == 0) ret = 0; - - return ret; -} - -void transBuildAuthHead(void* pMsg, int msgLen, void* pAuth, void* pKey) { - T_MD5_CTX context; - - tMD5Init(&context); - tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN); - tMD5Update(&context, (uint8_t*)pMsg, msgLen); - tMD5Update(&context, (uint8_t*)pKey, TSDB_PASSWORD_LEN); - tMD5Final(&context); - - memcpy(pAuth, context.digest, sizeof(context.digest)); -} - bool transCompressMsg(char* msg, int32_t len, int32_t* flen) { return false; // SRpcHead* pHead = rpcHeadFromCont(pCont); @@ -176,7 +149,6 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) { * info--->| */ SConnBuffer* p = connBuf; - uvBuf->base = p->buf + p->len; if (p->left == -1) { uvBuf->len = p->cap - p->len; @@ -184,7 +156,8 @@ int transAllocBuffer(SConnBuffer* connBuf, uv_buf_t* uvBuf) { if (p->left < p->cap - p->len) { uvBuf->len = p->left; } else { - p->buf = taosMemoryRealloc(p->buf, p->left + p->len); + p->cap = p->left + p->len; + p->buf = taosMemoryRealloc(p->buf, p->cap); uvBuf->base = p->buf + p->len; uvBuf->len = p->left; } @@ -222,14 +195,13 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, int sz, void* arg, AsyncCB cb) pool->asyncs = taosMemoryCalloc(1, sizeof(uv_async_t) * pool->nAsync); for (int i = 0; i < pool->nAsync; i++) { - uv_async_t* async = &(pool->asyncs[i]); - uv_async_init(loop, async, cb); - SAsyncItem* item = taosMemoryCalloc(1, sizeof(SAsyncItem)); item->pThrd = arg; QUEUE_INIT(&item->qmsg); taosThreadMutexInit(&item->mtx, NULL); + uv_async_t* async = &(pool->asyncs[i]); + uv_async_init(loop, async, cb); async->data = item; } return pool; @@ -238,7 +210,7 @@ SAsyncPool* transAsyncPoolCreate(uv_loop_t* loop, 
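The transAllocBuffer change above fixes a subtle bug: the buffer was realloc'ed without updating p->cap, so later capacity arithmetic could run against a stale value. A sketch of the corrected growth branch, assuming the SConnBuffer fields from this file; demoGrowConnBuffer is hypothetical:

```c
// Hedged sketch of the corrected growth path in transAllocBuffer above.
// The key fix: p->cap is updated alongside the realloc, so subsequent
// capacity math sees the buffer's real size.
static void demoGrowConnBuffer(SConnBuffer* p, uv_buf_t* uvBuf) {
  if (p->left >= p->cap - p->len) {
    p->cap = p->left + p->len;                   // record the new capacity
    p->buf = taosMemoryRealloc(p->buf, p->cap);  // grow to fit the pending bytes
    uvBuf->base = p->buf + p->len;
    uvBuf->len = p->left;
  }
}
```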
int sz, void* arg, AsyncCB cb) void transAsyncPoolDestroy(SAsyncPool* pool) { for (int i = 0; i < pool->nAsync; i++) { uv_async_t* async = &(pool->asyncs[i]); - // uv_close((uv_handle_t*)async, NULL); + SAsyncItem* item = async->data; taosThreadMutexDestroy(&item->mtx); taosMemoryFree(item); @@ -267,14 +239,9 @@ int transAsyncSend(SAsyncPool* pool, queue* q) { uv_async_t* async = &(pool->asyncs[idx]); SAsyncItem* item = async->data; - int64_t st = taosGetTimestampUs(); taosThreadMutexLock(&item->mtx); QUEUE_PUSH(&item->qmsg, q); taosThreadMutexUnlock(&item->mtx); - int64_t el = taosGetTimestampUs() - st; - if (el > 50) { - // tInfo("lock and unlock cost:%d", (int)el); - } return uv_async_send(async); } @@ -350,30 +317,21 @@ void transReqQueueInit(queue* q) { QUEUE_INIT(q); } void* transReqQueuePush(queue* q) { - uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t)); - STransReq* wreq = taosMemoryCalloc(1, sizeof(STransReq)); - wreq->data = req; - req->data = wreq; - QUEUE_PUSH(q, &wreq->q); - return req; + STransReq* req = taosMemoryCalloc(1, sizeof(STransReq)); + req->wreq.data = req; + QUEUE_PUSH(q, &req->q); + return &req->wreq; } void* transReqQueueRemove(void* arg) { void* ret = NULL; - uv_write_t* req = arg; - STransReq* wreq = req && req->data ? req->data : NULL; + uv_write_t* wreq = arg; - assert(wreq->data == req); - if (wreq == NULL || wreq->data == NULL) { - taosMemoryFree(wreq->data); - taosMemoryFree(wreq); - return req; - } + STransReq* req = wreq ? wreq->data : NULL; + if (req == NULL) return NULL; + QUEUE_REMOVE(&req->q); - QUEUE_REMOVE(&wreq->q); - - ret = req && req->handle ? req->handle->data : NULL; - taosMemoryFree(wreq->data); - taosMemoryFree(wreq); + ret = wreq && wreq->handle ? wreq->handle->data : NULL; + taosMemoryFree(req); return ret; } @@ -382,7 +340,6 @@ void transReqQueueClear(queue* q) { queue* h = QUEUE_HEAD(q); QUEUE_REMOVE(h); STransReq* req = QUEUE_DATA(h, STransReq, q); - taosMemoryFree(req->data); taosMemoryFree(req); } } diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index 8b27d95e52..14b8b35478 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -75,7 +75,6 @@ typedef struct SWorkThrd { SAsyncPool* asyncPool; uv_prepare_t* prepare; queue msg; - TdThreadMutex msgMtx; queue conn; void* pTransInst; @@ -499,6 +498,7 @@ void uvWorkerAsyncCb(uv_async_t* handle) { tError("unexcept occurred, continue"); continue; } + // release handle to rpc init if (msg->type == Quit) { (*transAsyncHandle[msg->type])(msg, pThrd); @@ -743,7 +743,6 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) { pThrd->pipe->data = pThrd; QUEUE_INIT(&pThrd->msg); - taosThreadMutexInit(&pThrd->msgMtx, NULL); pThrd->prepare = taosMemoryCalloc(1, sizeof(uv_prepare_t)); uv_prepare_init(pThrd->loop, pThrd->prepare); diff --git a/source/libs/transport/test/svrBench.c b/source/libs/transport/test/svrBench.c index 224f527385..6eb80c8504 100644 --- a/source/libs/transport/test/svrBench.c +++ b/source/libs/transport/test/svrBench.c @@ -75,15 +75,14 @@ void processShellMsg() { void *handle = pRpcMsg->info.handle; taosFreeQitem(pRpcMsg); - - { - SRpcMsg nRpcMsg = {0}; - nRpcMsg.pCont = rpcMallocCont(msgSize); - nRpcMsg.contLen = msgSize; - nRpcMsg.info.handle = handle; - nRpcMsg.code = TSDB_CODE_CTG_NOT_READY; - rpcSendResponse(&nRpcMsg); - } + //{ + // SRpcMsg nRpcMsg = {0}; + // nRpcMsg.pCont = rpcMallocCont(msgSize); + // nRpcMsg.contLen = msgSize; + // nRpcMsg.info.handle = handle; + // 
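The STransReq rework above collapses two allocations that pointed at each other into one struct that embeds the uv_write_t, with wreq.data as the back-pointer. A sketch of the push/pop round trip under the new layout, using the queue macros from this codebase; the demo* names are hypothetical:

```c
// Hedged sketch of the embedded-request round trip above.
// One allocation per write; wreq.data lets the completion callback
// recover the owning STransReq without a second lookup.
void* demoReqPush(queue* q) {
  STransReq* req = taosMemoryCalloc(1, sizeof(STransReq));
  req->wreq.data = req;   // back-pointer for the uv callback
  QUEUE_PUSH(q, &req->q);
  return &req->wreq;      // hand libuv the embedded uv_write_t
}

void demoReqPop(uv_write_t* wreq) {
  STransReq* req = wreq ? wreq->data : NULL;
  if (req == NULL) return;
  QUEUE_REMOVE(&req->q);  // unlink before freeing
  taosMemoryFree(req);    // frees the uv_write_t too, since it is embedded
}
```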
nRpcMsg.code = TSDB_CODE_CTG_NOT_READY; + // rpcSendResponse(&nRpcMsg); + //} } taosUpdateItemSize(qinfo.queue, numOfMsgs); diff --git a/source/os/src/osAtomic.c b/source/os/src/osAtomic.c index d1d768fbcc..a9eb23abba 100644 --- a/source/os/src/osAtomic.c +++ b/source/os/src/osAtomic.c @@ -193,7 +193,13 @@ void* interlocked_sub_fetch_ptr(void* volatile* ptr, void* val) { } int32_t interlocked_fetch_sub_32(int32_t volatile* ptr, int32_t val) { return _InterlockedExchangeAdd(ptr, -val); } -int64_t interlocked_fetch_sub_64(int64_t volatile* ptr, int64_t val) { return _InterlockedExchangeAdd64(ptr, -val); } +int64_t interlocked_fetch_sub_64(int64_t volatile* ptr, int64_t val) { +#ifdef _TD_WINDOWS_32 + return _InterlockedExchangeAdd((int32_t volatile*)ptr, -(int32_t)val); +#else + return _InterlockedExchangeAdd64(ptr, -val); +#endif +} void* interlocked_fetch_sub_ptr(void* volatile* ptr, void* val) { #ifdef WINDOWS @@ -375,7 +381,11 @@ int32_t atomic_exchange_32(int32_t volatile* ptr, int32_t val) { int64_t atomic_exchange_64(int64_t volatile* ptr, int64_t val) { #ifdef WINDOWS +#ifdef _TD_WINDOWS_32 + return _InterlockedExchange((int32_t volatile*)(ptr), (int32_t)(val)); +#else return _InterlockedExchange64((int64_t volatile*)(ptr), (int64_t)(val)); +#endif #elif defined(_TD_NINGSI_60) return atomic_exchange_64_impl((int64_t*)ptr, (int64_t)val); #else @@ -529,7 +539,11 @@ int32_t atomic_fetch_add_32(int32_t volatile* ptr, int32_t val) { int64_t atomic_fetch_add_64(int64_t volatile* ptr, int64_t val) { #ifdef WINDOWS +#ifdef _TD_WINDOWS_32 + return _InterlockedExchangeAdd((int32_t volatile*)(ptr), (int32_t)(val)); +#else return _InterlockedExchangeAdd64((int64_t volatile*)(ptr), (int64_t)(val)); +#endif #elif defined(_TD_NINGSI_60) return __sync_fetch_and_add((ptr), (val)); #else @@ -631,7 +645,11 @@ int32_t atomic_fetch_sub_32(int32_t volatile* ptr, int32_t val) { int64_t atomic_fetch_sub_64(int64_t volatile* ptr, int64_t val) { #ifdef WINDOWS +#ifdef _TD_WINDOWS_32 + return _InterlockedExchangeAdd((int32_t volatile*)(ptr), -(int32_t)(val)); +#else return _InterlockedExchangeAdd64((int64_t volatile*)(ptr), -(int64_t)(val)); +#endif #elif defined(_TD_NINGSI_60) return __sync_fetch_and_sub((ptr), (val)); #else diff --git a/source/os/src/osFile.c b/source/os/src/osFile.c index b76769bdb8..6c8e949b25 100644 --- a/source/os/src/osFile.c +++ b/source/os/src/osFile.c @@ -703,7 +703,11 @@ int64_t taosFSendFile(TdFilePtr pFileOut, TdFilePtr pFileIn, int64_t *offset, in int64_t sentbytes; while (leftbytes > 0) { + #ifdef _TD_ARM_32 + sentbytes = sendfile(pFileOut->fd, pFileIn->fd, (long int*)offset, leftbytes); + #else sentbytes = sendfile(pFileOut->fd, pFileIn->fd, offset, leftbytes); + #endif if (sentbytes == -1) { if (errno == EINTR || errno == EAGAIN || errno == EWOULDBLOCK) { continue; diff --git a/source/os/src/osSemaphore.c b/source/os/src/osSemaphore.c index ab4fb551fe..a7d2ba8531 100644 --- a/source/os/src/osSemaphore.c +++ b/source/os/src/osSemaphore.c @@ -17,6 +17,7 @@ #define _DEFAULT_SOURCE #include "os.h" #include "pthread.h" +#include "tdef.h" #ifdef WINDOWS @@ -57,7 +58,7 @@ int32_t taosGetAppName(char* name, int32_t* len) { end = filepath; } - strcpy(name, end); + tstrncpy(name, end, TSDB_APP_NAME_LEN); if (len != NULL) { *len = (int32_t)strlen(end); @@ -595,7 +596,7 @@ int32_t taosGetAppName(char *name, int32_t *len) { buf[PATH_MAX] = '\0'; size_t n = strlen(buf); if (len) *len = n; - if (name) strcpy(name, buf); + if (name) tstrncpy(name, buf, TSDB_APP_NAME_LEN); return 0; } @@ 
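On the osAtomic.c hunks above: each 64-bit atomic now carries a _TD_WINDOWS_32 branch, because 32-bit Windows toolchains lack the 64-bit interlocked intrinsics; the fallback casts down and routes through the 32-bit _InterlockedExchangeAdd. The shared shape, shown once; demoAtomicFetchAdd64 is hypothetical:

```c
// Hedged recap of the _TD_WINDOWS_32 fallback pattern above: pick the
// 64-bit intrinsic where available, otherwise fall back to the 32-bit
// one with explicit casts.
static int64_t demoAtomicFetchAdd64(int64_t volatile* ptr, int64_t val) {
#ifdef _TD_WINDOWS_32
  // 32-bit toolchains lack _InterlockedExchangeAdd64; operate on the low word
  return _InterlockedExchangeAdd((int32_t volatile*)ptr, (int32_t)val);
#else
  return _InterlockedExchangeAdd64(ptr, val);
#endif
}
```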
-622,7 +623,7 @@ void taosResetPthread(TdThread* thread) { *thread = 0; } bool taosComparePthread(TdThread first, TdThread second) { return first == second; } int32_t taosGetPId() { - static __thread int32_t pid = 0; + static int32_t pid; if (pid != 0) return pid; pid = getpid(); return pid; @@ -644,7 +645,7 @@ int32_t taosGetAppName(char* name, int32_t* len) { ++end; - strcpy(name, end); + tstrncpy(name, end, TSDB_APP_NAME_LEN); if (len != NULL) { *len = strlen(name); diff --git a/source/os/src/osString.c b/source/os/src/osString.c index 26aafd743f..db3aaa49a6 100644 --- a/source/os/src/osString.c +++ b/source/os/src/osString.c @@ -48,7 +48,7 @@ char *strsep(char **stringp, const char *delim) { /* NOTREACHED */ } /* Duplicate a string, up to at most size characters */ -char *strndup(const char *s, size_t size) { +char *strndup(const char *s, int size) { size_t l; char * s2; l = strlen(s); @@ -62,7 +62,7 @@ char *strndup(const char *s, size_t size) { } /* Copy no more than N characters of SRC to DEST, returning the address of the terminating '\0' in DEST, if any, or else DEST + N. */ -char *stpncpy(char *dest, const char *src, size_t n) { +char *stpncpy(char *dest, const char *src, int n) { size_t size = strnlen(src, n); memcpy(dest, src, size); dest += size; diff --git a/source/os/src/osSysinfo.c b/source/os/src/osSysinfo.c index 3e68b6e086..3a75e18a7f 100644 --- a/source/os/src/osSysinfo.c +++ b/source/os/src/osSysinfo.c @@ -398,7 +398,7 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) { if (line != NULL) taosMemoryFree(line); taosCloseFile(&pFile); - if (code != 0) { + if (code != 0 && (done & 1) == 0) { TdFilePtr pFile1 = taosOpenFile("/proc/device-tree/model", TD_FILE_READ | TD_FILE_STREAM); if (pFile1 == NULL) return code; taosGetsFile(pFile1, maxLen, cpuModel); @@ -407,6 +407,16 @@ int32_t taosGetCpuInfo(char *cpuModel, int32_t maxLen, float *numOfCores) { done |= 1; } + if (code != 0 && (done & 1) == 0) { + TdCmdPtr pCmd = taosOpenCmd("uname -a"); + if (pCmd == NULL) return code; + if (taosGetsCmd(pCmd, maxLen, cpuModel) > 0) { + code = 0; + done |= 1; + } + taosCloseCmd(&pCmd); + } + if ((done & 2) == 0) { *numOfCores = coreCount; done |= 2; diff --git a/source/os/src/osSystem.c b/source/os/src/osSystem.c index c86cd19e32..b6f6637601 100644 --- a/source/os/src/osSystem.c +++ b/source/os/src/osSystem.c @@ -248,6 +248,16 @@ TdCmdPtr taosOpenCmd(const char* cmd) { #endif } +int64_t taosGetsCmd(TdCmdPtr pCmd, int32_t maxSize, char *__restrict buf) { + if (pCmd == NULL || buf == NULL) { + return -1; + } + if (fgets(buf, maxSize, (FILE*)pCmd) == NULL) { + return -1; + } + return strlen(buf); +} + int64_t taosGetLineCmd(TdCmdPtr pCmd, char** __restrict ptrBuf) { if (pCmd == NULL || ptrBuf == NULL) { return -1; diff --git a/source/util/src/tbloomfilter.c b/source/util/src/tbloomfilter.c index 52c541ae2e..945cb58fcc 100644 --- a/source/util/src/tbloomfilter.c +++ b/source/util/src/tbloomfilter.c @@ -108,8 +108,41 @@ void tBloomFilterDestroy(SBloomFilter *pBF) { taosMemoryFree(pBF); } -void tBloomFilterDump(const struct SBloomFilter *pBF) { -// ToDo +int32_t tBloomFilterEncode(const SBloomFilter *pBF, SEncoder* pEncoder) { + if (tEncodeU32(pEncoder, pBF->hashFunctions) < 0) return -1; + if (tEncodeU64(pEncoder, pBF->expectedEntries) < 0) return -1; + if (tEncodeU64(pEncoder, pBF->numUnits) < 0) return -1; + if (tEncodeU64(pEncoder, pBF->numBits) < 0) return -1; + if (tEncodeU64(pEncoder, pBF->size) < 0) return -1; + for (uint64_t i = 0; i < pBF->numUnits; i++) { 
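The new taosGetsCmd above gives taosGetCpuInfo a last-resort source for the CPU model string: when /proc/cpuinfo and /proc/device-tree/model both come up empty, it reads the first line of uname -a. A sketch of driving the helper, using only calls that appear in this patch; demoReadUname and the buffer handling are illustrative:

```c
// Hedged sketch: reading one line of `uname -a` through the new taosGetsCmd.
// Mirrors the fallback added to taosGetCpuInfo above.
static int32_t demoReadUname(char* model, int32_t maxLen) {
  TdCmdPtr pCmd = taosOpenCmd("uname -a");
  if (pCmd == NULL) return -1;
  int32_t code = -1;
  if (taosGetsCmd(pCmd, maxLen, model) > 0) {
    code = 0;  // model now holds the first line of output
  }
  taosCloseCmd(&pCmd);
  return code;
}
```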
+ uint64_t* pUnits = (uint64_t*)pBF->buffer; + if (tEncodeU64(pEncoder, pUnits[i]) < 0) return -1; + } + if (tEncodeDouble(pEncoder, pBF->errorRate) < 0) return -1; + return 0; +} + +SBloomFilter* tBloomFilterDecode(SDecoder* pDecoder) { + SBloomFilter *pBF = taosMemoryCalloc(1, sizeof(SBloomFilter)); + pBF->buffer = NULL; + if (tDecodeU32(pDecoder, &pBF->hashFunctions) < 0) goto _error; + if (tDecodeU64(pDecoder, &pBF->expectedEntries) < 0) goto _error; + if (tDecodeU64(pDecoder, &pBF->numUnits) < 0) goto _error; + if (tDecodeU64(pDecoder, &pBF->numBits) < 0) goto _error; + if (tDecodeU64(pDecoder, &pBF->size) < 0) goto _error; + pBF->buffer = taosMemoryCalloc(pBF->numUnits, sizeof(uint64_t)); + for (int32_t i = 0; i < pBF->numUnits; i++) { + uint64_t* pUnits = (uint64_t*)pBF->buffer; + if (tDecodeU64(pDecoder, pUnits + i) < 0) goto _error; + } + if (tDecodeDouble(pDecoder, &pBF->errorRate) < 0) goto _error; + pBF->hashFn1 = taosGetDefaultHashFunction(TSDB_DATA_TYPE_TIMESTAMP); + pBF->hashFn2 = taosGetDefaultHashFunction(TSDB_DATA_TYPE_NCHAR); + return pBF; + +_error: + tBloomFilterDestroy(pBF); + return NULL; } bool tBloomFilterIsFull(const SBloomFilter *pBF) { diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c index 7cbc1cd555..dd61f7d225 100644 --- a/source/util/src/tcache.c +++ b/source/util/src/tcache.c @@ -35,7 +35,7 @@ typedef struct SCacheNode { uint64_t addedTime; // the added time when this element is added or updated into cache uint64_t lifespan; // life duration when this element should be remove from cache int64_t expireTime; // expire time - uint64_t signature; + void* signature; struct STrashElem *pTNodeHeader; // point to trash node head uint16_t keyLen : 15; // max key size: 32kb bool inTrashcan : 1; // denote if it is in trash or not @@ -208,7 +208,7 @@ static void taosTrashcanEmpty(SCacheObj *pCacheObj, bool force); * @param pNode data node */ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheNode *pNode) { - if (pNode->signature != (uint64_t)pNode) { + if (pNode->signature != pNode) { uError("key:%s, %p data is invalid, or has been released", pNode->key, pNode); return; } @@ -226,7 +226,7 @@ static FORCE_INLINE void taosCacheReleaseNode(SCacheObj *pCacheObj, SCacheNode * } static FORCE_INLINE STrashElem *doRemoveElemInTrashcan(SCacheObj *pCacheObj, STrashElem *pElem) { - if (pElem->pData->signature != (uint64_t)pElem->pData) { + if (pElem->pData->signature != pElem->pData) { uWarn("key:sig:0x%" PRIx64 " %p data has been released, ignore", pElem->pData->signature, pElem->pData); return NULL; } @@ -494,7 +494,7 @@ void *taosCacheAcquireByData(SCacheObj *pCacheObj, void *data) { if (pCacheObj == NULL || data == NULL) return NULL; SCacheNode *ptNode = (SCacheNode *)((char *)data - sizeof(SCacheNode)); - if (ptNode->signature != (uint64_t)ptNode) { + if (ptNode->signature != ptNode) { uError("cache:%s, key: %p the data from cache is invalid", pCacheObj->name, ptNode); return NULL; } @@ -511,7 +511,7 @@ void *taosCacheTransferData(SCacheObj *pCacheObj, void **data) { if (pCacheObj == NULL || data == NULL || (*data) == NULL) return NULL; SCacheNode *ptNode = (SCacheNode *)((char *)(*data) - sizeof(SCacheNode)); - if (ptNode->signature != (uint64_t)ptNode) { + if (ptNode->signature != ptNode) { uError("cache:%s, key: %p the data from cache is invalid", pCacheObj->name, ptNode); return NULL; } @@ -539,7 +539,7 @@ void taosCacheRelease(SCacheObj *pCacheObj, void **data, bool _remove) { // It happens when there is only one object in the 
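The bloom filter gains a serialization pair above. A hedged round-trip sketch follows; tEncoderInit, tEncoderClear, tDecoderInit, and tDecoderClear are assumed to be the usual tencode.h companions of the tEncode*/tDecode* calls this diff does show, and demoBloomRoundtrip is hypothetical:

```c
// Hedged roundtrip sketch for the new encode/decode pair above.
static SBloomFilter* demoBloomRoundtrip(const SBloomFilter* pBF,
                                        uint8_t* buf, uint32_t bufLen) {
  SEncoder encoder = {0};
  tEncoderInit(&encoder, buf, bufLen);          // assumed tencode.h API
  if (tBloomFilterEncode(pBF, &encoder) < 0) {
    tEncoderClear(&encoder);
    return NULL;
  }
  tEncoderClear(&encoder);

  SDecoder decoder = {0};
  tDecoderInit(&decoder, buf, bufLen);          // assumed tencode.h API
  SBloomFilter* pOut = tBloomFilterDecode(&decoder);  // NULL on error
  tDecoderClear(&decoder);
  return pOut;
}
```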
cache, and two threads which has referenced this object // start to free the it simultaneously [TD-1569]. SCacheNode *pNode = (SCacheNode *)((char *)(*data) - sizeof(SCacheNode)); - if (pNode->signature != (uint64_t)pNode) { + if (pNode->signature != pNode) { uError("cache:%s, %p, release invalid cache data", pCacheObj->name, pNode); return; } @@ -728,7 +728,7 @@ SCacheNode *taosCreateCacheNode(const char *key, size_t keyLen, const char *pDat pNewNode->addedTime = (uint64_t)taosGetTimestampMs(); pNewNode->lifespan = duration; pNewNode->expireTime = pNewNode->addedTime + pNewNode->lifespan; - pNewNode->signature = (uint64_t)pNewNode; + pNewNode->signature = pNewNode; pNewNode->size = (uint32_t)sizeInBytes; return pNewNode; diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 93b91739a3..f0a58c9bdc 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -47,7 +47,7 @@ STaosError errors[] = { // rpc TAOS_DEFINE_ERROR(TSDB_CODE_RPC_AUTH_FAILURE, "Authentication failure") -TAOS_DEFINE_ERROR(TSDB_CODE_RPC_REDIRECT, "Redirect") +TAOS_DEFINE_ERROR(TSDB_CODE_RPC_REDIRECT, "Database not ready, need retry") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_NETWORK_UNAVAIL, "Unable to establish connection") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_FQDN_ERROR, "Unable to resolve FQDN") TAOS_DEFINE_ERROR(TSDB_CODE_RPC_PORT_EADDRINUSE, "Port already in use") @@ -614,6 +614,8 @@ TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_INVALID_STAT, "Invalid rsma state" TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_QTASKINFO_CREATE, "Rsma qtaskinfo creation error") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FILE_CORRUPTED, "Rsma file corrupted") TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_REMOVE_EXISTS, "Rsma remove exists") +TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_FETCH_MSG_MSSED_UP, "Rsma fetch msg is messed up") +TAOS_DEFINE_ERROR(TSDB_CODE_RSMA_EMPTY_INFO, "Rsma info is empty") //index TAOS_DEFINE_ERROR(TSDB_CODE_INDEX_REBUILDING, "Index is rebuilding") diff --git a/source/util/src/tref.c b/source/util/src/tref.c index 9cd849b9be..c984ef3f34 100644 --- a/source/util/src/tref.c +++ b/source/util/src/tref.c @@ -44,11 +44,11 @@ typedef struct { void (*fp)(void *); } SRefSet; -static SRefSet tsRefSetList[TSDB_REF_OBJECTS]; +static SRefSet tsRefSetList[TSDB_REF_OBJECTS]; static TdThreadOnce tsRefModuleInit = PTHREAD_ONCE_INIT; static TdThreadMutex tsRefMutex; -static int32_t tsRefSetNum = 0; -static int32_t tsNextId = 0; +static int32_t tsRefSetNum = 0; +static int32_t tsNextId = 0; static void taosInitRefModule(void); static void taosLockList(int64_t *lockedBy); diff --git a/source/util/src/tscalablebf.c b/source/util/src/tscalablebf.c index 9ddac44e20..108eb34803 100644 --- a/source/util/src/tscalablebf.c +++ b/source/util/src/tscalablebf.c @@ -101,6 +101,42 @@ void tScalableBfDestroy(SScalableBf *pSBf) { taosMemoryFree(pSBf); } -void tScalableBfDump(const SScalableBf *pSBf) { - // Todo; -} \ No newline at end of file +int32_t tScalableBfEncode(const SScalableBf *pSBf, SEncoder* pEncoder) { + if (!pSBf) { + if (tEncodeI32(pEncoder, 0) < 0) return -1; + return 0; + } + int32_t size = taosArrayGetSize(pSBf->bfArray); + if (tEncodeI32(pEncoder, size) < 0) return -1; + for (int32_t i = 0; i < size; i++) { + SBloomFilter* pBF = taosArrayGetP(pSBf->bfArray, i); + if (tBloomFilterEncode(pBF, pEncoder) < 0) return -1; + } + if (tEncodeU32(pEncoder, pSBf->growth) < 0) return -1; + if (tEncodeU64(pEncoder, pSBf->numBits) < 0) return -1; + return 0; +} + +SScalableBf* tScalableBfDecode(SDecoder* pDecoder) { + SScalableBf *pSBf = taosMemoryCalloc(1, sizeof(SScalableBf)); + 
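The tcache change above makes signature a typed self-pointer, so validity checks become plain pointer comparisons instead of uint64_t casts. The idiom in one place; demoNodeIsValid is hypothetical:

```c
// Hedged sketch of the self-pointer signature idiom after the change above:
// a live node's signature points at the node itself, so any stale or
// foreign pointer fails the comparison without casts.
static bool demoNodeIsValid(const SCacheNode* pNode) {
  return pNode != NULL && pNode->signature == pNode;
}
```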
pSBf->bfArray = NULL; + int32_t size = 0; + if (tDecodeI32(pDecoder, &size) < 0) goto _error; + if (size == 0) { + tScalableBfDestroy(pSBf); + return NULL; + } + pSBf->bfArray = taosArrayInit(size * 2, sizeof(void *)); + for (int32_t i = 0; i < size; i++) { + SBloomFilter* pBF = tBloomFilterDecode(pDecoder); + if (!pBF) goto _error; + taosArrayPush(pSBf->bfArray, &pBF); + } + if (tDecodeU32(pDecoder, &pSBf->growth) < 0) goto _error; + if (tDecodeU64(pDecoder, &pSBf->numBits) < 0) goto _error; + return pSBf; + +_error: + tScalableBfDestroy(pSBf); + return NULL; +} diff --git a/tests/docs-examples-test/go.sh b/tests/docs-examples-test/go.sh index 185661e8a7..8248b4fe0d 100644 --- a/tests/docs-examples-test/go.sh +++ b/tests/docs-examples-test/go.sh @@ -4,7 +4,7 @@ set -e taosd >>/dev/null 2>&1 & taosadapter >>/dev/null 2>&1 & - +sleep 10 cd ../../docs/examples/go go mod tidy diff --git a/tests/docs-examples-test/python.sh b/tests/docs-examples-test/python.sh new file mode 100644 index 0000000000..140d05395b --- /dev/null +++ b/tests/docs-examples-test/python.sh @@ -0,0 +1,47 @@ +#!/bin/bash + +set -e + +taosd >>/dev/null 2>&1 & +taosadapter >>/dev/null 2>&1 & + +sleep 10 + +cd ../../docs/examples/python + +# 1 +taos -s "create database if not exists log" +python3 connect_example.py + +# 2 +taos -s "drop database if exists power" +python3 native_insert_example.py + +# 3 +taos -s "drop database power" +python3 bind_param_example.py + +# 4 +taos -s "drop database power" +python3 multi_bind_example.py + +# 5 +python3 query_example.py + +# 6 +python3 async_query_example.py + +# 7 +taos -s "drop database if exists test" +python3 line_protocol_example.py + +# 8 +taos -s "drop database test" +python3 telnet_line_protocol_example.py + +# 9 +taos -s "drop database test" +python3 json_protocol_example.py + +# 10 +# python3 subscribe_demo.py diff --git a/tests/parallel_test/collect_cases.sh b/tests/parallel_test/collect_cases.sh index 3294beebc1..802c014124 100755 --- a/tests/parallel_test/collect_cases.sh +++ b/tests/parallel_test/collect_cases.sh @@ -41,7 +41,7 @@ fi cat ../script/jenkins/basic.txt |grep -v "^#"|grep -v "^$"|sed "s/^/,,script,/" >>$case_file grep "^python" ../system-test/fulltest.sh |sed "s/^/,,system-test,/" >>$case_file grep "^python" ../develop-test/fulltest.sh |sed "s/^/,,develop-test,/" >>$case_file - +find ../docs-examples-test/ -name "*.sh" -printf '%f\n' | xargs -I {} echo ",,docs-examples-test,bash {}" >> $case_file # tar source code for run.sh to use # if [ $ent -eq 0 ]; then # cd ../../../ diff --git a/tests/parallel_test/run_case.sh b/tests/parallel_test/run_case.sh index eda66a884a..58dcb87345 100755 --- a/tests/parallel_test/run_case.sh +++ b/tests/parallel_test/run_case.sh @@ -50,12 +50,14 @@ if [ $ent -eq 0 ]; then export LD_LIBRARY_PATH=/home/TDengine/debug/build/lib ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDengine/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null + ln -s /home/TDengine/include/client/taos.h /usr/include/taos.h 2>/dev/null CONTAINER_TESTDIR=/home/TDengine else export PATH=$PATH:/home/TDinternal/debug/build/bin export LD_LIBRARY_PATH=/home/TDinternal/debug/build/lib ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so 2>/dev/null ln -s /home/TDinternal/debug/build/lib/libtaos.so /usr/lib/libtaos.so.1 2>/dev/null + ln -s /home/TDinternal/community/include/client/taos.h /usr/include/taos.h 2>/dev/null CONTAINER_TESTDIR=/home/TDinternal/community fi mkdir -p 
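One convention in the tScalableBfEncode/tScalableBfDecode pair above is easy to miss: a NULL filter is encoded as a zero bfArray size, and the decoder returns NULL both for that case and for genuine decode failures. A caller sketch; demoDecodeSBf is hypothetical:

```c
// Hedged sketch: NULL scalable bloom filters round-trip as a zero size.
// The decoder above does not distinguish "encoded as absent" from a
// decode error; a caller that must tell them apart has to check the
// size field itself before decoding.
static SScalableBf* demoDecodeSBf(SDecoder* pDecoder) {
  SScalableBf* pSBf = tScalableBfDecode(pDecoder);
  if (pSBf == NULL) {
    // treat as "no filter present" unless the stream is known corrupt
  }
  return pSBf;
}
```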
/var/lib/taos/subscribe diff --git a/tests/pytest/util/dnodes-random-fail.py b/tests/pytest/util/dnodes-random-fail.py index 7cadca64a3..9b653844f8 100644 --- a/tests/pytest/util/dnodes-random-fail.py +++ b/tests/pytest/util/dnodes-random-fail.py @@ -24,12 +24,9 @@ class TDSimClient: self.cfgDict = { "numOfLogLines": "100000000", - "numOfThreadsPerCore": "2.0", "locale": "en_US.UTF-8", "charset": "UTF-8", "asyncLog": "0", - "anyIp": "0", - "sdbDebugFlag": "135", "rpcDebugFlag": "135", "tmrDebugFlag": "131", "cDebugFlag": "135", diff --git a/tests/pytest/util/dnodes.py b/tests/pytest/util/dnodes.py index 20e4e4abe6..e530695d1e 100644 --- a/tests/pytest/util/dnodes.py +++ b/tests/pytest/util/dnodes.py @@ -30,7 +30,6 @@ class TDSimClient: self.path = path self.cfgDict = { "numOfLogLines": "100000000", - "numOfThreadsPerCore": "2.0", "locale": "en_US.UTF-8", "charset": "UTF-8", "asyncLog": "0", @@ -40,6 +39,7 @@ class TDSimClient: "udebugFlag": "143", "jnidebugFlag": "143", "qdebugFlag": "143", + "supportVnodes": "1024", "telemetryReporting": "0", } @@ -117,8 +117,6 @@ class TDDnode: self.valgrind = 0 self.remoteIP = "" self.cfgDict = { - "walLevel": "2", - "fsync": "1000", "monitor": "0", "maxShellConns": "30000", "locale": "en_US.UTF-8", @@ -139,6 +137,7 @@ class TDDnode: "qdebugFlag": "143", "numOfLogLines": "100000000", "statusInterval": "1", + "supportVnodes": "1024", "telemetryReporting": "0" } diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py index 4c50af6bac..753c41e094 100644 --- a/tests/pytest/util/sql.py +++ b/tests/pytest/util/sql.py @@ -84,10 +84,10 @@ class TDSql: self.queryResult = None tdLog.info("sql:%s, expect error occured" % (sql)) - def query(self, sql, row_tag=None,queyTimes=10): + def query(self, sql, row_tag=None,queryTimes=10): self.sql = sql i=1 - while i <= queyTimes: + while i <= queryTimes: try: self.cursor.execute(sql) self.queryResult = self.cursor.fetchall() @@ -97,26 +97,15 @@ class TDSql: return self.queryResult return self.queryRows except Exception as e: - i+=1 tdLog.notice("Try to query again, query times: %d "%i) + if i == queryTimes: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + i+=1 time.sleep(1) pass - else: - try: - tdLog.notice("Try the last query ") - self.cursor.execute(sql) - self.queryResult = self.cursor.fetchall() - self.queryRows = len(self.queryResult) - self.queryCols = len(self.cursor.description) - if row_tag: - return self.queryResult - return self.queryRows - except Exception as e: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, sql, repr(e)) - tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - traceback.print_exc() - raise Exception(repr(e)) def is_err_sql(self, sql): @@ -305,28 +294,23 @@ class TDSql: time.sleep(1) continue - def execute(self, sql,queyTimes=10): + def execute(self, sql,queryTimes=10): self.sql = sql i=1 - while i <= queyTimes: + while i <= queryTimes: try: self.affectedRows = self.cursor.execute(sql) return self.affectedRows except Exception as e: - i+=1 tdLog.notice("Try to execute sql again, query times: %d "%i) + if i == queryTimes: + caller = inspect.getframeinfo(inspect.stack()[1][0]) + args = (caller.filename, caller.lineno, sql, repr(e)) + tdLog.notice("%s(%d) failed: sql:%s, %s" % args) + raise Exception(repr(e)) + i+=1 time.sleep(1) pass - else: - try: - tdLog.notice("Try the last execute sql ") - 
self.affectedRows = self.cursor.execute(sql) - return self.affectedRows - except Exception as e: - caller = inspect.getframeinfo(inspect.stack()[1][0]) - args = (caller.filename, caller.lineno, sql, repr(e)) - tdLog.notice("%s(%d) failed: sql:%s, %s" % args) - raise Exception(repr(e)) def checkAffectedRows(self, expectAffectedRows): if self.affectedRows != expectAffectedRows: diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt index 356eaed57e..0bb24826a3 100644 --- a/tests/script/jenkins/basic.txt +++ b/tests/script/jenkins/basic.txt @@ -84,7 +84,7 @@ ./test.sh -f tsim/insert/update0.sim ./test.sh -f tsim/insert/update1_sort_merge.sim -# ---- parser +# ---- parser ---- ./test.sh -f tsim/parser/alter__for_community_version.sim ./test.sh -f tsim/parser/alter_column.sim ./test.sh -f tsim/parser/alter_stable.sim @@ -106,7 +106,7 @@ ./test.sh -f tsim/parser/commit.sim ./test.sh -f tsim/parser/condition.sim ./test.sh -f tsim/parser/constCol.sim -#./test.sh -f tsim/parser/create_db.sim +./test.sh -f tsim/parser/create_db.sim ./test.sh -f tsim/parser/create_mt.sim ./test.sh -f tsim/parser/create_tb_with_tag_name.sim ./test.sh -f tsim/parser/create_tb.sim @@ -129,7 +129,7 @@ ./test.sh -f tsim/parser/import.sim ./test.sh -f tsim/parser/insert_multiTbl.sim ./test.sh -f tsim/parser/insert_tb.sim -# TD-17038 ./test.sh -f tsim/parser/interp.sim +# TD-18293 ./test.sh -f tsim/parser/interp.sim ./test.sh -f tsim/parser/join_manyblocks.sim ./test.sh -f tsim/parser/join_multitables.sim ./test.sh -f tsim/parser/join_multivnode.sim @@ -179,6 +179,7 @@ ./test.sh -f tsim/query/scalarFunction.sim ./test.sh -f tsim/query/scalarNull.sim ./test.sh -f tsim/query/session.sim +./test.sh -f tsim/query/udf.sim # ---- qnode ./test.sh -f tsim/qnode/basic1.sim @@ -317,7 +318,7 @@ ./test.sh -f tsim/valgrind/checkError7.sim ./test.sh -f tsim/valgrind/checkUdf.sim -# --- vnode +# --- vnode ---- # unsupport ./test.sh -f tsim/vnode/replica3_basic.sim # unsupport ./test.sh -f tsim/vnode/replica3_repeat.sim # unsupport ./test.sh -f tsim/vnode/replica3_vgroup.sim @@ -331,8 +332,8 @@ ./test.sh -f tsim/vnode/stable_replica3_vnode3.sim # --- sync -#./test.sh -f tsim/sync/3Replica1VgElect.sim -#./test.sh -f tsim/sync/3Replica5VgElect.sim +./test.sh -f tsim/sync/3Replica1VgElect.sim +./test.sh -f tsim/sync/3Replica5VgElect.sim ./test.sh -f tsim/sync/oneReplica1VgElect.sim ./test.sh -f tsim/sync/oneReplica5VgElect.sim diff --git a/tests/script/sh/compile_udf.sh b/tests/script/sh/compile_udf.sh index 12e922b2df..c7148d7d7d 100755 --- a/tests/script/sh/compile_udf.sh +++ b/tests/script/sh/compile_udf.sh @@ -3,8 +3,8 @@ set +e rm -rf /tmp/udf/libbitand.so /tmp/udf/libsqrsum.so mkdir -p /tmp/udf echo "compile udf bit_and and sqr_sum" -gcc -fPIC -shared sh/bit_and.c -o /tmp/udf/libbitand.so -gcc -fPIC -shared sh/sqr_sum.c -o /tmp/udf/libsqrsum.so +gcc -fPIC -shared sh/bit_and.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libbitand.so +gcc -fPIC -shared sh/l2norm.c -I../../include/libs/function/ -I../../include/client -I../../include/util -o /tmp/udf/libl2norm.so echo "debug show /tmp/udf/*.so" ls /tmp/udf/*.so diff --git a/tests/script/sh/sqr_sum.c b/tests/script/sh/l2norm.c similarity index 84% rename from tests/script/sh/sqr_sum.c rename to tests/script/sh/l2norm.c index af57f377ab..8ccdffb8d6 100644 --- a/tests/script/sh/sqr_sum.c +++ b/tests/script/sh/l2norm.c @@ -5,22 +5,22 @@ #include "taosudf.h" -DLL_EXPORT int32_t sqr_sum_init() { +DLL_EXPORT int32_t l2norm_init() { 
return 0; } -DLL_EXPORT int32_t sqr_sum_destroy() { +DLL_EXPORT int32_t l2norm_destroy() { return 0; } -DLL_EXPORT int32_t sqr_sum_start(SUdfInterBuf *buf) { +DLL_EXPORT int32_t l2norm_start(SUdfInterBuf *buf) { *(int64_t*)(buf->buf) = 0; buf->bufLen = sizeof(double); buf->numOfResult = 0; return 0; } -DLL_EXPORT int32_t sqr_sum(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { +DLL_EXPORT int32_t l2norm(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) { double sumSquares = *(double*)interBuf->buf; int8_t numNotNull = 0; for (int32_t i = 0; i < block->numOfCols; ++i) { @@ -67,7 +67,7 @@ DLL_EXPORT int32_t sqr_sum(SUdfDataBlock* block, SUdfInterBuf *interBuf, SUdfInt return 0; } -DLL_EXPORT int32_t sqr_sum_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) { +DLL_EXPORT int32_t l2norm_finish(SUdfInterBuf* buf, SUdfInterBuf *resultData) { if (buf->numOfResult == 0) { resultData->numOfResult = 0; return 0; diff --git a/tests/script/tsim/db/alter_option.sim b/tests/script/tsim/db/alter_option.sim index 7df1f02713..00baa9b3f6 100644 --- a/tests/script/tsim/db/alter_option.sim +++ b/tests/script/tsim/db/alter_option.sim @@ -111,16 +111,16 @@ endi if $data21_db != 1000 then # wal_level fsyncperiod return -1 endi -if $data22_db != 0 then # +if $data22_db != 172800 then # wal_retention_period return -1 endi -if $data23_db != 0 then # +if $data23_db != -1 then # wal_retention_size return -1 endi -if $data24_db != 0 then # +if $data24_db != 86400 then # wal_roll_period return -1 endi -if $data25_db != 0 then # +if $data25_db != 0 then # wal_segment_size return -1 endi diff --git a/tests/script/tsim/parser/alter1.sim b/tests/script/tsim/parser/alter1.sim index d917f4b61e..6771b35eae 100644 --- a/tests/script/tsim/parser/alter1.sim +++ b/tests/script/tsim/parser/alter1.sim @@ -129,3 +129,5 @@ endi #if $rows != 0 then # return -1 #endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_bigint.sim b/tests/script/tsim/parser/columnValue_bigint.sim index ae97835dff..a196c61684 100644 --- a/tests/script/tsim/parser/columnValue_bigint.sim +++ b/tests/script/tsim/parser/columnValue_bigint.sim @@ -425,3 +425,5 @@ sql insert into st_bigint_e25 using mt_bigint tags (033) values (now, 00062) #sql_error alter table st_bigint_e23 set tag tagname="abc" #sql_error alter table st_bigint_e24 set tag tagname=" " #sql_error alter table st_bigint_e25 set tag tagname='' + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_bool.sim b/tests/script/tsim/parser/columnValue_bool.sim index d20c4efdc0..ffdb095cee 100644 --- a/tests/script/tsim/parser/columnValue_bool.sim +++ b/tests/script/tsim/parser/columnValue_bool.sim @@ -634,3 +634,4 @@ sql alter table st_bool_i4 set tag tagname="abc" sql alter table st_bool_i5 set tag tagname=" " sql alter table st_bool_i6 set tag tagname='' +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_double.sim b/tests/script/tsim/parser/columnValue_double.sim index dae64735ea..d74dda36dc 100644 --- a/tests/script/tsim/parser/columnValue_double.sim +++ b/tests/script/tsim/parser/columnValue_double.sim @@ -528,3 +528,5 @@ sql_error alter table st_double_e22 set tag tagname=abc sql alter table st_double_e23 set tag tagname="abc" sql alter table st_double_e24 set tag tagname=" " sql alter table st_double_e25 set tag tagname='' + 
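Since the sqr_sum to l2norm rename above touches every entry point of the sample aggregate UDF, the naming contract is worth stating once: for an aggregate function NAME, the framework resolves NAME_init, NAME_destroy, NAME_start, NAME (the per-block step), and NAME_finish by symbol name. A minimal skeleton under that contract, modeled on l2norm.c; myagg and its accumulator logic are illustrative, not the sample's actual math:

```c
#include "taosudf.h"

// Hedged skeleton of the aggregate UDF entry points renamed above.
// Created via: create aggregate function myagg as '...' outputtype double bufSize 8;
DLL_EXPORT int32_t myagg_init() { return 0; }
DLL_EXPORT int32_t myagg_destroy() { return 0; }

DLL_EXPORT int32_t myagg_start(SUdfInterBuf *buf) {
  *(double *)(buf->buf) = 0;    // zero the running accumulator
  buf->bufLen = sizeof(double);
  buf->numOfResult = 0;         // no rows folded in yet
  return 0;
}

DLL_EXPORT int32_t myagg(SUdfDataBlock *block, SUdfInterBuf *interBuf, SUdfInterBuf *newInterBuf) {
  double acc = *(double *)interBuf->buf;  // carry state between blocks
  // ... fold the block's rows into acc here ...
  *(double *)newInterBuf->buf = acc;
  newInterBuf->bufLen = sizeof(double);
  newInterBuf->numOfResult = 1;           // state now holds data (assumption)
  return 0;
}

DLL_EXPORT int32_t myagg_finish(SUdfInterBuf *buf, SUdfInterBuf *resultData) {
  if (buf->numOfResult == 0) {            // no input rows at all
    resultData->numOfResult = 0;
    return 0;
  }
  *(double *)(resultData->buf) = *(double *)(buf->buf);
  resultData->bufLen = sizeof(double);
  resultData->numOfResult = 1;
  return 0;
}
```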
+system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_float.sim b/tests/script/tsim/parser/columnValue_float.sim index 9b0ca4b186..a55e1aea2d 100644 --- a/tests/script/tsim/parser/columnValue_float.sim +++ b/tests/script/tsim/parser/columnValue_float.sim @@ -558,3 +558,5 @@ sql_error alter table st_float_e22 set tag tagname=abc sql alter table st_float_e23 set tag tagname="abc" sql alter table st_float_e24 set tag tagname=" " sql alter table st_float_e25 set tag tagname='' + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_int.sim b/tests/script/tsim/parser/columnValue_int.sim index 48d95f5ecb..34ad3d93fd 100644 --- a/tests/script/tsim/parser/columnValue_int.sim +++ b/tests/script/tsim/parser/columnValue_int.sim @@ -423,3 +423,5 @@ sql_error alter table st_int_e22 set tag tagname=abc sql_error alter table st_int_e23 set tag tagname="abc" sql_error alter table st_int_e24 set tag tagname=" " sql alter table st_int_e25 set tag tagname='' + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_smallint.sim b/tests/script/tsim/parser/columnValue_smallint.sim index ced486ba0b..39608a8efe 100644 --- a/tests/script/tsim/parser/columnValue_smallint.sim +++ b/tests/script/tsim/parser/columnValue_smallint.sim @@ -426,3 +426,5 @@ sql_error alter table st_smallint_e22 set tag tagname=abc sql_error alter table st_smallint_e23 set tag tagname="abc" sql_error alter table st_smallint_e24 set tag tagname=" " sql alter table st_smallint_e25 set tag tagname='' + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_tinyint.sim b/tests/script/tsim/parser/columnValue_tinyint.sim index bc1fcd3445..fe2734c245 100644 --- a/tests/script/tsim/parser/columnValue_tinyint.sim +++ b/tests/script/tsim/parser/columnValue_tinyint.sim @@ -424,3 +424,5 @@ sql_error alter table st_tinyint_e22 set tag tagname=abc sql_error alter table st_tinyint_e23 set tag tagname="abc" sql_error alter table st_tinyint_e24 set tag tagname=" " sql alter table st_tinyint_e25 set tag tagname='' + +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/columnValue_unsign.sim b/tests/script/tsim/parser/columnValue_unsign.sim index a72b1082f6..758814bc2b 100644 --- a/tests/script/tsim/parser/columnValue_unsign.sim +++ b/tests/script/tsim/parser/columnValue_unsign.sim @@ -130,3 +130,4 @@ if $rows != 1 then return -1 endi +system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file diff --git a/tests/script/tsim/parser/create_db.sim b/tests/script/tsim/parser/create_db.sim index 8d0bc3fe5b..d50b53b2f7 100644 --- a/tests/script/tsim/parser/create_db.sim +++ b/tests/script/tsim/parser/create_db.sim @@ -237,8 +237,8 @@ if $rows != 3 then return -1 endi sql show databases -print wallevel $data13_testwal -if $data13_testwal != 1 then +print wallevel $data20_testwal +if $data20_testwal != 1 then return -1 endi sql drop database testwal diff --git a/tests/script/tsim/query/udf.sim b/tests/script/tsim/query/udf.sim index 7259b1e779..2f685f8e24 100644 --- a/tests/script/tsim/query/udf.sim +++ b/tests/script/tsim/query/udf.sim @@ -24,10 +24,10 @@ if $system_content == Windows_NT then endi if $system_content == Windows_NT then sql create function bit_and as 'C:\\Windows\\Temp\\bitand.dll' outputtype int 
bufSize 8; - sql create aggregate function sqr_sum as 'C:\\Windows\\Temp\\sqrsum.dll' outputtype double bufSize 8; + sql create aggregate function l2norm as 'C:\\Windows\\Temp\\l2norm.dll' outputtype double bufSize 8; else sql create function bit_and as '/tmp/udf/libbitand.so' outputtype int bufSize 8; - sql create aggregate function sqr_sum as '/tmp/udf/libsqrsum.so' outputtype double bufSize 8; + sql create aggregate function l2norm as '/tmp/udf/libl2norm.so' outputtype double bufSize 8; endi sql show functions; if $rows != 2 then @@ -44,7 +44,7 @@ if $data10 != 2 then return -1 endi -sql select sqr_sum(f) from t; +sql select l2norm(f) from t; if $rows != 1 then print expect 1, actual $rows return -1 @@ -66,7 +66,7 @@ if $data10 != 1 then return -1 endi -sql select sqr_sum(f1, f2) from t2; +sql select l2norm(f1, f2) from t2; if $rows != 1 then return -1 endi @@ -95,7 +95,7 @@ if $data30 != NULL then return -1 endi -sql select sqr_sum(f1, f2) from t2; +sql select l2norm(f1, f2) from t2; print $rows, $data00 if $rows != 1 then return -1 @@ -105,7 +105,7 @@ if $data00 != 2.645751311 then endi sql insert into t2 values(now+4s, 4, 8)(now+5s, 5, 9); -sql select sqr_sum(f1-f2), sqr_sum(f1+f2) from t2; +sql select l2norm(f1-f2), l2norm(f1+f2) from t2; print $rows , $data00 , $data01 if $rows != 1 then return -1; @@ -117,7 +117,7 @@ if $data01 != 18.547236991 then return -1 endi -sql select sqr_sum(bit_and(f2, f1)), sqr_sum(bit_and(f1, f2)) from t2; +sql select l2norm(bit_and(f2, f1)), l2norm(bit_and(f1, f2)) from t2; print $rows , $data00 , $data01 if $rows != 1 then return -1 @@ -129,7 +129,7 @@ if $data01 != 1.414213562 then return -1 endi -sql select sqr_sum(f2) from udf.t2 group by 1-bit_and(f1, f2) order by 1-bit_and(f1,f2); +sql select l2norm(f2) from udf.t2 group by 1-bit_and(f1, f2) order by 1-bit_and(f1,f2); print $rows , $data00 , $data10 , $data20 if $rows != 3 then return -1 @@ -149,10 +149,10 @@ sql show functions; if $rows != 1 then return -1 endi -if $data00 != @sqr_sum@ then +if $data00 != @l2norm@ then return -1 endi -sql drop function sqr_sum; +sql drop function l2norm; sql show functions; if $rows != 0 then return -1 diff --git a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim index bde56cb862..86bdbdcded 100644 --- a/tests/script/tsim/sma/rsmaCreateInsertQuery.sim +++ b/tests/script/tsim/sma/rsmaCreateInsertQuery.sim @@ -29,8 +29,8 @@ sql insert into ct1 values(now, 10); sql insert into ct1 values(now+1s, 1); sql insert into ct1 values(now+2s, 100); -print =============== wait maxdelay 15+1 seconds for results -sleep 16000 +print =============== wait maxdelay 15+2 seconds for results +sleep 17000 print =============== select * from retention level 2 from memory sql select * from ct1; diff --git a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim index 1b54e5a47d..405d22ebdd 100644 --- a/tests/script/tsim/sma/rsmaPersistenceRecovery.sim +++ b/tests/script/tsim/sma/rsmaPersistenceRecovery.sim @@ -29,8 +29,8 @@ sql insert into ct1 values(now, 10, 10.0); sql insert into ct1 values(now+1s, 1, 1.0); sql insert into ct1 values(now+2s, 100, 100.0); -print =============== wait maxdelay 5+1 seconds for results -sleep 6000 +print =============== wait maxdelay 5+2 seconds for results +sleep 7000 print =============== select * from retention level 2 from memory sql select * from ct1; @@ -135,8 +135,8 @@ print =============== insert after rsma qtaskinfo recovery sql insert into ct1 
values(now, 50, 500.0); sql insert into ct1 values(now+1s, 40, 40.0); -print =============== wait maxdelay 5+1 seconds for results -sleep 6000 +print =============== wait maxdelay 5+2 seconds for results +sleep 7000 print =============== select * from retention level 2 from file and memory after rsma qtaskinfo recovery sql select * from ct1; diff --git a/tests/script/tsim/stream/drop_stream.sim b/tests/script/tsim/stream/drop_stream.sim index bdd88bf780..747f59fe85 100644 --- a/tests/script/tsim/stream/drop_stream.sim +++ b/tests/script/tsim/stream/drop_stream.sim @@ -45,70 +45,70 @@ sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int); sql create table scalar_ct1 using scalar_stb tags(10); sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)); -sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb; +sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1; sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb; -sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb; +sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1; sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb; -sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb; +sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1; sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb; -sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb; +sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1; sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb; -sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb; +sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1; sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from 
scalar_tb; -sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb; +sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1; sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb; -sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb; +sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1; sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb; -sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb; +sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1; sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb; -sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb; +sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1; sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb; -sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb; +sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1; sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb; -sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb; +sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1; sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb; -sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb; +sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1; sql create 
stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb; -sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb; +sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1; sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb; -sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb; +sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname; sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1; sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb; -sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb; +sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_ct1; sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb; -sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb; +sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1; sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb; -sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb; +sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname; sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1; sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb; -sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb; +sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname; sql create stream ctb_lower_stream into output_lower_ctb as 
select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1; sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb; -sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb; +sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname; sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1; sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb; -sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb; +sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname; sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1; sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb; -sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb; +sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname; sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1; sql create stream tb_substr_stream into output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb; -sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb; +sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname; sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1; sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb; sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); @@ -136,70 +136,70 @@ sql create table test.scalar_function_tb1 (ts timestamp, c1 tinyint, c2 smallint sql create table if not exists scalar_stb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)) tags (t1 int); sql create table scalar_ct1 using scalar_stb tags(10); sql create table if not exists scalar_tb (ts timestamp, c1 int, c2 double, c3 binary(20), c4 binary(20), c5 nchar(20)); -sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb; +sql create stream stb_abs_stream trigger at_once into output_abs_stb as select ts, abs(c1), abs(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_abs_stream trigger at_once into output_abs_ctb as select ts, abs(c1), abs(c2), c3 from scalar_ct1; sql create stream tb_abs_stream trigger at_once into output_abs_tb as select ts, abs(c1), abs(c2), c3 from scalar_tb; -sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), acos(c2), c3 from scalar_stb; +sql create stream stb_acos_stream trigger at_once into output_acos_stb as select ts, acos(c1), 
acos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_acos_stream trigger at_once into output_acos_ctb as select ts, acos(c1), acos(c2), c3 from scalar_ct1; sql create stream tb_acos_stream trigger at_once into output_acos_tb as select ts, acos(c1), acos(c2), c3 from scalar_tb; -sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb; +sql create stream stb_asin_stream trigger at_once into output_asin_stb as select ts, asin(c1), asin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_asin_stream trigger at_once into output_asin_ctb as select ts, asin(c1), asin(c2), c3 from scalar_ct1; sql create stream tb_asin_stream trigger at_once into output_asin_tb as select ts, asin(c1), asin(c2), c3 from scalar_tb; -sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb; +sql create stream stb_atan_stream trigger at_once into output_atan_stb as select ts, atan(c1), atan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_atan_stream trigger at_once into output_atan_ctb as select ts, atan(c1), atan(c2), c3 from scalar_ct1; sql create stream tb_atan_stream trigger at_once into output_atan_tb as select ts, atan(c1), atan(c2), c3 from scalar_tb; -sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb; +sql create stream stb_ceil_stream trigger at_once into output_ceil_stb as select ts, ceil(c1), ceil(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_ceil_stream trigger at_once into output_ceil_ctb as select ts, ceil(c1), ceil(c2), c3 from scalar_ct1; sql create stream tb_ceil_stream trigger at_once into output_ceil_tb as select ts, ceil(c1), ceil(c2), c3 from scalar_tb; -sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb; +sql create stream stb_cos_stream trigger at_once into output_cos_stb as select ts, cos(c1), cos(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_cos_stream trigger at_once into output_cos_ctb as select ts, cos(c1), cos(c2), c3 from scalar_ct1; sql create stream tb_cos_stream trigger at_once into output_cos_tb as select ts, cos(c1), cos(c2), c3 from scalar_tb; -sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb; +sql create stream stb_floor_stream trigger at_once into output_floor_stb as select ts, floor(c1), floor(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_floor_stream trigger at_once into output_floor_ctb as select ts, floor(c1), floor(c2), c3 from scalar_ct1; sql create stream tb_floor_stream trigger at_once into output_floor_tb as select ts, floor(c1), floor(c2), c3 from scalar_tb; -sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb; +sql create stream stb_log_stream trigger at_once into output_log_stb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_log_stream trigger at_once into output_log_ctb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_ct1; sql create stream tb_log_stream trigger at_once into output_log_tb as select ts, log(c1, 2), log(c2, 2), c3 from scalar_tb; -sql create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb; +sql 
create stream stb_pow_stream trigger at_once into output_pow_stb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_stb partition by tbname; sql create stream ctb_pow_stream trigger at_once into output_pow_ctb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_ct1; sql create stream tb_pow_stream trigger at_once into output_pow_tb as select ts, pow(c1, 2), pow(c2, 2), c3 from scalar_tb; -sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb; +sql create stream stb_round_stream trigger at_once into output_round_stb as select ts, round(c1), round(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_round_stream trigger at_once into output_round_ctb as select ts, round(c1), round(c2), c3 from scalar_ct1; sql create stream tb_round_stream trigger at_once into output_round_tb as select ts, round(c1), round(c2), c3 from scalar_tb; -sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb; +sql create stream stb_sin_stream trigger at_once into output_sin_stb as select ts, sin(c1), sin(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sin_stream trigger at_once into output_sin_ctb as select ts, sin(c1), sin(c2), c3 from scalar_ct1; sql create stream tb_sin_stream trigger at_once into output_sin_tb as select ts, sin(c1), sin(c2), c3 from scalar_tb; -sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb; +sql create stream stb_sqrt_stream trigger at_once into output_sqrt_stb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_sqrt_stream trigger at_once into output_sqrt_ctb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_ct1; sql create stream tb_sqrt_stream trigger at_once into output_sqrt_tb as select ts, sqrt(c1), sqrt(c2), c3 from scalar_tb; -sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb; +sql create stream stb_tan_stream trigger at_once into output_tan_stb as select ts, tan(c1), tan(c2), c3 from scalar_stb partition by tbname; sql create stream ctb_tan_stream trigger at_once into output_tan_ctb as select ts, tan(c1), tan(c2), c3 from scalar_ct1; sql create stream tb_tan_stream trigger at_once into output_tan_tb as select ts, tan(c1), tan(c2), c3 from scalar_tb; -sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb; +sql create stream stb_char_length_stream into output_char_length_stb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_stb partition by tbname; sql create stream ctb_char_length_stream into output_char_length_ctb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_ct1; sql create stream tb_char_length_stream into output_char_length_tb as select ts, char_length(c3), char_length(c4), char_length(c5) from scalar_tb; -sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb; +sql create stream stb_concat_stream into output_concat_stb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_stream into output_concat_ctb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from 
scalar_ct1; sql create stream tb_concat_stream into output_concat_tb as select ts, concat(c3, c4), concat(c3, c5), concat(c4, c5), concat(c3, c4, c5) from scalar_tb; -sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb; +sql create stream stb_concat_ws_stream into output_concat_ws_stb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_stb partition by tbname; sql create stream ctb_concat_ws_stream into output_concat_ws_ctb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_ct1; sql create stream tb_concat_ws_stream into output_concat_ws_tb as select ts, concat_ws("aND", c3, c4), concat_ws("and", c3, c5), concat_ws("And", c4, c5), concat_ws("AND", c3, c4, c5) from scalar_tb; -sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb; +sql create stream stb_length_stream into output_length_stb as select ts, length(c3), length(c4), length(c5) from scalar_stb partition by tbname; sql create stream ctb_length_stream into output_length_ctb as select ts, length(c3), length(c4), length(c5) from scalar_ct1; sql create stream tb_length_stream into output_length_tb as select ts, length(c3), length(c4), length(c5) from scalar_tb; -sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb; +sql create stream stb_lower_stream into output_lower_stb as select ts, lower(c3), lower(c4), lower(c5) from scalar_stb partition by tbname; sql create stream ctb_lower_stream into output_lower_ctb as select ts, lower(c3), lower(c4), lower(c5) from scalar_ct1; sql create stream tb_lower_stream into output_lower_tb as select ts, lower(c3), lower(c4), lower(c5) from scalar_tb; -sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb; +sql create stream stb_ltrim_stream into output_ltrim_stb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_stb partition by tbname; sql create stream ctb_ltrim_stream into output_ltrim_ctb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_ct1; sql create stream tb_ltrim_stream into output_ltrim_tb as select ts, ltrim(c3), ltrim(c4), ltrim(c5) from scalar_tb; -sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb; +sql create stream stb_rtrim_stream into output_rtrim_stb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_stb partition by tbname; sql create stream ctb_rtrim_stream into output_rtrim_ctb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_ct1; sql create stream tb_rtrim_stream into output_rtrim_tb as select ts, rtrim(c3), rtrim(c4), rtrim(c5) from scalar_tb; -sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb; +sql create stream stb_substr_stream into output_substr_stb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_stb partition by tbname; sql create stream ctb_substr_stream into output_substr_ctb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_ct1; sql create stream tb_substr_stream into 
output_substr_tb as select ts, substr(c3, 2), substr(c3, 2, 2), substr(c4, 5, 1), substr(c5, 3, 4) from scalar_tb; -sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb; +sql create stream stb_upper_stream into output_upper_stb as select ts, upper(c3), upper(c4), upper(c5) from scalar_stb partition by tbname; sql create stream ctb_upper_stream into output_upper_ctb as select ts, upper(c3), upper(c4), upper(c5) from scalar_ct1; sql create stream tb_upper_stream into output_upper_tb as select ts, upper(c3), upper(c4), upper(c5) from scalar_tb; sql insert into scalar_ct1 values (1656668180503, 100, 100.1, "beijing", "taos", "Taos"); diff --git a/tests/script/tsim/sync/3Replica1VgElect.sim b/tests/script/tsim/sync/3Replica1VgElect.sim index 4e127654ee..7cd291e56f 100644 --- a/tests/script/tsim/sync/3Replica1VgElect.sim +++ b/tests/script/tsim/sync/3Replica1VgElect.sim @@ -48,7 +48,7 @@ endi $replica = 3 $vgroups = 1 -print ============= create database +print ============= create database db sql create database db replica $replica vgroups $vgroups $loop_cnt = 0 @@ -135,7 +135,7 @@ endw $totalTblNum = $tbNum * 2 sleep 1000 sql show tables -print ====> expect $totalTblNum and infinsert $rows in fact +print ====> expect $totalTblNum and insert $rows in fact if $rows != $totalTblNum then return -1 endi diff --git a/tests/script/tsim/sync/3Replica5VgElect.sim b/tests/script/tsim/sync/3Replica5VgElect.sim index c4ab9bd4bc..a9858acbfb 100644 --- a/tests/script/tsim/sync/3Replica5VgElect.sim +++ b/tests/script/tsim/sync/3Replica5VgElect.sim @@ -202,7 +202,7 @@ else endi vg_ready: -print ====> create stable/child table +print ====> create stable stb /child table ctb and general table ntb sql create table stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int) sql show stables @@ -225,7 +225,7 @@ endw $totalTblNum = $tbNum * 2 sleep 1000 sql show tables -print ====> expect $totalTblNum and infinsert $rows in fact +print ====> expect $totalTblNum and insert $rows in fact if $rows != $totalTblNum then return -1 endi @@ -237,6 +237,7 @@ sql show vgroups $dnodeId = $data[0][3] $dnodeId = dnode . $dnodeId +print ====> switch_leader_to_offine_loop switch_leader_to_offine_loop: print $dnodeId @@ -271,22 +272,7 @@ system sh/exec.sh -n $dnodeId -s start $switch_loop_cnt = $switch_loop_cnt + 1 print $switch_loop_cnt -if $switch_loop_cnt == 1 then - sql show vgroups - $dnodeId = $data[0][3] - $dnodeId = dnode . $dnodeId - goto switch_leader_to_offine_loop -elif $switch_loop_cnt == 2 then - sql show vgroups - $dnodeId = $data[0][3] - $dnodeId = dnode . $dnodeId - goto switch_leader_to_offine_loop -elif $switch_loop_cnt == 3 then - sql show vgroups - $dnodeId = $data[0][3] - $dnodeId = dnode . $dnodeId - goto switch_leader_to_offine_loop -elif $switch_loop_cnt == 4 then +if $switch_loop_cnt <= 4 then sql show vgroups $dnodeId = $data[0][3] $dnodeId = dnode . 
$dnodeId diff --git a/tests/script/tsim/valgrind/basic3.sim b/tests/script/tsim/valgrind/basic3.sim index 0913691a11..3a51186d55 100644 --- a/tests/script/tsim/valgrind/basic3.sim +++ b/tests/script/tsim/valgrind/basic3.sim @@ -50,11 +50,11 @@ while $i < $tbNum endw print =============== step3: tb -sql select tbcol5 - tbcol3 from tb1 -print =============== step4: stb -sql select tbcol5 - tbcol3 from stb +sql select sum(tbcol) from stb partition by tbname interval(1s) slimit 1 soffset 1; +sql select sum(tbcol) from stb partition by tbname interval(1s) slimit 2 soffset 4 limit 10 offset 1; +_OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT print =============== check $null= diff --git a/tests/script/tsim/valgrind/basic4.sim b/tests/script/tsim/valgrind/basic4.sim index 8be96f769b..98c4f8f2a8 100644 --- a/tests/script/tsim/valgrind/basic4.sim +++ b/tests/script/tsim/valgrind/basic4.sim @@ -22,41 +22,35 @@ if $data(1)[4] != ready then goto step1 endi -print =============== step2: create db -sql create database d1 vgroups 2 buffer 3 -sql show databases -sql use d1 -sql show vgroups +$tbPrefix = tb +$tbNum = 5 +$rowNum = 10 -print =============== step3: create show stable -sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned) -sql show stables -if $rows != 1 then - return -1 -endi +print =============== step2: prepare data +sql create database db vgroups 2 +sql use db +sql create table if not exists stb (ts timestamp, tbcol int, tbcol2 float, tbcol3 double, tbcol4 binary(30), tbcol5 binary(30)) tags (tgcol int unsigned) -print =============== step4: create show table -sql create table ct1 using stb tags(1000) -sql create table ct2 using stb tags(2000) -sql create table ct3 using stb tags(3000) -sql show tables -if $rows != 3 then - return -1 -endi +$i = 0 +while $i < $tbNum + $tb = $tbPrefix . 
$i + sql create table $tb using stb tags( $i ) + $x = 0 + while $x < $rowNum + $cc = $x * 60000 + $ms = 1601481600000 + $cc + sql insert into $tb values ($ms , $x , $x , $x , "abcd1234=-+*" , "123456 0" ) + $x = $x + 1 + endw -print =============== step5: insert data (null / update) -sql insert into ct1 values(now+0s, 10, 2.0, 3.0) -sql insert into ct1 values(now+1s, 11, 2.1, NULL)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3) -sql insert into ct2 values(now+0s, 10, 2.0, 3.0) -sql insert into ct2 values(now+1s, 11, 2.1, 3.1)(now+2s, -12, -2.2, -3.2)(now+3s, -13, -2.3, -3.3) -sql insert into ct3 values('2021-01-01 00:00:00.000', NULL, NULL, 3.0) -sql insert into ct3 values('2022-03-02 16:59:00.010', 3 , 4, 5), ('2022-03-02 16:59:00.010', 33 , 4, 5), ('2022-04-01 16:59:00.011', 4, 4, 5), ('2022-04-01 16:59:00.011', 6, 4, 5), ('2022-03-06 16:59:00.013', 8, 4, 5); -sql insert into ct3 values('2022-03-02 16:59:00.010', 103, 1, 2), ('2022-03-02 16:59:00.010', 303, 3, 4), ('2022-04-01 16:59:00.011', 40, 5, 6), ('2022-04-01 16:59:00.011', 60, 4, 5), ('2022-03-06 16:59:00.013', 80, 4, 5); - -print =============== step6: query data= - -sql select * from stb where t1 between 1000 and 2500 + $cc = $x * 60000 + $ms = 1601481600000 + $cc + sql insert into $tb values ($ms , NULL , NULL , NULL , NULL , NULL ) + $i = $i + 1 +endw +print =============== step3: tb +sql select distinct(tbname), tgcol from stb; _OVER: system sh/exec.sh -n dnode1 -s stop -x SIGINT diff --git a/tests/script/tsim/valgrind/checkError6.sim b/tests/script/tsim/valgrind/checkError6.sim index 6e456148bf..70f55370d2 100644 --- a/tests/script/tsim/valgrind/checkError6.sim +++ b/tests/script/tsim/valgrind/checkError6.sim @@ -111,6 +111,9 @@ sql select _wstart, count(*) from tb1 session(ts, 1m) sql select count(tbcol), avg(tbcol), max(tbcol), min(tbcol), count(tbcol) from stb where ts <= 1601481840000 and ts >= 1601481800000 partition by tgcol interval(1m) fill(value, 0) sql select tbcol5 - tbcol3 from stb +sql select spread( tbcol2 )/44, spread(tbcol2), 0.204545455 * 44 from stb; +sql select min(tbcol) * max(tbcol) /4, sum(tbcol2) * apercentile(tbcol2, 20), apercentile(tbcol2, 33) + 52/9 from stb; + print =============== step5: explain sql explain analyze select ts from stb where -2; sql explain analyze select ts from tb1; diff --git a/tests/script/tsim/valgrind/checkError8.sim b/tests/script/tsim/valgrind/checkError8.sim new file mode 100644 index 0000000000..7ca01bc3d0 --- /dev/null +++ b/tests/script/tsim/valgrind/checkError8.sim @@ -0,0 +1,152 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start -v +sql connect + +sql drop database if exists cdb +sql create database if not exists cdb +sql use cdb +sql create table stb1 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double) +sql create table tb1 using stb1 tags(1,'1',1.0) +sql create table tb2 using stb1 tags(2,'2',2.0) +sql create table tb3 using stb1 tags(3,'3',3.0) +sql create table tb4 using stb1 tags(4,'4',4.0) +sql create table tb5 using stb1 tags(5,'5',5.0) +sql create table tb6 using stb1 tags(6,'6',6.0) + +sql insert into tb1 values ('2021-05-05 18:19:00',1,1.0,1,1,1,1.0,true ,'1','1') +sql insert into tb1 values ('2021-05-05 18:19:01',2,2.0,2,2,2,2.0,true ,'2','2') +sql insert into tb1 values ('2021-05-05 18:19:02',3,3.0,3,3,3,3.0,false,'3','3') +sql insert into tb1 values ('2021-05-05 
18:19:03',4,4.0,4,4,4,4.0,false,'4','4') +sql insert into tb1 values ('2021-05-05 18:19:04',11,11.0,11,11,11,11.0,true ,'11','11') +sql insert into tb1 values ('2021-05-05 18:19:05',12,12.0,12,12,12,12.0,true ,'12','12') +sql insert into tb1 values ('2021-05-05 18:19:06',13,13.0,13,13,13,13.0,false,'13','13') +sql insert into tb1 values ('2021-05-05 18:19:07',14,14.0,14,14,14,14.0,false,'14','14') +sql insert into tb2 values ('2021-05-05 18:19:08',21,21.0,21,21,21,21.0,true ,'21','21') +sql insert into tb2 values ('2021-05-05 18:19:09',22,22.0,22,22,22,22.0,true ,'22','22') +sql insert into tb2 values ('2021-05-05 18:19:10',23,23.0,23,23,23,23.0,false,'23','23') +sql insert into tb2 values ('2021-05-05 18:19:11',24,24.0,24,24,24,24.0,false,'24','24') +sql insert into tb3 values ('2021-05-05 18:19:12',31,31.0,31,31,31,31.0,true ,'31','31') +sql insert into tb3 values ('2021-05-05 18:19:13',32,32.0,32,32,32,32.0,true ,'32','32') +sql insert into tb3 values ('2021-05-05 18:19:14',33,33.0,33,33,33,33.0,false,'33','33') +sql insert into tb3 values ('2021-05-05 18:19:15',34,34.0,34,34,34,34.0,false,'34','34') +sql insert into tb4 values ('2021-05-05 18:19:16',41,41.0,41,41,41,41.0,true ,'41','41') +sql insert into tb4 values ('2021-05-05 18:19:17',42,42.0,42,42,42,42.0,true ,'42','42') +sql insert into tb4 values ('2021-05-05 18:19:18',43,43.0,43,43,43,43.0,false,'43','43') +sql insert into tb4 values ('2021-05-05 18:19:19',44,44.0,44,44,44,44.0,false,'44','44') +sql insert into tb5 values ('2021-05-05 18:19:20',51,51.0,51,51,51,51.0,true ,'51','51') +sql insert into tb5 values ('2021-05-05 18:19:21',52,52.0,52,52,52,52.0,true ,'52','52') +sql insert into tb5 values ('2021-05-05 18:19:22',53,53.0,53,53,53,53.0,false,'53','53') +sql insert into tb5 values ('2021-05-05 18:19:23',54,54.0,54,54,54,54.0,false,'54','54') +sql insert into tb6 values ('2021-05-05 18:19:24',61,61.0,61,61,61,61.0,true ,'61','61') +sql insert into tb6 values ('2021-05-05 18:19:25',62,62.0,62,62,62,62.0,true ,'62','62') +sql insert into tb6 values ('2021-05-05 18:19:26',63,63.0,63,63,63,63.0,false,'63','63') +sql insert into tb6 values ('2021-05-05 18:19:27',64,64.0,64,64,64,64.0,false,'64','64') +sql insert into tb6 values ('2021-05-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL) + +sql create table stb2 (ts timestamp, u1 int unsigned, u2 bigint unsigned, u3 smallint unsigned, u4 tinyint unsigned, ts2 timestamp) TAGS(t1 int unsigned, t2 bigint unsigned, t3 timestamp, t4 int) +sql create table tb2_1 using stb2 tags(1,1,'2021-05-05 18:38:38',1) +sql create table tb2_2 using stb2 tags(2,2,'2021-05-05 18:58:58',2) + +sql insert into tb2_1 values ('2021-05-05 18:19:00',1,2,3,4,'2021-05-05 18:28:01') +sql insert into tb2_1 values ('2021-05-05 18:19:01',5,6,7,8,'2021-05-05 18:28:02') +sql insert into tb2_1 values ('2021-05-05 18:19:02',2,2,3,4,'2021-05-05 18:28:03') +sql insert into tb2_1 values ('2021-05-05 18:19:03',5,6,7,8,'2021-05-05 18:28:04') +sql insert into tb2_1 values ('2021-05-05 18:19:04',3,2,3,4,'2021-05-05 18:28:05') +sql insert into tb2_1 values ('2021-05-05 18:19:05',5,6,7,8,'2021-05-05 18:28:06') +sql insert into tb2_1 values ('2021-05-05 18:19:06',4,2,3,4,'2021-05-05 18:28:07') +sql insert into tb2_1 values ('2021-05-05 18:19:07',5,6,7,8,'2021-05-05 18:28:08') +sql insert into tb2_1 values ('2021-05-05 18:19:08',5,2,3,4,'2021-05-05 18:28:09') +sql insert into tb2_1 values ('2021-05-05 18:19:09',5,6,7,8,'2021-05-05 18:28:10') +sql insert into tb2_1 values ('2021-05-05 18:19:10',6,2,3,4,'2021-05-05 
18:28:11') +sql insert into tb2_2 values ('2021-05-05 18:19:11',5,6,7,8,'2021-05-05 18:28:12') +sql insert into tb2_2 values ('2021-05-05 18:19:12',7,2,3,4,'2021-05-05 18:28:13') +sql insert into tb2_2 values ('2021-05-05 18:19:13',5,6,7,8,'2021-05-05 18:28:14') +sql insert into tb2_2 values ('2021-05-05 18:19:14',8,2,3,4,'2021-05-05 18:28:15') +sql insert into tb2_2 values ('2021-05-05 18:19:15',5,6,7,8,'2021-05-05 18:28:16') + +sql create table stb3 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9)) TAGS(t1 int, t2 binary(10), t3 double) +sql create table tb3_1 using stb3 tags(1,'1',1.0) +sql create table tb3_2 using stb3 tags(2,'2',2.0) + +sql insert into tb3_1 values ('2021-01-05 18:19:00',1,1.0,1,1,1,1.0,true ,'1','1') +sql insert into tb3_1 values ('2021-02-05 18:19:01',2,2.0,2,2,2,2.0,true ,'2','2') +sql insert into tb3_1 values ('2021-03-05 18:19:02',3,3.0,3,3,3,3.0,false,'3','3') +sql insert into tb3_1 values ('2021-04-05 18:19:03',4,4.0,4,4,4,4.0,false,'4','4') +sql insert into tb3_1 values ('2021-05-05 18:19:28',5,NULL,5,NULL,5,NULL,true,NULL,'5') +sql insert into tb3_1 values ('2021-06-05 18:19:28',NULL,6.0,NULL,6,NULL,6.0,NULL,'6',NULL) +sql insert into tb3_1 values ('2021-07-05 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL) +sql insert into tb3_2 values ('2021-01-06 18:19:00',11,11.0,11,11,11,11.0,true ,'11','11') +sql insert into tb3_2 values ('2021-02-06 18:19:01',12,12.0,12,12,12,12.0,true ,'12','12') +sql insert into tb3_2 values ('2021-03-06 18:19:02',13,13.0,13,13,13,13.0,false,'13','13') +sql insert into tb3_2 values ('2021-04-06 18:19:03',14,14.0,14,14,14,14.0,false,'14','14') +sql insert into tb3_2 values ('2021-05-06 18:19:28',15,NULL,15,NULL,15,NULL,true,NULL,'15') +sql insert into tb3_2 values ('2021-06-06 18:19:28',NULL,16.0,NULL,16,NULL,16.0,NULL,'16',NULL) +sql insert into tb3_2 values ('2021-07-06 18:19:28',NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL) + +sql create table stb4 (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool, c8 binary(10), c9 nchar(9),c10 binary(16300)) TAGS(t1 int, t2 binary(10), t3 double) +sql create table tb4_0 using stb4 tags(0,'0',0.0) +sql create table tb4_1 using stb4 tags(1,'1',1.0) +sql create table tb4_2 using stb4 tags(2,'2',2.0) +sql create table tb4_3 using stb4 tags(3,'3',3.0) +sql create table tb4_4 using stb4 tags(4,'4',4.0) + +$i = 0 +$ts0 = 1625850000000 +$blockNum = 5 +$delta = 0 +$tbname0 = tb4_ +$a = 0 +$b = 200 +$c = 400 +while $i < $blockNum + $x = 0 + $rowNum = 5 + while $x < $rowNum + $ts = $ts0 + $x + $a = $a + 1 + $b = $b + 1 + $c = $c + 1 + $d = $x / 10 + $tin = $rowNum + $binary = 'binary . $c + $binary = $binary . ' + $nchar = 'nchar . $c + $nchar = $nchar . ' + $tbname = 'tb4_ . $i + $tbname = $tbname . 
' + sql insert into $tbname values ( $ts , $a , $b , $c , $d , $d , $c , true, $binary , $nchar , $binary ) + $x = $x + 1 + endw + + $i = $i + 1 + $ts0 = $ts0 + 259200000 +endw + + +print ============== query +sql select a.ts,a.c1,a.c8 from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and ((a.c1 > 50 and a.c1 < 60) or (b.c2 > 60));; +sql select * from stb1 where (c6 > 3.0 or c6 < 60) and c6 > 50 and (c6 != 53 or c6 != 63);; +sql select ts,c1 from stb1 where (c1 > 60 or c1 < 10 or (c1 > 20 and c1 < 30)) and ts > '2021-05-05 18:19:00.000' and ts < '2021-05-05 18:19:25.000' and c1 != 21 and c1 != 22 order by ts; +sql select a.* from (select * from stb1 where c7=true) a, (select * from stb1 where c1 > 30) b where a.ts=b.ts and a.c1 > 50 order by ts;; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and (a.c1 < 10 or a.c1 > 30) and (b.u1 < 5 or b.u1 > 5) order by ts;; +sql select a.ts,b.ts,a.c1,b.u1,b.u2 from (select * from stb1) a, (select * from stb2) b where a.ts=b.ts and a.c1 < 30 and b.u1 > 1 and a.c1 > 10 and b.u1 < 8 and b.u1<>5 order by ts;; +sql select tb1.ts,tb1.*,tb2_1.* from tb1, tb2_1 where tb1.ts=tb2_1.ts and tb1.ts >= '2021-05-05 18:19:03.000' and tb1.c7=false and tb2_1.u3>4 order by ts;; +sql select stb1.ts,stb1.c1,stb1.t1,stb2.ts,stb2.u1,stb2.t4 from stb1, stb2 where stb1.ts=stb2.ts and stb1.t1 = stb2.t4 order by ts;; +sql select count(*) from stb1 where tbname like 'tb%' or c1 > 0;; +sql select * from stb1 where tbname like 'tb%' and (t1=1 or t2=2 or t3=3) and t1 > 2 order by ts;; + +_OVER: +system sh/exec.sh -n dnode1 -s stop -x SIGINT +print =============== check +$null= + +system_content sh/checkValgrind.sh -n dnode1 +print cmd return result ----> [ $system_content ] +if $system_content > 2 then + return -1 +endi + +if $system_content == $null then + return -1 +endi diff --git a/tests/system-test/1-insert/create_retentions.py b/tests/system-test/1-insert/create_retentions.py index 96f9ea51ef..24c0dfc046 100644 --- a/tests/system-test/1-insert/create_retentions.py +++ b/tests/system-test/1-insert/create_retentions.py @@ -187,7 +187,7 @@ class TDTestCase: tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.{stb} tags ( {i+1} )') def __insert_data(self, rows, ctb_num=20, dbname=DBNAME, rsma=False, rsma_type="sum"): - tdLog.printNoPrefix("==========step: start inser data into tables now.....") + tdLog.printNoPrefix("==========step: start insert data into tables now.....") # from ...pytest.util.common import DataSet data = DataSet() data.get_order_set(rows) diff --git a/tests/system-test/1-insert/performanceInsert.json b/tests/system-test/1-insert/performanceInsert.json index 7278a6f735..1e83217e71 100644 --- a/tests/system-test/1-insert/performanceInsert.json +++ b/tests/system-test/1-insert/performanceInsert.json @@ -1,7 +1,7 @@ { "filetype": "insert", "cfgdir": "/etc/taos/", - "host": "test216", + "host": "localhost", "port": 6030, "user": "root", "password": "taosdata", @@ -10,14 +10,14 @@ "result_file": "./insert_res.txt", "confirm_parameter_prompt": "no", "insert_interval": 0, - "interlace_rows": 1000, + "interlace_rows": 0, "num_of_records_per_req": 100000, "databases": [ { "dbinfo": { "name": "db", "drop": "yes", - "vgroups": 24 + "vgroups": 8 }, "super_tables": [ { @@ -29,8 +29,8 @@ "batch_create_tbl_num": 50000, "data_source": "rand", "insert_mode": "taosc", - "insert_rows": 5, - "interlace_rows": 100000, + "insert_rows": 1000, + "interlace_rows": 0, 
"insert_interval": 0, "max_sql_len": 10000000, "disorder_ratio": 0, diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py index 135a61d194..c8957f216d 100644 --- a/tests/system-test/2-query/distribute_agg_spread.py +++ b/tests/system-test/2-query/distribute_agg_spread.py @@ -6,13 +6,10 @@ import random class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) + tdLog.debug(f"start to execute {__file__}") tdSql.init(conn.cursor()) self.vnode_disbutes = None self.ts = 1537146000000 @@ -31,60 +28,61 @@ class TDTestCase: same_result = tdSql.queryResult if spread_result !=same_result: - tdLog.exit(" max function work not as expected, sql : %s "% spread_sql) + tdLog.exit(f" max function work not as expected, sql : {spread_sql} ") else: - tdLog.info(" max function work as expected, sql : %s "% spread_sql) + tdLog.info(f" max function work as expected, sql : {spread_sql} ") - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f" use {dbname}") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = 
f"ct{i}" for j in range(9): tdSql.execute( - f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -100,11 +98,11 @@ class TDTestCase: ''' ) - tdLog.info(" prepare data for distributed_aggregate done! ") + tdLog.info(f" prepare data for distributed_aggregate done! 
") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -112,9 +110,8 @@ class TDTestCase: for vgroup_id in vgroups: vnode_tables[vgroup_id[0]]=[] - # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -126,9 +123,9 @@ class TDTestCase: if len(v)>=2: count+=1 if count < 2: - tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ") - def check_spread_distribute_diff_vnode(self,col_name): + def check_spread_distribute_diff_vnode(self,col_name, dbname="testdb"): vgroup_ids = [] for k ,v in self.vnode_disbutes.items(): @@ -142,13 +139,13 @@ class TDTestCase: distribute_tbnames.append(random.sample(vnode_tables,1)[0]) tbname_ins = "" for tbname in distribute_tbnames: - tbname_ins += "'%s' ,"%tbname + tbname_ins += f"'{tbname}' ," tbname_filters = tbname_ins[:-1] - spread_sql = f"select spread({col_name}) from stb1 where tbname in ({tbname_filters})" + spread_sql = f"select spread({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})" - same_sql = f"select max({col_name}) - min({col_name}) from stb1 where tbname in ({tbname_filters})" + same_sql = f"select max({col_name}) - min({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters})" tdSql.query(spread_sql) spread_result = tdSql.queryResult @@ -157,20 +154,20 @@ class TDTestCase: same_result = tdSql.queryResult if spread_result !=same_result: - tdLog.exit(" spread function work not as expected, sql : %s "% spread_sql) + tdLog.exit(f" spread function work not as expected, sql : {spread_sql} ") else: - tdLog.info(" spread function work as expected, sql : %s "% spread_sql) + tdLog.info(f" spread function work as expected, sql : {spread_sql} ") - def check_spread_status(self): + def check_spread_status(self, dbname="testdb"): # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: - tablenames.append(table_name[0]) + tablenames.append(f"{dbname}.{table_name[0]}") - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -185,80 +182,76 @@ class TDTestCase: # check max function for different vnode for colname in colnames: - if colname.startswith("c"): + if colname.startswith(f"c"): self.check_spread_distribute_diff_vnode(colname) - else: - # self.check_spread_distribute_diff_vnode(colname) # bug for tag - pass - - def distribute_agg_query(self): + def distribute_agg_query(self, dbname="testdb"): # basic filter - tdSql.query("select spread(c1) from stb1 where c1 is null") + tdSql.query(f"select spread(c1) from {dbname}.stb1 where c1 is null") tdSql.checkRows(1) - tdSql.query("select spread(c1) from stb1 where t1=1") + tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1=1") tdSql.checkData(0,0,8.000000000) - tdSql.query("select spread(c1+c2) from stb1 where c1 =1 ") + tdSql.query(f"select spread(c1+c2) from {dbname}.stb1 where c1 =1 ") tdSql.checkData(0,0,0.000000000) - tdSql.query("select spread(c1) from stb1 where tbname=\"ct2\"") + tdSql.query(f"select spread(c1) 
from {dbname}.stb1 where tbname=\"ct2\"") tdSql.checkData(0,0,8.000000000) - tdSql.query("select spread(c1) from stb1 partition by tbname") + tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select spread(c1) from stb1 where t1> 4 partition by tbname") + tdSql.query(f"select spread(c1) from {dbname}.stb1 where t1> 4 partition by tbname") tdSql.checkRows(15) # union all - tdSql.query("select spread(c1) from stb1 union all select max(c1)-min(c1) from stb1 ") + tdSql.query(f"select spread(c1) from {dbname}.stb1 union all select max(c1)-min(c1) from {dbname}.stb1 ") tdSql.checkRows(2) tdSql.checkData(0,0,28.000000000) # join - tdSql.execute(" create database if not exists db ") - tdSql.execute(" use db ") - tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") - tdSql.execute(" create table tb1 using st tags(1) ") - tdSql.execute(" create table tb2 using st tags(2) ") + tdSql.execute(f" create database if not exists db ") + tdSql.execute(f" use db ") + tdSql.execute(f" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(f" create table db.tb1 using db.st tags(1) ") + tdSql.execute(f" create table db.tb2 using db.st tags(2) ") for i in range(10): ts = i*10 + self.ts - tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") - tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)") - tdSql.query("select spread(tb1.c1), spread(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.query(f"select spread(tb1.c1), spread(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts") tdSql.checkRows(1) tdSql.checkData(0,0,9.000000000) tdSql.checkData(0,0,9.00000) # group by - tdSql.execute(" use testdb ") - tdSql.query(" select max(c1),c1 from stb1 group by t1 ") + tdSql.execute(f" use {dbname} ") + tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by t1 ") tdSql.checkRows(20) - tdSql.query(" select max(c1),c1 from stb1 group by c1 ") + tdSql.query(f" select max(c1),c1 from {dbname}.stb1 group by c1 ") tdSql.checkRows(30) - tdSql.query(" select max(c1),c2 from stb1 group by c2 ") + tdSql.query(f" select max(c1),c2 from {dbname}.stb1 group by c2 ") tdSql.checkRows(31) # partition by tbname or partition by tag - tdSql.query("select spread(c1) from stb1 partition by tbname") + tdSql.query(f"select spread(c1) from {dbname}.stb1 partition by tbname") query_data = tdSql.queryResult # nest query for support max - tdSql.query("select spread(c2+2)+1 from (select max(c1) c2 from stb1)") + tdSql.query(f"select spread(c2+2)+1 from (select max(c1) c2 from {dbname}.stb1)") tdSql.checkData(0,0,1.000000000) - tdSql.query("select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.query(f"select spread(c1+2)+1 as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,29.000000000) - tdSql.query("select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.query(f"select spread(a+2)+1 as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,29.000000000) # mixup with other functions - tdSql.query("select max(c1),count(c1),last(c2,c3),spread(c1) from stb1") + tdSql.query(f"select max(c1),count(c1),last(c2,c3),spread(c1) from {dbname}.stb1") tdSql.checkData(0,0,28) tdSql.checkData(0,1,184) tdSql.checkData(0,2,-99999) @@ -275,7 +268,7 @@ class TDTestCase: def stop(self): tdSql.close() - tdLog.success("%s 
successfully executed" % __file__) + tdLog.success(f"{__file__} successfully executed") tdCases.addWindows(__file__, TDTestCase()) tdCases.addLinux(__file__, TDTestCase()) diff --git a/tests/system-test/2-query/distribute_agg_stddev.py b/tests/system-test/2-query/distribute_agg_stddev.py index 22c7c598b4..56768d3be4 100644 --- a/tests/system-test/2-query/distribute_agg_stddev.py +++ b/tests/system-test/2-query/distribute_agg_stddev.py @@ -7,10 +7,7 @@ import platform import math class TDTestCase: - updatecfgDict = {'debugFlag': 143 ,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -45,55 +42,56 @@ class TDTestCase: else: tdLog.exit(" sql:%s; row:0 col:0 data:%d , expect:%d"%(stddev_sql,tdSql.queryResult[0][0],stddev_result)) - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f" use {dbname}") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = f"ct{i}" for j in range(9): tdSql.execute( - f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into 
{dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") - tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") + tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") + + tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") + tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ") tdSql.execute( - f'''insert into t1 values + f'''insert into {dbname}.t1 values ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a ) ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a ) @@ -109,11 +107,11 @@ class TDTestCase: ''' ) - tdLog.info(" prepare data for distributed_aggregate done! ") + tdLog.info(f" prepare data for distributed_aggregate done! 
") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -121,9 +119,8 @@ class TDTestCase: for vgroup_id in vgroups: vnode_tables[vgroup_id[0]]=[] - # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -135,9 +132,9 @@ class TDTestCase: if len(v)>=2: count+=1 if count < 2: - tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ") - def check_stddev_distribute_diff_vnode(self,col_name): + def check_stddev_distribute_diff_vnode(self,col_name, dbname="testdb"): vgroup_ids = [] for k ,v in self.vnode_disbutes.items(): @@ -155,9 +152,9 @@ class TDTestCase: tbname_filters = tbname_ins[:-1] - stddev_sql = f"select stddev({col_name}) from stb1 where tbname in ({tbname_filters});" + stddev_sql = f"select stddev({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});" - same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null " + same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null " tdSql.query(same_sql) pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] @@ -175,17 +172,16 @@ class TDTestCase: tdSql.query(stddev_sql) tdSql.checkData(0,0,stddev_result) - - def check_stddev_status(self): + def check_stddev_status(self, dbname="testdb"): # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: - tablenames.append(table_name[0]) + tablenames.append(f"{dbname}.{table_name[0]}") - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -197,50 +193,42 @@ class TDTestCase: for colname in colnames: if colname.startswith("c"): self.check_stddev_functions(tablename,colname) - else: - # self.check_stddev_functions(tablename,colname) - pass - # check max function for different vnode for colname in colnames: if colname.startswith("c"): self.check_stddev_distribute_diff_vnode(colname) - else: - # self.check_stddev_distribute_diff_vnode(colname) # bug for tag - pass - - def distribute_agg_query(self): + def distribute_agg_query(self, dbname="testdb"): # basic filter - tdSql.query(" select stddev(c1) from stb1 ") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,6.694663959) - tdSql.query(" select stddev(a) from (select stddev(c1) a from stb1 partition by tbname) ") + tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 partition by tbname) ") tdSql.checkData(0,0,0.156797505) - tdSql.query(" select stddev(c1) from stb1 where t1=1") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1=1") tdSql.checkData(0,0,2.581988897) - tdSql.query("select stddev(c1+c2) from stb1 where c1 =1 ") + tdSql.query(f"select stddev(c1+c2) from {dbname}.stb1 where c1 =1 ") tdSql.checkData(0,0,0.000000000) - tdSql.query("select stddev(c1) from stb1 where tbname=\"ct2\"") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 where tbname=\"ct2\"") tdSql.checkData(0,0,2.581988897) - 
tdSql.query("select stddev(c1) from stb1 partition by tbname") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select stddev(c1) from stb1 where t1> 4 partition by tbname") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 where t1> 4 partition by tbname") tdSql.checkRows(15) # union all - tdSql.query("select stddev(c1) from stb1 union all select stddev(c1) from stb1 ") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 union all select stddev(c1) from {dbname}.stb1 ") tdSql.checkRows(2) tdSql.checkData(0,0,6.694663959) - tdSql.query("select stddev(a) from (select stddev(c1) a from stb1 union all select stddev(c1) a from stb1)") + tdSql.query(f"select stddev(a) from (select stddev(c1) a from {dbname}.stb1 union all select stddev(c1) a from {dbname}.stb1)") tdSql.checkRows(1) tdSql.checkData(0,0,0.000000000) @@ -248,38 +236,38 @@ class TDTestCase: tdSql.execute(" create database if not exists db ") tdSql.execute(" use db ") - tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") - tdSql.execute(" create table tb1 using st tags(1) ") - tdSql.execute(" create table tb2 using st tags(2) ") + tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ") + tdSql.execute(" create table db.tb1 using db.st tags(1) ") + tdSql.execute(" create table db.tb2 using db.st tags(2) ") for i in range(10): ts = i*10 + self.ts - tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)") - tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)") + tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)") - tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts") + tdSql.query("select stddev(tb1.c1), stddev(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts") tdSql.checkRows(1) tdSql.checkData(0,0,2.872281323) tdSql.checkData(0,1,2.872281323) # group by - tdSql.execute(" use testdb ") + tdSql.execute(f" use {dbname} ") # partition by tbname or partition by tag - tdSql.query("select stddev(c1) from stb1 partition by tbname") + tdSql.query(f"select stddev(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) # nest query for support max - tdSql.query("select stddev(c2+2)+1 from (select stddev(c1) c2 from stb1)") + tdSql.query(f"select stddev(c2+2)+1 from (select stddev(c1) c2 from {dbname}.stb1)") tdSql.checkData(0,0,1.000000000) - tdSql.query("select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from stb1)") + tdSql.query(f"select stddev(c1+2) as c2 from (select ts ,c1 ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,6.694663959) - tdSql.query("select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from stb1)") + tdSql.query(f"select stddev(a+2) as c2 from (select ts ,abs(c1) a ,c2 from {dbname}.stb1)") tdSql.checkData(0,0,6.694663959) # mixup with other functions - tdSql.query("select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from stb1") + tdSql.query(f"select max(c1),count(c1),last(c2,c3),sum(c1+c2),avg(c1),stddev(c1) from {dbname}.stb1") tdSql.checkData(0,0,28) tdSql.checkData(0,1,184) tdSql.checkData(0,2,-99999) diff --git a/tests/system-test/2-query/distribute_agg_sum.py b/tests/system-test/2-query/distribute_agg_sum.py index 90d1edca90..90946f388d 100644 --- a/tests/system-test/2-query/distribute_agg_sum.py +++ b/tests/system-test/2-query/distribute_agg_sum.py @@ -7,10 +7,7 @@ import platform class TDTestCase: - updatecfgDict = {'debugFlag': 143 
,"cDebugFlag":143,"uDebugFlag":143 ,"rpcDebugFlag":143 , "tmrDebugFlag":143 , - "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143, - "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143, - "maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } + updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 } def init(self, conn, logSql): tdLog.debug("start to execute %s" % __file__) @@ -34,55 +31,56 @@ class TDTestCase: tdSql.query(sum_sql) tdSql.checkData(0,0,pre_sum) - def prepare_datas_of_distribute(self): + def prepare_datas_of_distribute(self, dbname="testdb"): # prepate datas for 20 tables distributed at different vgroups - tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5") - tdSql.execute(" use testdb ") + tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5") + tdSql.execute(f" use {dbname}") tdSql.execute( - '''create table stb1 + f'''create table {dbname}.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32)) ''' ) tdSql.execute( - ''' - create table t1 + f''' + create table {dbname}.t1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) ''' ) for i in range(20): - tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') + tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )') for i in range(9): tdSql.execute( - f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) tdSql.execute( - f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" + f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )" ) for i in range(1,21): if i ==1 or i == 4: continue else: - tbname = "ct"+f'{i}' + tbname = f"ct{i}" for j in range(9): tdSql.execute( - f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" + f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )" ) - tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )") - tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )") - tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, 
     def init(self, conn, logSql):
         tdLog.debug("start to execute %s" % __file__)
@@ -34,55 +31,56 @@ class TDTestCase:
         tdSql.query(sum_sql)
         tdSql.checkData(0,0,pre_sum)

-    def prepare_datas_of_distribute(self):
+    def prepare_datas_of_distribute(self, dbname="testdb"):

         # prepare data for 20 tables distributed at different vgroups
-        tdSql.execute("create database if not exists testdb keep 3650 duration 1000 vgroups 5")
-        tdSql.execute(" use testdb ")
+        tdSql.execute(f"create database if not exists {dbname} keep 3650 duration 1000 vgroups 5")
+        tdSql.execute(f" use {dbname}")
         tdSql.execute(
-            '''create table stb1
+            f'''create table {dbname}.stb1
             (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
             tags (t0 timestamp, t1 int, t2 bigint, t3 smallint, t4 tinyint, t5 float, t6 double, t7 bool, t8 binary(16),t9 nchar(32))
             '''
         )
         tdSql.execute(
-            '''
-            create table t1
+            f'''
+            create table {dbname}.t1
             (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
             '''
         )
         for i in range(20):
-            tdSql.execute(f'create table ct{i+1} using stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')
+            tdSql.execute(f'create table {dbname}.ct{i+1} using {dbname}.stb1 tags ( now(), {1*i}, {11111*i}, {111*i}, {1*i}, {1.11*i}, {11.11*i}, {i%2}, "binary{i}", "nchar{i}" )')

         for i in range(9):
             tdSql.execute(
-                f"insert into ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+                f"insert into {dbname}.ct1 values ( now()-{i*10}s, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
             )
             tdSql.execute(
-                f"insert into ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
+                f"insert into {dbname}.ct4 values ( now()-{i*90}d, {1*i}, {11111*i}, {111*i}, {11*i}, {1.11*i}, {11.11*i}, {i%2}, 'binary{i}', 'nchar{i}', now()+{1*i}a )"
             )
         for i in range(1,21):
             if i ==1 or i == 4:
                 continue
             else:
-                tbname = "ct"+f'{i}'
+                tbname = f"ct{i}"
                 for j in range(9):
                     tdSql.execute(
-                        f"insert into {tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
+                        f"insert into {dbname}.{tbname} values ( now()-{(i+j)*10}s, {1*(j+i)}, {11111*(j+i)}, {111*(j+i)}, {11*(j)}, {1.11*(j+i)}, {11.11*(j+i)}, {(j+i)%2}, 'binary{j}', 'nchar{j}', now()+{1*j}a )"
                     )
-        tdSql.execute("insert into ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
-        tdSql.execute("insert into ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
-        tdSql.execute("insert into ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
-        tdSql.execute("insert into ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
-        tdSql.execute("insert into ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
-        tdSql.execute("insert into ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
-        tdSql.execute("insert into ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute(f"insert into {dbname}.ct1 values (now()-45s, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar0', now()+8a )")
+        tdSql.execute(f"insert into {dbname}.ct1 values (now()+10s, 9, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute(f"insert into {dbname}.ct1 values (now()+15s, 9, -99999, -999, -99, -9.99, NULL, 1, 'binary9', 'nchar9', now()+9a )")
+        tdSql.execute(f"insert into {dbname}.ct1 values (now()+20s, 9, -99999, -999, NULL, -9.99, -99.99, 1, 'binary9', 'nchar9', now()+9a )")
+
+        tdSql.execute(f"insert into {dbname}.ct4 values (now()-810d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute(f"insert into {dbname}.ct4 values (now()-400d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")
+        tdSql.execute(f"insert into {dbname}.ct4 values (now()+90d, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL ) ")

         tdSql.execute(
-            f'''insert into t1 values
+            f'''insert into {dbname}.t1 values
             ( '2020-04-21 01:01:01.000', NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
             ( '2020-10-21 01:01:01.000', 1, 11111, 111, 11, 1.11, 11.11, 1, "binary1", "nchar1", now()+1a )
             ( '2020-12-31 01:01:01.000', 2, 22222, 222, 22, 2.22, 22.22, 0, "binary2", "nchar2", now()+2a )
@@ -98,11 +96,11 @@ class TDTestCase:
             '''
         )

-        tdLog.info(" prepare data for distributed_aggregate done! ")
+        tdLog.info(f" prepare data for distributed_aggregate done! ")
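
A note on why this data spreads across vnodes: `updatecfgDict` caps `maxTablesPerVnode` at 2 and the database is created with `vgroups 5`, so the 20 sub-tables cannot all land in one vnode. The `check_distribute_datas` hunk that follows builds a vgroup-to-table map and requires at least two vgroups to hold two or more sub-tables. A schematic, framework-free sketch of that bookkeeping; the result rows are illustrative stand-ins for `tdSql.queryResult`, not actual column layouts:

```python
# Stand-ins for the results of "show testdb.vgroups" and
# "show testdb.tables like 'ct%'" (shapes are illustrative).
vgroup_rows = [(2,), (3,), (4,), (5,), (6,)]                 # vgroup_id per row
table_rows = [("ct1", 2), ("ct2", 2), ("ct3", 3), ("ct4", 3), ("ct5", 4)]

# Map every vgroup to the sub-tables it hosts.
vnode_tables = {vg[0]: [] for vg in vgroup_rows}
for name, vgroup_id in table_rows:
    vnode_tables[vgroup_id].append(name)

# The test's distribution criterion: at least two vnodes own >= 2 sub-tables.
count = sum(1 for tablesables in [0] for _ in ())  # placeholder removed below
count = sum(1 for tables in vnode_tables.values() if len(tables) >= 2)
assert count >= 2, "sub_tables are not distributed to at least two vnodes"
```
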
") - def check_distribute_datas(self): + def check_distribute_datas(self, dbname="testdb"): # get vgroup_ids of all - tdSql.query("show vgroups ") + tdSql.query(f"show {dbname}.vgroups ") vgroups = tdSql.queryResult vnode_tables={} @@ -110,9 +108,8 @@ class TDTestCase: for vgroup_id in vgroups: vnode_tables[vgroup_id[0]]=[] - # check sub_table of per vnode ,make sure sub_table has been distributed - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: @@ -124,9 +121,9 @@ class TDTestCase: if len(v)>=2: count+=1 if count < 2: - tdLog.exit(" the datas of all not satisfy sub_table has been distributed ") + tdLog.exit(f" the datas of all not satisfy sub_table has been distributed ") - def check_sum_distribute_diff_vnode(self,col_name): + def check_sum_distribute_diff_vnode(self,col_name, dbname="testdb"): vgroup_ids = [] for k ,v in self.vnode_disbutes.items(): @@ -144,9 +141,9 @@ class TDTestCase: tbname_filters = tbname_ins[:-1] - sum_sql = f"select sum({col_name}) from stb1 where tbname in ({tbname_filters});" + sum_sql = f"select sum({col_name}) from {dbname}.stb1 where tbname in ({tbname_filters});" - same_sql = f"select {col_name} from stb1 where tbname in ({tbname_filters}) and {col_name} is not null " + same_sql = f"select {col_name} from {dbname}.stb1 where tbname in ({tbname_filters}) and {col_name} is not null " tdSql.query(same_sql) pre_data = np.array(tdSql.queryResult)[np.array(tdSql.queryResult) != None] @@ -157,16 +154,16 @@ class TDTestCase: tdSql.query(sum_sql) tdSql.checkData(0,0,pre_sum) - def check_sum_status(self): + def check_sum_status(self, dbname="testdb"): # check max function work status - tdSql.query("show tables like 'ct%'") + tdSql.query(f"show {dbname}.tables like 'ct%'") table_names = tdSql.queryResult tablenames = [] for table_name in table_names: - tablenames.append(table_name[0]) + tablenames.append(f"{dbname}.{table_name[0]}") - tdSql.query("desc stb1") + tdSql.query(f"desc {dbname}.stb1") col_names = tdSql.queryResult colnames = [] @@ -183,79 +180,75 @@ class TDTestCase: for colname in colnames: if colname.startswith("c"): self.check_sum_distribute_diff_vnode(colname) - else: - # self.check_sum_distribute_diff_vnode(colname) # bug for tag - pass - - def distribute_agg_query(self): + def distribute_agg_query(self, dbname="testdb"): # basic filter - tdSql.query(" select sum(c1) from stb1 ") + tdSql.query(f"select sum(c1) from {dbname}.stb1 ") tdSql.checkData(0,0,2592) - tdSql.query(" select sum(a) from (select sum(c1) a from stb1 partition by tbname) ") + tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 partition by tbname) ") tdSql.checkData(0,0,2592) - tdSql.query(" select sum(c1) from stb1 where t1=1") + tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1=1") tdSql.checkData(0,0,54) - tdSql.query("select sum(c1+c2) from stb1 where c1 =1 ") + tdSql.query(f"select sum(c1+c2) from {dbname}.stb1 where c1 =1 ") tdSql.checkData(0,0,22224.000000000) - tdSql.query("select sum(c1) from stb1 where tbname=\"ct2\"") + tdSql.query(f"select sum(c1) from {dbname}.stb1 where tbname=\"ct2\"") tdSql.checkData(0,0,54) - tdSql.query("select sum(c1) from stb1 partition by tbname") + tdSql.query(f"select sum(c1) from {dbname}.stb1 partition by tbname") tdSql.checkRows(20) - tdSql.query("select sum(c1) from stb1 where t1> 4 partition by tbname") + tdSql.query(f"select sum(c1) from {dbname}.stb1 where t1> 4 partition by tbname") 
         tdSql.checkRows(15)

         # union all
-        tdSql.query("select sum(c1) from stb1 union all select sum(c1) from stb1 ")
+        tdSql.query(f"select sum(c1) from {dbname}.stb1 union all select sum(c1) from {dbname}.stb1 ")
         tdSql.checkRows(2)
         tdSql.checkData(0,0,2592)

-        tdSql.query("select sum(a) from (select sum(c1) a from stb1 union all select sum(c1) a from stb1)")
+        tdSql.query(f"select sum(a) from (select sum(c1) a from {dbname}.stb1 union all select sum(c1) a from {dbname}.stb1)")
         tdSql.checkRows(1)
         tdSql.checkData(0,0,5184)

         # join
-        tdSql.execute(" create database if not exists db ")
         tdSql.execute(" use db ")

-        tdSql.execute(" create stable st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
-        tdSql.execute(" create table tb1 using st tags(1) ")
-        tdSql.execute(" create table tb2 using st tags(2) ")
+        tdSql.execute(" create stable db.st (ts timestamp , c1 int ,c2 float) tags(t1 int) ")
+        tdSql.execute(" create table db.tb1 using db.st tags(1) ")
+        tdSql.execute(" create table db.tb2 using db.st tags(2) ")

         for i in range(10):
             ts = i*10 + self.ts
-            tdSql.execute(f" insert into tb1 values({ts},{i},{i}.0)")
-            tdSql.execute(f" insert into tb2 values({ts},{i},{i}.0)")
+            tdSql.execute(f" insert into db.tb1 values({ts},{i},{i}.0)")
+            tdSql.execute(f" insert into db.tb2 values({ts},{i},{i}.0)")

-        tdSql.query("select sum(tb1.c1), sum(tb2.c2) from tb1, tb2 where tb1.ts=tb2.ts")
+
+        tdSql.query("select sum(tb1.c1), sum(tb2.c2) from db.tb1 tb1, db.tb2 tb2 where tb1.ts=tb2.ts")
         tdSql.checkRows(1)
         tdSql.checkData(0,0,45)
         tdSql.checkData(0,1,45.000000000)
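
The expected join values can be derived by hand from the loop above: `tb1` and `tb2` each hold c1 = 0..9 at identical timestamps, so the time-aligned join sums to 45, and the population standard deviation of the same values (checked at this point in the stddev variant of this block) is approximately 2.872281323. A quick numpy confirmation, independent of the database:

```python
import numpy as np

vals = np.arange(10, dtype=float)      # the c1/c2 values 0..9 inserted into tb1 and tb2
print(vals.sum())                      # 45.0 -> matches tdSql.checkData(0,0,45)
print(round(np.std(vals, ddof=0), 9))  # 2.872281323 -> the stddev file's join check
```
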
5+%d)"%(ctbPrefix,i,stbName,i,i,i,i,(1500+i*20),(150+i*2),(5+i)) else: model = 'H-%d'%i - sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d')"%(ctbPrefix,i,stbName,i,i,i,model,i) + sql += " %s%d using %s tags('truck_%d', 'South%d','Trish%d','%s','v2.%d', %d, %d,%d)"%(ctbPrefix,i,stbName,i,i,i,model,i,(1500+i*20),(150+i*2),(5+i)) if (i > 0) and (i%1000 == 0): tsql.execute(sql) sql = pre_create @@ -55,10 +57,10 @@ class TDTestCase: sql += " %s%d values "%(ctbPrefix,i) for j in range(rowsPerTbl): if(ctbPrefix=="rct"): - sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}, {1500+j*20}, {150+j*2},{5+j}) " + sql += f"({startTs+j*60000}, {80+j}, {90+j}, {85+j}, {30+j*10}, {1.2*j}, {221+j*2}, {20+j*0.2}) " elif ( ctbPrefix=="dct"): status= random.randint(0,1) - sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status},{1500+j*20}, {150+j*2},{5+j} ) " + sql += f"( {startTs+j*60000}, {1+j*0.1},{1400+j*15}, {status} ) " # tdLog.debug("1insert sql:%s"%sql) if (j > 0) and ((j%batchNum == 0) or (j == rowsPerTbl - 1)): # tdLog.debug("2insert sql:%s"%sql) @@ -79,22 +81,22 @@ class TDTestCase: stabname2="diagnostics" ctbnamePre1="rct" ctbnamePre2="dct" - ctbNums=40 + ctbNums=50 self.ctbNums=ctbNums - rowNUms=200 + rowNUms=5000 ts=1451606400000 tdSql.execute(f"create database {dbname};") tdSql.execute(f"use {dbname} ") tdSql.execute(f''' - create table {stabname1} (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)); + create table {stabname1} (ts timestamp,latitude double,longitude double,elevation double,velocity double,heading double,grade double,fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30),load_capacity double,fuel_capacity double,nominal_fuel_consumption double); ''') tdSql.execute(f''' - create table {stabname2} (ts timestamp,fuel_state double,current_load double,status bigint,load_capacity double,fuel_capacity double,nominal_fuel_consumption double) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30)) ; + create table {stabname2} (ts timestamp,fuel_state double,current_load double,status bigint) tags (name binary(30),fleet binary(30),driver binary(30),model binary(30),device_version binary(30),load_capacity double,fuel_capacity double,nominal_fuel_consumption double) ; ''') self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums) self.create_ctable(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums) - self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=1000) - self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=1000) + self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname1,ctbPrefix=ctbnamePre1,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=10000) + self.insertData(tsql=tdSql,dbName=dbname,stbName=stabname2,ctbPrefix=ctbnamePre2,ctbNum=ctbNums,rowsPerTbl=rowNUms,startTs=ts,batchNum=10000) # for i in range(ctbNum): # if i %10 == 0 : # # tdLog.debug(f"create table rct{i} using readings 
@@ -131,7 +133,7 @@ class TDTestCase:

     # tdLog.info("avg value check pass , it work as expected ,sql is \"%s\" "%check_query )

-    def tsbsIotQuery(self):
+    def tsbsIotQuery(self,insertinto=True):

         tdSql.execute("use db_tsbs")

@@ -143,10 +145,11 @@ class TDTestCase:

         # test insert into
-        tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
-        tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
-
-        tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+        if insertinto:
+            tdSql.execute("create table testsnode (ts timestamp, c1 float,c2 binary(30),c3 binary(30),c4 binary(30)) ;")
+            tdSql.query("insert into testsnode SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")
+
+            tdSql.query("insert into testsnode(ts,c1,c2,c3,c4) SELECT ts,avg(velocity) as mean_velocity,name,driver,fleet FROM readings WHERE ts > 1451606400000 AND ts <= 1451606460000 partition BY name,driver,fleet,ts interval(10m);")

         # test partition interval fill
@@ -182,8 +185,7 @@ class TDTestCase:

         # # 6. avg-daily-driving-session
         # #taosc core dumped
-        # tdSql.execute("create table random_measure2_1 (ts timestamp,ela float, name binary(40))")
-        # tdSql.query("SELECT ts,diff(mv) AS difka FROM (SELECT ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name,ts interval(10m) fill(value,0)) GROUP BY name,ts;")
+        tdSql.query(" SELECT _wstart as ts,name,floor(avg(velocity)/5) AS mv FROM readings WHERE name is not null AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0);")
         # tdSql.query("select name,diff(mv) AS difka FROM (SELECT ts,name,mv FROM (SELECT _wstart as ts,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0))) group BY name ;")
         # tdSql.query("SELECT _wstart,name,floor(avg(velocity)/10)/floor(avg(velocity)/10) AS mv FROM readings WHERE name!='' AND ts > '2016-01-01T00:00:00Z' AND ts < '2016-01-05T00:00:01Z' partition by name interval(10m) fill(value,0)")
@@ -234,7 +236,9 @@ class TDTestCase:
         tdLog.printNoPrefix("==========step1:create database and table,insert data ==============")
         self.prepareData()
         self.tsbsIotQuery()
-
+        tdDnodes.stop(1)
+        tdDnodes.start(1)
+        self.tsbsIotQuery(False)

     def stop(self):
         tdSql.close()
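
The `run()` change above introduces a restart-resilience pattern: execute the query set once, restart a dnode, then execute the same queries again while skipping the one-time `insert into` setup. A minimal sketch of the same pattern, assuming the suite's `util.dnodes.tdDnodes` helper imported at the top of tsbsQuery.py; `run_queries` is an illustrative stand-in for `tsbsIotQuery`:

```python
from util.dnodes import tdDnodes  # test-framework helper used by tsbsQuery.py

def run_with_restart(run_queries):
    # First pass: create the helper table and run the full query set.
    run_queries(insertinto=True)
    # Bounce dnode 1 so the second pass exercises a freshly restarted vnode.
    tdDnodes.stop(1)
    tdDnodes.start(1)
    # Second pass: same queries, but skip the one-time "insert into" setup,
    # since testsnode already exists from the first pass.
    run_queries(insertinto=False)
```
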
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index 1f6e8ce1f5..7941e76df6 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -90,6 +90,12 @@ python3 ./test.py -f 2-query/distribute_agg_max.py
 python3 ./test.py -f 2-query/distribute_agg_max.py -R
 python3 ./test.py -f 2-query/distribute_agg_min.py
 python3 ./test.py -f 2-query/distribute_agg_min.py -R
+python3 ./test.py -f 2-query/distribute_agg_spread.py
+python3 ./test.py -f 2-query/distribute_agg_spread.py -R
+python3 ./test.py -f 2-query/distribute_agg_stddev.py
+python3 ./test.py -f 2-query/distribute_agg_stddev.py -R
+python3 ./test.py -f 2-query/distribute_agg_sum.py
+python3 ./test.py -f 2-query/distribute_agg_sum.py -R

@@ -156,9 +162,6 @@ python3 ./test.py -f 2-query/function_stateduration.py
 python3 ./test.py -f 2-query/statecount.py
 python3 ./test.py -f 2-query/tail.py
 python3 ./test.py -f 2-query/ttl_comment.py
-python3 ./test.py -f 2-query/distribute_agg_sum.py
-python3 ./test.py -f 2-query/distribute_agg_spread.py
-python3 ./test.py -f 2-query/distribute_agg_stddev.py
 python3 ./test.py -f 2-query/twa.py
 python3 ./test.py -f 2-query/irate.py
 python3 ./test.py -f 2-query/function_null.py
@@ -195,7 +198,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3
 python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3
 python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3

-# vnode case 
+# vnode case
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1
 python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py -N 4 -M 1
@@ -214,9 +217,9 @@ python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_query
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py -N 4 -M 1
-# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1
-# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1
-python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py -N 4 -M 1
+# python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py -N 4 -M 1
 # python3 test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py -N 4 -M 1

diff --git a/tools/taos-tools b/tools/taos-tools
new file mode 160000
index 0000000000..3c7dafeea3
--- /dev/null
+++ b/tools/taos-tools
@@ -0,0 +1 @@
+Subproject commit 3c7dafeea3e558968165b73bee0f51024898e3da
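
The newly scheduled suites can also be run one at a time; a small sketch that mirrors a single line of fulltest.sh above, assuming the working directory is tests/system-test and a running test environment, as the script itself requires:

```python
import subprocess

# Mirror one fulltest.sh entry: run the distributed-sum suite once,
# without the -R variant. check=True surfaces a non-zero exit code.
subprocess.run(
    ["python3", "./test.py", "-f", "2-query/distribute_agg_sum.py"],
    check=True,
)
```
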