From 4ffe3f76caf6ccb5f327889e862e5111c5cf9f7c Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 6 Nov 2022 12:46:48 +0800 Subject: [PATCH 001/123] test: rollback taosadapter for taos-tools ci test --- cmake/taosadapter_CMakeLists.txt.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in index c507ae2536..5b8192831e 100644 --- a/cmake/taosadapter_CMakeLists.txt.in +++ b/cmake/taosadapter_CMakeLists.txt.in @@ -2,7 +2,7 @@ # taosadapter ExternalProject_Add(taosadapter GIT_REPOSITORY https://github.com/taosdata/taosadapter.git - GIT_TAG 8c3d57d + GIT_TAG a11131c SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter" BINARY_DIR "" #BUILD_IN_SOURCE TRUE From a350794ea70f7a93b128e123606782f6aa8616c2 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 31 Jul 2023 21:23:49 +0800 Subject: [PATCH 002/123] test: add test_R.sh --- tests/docs-examples-test/test_R.sh | 14 ++++++++++++++ tests/parallel_test/cases.task | 3 ++- 2 files changed, 16 insertions(+), 1 deletion(-) create mode 100755 tests/docs-examples-test/test_R.sh diff --git a/tests/docs-examples-test/test_R.sh b/tests/docs-examples-test/test_R.sh new file mode 100755 index 0000000000..d59daf5d34 --- /dev/null +++ b/tests/docs-examples-test/test_R.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +set -e + +pgrep taosd || taosd >> /dev/null 2>&1 & +pgrep taosadapter || taosadapter >> /dev/null 2>&1 & + +cd ../../docs/examples/R + +jar_path=`find ../../../../debug/build -name taos-jdbcdriver-*-dist.jar` +echo jar_path=$jar_path +R -f connect_native.r --args $jar_path +# R -f connect_rest.r --args $jar_path # bug 14704 + diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1ec5102d9b..3dce8ccfec 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1,4 +1,4 @@ -#Coulumn Define +#Column Define #caseID,rerunTimes,Run with Sanitizer,casePath,caseCommand #NA,NA,y or n,script,./test.sh -f 
tsim/user/basic.sim @@ -1218,3 +1218,4 @@ ,,n,docs-examples-test,bash csharp.sh ,,n,docs-examples-test,bash jdbc.sh ,,n,docs-examples-test,bash go.sh +,,n,docs-examples-test,bash test_R.sh From f2728a65d26a4054b7c247f9c74d8ac58d38e6d7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Mon, 31 Jul 2023 21:34:10 +0800 Subject: [PATCH 003/123] test: wget jdbc dist.jar --- tests/docs-examples-test/test_R.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/docs-examples-test/test_R.sh b/tests/docs-examples-test/test_R.sh index d59daf5d34..bc850ecd6e 100755 --- a/tests/docs-examples-test/test_R.sh +++ b/tests/docs-examples-test/test_R.sh @@ -6,8 +6,9 @@ pgrep taosd || taosd >> /dev/null 2>&1 & pgrep taosadapter || taosadapter >> /dev/null 2>&1 & cd ../../docs/examples/R +wget https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar -jar_path=`find ../../../../debug/build -name taos-jdbcdriver-*-dist.jar` +jar_path=`find . -name taos-jdbcdriver-*-dist.jar` echo jar_path=$jar_path R -f connect_native.r --args $jar_path # R -f connect_rest.r --args $jar_path # bug 14704 From 0666baaf0696f8c36582193ddf0355deebea0586 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 1 Aug 2023 13:37:47 +0800 Subject: [PATCH 004/123] fix: wget check if file exists --- tests/docs-examples-test/test_R.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/docs-examples-test/test_R.sh b/tests/docs-examples-test/test_R.sh index bc850ecd6e..707ea02704 100755 --- a/tests/docs-examples-test/test_R.sh +++ b/tests/docs-examples-test/test_R.sh @@ -6,7 +6,7 @@ pgrep taosd || taosd >> /dev/null 2>&1 & pgrep taosadapter || taosadapter >> /dev/null 2>&1 & cd ../../docs/examples/R -wget https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar +wget -N https://repo1.maven.org/maven2/com/taosdata/jdbc/taos-jdbcdriver/3.2.4/taos-jdbcdriver-3.2.4-dist.jar jar_path=`find . 
-name taos-jdbcdriver-*-dist.jar` echo jar_path=$jar_path From 872844592e31d08aec4ae96e5f0785ee9710b4af Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 1 Aug 2023 13:39:20 +0800 Subject: [PATCH 005/123] test: add readme.txt --- docs/examples/R/readme.txt | 1 + 1 file changed, 1 insertion(+) create mode 100644 docs/examples/R/readme.txt diff --git a/docs/examples/R/readme.txt b/docs/examples/R/readme.txt new file mode 100644 index 0000000000..131a324aa4 --- /dev/null +++ b/docs/examples/R/readme.txt @@ -0,0 +1 @@ +apt install -y libbz2-dev libpcre2-dev libicu-dev From 2caaae392a11fb19cdeb70f6736ceebd07a3704f Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 1 Aug 2023 06:38:30 +0000 Subject: [PATCH 006/123] add keep alive parameter --- include/common/tglobal.h | 1 + source/common/src/tglobal.c | 66 ++++++++++++++++++++------- source/libs/transport/inc/transComm.h | 2 +- source/libs/transport/src/transCli.c | 6 +-- source/libs/transport/src/transComm.c | 4 +- source/libs/transport/src/transSvr.c | 2 +- 6 files changed, 57 insertions(+), 24 deletions(-) diff --git a/include/common/tglobal.h b/include/common/tglobal.h index 4f2ed2b065..c1addd9481 100644 --- a/include/common/tglobal.h +++ b/include/common/tglobal.h @@ -58,6 +58,7 @@ extern int32_t tsTagFilterResCacheSize; extern int32_t tsNumOfRpcThreads; extern int32_t tsNumOfRpcSessions; extern int32_t tsTimeToGetAvailableConn; +extern int32_t tsKeepAliveIdle; extern int32_t tsNumOfCommitThreads; extern int32_t tsNumOfTaskQueueThreads; extern int32_t tsNumOfMnodeQueryThreads; diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index f06eeb230f..e64774ba9c 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -14,8 +14,8 @@ */ #define _DEFAULT_SOURCE -#include "os.h" #include "tglobal.h" +#include "os.h" #include "tconfig.h" #include "tgrant.h" #include "tlog.h" @@ -49,6 +49,8 @@ bool tsPrintAuth = false; int32_t tsNumOfRpcThreads = 1; int32_t tsNumOfRpcSessions = 
10000; int32_t tsTimeToGetAvailableConn = 500000; +int32_t tsKeepAliveIdle = 60; + int32_t tsNumOfCommitThreads = 2; int32_t tsNumOfTaskQueueThreads = 4; int32_t tsNumOfMnodeQueryThreads = 4; @@ -63,7 +65,7 @@ int32_t tsNumOfQnodeFetchThreads = 1; int32_t tsNumOfSnodeStreamThreads = 4; int32_t tsNumOfSnodeWriteThreads = 1; int32_t tsMaxStreamBackendCache = 128; // M -int32_t tsPQSortMemThreshold = 16; // M +int32_t tsPQSortMemThreshold = 16; // M // sync raft int32_t tsElectInterval = 25 * 1000; @@ -119,8 +121,8 @@ int32_t tsQueryPolicy = 1; int32_t tsQueryRspPolicy = 0; int64_t tsQueryMaxConcurrentTables = 200; // unit is TSDB_TABLE_NUM_UNIT bool tsEnableQueryHb = true; -bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true -bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true +bool tsEnableScience = false; // on taos-cli show float and doulbe with scientific notation if true +bool tsTtlChangeOnWrite = false; // ttl delete time changes on last write if true int32_t tsQuerySmaOptimize = 0; int32_t tsQueryRsmaTolerance = 1000; // the tolerance time (ms) to judge from which level to query rsma data. 
bool tsQueryPlannerTrace = false; @@ -372,7 +374,9 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "maxRetryWaitTime", tsMaxRetryWaitTime, 0, 86400000, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddBool(pCfg, "useAdapter", tsUseAdapter, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddBool(pCfg, "crashReporting", tsEnableCrashReport, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, CFG_SCOPE_CLIENT) != 0) return -1; + if (cfgAddInt64(pCfg, "queryMaxConcurrentTables", tsQueryMaxConcurrentTables, INT64_MIN, INT64_MAX, + CFG_SCOPE_CLIENT) != 0) + return -1; if (cfgAddInt32(pCfg, "metaCacheMaxSize", tsMetaCacheMaxSize, -1, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddInt32(pCfg, "slowLogThreshold", tsSlowLogThreshold, 0, INT32_MAX, CFG_SCOPE_CLIENT) != 0) return -1; if (cfgAddString(pCfg, "slowLogScope", "", CFG_SCOPE_CLIENT) != 0) return -1; @@ -385,7 +389,11 @@ static int32_t taosAddClientCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "numOfRpcSessions", tsNumOfRpcSessions, 1, 100000, CFG_SCOPE_BOTH) != 0) return -1; tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 10000000); - if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1; + if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsTimeToGetAvailableConn, 20, 1000000, CFG_SCOPE_BOTH) != 0) + return -1; + + tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 72000); + if (cfgAddInt32(pCfg, "keepAliveIdle", tsKeepAliveIdle, 1, 7200000, CFG_SCOPE_BOTH) != 0) return -1; tsNumOfTaskQueueThreads = tsNumOfCores / 2; tsNumOfTaskQueueThreads = TMAX(tsNumOfTaskQueueThreads, 4); @@ -445,7 +453,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "statusInterval", tsStatusInterval, 1, 30, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "minSlidingTime", tsMinSlidingTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return 
-1; if (cfgAddInt32(pCfg, "minIntervalTime", tsMinIntervalTime, 1, 1000000, CFG_SCOPE_CLIENT) != 0) return -1; - if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "maxNumOfDistinctRes", tsMaxNumOfDistinctResults, 10 * 10000, 10000 * 10000, + CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt32(pCfg, "countAlwaysReturnValue", tsCountAlwaysReturnValue, 0, 1, CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "queryBufferSize", tsQueryBufferSize, -1, 500000000000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "printAuth", tsPrintAuth, CFG_SCOPE_SERVER) != 0) return -1; @@ -461,6 +471,9 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsTimeToGetAvailableConn = TRANGE(tsTimeToGetAvailableConn, 20, 1000000); if (cfgAddInt32(pCfg, "timeToGetAvailableConn", tsNumOfRpcSessions, 20, 1000000, CFG_SCOPE_BOTH) != 0) return -1; + tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 72000); + if (cfgAddInt32(pCfg, "keepAliveIdle", tsKeepAliveIdle, 1, 7200000, CFG_SCOPE_BOTH) != 0) return -1; + tsNumOfCommitThreads = tsNumOfCores / 2; tsNumOfCommitThreads = TRANGE(tsNumOfCommitThreads, 2, 4); if (cfgAddInt32(pCfg, "numOfCommitThreads", tsNumOfCommitThreads, 1, 1024, CFG_SCOPE_SERVER) != 0) return -1; @@ -473,7 +486,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsNumOfVnodeQueryThreads = TMAX(tsNumOfVnodeQueryThreads, 4); if (cfgAddInt32(pCfg, "numOfVnodeQueryThreads", tsNumOfVnodeQueryThreads, 4, 1024, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddFloat(pCfg, "ratioOfVnodeStreamThreads", tsRatioOfVnodeStreamThreads, 0.01, 100, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfVnodeFetchThreads = tsNumOfCores / 4; tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4); @@ -493,7 +507,8 @@ static int32_t taosAddServerCfg(SConfig 
*pCfg) { tsNumOfSnodeStreamThreads = tsNumOfCores / 4; tsNumOfSnodeStreamThreads = TRANGE(tsNumOfSnodeStreamThreads, 2, 4); - if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "numOfSnodeSharedThreads", tsNumOfSnodeStreamThreads, 2, 1024, CFG_SCOPE_SERVER) != 0) + return -1; tsNumOfSnodeWriteThreads = tsNumOfCores / 4; tsNumOfSnodeWriteThreads = TRANGE(tsNumOfSnodeWriteThreads, 2, 4); @@ -501,14 +516,18 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { tsRpcQueueMemoryAllowed = tsTotalMemoryKB * 1024 * 0.1; tsRpcQueueMemoryAllowed = TRANGE(tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10LL, TSDB_MAX_MSG_SIZE * 10000LL); - if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, CFG_SCOPE_BOTH) != 0) + if (cfgAddInt64(pCfg, "rpcQueueMemoryAllowed", tsRpcQueueMemoryAllowed, TSDB_MAX_MSG_SIZE * 10L, INT64_MAX, + CFG_SCOPE_BOTH) != 0) return -1; if (cfgAddInt32(pCfg, "syncElectInterval", tsElectInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatInterval", tsHeartbeatInterval, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; + if (cfgAddInt32(pCfg, "syncHeartbeatTimeout", tsHeartbeatTimeout, 10, 1000 * 60 * 24 * 2, CFG_SCOPE_SERVER) != 0) + return -1; - if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt64(pCfg, "vndCommitMaxInterval", tsVndCommitMaxIntervalMs, 1000, 1000 * 60 * 60, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddInt64(pCfg, "mndSdbWriteDelta", tsMndSdbWriteDelta, 20, 10000, CFG_SCOPE_SERVER) != 0) 
return -1; if (cfgAddInt64(pCfg, "mndLogRetention", tsMndLogRetention, 500, 10000, CFG_SCOPE_SERVER) != 0) return -1; @@ -538,7 +557,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt32(pCfg, "uptimeInterval", tsUptimeInterval, 1, 100000, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "queryRsmaTolerance", tsQueryRsmaTolerance, 0, 900000, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, CFG_SCOPE_SERVER) != 0) + if (cfgAddInt64(pCfg, "walFsyncDataSizeLimit", tsWalFsyncDataSizeLimit, 100 * 1024 * 1024, INT64_MAX, + CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddBool(pCfg, "udf", tsStartUdfd, CFG_SCOPE_SERVER) != 0) return -1; @@ -549,7 +569,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) { if (cfgAddInt64(pCfg, "streamBufferSize", tsStreamBufferSize, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt64(pCfg, "checkpointInterval", tsCheckpointInterval, 0, INT64_MAX, CFG_SCOPE_SERVER) != 0) return -1; - if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) return -1; + if (cfgAddInt32(pCfg, "cacheLazyLoadThreshold", tsCacheLazyLoadThreshold, 0, 100000, CFG_SCOPE_SERVER) != 0) + return -1; if (cfgAddBool(pCfg, "filterScalarMode", tsFilterScalarMode, CFG_SCOPE_SERVER) != 0) return -1; if (cfgAddInt32(pCfg, "keepTimeOffset", tsKeepTimeOffset, 0, 23, CFG_SCOPE_SERVER) != 0) return -1; @@ -604,6 +625,13 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) { pItem->stype = stype; } + pItem = cfgGetItem(tsCfg, "keepAliveIdle"); + if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { + tsKeepAliveIdle = TRANGE(tsKeepAliveIdle, 1, 720000); + pItem->i32 = tsKeepAliveIdle; + pItem->stype = stype; + } + pItem = cfgGetItem(tsCfg, "numOfCommitThreads"); if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) { tsNumOfCommitThreads = numOfCores / 2; @@ -833,6 +861,8 @@ static int32_t 
taosSetClientCfg(SConfig *pCfg) { tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32; tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32; + + tsKeepAliveIdle = cfgGetItem(pCfg, "keepAliveIdle")->i32; return 0; } @@ -872,6 +902,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tsNumOfRpcSessions = cfgGetItem(pCfg, "numOfRpcSessions")->i32; tsTimeToGetAvailableConn = cfgGetItem(pCfg, "timeToGetAvailableConn")->i32; + tsKeepAliveIdle = cfgGetItem(pCfg, "keepAliveIdle")->i32; + tsNumOfCommitThreads = cfgGetItem(pCfg, "numOfCommitThreads")->i32; tsNumOfMnodeReadThreads = cfgGetItem(pCfg, "numOfMnodeReadThreads")->i32; tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32; @@ -902,7 +934,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { tstrncpy(tsTelemServer, cfgGetItem(pCfg, "telemetryServer")->str, TSDB_FQDN_LEN); tsTelemPort = (uint16_t)cfgGetItem(pCfg, "telemetryPort")->i32; - tmqMaxTopicNum= cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; + tmqMaxTopicNum = cfgGetItem(pCfg, "tmqMaxTopicNum")->i32; tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32; tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32; @@ -1014,7 +1046,7 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { taosSetCoreDump(enableCore); } else if (strcasecmp("enableQueryHb", name) == 0) { tsEnableQueryHb = cfgGetItem(pCfg, "enableQueryHb")->bval; - } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { + } else if (strcasecmp("ttlChangeOnWrite", name) == 0) { tsTtlChangeOnWrite = cfgGetItem(pCfg, "ttlChangeOnWrite")->bval; } break; diff --git a/source/libs/transport/inc/transComm.h b/source/libs/transport/inc/transComm.h index 3b304e2c77..a6b7a20f76 100644 --- a/source/libs/transport/inc/transComm.h +++ b/source/libs/transport/inc/transComm.h @@ -293,7 +293,7 @@ bool transReadComplete(SConnBuffer* connBuf); int transResetBuffer(SConnBuffer* connBuf); int transDumpFromBuffer(SConnBuffer* connBuf, char** 
buf); -int transSetConnOption(uv_tcp_t* stream); +int transSetConnOption(uv_tcp_t* stream, int keepalive); void transRefSrvHandle(void* handle); void transUnrefSrvHandle(void* handle); diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 01223a2be9..71379daa50 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -1202,7 +1202,7 @@ static void cliHandleBatchReq(SCliBatch* pBatch, SCliThrd* pThrd) { cliHandleFastFail(conn, -1); return; } - ret = transSetConnOption((uv_tcp_t*)conn->stream); + ret = transSetConnOption((uv_tcp_t*)conn->stream, 20); if (ret != 0) { tError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); cliHandleFastFail(conn, -1); @@ -1610,7 +1610,7 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { tGTrace("%s conn %p try to connect to %s", pTransInst->label, conn, conn->dstAddr); pThrd->newConnCount++; - int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 4); + int32_t fd = taosCreateSocketWithTimeout(TRANS_CONN_TIMEOUT * 10); if (fd == -1) { tGError("%s conn %p failed to create socket, reason:%s", transLabel(pTransInst), conn, tstrerror(TAOS_SYSTEM_ERROR(errno))); @@ -1624,7 +1624,7 @@ void cliHandleReq(SCliMsg* pMsg, SCliThrd* pThrd) { cliHandleExcept(conn); return; } - ret = transSetConnOption((uv_tcp_t*)conn->stream); + ret = transSetConnOption((uv_tcp_t*)conn->stream, tsKeepAliveIdle); if (ret != 0) { tGError("%s conn %p failed to set socket opt, reason:%s", transLabel(pTransInst), conn, uv_err_name(ret)); cliHandleExcept(conn); diff --git a/source/libs/transport/src/transComm.c b/source/libs/transport/src/transComm.c index b14db9497e..5e602b1ea2 100644 --- a/source/libs/transport/src/transComm.c +++ b/source/libs/transport/src/transComm.c @@ -203,10 +203,10 @@ bool transReadComplete(SConnBuffer* connBuf) { return (p->left == 0 || p->invalid) ? 
true : false; } -int transSetConnOption(uv_tcp_t* stream) { +int transSetConnOption(uv_tcp_t* stream, int keepalive) { #if defined(WINDOWS) || defined(DARWIN) #else - uv_tcp_keepalive(stream, 1, 20); + uv_tcp_keepalive(stream, 1, keepalive); #endif return uv_tcp_nodelay(stream, 1); // int ret = uv_tcp_keepalive(stream, 5, 60); diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index f23e176c79..a546ee8159 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -760,7 +760,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { uv_tcp_init(pThrd->loop, pConn->pTcp); pConn->pTcp->data = pConn; - transSetConnOption((uv_tcp_t*)pConn->pTcp); + // transSetConnOption((uv_tcp_t*)pConn->pTcp); if (uv_accept(q, (uv_stream_t*)(pConn->pTcp)) == 0) { uv_os_fd_t fd; From fa36001c9f767b6a998338aa254dcd4e0b9b414b Mon Sep 17 00:00:00 2001 From: "chao.feng" Date: Tue, 1 Aug 2023 17:32:06 +0800 Subject: [PATCH 007/123] add CI test case for ts-3479 by charles --- .../0-others/user_privilege_multi_users.py | 126 ++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 tests/system-test/0-others/user_privilege_multi_users.py diff --git a/tests/system-test/0-others/user_privilege_multi_users.py b/tests/system-test/0-others/user_privilege_multi_users.py new file mode 100644 index 0000000000..8812f42e7b --- /dev/null +++ b/tests/system-test/0-others/user_privilege_multi_users.py @@ -0,0 +1,126 @@ +from itertools import product +import taos +import random +from taos.tmq import * +from util.cases import * +from util.common import * +from util.log import * +from util.sql import * +from util.sqlset import * + + +class TDTestCase: + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + # init the tdsql + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + # user info + self.userNum = 100 + 
self.basic_username = "user" + self.password = "pwd" + + # db info + self.dbname = "user_privilege_multi_users" + self.stbname = 'stb' + self.ctbname_num = 100 + self.column_dict = { + 'ts': 'timestamp', + 'col1': 'float', + 'col2': 'int', + } + self.tag_dict = { + 'ctbname': 'binary(10)' + } + + self.privilege_list = [] + + def prepare_data(self): + """Create the db and data for test + """ + # create datebase + tdSql.execute(f"create database {self.dbname}") + tdLog.debug("sql:" + f"create database {self.dbname}") + tdSql.execute(f"use {self.dbname}") + tdLog.debug("sql:" + f"use {self.dbname}") + + # create super table + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict)) + tdLog.debug("Create stable {} successfully".format(self.stbname)) + for ctbIndex in range(self.ctbname_num): + ctname = f"ctb{ctbIndex}" + tdSql.execute(f"create table {ctname} using {self.stbname} tags('{ctname}')") + tdLog.debug("sql:" + f"create table {ctname} using {self.stbname} tags('{ctname}')") + + def create_multiusers(self): + """Create the user for test + """ + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + tdSql.execute(f'create user {username} pass "{self.password}"') + tdLog.debug("sql:" + f'create user {username} pass "{self.password}"') + + def grant_privilege(self): + """Add the privilege for the users + """ + try: + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + privilege = random.choice(["read", "write", "all"]) + condition = f"ctbname='ctb{userIndex}'" + self.privilege_list.append({ + "username": username, + "privilege": privilege, + "condition": condition + }) + tdSql.execute(f'grant {privilege} on {self.dbname}.{self.stbname} with {condition} to {username}') + tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{self.stbname} with {condition} to {username}') + except Exception as ex: + tdLog.exit(ex) + + def remove_privilege(self): + 
"""Remove the privilege for the users + """ + try: + for item in self.privilege_list: + username = item["username"] + privilege = item["privilege"] + condition = item["condition"] + tdSql.execute(f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}') + tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}') + except Exception as ex: + tdLog.exit(ex) + + def run(self): + """ + Check the information from information_schema.ins_user_privileges + """ + self.create_multiusers() + self.prepare_data() + # grant privilege to users + self.grant_privilege() + # check information_schema.ins_user_privileges + tdSql.query("select * from information_schema.ins_user_privileges;") + tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(tdSql.queryResult)) + if len(tdSql.queryResult) >= self.userNum: + tdLog.debug("case passed") + else: + tdLog.exit("The privilege number in information_schema.ins_user_privileges is incorrect") + + def stop(self): + # remove the privilege + self.remove_privilege() + # clear env + tdSql.execute(f"drop database {self.dbname}") + # remove the users + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + tdSql.execute(f'drop user {username}') + # close the connection + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From cbca55bc6a607252bbfffa9cb1b9b4d56b442d05 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 2 Aug 2023 14:47:51 +0800 Subject: [PATCH 008/123] docs: add r language to connector (#22286) * docs: add r language to connector * docs: add r lang english version in connector * docs: fix include path and note format * Update 60-r-lang.mdx minor changes --------- Co-authored-by: danielclow <106956386+danielclow@users.noreply.github.com> --- .../14-reference/03-connector/60-r-lang.mdx | 86 
++++++++++++++++++ .../03-connector/{10-php.mdx => 80-php.mdx} | 0 docs/examples/R/connect_native.r | 2 +- docs/examples/R/connect_rest.r | 8 +- docs/zh/08-connector/43-r-lang.mdx | 88 +++++++++++++++++++ 5 files changed, 181 insertions(+), 3 deletions(-) create mode 100644 docs/en/14-reference/03-connector/60-r-lang.mdx rename docs/en/14-reference/03-connector/{10-php.mdx => 80-php.mdx} (100%) create mode 100644 docs/zh/08-connector/43-r-lang.mdx diff --git a/docs/en/14-reference/03-connector/60-r-lang.mdx b/docs/en/14-reference/03-connector/60-r-lang.mdx new file mode 100644 index 0000000000..852b2022a5 --- /dev/null +++ b/docs/en/14-reference/03-connector/60-r-lang.mdx @@ -0,0 +1,86 @@ +--- +toc_max_heading_level: 4 +sidebar_label: R +title: R Language Connector +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +import Rdemo from "../../07-develop/01-connect/_connect_r.mdx" + +By using the RJDBC library in R, you can enable R programs to access TDengine data. Here are the installation process, configuration steps, and an example code in R. + +## Installation Process + +Before getting started, make sure you have installed the R language environment. Then, follow these steps to install and configure the RJDBC library: + +1. Install Java Development Kit (JDK): RJDBC library requires Java environment. Download the appropriate JDK for your operating system from the official Oracle website and follow the installation guide. + +2. Install the RJDBC library: Execute the following command in the R console to install the RJDBC library. + +```r +install.packages("RJDBC", repos='http://cran.us.r-project.org') +``` + +:::note +On Linux systems, installing the RJDBC package may require installing the necessary components for compilation. For example, on Ubuntu, you can execute the command ``apt install -y libbz2-dev libpcre2-dev libicu-dev`` to install the required components. 
+On Windows systems, you need to set the **JAVA_HOME** environment variable. +::: + +3. Download the TDengine JDBC driver: Visit the Maven website and download the TDengine JDBC driver (taos-jdbcdriver-X.X.X-dist.jar) to your local machine. + +## Configuration Process + +Once you have completed the installation steps, you need to do some configuration to enable the RJDBC library to connect and access the TDengine time-series database. + +1. Load the RJDBC library and other necessary libraries in your R script: + +```r +library(DBI) +library(rJava) +library(RJDBC) +``` + +2. Set the JDBC driver and JDBC URL: + +```r +# Set the JDBC driver path (specify the location on your local machine) +driverPath <- "/path/to/taos-jdbcdriver-X.X.X-dist.jar" + +# Set the JDBC URL (specify the FQDN and credentials of your TDengine cluster) +url <- "jdbc:TAOS://localhost:6030/?user=root&password=taosdata" +``` + +3. Load the JDBC driver: + +```r +# Load the JDBC driver +drv <- JDBC("com.taosdata.jdbc.TSDBDriver", driverPath) +``` + +4. Create a TDengine database connection: + +```r +# Create a database connection +conn <- dbConnect(drv, url) +``` + +5. Once the connection is established, you can use the ``conn`` object for various database operations such as querying data and inserting data. + +6. Finally, don't forget to close the database connection after you are done: + +```r +# Close the database connection +dbDisconnect(conn) +``` + +## Example Code Using RJDBC in R + +Here's an example code that uses the RJDBC library to connect to a TDengine time-series database and perform a query operation: + + + +Please modify the JDBC driver, JDBC URL, username, password, and SQL query statement according to your specific TDengine time-series database environment and requirements. + +By following the steps and using the provided example code, you can use the RJDBC library in the R language to access the TDengine time-series database and perform tasks such as data querying and analysis. 
diff --git a/docs/en/14-reference/03-connector/10-php.mdx b/docs/en/14-reference/03-connector/80-php.mdx similarity index 100% rename from docs/en/14-reference/03-connector/10-php.mdx rename to docs/en/14-reference/03-connector/80-php.mdx diff --git a/docs/examples/R/connect_native.r b/docs/examples/R/connect_native.r index 3c5c9e199b..a6abe3e8fb 100644 --- a/docs/examples/R/connect_native.r +++ b/docs/examples/R/connect_native.r @@ -8,7 +8,7 @@ library("rJava") library("RJDBC") args<- commandArgs(trailingOnly = TRUE) -driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.0.0-dist.jar" +driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar" driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path) conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata") dbGetQuery(conn, "SELECT server_version()") diff --git a/docs/examples/R/connect_rest.r b/docs/examples/R/connect_rest.r index 5ceec572fc..bc5da3c15a 100644 --- a/docs/examples/R/connect_rest.r +++ b/docs/examples/R/connect_rest.r @@ -2,11 +2,15 @@ if (! 
"RJDBC" %in% installed.packages()[, "Package"]) { install.packages('RJDBC', repos='http://cran.us.r-project.org') } +# ANCHOR: demo library("DBI") library("rJava") library("RJDBC") -driver_path = "/home/debug/build/lib/taos-jdbcdriver-2.0.38-dist.jar" + +args<- commandArgs(trailingOnly = TRUE) +driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver-3.2.4-dist.jar" driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path) conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata") dbGetQuery(conn, "SELECT server_version()") -dbDisconnect(conn) \ No newline at end of file +dbDisconnect(conn) +# ANCHOR_END: demo diff --git a/docs/zh/08-connector/43-r-lang.mdx b/docs/zh/08-connector/43-r-lang.mdx new file mode 100644 index 0000000000..3a4ed39748 --- /dev/null +++ b/docs/zh/08-connector/43-r-lang.mdx @@ -0,0 +1,88 @@ +--- +toc_max_heading_level: 4 +sidebar_label: R +title: R Language Connector +--- + +import Tabs from '@theme/Tabs'; +import TabItem from '@theme/TabItem'; + +import Rdemo from "../07-develop/01-connect/_connect_r.mdx" + +通过 R 语言中的 RJDBC 库可以使 R 语言程序支持访问 TDengine 数据。以下是安装过程、配置过程以及 R 语言示例代码。 + +## 安装过程 + +在开始之前,请确保已经安装了R语言环境。然后按照以下步骤安装和配置RJDBC库: + +1. 安装Java Development Kit (JDK):RJDBC库需要依赖Java环境。请从Oracle官方网站下载适合您操作系统的JDK,并按照安装指南进行安装。 + +2. 安装RJDBC库:在R控制台中执行以下命令来安装RJDBC库。 + +```r +install.packages("RJDBC", repos='http://cran.us.r-project.org') +``` + +:::note +在 Linux 上安装 RJDBC 包可能需要安装编译需要的组件,以 Ubuntu 为例执行 `apt install -y libbz2-dev libpcre2-dev libicu-dev` 命令安装。 +在 Windows 系统上需要设置 JAVA_HOME 环境变量。 +::: + +3. 下载 TDengine JDBC 驱动程序:访问 maven.org 网站,下载 TDengine JDBC 驱动程序(taos-jdbcdriver-X.X.X-dist.jar)。 + +4. 将 TDengine JDBC 驱动程序放置在适当的位置:在您的计算机上选择一个合适的位置,将 TDengine JDBC 驱动程序文件(taos-jdbcdriver-X.X.X-dist.jar)保存在此处。 + +## 配置过程 + +完成了安装步骤后,您需要进行一些配置,以便RJDBC库能够正确连接和访问TDengine时序数据库。 + +1. 在 R 脚本中加载 RJDBC 和其他必要的库: + +```r +library(DBI) +library(rJava) +library(RJDBC) +``` + +2. 
设置 JDBC 驱动程序和 JDBC URL: + +```r +# 设置JDBC驱动程序路径(根据您实际保存的位置进行修改) +driverPath <- "/path/to/taos-jdbcdriver-X.X.X-dist.jar" + +# 设置JDBC URL(根据您的具体环境进行修改) +url <- "jdbc:TAOS://localhost:6030/?user=root&password=taosdata" +``` + +3. 加载 JDBC 驱动程序: + +```r +# 加载JDBC驱动程序 +drv <- JDBC("com.taosdata.jdbc.TSDBDriver", driverPath) +``` + +4. 创建 TDengine 数据库连接: + +```r +# 创建数据库连接 +conn <- dbConnect(drv, url) +``` + +5. 连接成功后,您可以使用 conn 对象进行各种数据库操作,如查询数据、插入数据等。 + +6. 最后,不要忘记在使用完成后关闭数据库连接: + +```r +# 关闭数据库连接 +dbDisconnect(conn) +``` + +## 使用 RJDBC 的 R 语言示例代码 + +以下是一个使用 RJDBC 库连接 TDengine 时序数据库并执行查询操作的示例代码: + + + +请根据您的实际情况修改JDBC驱动程序、JDBC URL、用户名、密码以及SQL查询语句,以适配您的 TDengine 时序数据库环境和要求。 + +通过以上步骤和示例代码,您可以在 R 语言环境中使用 RJDBC 库访问 TDengine 时序数据库,进行数据查询和分析等操作。 From f90f0b70478725fb0e07b3fe0274b254fe8277e1 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Wed, 2 Aug 2023 15:51:51 +0800 Subject: [PATCH 009/123] disable infinite count loop --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index d74584f844..e0cb54298c 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -359,7 +359,7 @@ static int32_t suidComparFn(const void *target, const void *p2) { if (*uid2 == (*targetUid)) { return 0; } else { - return (*targetUid) < (*uid2) ? -1:1; + return (*targetUid) < (*uid2) ? 
-1 : 1; } } @@ -381,7 +381,7 @@ static bool existsFromSttBlkStatis(const TStatisBlkArray *pStatisBlkArray, uint6 return false; } - while(i < TARRAY2_SIZE(pStatisBlkArray)) { + while (i < TARRAY2_SIZE(pStatisBlkArray)) { SStatisBlk *p = &pStatisBlkArray->data[i]; if (p->minTbid.suid > suid) { return false; @@ -476,12 +476,12 @@ int32_t tLDataIterOpen2(struct SLDataIter *pIter, SSttFileReader *pSttFileReader tsdbDebug("load the stt file info completed, elapsed time:%.2fms, %s", el, idStr); } - bool exists = existsFromSttBlkStatis(pBlockLoadInfo->pSttStatisBlkArray, suid, uid, pIter->pReader); - if (!exists) { - pIter->iSttBlk = -1; - pIter->pSttBlk = NULL; - return TSDB_CODE_SUCCESS; - } + // bool exists = existsFromSttBlkStatis(pBlockLoadInfo->pSttStatisBlkArray, suid, uid, pIter->pReader); + // if (!exists) { + // pIter->iSttBlk = -1; + // pIter->pSttBlk = NULL; + // return TSDB_CODE_SUCCESS; + // } // find the start block, actually we could load the position to avoid repeatly searching for the start position when // the skey is updated. 
From fb4ed263b6bcaf5de06d650dd9257d263a82e6c8 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Wed, 2 Aug 2023 17:02:42 +0800 Subject: [PATCH 010/123] test: make connect_native.r a bit perplexing (#22291) --- docs/examples/R/connect_native.r | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/examples/R/connect_native.r b/docs/examples/R/connect_native.r index a6abe3e8fb..bd764c3ec4 100644 --- a/docs/examples/R/connect_native.r +++ b/docs/examples/R/connect_native.r @@ -12,5 +12,9 @@ driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver- driver = JDBC("com.taosdata.jdbc.TSDBDriver", driver_path) conn = dbConnect(driver, "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata") dbGetQuery(conn, "SELECT server_version()") +dbSendUpdate(conn, "create database if not exists rtest") +dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))") +dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')") +dbGetQuery(conn, "select * from rtest.test") dbDisconnect(conn) # ANCHOR_END: demo From 673c275117eaad22bdea66003f167935813f4979 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 2 Aug 2023 18:59:35 +0800 Subject: [PATCH 011/123] release 3.1.0.0 --- cmake/cmake.version | 2 +- docs/en/28-releases/01-tdengine.md | 4 ++++ docs/zh/28-releases/01-tdengine.md | 4 ++++ 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/cmake/cmake.version b/cmake/cmake.version index fe35fbe7bd..86afe68188 100644 --- a/cmake/cmake.version +++ b/cmake/cmake.version @@ -2,7 +2,7 @@ IF (DEFINED VERNUMBER) SET(TD_VER_NUMBER ${VERNUMBER}) ELSE () - SET(TD_VER_NUMBER "3.1.0.0.alpha") + SET(TD_VER_NUMBER "3.1.0.1.alpha") ENDIF () IF (DEFINED VERCOMPATIBLE) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 365b36b14f..6eaa395087 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ For 
TDengine 2.x installation packages by version, please visit [here](https://t import Release from "/components/ReleaseV3"; +## 3.1.0.0 + + + ## 3.0.7.1 diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 52bb9c87a0..afdf2a76d3 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,10 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.1.0.0 + + + ## 3.0.7.1 From 8bc62f5eb040c2498d30f39c79d905102cc7f17b Mon Sep 17 00:00:00 2001 From: gccgdb1234 Date: Thu, 3 Aug 2023 08:26:26 +0800 Subject: [PATCH 012/123] doc: remove 3.1.0.0 --- docs/en/28-releases/01-tdengine.md | 3 --- docs/zh/28-releases/01-tdengine.md | 3 --- 2 files changed, 6 deletions(-) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 6eaa395087..888ef33c93 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,9 +10,6 @@ For TDengine 2.x installation packages by version, please visit [here](https://t import Release from "/components/ReleaseV3"; -## 3.1.0.0 - - ## 3.0.7.1 diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index afdf2a76d3..05c05a92e9 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,9 +10,6 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; -## 3.1.0.0 - - ## 3.0.7.1 From 3464979489d1d653f2a6edcf2f1e0c26c548bf9e Mon Sep 17 00:00:00 2001 From: liuyao <54liuyao@163.com> Date: Wed, 2 Aug 2023 17:33:45 +0800 Subject: [PATCH 013/123] set dummy delete data request --- source/libs/executor/src/timewindowoperator.c | 5 ++++- source/libs/stream/src/streamExec.c | 9 +++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 
3a5ff91f68..96734b27c3 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -3574,6 +3574,7 @@ static SSDataBlock* doStreamSessionAgg(SOperatorInfo* pOperator) { SStreamSessionAggOperatorInfo* pInfo = pOperator->info; SOptrBasicInfo* pBInfo = &pInfo->binfo; SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + qDebug("===stream=== stream session agg"); if (pOperator->status == OP_EXEC_DONE) { return NULL; } else if (pOperator->status == OP_RES_TO_RETURN) { @@ -3736,6 +3737,7 @@ void streamSessionReloadState(SOperatorInfo* pOperator) { setSessionOutputBuf(pAggSup, pSeKeyBuf[i].win.skey, pSeKeyBuf[i].win.ekey, pSeKeyBuf[i].groupId, &winInfo); int32_t winNum = compactSessionWindow(pOperator, &winInfo, pInfo->pStUpdated, pInfo->pStDeleted, true); if (winNum > 0) { + qDebug("===stream=== reload state. save result %" PRId64 ", %" PRIu64, winInfo.sessionWin.win.skey, winInfo.sessionWin.groupId); if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_AT_ONCE) { saveResult(winInfo, pInfo->pStUpdated); } else if (pInfo->twAggSup.calTrigger == STREAM_TRIGGER_WINDOW_CLOSE) { @@ -3754,7 +3756,7 @@ void streamSessionReloadState(SOperatorInfo* pOperator) { SOperatorInfo* downstream = pOperator->pDownstream[0]; if (downstream->fpSet.reloadStreamStateFn) { downstream->fpSet.reloadStreamStateFn(downstream); - } + } } SOperatorInfo* createStreamSessionAggOperatorInfo(SOperatorInfo* downstream, SPhysiNode* pPhyNode, @@ -3863,6 +3865,7 @@ static SSDataBlock* doStreamSessionSemiAgg(SOperatorInfo* pOperator) { SExprSupp* pSup = &pOperator->exprSupp; SStreamAggSupporter* pAggSup = &pInfo->streamAggSup; + qDebug("===stream=== stream session semi agg"); if (pOperator->status == OP_EXEC_DONE) { return NULL; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 34370ebce9..d3ab7e820a 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -364,6 +364,15 @@ 
static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { // 7. pause allowed. streamTaskEnablePause(pStreamTask); + if (taosQueueEmpty(pTask->inputQueue->queue)) { + SStreamRefDataBlock* pItem = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);; + SSDataBlock* pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA); + pDelBlock->info.rows = 0; + pDelBlock->info.version = 0; + pItem->type = STREAM_INPUT__REF_DATA_BLOCK; + pItem->pBlock = pDelBlock; + tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pItem); + } streamSchedExec(pStreamTask); streamMetaReleaseTask(pMeta, pStreamTask); From 48234f2e16c33225234ece124b142ecdfd38f62c Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Thu, 3 Aug 2023 09:15:06 +0800 Subject: [PATCH 014/123] release 3.1.0.0 --- docs/en/28-releases/01-tdengine.md | 3 +++ docs/zh/28-releases/01-tdengine.md | 3 +++ 2 files changed, 6 insertions(+) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 888ef33c93..6eaa395087 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -10,6 +10,9 @@ For TDengine 2.x installation packages by version, please visit [here](https://t import Release from "/components/ReleaseV3"; +## 3.1.0.0 + + ## 3.0.7.1 diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md index 05c05a92e9..afdf2a76d3 100644 --- a/docs/zh/28-releases/01-tdengine.md +++ b/docs/zh/28-releases/01-tdengine.md @@ -10,6 +10,9 @@ TDengine 2.x 各版本安装包请访问[这里](https://www.taosdata.com/all-do import Release from "/components/ReleaseV3"; +## 3.1.0.0 + + ## 3.0.7.1 From 74c69bbb6e08205ba95e5c2ca874b4a3b63d7da3 Mon Sep 17 00:00:00 2001 From: liuyao <54liuyao@163.com> Date: Thu, 3 Aug 2023 11:23:46 +0800 Subject: [PATCH 015/123] reset state window end key --- source/libs/executor/src/timewindowoperator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/executor/src/timewindowoperator.c 
b/source/libs/executor/src/timewindowoperator.c index 3a5ff91f68..8839cef314 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -4373,6 +4373,7 @@ static void compactStateWindow(SOperatorInfo* pOperator, SResultWindowInfo* pCur initSessionOutputBuf(pCurWin, &pCurResult, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset); SResultRow* pWinResult = NULL; initSessionOutputBuf(pNextWin, &pWinResult, pAggSup->pDummyCtx, numOfOutput, pSup->rowEntryInfoOffset); + pCurWin->sessionWin.win.ekey = TMAX(pCurWin->sessionWin.win.ekey, pNextWin->sessionWin.win.ekey); updateTimeWindowInfo(&pInfo->twAggSup.timeWindowData, &pCurWin->sessionWin.win, 1); compactFunctions(pSup->pCtx, pAggSup->pDummyCtx, numOfOutput, pTaskInfo, &pInfo->twAggSup.timeWindowData); From 1594005148fca9fb87a9cd587eaad58a6b7f5431 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 3 Aug 2023 11:44:06 +0800 Subject: [PATCH 016/123] docs: update readme for build contrib on (#22299) * docs: update readme with libgflags * docs: update readme with geos * docs: remove geometry docs * docs: update readme * docs: update readme for 3.0 * docs: update readme for build contrib on --- README-CN.md | 2 +- README.md | 2 +- build.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README-CN.md b/README-CN.md index 53abc5c006..2b1790f4bb 100644 --- a/README-CN.md +++ b/README-CN.md @@ -175,7 +175,7 @@ cd TDengine ```bash mkdir debug cd debug -cmake .. -DBUILD_TOOLS=true +cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true make ``` diff --git a/README.md b/README.md index 73df4fb187..a8c20ea3f6 100644 --- a/README.md +++ b/README.md @@ -183,7 +183,7 @@ It equals to execute following commands: ```bash mkdir debug cd debug -cmake .. -DBUILD_TOOLS=true +cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true make ``` diff --git a/build.sh b/build.sh index 78f08afa7a..04ca7a11a0 100755 --- a/build.sh +++ b/build.sh @@ -4,5 +4,5 @@ if [ ! 
-d debug ]; then mkdir debug || echo -e "failed to make directory for build" fi -cd debug && cmake .. -DBUILD_TOOLS=true && make +cd debug && cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true && make From db1bdd172afb4772f4f8504c058c6d1f19e09a21 Mon Sep 17 00:00:00 2001 From: liuyuan <2805658706@qq.com> Date: Thu, 3 Aug 2023 14:36:14 +0800 Subject: [PATCH 017/123] docs: optimization docker deploy --- docs/en/05-get-started/01-docker.md | 15 ++++++ .../index.md => 10-deployment/02-docker.md} | 50 +++++++++-------- docs/en/10-deployment/index.md | 2 +- docs/en/14-reference/11-docker/_category_.yml | 1 - docs/zh/05-get-started/01-docker.md | 14 +++++ .../index.md => 10-deployment/02-docker.md} | 54 +++++++++---------- docs/zh/10-deployment/index.md | 2 +- docs/zh/14-reference/11-docker/_category_.yml | 1 - 8 files changed, 86 insertions(+), 53 deletions(-) rename docs/en/{14-reference/11-docker/index.md => 10-deployment/02-docker.md} (91%) delete mode 100644 docs/en/14-reference/11-docker/_category_.yml rename docs/zh/{14-reference/11-docker/index.md => 10-deployment/02-docker.md} (90%) delete mode 100644 docs/zh/14-reference/11-docker/_category_.yml diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index 2049e1615f..e3f40ac2c7 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -32,6 +32,21 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043 Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed. 
+If you need to persist data to a specific directory on your local machine, And then run the following command: +```shell +docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ + -v ~/data/taos/dnode/log:/var/log/taos \ + -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine +``` +:::note + +* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. you can modify ~/data/taos/dnode/data to your own data directory +* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode/log to your own log directory + + ::: + + + Run the following command to ensure that your container is running: ```shell diff --git a/docs/en/14-reference/11-docker/index.md b/docs/en/10-deployment/02-docker.md similarity index 91% rename from docs/en/14-reference/11-docker/index.md rename to docs/en/10-deployment/02-docker.md index 5a48f2e4b1..ef4c9b0284 100644 --- a/docs/en/14-reference/11-docker/index.md +++ b/docs/en/10-deployment/02-docker.md @@ -1,5 +1,6 @@ --- title: Deploying TDengine with Docker +sidebar_label: Docker description: This chapter describes how to start and access TDengine in a Docker container. --- @@ -10,8 +11,17 @@ This chapter describes how to start the TDengine service in a container and acce The TDengine image starts with the HTTP service activated by default, using the following command: ```shell -docker run -d --name tdengine -p 6041:6041 tdengine/tdengine +docker run -d --name tdengine \ +-v ~/data/taos/dnode/data:/var/lib/taos \ +-v ~/data/taos/dnode/log:/var/log/taos \ +-p 6041:6041 tdengine/tdengine ``` +:::note + +* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. you can modify ~/data/taos/dnode/data to your own data directory +* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. 
you can modify ~/data/taos/dnode/log to your own log directory + + ::: The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. @@ -283,32 +293,30 @@ services: environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" + ports: + - 6041:6041 + - 6030:6030 volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ + # /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. you can modify ~/data/taos/dnode1/data to your own data directory + - ~/data/taos/dnode1/data:/var/lib/taos + # /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode1/log to your own log directory + - ~/data/taos/dnode1/log:/var/log/taos td-2: image: tdengine/tdengine:$VERSION environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ + - ~/data/taos/dnode2/data:/var/lib/taos + - ~/data/taos/dnode2/log:/var/log/taos td-3: image: tdengine/tdengine:$VERSION environment: TAOS_FQDN: "td-3" TAOS_FIRST_EP: "td-1" volumes: - - taosdata-td3:/var/lib/taos/ - - taoslog-td3:/var/log/taos/ -volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - taosdata-td3: - taoslog-td3: + - ~/data/taos/dnode3/data:/var/lib/taos + - ~/data/taos/dnode3/log:/var/log/taos ``` :::note @@ -382,24 +390,22 @@ networks: services: td-1: image: tdengine/tdengine:$VERSION - networks: - - inter environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ + # /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. 
you can modify ~/data/taos/dnode1/data to your own data directory + - ~/data/taos/dnode1/data:/var/lib/taos + # /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode1/log to your own log directory + - ~/data/taos/dnode1/log:/var/log/taos td-2: image: tdengine/tdengine:$VERSION - networks: - - inter environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ + - ~/data/taos/dnode2/data:/var/lib/taos + - ~/data/taos/dnode2/log:/var/log/taos adapter: image: tdengine/tdengine:$VERSION entrypoint: "taosadapter" diff --git a/docs/en/10-deployment/index.md b/docs/en/10-deployment/index.md index 865fbc2da5..d7c6a7a481 100644 --- a/docs/en/10-deployment/index.md +++ b/docs/en/10-deployment/index.md @@ -5,7 +5,7 @@ description: This document describes how to deploy a TDengine cluster on a serve TDengine has a native distributed design and provides the ability to scale out. A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source. -This document describes how to manually deploy a cluster on a host as well as how to deploy on Kubernetes and by using Helm. +This document describes how to manually deploy a cluster on a host, deploy a cluster with Docker, and deploy a cluster with Kubernetes and Helm. 
```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/en/14-reference/11-docker/_category_.yml b/docs/en/14-reference/11-docker/_category_.yml deleted file mode 100644 index f89ef7112c..0000000000 --- a/docs/en/14-reference/11-docker/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: TDengine Docker images \ No newline at end of file diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index e772447db0..50e91a04c0 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -28,6 +28,20 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043 注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令: + +```shell +docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ + -v ~/data/taos/dnode/log:/var/log/taos \ + -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043-6049/udp tdengine/tdengine +``` + +:::note + +* /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录 +* /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录 + ::: + 确定该容器已经启动并且在正常运行。 ```shell diff --git a/docs/zh/14-reference/11-docker/index.md b/docs/zh/10-deployment/02-docker.md similarity index 90% rename from docs/zh/14-reference/11-docker/index.md rename to docs/zh/10-deployment/02-docker.md index a6696977f9..2aee5cc152 100644 --- a/docs/zh/14-reference/11-docker/index.md +++ b/docs/zh/10-deployment/02-docker.md @@ -1,5 +1,6 @@ --- title: 用 Docker 部署 TDengine +sidebar_label: Docker description: '本章主要介绍如何在容器中启动 TDengine 服务并访问它' --- @@ -10,8 +11,16 @@ description: '本章主要介绍如何在容器中启动 TDengine 服务并访 TDengine 镜像启动时默认激活 HTTP 服务,使用下列命令 ```shell -docker run -d --name tdengine -p 6041:6041 tdengine/tdengine +docker run -d --name tdengine \ +-v ~/data/taos/dnode/data:/var/lib/taos \ +-v ~/data/taos/dnode/log:/var/log/taos \ +-p 
6041:6041 tdengine/tdengine ``` +:::note + +* /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录 +* /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录 + ::: 以上命令启动了一个名为“tdengine”的容器,并把其中的 HTTP 服务的端 6041 映射到了主机端口 6041。使用如下命令可以验证该容器中提供的 HTTP 服务是否可用: @@ -291,32 +300,30 @@ services: environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" + ports: + - 6041:6041 + - 6030:6030 volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ + # /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/data为你自己的数据目录 + - ~/data/taos/dnode1/data:/var/lib/taos + # /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/log为你自己的日志目录 + - ~/data/taos/dnode1/log:/var/log/taos td-2: image: tdengine/tdengine:$VERSION environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" volumes: - - taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ + - ~/data/taos/dnode2/data:/var/lib/taos + - ~/data/taos/dnode2/log:/var/log/taos td-3: image: tdengine/tdengine:$VERSION environment: TAOS_FQDN: "td-3" TAOS_FIRST_EP: "td-1" volumes: - - taosdata-td3:/var/lib/taos/ - - taoslog-td3:/var/log/taos/ -volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: - taosdata-td3: - taoslog-td3: + - ~/data/taos/dnode3/data:/var/lib/taos + - ~/data/taos/dnode3/log:/var/log/taos ``` :::note @@ -397,24 +404,22 @@ networks: services: td-1: image: tdengine/tdengine:$VERSION - networks: - - inter environment: TAOS_FQDN: "td-1" TAOS_FIRST_EP: "td-1" volumes: - - taosdata-td1:/var/lib/taos/ - - taoslog-td1:/var/log/taos/ + # /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/data为你自己的数据目录 + - ~/data/taos/dnode1/data:/var/lib/taos + # /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode1/log为你自己的日志目录 + - ~/data/taos/dnode1/log:/var/log/taos td-2: image: tdengine/tdengine:$VERSION - networks: - - inter environment: TAOS_FQDN: "td-2" TAOS_FIRST_EP: "td-1" volumes: - - 
taosdata-td2:/var/lib/taos/ - - taoslog-td2:/var/log/taos/ + - ~/data/taos/dnode2/data:/var/lib/taos + - ~/data/taos/dnode2/log:/var/log/taos adapter: image: tdengine/tdengine:$VERSION entrypoint: "taosadapter" @@ -446,11 +451,6 @@ services: >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; nginx -g 'daemon off;'", ] -volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: ``` ## 使用 docker swarm 部署 diff --git a/docs/zh/10-deployment/index.md b/docs/zh/10-deployment/index.md index 4ff1add779..f2ce519837 100644 --- a/docs/zh/10-deployment/index.md +++ b/docs/zh/10-deployment/index.md @@ -6,7 +6,7 @@ description: 部署 TDengine 集群的多种方式 TDengine 支持集群,提供水平扩展的能力。如果需要获得更高的处理能力,只需要多增加节点即可。TDengine 采用虚拟节点技术,将一个节点虚拟化为多个虚拟节点,以实现负载均衡。同时,TDengine可以将多个节点上的虚拟节点组成虚拟节点组,通过多副本机制,以保证供系统的高可用。TDengine的集群功能完全开源。 -本章节主要介绍如何在主机上人工部署集群,以及如何使用 Kubernetes 和 Helm部署集群。 +本章节主要介绍如何在主机上人工部署集群,docker部署,以及如何使用 Kubernetes 和 Helm部署集群。 ```mdx-code-block import DocCardList from '@theme/DocCardList'; diff --git a/docs/zh/14-reference/11-docker/_category_.yml b/docs/zh/14-reference/11-docker/_category_.yml deleted file mode 100644 index 68c16927f4..0000000000 --- a/docs/zh/14-reference/11-docker/_category_.yml +++ /dev/null @@ -1 +0,0 @@ -label: TDengine Docker 镜像 \ No newline at end of file From f53c6c1072a49c55034a165192638b8ef6b7c615 Mon Sep 17 00:00:00 2001 From: liuyuan <2805658706@qq.com> Date: Thu, 3 Aug 2023 14:38:42 +0800 Subject: [PATCH 018/123] docs: optimization docker deploy --- docs/en/10-deployment/02-docker.md | 5 ----- 1 file changed, 5 deletions(-) diff --git a/docs/en/10-deployment/02-docker.md b/docs/en/10-deployment/02-docker.md index ef4c9b0284..9eb551a277 100644 --- a/docs/en/10-deployment/02-docker.md +++ b/docs/en/10-deployment/02-docker.md @@ -437,11 +437,6 @@ services: >> /etc/nginx/nginx.conf;cat /etc/nginx/nginx.conf; nginx -g 'daemon off;'", ] -volumes: - taosdata-td1: - taoslog-td1: - taosdata-td2: - taoslog-td2: ``` ## Deploy with docker swarm From 
68a4540eee661fbdbe06dbb80c9e30c92ab29646 Mon Sep 17 00:00:00 2001 From: Yaqiang Li Date: Thu, 3 Aug 2023 14:48:19 +0800 Subject: [PATCH 019/123] Update 01-docker.md --- docs/en/05-get-started/01-docker.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index e3f40ac2c7..fca4572323 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -32,7 +32,7 @@ docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6049:6043-6049 -p 6043-6049:6043 Note that TDengine Server 3.0 uses TCP port 6030. Port 6041 is used by taosAdapter for the REST API service. Ports 6043 through 6049 are used by taosAdapter for other connectors. You can open these ports as needed. -If you need to persist data to a specific directory on your local machine, And then run the following command: +If you need to persist data to a specific directory on your local machine, please run the following command: ```shell docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ -v ~/data/taos/dnode/log:/var/log/taos \ @@ -40,8 +40,8 @@ docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ ``` :::note -* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. you can modify ~/data/taos/dnode/data to your own data directory -* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode/log to your own log directory +* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. Also you can modify ~/data/taos/dnode/data to your any local empty data directory +* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. 
you can modify ~/data/taos/dnode/log to your any local empty log directory ::: From ad3ae7621e60c9e4d6556555d2f3a92e08a15ae8 Mon Sep 17 00:00:00 2001 From: Yaqiang Li Date: Thu, 3 Aug 2023 14:49:40 +0800 Subject: [PATCH 020/123] Update 02-docker.md --- docs/en/10-deployment/02-docker.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/10-deployment/02-docker.md b/docs/en/10-deployment/02-docker.md index 9eb551a277..8366b40869 100644 --- a/docs/en/10-deployment/02-docker.md +++ b/docs/en/10-deployment/02-docker.md @@ -18,8 +18,8 @@ docker run -d --name tdengine \ ``` :::note -* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. you can modify ~/data/taos/dnode/data to your own data directory -* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode/log to your own log directory +* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. And also you can modify ~/data/taos/dnode/data to your any other local emtpy data directory +* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. And also you can modify ~/data/taos/dnode/log to your any other local empty log directory ::: From 5021d054565ff6c7f8b311b0e8edc9d5675fc8ab Mon Sep 17 00:00:00 2001 From: Yaqiang Li Date: Thu, 3 Aug 2023 14:51:14 +0800 Subject: [PATCH 021/123] Update index.md --- docs/en/10-deployment/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/10-deployment/index.md b/docs/en/10-deployment/index.md index d7c6a7a481..0079ad3740 100644 --- a/docs/en/10-deployment/index.md +++ b/docs/en/10-deployment/index.md @@ -5,7 +5,7 @@ description: This document describes how to deploy a TDengine cluster on a serve TDengine has a native distributed design and provides the ability to scale out. 
A few nodes can form a TDengine cluster. If you need higher processing power, you just need to add more nodes into the cluster. TDengine uses virtual node technology to virtualize a node into multiple virtual nodes to achieve load balancing. At the same time, TDengine can group virtual nodes on different nodes into virtual node groups, and use the replication mechanism to ensure the high availability of the system. The cluster feature of TDengine is completely open source. -This document describes how to manually deploy a cluster on a host, deploy a cluster with Docker, and deploy a cluster with Kubernetes and Helm. +This document describes how to manually deploy a cluster on a host directly and deploy a cluster with Docker, Kubernetes or Helm. ```mdx-code-block import DocCardList from '@theme/DocCardList'; From a00f53f8629286385eeb3e988bbf41521797e3af Mon Sep 17 00:00:00 2001 From: liuyuan <2805658706@qq.com> Date: Thu, 3 Aug 2023 15:58:22 +0800 Subject: [PATCH 022/123] docs: fix docker link --- docs/en/05-get-started/01-docker.md | 2 +- docs/zh/05-get-started/01-docker.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index e3f40ac2c7..0f61cef024 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -128,4 +128,4 @@ In the query above you are selecting the first timestamp (ts) in the interval, a ## Additional Information -For more information about deploying TDengine in a Docker environment, see [Using TDengine in Docker](../../reference/docker). +For more information about deploying TDengine in a Docker environment, see [Deploying TDengine with Docker](../../deployment/docker). 
diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index 50e91a04c0..d0091dd07a 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -122,4 +122,4 @@ SELECT FIRST(ts), AVG(current), MAX(voltage), MIN(phase) FROM test.d10 INTERVAL( ## 其它 -更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [在 Docker 下使用 TDengine](../../reference/docker)。 +更多关于在 Docker 环境下使用 TDengine 的细节,请参考 [用 Docker 部署 TDengine](../../deployment/docker)。 From 01fa0646f3ed1deed1a10d227668039cd6328f0e Mon Sep 17 00:00:00 2001 From: Cary Xu Date: Thu, 3 Aug 2023 16:21:39 +0800 Subject: [PATCH 023/123] platform supported by community --- docs/zh/14-reference/09-support-platform/index.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/docs/zh/14-reference/09-support-platform/index.md b/docs/zh/14-reference/09-support-platform/index.md index c54cbe12e6..ba3b3deee1 100644 --- a/docs/zh/14-reference/09-support-platform/index.md +++ b/docs/zh/14-reference/09-support-platform/index.md @@ -7,12 +7,13 @@ description: "TDengine 服务端、客户端和连接器支持的平台列表" | | **Windows server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 以上** | **统信 UOS** | **银河/中标麒麟** | **凝思 V60/V80** | **macOS** | | ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | ------------ | ----------------- | ---------------- | --------- | -| X64 | ● | ● | ● | ● | ● | ● | ● | ● | -| 树莓派 ARM64 | | | ● | | | | | | -| 华为云 ARM64 | | | | ● | | | | | -| M1 | | | | | | | | ● | +| X64 | ●/E | ●/E | ● | ● | ●/E | ●/E | ●/E | ● | +| 树莓派 ARM64 | | | ● | | | | | | +| 华为云 ARM64 | | | | ● | | | | | +| M1 | | | | | | | | ● | -注: ● 表示经过官方测试验证, ○ 表示非官方测试验证。 +注:1) ● 表示经过官方测试验证, ○ 表示非官方测试验证,E 表示仅企业版支持。 + 2) 社区版仅支持主流操作系统的较新版本,包括 Ubuntu 18+/CentOS 7+/RetHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS 等。如果有其他操作系统及版本的需求,请联系企业版支持。 ## TDengine 客户端和连接器支持的平台列表 From a531359781a04df6e541f88e9c08a4c19e1807f2 Mon Sep 
17 00:00:00 2001 From: liuyuan <2805658706@qq.com> Date: Thu, 3 Aug 2023 16:31:58 +0800 Subject: [PATCH 024/123] docs: fix note style --- docs/en/05-get-started/01-docker.md | 7 +++---- docs/en/10-deployment/02-docker.md | 7 ++++--- docs/zh/05-get-started/01-docker.md | 7 ++++--- docs/zh/10-deployment/02-docker.md | 8 +++++--- 4 files changed, 16 insertions(+), 13 deletions(-) diff --git a/docs/en/05-get-started/01-docker.md b/docs/en/05-get-started/01-docker.md index ec16950252..723194a325 100644 --- a/docs/en/05-get-started/01-docker.md +++ b/docs/en/05-get-started/01-docker.md @@ -40,11 +40,10 @@ docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ ``` :::note -* /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. Also you can modify ~/data/taos/dnode/data to your any local empty data directory -* /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode/log to your any local empty log directory - - ::: +- /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. Also you can modify ~/data/taos/dnode/data to your any local empty data directory +- /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. you can modify ~/data/taos/dnode/log to your any local empty log directory +::: Run the following command to ensure that your container is running: diff --git a/docs/en/10-deployment/02-docker.md b/docs/en/10-deployment/02-docker.md index 8366b40869..63153f3033 100644 --- a/docs/en/10-deployment/02-docker.md +++ b/docs/en/10-deployment/02-docker.md @@ -20,8 +20,8 @@ docker run -d --name tdengine \ * /var/lib/taos: TDengine's default data file directory. The location can be changed via [configuration file]. 
And also you can modify ~/data/taos/dnode/data to your any other local emtpy data directory * /var/log/taos: TDengine's default log file directory. The location can be changed via [configure file]. And also you can modify ~/data/taos/dnode/log to your any other local empty log directory - - ::: + +::: The above command starts a container named "tdengine" and maps the HTTP service port 6041 to the host port 6041. You can verify that the HTTP service provided in this container is available using the following command. @@ -323,7 +323,8 @@ services: - The `VERSION` environment variable is used to set the tdengine image tag - `TAOS_FIRST_EP` must be set on the newly created instance so that it can join the TDengine cluster; if there is a high availability requirement, `TAOS_SECOND_EP` needs to be used at the same time - ::: + +::: 2. Start the cluster diff --git a/docs/zh/05-get-started/01-docker.md b/docs/zh/05-get-started/01-docker.md index d0091dd07a..efc21d3296 100644 --- a/docs/zh/05-get-started/01-docker.md +++ b/docs/zh/05-get-started/01-docker.md @@ -38,9 +38,10 @@ docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ :::note -* /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录 -* /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录 - ::: +- /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录 +- /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录 + +::: 确定该容器已经启动并且在正常运行。 diff --git a/docs/zh/10-deployment/02-docker.md b/docs/zh/10-deployment/02-docker.md index 2aee5cc152..3dbfabca7d 100644 --- a/docs/zh/10-deployment/02-docker.md +++ b/docs/zh/10-deployment/02-docker.md @@ -18,9 +18,10 @@ docker run -d --name tdengine \ ``` :::note -* /var/lib/taos: TDengine 默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录 -* /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录 - ::: +- /var/lib/taos: TDengine 
默认数据文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/data为你自己的数据目录 +- /var/log/taos: TDengine 默认日志文件目录。可通过[配置文件]修改位置。你可以修改~/data/taos/dnode/log为你自己的日志目录 + +::: 以上命令启动了一个名为“tdengine”的容器,并把其中的 HTTP 服务的端 6041 映射到了主机端口 6041。使用如下命令可以验证该容器中提供的 HTTP 服务是否可用: @@ -330,6 +331,7 @@ services: * `VERSION` 环境变量被用来设置 tdengine image tag * 在新创建的实例上必须设置 `TAOS_FIRST_EP` 以使其能够加入 TDengine 集群;如果有高可用需求,则需要同时使用 `TAOS_SECOND_EP` + ::: 2. 启动集群 From 4cf7515ed39b9a92c8c177f056a89693e61b3029 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 3 Aug 2023 16:35:02 +0800 Subject: [PATCH 025/123] docs: geometry supplemental docs (#22298) * docs: geometry supplemental docs * docs: minor correction minor correction --------- Co-authored-by: danielclow <106956386+danielclow@users.noreply.github.com> --- docs/en/12-taos-sql/01-data-type.md | 2 +- docs/en/12-taos-sql/10-function.md | 158 ++++++++++++++++++++++++++++ docs/zh/12-taos-sql/10-function.md | 137 ++++++++++++++++++++++++ 3 files changed, 296 insertions(+), 1 deletion(-) diff --git a/docs/en/12-taos-sql/01-data-type.md b/docs/en/12-taos-sql/01-data-type.md index b9d51bcfcd..f81aaceca3 100644 --- a/docs/en/12-taos-sql/01-data-type.md +++ b/docs/en/12-taos-sql/01-data-type.md @@ -42,7 +42,7 @@ In TDengine, the data types below can be used when specifying a column or tag. | 14 | NCHAR | User Defined | Multi-byte string that can include multi byte characters like Chinese characters. Each character of NCHAR type consumes 4 bytes storage. The string value should be quoted with single quotes. Literal single quote inside the string must be preceded with backslash, like `\'`. The length must be specified when defining a column or tag of NCHAR type, for example nchar(10) means it can store at most 10 characters of nchar type and will consume fixed storage of 40 bytes. An error will be reported if the string value exceeds the length defined. | | 15 | JSON | | JSON type can only be used on tags. 
A tag of json type is excluded with any other tags of any other type. | | 16 | VARCHAR | User-defined | Alias of BINARY | -| 16 | GEOMETRY | User-defined | Geometry | +| 17 | GEOMETRY | User-defined | Geometry | :::note - Each row of the table cannot be longer than 48KB (64KB since version 3.0.5.0) (note that each BINARY/NCHAR/GEOMETRY column takes up an additional 2 bytes of storage space). diff --git a/docs/en/12-taos-sql/10-function.md b/docs/en/12-taos-sql/10-function.md index afc1581c22..ad6d5d77fb 100644 --- a/docs/en/12-taos-sql/10-function.md +++ b/docs/en/12-taos-sql/10-function.md @@ -1274,3 +1274,161 @@ SELECT SERVER_STATUS(); ``` **Description**: The server status. + + +## Geometry Functions + +### Geometry Input Functions + +Geometry input functions create geometry data from WKT. + +#### ST_GeomFromText + +```sql +ST_GeomFromText(VARCHAR WKT expr) +``` + +**Description**: Return a specified GEOMETRY value from Well-Known Text representation (WKT). + +**Return value type**: GEOMETRY + +**Applicable data types**: VARCHAR + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- The input can be one of WKT string, like POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION. +- The output is a GEOMETRY data type, internally defined as a binary string. + +### Geometry Output Functions + +Geometry output functions convert geometry data into WKT. + +#### ST_AsText + +```sql +ST_AsText(GEOMETRY geom) +``` + +**Description**: Return a specified Well-Known Text representation (WKT) value from GEOMETRY data. + +**Return value type**: VARCHAR + +**Applicable data types**: GEOMETRY + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- The output can be one of WKT string, like POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRYCOLLECTION.
+ +### Geometry Relationships Functions + +Geometry relationships functions determine spatial relationships between geometries. + +#### ST_Intersects + +```sql +ST_Intersects(GEOMETRY geomA, GEOMETRY geomB) +``` + +**Description**: Compares two geometries and returns true if they intersect. + +**Return value type**: BOOL + +**Applicable data types**: GEOMETRY, GEOMETRY + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- Geometries intersect if they have any point in common. + + +#### ST_Equals + +```sql +ST_Equals(GEOMETRY geomA, GEOMETRY geomB) +``` + +**Description**: Returns TRUE if the given geometries are "spatially equal". + +**Return value type**: BOOL + +**Applicable data types**: GEOMETRY, GEOMETRY + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- 'Spatially equal' means ST_Contains(A,B) = true and ST_Contains(B,A) = true, and the ordering of points can be different but represent the same geometry structure. + + +#### ST_Touches + +```sql +ST_Touches(GEOMETRY geomA, GEOMETRY geomB) +``` + +**Description**: Returns TRUE if A and B intersect, but their interiors do not intersect. + +**Return value type**: BOOL + +**Applicable data types**: GEOMETRY, GEOMETRY + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- A and B have at least one point in common, and the common points lie in at least one boundary. +- For Point/Point inputs the relationship is always FALSE, since points do not have a boundary. + + +#### ST_Covers + +```sql +ST_Covers(GEOMETRY geomA, GEOMETRY geomB) +``` + +**Description**: Returns TRUE if every point in Geometry B lies inside (intersects the interior or boundary of) Geometry A. + +**Return value type**: BOOL + +**Applicable data types**: GEOMETRY, GEOMETRY + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- A covers B means no point of B lies outside (in the exterior of) A. 
+ + +#### ST_Contains + +```sql +ST_Contains(GEOMETRY geomA, GEOMETRY geomB) +``` + +**Description**: Returns TRUE if geometry A contains geometry B. + +**Return value type**: BOOL + +**Applicable data types**: GEOMETRY, GEOMETRY + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- A contains B if and only if all points of B lie inside (i.e. in the interior or boundary of) A (or equivalently, no points of B lie in the exterior of A), and the interiors of A and B have at least one point in common. + + +#### ST_ContainsProperly + +```sql +ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB) +``` + +**Description**: Returns TRUE if every point of B lies inside A. + +**Return value type**: BOOL + +**Applicable data types**: GEOMETRY, GEOMETRY + +**Applicable table types**: standard tables and supertables + +**Explanations**: +- There is no point of B that lies on the boundary of A or in the exterior of A. diff --git a/docs/zh/12-taos-sql/10-function.md b/docs/zh/12-taos-sql/10-function.md index fc0cfbe330..773ea67989 100644 --- a/docs/zh/12-taos-sql/10-function.md +++ b/docs/zh/12-taos-sql/10-function.md @@ -1265,3 +1265,140 @@ SELECT SERVER_STATUS(); ``` **说明**:检测服务端是否所有 dnode 都在线,如果是则返回成功,否则返回无法建立连接的错误。 + + +## Geometry 函数 + +### Geometry 输入函数: + +#### ST_GeomFromText + +```sql +ST_GeomFromText(VARCHAR WKT expr) +``` + +**功能说明**:根据 Well-Known Text (WKT) 表示从指定的几何值创建几何数据。 + +**返回值类型**:GEOMETRY + +**适用数据类型**:VARCHAR + +**适用表类型**:标准表和超表 + +**使用说明**:输入可以是 WKT 字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。输出是以二进制字符串形式定义的 GEOMETRY 数据类型。 + +### Geometry 输出函数: + +#### ST_AsText + +```sql +ST_AsText(GEOMETRY geom) +``` + +**功能说明**:从几何数据中返回指定的 Well-Known Text (WKT) 表示。 + +**返回值类型**:VARCHAR + +**适用数据类型**:GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:输出可以是 WKT 
字符串之一,例如点(POINT)、线串(LINESTRING)、多边形(POLYGON)、多点集(MULTIPOINT)、多线串(MULTILINESTRING)、多多边形(MULTIPOLYGON)、几何集合(GEOMETRYCOLLECTION)。 + +### Geometry 关系函数: + +#### ST_Intersects + +```sql +ST_Intersects(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:比较两个几何对象,并在它们相交时返回 true。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:如果两个几何对象有任何一个共享点,则它们相交。 + +#### ST_Equals + +```sql +ST_Equals(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果给定的几何对象是"空间相等"的,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:"空间相等"意味着 ST_Contains(A,B) = true 和 ST_Contains(B,A) = true,并且点的顺序可能不同,但表示相同的几何结构。 + +#### ST_Touches + +```sql +ST_Touches(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 A 和 B 相交,但它们的内部不相交,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 和 B 至少有一个公共点,并且这些公共点位于至少一个边界中。对于点/点输入,关系始终为 FALSE,因为点没有边界。 + +#### ST_Covers + +```sql +ST_Covers(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 B 中的每个点都位于几何形状 A 内部(与内部或边界相交),则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 覆盖 B 意味着 B 中没有任何点位于 A 的外部。 + +#### ST_Contains + +```sql +ST_Contains(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果几何形状 A 包含几何形状 B,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:A 包含 B 当且仅当 B 的所有点位于 A 的内部(即位于内部或边界上)(或等效地,B 中没有任何点位于 A 的外部),并且 A 和 B 的内部至少有一个公共点。 + +#### ST_ContainsProperly + +```sql +ST_ContainsProperly(GEOMETRY geomA, GEOMETRY geomB) +``` + +**功能说明**:如果 B 的每个点都位于 A 内部,则返回 TRUE。 + +**返回值类型**:BOOL + +**适用数据类型**:GEOMETRY,GEOMETRY + +**适用表类型**:标准表和超表 + +**使用说明**:B 中没有任何点位于 A 的边界或外部。 From 556d73618acd664e57d9eba7e04dc5900fcd1e22 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 3 Aug 2023 16:40:46 +0800 Subject: [PATCH 026/123] docs: platform supported by community edition --- docs/en/14-reference/09-support-platform/index.md | 4 ++-- 1 file changed, 2 insertions(+), 2
deletions(-) diff --git a/docs/en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md index 21fe6fc1dc..f399cf67e6 100644 --- a/docs/en/14-reference/09-support-platform/index.md +++ b/docs/en/14-reference/09-support-platform/index.md @@ -7,10 +7,10 @@ description: This document describes the supported platforms for the TDengine se | | **Windows Server 2016/2019** | **Windows 10/11** | **CentOS 7.9/8** | **Ubuntu 18 or later** | **macOS** | | ------------ | ---------------------------- | ----------------- | ---------------- | ---------------- | --------- | -| X64 | ● | ● | ● | ● | ● | +| X64 | ●/E | ●/E | ● | ● | ● | | ARM64 | | | ● | | ● | -Note: ● means officially tested and verified, ○ means unofficially tested and verified. +Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, E means only supported by enterprise edition. 2) The community edition only supports newer versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RetHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS, etc. If you have requirements for other operating systems and editions, please contact support of the enterprise edition. 
## List of supported platforms for TDengine clients and connectors From 23b14085a66450f70ecec5b1fdd666c3090b80b2 Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 3 Aug 2023 16:44:02 +0800 Subject: [PATCH 027/123] docs: update the grammar --- docs/en/14-reference/09-support-platform/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/14-reference/09-support-platform/index.md b/docs/en/14-reference/09-support-platform/index.md index f399cf67e6..779882f582 100644 --- a/docs/en/14-reference/09-support-platform/index.md +++ b/docs/en/14-reference/09-support-platform/index.md @@ -10,7 +10,7 @@ description: This document describes the supported platforms for the TDengine se | X64 | ●/E | ●/E | ● | ● | ● | | ARM64 | | | ● | | ● | -Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, E means only supported by enterprise edition. 2) The community edition only supports newer versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RetHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS, etc. If you have requirements for other operating systems and editions, please contact support of the enterprise edition. +Note: 1) ● means officially tested and verified, ○ means unofficially tested and verified, E means only supported by the enterprise edition. 2) The community edition only supports newer versions of mainstream operating systems, including Ubuntu 18+/CentOS 7+/RedHat/Debian/CoreOS/FreeBSD/OpenSUSE/SUSE Linux/Fedora/macOS, etc. If you have requirements for other operating systems and editions, please contact support of the enterprise edition.
## List of supported platforms for TDengine clients and connectors From 1cb1451e41f11af3e3974fe37d3121782cd368a8 Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 3 Aug 2023 18:26:29 +0800 Subject: [PATCH 028/123] fix: set nextIndex when get neighbouring index --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index e1756333c5..b68f82d847 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1121,6 +1121,8 @@ static bool getNeighborBlockOfSameTable(SFileDataBlockInfo* pBlockInfo, STableBl SBrinRecord* p = taosArrayGet(pTableBlockScanInfo->pBlockList, pBlockInfo->tbBlockIdx + step); memcpy(pRecord, p, sizeof(SBrinRecord)); + *nextIndex = pBlockInfo->tbBlockIdx + step; + // tMapDataGetItemByIdx(&pTableBlockScanInfo->mapData, pIndex->ordinalIndex, pBlock, tGetDataBlk); return true; } From f7dc9497ef27016f04c4cbc04e1be928dc29753a Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 3 Aug 2023 20:28:34 +0800 Subject: [PATCH 029/123] add stream verion --- source/dnode/mnode/impl/src/mndStream.c | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 06ab1bb638..cf1d445149 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -28,7 +28,7 @@ #include "parser.h" #include "tname.h" -#define MND_STREAM_VER_NUMBER 2 +#define MND_STREAM_VER_NUMBER 3 #define MND_STREAM_RESERVE_SIZE 64 #define MND_STREAM_MAX_NUM 60 @@ -142,8 +142,8 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) { int8_t sver = 0; if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto STREAM_DECODE_OVER; - if (sver != 1 && sver != 2) { - terrno = TSDB_CODE_SDB_INVALID_DATA_VER; + if (sver != MND_STREAM_VER_NUMBER) { + terrno = 0; goto STREAM_DECODE_OVER; } @@ -1264,7 +1264,7 @@ static int32_t 
mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock // task id pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - char idstr[128] = {0}; + char idstr[128] = {0}; int32_t len = tintToHex(pTask->id.taskId, &idstr[4]); idstr[2] = '0'; idstr[3] = 'x'; @@ -1304,7 +1304,7 @@ static int32_t mndRetrieveStreamTask(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock colDataSetVal(pColInfo, numOfRows, (const char *)&level, false); // status - char status[20 + VARSTR_HEADER_SIZE] = {0}; + char status[20 + VARSTR_HEADER_SIZE] = {0}; int8_t taskStatus = atomic_load_8(&pTask->status.taskStatus); if (taskStatus == TASK_STATUS__NORMAL) { memcpy(varDataVal(status), "normal", 6); @@ -1370,7 +1370,7 @@ static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) { return 0; } -int32_t mndPauseAllStreamTaskImpl(STrans *pTrans, SArray* tasks) { +int32_t mndPauseAllStreamTaskImpl(STrans *pTrans, SArray *tasks) { int32_t size = taosArrayGetSize(tasks); for (int32_t i = 0; i < size; i++) { SArray *pTasks = taosArrayGetP(tasks, i); @@ -1491,7 +1491,6 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { return TSDB_CODE_ACTION_IN_PROGRESS; } - static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t igUntreated) { SVResumeStreamTaskReq *pReq = taosMemoryCalloc(1, sizeof(SVResumeStreamTaskReq)); if (pReq == NULL) { From 658c64f16fd7af7945eaf1a5f4001f393d1b495a Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 3 Aug 2023 13:00:06 +0800 Subject: [PATCH 030/123] fix: make statekey of state window operator not target node --- source/libs/executor/src/timewindowoperator.c | 2 +- source/libs/planner/src/planPhysiCreater.c | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 03f24919f2..7b76c5090f 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1858,7 +1858,7 @@ 
SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi } int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId; - SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr; + SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey); if (pStateNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; diff --git a/source/libs/planner/src/planPhysiCreater.c b/source/libs/planner/src/planPhysiCreater.c index 1b92dcd2e7..06859e195d 100644 --- a/source/libs/planner/src/planPhysiCreater.c +++ b/source/libs/planner/src/planPhysiCreater.c @@ -1303,9 +1303,9 @@ static int32_t createStateWindowPhysiNode(SPhysiPlanContext* pCxt, SNodeList* pC if (TSDB_CODE_SUCCESS == code) { code = setNodeSlotId(pCxt, pChildTupe->dataBlockId, -1, pStateKey, &pState->pStateKey); - if (TSDB_CODE_SUCCESS == code) { - code = addDataBlockSlot(pCxt, &pState->pStateKey, pState->window.node.pOutputDataBlockDesc); - } + // if (TSDB_CODE_SUCCESS == code) { + // code = addDataBlockSlot(pCxt, &pState->pStateKey, pState->window.node.pOutputDataBlockDesc); + // } } if (TSDB_CODE_SUCCESS == code) { From 7676afff1979e625d58ed9eb1ccb2f7334539ae9 Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 3 Aug 2023 15:17:27 +0800 Subject: [PATCH 031/123] fix: stream use column node --- source/libs/executor/src/timewindowoperator.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 7b76c5090f..d2e385200d 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -1858,7 +1858,7 @@ SOperatorInfo* createStatewindowOperatorInfo(SOperatorInfo* downstream, SStateWi } int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId; - SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey); + SColumnNode* pColNode = 
(SColumnNode*)(pStateNode->pStateKey); if (pStateNode->window.pExprs != NULL) { int32_t numOfScalarExpr = 0; @@ -4453,7 +4453,7 @@ SOperatorInfo* createStreamStateAggOperatorInfo(SOperatorInfo* downstream, SPhys SExecTaskInfo* pTaskInfo, SReadHandle* pHandle) { SStreamStateWinodwPhysiNode* pStateNode = (SStreamStateWinodwPhysiNode*)pPhyNode; int32_t tsSlotId = ((SColumnNode*)pStateNode->window.pTspk)->slotId; - SColumnNode* pColNode = (SColumnNode*)((STargetNode*)pStateNode->pStateKey)->pExpr; + SColumnNode* pColNode = (SColumnNode*)(pStateNode->pStateKey); int32_t code = TSDB_CODE_SUCCESS; SStreamStateAggOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamStateAggOperatorInfo)); From 2deeec93bf7d5c5a5ede8f9e542091abe6ce63e3 Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 3 Aug 2023 21:38:29 +0800 Subject: [PATCH 032/123] fix: explain with state window --- source/libs/command/src/explain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/command/src/explain.c b/source/libs/command/src/explain.c index 884c0f7b20..9afe79f775 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -1009,7 +1009,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode; EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT, - nodesGetNameFromColumnNode(((STargetNode *)pStateNode->pStateKey)->pExpr)); + nodesGetNameFromColumnNode((SColumnNode*)pStateNode->pStateKey)); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); From cb9c6c12561cb57189508831dae34a7c725b21bd Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 3 Aug 2023 21:47:33 +0800 Subject: [PATCH 033/123] fix: compilation error --- source/libs/command/src/explain.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/command/src/explain.c 
b/source/libs/command/src/explain.c index 9afe79f775..e917de33dd 100644 --- a/source/libs/command/src/explain.c +++ b/source/libs/command/src/explain.c @@ -1009,7 +1009,7 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i SStateWinodwPhysiNode *pStateNode = (SStateWinodwPhysiNode *)pNode; EXPLAIN_ROW_NEW(level, EXPLAIN_STATE_WINDOW_FORMAT, - nodesGetNameFromColumnNode((SColumnNode*)pStateNode->pStateKey)); + nodesGetNameFromColumnNode(pStateNode->pStateKey)); EXPLAIN_ROW_APPEND(EXPLAIN_LEFT_PARENTHESIS_FORMAT); if (pResNode->pExecInfo) { QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen)); From 97b15ed718f2f6821f7a46c305a733407fc56ab0 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 27 Jul 2023 17:08:43 +0800 Subject: [PATCH 034/123] fix: delete SLDataItem if stt level files num is smaller --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 49992eb58f..1ba1d3d366 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -814,6 +814,12 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) { SLDataIter *pIter = taosMemoryCalloc(1, sizeof(SLDataIter)); taosArrayPush(pList, &pIter); } + } else if (numOfIter > TARRAY2_SIZE(pSttLevel->fobjArr)){ + int32_t inc = numOfIter - TARRAY2_SIZE(pSttLevel->fobjArr); + for (int i = 0; i < inc; ++i) { + SLDataIter *pIter = taosArrayPop(pList); + destroyLDataIter(pIter); + } } for (int32_t i = 0; i < TARRAY2_SIZE(pSttLevel->fobjArr); ++i) { // open all last file From 5322e38dfec7a8085ac2281b2f3c5188d34ed2be Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 27 Jul 2023 17:23:35 +0800 Subject: [PATCH 035/123] fix: add init add check it --- source/dnode/vnode/src/inc/tsdb.h | 1 + source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 2 +- 2 files changed, 2 
insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index fa42248c69..02d4a3cdd4 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -792,6 +792,7 @@ typedef struct SLDataIter { SVersionRange verRange; SSttBlockLoadInfo *pBlockLoadInfo; bool ignoreEarlierTs; + bool bInit; struct SSttFileReader *pReader; } SLDataIter; diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 1ba1d3d366..f616f92ece 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -850,7 +850,7 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) { if (code != TSDB_CODE_SUCCESS) { goto _end; } - + pIter->bInit = true; bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr); if (hasVal) { tMergeTreeAddIter(pMTree, pIter); From b85c1ae8a13b65442b62e4f655b919db41221114 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Thu, 27 Jul 2023 17:29:31 +0800 Subject: [PATCH 036/123] fix: remove the core --- source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index f616f92ece..12ce435970 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -146,8 +146,10 @@ void *destroySttBlockReader(SArray *pLDataIterArray, int64_t *blocks, double *el SArray *pList = taosArrayGetP(pLDataIterArray, i); for (int32_t j = 0; j < taosArrayGetSize(pList); ++j) { SLDataIter *pIter = taosArrayGetP(pList, j); - *el += pIter->pBlockLoadInfo->elapsedTime; - *blocks += pIter->pBlockLoadInfo->loadBlocks; + if (pIter->bInit) { + *el += pIter->pBlockLoadInfo->elapsedTime; + *blocks += pIter->pBlockLoadInfo->loadBlocks; + } destroyLDataIter(pIter); } taosArrayDestroy(pList); From 
fef091b13d9ecf896362a6e391eb57d8fb256e20 Mon Sep 17 00:00:00 2001 From: slzhou Date: Wed, 2 Aug 2023 11:02:27 +0800 Subject: [PATCH 037/123] fix: keep the stt iterator valid --- source/dnode/vnode/src/inc/tsdb.h | 1 - source/dnode/vnode/src/tsdb/tsdbMergeTree.c | 18 ++++++++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h index 02d4a3cdd4..fa42248c69 100644 --- a/source/dnode/vnode/src/inc/tsdb.h +++ b/source/dnode/vnode/src/inc/tsdb.h @@ -792,7 +792,6 @@ typedef struct SLDataIter { SVersionRange verRange; SSttBlockLoadInfo *pBlockLoadInfo; bool ignoreEarlierTs; - bool bInit; struct SSttFileReader *pReader; } SLDataIter; diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c index 12ce435970..ce6ee4345e 100644 --- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c +++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c @@ -146,10 +146,8 @@ void *destroySttBlockReader(SArray *pLDataIterArray, int64_t *blocks, double *el SArray *pList = taosArrayGetP(pLDataIterArray, i); for (int32_t j = 0; j < taosArrayGetSize(pList); ++j) { SLDataIter *pIter = taosArrayGetP(pList, j); - if (pIter->bInit) { - *el += pIter->pBlockLoadInfo->elapsedTime; - *blocks += pIter->pBlockLoadInfo->loadBlocks; - } + *el += pIter->pBlockLoadInfo->elapsedTime; + *blocks += pIter->pBlockLoadInfo->loadBlocks; destroyLDataIter(pIter); } taosArrayDestroy(pList); @@ -446,6 +444,13 @@ int32_t tLDataIterOpen2(struct SLDataIter *pIter, SSttFileReader *pSttFileReader pIter->pReader = pSttFileReader; pIter->pBlockLoadInfo = pBlockLoadInfo; + if (pIter->pReader == NULL) { + tsdbError("stt file reader is null, %s", idStr); + pIter->pSttBlk = NULL; + pIter->iSttBlk = -1; + return TSDB_CODE_SUCCESS; + } + if (!pBlockLoadInfo->sttBlockLoaded) { int64_t st = taosGetTimestampUs(); @@ -837,7 +842,8 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) { code = 
tsdbSttFileReaderOpen(pSttLevel->fobjArr->data[i]->fname, &conf, &pSttFileReader); if (code != TSDB_CODE_SUCCESS) { - return code; + tsdbError("open stt file reader error. file name %s, code %s, %s", pSttLevel->fobjArr->data[i]->fname, + tstrerror(code), pMTree->idStr); } } @@ -852,7 +858,7 @@ int32_t tMergeTreeOpen2(SMergeTree *pMTree, SMergeTreeConf *pConf) { if (code != TSDB_CODE_SUCCESS) { goto _end; } - pIter->bInit = true; + bool hasVal = tLDataIterNextRow(pIter, pMTree->idStr); if (hasVal) { tMergeTreeAddIter(pMTree, pIter); From 9773bb25f961083c89d9911724ed404eda8ad558 Mon Sep 17 00:00:00 2001 From: shenglian-zhou Date: Fri, 4 Aug 2023 09:55:08 +0800 Subject: [PATCH 038/123] Update 06-select.md --- docs/zh/12-taos-sql/06-select.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/06-select.md b/docs/zh/12-taos-sql/06-select.md index 5bc67755f0..9560c3c4df 100644 --- a/docs/zh/12-taos-sql/06-select.md +++ b/docs/zh/12-taos-sql/06-select.md @@ -315,7 +315,7 @@ WHERE (column|tbname) match/MATCH/nmatch/NMATCH _regex_ ### 使用限制 -只能针对表名(即 tbname 筛选)、binary/nchar 类型标签值进行正则表达式过滤,不支持普通列的过滤。 +只能针对表名(即 tbname 筛选)、binary/nchar 类型值进行正则表达式过滤。 正则匹配字符串长度不能超过 128 字节。可以通过参数 _maxRegexStringLen_ 设置和调整最大允许的正则匹配字符串,该参数是客户端配置参数,需要重启才能生效。 From f3133f4c909382c97cd267d5f9a60dc9997d0241 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 10:01:11 +0800 Subject: [PATCH 039/123] fix: modify english version --- docs/en/12-taos-sql/16-operators.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/12-taos-sql/16-operators.md b/docs/en/12-taos-sql/16-operators.md index 9328d1688a..6b7adb4a3d 100644 --- a/docs/en/12-taos-sql/16-operators.md +++ b/docs/en/12-taos-sql/16-operators.md @@ -54,7 +54,7 @@ LIKE is used together with wildcards to match strings. Its usage is described as MATCH and NMATCH are used together with regular expressions to match strings. 
Their usage is described as follows: - Use POSIX regular expression syntax. For more information, see Regular Expressions. -- Regular expression can be used against only table names, i.e. `tbname`, and tags of binary/nchar types, but can't be used against data columns. +- Regular expression can be used against only table names, i.e. `tbname`, and tags/columns of binary/nchar types. - The maximum length of regular expression string is 128 bytes. Configuration parameter `maxRegexStringLen` can be used to set the maximum allowed regular expression. It's a configuration parameter on the client side, and will take effect after restarting the client. ## Logical Operators From d9ef9c45841afe489d6e2593468520414d28b82e Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 4 Aug 2023 10:15:15 +0800 Subject: [PATCH 040/123] fix: fix _wstart,_wennd not matching with interval for ns/us databases --- source/libs/function/src/builtins.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/source/libs/function/src/builtins.c b/source/libs/function/src/builtins.c index 6eb2be34b3..844bfb07fc 100644 --- a/source/libs/function/src/builtins.c +++ b/source/libs/function/src/builtins.c @@ -468,7 +468,8 @@ static int32_t translateStddevMerge(SFunctionNode* pFunc, char* pErrBuf, int32_t static int32_t translateWduration(SFunctionNode* pFunc, char* pErrBuf, int32_t len) { // pseudo column do not need to check parameters - pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT}; + pFunc->node.resType = (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_BIGINT].bytes, .type = TSDB_DATA_TYPE_BIGINT, + .precision = pFunc->node.resType.precision}; return TSDB_CODE_SUCCESS; } @@ -491,7 +492,8 @@ static int32_t translateTimePseudoColumn(SFunctionNode* pFunc, char* pErrBuf, in // pseudo column do not need to check parameters pFunc->node.resType = - (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = 
TSDB_DATA_TYPE_TIMESTAMP}; + (SDataType){.bytes = tDataTypes[TSDB_DATA_TYPE_TIMESTAMP].bytes, .type = TSDB_DATA_TYPE_TIMESTAMP, + .precision = pFunc->node.resType.precision}; return TSDB_CODE_SUCCESS; } From 95205694962414def797e7a1d1dc4ebcebe73300 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 4 Aug 2023 10:17:26 +0800 Subject: [PATCH 041/123] add test cases --- tests/system-test/99-TDcase/TS-3311.py | 121 +++++++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 tests/system-test/99-TDcase/TS-3311.py diff --git a/tests/system-test/99-TDcase/TS-3311.py b/tests/system-test/99-TDcase/TS-3311.py new file mode 100644 index 0000000000..ce39597a7b --- /dev/null +++ b/tests/system-test/99-TDcase/TS-3311.py @@ -0,0 +1,121 @@ +import taos +import sys +import time +import socket +import os +import threading + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * + +class TDTestCase: + hostname = socket.gethostname() + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + #tdSql.init(conn.cursor()) + tdSql.init(conn.cursor(), logSql) # output sql.txt file + + def getBuildPath(self): + selfPath = os.path.dirname(os.path.realpath(__file__)) + + if ("community" in selfPath): + projPath = selfPath[:selfPath.find("community")] + else: + projPath = selfPath[:selfPath.find("tests")] + + for root, dirs, files in os.walk(projPath): + if ("taosd" in files or "taosd.exe" in files): + rootRealPath = os.path.dirname(os.path.realpath(root)) + if ("packaging" not in rootRealPath): + buildPath = root[:len(root) - len("/build/bin")] + break + return buildPath + + def create_tables(self): + tdSql.execute("create database if not exists dbus precision 'us'") + tdSql.execute("create database if not exists dbns precision 'ns'") + + tdSql.execute("use dbus") + + tdSql.execute(f"CREATE STABLE `stb_us` (`ts` TIMESTAMP, `ip_value` FLOAT, `ip_quality` 
INT) TAGS (`t1` INT)") + tdSql.execute(f"CREATE TABLE `ctb1_us` USING `stb_us` (`t1`) TAGS (1)") + tdSql.execute(f"CREATE TABLE `ctb2_us` USING `stb_us` (`t1`) TAGS (2)") + + tdSql.execute("use dbns") + + tdSql.execute(f"CREATE STABLE `stb_ns` (`ts` TIMESTAMP, `ip_value` FLOAT, `ip_quality` INT) TAGS (`t1` INT)") + tdSql.execute(f"CREATE TABLE `ctb1_ns` USING `stb_ns` (`t1`) TAGS (1)") + tdSql.execute(f"CREATE TABLE `ctb2_ns` USING `stb_ns` (`t1`) TAGS (2)") + + def insert_data(self): + tdLog.debug("start to insert data ............") + + tdSql.execute(f"INSERT INTO `dbus`.`ctb1_us` VALUES ('2023-07-01 00:00:00.000', 10.30000, 100)") + tdSql.execute(f"INSERT INTO `dbus`.`ctb2_us` VALUES ('2023-08-01 00:00:00.000', 20.30000, 200)") + + tdSql.execute(f"INSERT INTO `dbns`.`ctb1_ns` VALUES ('2023-07-01 00:00:00.000', 10.30000, 100)") + tdSql.execute(f"INSERT INTO `dbns`.`ctb2_ns` VALUES ('2023-08-01 00:00:00.000', 20.30000, 200)") + + tdLog.debug("insert data ............ [OK]") + + def run(self): + tdSql.prepare() + self.create_tables() + self.insert_data() + tdLog.printNoPrefix("======== test TS-3311") + + # test ns + tdSql.query(f"select _wstart, _wend, count(*) from `dbns`.`stb_ns` interval(1n)") + tdSql.checkRows(2) + + tdSql.checkData(0, 0, '2023-07-01 00:00:00.000000000') + tdSql.checkData(1, 0, '2023-08-01 00:00:00.000000000') + + tdSql.checkData(0, 1, '2023-08-01 00:00:00.000000000') + tdSql.checkData(1, 1, '2023-09-01 00:00:00.000000000') + + tdSql.query(f"select _wstart, _wend, count(*) from `dbns`.`stb_ns` interval(12n)") + tdSql.checkRows(1) + + tdSql.checkData(0, 0, '2023-01-01 00:00:00.000000000') + tdSql.checkData(0, 1, '2024-01-01 00:00:00.000000000') + + tdSql.query(f"select _wstart, _wend, count(*) from `dbns`.`stb_ns` interval(1y)") + tdSql.checkRows(1) + + tdSql.checkData(0, 0, '2023-01-01 00:00:00.000000000') + tdSql.checkData(0, 1, '2024-01-01 00:00:00.000000000') + + + ## test us + tdSql.query(f"select _wstart, _wend, count(*) from 
`dbus`.`stb_us` interval(1n)") + tdSql.checkRows(2) + + tdSql.checkData(0, 0, '2023-07-01 00:00:00.000000') + tdSql.checkData(1, 0, '2023-08-01 00:00:00.000000') + + tdSql.checkData(0, 1, '2023-08-01 00:00:00.000000') + tdSql.checkData(1, 1, '2023-09-01 00:00:00.000000') + + tdSql.query(f"select _wstart, _wend, count(*) from `dbus`.`stb_us` interval(12n)") + tdSql.checkRows(1) + + tdSql.checkData(0, 0, '2023-01-01 00:00:00.000000') + tdSql.checkData(0, 1, '2024-01-01 00:00:00.000000') + + tdSql.query(f"select _wstart, _wend, count(*) from `dbus`.`stb_us` interval(1y)") + tdSql.checkRows(1) + + tdSql.checkData(0, 0, '2023-01-01 00:00:00.000000') + tdSql.checkData(0, 1, '2024-01-01 00:00:00.000000') + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From 666a9a17af5482a6cd78c91b2fbbf8c8f88f9a69 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 4 Aug 2023 10:17:38 +0800 Subject: [PATCH 042/123] add test cases --- tests/parallel_test/cases.task | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1f0dd3bf26..fb67ee51cd 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -135,6 +135,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TD-21561.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3404.py ,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3581.py +,,y,system-test,./pytest.sh python3 ./test.py -f 99-TDcase/TS-3311.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/taosShell.py From 9cb481dd6ee4636efffda723cb7804e90fc9e2bd Mon Sep 17 00:00:00 2001 From: liuyao <54liuyao@163.com> Date: Fri, 4 Aug 2023 10:22:45 +0800 Subject: [PATCH 043/123] use stream task --- source/libs/stream/src/streamExec.c | 5 +++-- 1 
file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index d3ab7e820a..cbe7b95bf0 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -364,14 +364,15 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { // 7. pause allowed. streamTaskEnablePause(pStreamTask); - if (taosQueueEmpty(pTask->inputQueue->queue)) { + if (taosQueueEmpty(pStreamTask->inputQueue->queue)) { SStreamRefDataBlock* pItem = taosAllocateQitem(sizeof(SStreamRefDataBlock), DEF_QITEM, 0);; SSDataBlock* pDelBlock = createSpecialDataBlock(STREAM_DELETE_DATA); pDelBlock->info.rows = 0; pDelBlock->info.version = 0; pItem->type = STREAM_INPUT__REF_DATA_BLOCK; pItem->pBlock = pDelBlock; - tAppendDataToInputQueue(pTask, (SStreamQueueItem*)pItem); + int32_t code = tAppendDataToInputQueue(pStreamTask, (SStreamQueueItem*)pItem); + qDebug("s-task:%s append dummy delete block,res:%d", pStreamTask->id.idStr, code); } streamSchedExec(pStreamTask); From dbc48f50f904355d54a30275c5e07ef4bababc89 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Fri, 4 Aug 2023 11:27:52 +0800 Subject: [PATCH 044/123] remove extra character --- packaging/tools/install.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 961631561e..2741291f34 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -912,10 +912,10 @@ function updateProduct() { else echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ./${serverName2}${NC}" [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter &${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter ${NC}" fi - echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper &${NC}" + echo -e "${GREEN_DARK}To 
enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper ${NC}" if [ ${openresty_work} = 'true' ]; then echo -e "${GREEN_DARK}To access ${productName2} ${NC}: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}" @@ -996,10 +996,10 @@ function installProduct() { else echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${serverName2}${NC}" [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter &${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter ${NC}" fi - echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper &${NC}" + echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper ${NC}" if [ ! -z "$firstEp" ]; then tmpFqdn=${firstEp%%:*} From 973bb073a2ec804847531a83e0778aa9d8063328 Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 27 Jul 2023 09:07:13 +0800 Subject: [PATCH 045/123] enhance: subquery can use expr primary key +/- value as primary key --- source/libs/parser/src/parTranslater.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..554dc7cce8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -821,7 +821,19 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { FUNCTION_TYPE_IROWTS == pFunc->funcType) { return true; } - } + } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; + } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } From 
6dc93a8decc294c96b2b005afefbf711dcb1d4a5 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 4 Aug 2023 13:52:20 +0800 Subject: [PATCH 046/123] add stream ver --- include/libs/stream/tstream.h | 28 +-- source/dnode/mnode/impl/src/mndDef.c | 207 +++++++++--------- source/dnode/mnode/impl/src/mndStream.c | 10 +- source/libs/stream/src/streamBackendRocksdb.c | 11 +- source/libs/stream/src/streamMeta.c | 21 +- source/libs/stream/src/streamTask.c | 11 +- 6 files changed, 156 insertions(+), 132 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index b4ae30910c..b241ae9b41 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -30,6 +30,7 @@ extern "C" { typedef struct SStreamTask SStreamTask; +#define SSTREAM_TASK_VER 1 enum { STREAM_STATUS__NORMAL = 0, STREAM_STATUS__STOP, @@ -266,13 +267,13 @@ typedef struct SCheckpointInfo { } SCheckpointInfo; typedef struct SStreamStatus { - int8_t taskStatus; - int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set - int8_t schedStatus; - int8_t keepTaskStatus; - bool transferState; - int8_t timerActive; // timer is active - int8_t pauseAllowed; // allowed task status to be set to be paused + int8_t taskStatus; + int8_t downstreamReady; // downstream tasks are all ready now, if this flag is set + int8_t schedStatus; + int8_t keepTaskStatus; + bool transferState; + int8_t timerActive; // timer is active + int8_t pauseAllowed; // allowed task status to be set to be paused } SStreamStatus; typedef struct SHistDataRange { @@ -309,6 +310,7 @@ typedef struct { } STaskTimestamp; struct SStreamTask { + int64_t ver; SStreamId id; SSTaskBasicInfo info; STaskOutputInfo outputInfo; @@ -589,10 +591,10 @@ bool streamTaskShouldPause(const SStreamStatus* pStatus); bool streamTaskIsIdle(const SStreamTask* pTask); int32_t streamTaskEndScanWAL(SStreamTask* pTask); -SStreamChildEpInfo * streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId); 
-int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize); +SStreamChildEpInfo* streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId); +int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize); -char* createStreamTaskIdStr(int64_t streamId, int32_t taskId); +char* createStreamTaskIdStr(int64_t streamId, int32_t taskId); // recover and fill history void streamTaskCheckDownstreamTasks(SStreamTask* pTask); @@ -628,7 +630,8 @@ int32_t streamDispatchTransferStateMsg(SStreamTask* pTask); // agg level int32_t streamTaskScanHistoryPrepare(SStreamTask* pTask); -int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq *pReq, SRpcHandleInfo* pRpcInfo); +int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistoryFinishReq* pReq, + SRpcHandleInfo* pRpcInfo); int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask); // stream task meta @@ -642,7 +645,7 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded); int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId); -int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); // todo remove it +int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); // todo remove it SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); @@ -659,7 +662,6 @@ int32_t streamTaskReleaseState(SStreamTask* pTask); int32_t streamTaskReloadState(SStreamTask* pTask); int32_t streamAlignTransferState(SStreamTask* pTask); - #ifdef __cplusplus } #endif diff --git a/source/dnode/mnode/impl/src/mndDef.c b/source/dnode/mnode/impl/src/mndDef.c index a8a719edda..3dab144eef 100644 --- a/source/dnode/mnode/impl/src/mndDef.c +++ b/source/dnode/mnode/impl/src/mndDef.c @@ -70,6 +70,7 @@ int32_t tEncodeSStreamObj(SEncoder 
*pEncoder, const SStreamObj *pObj) { if (tEncodeI32(pEncoder, innerSz) < 0) return -1; for (int32_t j = 0; j < innerSz; j++) { SStreamTask *pTask = taosArrayGetP(pArray, j); + pTask->ver = SSTREAM_TASK_VER; if (tEncodeStreamTask(pEncoder, pTask) < 0) return -1; } } @@ -154,7 +155,7 @@ int32_t tDecodeSStreamObj(SDecoder *pDecoder, SStreamObj *pObj, int32_t sver) { return 0; } -static void* freeStreamTasks(SArray* pTaskLevel) { +static void *freeStreamTasks(SArray *pTaskLevel) { int32_t numOfLevel = taosArrayGetSize(pTaskLevel); for (int32_t i = 0; i < numOfLevel; i++) { SArray *pLevel = taosArrayGetP(pTaskLevel, i); @@ -192,14 +193,14 @@ SMqVgEp *tCloneSMqVgEp(const SMqVgEp *pVgEp) { SMqVgEp *pVgEpNew = taosMemoryMalloc(sizeof(SMqVgEp)); if (pVgEpNew == NULL) return NULL; pVgEpNew->vgId = pVgEp->vgId; -// pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg); + // pVgEpNew->qmsg = taosStrdup(pVgEp->qmsg); pVgEpNew->epSet = pVgEp->epSet; return pVgEpNew; } void tDeleteSMqVgEp(SMqVgEp *pVgEp) { if (pVgEp) { -// taosMemoryFreeClear(pVgEp->qmsg); + // taosMemoryFreeClear(pVgEp->qmsg); taosMemoryFree(pVgEp); } } @@ -207,14 +208,14 @@ void tDeleteSMqVgEp(SMqVgEp *pVgEp) { int32_t tEncodeSMqVgEp(void **buf, const SMqVgEp *pVgEp) { int32_t tlen = 0; tlen += taosEncodeFixedI32(buf, pVgEp->vgId); -// tlen += taosEncodeString(buf, pVgEp->qmsg); + // tlen += taosEncodeString(buf, pVgEp->qmsg); tlen += taosEncodeSEpSet(buf, &pVgEp->epSet); return tlen; } void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) { buf = taosDecodeFixedI32(buf, &pVgEp->vgId); - if(sver == 1){ + if (sver == 1) { uint64_t size = 0; buf = taosDecodeVariantU64(buf, &size); buf = POINTER_SHIFT(buf, size); @@ -223,7 +224,7 @@ void *tDecodeSMqVgEp(const void *buf, SMqVgEp *pVgEp, int8_t sver) { return (void *)buf; } -SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) { +SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char *cgroup) { SMqConsumerObj *pConsumer = taosMemoryCalloc(1, 
sizeof(SMqConsumerObj)); if (pConsumer == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; @@ -260,12 +261,12 @@ SMqConsumerObj *tNewSMqConsumerObj(int64_t consumerId, char* cgroup) { } void tDeleteSMqConsumerObj(SMqConsumerObj *pConsumer, bool delete) { - if(pConsumer == NULL) return; + if (pConsumer == NULL) return; taosArrayDestroyP(pConsumer->currentTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebNewTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->rebRemovedTopics, (FDelete)taosMemoryFree); taosArrayDestroyP(pConsumer->assignedTopics, (FDelete)taosMemoryFree); - if(delete){ + if (delete) { taosMemoryFree(pConsumer); } } @@ -392,7 +393,7 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s taosArrayPush(pConsumer->assignedTopics, &topic); } - if(sver > 1){ + if (sver > 1) { buf = taosDecodeFixedI8(buf, &pConsumer->withTbName); buf = taosDecodeFixedI8(buf, &pConsumer->autoCommit); buf = taosDecodeFixedI32(buf, &pConsumer->autoCommitInterval); @@ -401,18 +402,18 @@ void *tDecodeSMqConsumerObj(const void *buf, SMqConsumerObj *pConsumer, int8_t s return (void *)buf; } -//SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) { -// SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp)); -// if (pConsumerEpNew == NULL) return NULL; -// pConsumerEpNew->consumerId = pConsumerEpOld->consumerId; -// pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, NULL); -// return pConsumerEpNew; -//} +// SMqConsumerEp *tCloneSMqConsumerEp(const SMqConsumerEp *pConsumerEpOld) { +// SMqConsumerEp *pConsumerEpNew = taosMemoryMalloc(sizeof(SMqConsumerEp)); +// if (pConsumerEpNew == NULL) return NULL; +// pConsumerEpNew->consumerId = pConsumerEpOld->consumerId; +// pConsumerEpNew->vgs = taosArrayDup(pConsumerEpOld->vgs, NULL); +// return pConsumerEpNew; +// } // -//void tDeleteSMqConsumerEp(void *data) { -// SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data; -// 
taosArrayDestroy(pConsumerEp->vgs); -//} +// void tDeleteSMqConsumerEp(void *data) { +// SMqConsumerEp *pConsumerEp = (SMqConsumerEp *)data; +// taosArrayDestroy(pConsumerEp->vgs); +// } int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) { int32_t tlen = 0; @@ -420,7 +421,7 @@ int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) { tlen += taosEncodeArray(buf, pConsumerEp->vgs, (FEncode)tEncodeSMqVgEp); int32_t szVgs = taosArrayGetSize(pConsumerEp->offsetRows); tlen += taosEncodeFixedI32(buf, szVgs); - for (int32_t j= 0; j < szVgs; ++j) { + for (int32_t j = 0; j < szVgs; ++j) { OffsetRows *offRows = taosArrayGet(pConsumerEp->offsetRows, j); tlen += taosEncodeFixedI32(buf, offRows->vgId); tlen += taosEncodeFixedI64(buf, offRows->rows); @@ -434,28 +435,28 @@ int32_t tEncodeSMqConsumerEp(void **buf, const SMqConsumerEp *pConsumerEp) { // do nothing } } -//#if 0 -// int32_t sz = taosArrayGetSize(pConsumerEp->vgs); -// tlen += taosEncodeFixedI32(buf, sz); -// for (int32_t i = 0; i < sz; i++) { -// SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i); -// tlen += tEncodeSMqVgEp(buf, pVgEp); -// } -//#endif + // #if 0 + // int32_t sz = taosArrayGetSize(pConsumerEp->vgs); + // tlen += taosEncodeFixedI32(buf, sz); + // for (int32_t i = 0; i < sz; i++) { + // SMqVgEp *pVgEp = taosArrayGetP(pConsumerEp->vgs, i); + // tlen += tEncodeSMqVgEp(buf, pVgEp); + // } + // #endif return tlen; } void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t sver) { buf = taosDecodeFixedI64(buf, &pConsumerEp->consumerId); buf = taosDecodeArray(buf, &pConsumerEp->vgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver); - if (sver > 1){ + if (sver > 1) { int32_t szVgs = 0; buf = taosDecodeFixedI32(buf, &szVgs); - if(szVgs > 0){ + if (szVgs > 0) { pConsumerEp->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows)); if (NULL == pConsumerEp->offsetRows) return NULL; - for (int32_t j= 0; j < szVgs; ++j) { - OffsetRows* offRows = 
taosArrayReserve(pConsumerEp->offsetRows, 1); + for (int32_t j = 0; j < szVgs; ++j) { + OffsetRows *offRows = taosArrayReserve(pConsumerEp->offsetRows, 1); buf = taosDecodeFixedI32(buf, &offRows->vgId); buf = taosDecodeFixedI64(buf, &offRows->rows); buf = taosDecodeFixedI8(buf, &offRows->offset.type); @@ -470,21 +471,21 @@ void *tDecodeSMqConsumerEp(const void *buf, SMqConsumerEp *pConsumerEp, int8_t s } } } -//#if 0 -// int32_t sz; -// buf = taosDecodeFixedI32(buf, &sz); -// pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *)); -// for (int32_t i = 0; i < sz; i++) { -// SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp)); -// buf = tDecodeSMqVgEp(buf, pVgEp); -// taosArrayPush(pConsumerEp->vgs, &pVgEp); -// } -//#endif + // #if 0 + // int32_t sz; + // buf = taosDecodeFixedI32(buf, &sz); + // pConsumerEp->vgs = taosArrayInit(sz, sizeof(void *)); + // for (int32_t i = 0; i < sz; i++) { + // SMqVgEp *pVgEp = taosMemoryMalloc(sizeof(SMqVgEp)); + // buf = tDecodeSMqVgEp(buf, pVgEp); + // taosArrayPush(pConsumerEp->vgs, &pVgEp); + // } + // #endif return (void *)buf; } -SMqSubscribeObj *tNewSubscribeObj(const char* key) { +SMqSubscribeObj *tNewSubscribeObj(const char *key) { SMqSubscribeObj *pSubObj = taosMemoryCalloc(1, sizeof(SMqSubscribeObj)); if (pSubObj == NULL) { return NULL; @@ -577,7 +578,7 @@ int32_t tEncodeSubscribeObj(void **buf, const SMqSubscribeObj *pSub) { int32_t szVgs = taosArrayGetSize(pSub->offsetRows); tlen += taosEncodeFixedI32(buf, szVgs); - for (int32_t j= 0; j < szVgs; ++j) { + for (int32_t j = 0; j < szVgs; ++j) { OffsetRows *offRows = taosArrayGet(pSub->offsetRows, j); tlen += taosEncodeFixedI32(buf, offRows->vgId); tlen += taosEncodeFixedI64(buf, offRows->rows); @@ -617,14 +618,14 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) { buf = taosDecodeArray(buf, &pSub->unassignedVgs, (FDecode)tDecodeSMqVgEp, sizeof(SMqVgEp), sver); buf = taosDecodeStringTo(buf, pSub->dbName); - if (sver > 1){ + if (sver > 1) { 
int32_t szVgs = 0; buf = taosDecodeFixedI32(buf, &szVgs); - if(szVgs > 0){ + if (szVgs > 0) { pSub->offsetRows = taosArrayInit(szVgs, sizeof(OffsetRows)); if (NULL == pSub->offsetRows) return NULL; - for (int32_t j= 0; j < szVgs; ++j) { - OffsetRows* offRows = taosArrayReserve(pSub->offsetRows, 1); + for (int32_t j = 0; j < szVgs; ++j) { + OffsetRows *offRows = taosArrayReserve(pSub->offsetRows, 1); buf = taosDecodeFixedI32(buf, &offRows->vgId); buf = taosDecodeFixedI64(buf, &offRows->rows); buf = taosDecodeFixedI8(buf, &offRows->offset.type); @@ -639,71 +640,71 @@ void *tDecodeSubscribeObj(const void *buf, SMqSubscribeObj *pSub, int8_t sver) { } } buf = taosDecodeString(buf, &pSub->qmsg); - }else{ + } else { pSub->qmsg = taosStrdup(""); } return (void *)buf; } -//SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) { -// SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry)); -// if (pEntryNew == NULL) return NULL; -// pEntryNew->epoch = pEntry->epoch; -// pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp); -// return pEntryNew; -//} +// SMqSubActionLogEntry *tCloneSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) { +// SMqSubActionLogEntry *pEntryNew = taosMemoryMalloc(sizeof(SMqSubActionLogEntry)); +// if (pEntryNew == NULL) return NULL; +// pEntryNew->epoch = pEntry->epoch; +// pEntryNew->consumers = taosArrayDup(pEntry->consumers, (__array_item_dup_fn_t)tCloneSMqConsumerEp); +// return pEntryNew; +// } // -//void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) { -// taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp); -//} +// void tDeleteSMqSubActionLogEntry(SMqSubActionLogEntry *pEntry) { +// taosArrayDestroyEx(pEntry->consumers, (FDelete)tDeleteSMqConsumerEp); +// } -//int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) { -// int32_t tlen = 0; -// tlen += taosEncodeFixedI32(buf, pEntry->epoch); -// 
tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry); -// return tlen; -//} +// int32_t tEncodeSMqSubActionLogEntry(void **buf, const SMqSubActionLogEntry *pEntry) { +// int32_t tlen = 0; +// tlen += taosEncodeFixedI32(buf, pEntry->epoch); +// tlen += taosEncodeArray(buf, pEntry->consumers, (FEncode)tEncodeSMqSubActionLogEntry); +// return tlen; +// } // -//void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) { -// buf = taosDecodeFixedI32(buf, &pEntry->epoch); -// buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry)); -// return (void *)buf; -//} +// void *tDecodeSMqSubActionLogEntry(const void *buf, SMqSubActionLogEntry *pEntry) { +// buf = taosDecodeFixedI32(buf, &pEntry->epoch); +// buf = taosDecodeArray(buf, &pEntry->consumers, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry)); +// return (void *)buf; +// } -//SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) { -// SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj)); -// if (pLogNew == NULL) return pLogNew; -// memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN); -// pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp); -// return pLogNew; -//} +// SMqSubActionLogObj *tCloneSMqSubActionLogObj(SMqSubActionLogObj *pLog) { +// SMqSubActionLogObj *pLogNew = taosMemoryMalloc(sizeof(SMqSubActionLogObj)); +// if (pLogNew == NULL) return pLogNew; +// memcpy(pLogNew->key, pLog->key, TSDB_SUBSCRIBE_KEY_LEN); +// pLogNew->logs = taosArrayDup(pLog->logs, (__array_item_dup_fn_t)tCloneSMqConsumerEp); +// return pLogNew; +// } // -//void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) { -// taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp); -//} +// void tDeleteSMqSubActionLogObj(SMqSubActionLogObj *pLog) { +// taosArrayDestroyEx(pLog->logs, (FDelete)tDeleteSMqConsumerEp); +// } -//int32_t 
tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) { -// int32_t tlen = 0; -// tlen += taosEncodeString(buf, pLog->key); -// tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry); -// return tlen; -//} +// int32_t tEncodeSMqSubActionLogObj(void **buf, const SMqSubActionLogObj *pLog) { +// int32_t tlen = 0; +// tlen += taosEncodeString(buf, pLog->key); +// tlen += taosEncodeArray(buf, pLog->logs, (FEncode)tEncodeSMqSubActionLogEntry); +// return tlen; +// } // -//void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) { -// buf = taosDecodeStringTo(buf, pLog->key); -// buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry)); -// return (void *)buf; -//} +// void *tDecodeSMqSubActionLogObj(const void *buf, SMqSubActionLogObj *pLog) { +// buf = taosDecodeStringTo(buf, pLog->key); +// buf = taosDecodeArray(buf, &pLog->logs, (FDecode)tDecodeSMqSubActionLogEntry, sizeof(SMqSubActionLogEntry)); +// return (void *)buf; +// } // -//int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) { -// int32_t tlen = 0; -// tlen += taosEncodeString(buf, pOffset->key); -// tlen += taosEncodeFixedI64(buf, pOffset->offset); -// return tlen; -//} +// int32_t tEncodeSMqOffsetObj(void **buf, const SMqOffsetObj *pOffset) { +// int32_t tlen = 0; +// tlen += taosEncodeString(buf, pOffset->key); +// tlen += taosEncodeFixedI64(buf, pOffset->offset); +// return tlen; +// } // -//void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) { -// buf = taosDecodeStringTo(buf, pOffset->key); -// buf = taosDecodeFixedI64(buf, &pOffset->offset); -// return buf; -//} +// void *tDecodeSMqOffsetObj(void *buf, SMqOffsetObj *pOffset) { +// buf = taosDecodeStringTo(buf, pOffset->key); +// buf = taosDecodeFixedI64(buf, &pOffset->offset); +// return buf; +// } diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index cf1d445149..dd53f47964 
100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -140,7 +140,11 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) { void *buf = NULL; int8_t sver = 0; - if (sdbGetRawSoftVer(pRaw, &sver) != 0) goto STREAM_DECODE_OVER; + if (sdbGetRawSoftVer(pRaw, &sver) != 0) { + mError("stream read invalid data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild stream manually", + tsDataDir); + goto STREAM_DECODE_OVER; + } if (sver != MND_STREAM_VER_NUMBER) { terrno = 0; @@ -429,9 +433,11 @@ FAIL: return 0; } -int32_t mndPersistTaskDeployReq(STrans *pTrans, const SStreamTask *pTask) { +int32_t mndPersistTaskDeployReq(STrans *pTrans, SStreamTask *pTask) { SEncoder encoder; tEncoderInit(&encoder, NULL, 0); + + pTask->ver = SSTREAM_TASK_VER; tEncodeStreamTask(&encoder, pTask); int32_t size = encoder.pos; diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 8534f3b0a1..4a0ce81e68 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -218,11 +218,11 @@ _EXIT: } void streamBackendCleanup(void* arg) { SBackendWrapper* pHandle = (SBackendWrapper*)arg; - RocksdbCfInst** pIter = (RocksdbCfInst**)taosHashIterate(pHandle->cfInst, NULL); + void* pIter = taosHashIterate(pHandle->cfInst, NULL); while (pIter != NULL) { - RocksdbCfInst* inst = *pIter; + RocksdbCfInst* inst = *(RocksdbCfInst**)pIter; destroyRocksdbCfInst(inst); - taosHashIterate(pHandle->cfInst, pIter); + pIter = taosHashIterate(pHandle->cfInst, pIter); } taosHashCleanup(pHandle->cfInst); @@ -833,7 +833,10 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t qDebug("succ to open rocksdb cf"); } // close default cf - if (((rocksdb_column_family_handle_t**)cfHandle)[0] != 0) rocksdb_column_family_handle_destroy(cfHandle[0]); + if (((rocksdb_column_family_handle_t**)cfHandle)[0] != 0) { + 
rocksdb_column_family_handle_destroy(cfHandle[0]); + cfHandle[0] = NULL; + } rocksdb_options_destroy(cfOpts[0]); handle->db = db; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index ae07738868..3a60dcdb80 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -202,6 +202,7 @@ int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) { void* buf = NULL; int32_t len; int32_t code; + pTask->ver = SSTREAM_TASK_VER; tEncodeSize(tEncodeStreamTask, pTask, len, code); if (code < 0) { return -1; @@ -331,7 +332,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId) { qDebug("s-task:0x%x set task status:%s", taskId, streamGetTaskStatusStr(TASK_STATUS__DROPPING)); - while(1) { + while (1) { taosRLockLatch(&pMeta->lock); ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); @@ -443,10 +444,16 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { taosArrayDestroy(pRecycleList); return -1; } - tDecoderInit(&decoder, (uint8_t*)pVal, vLen); - tDecodeStreamTask(&decoder, pTask); - tDecoderClear(&decoder); + if (tDecodeStreamTask(&decoder, pTask) < 0) { + tDecoderClear(&decoder); + tdbFree(pKey); + tdbFree(pVal); + tdbTbcClose(pCur); + taosArrayDestroy(pRecycleList); + tFreeStreamTask(pTask); + return -1; + } if (pTask->status.taskStatus == TASK_STATUS__DROPPING) { int32_t taskId = pTask->id.taskId; @@ -500,13 +507,13 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { } if (taosArrayGetSize(pRecycleList) > 0) { - for(int32_t i = 0; i < taosArrayGetSize(pRecycleList); ++i) { - int32_t taskId = *(int32_t*) taosArrayGet(pRecycleList, i); + for (int32_t i = 0; i < taosArrayGetSize(pRecycleList); ++i) { + int32_t taskId = *(int32_t*)taosArrayGet(pRecycleList, i); streamMetaRemoveTask(pMeta, taskId); } } - qDebug("vgId:%d load %d task from disk", pMeta->vgId, (int32_t) taosArrayGetSize(pMeta->pTaskList)); + qDebug("vgId:%d load %d task from 
disk", pMeta->vgId, (int32_t)taosArrayGetSize(pMeta->pTaskList)); taosArrayDestroy(pRecycleList); return 0; } diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index 1eb8d11916..dc4e5ff4a6 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -26,13 +26,14 @@ static int32_t addToTaskset(SArray* pArray, SStreamTask* pTask) { return 0; } -SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam, SArray* pTaskList) { +SStreamTask* tNewStreamTask(int64_t streamId, int8_t taskLevel, int8_t fillHistory, int64_t triggerParam, + SArray* pTaskList) { SStreamTask* pTask = (SStreamTask*)taosMemoryCalloc(1, sizeof(SStreamTask)); if (pTask == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return NULL; } - + pTask->ver = SSTREAM_TASK_VER; pTask->id.taskId = tGenIdPI32(); pTask->id.streamId = streamId; pTask->info.taskLevel = taskLevel; @@ -72,6 +73,7 @@ int32_t tDecodeStreamEpInfo(SDecoder* pDecoder, SStreamChildEpInfo* pInfo) { int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { if (tStartEncode(pEncoder) < 0) return -1; + if (tEncodeI64(pEncoder, pTask->ver) < 0) return -1; if (tEncodeI64(pEncoder, pTask->id.streamId) < 0) return -1; if (tEncodeI32(pEncoder, pTask->id.taskId) < 0) return -1; if (tEncodeI32(pEncoder, pTask->info.totalLevel) < 0) return -1; @@ -135,6 +137,9 @@ int32_t tEncodeStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) { int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { if (tStartDecode(pDecoder) < 0) return -1; + if (tDecodeI64(pDecoder, &pTask->ver) < 0) return -1; + if (pTask->ver != SSTREAM_TASK_VER) return -1; + if (tDecodeI64(pDecoder, &pTask->id.streamId) < 0) return -1; if (tDecodeI32(pDecoder, &pTask->id.taskId) < 0) return -1; if (tDecodeI32(pDecoder, &pTask->info.totalLevel) < 0) return -1; @@ -163,7 +168,7 @@ int32_t tDecodeStreamTask(SDecoder* pDecoder, SStreamTask* pTask) { if 
(tDecodeI64(pDecoder, &pTask->dataRange.window.skey)) return -1; if (tDecodeI64(pDecoder, &pTask->dataRange.window.ekey)) return -1; - int32_t epSz; + int32_t epSz = -1; if (tDecodeI32(pDecoder, &epSz) < 0) return -1; pTask->pUpstreamEpInfoList = taosArrayInit(epSz, POINTER_BYTES); From 67bca400c4a640d9e9917c7268479e71b2ea1d12 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Fri, 4 Aug 2023 13:57:34 +0800 Subject: [PATCH 047/123] fix: refine example (#22325) * fix: use latest version of jdbc connector * fix: remove locale and timezone to avoid confusing user * fix: update readme.md --- examples/JDBC/JDBCDemo/readme.md | 10 ++-------- .../java/com/taosdata/example/JdbcRestfulDemo.java | 2 -- 2 files changed, 2 insertions(+), 10 deletions(-) diff --git a/examples/JDBC/JDBCDemo/readme.md b/examples/JDBC/JDBCDemo/readme.md index da638a0bcc..21f9153935 100644 --- a/examples/JDBC/JDBCDemo/readme.md +++ b/examples/JDBC/JDBCDemo/readme.md @@ -20,18 +20,12 @@ mvn clean compile exec:java -Dexec.mainClass="com.taosdata.example.JdbcDemo" -De ``` ## Compile the Demo Code and Run It -To compile taos-jdbcdriver, go to the source directory ``TDengine/src/connector/jdbc`` and execute ``` mvn clean package -Dmaven.test.skip=true ``` -To compile the demo project, go to the source directory ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute +To run JDBCDemo.jar, execute ``` -mvn clean package assembly:single -``` - -To run JDBCDemo.jar, go to ``TDengine/tests/examples/JDBC/JDBCDemo`` and execute -``` -java -Djava.ext.dirs=../../../../src/connector/jdbc/target:$JAVA_HOME/jre/lib/ext -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host [HOSTNAME] +java -jar target/JDBCDemo-SNAPSHOT-jar-with-dependencies.jar -host [HOSTNAME] ``` diff --git a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java index d89476b8ca..69ef91d380 100644 --- 
a/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java +++ b/examples/JDBC/JDBCDemo/src/main/java/com/taosdata/example/JdbcRestfulDemo.java @@ -16,8 +16,6 @@ public class JdbcRestfulDemo { Properties properties = new Properties(); properties.setProperty("charset", "UTF-8"); - properties.setProperty("locale", "en_US.UTF-8"); - properties.setProperty("timezone", "UTC-8"); Connection conn = DriverManager.getConnection(url, properties); Statement stmt = conn.createStatement(); From 81908be32f060d28b1484e9284e2ce9d13873e9d Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 4 Aug 2023 14:02:03 +0800 Subject: [PATCH 048/123] add stream ver --- source/libs/stream/src/streamMeta.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 3a60dcdb80..57a097869b 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -452,6 +452,10 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { tdbTbcClose(pCur); taosArrayDestroy(pRecycleList); tFreeStreamTask(pTask); + qError( + "stream read incompatible data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild stream " + "manually", + tsDataDir); return -1; } From 56ed422f8863fdd6fc54ed0bf4dba47171308816 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 4 Aug 2023 14:09:00 +0800 Subject: [PATCH 049/123] add stream ver --- source/dnode/mnode/impl/src/mndStream.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index dd53f47964..0a4bb980df 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -141,8 +141,6 @@ SSdbRow *mndStreamActionDecode(SSdbRaw *pRaw) { int8_t sver = 0; if (sdbGetRawSoftVer(pRaw, &sver) != 0) { - mError("stream read invalid data, rm %s/vnode/vnode*/tq/stream if taosd cannot start, and rebuild stream manually", - tsDataDir); 
goto STREAM_DECODE_OVER; } From d3a9429c3b5ad4c69bbdec2e305b824d511d80ea Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 14:17:18 +0800 Subject: [PATCH 050/123] fix: add test case --- tests/parallel_test/cases.task | 1 + tests/script/tsim/query/join_pk.sim | 42 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/script/tsim/query/join_pk.sim diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 89572d1c06..1883a50e59 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -954,6 +954,7 @@ ,,n,script,./test.sh -f tsim/query/udfpy.sim ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim +,,y,script,./test.sh -f tsim/query/join_pk.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim new file mode 100644 index 0000000000..66bb20da24 --- /dev/null +++ b/tests/script/tsim/query/join_pk.sim @@ -0,0 +1,42 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test; +sql use test; +sql create table st(ts timestamp, f int) tags(t int); +sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1) +sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3) +sql select * from (select _wstart - 1s as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +if $rows != 3 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi + +if $data21 != 1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi + +if $data13 != 1 then + return -1 +endi +if $data23 != 1 then + return -1 +endi +sql select * 
from (select _wstart - 1d as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + From b20b114100f6b675342552d9393a7ad96e82993d Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 14:37:17 +0800 Subject: [PATCH 051/123] comment out splitVgroup.py --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1f0dd3bf26..8715a0d13b 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -160,7 +160,7 @@ ,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py ,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py ,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py -,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5 +#,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5 ,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3 From fba43e17480b3423bae78c5e9ca638449bf58efb Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Thu, 3 Aug 2023 18:05:52 +0800 Subject: [PATCH 052/123] feature: optimize interval with limit --- source/libs/executor/inc/executorInt.h | 9 + source/libs/executor/src/executil.c | 3 +- source/libs/executor/src/operator.c | 1 - 
source/libs/executor/src/scanoperator.c | 47 ++-- source/libs/executor/src/timewindowoperator.c | 92 +++++- source/libs/planner/src/planLogicCreater.c | 1 - source/libs/planner/src/planOptimizer.c | 126 +++++++-- source/libs/planner/src/planSpliter.c | 12 + tests/parallel_test/cases.task | 1 + .../tsim/query/r/explain_tsorder.result | 212 +++++++------- .../system-test/2-query/interval_limit_opt.py | 266 ++++++++++++++++++ 11 files changed, 607 insertions(+), 163 deletions(-) create mode 100644 tests/system-test/2-query/interval_limit_opt.py diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h index 5baf0978cd..1bff9fce9e 100644 --- a/source/libs/executor/inc/executorInt.h +++ b/source/libs/executor/inc/executorInt.h @@ -25,6 +25,7 @@ extern "C" { #include "tsort.h" #include "ttszip.h" #include "tvariant.h" +#include "theap.h" #include "dataSinkMgt.h" #include "executil.h" @@ -418,6 +419,14 @@ typedef struct SIntervalAggOperatorInfo { EOPTR_EXEC_MODEL execModel; // operator execution model [batch model|stream model] STimeWindowAggSupp twAggSup; SArray* pPrevValues; // SArray used to keep the previous not null value for interpolation. 
+ // for limit optimization + bool limited; + int64_t limit; + bool slimited; + int64_t slimit; + uint64_t curGroupId; // initialize to UINT64_MAX + uint64_t handledGroupNum; + BoundedQueue* pBQ; } SIntervalAggOperatorInfo; typedef struct SMergeAlignedIntervalAggOperatorInfo { diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c index e1bf4e7cb0..aa0c7945b0 100644 --- a/source/libs/executor/src/executil.c +++ b/source/libs/executor/src/executil.c @@ -2118,8 +2118,9 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* if (code != TSDB_CODE_SUCCESS) { return code; } + if (pScanNode->groupOrderScan) pTableListInfo->numOfOuputGroups = taosArrayGetSize(pTableListInfo->pTableList); - if (groupSort) { + if (groupSort || pScanNode->groupOrderScan) { code = sortTableGroup(pTableListInfo); } } diff --git a/source/libs/executor/src/operator.c b/source/libs/executor/src/operator.c index 2db5ea2f1e..8ddcc8fd15 100644 --- a/source/libs/executor/src/operator.c +++ b/source/libs/executor/src/operator.c @@ -275,7 +275,6 @@ SOperatorInfo* createOperator(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SR SNode* pTagIndexCond, const char* pUser, const char* dbname) { int32_t type = nodeType(pPhyNode); const char* idstr = GET_TASKID(pTaskInfo); - if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) { SOperatorInfo* pOperator = NULL; if (QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN == type) { diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6c8d9ed59f..b15cf56c3d 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -848,30 +848,29 @@ static SSDataBlock* doTableScan(SOperatorInfo* pOperator) { return result; } - if ((++pInfo->currentGroupId) >= tableListGetOutputGroups(pInfo->base.pTableListInfo)) { - setOperatorCompleted(pOperator); - return NULL; + while (1) { + if ((++pInfo->currentGroupId) >= 
tableListGetOutputGroups(pInfo->base.pTableListInfo)) { + setOperatorCompleted(pOperator); + return NULL; + } + + // reset value for the next group data output + pOperator->status = OP_OPENED; + resetLimitInfoForNextGroup(&pInfo->base.limitInfo); + + int32_t num = 0; + STableKeyInfo* pList = NULL; + tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); + + pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num); + pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); + pInfo->scanTimes = 0; + + result = doGroupedTableScan(pOperator); + if (result != NULL) { + return result; + } } - - // reset value for the next group data output - pOperator->status = OP_OPENED; - resetLimitInfoForNextGroup(&pInfo->base.limitInfo); - - int32_t num = 0; - STableKeyInfo* pList = NULL; - tableListGetGroupList(pInfo->base.pTableListInfo, pInfo->currentGroupId, &pList, &num); - - pAPI->tsdReader.tsdSetQueryTableList(pInfo->base.dataReader, pList, num); - pAPI->tsdReader.tsdReaderResetStatus(pInfo->base.dataReader, &pInfo->base.cond); - pInfo->scanTimes = 0; - - result = doGroupedTableScan(pOperator); - if (result != NULL) { - return result; - } - - setOperatorCompleted(pOperator); - return NULL; } } @@ -3551,4 +3550,4 @@ static void destoryTableCountScanOperator(void* param) { taosArrayDestroy(pTableCountScanInfo->stbUidList); taosMemoryFreeClear(param); -} \ No newline at end of file +} diff --git a/source/libs/executor/src/timewindowoperator.c b/source/libs/executor/src/timewindowoperator.c index 0a46def23d..4f8a3acd15 100644 --- a/source/libs/executor/src/timewindowoperator.c +++ b/source/libs/executor/src/timewindowoperator.c @@ -876,7 +876,67 @@ bool needDeleteWindowBuf(STimeWindow* pWin, STimeWindowAggSupp* pTwSup) { return pTwSup->maxTs != INT64_MIN && pWin->ekey < pTwSup->maxTs - pTwSup->deleteMark; } -static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* 
pBlock, +static bool tsKeyCompFn(void* l, void* r, void* param) { + TSKEY* lTS = (TSKEY*)l; + TSKEY* rTS = (TSKEY*)r; + SIntervalAggOperatorInfo* pInfo = param; + return pInfo->binfo.outputTsOrder == ORDER_ASC ? *lTS < *rTS : *lTS > *rTS; +} + +static bool isCalculatedWin(SIntervalAggOperatorInfo* pInfo, const STimeWindow* win, uint64_t tableGroupId) { + char keyBuf[sizeof(TSKEY) + sizeof(uint64_t)] = {0}; + SET_RES_WINDOW_KEY(keyBuf, (char*)&win->skey, sizeof(TSKEY), tableGroupId); + return tSimpleHashGet(pInfo->aggSup.pResultRowHashTable, keyBuf, GET_RES_WINDOW_KEY_LEN(sizeof(TSKEY))) != NULL; +} + +/** + * @brief check if cur window should be filtered out by limit info + * @retval true if should be filtered out + * @retval false if not filtering out + * @note If no limit info, we skip filtering. + * If input/output ts order mismatch, we skip filtering too. + * eg. input ts order: desc, and output ts order: asc, limit: 10 + * IntervalOperator should output the first 10 windows, however, we can't find the first 10 windows until we scan + * every tuple in every block. + * And the boundedQueue keeps refreshing all records with smaller ts key. 
+ */ +static bool filterWindowWithLimit(SIntervalAggOperatorInfo* pOperatorInfo, STimeWindow* win, uint64_t groupId) { + if (!pOperatorInfo->limited // if no limit info, no filter will be applied + || pOperatorInfo->binfo.inputTsOrder != + pOperatorInfo->binfo.outputTsOrder // if input/output ts order mismatch, no filter + ) { + return false; + } + if (pOperatorInfo->limit == 0) return true; + + if (pOperatorInfo->pBQ == NULL) { + pOperatorInfo->pBQ = createBoundedQueue(pOperatorInfo->limit - 1, tsKeyCompFn, taosMemoryFree, pOperatorInfo); + } + + bool shouldFilter = false; + // if BQ has been full, compare it with top of BQ + if (taosBQSize(pOperatorInfo->pBQ) == taosBQMaxSize(pOperatorInfo->pBQ) + 1) { + PriorityQueueNode* top = taosBQTop(pOperatorInfo->pBQ); + shouldFilter = tsKeyCompFn(top->data, &win->skey, pOperatorInfo); + } + if (shouldFilter) { + return true; + } else if (isCalculatedWin(pOperatorInfo, win, groupId)) { + return false; + } + + // cur win not been filtered out and not been pushed into BQ yet, push it into BQ + PriorityQueueNode node = {.data = taosMemoryMalloc(sizeof(TSKEY))}; + *((TSKEY*)node.data) = win->skey; + + if (NULL == taosBQPush(pOperatorInfo->pBQ, &node)) { + taosMemoryFree(node.data); + return true; + } + return false; +} + +static bool hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResultRowInfo, SSDataBlock* pBlock, int32_t scanFlag) { SIntervalAggOperatorInfo* pInfo = (SIntervalAggOperatorInfo*)pOperatorInfo->info; @@ -891,8 +951,21 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul TSKEY ts = getStartTsKey(&pBlock->info.window, tsCols); SResultRow* pResult = NULL; + if (tableGroupId != pInfo->curGroupId) { + pInfo->handledGroupNum += 1; + if (pInfo->slimited && pInfo->handledGroupNum > pInfo->slimit) { + return true; + } else { + pInfo->curGroupId = tableGroupId; + destroyBoundedQueue(pInfo->pBQ); + pInfo->pBQ = NULL; + } + } + STimeWindow win = 
getActiveTimeWindow(pInfo->aggSup.pResultBuf, pResultRowInfo, ts, &pInfo->interval, pInfo->binfo.inputTsOrder); + if (filterWindowWithLimit(pInfo, &win, tableGroupId)) return false; + int32_t ret = setTimeWindowOutputBuf(pResultRowInfo, &win, (scanFlag == MAIN_SCAN), &pResult, tableGroupId, pSup->pCtx, numOfOutput, pSup->rowEntryInfoOffset, &pInfo->aggSup, pTaskInfo); if (ret != TSDB_CODE_SUCCESS || pResult == NULL) { @@ -929,7 +1002,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul while (1) { int32_t prevEndPos = forwardRows - 1 + startPos; startPos = getNextQualifiedWindow(&pInfo->interval, &nextWin, &pBlock->info, tsCols, prevEndPos, pInfo->binfo.inputTsOrder); - if (startPos < 0) { + if (startPos < 0 || filterWindowWithLimit(pInfo, &nextWin, tableGroupId)) { break; } // null data, failed to allocate more memory buffer @@ -963,6 +1036,7 @@ static void hashIntervalAgg(SOperatorInfo* pOperatorInfo, SResultRowInfo* pResul if (pInfo->timeWindowInterpo) { saveDataBlockLastRow(pInfo->pPrevValues, pBlock, pInfo->pInterpCols); } + return false; } void doCloseWindow(SResultRowInfo* pResultRowInfo, const SIntervalAggOperatorInfo* pInfo, SResultRow* pResult) { @@ -1043,7 +1117,7 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pSup, pBlock, pInfo->binfo.inputTsOrder, scanFlag, true); - hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag); + if (hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag)) break; } initGroupedResultInfo(&pInfo->groupResInfo, pInfo->aggSup.pResultRowHashTable, pInfo->binfo.outputTsOrder); @@ -1495,6 +1569,7 @@ void destroyIntervalOperatorInfo(void* param) { cleanupGroupResInfo(&pInfo->groupResInfo); colDataDestroy(&pInfo->twAggSup.timeWindowData); + destroyBoundedQueue(pInfo->pBQ); taosMemoryFreeClear(param); } @@ -1658,6 +1733,17 @@ SOperatorInfo* 
createIntervalOperatorInfo(SOperatorInfo* downstream, SIntervalPh pInfo->interval = interval; pInfo->twAggSup = as; pInfo->binfo.mergeResultBlock = pPhyNode->window.mergeDataBlock; + if (pPhyNode->window.node.pLimit) { + SLimitNode* pLimit = (SLimitNode*)pPhyNode->window.node.pLimit; + pInfo->limited = true; + pInfo->limit = pLimit->limit + pLimit->offset; + } + if (pPhyNode->window.node.pSlimit) { + SLimitNode* pLimit = (SLimitNode*)pPhyNode->window.node.pSlimit; + pInfo->slimited = true; + pInfo->slimit = pLimit->limit + pLimit->offset; + pInfo->curGroupId = UINT64_MAX; + } if (pPhyNode->window.pExprs != NULL) { int32_t numOfScalar = 0; diff --git a/source/libs/planner/src/planLogicCreater.c b/source/libs/planner/src/planLogicCreater.c index 713f12e229..854f3fc4c6 100644 --- a/source/libs/planner/src/planLogicCreater.c +++ b/source/libs/planner/src/planLogicCreater.c @@ -847,7 +847,6 @@ static int32_t createWindowLogicNodeByInterval(SLogicPlanContext* pCxt, SInterva : (pSelect->hasTimeLineFunc ? getRequireDataOrder(true, pSelect) : DATA_ORDER_LEVEL_IN_BLOCK); pWindow->node.resultDataOrder = pCxt->pPlanCxt->streamQuery ? 
DATA_ORDER_LEVEL_GLOBAL : getRequireDataOrder(true, pSelect); - pWindow->pTspk = nodesCloneNode(pInterval->pCol); if (NULL == pWindow->pTspk) { nodesDestroyNode((SNode*)pWindow); diff --git a/source/libs/planner/src/planOptimizer.c b/source/libs/planner/src/planOptimizer.c index 32721d8060..b2f71adbd7 100644 --- a/source/libs/planner/src/planOptimizer.c +++ b/source/libs/planner/src/planOptimizer.c @@ -368,7 +368,7 @@ static void scanPathOptSetGroupOrderScan(SScanLogicNode* pScan) { if (pScan->node.pParent && nodeType(pScan->node.pParent) == QUERY_NODE_LOGIC_PLAN_AGG) { SAggLogicNode* pAgg = (SAggLogicNode*)pScan->node.pParent; - bool withSlimit = pAgg->node.pSlimit != NULL || (pAgg->node.pParent && pAgg->node.pParent->pSlimit); + bool withSlimit = pAgg->node.pSlimit != NULL || (pAgg->node.pParent && pAgg->node.pParent->pSlimit); if (withSlimit && isPartTableAgg(pAgg)) { pScan->groupOrderScan = pAgg->node.forceCreateNonBlockingOptr = true; } @@ -1546,11 +1546,33 @@ static bool planOptNodeListHasTbname(SNodeList* pKeys) { } static bool partTagsIsOptimizableNode(SLogicNode* pNode) { - return ((QUERY_NODE_LOGIC_PLAN_PARTITION == nodeType(pNode) || - (QUERY_NODE_LOGIC_PLAN_AGG == nodeType(pNode) && NULL != ((SAggLogicNode*)pNode)->pGroupKeys && - NULL != ((SAggLogicNode*)pNode)->pAggFuncs)) && - 1 == LIST_LENGTH(pNode->pChildren) && - QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0))); + bool ret = 1 == LIST_LENGTH(pNode->pChildren) && + QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(nodesListGetNode(pNode->pChildren, 0)); + if (!ret) return ret; + switch (nodeType(pNode)) { + case QUERY_NODE_LOGIC_PLAN_PARTITION: { + if (pNode->pParent && nodeType(pNode->pParent) == QUERY_NODE_LOGIC_PLAN_WINDOW) { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode->pParent; + if (pWindow->winType == WINDOW_TYPE_INTERVAL) { + // if interval has slimit, we push down partition node to scan, and scan will set groupOrderScan to true + // we want to skip groups of 
blocks after slimit satisfied + // if interval only has limit, we do not push down partition node to scan + // we want to get grouped output from partition node and make use of limit + // if no slimit and no limit, we push down partition node and groupOrderScan is false, cause we do not need + // group ordered output + if (!pWindow->node.pSlimit && pWindow->node.pLimit) ret = false; + } + } + } break; + case QUERY_NODE_LOGIC_PLAN_AGG: { + SAggLogicNode* pAgg = (SAggLogicNode*)pNode; + ret = pAgg->pGroupKeys && pAgg->pAggFuncs; + } break; + default: + ret = false; + break; + } + return ret; } static SNodeList* partTagsGetPartKeys(SLogicNode* pNode) { @@ -1691,6 +1713,8 @@ static int32_t partTagsOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSub scanPathOptSetGroupOrderScan(pScan); pParent->hasGroupKeyOptimized = true; } + if (pNode->pParent->pSlimit) + pScan->groupOrderScan = true; NODES_CLEAR_LIST(pNode->pChildren); nodesDestroyNode((SNode*)pNode); @@ -2644,23 +2668,79 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp } static bool pushDownLimitOptShouldBeOptimized(SLogicNode* pNode) { - if (NULL == pNode->pLimit || 1 != LIST_LENGTH(pNode->pChildren)) { + if ((NULL == pNode->pLimit && pNode->pSlimit == NULL) || 1 != LIST_LENGTH(pNode->pChildren)) { return false; } SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0); - // push down to sort node - if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) { - // if we have pushed down, we skip it - if (pChild->pLimit) return false; - } else if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pChild) || QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pNode)) { - // push down to table scan node - // if pNode is sortNode, we skip push down limit info to table scan node - return false; - } + if (pChild->pLimit || pChild->pSlimit) return false; return true; } +static void swapLimit(SLogicNode* pParent, SLogicNode* pChild) { + pChild->pLimit = pParent->pLimit; + pParent->pLimit = NULL; 
+} + +static void cloneLimit(SLogicNode* pParent, SLogicNode* pChild) { + SLimitNode* pLimit = NULL; + if (pParent->pLimit) { + pChild->pLimit = nodesCloneNode(pParent->pLimit); + pLimit = (SLimitNode*)pChild->pLimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + } + + if (pParent->pSlimit) { + pChild->pSlimit = nodesCloneNode(pParent->pSlimit); + pLimit = (SLimitNode*)pChild->pSlimit; + pLimit->limit += pLimit->offset; + pLimit->offset = 0; + } +} + +static bool pushDownLimitHow(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo); +static bool pushDownLimitTo(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo) { + switch (nodeType(pNodeLimitPushTo)) { + case QUERY_NODE_LOGIC_PLAN_WINDOW: { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNodeLimitPushTo; + if (pWindow->winType != WINDOW_TYPE_INTERVAL) break; + cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + return true; + } + case QUERY_NODE_LOGIC_PLAN_FILL: + case QUERY_NODE_LOGIC_PLAN_SORT: { + cloneLimit(pNodeWithLimit, pNodeLimitPushTo); + SNode* pChild = NULL; + FOREACH(pChild, pNodeLimitPushTo->pChildren) { pushDownLimitHow(pNodeLimitPushTo, (SLogicNode*)pChild); } + return true; + } + case QUERY_NODE_LOGIC_PLAN_SCAN: + if (nodeType(pNodeWithLimit) == QUERY_NODE_LOGIC_PLAN_PROJECT && pNodeWithLimit->pLimit) { + swapLimit(pNodeWithLimit, pNodeLimitPushTo); + return true; + } + default: + break; + } + return false; +} + +static bool pushDownLimitHow(SLogicNode* pNodeWithLimit, SLogicNode* pNodeLimitPushTo) { + switch (nodeType(pNodeWithLimit)) { + case QUERY_NODE_LOGIC_PLAN_PROJECT: + case QUERY_NODE_LOGIC_PLAN_FILL: + return pushDownLimitTo(pNodeWithLimit, pNodeLimitPushTo); + case QUERY_NODE_LOGIC_PLAN_SORT: { + SSortLogicNode* pSort = (SSortLogicNode*)pNodeWithLimit; + if (sortPriKeyOptIsPriKeyOrderBy(pSort->pSortKeys)) return pushDownLimitTo(pNodeWithLimit, pNodeLimitPushTo); + } + default: + break; + } + return false; +} + static int32_t pushDownLimitOptimize(SOptimizeContext* 
pCxt, SLogicSubplan* pLogicSubplan) { SLogicNode* pNode = optFindPossibleNode(pLogicSubplan->pNode, pushDownLimitOptShouldBeOptimized); if (NULL == pNode) { @@ -2669,17 +2749,9 @@ static int32_t pushDownLimitOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0); nodesDestroyNode(pChild->pLimit); - if (QUERY_NODE_LOGIC_PLAN_SORT == nodeType(pChild)) { - pChild->pLimit = nodesCloneNode(pNode->pLimit); - SLimitNode* pLimit = (SLimitNode*)pChild->pLimit; - pLimit->limit += pLimit->offset; - pLimit->offset = 0; - } else { - pChild->pLimit = pNode->pLimit; - pNode->pLimit = NULL; + if (pushDownLimitHow(pNode, pChild)) { + pCxt->optimized = true; } - pCxt->optimized = true; - return TSDB_CODE_SUCCESS; } @@ -2980,6 +3052,7 @@ static const SOptimizeRule optimizeRuleSet[] = { {.pName = "sortNonPriKeyOptimize", .optimizeFunc = sortNonPriKeyOptimize}, {.pName = "SortPrimaryKey", .optimizeFunc = sortPrimaryKeyOptimize}, {.pName = "SmaIndex", .optimizeFunc = smaIndexOptimize}, + {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize}, {.pName = "PartitionTags", .optimizeFunc = partTagsOptimize}, {.pName = "MergeProjects", .optimizeFunc = mergeProjectsOptimize}, {.pName = "EliminateProject", .optimizeFunc = eliminateProjOptimize}, @@ -2988,7 +3061,6 @@ static const SOptimizeRule optimizeRuleSet[] = { {.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize}, {.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize}, {.pName = "TagScan", .optimizeFunc = tagScanOptimize}, - {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize}, {.pName = "TableCountScan", .optimizeFunc = tableCountScanOptimize}, }; // clang-format on diff --git a/source/libs/planner/src/planSpliter.c b/source/libs/planner/src/planSpliter.c index 84a486649e..3f6c73b4e5 100644 --- a/source/libs/planner/src/planSpliter.c +++ b/source/libs/planner/src/planSpliter.c @@ -498,6 +498,18 @@ static int32_t 
stbSplRewriteFromMergeNode(SMergeLogicNode* pMerge, SLogicNode* p } break; } + case QUERY_NODE_LOGIC_PLAN_WINDOW: { + SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode; + if (pMerge->node.pLimit) { + nodesDestroyNode(pMerge->node.pLimit); + pMerge->node.pLimit = NULL; + } + if (pMerge->node.pSlimit) { + nodesDestroyNode(pMerge->node.pSlimit); + pMerge->node.pSlimit = NULL; + } + break; + } default: break; } diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 9ec12197ec..dc72c67402 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -25,6 +25,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/nestedQuery_26.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/interval_limit_opt.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqShow.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/tmqDropStb.py ,,y,system-test,./pytest.sh python3 ./test.py -f 7-tmq/subscribeStb0.py diff --git a/tests/script/tsim/query/r/explain_tsorder.result b/tests/script/tsim/query/r/explain_tsorder.result index b69a77ada5..95f8c38fbf 100644 --- a/tests/script/tsim/query/r/explain_tsorder.result +++ b/tests/script/tsim/query/r/explain_tsorder.result @@ -1603,58 +1603,58 @@ QUERY_PLAN: Time Range: [-9223372036854775808, taos> select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart desc; _wstart | last(ts) | avg(c2) | ================================================================================ - 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-20 00:01:00.000 | 
2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | taos> select _wstart, last(ts), avg(c2) from meters interval(10s) order by _wstart asc; _wstart | last(ts) | avg(c2) | ================================================================================ - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-24 00:01:00.000 
| 2022-05-24 00:01:08.000 | 210.000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | taos> select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart asc; _wstart | first(ts) | avg(c2) | ================================================================================ - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | + 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 
| + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | taos> select _wstart, first(ts), avg(c2) from meters interval(10s) order by _wstart desc; _wstart | first(ts) | avg(c2) | ================================================================================ - 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000 | - 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000 | - 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000 | - 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000 | - 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000 | - 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000 | - 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000 | - 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000 | - 2022-05-16 00:01:00.000 | 2022-05-16 00:01:08.000 | 136.000000000 | - 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000 | + 2022-05-24 00:01:00.000 | 2022-05-24 00:01:08.000 | 210.000000000000000 | + 2022-05-23 00:01:00.000 | 2022-05-23 00:01:08.000 | 116.000000000000000 | + 2022-05-22 00:01:00.000 | 2022-05-22 00:01:08.000 | 196.000000000000000 | + 2022-05-21 00:01:00.000 | 2022-05-21 00:01:08.000 | 11.000000000000000 | + 2022-05-20 00:01:00.000 | 2022-05-20 00:01:08.000 | 120.000000000000000 | + 2022-05-19 00:01:00.000 | 2022-05-19 00:01:08.000 | 243.000000000000000 | + 2022-05-18 00:01:00.000 | 2022-05-18 00:01:08.000 | 58.000000000000000 | + 2022-05-17 00:01:00.000 | 2022-05-17 00:01:08.000 | 59.000000000000000 | + 2022-05-16 00:01:00.000 | 
2022-05-16 00:01:08.000 | 136.000000000000000 | + 2022-05-15 00:01:00.000 | 2022-05-15 00:01:08.000 | 234.000000000000000 | taos> select last(a) as d from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s)) order by d; d | @@ -1792,35 +1792,35 @@ taos> select last(b) as d from (select last(ts) as b, avg(c2) as c from meters i taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc; _wstart | d | avg(c) | ================================================================================ - 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000 | - 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000 | - 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000 | - 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000 | - 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000 | - 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.600000000 | - 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | - 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | - 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | - 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | - 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000 | - 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000 | - 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000 | - 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000 | - 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000 | - 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000 | - 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.400000000 | - 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000 | - 2022-05-17 02:00:00.000 | 
2022-05-17 04:49:00.000 | 58.800000000 | - 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000 | - 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000 | - 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.800000000 | - 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000 | - 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.600000000 | - 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000 | - 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000 | - 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | - 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 | - 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | + 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000000000 | + 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000000000 | + 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000000000 | + 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000000000 | + 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000000000 | + 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.599999999999994 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.199999999999989 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000000011 | + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000000006 | + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000000000 | + 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000000000 | + 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000000000 | + 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000000000 | + 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000000000 | + 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000000003 | + 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 
| 58.399999999999999 | + 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000000001 | + 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.799999999999997 | + 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000000000 | + 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000000006 | + 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.799999999999997 | + 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000000003 | + 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.599999999999994 | + 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000000000 | + 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000000000 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000000000 | + 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000000000 | taos> explain verbose true select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by d desc\G; *************************** 1.row *************************** @@ -2673,51 +2673,51 @@ taos> select ts, c2 from d1 order by ts asc, c2 desc limit 5,5; taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc; _wstart | d | avg(c) | ================================================================================ - 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | - 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | - 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | - 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 
| - 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000 | - 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | - 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | - 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | - 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000 | - 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000 | - 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.600000000 | - 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000 | - 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000 | - 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.600000000 | - 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000 | - 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000 | - 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000 | - 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000 | - 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.800000000 | - 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000 | - 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000 | - 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000 | - 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.800000000 | - 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000 | - 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.400000000 | - 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000 | - 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000 | - 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000 | - 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000 | + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000000000 | + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000000006 | + 2022-05-15 05:00:00.000 | 
2022-05-15 06:01:00.000 | 209.500000000000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000000000 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000000011 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000000000 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.199999999999989 | + 2022-05-18 13:00:00.000 | 2022-05-18 14:25:00.000 | 169.000000000000000 | + 2022-05-15 15:00:00.000 | 2022-05-15 18:01:00.000 | 160.500000000000000 | + 2022-05-19 19:00:00.000 | 2022-05-19 19:13:00.000 | 144.599999999999994 | + 2022-05-15 20:00:00.000 | 2022-05-16 00:01:00.000 | 136.000000000000000 | + 2022-05-18 08:00:00.000 | 2022-05-18 09:37:00.000 | 132.000000000000000 | + 2022-05-16 01:00:00.000 | 2022-05-16 04:49:00.000 | 120.599999999999994 | + 2022-05-20 00:00:00.000 | 2022-05-20 00:01:00.000 | 120.000000000000000 | + 2022-05-16 06:00:00.000 | 2022-05-16 09:37:00.000 | 105.200000000000003 | + 2022-05-18 03:00:00.000 | 2022-05-18 04:49:00.000 | 95.000000000000000 | + 2022-05-20 05:00:00.000 | 2022-05-20 06:01:00.000 | 92.750000000000000 | + 2022-05-16 11:00:00.000 | 2022-05-16 14:25:00.000 | 89.799999999999997 | + 2022-05-16 16:00:00.000 | 2022-05-16 19:13:00.000 | 74.400000000000006 | + 2022-05-20 10:00:00.000 | 2022-05-20 12:01:00.000 | 65.500000000000000 | + 2022-05-16 21:00:00.000 | 2022-05-17 00:01:00.000 | 59.000000000000000 | + 2022-05-17 02:00:00.000 | 2022-05-17 04:49:00.000 | 58.799999999999997 | + 2022-05-17 07:00:00.000 | 2022-05-17 09:37:00.000 | 58.600000000000001 | + 2022-05-17 12:00:00.000 | 2022-05-17 14:25:00.000 | 58.399999999999999 | + 2022-05-17 17:00:00.000 | 2022-05-17 19:13:00.000 | 58.200000000000003 | + 2022-05-17 22:00:00.000 | 2022-05-18 00:01:00.000 | 58.000000000000000 | + 2022-05-20 15:00:00.000 | 2022-05-20 18:01:00.000 | 38.250000000000000 | + 2022-05-20 20:00:00.000 | 2022-05-21 00:01:00.000 | 11.000000000000000 | taos> select _wstart, first(a) as d, avg(c) from (select _wstart 
as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2; _wstart | d | avg(c) | ================================================================================ - 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000 | - 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000 | + 2022-05-18 23:00:00.000 | 2022-05-19 00:01:00.000 | 243.000000000000000 | + 2022-05-15 00:00:00.000 | 2022-05-15 00:01:00.000 | 234.000000000000000 | taos> select _wstart, first(a) as d, avg(c) from (select _wstart as a, last(ts) as b, avg(c2) as c from meters interval(10s) order by a desc) where a > '2022-05-15 00:01:00.000' and a < '2022-05-21 00:01:08.000' interval(5h) fill(linear) order by avg(c) desc limit 2,6; _wstart | d | avg(c) | ================================================================================ - 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000 | - 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000 | - 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000 | - 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000 | - 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000 | - 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.200000000 | + 2022-05-19 04:00:00.000 | 2022-05-19 04:49:00.000 | 218.400000000000006 | + 2022-05-15 05:00:00.000 | 2022-05-15 06:01:00.000 | 209.500000000000000 | + 2022-05-18 18:00:00.000 | 2022-05-18 19:13:00.000 | 206.000000000000000 | + 2022-05-19 09:00:00.000 | 2022-05-19 09:37:00.000 | 193.800000000000011 | + 2022-05-15 10:00:00.000 | 2022-05-15 12:01:00.000 | 185.000000000000000 | + 2022-05-19 14:00:00.000 | 2022-05-19 14:25:00.000 | 169.199999999999989 | taos> select last(ts), c2 as d from d1 group by c2 order by c2 desc limit 10; last(ts) | d | diff --git 
a/tests/system-test/2-query/interval_limit_opt.py b/tests/system-test/2-query/interval_limit_opt.py new file mode 100644 index 0000000000..fef6e9facd --- /dev/null +++ b/tests/system-test/2-query/interval_limit_opt.py @@ -0,0 +1,266 @@ +import taos +import sys +import time +import socket +import os +import threading +import math + +from util.log import * +from util.sql import * +from util.cases import * +from util.dnodes import * +from util.common import * +# from tmqCommon import * + +class TDTestCase: + def __init__(self): + self.vgroups = 4 + self.ctbNum = 10 + self.rowsPerTbl = 10000 + self.duraion = '1h' + + def init(self, conn, logSql, replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug(f"start to excute {__file__}") + tdSql.init(conn.cursor(), False) + + def create_database(self,tsql, dbName,dropFlag=1,vgroups=2,replica=1, duration:str='1d'): + if dropFlag == 1: + tsql.execute("drop database if exists %s"%(dbName)) + + tsql.execute("create database if not exists %s vgroups %d replica %d duration %s"%(dbName, vgroups, replica, duration)) + tdLog.debug("complete to create database %s"%(dbName)) + return + + def create_stable(self,tsql, paraDict): + colString = tdCom.gen_column_type_str(colname_prefix=paraDict["colPrefix"], column_elm_list=paraDict["colSchema"]) + tagString = tdCom.gen_tag_type_str(tagname_prefix=paraDict["tagPrefix"], tag_elm_list=paraDict["tagSchema"]) + sqlString = f"create table if not exists %s.%s (%s) tags (%s)"%(paraDict["dbName"], paraDict["stbName"], colString, tagString) + tdLog.debug("%s"%(sqlString)) + tsql.execute(sqlString) + return + + def create_ctable(self,tsql=None, dbName='dbx',stbName='stb',ctbPrefix='ctb',ctbNum=1,ctbStartIdx=0): + for i in range(ctbNum): + sqlString = "create table %s.%s%d using %s.%s tags(%d, 'tb%d', 'tb%d', %d, %d, %d)" % \ + (dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,(i+ctbStartIdx) % 5,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx) + tsql.execute(sqlString) + + 
tdLog.debug("complete to create %d child tables by %s.%s" %(ctbNum, dbName, stbName)) + return + + def insert_data(self,tsql,dbName,ctbPrefix,ctbNum,rowsPerTbl,batchNum,startTs,tsStep): + tdLog.debug("start to insert data ............") + tsql.execute("use %s" %dbName) + pre_insert = "insert into " + sql = pre_insert + + for i in range(ctbNum): + rowsBatched = 0 + sql += " %s%d values "%(ctbPrefix,i) + for j in range(rowsPerTbl): + if (i < ctbNum/2): + sql += "(%d, %d, %d, %d,%d,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10, j%10, j%10) + else: + sql += "(%d, %d, NULL, %d,NULL,%d,%d,true,'binary%d', 'nchar%d') "%(startTs + j*tsStep, j%10, j%10, j%10, j%10, j%10, j%10) + rowsBatched += 1 + if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)): + tsql.execute(sql) + rowsBatched = 0 + if j < rowsPerTbl - 1: + sql = "insert into %s%d values " %(ctbPrefix,i) + else: + sql = "insert into " + if sql != pre_insert: + tsql.execute(sql) + tdLog.debug("insert data ............ 
[OK]") + return + + def prepareTestEnv(self): + tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ") + paraDict = {'dbName': 'test', + 'dropFlag': 1, + 'vgroups': 2, + 'stbName': 'meters', + 'colPrefix': 'c', + 'tagPrefix': 't', + 'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'FLOAT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'smallint', 'count':1},{'type': 'tinyint', 'count':1},{'type': 'bool', 'count':1},{'type': 'binary', 'len':10, 'count':1},{'type': 'nchar', 'len':10, 'count':1}], + 'tagSchema': [{'type': 'INT', 'count':1},{'type': 'nchar', 'len':20, 'count':1},{'type': 'binary', 'len':20, 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'smallint', 'count':1},{'type': 'DOUBLE', 'count':1}], + 'ctbPrefix': 't', + 'ctbStartIdx': 0, + 'ctbNum': 100, + 'rowsPerTbl': 10000, + 'batchNum': 3000, + 'startTs': 1537146000000, + 'tsStep': 600000} + + paraDict['vgroups'] = self.vgroups + paraDict['ctbNum'] = self.ctbNum + paraDict['rowsPerTbl'] = self.rowsPerTbl + + tdLog.info("create database") + self.create_database(tsql=tdSql, dbName=paraDict["dbName"], dropFlag=paraDict["dropFlag"], vgroups=paraDict["vgroups"], replica=self.replicaVar, duration=self.duraion) + + tdLog.info("create stb") + self.create_stable(tsql=tdSql, paraDict=paraDict) + + tdLog.info("create child tables") + self.create_ctable(tsql=tdSql, dbName=paraDict["dbName"], \ + stbName=paraDict["stbName"],ctbPrefix=paraDict["ctbPrefix"],\ + ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict["ctbStartIdx"]) + self.insert_data(tsql=tdSql, dbName=paraDict["dbName"],\ + ctbPrefix=paraDict["ctbPrefix"],ctbNum=paraDict["ctbNum"],\ + rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],\ + startTs=paraDict["startTs"],tsStep=paraDict["tsStep"]) + return + + def check_first_rows(self, all_rows, limited_rows, offset: int = 0): + for i in range(0, len(limited_rows) - 1): + if limited_rows[i] != all_rows[i + offset]: + 
tdLog.info("row: %d, row in all: %s" % (i+offset+1, str(all_rows[i+offset]))) + tdLog.info("row: %d, row in limted: %s" % (i+1, str(limited_rows[i]))) + tdLog.exit("row data check failed") + tdLog.info("all rows are the same as query without limit..") + + def query_and_check_with_slimit(self, sql: str, max_limit: int, step: int, offset: int = 0): + self.query_and_check_with_limit(sql, max_limit, step, offset, ' slimit ') + + def query_and_check_with_limit(self, sql: str, max_limit: int, step: int, offset: int = 0, limit_str: str = ' limit '): + for limit in range(0, max_limit, step): + limited_sql = sql + limit_str + str(offset) + "," + str(limit) + tdLog.info("query with sql: %s " % (sql) + limit_str + " %d,%d" % (offset, limit)) + all_rows = tdSql.getResult(sql) + limited_rows = tdSql.getResult(limited_sql) + tdLog.info("all rows: %d, limited rows: %d" % (len(all_rows), len(limited_rows))) + if limit_str == ' limit ': + if limit + offset <= len(all_rows) and len(limited_rows) != limit: + tdLog.exit("limited sql has less rows than limit value which is not right, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + elif limit + offset > len(all_rows) and offset < len(all_rows) and offset + len(limited_rows) != len(all_rows): + tdLog.exit("limited sql has less rows than all_rows which is not right, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + elif offset >= len(all_rows) and len(limited_rows) != 0: + tdLog.exit("limited rows should be zero, \ + limit: %d, limited_rows: %d, all_rows: %d, offset: %d" % (limit, len(limited_rows), len(all_rows), offset)) + + self.check_first_rows(all_rows, limited_rows, offset) + + def test_interval_limit_asc(self, offset: int = 0): + sqls = ["select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters 
interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from meters interval(1d) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), first(ts) from t1 interval(1d) "] + for sql in sqls: + self.query_and_check_with_limit(sql, 5000, 500, offset) + + def test_interval_limit_desc(self, offset: int = 0): + sqls = ["select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from meters interval(1d) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1s) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1m) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1h) ", + "select _wstart, _wend, count(*), sum(c1), avg(c2), last(ts) from t1 interval(1d) "] + for sql in sqls: + self.query_and_check_with_limit(sql, 5000, 500, offset) + + def test_interval_limit_offset(self): + for offset in range(0, 1000, 500): + self.test_interval_limit_asc(offset) + self.test_interval_limit_desc(offset) + self.test_interval_fill_limit(offset) + self.test_interval_order_by_limit(offset) + self.test_interval_partition_by_slimit(offset) + + def test_interval_fill_limit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 
09:30:00.000' interval(1s) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1m) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1h) fill(linear)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-09-17 09:30:00.000' interval(1d) fill(linear)" + ] + for sql in sqls: + self.query_and_check_with_limit(sql, 5000, 1000, offset) + + def test_interval_order_by_limit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by b", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), last(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by count(*), sum(c1), a", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) order by a, count(*), sum(c1)", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by b", + "select _wstart as a, 
_wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a desc", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by count(*), sum(c1), a", + "select _wstart as a, _wend as b, count(*), sum(c1), avg(c2), first(ts) from meters \ + where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' interval(1m) fill(linear) order by a, count(*), sum(c1)", + ] + for sql in sqls: + self.query_and_check_with_limit(sql, 6000, 2000, offset) + + def test_interval_partition_by_slimit(self, offset: int = 0): + sqls = [ + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1m)", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by t1 interval(1h)", + "select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts) from meters " + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m)", + ] + for sql in sqls: + self.query_and_check_with_slimit(sql, 10, 2, offset) + + def test_interval_partition_by_slimit_limit(self): + sql = "select * from (select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts),c3 from meters " \ + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m) slimit 10 limit 2) order by c3 asc" + tdSql.query(sql) + 
tdSql.checkRows(20) + tdSql.checkData(0, 4, 0) + tdSql.checkData(1, 4, 0) + tdSql.checkData(2, 4, 1) + tdSql.checkData(3, 4, 1) + tdSql.checkData(18, 4, 9) + tdSql.checkData(19, 4, 9) + + sql = "select * from (select _wstart as a, _wend as b, count(*), sum(c1), last(c2), first(ts),c3 from meters " \ + "where ts >= '2018-09-17 09:00:00.000' and ts <= '2018-10-17 09:30:00.000' partition by c3 interval(1m) slimit 2,2 limit 2) order by c3 asc" + tdSql.query(sql) + tdSql.checkRows(4) + tdSql.checkData(0, 4, 2) + tdSql.checkData(1, 4, 2) + tdSql.checkData(2, 4, 9) + tdSql.checkData(3, 4, 9) + + def run(self): + self.prepareTestEnv() + self.test_interval_limit_offset() + self.test_interval_partition_by_slimit_limit() + + def stop(self): + tdSql.close() + tdLog.success(f"{__file__} successfully executed") + +event = threading.Event() + +tdCases.addLinux(__file__, TDTestCase()) +tdCases.addWindows(__file__, TDTestCase()) From f7a2e0cdab7e44e12476af7a87d4ebe6777f6ab7 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 4 Aug 2023 15:21:23 +0800 Subject: [PATCH 053/123] fix: ins_indexes vgroup_id using null value instead of -1 for invalid value --- source/dnode/mnode/impl/src/mndIndex.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndIndex.c b/source/dnode/mnode/impl/src/mndIndex.c index efaff7ffc4..2157804559 100644 --- a/source/dnode/mnode/impl/src/mndIndex.c +++ b/source/dnode/mnode/impl/src/mndIndex.c @@ -515,7 +515,6 @@ int32_t mndRetrieveTagIdx(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, i if (pDb == NULL) return 0; } SSmaAndTagIter *pIter = pShow->pIter; - int invalid = -1; while (numOfRows < rows) { pIter->pIdxIter = sdbFetch(pSdb, SDB_IDX, pIter->pIdxIter, (void **)&pIdx); if (pIter->pIdxIter == NULL) break; @@ -552,7 +551,7 @@ int32_t mndRetrieveTagIdx(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, i pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); - colDataSetVal(pColInfo, numOfRows, (const char 
*)&invalid, false); + colDataSetVal(pColInfo, numOfRows, NULL, true); pColInfo = taosArrayGet(pBlock->pDataBlock, cols++); colDataSetVal(pColInfo, numOfRows, (const char *)&pIdx->createdTime, false); From a4da6e9a883d6879fda85c8626cab869fdf55108 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 4 Aug 2023 15:59:34 +0800 Subject: [PATCH 054/123] fix mem leak --- source/libs/stream/src/streamMeta.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 57a097869b..7886091401 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -458,6 +458,7 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { tsDataDir); return -1; } + tDecoderClear(&decoder); if (pTask->status.taskStatus == TASK_STATUS__DROPPING) { int32_t taskId = pTask->id.taskId; From 9f3af66ae06501db3bcd7e3de60d4f0182035005 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 4 Aug 2023 16:03:06 +0800 Subject: [PATCH 055/123] fix: fix tsort crash caused by not enough disk space --- source/libs/executor/src/tsort.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/tsort.c b/source/libs/executor/src/tsort.c index 0a8d7ee376..7226e98323 100644 --- a/source/libs/executor/src/tsort.c +++ b/source/libs/executor/src/tsort.c @@ -995,7 +995,12 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) { SArray* aExtSrc = taosArrayInit(nSrc, POINTER_BYTES); size_t maxBufSize = pHandle->numOfPages * pHandle->pageSize; - createPageBuf(pHandle); + + int32_t code = createPageBuf(pHandle); + if (code != TSDB_CODE_SUCCESS) { + taosArrayDestroy(aExtSrc); + return code; + } SSortSource* pSrc = taosArrayGetP(pHandle->pOrderedSource, 0); int32_t szSort = 0; @@ -1070,7 +1075,7 @@ static int32_t createBlocksMergeSortInitialSources(SSortHandle* pHandle) { taosArrayDestroy(aExtSrc); pHandle->type = SORT_SINGLESOURCE_SORT; - return 0; + return 
TSDB_CODE_SUCCESS; } static int32_t createBlocksQuickSortInitialSources(SSortHandle* pHandle) { From 10da0ddfb544b10a11a828c81eeb2243220158d2 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 4 Aug 2023 16:20:22 +0800 Subject: [PATCH 056/123] fix mem leak --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 1f0dd3bf26..8715a0d13b 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -160,7 +160,7 @@ ,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py ,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py ,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py -,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5 +#,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5 ,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3 From 944c1f68d2d284047412c59b27501b0d48db54e6 Mon Sep 17 00:00:00 2001 From: liuyao <54liuyao@163.com> Date: Thu, 3 Aug 2023 18:09:36 +0800 Subject: [PATCH 057/123] delete atomic_exchange_64 --- source/dnode/mnode/impl/src/mndStream.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 06ab1bb638..af814b1b27 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -198,12 +198,13 @@ static int32_t mndStreamActionDelete(SSdb *pSdb, SStreamObj *pStream) { static int32_t mndStreamActionUpdate(SSdb *pSdb, SStreamObj *pOldStream, SStreamObj *pNewStream) { mTrace("stream:%s, perform update action", pOldStream->name); - atomic_exchange_64(&pOldStream->updateTime, pNewStream->updateTime); + atomic_exchange_32(&pOldStream->version, 
pNewStream->version); taosWLockLatch(&pOldStream->lock); pOldStream->status = pNewStream->status; + pOldStream->updateTime = pNewStream->updateTime; taosWUnLockLatch(&pOldStream->lock); return 0; @@ -520,7 +521,6 @@ int32_t mndPersistDropStreamLog(SMnode *pMnode, STrans *pTrans, SStreamObj *pStr SSdbRaw *pCommitRaw = mndStreamActionEncode(pStream); if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); - mndTransDrop(pTrans); return -1; } @@ -537,7 +537,6 @@ static int32_t mndSetStreamRecover(SMnode *pMnode, STrans *pTrans, const SStream if (pCommitRaw == NULL) return -1; if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); - mndTransDrop(pTrans); return -1; } (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); @@ -1409,7 +1408,6 @@ static int32_t mndPersistStreamLog(STrans *pTrans, const SStreamObj *pStream, in if (pCommitRaw == NULL) return -1; if (mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) { mError("stream trans:%d, failed to append commit log since %s", pTrans->id, terrstr()); - mndTransDrop(pTrans); return -1; } (void)sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY); @@ -1431,7 +1429,6 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { if (pStream == NULL) { if (pauseReq.igNotExists) { mInfo("stream:%s, not exist, if exist is set", pauseReq.name); - sdbRelease(pMnode->pSdb, pStream); return 0; } else { terrno = TSDB_CODE_MND_STREAM_NOT_EXIST; @@ -1440,6 +1437,7 @@ static int32_t mndProcessPauseStreamReq(SRpcMsg *pReq) { } if (pStream->status == STREAM_STATUS__PAUSE) { + sdbRelease(pMnode->pSdb, pStream); return 0; } From d508fad945713602dca5fee0e5ba0e9acf2528e3 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Fri, 4 Aug 2023 17:02:16 +0800 Subject: [PATCH 058/123] open split vgroup case --- tests/parallel_test/cases.task | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 747eb909a0..fb67ee51cd 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -161,7 +161,7 @@ ,,n,system-test,python3 ./test.py -f 0-others/tag_index_basic.py ,,n,system-test,python3 ./test.py -f 0-others/udfpy_main.py ,,n,system-test,python3 ./test.py -N 3 -f 0-others/walRetention.py -#,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5 +,,n,system-test,python3 ./test.py -f 0-others/splitVGroup.py -N 5 ,,n,system-test,python3 ./test.py -f 0-others/timeRangeWise.py -N 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_database.py ,,y,system-test,./pytest.sh python3 ./test.py -f 1-insert/alter_replica.py -N 3 From 84bf3e591fc97efef7e8057243446d6069644e73 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Fri, 4 Aug 2023 17:08:10 +0800 Subject: [PATCH 059/123] fix test cases --- tests/system-test/0-others/show_tag_index.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/system-test/0-others/show_tag_index.py b/tests/system-test/0-others/show_tag_index.py index 6c19dbce0d..663426b7ff 100644 --- a/tests/system-test/0-others/show_tag_index.py +++ b/tests/system-test/0-others/show_tag_index.py @@ -64,7 +64,7 @@ class TDTestCase: tdSql.checkData(0, 0, 'idx1') tdSql.checkData(0, 1, 'db') tdSql.checkData(0, 2, 'stb') - tdSql.checkData(0, 3, -1) + tdSql.checkData(0, 3, None) tdSql.checkData(0, 5, 't1') tdSql.checkData(0, 6, 'tag_index') From 5154d0e1a3a23eb79b2446e02e428b36cdbdfccc Mon Sep 17 00:00:00 2001 From: Shungang Li Date: Fri, 4 Aug 2023 17:24:35 +0800 Subject: [PATCH 060/123] fix: alter ttlChangeOnWrite note info --- source/dnode/mnode/impl/src/mndDnode.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c index 1f566b14c7..234e81a670 100644 --- a/source/dnode/mnode/impl/src/mndDnode.c 
+++ b/source/dnode/mnode/impl/src/mndDnode.c @@ -41,7 +41,7 @@ static const char *offlineReason[] = { "timezone not match", "locale not match", "charset not match", - "ttl change on write not match" + "ttlChangeOnWrite not match", "unknown", }; From 314c749965d6f0a5baaf52b1f6f336065b64ab46 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 5 Aug 2023 20:58:49 +0800 Subject: [PATCH 061/123] docs: supplement r language connnector note (#22340) * docs: add r language to connector * docs: add r lang english version in connector * docs: fix include path and note format * Update 60-r-lang.mdx minor changes * docs: supplement r language note --------- Co-authored-by: danielclow <106956386+danielclow@users.noreply.github.com> --- docs/en/14-reference/03-connector/60-r-lang.mdx | 5 +++-- docs/examples/R/connect_rest.r | 4 ++++ docs/zh/08-connector/43-r-lang.mdx | 5 +++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/docs/en/14-reference/03-connector/60-r-lang.mdx b/docs/en/14-reference/03-connector/60-r-lang.mdx index 852b2022a5..f1cbb89f7b 100644 --- a/docs/en/14-reference/03-connector/60-r-lang.mdx +++ b/docs/en/14-reference/03-connector/60-r-lang.mdx @@ -24,8 +24,9 @@ install.packages("RJDBC", repos='http://cran.us.r-project.org') ``` :::note -On Linux systems, installing the RJDBC package may require installing the necessary components for compilation. For example, on Ubuntu, you can execute the command ``apt install -y libbz2-dev libpcre2-dev libicu-dev`` to install the required components. -On Windows systems, you need to set the **JAVA_HOME** environment variable. +1. The default R language package version 4.2 which shipped with Ubuntu might lead unresponsive bug. Please install latest version of R language package from the [official website](https://www.r-project.org/). +2. On Linux systems, installing the RJDBC package may require installing the necessary components for compilation. 
For example, on Ubuntu, you can execute the command ``apt install -y libbz2-dev libpcre2-dev libicu-dev`` to install the required components. +3. On Windows systems, you need to set the **JAVA_HOME** environment variable. ::: 3. Download the TDengine JDBC driver: Visit the Maven website and download the TDengine JDBC driver (taos-jdbcdriver-X.X.X-dist.jar) to your local machine. diff --git a/docs/examples/R/connect_rest.r b/docs/examples/R/connect_rest.r index bc5da3c15a..a5221d2c3b 100644 --- a/docs/examples/R/connect_rest.r +++ b/docs/examples/R/connect_rest.r @@ -12,5 +12,9 @@ driver_path = args[1] # path to jdbc-driver for example: "/root/taos-jdbcdriver- driver = JDBC("com.taosdata.jdbc.rs.RestfulDriver", driver_path) conn = dbConnect(driver, "jdbc:TAOS-RS://localhost:6041?user=root&password=taosdata") dbGetQuery(conn, "SELECT server_version()") +dbSendUpdate(conn, "create database if not exists rtest") +dbSendUpdate(conn, "create table if not exists rtest.test (ts timestamp, current float, voltage int, devname varchar(20))") +dbSendUpdate(conn, "insert into rtest.test values (now, 1.2, 220, 'test')") +dbGetQuery(conn, "select * from rtest.test") dbDisconnect(conn) # ANCHOR_END: demo diff --git a/docs/zh/08-connector/43-r-lang.mdx b/docs/zh/08-connector/43-r-lang.mdx index 3a4ed39748..a181f68aba 100644 --- a/docs/zh/08-connector/43-r-lang.mdx +++ b/docs/zh/08-connector/43-r-lang.mdx @@ -24,8 +24,9 @@ install.packages("RJDBC", repos='http://cran.us.r-project.org') ``` :::note -在 Linux 上安装 RJDBC 包可能需要安装编译需要的组件,以 Ubuntu 为例执行 `apt install -y libbz2-dev libpcre2-dev libicu-dev` 命令安装。 -在 Windows 系统上需要设置 JAVA_HOME 环境变量。 +1. Ubuntu 系统自带的 R 语言软件版本 4.2 在调用 RJDBC 库会产生无响应 bug,请安装 R 语言[官网](https://www.r-project.org/)的安装包。 +2. 在 Linux 上安装 RJDBC 包可能需要安装编译需要的组件,以 Ubuntu 为例执行 `apt install -y libbz2-dev libpcre2-dev libicu-dev` 命令安装。 +3. 在 Windows 系统上需要设置 JAVA_HOME 环境变量。 ::: 3. 
下载 TDengine JDBC 驱动程序:访问 maven.org 网站,下载 TDengine JDBC 驱动程序(taos-jdbcdriver-X.X.X-dist.jar)。 From 4b137f1ca0e65c7d583d8515fb9aa8f4a0d817cc Mon Sep 17 00:00:00 2001 From: jiacy-jcy <714897623@qq.com> Date: Sun, 6 Aug 2023 23:32:54 +0800 Subject: [PATCH 062/123] fix: mktime on windows platform --- source/os/src/osTime.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index 39d1de0437..a9965c57c2 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -368,7 +368,7 @@ int32_t taosGetTimeOfDay(struct timeval *tv) { time_t taosTime(time_t *t) { return time(t); } time_t taosMktime(struct tm *timep) { -#ifdef WINDOWS +#ifdef WINDOWS_STASH struct tm tm1 = {0}; LARGE_INTEGER t; FILETIME f; From 0e4f1942f1933dbe85c895698012cfab13ec8805 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Mon, 7 Aug 2023 11:13:52 +0800 Subject: [PATCH 063/123] fix: resolve Vgroup refcount leak in drop topic --- source/dnode/mnode/impl/src/mndTopic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/dnode/mnode/impl/src/mndTopic.c b/source/dnode/mnode/impl/src/mndTopic.c index 85e6f1caf6..621a80338d 100644 --- a/source/dnode/mnode/impl/src/mndTopic.c +++ b/source/dnode/mnode/impl/src/mndTopic.c @@ -799,6 +799,7 @@ static int32_t mndProcessDropTopicReq(SRpcMsg *pReq) { mndTransDrop(pTrans); return -1; } + sdbRelease(pSdb, pVgroup); } } From 1d4abc33e529047ebfcea27b961d2e6cd4b59b3a Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Mon, 7 Aug 2023 11:18:47 +0800 Subject: [PATCH 064/123] update packaging script --- packaging/tools/install.sh | 54 +++++++++++++++++++++----------------- packaging/tools/makepkg.sh | 5 ++++ 2 files changed, 35 insertions(+), 24 deletions(-) diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 2741291f34..f9a11f5540 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -613,6 +613,11 @@ function install_examples() { fi } +function install_web() { 
+ if [ -d "${script_dir}/share" ]; then + ${csudo}cp -rf ${script_dir}/share/* ${install_main_dir}/share > /dev/null 2>&1 ||: + fi +} function clean_service_on_sysvinit() { if ps aux | grep -v grep | grep ${serverName2} &>/dev/null; then @@ -888,6 +893,7 @@ function updateProduct() { fi install_examples + install_web if [ -z $1 ]; then install_bin install_service @@ -898,29 +904,29 @@ function updateProduct() { openresty_work=false echo - echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile2}" + echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t: edit ${cfg_install_dir}/${configFile2}" [ -f ${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}: edit ${configDir}/${clientName2}adapter.toml" + echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml" if ((${service_mod} == 0)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName2}${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}systemctl start ${serverName2}${NC}" [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}systemctl start ${clientName2}adapter ${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}systemctl start ${clientName2}adapter ${NC}" elif ((${service_mod} == 1)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName2} start${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}service ${serverName2} start${NC}" [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}service 
${clientName2}adapter start${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}service ${clientName2}adapter start${NC}" else - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ./${serverName2}${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ./${serverName2}${NC}" [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter ${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${clientName2}adapter ${NC}" fi - echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper ${NC}" + echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t: sudo systemctl enable ${clientName2}keeper ${NC}" if [ ${openresty_work} = 'true' ]; then - echo -e "${GREEN_DARK}To access ${productName2} ${NC}: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}" + echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell OR from ${GREEN_UNDERLINE}http://127.0.0.1:${web_port}${NC}" else - echo -e "${GREEN_DARK}To access ${productName2} ${NC}: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell${NC}" + echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: use ${GREEN_UNDERLINE}${clientName2} -h $serverFqdn${NC} in shell${NC}" fi if ((${prompt_force} == 1)); then @@ -968,7 +974,7 @@ function installProduct() { install_connector fi install_examples - + install_web if [ -z $1 ]; then # install service and client # For installing new install_bin @@ -982,24 +988,24 @@ function installProduct() { # Ask if to start the service echo - echo -e "${GREEN_DARK}To configure ${productName2} ${NC}: edit ${cfg_install_dir}/${configFile2}" + echo -e "${GREEN_DARK}To configure ${productName2} ${NC}\t: edit ${cfg_install_dir}/${configFile2}" [ -f 
${configDir}/${clientName2}adapter.toml ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}: edit ${configDir}/${clientName2}adapter.toml" + echo -e "${GREEN_DARK}To configure ${clientName2}Adapter ${NC}\t: edit ${configDir}/${clientName2}adapter.toml" if ((${service_mod} == 0)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}systemctl start ${serverName2}${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}systemctl start ${serverName2}${NC}" [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}systemctl start ${clientName2}adapter ${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}systemctl start ${clientName2}adapter ${NC}" elif ((${service_mod} == 1)); then - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${csudo}service ${serverName2} start${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${csudo}service ${serverName2} start${NC}" [ -f ${service_config_dir}/${clientName2}adapter.service ] && [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${csudo}service ${clientName2}adapter start${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${csudo}service ${clientName2}adapter start${NC}" else - echo -e "${GREEN_DARK}To start ${productName2} ${NC}: ${serverName2}${NC}" + echo -e "${GREEN_DARK}To start ${productName2} ${NC}\t: ${serverName2}${NC}" [ -f ${installDir}/bin/${clientName2}adapter ] && \ - echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}: ${clientName2}adapter ${NC}" + echo -e "${GREEN_DARK}To start ${clientName2}Adapter ${NC}\t: ${clientName2}adapter ${NC}" fi - echo -e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}: sudo systemctl enable ${clientName2}keeper ${NC}" + echo 
-e "${GREEN_DARK}To enable ${clientName2}keeper ${NC}\t: sudo systemctl enable ${clientName2}keeper ${NC}" if [ ! -z "$firstEp" ]; then tmpFqdn=${firstEp%%:*} @@ -1010,14 +1016,14 @@ function installProduct() { tmpPort="" fi if [[ "$tmpPort" != "" ]]; then - echo -e "${GREEN_DARK}To access ${productName2} ${NC}: ${clientName2} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" + echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $tmpFqdn -P $tmpPort${GREEN_DARK} to login into cluster, then${NC}" else - echo -e "${GREEN_DARK}To access ${productName2} ${NC}: ${clientName2} -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" + echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $tmpFqdn${GREEN_DARK} to login into cluster, then${NC}" fi echo -e "${GREEN_DARK}execute ${NC}: create dnode 'newDnodeFQDN:port'; ${GREEN_DARK}to add this new node${NC}" echo elif [ ! -z "$serverFqdn" ]; then - echo -e "${GREEN_DARK}To access ${productName2} ${NC}: ${clientName2} -h $serverFqdn${GREEN_DARK} to login into ${productName2} server${NC}" + echo -e "${GREEN_DARK}To access ${productName2} ${NC}\t: ${clientName2} -h $serverFqdn${GREEN_DARK} to login into ${productName2} server${NC}" echo fi diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index a48d264d5d..ad64ca431e 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -319,6 +319,11 @@ if [[ $dbName == "taos" ]]; then mkdir -p ${install_dir}/examples/taosbenchmark-json && cp ${examples_dir}/../tools/taos-tools/example/* ${install_dir}/examples/taosbenchmark-json fi + if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then + mkdir -p ${install_dir}/share/ + cp -rf ${build_dir}/share/{etc,srv} ${install_dir}/share ||: + fi + fi # Copy driver From 773a9454d513f40ee2820a34cb0958ec4508518a Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 27 Jul 2023 09:07:13 +0800 Subject: [PATCH 065/123] enhance: subquery can use 
expr primary key +/- value as primary key --- source/libs/parser/src/parTranslater.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..554dc7cce8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -821,7 +821,19 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { FUNCTION_TYPE_IROWTS == pFunc->funcType) { return true; } - } + } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; + } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } From 3e7187b2229f5705429446a0d5d3e90f4a33258c Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 14:17:18 +0800 Subject: [PATCH 066/123] fix: add test case --- tests/parallel_test/cases.task | 1 + tests/script/tsim/query/join_pk.sim | 42 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/script/tsim/query/join_pk.sim diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 586425ec1d..7a1e8d61c8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -954,6 +954,7 @@ ,,n,script,./test.sh -f tsim/query/udfpy.sim ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim +,,y,script,./test.sh -f tsim/query/join_pk.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim new file mode 100644 index 0000000000..66bb20da24 --- /dev/null +++ b/tests/script/tsim/query/join_pk.sim @@ -0,0 
+1,42 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test; +sql use test; +sql create table st(ts timestamp, f int) tags(t int); +sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1) +sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3) +sql select * from (select _wstart - 1s as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +if $rows != 3 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi + +if $data21 != 1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi + +if $data13 != 1 then + return -1 +endi +if $data23 != 1 then + return -1 +endi +sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + From 6cccc155eb547336325f04c2acf611cef399e256 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 7 Aug 2023 13:39:30 +0800 Subject: [PATCH 067/123] enhance: enhance test case --- tests/script/tsim/query/join_pk.sim | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim index 66bb20da24..da5c13e9c0 100644 --- a/tests/script/tsim/query/join_pk.sim +++ b/tests/script/tsim/query/join_pk.sim @@ -38,5 +38,18 @@ sql select * from (select _wstart - 1d as ts, count(*) as 
num1 from st interval( sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +sql create table sst(ts timestamp, ts2 timestamp, f int) tags(t int); +sql insert into sct1 using sst tags(1) values('2023-08-07 13:30:56', '2023-08-07 13:30:56', 0)('2023-08-07 13:30:57', '2023-08-07 13:30:57', 1) +sql insert into sct2 using sst tags(2) values('2023-08-07 13:30:58', '2023-08-07 13:30:58', 2)('2023-08-07 13:30:59', '2023-08-07 13:30:59', 3) +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts-1s as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 4 then + return -1 +endi +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 3 then + return -1 +endi +sql_error select * from (select ts2 - 1s as jts from sst) as t1 inner join (select ts2 as jts from sst) as t2 on t1.jts = t2.jts + #system sh/exec.sh -n dnode1 -s stop -x SIGINT From 661f2aca9869df48a123bb42672862169616c713 Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 7 Aug 2023 14:13:44 +0800 Subject: [PATCH 068/123] fix: memory leak --- source/dnode/vnode/src/tsdb/tsdbFS2.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbFS2.c b/source/dnode/vnode/src/tsdb/tsdbFS2.c index 6e7595c6ef..afa294d3b0 100644 --- a/source/dnode/vnode/src/tsdb/tsdbFS2.c +++ b/source/dnode/vnode/src/tsdb/tsdbFS2.c @@ -780,19 +780,20 @@ static int32_t tsdbFSRunBgTask(void *arg) { return 0; } -static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), void (*free)(void *), - void *arg, int64_t *taskid) { 
+static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32_t (*run)(void *), + void (*destroy)(void *), void *arg, int64_t *taskid) { if (fs->stop) { + if (destroy) { + destroy(arg); + } return 0; // TODO: use a better error code } - // check if same task is on - // if (fs->bgTaskRunning && fs->bgTaskRunning->type == type) { - // return 0; - // } - for (STFSBgTask *task = fs->bgTaskQueue->next; task != fs->bgTaskQueue; task = task->next) { if (task->type == type) { + if (destroy) { + destroy(arg); + } return 0; } } @@ -804,7 +805,7 @@ static int32_t tsdbFSScheduleBgTaskImpl(STFileSystem *fs, EFSBgTaskT type, int32 task->type = type; task->run = run; - task->free = free; + task->free = destroy; task->arg = arg; task->scheduleTime = taosGetTimestampMs(); task->taskid = ++fs->taskid; From a545d45b5e9c85eb4e6d8f246d8c578d8b7cd86a Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Mon, 7 Aug 2023 16:12:03 +0800 Subject: [PATCH 069/123] fix: snapshot invalid read --- source/dnode/vnode/src/tsdb/tsdbSnapshot.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c index 011b9bd5a4..bdcf4a87c1 100644 --- a/source/dnode/vnode/src/tsdb/tsdbSnapshot.c +++ b/source/dnode/vnode/src/tsdb/tsdbSnapshot.c @@ -342,18 +342,18 @@ static int32_t tsdbSnapCmprTombData(STsdbSnapReader* reader, uint8_t** data) { int32_t code = 0; int32_t lino = 0; - int64_t size = sizeof(SSnapDataHdr); + int64_t size = 0; for (int32_t i = 0; i < ARRAY_SIZE(reader->tombBlock->dataArr); i++) { size += TARRAY2_DATA_LEN(reader->tombBlock->dataArr + i); } - data[0] = taosMemoryMalloc(size); + data[0] = taosMemoryMalloc(size + sizeof(SSnapDataHdr)); if (data[0] == NULL) { code = TSDB_CODE_OUT_OF_MEMORY; TSDB_CHECK_CODE(code, lino, _exit); } - SSnapDataHdr* hdr = (SSnapDataHdr*)data[0]; + SSnapDataHdr* hdr = (SSnapDataHdr*)(data[0]); hdr->type = SNAP_DATA_DEL; hdr->size = size; @@ 
-938,7 +938,7 @@ static int32_t tsdbSnapWriteDecmprTombBlock(SSnapDataHdr* hdr, STombBlock* tombB int32_t code = 0; int32_t lino = 0; - int64_t size = hdr->size - sizeof(*hdr); + int64_t size = hdr->size; ASSERT(size % TOMB_RECORD_ELEM_NUM == 0); size = size / TOMB_RECORD_ELEM_NUM; ASSERT(size % sizeof(int64_t) == 0); From 6fd2ae138e7ab2d67d6212d654ab2b9e98218244 Mon Sep 17 00:00:00 2001 From: jiacy-jcy <714897623@qq.com> Date: Mon, 7 Aug 2023 17:32:20 +0800 Subject: [PATCH 070/123] fix: taosMktime on windows platform --- include/os/osTime.h | 2 ++ source/common/src/ttime.c | 40 ------------------------------ source/os/src/osTime.c | 51 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 53 insertions(+), 40 deletions(-) diff --git a/include/os/osTime.h b/include/os/osTime.h index 51a285a139..87df3a2650 100644 --- a/include/os/osTime.h +++ b/include/os/osTime.h @@ -95,6 +95,8 @@ struct tm *taosLocalTime(const time_t *timep, struct tm *result, char *buf); struct tm *taosLocalTimeNolock(struct tm *result, const time_t *timep, int dst); time_t taosTime(time_t *t); time_t taosMktime(struct tm *timep); +int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour, + const uint32_t min, const uint32_t sec, int64_t time_zone); #ifdef __cplusplus } diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 7a5581efbe..e9313e0591 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -25,46 +25,6 @@ #include "tlog.h" -/* - * mktime64 - Converts date to seconds. - * Converts Gregorian date to seconds since 1970-01-01 00:00:00. - * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 - * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. - * - * [For the Julian calendar (which was used in Russia before 1917, - * Britain & colonies before 1752, anywhere else before 1582, - * and is still in use by some communities) leave out the - * -year/100+year/400 terms, and add 10.] 
- * - * This algorithm was first published by Gauss (I think). - * - * A leap second can be indicated by calling this function with sec as - * 60 (allowable under ISO 8601). The leap second is treated the same - * as the following second since they don't exist in UNIX time. - * - * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight - * tomorrow - (allowable under ISO 8601) is supported. - */ -static int64_t user_mktime64(const uint32_t year0, const uint32_t mon0, const uint32_t day, const uint32_t hour, - const uint32_t min, const uint32_t sec, int64_t time_zone) { - uint32_t mon = mon0, year = year0; - - /* 1..12 -> 11,12,1..10 */ - if (0 >= (int32_t)(mon -= 2)) { - mon += 12; /* Puts Feb last since it has leap day */ - year -= 1; - } - - // int64_t res = (((((int64_t) (year/4 - year/100 + year/400 + 367*mon/12 + day) + - // year*365 - 719499)*24 + hour)*60 + min)*60 + sec); - int64_t res; - res = 367 * ((int64_t)mon) / 12; - res += year / 4 - year / 100 + year / 400 + day + ((int64_t)year) * 365 - 719499; - res = res * 24; - res = ((res + hour) * 60 + min) * 60 + sec; - - return (res + time_zone); -} // ==== mktime() kernel code =================// static int64_t m_deltaUtc = 0; diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index 39d1de0437..0430dd70fc 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -367,8 +367,50 @@ int32_t taosGetTimeOfDay(struct timeval *tv) { time_t taosTime(time_t *t) { return time(t); } +/* + * mktime64 - Converts date to seconds. + * Converts Gregorian date to seconds since 1970-01-01 00:00:00. + * Assumes input in normal date format, i.e. 1980-12-31 23:59:59 + * => year=1980, mon=12, day=31, hour=23, min=59, sec=59. + * + * [For the Julian calendar (which was used in Russia before 1917, + * Britain & colonies before 1752, anywhere else before 1582, + * and is still in use by some communities) leave out the + * -year/100+year/400 terms, and add 10.] 
+ * + * This algorithm was first published by Gauss (I think). + * + * A leap second can be indicated by calling this function with sec as + * 60 (allowable under ISO 8601). The leap second is treated the same + * as the following second since they don't exist in UNIX time. + * + * An encoding of midnight at the end of the day as 24:00:00 - ie. midnight + * tomorrow - (allowable under ISO 8601) is supported. + */ +int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t day, const uint32_t hour, + const uint32_t min, const uint32_t sec, int64_t time_zone) { + uint32_t _mon = mon, _year = year; + + /* 1..12 -> 11,12,1..10 */ + if (0 >= (int32_t)(_mon -= 2)) { + _mon += 12; /* Puts Feb last since it has leap day */ + _year -= 1; + } + + // int64_t _res = (((((int64_t) (_year/4 - _year/100 + _year/400 + 367*_mon/12 + day) + + // _year*365 - 719499)*24 + hour)*60 + min)*60 + sec); + int64_t _res; + _res = 367 * ((int64_t)_mon) / 12; + _res += _year / 4 - _year / 100 + _year / 400 + day + ((int64_t)_year) * 365 - 719499; + _res = _res * 24; + _res = ((_res + hour) * 60 + min) * 60 + sec; + + return (_res + time_zone); +} + time_t taosMktime(struct tm *timep) { #ifdef WINDOWS +#if 0 struct tm tm1 = {0}; LARGE_INTEGER t; FILETIME f; @@ -405,6 +447,15 @@ time_t taosMktime(struct tm *timep) { t.QuadPart -= offset.QuadPart; return (time_t)(t.QuadPart / 10000000); +#else +#ifdef _MSC_VER +#if _MSC_VER >= 1900 + int64_t tz = _timezone; +#endif +#endif + return user_mktime64(timep->tm_year + 1900, timep->tm_mon + 1, timep->tm_mday, timep->tm_hour, timep->tm_min, + timep->tm_sec, tz); +#endif #else return mktime(timep); #endif From 59100a7251fa358c43cded44a1bf054ffc485e02 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 7 Aug 2023 17:52:43 +0800 Subject: [PATCH 071/123] enh: code optimize for mktime --- source/os/src/osTime.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index 
0430dd70fc..e758055529 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -399,13 +399,12 @@ int64_t user_mktime64(const uint32_t year, const uint32_t mon, const uint32_t da // int64_t _res = (((((int64_t) (_year/4 - _year/100 + _year/400 + 367*_mon/12 + day) + // _year*365 - 719499)*24 + hour)*60 + min)*60 + sec); - int64_t _res; - _res = 367 * ((int64_t)_mon) / 12; + int64_t _res = 367 * ((int64_t)_mon) / 12; _res += _year / 4 - _year / 100 + _year / 400 + day + ((int64_t)_year) * 365 - 719499; - _res = _res * 24; + _res *= 24; _res = ((_res + hour) * 60 + min) * 60 + sec; - return (_res + time_zone); + return _res + time_zone; } time_t taosMktime(struct tm *timep) { From a0c5d130ae0e9b1ede2d69842516acce05b83573 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Mon, 7 Aug 2023 18:32:58 +0800 Subject: [PATCH 072/123] test:repire test case for windows --- tests/pytest/util/autogen.py | 2 +- tests/script/win-test-file | 2 -- .../1-insert/rowlength64k_benchmark.py | 2 +- tests/system-test/2-query/columnLenUpdated.py | 2 +- tests/system-test/2-query/slimit.py | 2 +- tests/system-test/6-cluster/5dnode2mnode.py | 2 +- .../6-cluster/5dnode3mnodeAdd1Ddnoe.py | 2 +- .../system-test/6-cluster/5dnode3mnodeDrop.py | 2 +- .../6-cluster/5dnode3mnodeDropInsert.py | 2 +- .../6-cluster/5dnode3mnodeRecreateMnode.py | 2 +- .../5dnode3mnodeRestartDnodeInsertData.py | 2 +- ...5dnode3mnodeRestartDnodeInsertDataAsync.py | 2 +- .../5dnode3mnodeSep1VnodeStopDnodeCreateDb.py | 2 +- ...5dnode3mnodeSep1VnodeStopDnodeCreateStb.py | 2 +- ...dnode3mnodeSep1VnodeStopDnodeInsertData.py | 2 +- ...dnode3mnodeSep1VnodeStopDnodeModifyMeta.py | 2 +- ...5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py | 2 +- .../5dnode3mnodeSep1VnodeStopMnodeCreateDb.py | 2 +- ...ode3mnodeSep1VnodeStopMnodeCreateDbRep3.py | 2 +- ...5dnode3mnodeSep1VnodeStopMnodeCreateStb.py | 2 +- ...dnode3mnodeSep1VnodeStopMnodeModifyMeta.py | 2 +- .../5dnode3mnodeSep1VnodeStopVnodeCreateDb.py | 2 +- 
...5dnode3mnodeSep1VnodeStopVnodeCreateStb.py | 2 +- ...5dnode3mnodeSepVnodeStopDnodeCreateUser.py | 2 +- .../system-test/6-cluster/5dnode3mnodeStop.py | 2 +- .../6-cluster/5dnode3mnodeStop2Follower.py | 2 +- .../6-cluster/5dnode3mnodeStopConnect.py | 2 +- .../5dnode3mnodeStopFollowerLeader.py | 2 +- .../6-cluster/5dnode3mnodeStopInsert.py | 2 +- .../6-cluster/5dnode3mnodeStopLoop.py | 2 +- ...6dnode3mnodeInsertDataRebootAlterRep1-3.py | 2 +- ...eInsertDataRebootModifyMetaAlterRep1to3.py | 2 +- ...eInsertDataRebootModifyMetaAlterRep3to1.py | 2 +- ...dnode3mnodeInsertDatarRebootAlterRep1-3.py | 2 +- ...node3mnodeInsertLessDataAlterRep3to1to3.py | 2 +- .../6dnode3mnodeStopDnodeInsertDatatb.py | 2 +- .../4dnode1mnode_basic_createDb_replica1.py | 2 +- ...4dnode1mnode_basic_replica1_insertdatas.py | 2 +- ...mnode_basic_replica1_insertdatas_querys.py | 2 +- ...4dnode1mnode_basic_replica3_insertdatas.py | 2 +- ...lica3_insertdatas_force_stop_all_dnodes.py | 2 +- ...mnode_basic_replica3_insertdatas_querys.py | 2 +- ...sertdatas_querys_loop_restart_all_vnode.py | 2 +- ...nsertdatas_querys_loop_restart_follower.py | 2 +- ..._insertdatas_querys_loop_restart_leader.py | 2 +- ...ic_replica3_insertdatas_stop_all_dnodes.py | 2 +- ...replica3_insertdatas_stop_follower_sync.py | 2 +- ...plica3_insertdatas_stop_follower_unsync.py | 2 +- ...rtdatas_stop_follower_unsync_force_stop.py | 2 +- ..._basic_replica3_insertdatas_stop_leader.py | 2 +- ...ca3_insertdatas_stop_leader_forece_stop.py | 2 +- ...asic_replica3_mnode3_insertdatas_querys.py | 2 +- ...basic_replica3_querydatas_stop_follower.py | 2 +- ...ca3_querydatas_stop_follower_force_stop.py | 2 +- ...e_basic_replica3_querydatas_stop_leader.py | 2 +- ...lica3_querydatas_stop_leader_force_stop.py | 2 +- .../4dnode1mnode_basic_replica3_vgroups.py | 2 +- ...de1mnode_basic_replica3_vgroups_stopOne.py | 2 +- tests/system-test/win-test-file | 25 ++++++++++++++++--- 59 files changed, 78 insertions(+), 63 deletions(-) diff --git 
a/tests/pytest/util/autogen.py b/tests/pytest/util/autogen.py index d5f1faa3f7..6f5c89d15b 100644 --- a/tests/pytest/util/autogen.py +++ b/tests/pytest/util/autogen.py @@ -17,7 +17,7 @@ class AutoGen: def __init__(self): self.ts = 1600000000000 self.batch_size = 100 - seed = time.clock_gettime(time.CLOCK_REALTIME) + seed = time.time() % 10000 random.seed(seed) # set start ts diff --git a/tests/script/win-test-file b/tests/script/win-test-file index d394ce6876..dc3093e0ea 100644 --- a/tests/script/win-test-file +++ b/tests/script/win-test-file @@ -26,10 +26,8 @@ ./test.sh -f tsim/user/basic.sim ./test.sh -f tsim/user/password.sim ./test.sh -f tsim/user/privilege_db.sim -./test.sh -f tsim/user/privilege_sysinfo.sim ./test.sh -f tsim/user/privilege_topic.sim ./test.sh -f tsim/user/privilege_table.sim -./test.sh -f tsim/user/privilege_create_db.sim ./test.sh -f tsim/db/alter_option.sim ./test.sh -f tsim/db/alter_replica_31.sim ./test.sh -f tsim/db/basic1.sim diff --git a/tests/system-test/1-insert/rowlength64k_benchmark.py b/tests/system-test/1-insert/rowlength64k_benchmark.py index e95f35fc7f..98a72e1166 100755 --- a/tests/system-test/1-insert/rowlength64k_benchmark.py +++ b/tests/system-test/1-insert/rowlength64k_benchmark.py @@ -48,7 +48,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root)-len("/build/bin")] diff --git a/tests/system-test/2-query/columnLenUpdated.py b/tests/system-test/2-query/columnLenUpdated.py index 93d9a492f9..4c92236fca 100644 --- a/tests/system-test/2-query/columnLenUpdated.py +++ b/tests/system-test/2-query/columnLenUpdated.py @@ -26,7 +26,7 @@ def taos_command (buildPath, key, value, expectString, sqlString=''): taosCmd = buildPath + '/build/bin/taos ' cfgPath = buildPath + "/../sim/psim/cfg" 
- taosCmd = taosCmd + ' -c' + cfgPath + ' -' + key + taosCmd = taosCmd + ' -c ' + cfgPath + ' -' + key if len(value) != 0: taosCmd = taosCmd + ' ' + value diff --git a/tests/system-test/2-query/slimit.py b/tests/system-test/2-query/slimit.py index 48209da59a..b5aa187980 100644 --- a/tests/system-test/2-query/slimit.py +++ b/tests/system-test/2-query/slimit.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root)-len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode2mnode.py b/tests/system-test/6-cluster/5dnode2mnode.py index 6054ef69f8..ca7d6a58d5 100644 --- a/tests/system-test/6-cluster/5dnode2mnode.py +++ b/tests/system-test/6-cluster/5dnode2mnode.py @@ -35,7 +35,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py index 8a5f43e1f9..223b91be6f 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py +++ b/tests/system-test/6-cluster/5dnode3mnodeAdd1Ddnoe.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeDrop.py 
b/tests/system-test/6-cluster/5dnode3mnodeDrop.py index c4b30a16f7..aefa7a09f8 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDrop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDrop.py @@ -47,7 +47,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py index 01d08ee839..db183d80c1 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeDropInsert.py @@ -48,7 +48,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py index d75cd4923c..650bc347aa 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRecreateMnode.py @@ -42,7 +42,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py index 392b0d7764..b96a9b8175 100644 --- 
a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertData.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py index 04c69ad618..da16d39ac2 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py index b55c689eee..3a972ff4e9 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateDb.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py 
b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py index 97e6195037..2a8f4fd526 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py index 296e9daeca..7eaf756737 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeInsertData.py @@ -42,7 +42,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py index 06d626b77c..c1c47fd55c 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeModifyMeta.py @@ -42,7 +42,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git 
a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py index 9d99980b88..27b15d4c99 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeRCreateDb.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py index 15d18d5090..8fe3b24d3b 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDb.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py index 98842e3358..8d483919a5 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateDbRep3.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in 
rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py index cb16059524..9395dd2a2b 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeModifyMeta.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeModifyMeta.py index 3e4dc2483f..0522a72d38 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeModifyMeta.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopMnodeModifyMeta.py @@ -42,7 +42,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py index 21caf23ea6..e39855b42e 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateDb.py @@ -42,7 +42,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath 
= os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py index 84236529d1..2fb196635f 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py b/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py index 94e02b77b3..bcc7edf5cb 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py +++ b/tests/system-test/6-cluster/5dnode3mnodeSepVnodeStopDnodeCreateUser.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop.py b/tests/system-test/6-cluster/5dnode3mnodeStop.py index 522ba4c2fc..8e76033c27 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop.py @@ -39,7 +39,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = 
os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py index 0596dd84ed..e89df638d0 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStop2Follower.py @@ -39,7 +39,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py b/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py index 2c735ed9b6..a87cd23b38 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopConnect.py @@ -39,7 +39,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py b/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py index d7176e142f..a8ebfbace5 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopFollowerLeader.py @@ -39,7 +39,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = 
root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py index d08ce79a9b..9d2430506f 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopInsert.py @@ -47,7 +47,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py index 52d61fb529..11869f8ee4 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py +++ b/tests/system-test/6-cluster/5dnode3mnodeStopLoop.py @@ -39,7 +39,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py index aa3ed8e3fd..0d3b920bb4 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootAlterRep1-3.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - 
len("/build/bin")] diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep1to3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep1to3.py index 7d46b3143d..06636c1ae9 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep1to3.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep1to3.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep3to1.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep3to1.py index 5b5fb04969..40b2291548 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep3to1.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDataRebootModifyMetaAlterRep3to1.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py index aa3ed8e3fd..0d3b920bb4 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py +++ 
b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertDatarRebootAlterRep1-3.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py index 16ad3506c8..fb9872a8f6 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/manually-test/6dnode3mnodeStopDnodeInsertDatatb.py b/tests/system-test/6-cluster/manually-test/6dnode3mnodeStopDnodeInsertDatatb.py index ee48b973c9..2ada32e075 100644 --- a/tests/system-test/6-cluster/manually-test/6dnode3mnodeStopDnodeInsertDatatb.py +++ b/tests/system-test/6-cluster/manually-test/6dnode3mnodeStopDnodeInsertDatatb.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py index 139be74a08..52d675208b 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py @@ -34,7 +34,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py index 4a0522ad35..9cc97543ad 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py index 82ba256122..4ea00ff2e2 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not 
in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py index 3751391d65..51da6fc723 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py index 73153c5825..6e5043940d 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py @@ -49,7 +49,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py index 24b4ff63dd..a111e0bab5 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py @@ -41,7 +41,7 @@ 
class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py index 6ef239382b..66eca7143d 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py index 35ea3f392c..db9139dca2 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git 
a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py index ab5d05f362..4fc4507c3f 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py index fc6d3c0683..f06b539ff2 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py @@ -49,7 +49,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py index 6e9aacebc2..eb77c6d003 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py +++ 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py @@ -49,7 +49,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py index a55bc3c39f..9079bedb7c 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py @@ -49,7 +49,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py index dd8b6b374a..35cbceb268 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py @@ -49,7 +49,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if 
("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py index 0af157ebff..95b099b0a1 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py @@ -46,7 +46,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py index 124bf838bb..bf2ebadd06 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py @@ -46,7 +46,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py index 791b58d28d..25aba29235 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py +++ 
b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py @@ -41,7 +41,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py index 4fcfbfaf08..d29ab6b74e 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py @@ -51,7 +51,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py index 42d9e944f9..16ac90c31d 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py @@ -51,7 +51,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - 
len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py index 6b87bee5a3..a5f86a1e31 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py @@ -51,7 +51,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py index c53e909417..c272da0c2b 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py @@ -51,7 +51,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py index 53a9463d64..45ceb73059 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py @@ -40,7 +40,7 @@ class TDTestCase: projPath = 
selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py index ddb765085a..3f72f33951 100644 --- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py +++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py @@ -43,7 +43,7 @@ class TDTestCase: projPath = selfPath[:selfPath.find("tests")] for root, dirs, files in os.walk(projPath): - if ("taosd" in files): + if ("taosd" in files or "taosd.exe" in files): rootRealPath = os.path.dirname(os.path.realpath(root)) if ("packaging" not in rootRealPath): buildPath = root[:len(root) - len("/build/bin")] diff --git a/tests/system-test/win-test-file b/tests/system-test/win-test-file index 24972d388b..0f644666cb 100644 --- a/tests/system-test/win-test-file +++ b/tests/system-test/win-test-file @@ -25,6 +25,13 @@ python3 ./test.py -f 7-tmq/subscribeStb2.py python3 ./test.py -f 7-tmq/subscribeStb3.py python3 ./test.py -f 7-tmq/subscribeDb0.py -N 3 -n 3 python3 ./test.py -f 7-tmq/ins_topics_test.py +python3 ./test.py -f 7-tmq/tmqMaxTopic.py +python3 ./test.py -f 7-tmq/tmqParamsTest.py +python3 ./test.py -f 7-tmq/tmqClientConsLog.py +python3 ./test.py -f 7-tmq/tmqMaxGroupIds.py +python3 ./test.py -f 7-tmq/tmqConsumeDiscontinuousData.py +python3 ./test.py -f 7-tmq/tmqOffset.py +python3 ./test.py -f 7-tmq/tmqDropConsumer.py python3 ./test.py -f 1-insert/delete_stable.py python3 ./test.py -f 2-query/out_of_order.py -Q 3 python3 ./test.py -f 2-query/out_of_order.py @@ -85,6 +92,7 @@ python3 ./test.py -f 7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py python3 ./test.py -f 
7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py +python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py python3 ./test.py -f 7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py python3 ./test.py -f 7-tmq/tmqAutoCreateTbl.py python3 ./test.py -f 7-tmq/tmqDnodeRestart.py @@ -110,6 +118,8 @@ python3 ./test.py -f 7-tmq/tmq3mnodeSwitch.py -N 6 -M 3 -n 3 python3 ./test.py -f 99-TDcase/TD-19201.py python3 ./test.py -f 99-TDcase/TD-21561.py python3 ./test.py -f 99-TDcase/TS-3404.py +python3 ./test.py -f 99-TDcase/TS-3581.py +python3 ./test.py -f 99-TDcase/TS-3311.py python3 ./test.py -f 0-others/balance_vgroups_r1.py -N 6 python3 ./test.py -f 0-others/taosShell.py python3 ./test.py -f 0-others/taosShellError.py @@ -128,6 +138,7 @@ python3 ./test.py -f 0-others/multilevel.py python3 ./test.py -f 0-others/compatibility.py python3 ./test.py -f 0-others/tag_index_basic.py python3 ./test.py -N 3 -f 0-others/walRetention.py +python3 ./test.py -f 0-others/timeRangeWise.py -N 3 python3 ./test.py -f 1-insert/alter_database.py python3 ./test.py -f 1-insert/alter_replica.py -N 3 python3 ./test.py -f 1-insert/influxdb_line_taosc_insert.py @@ -173,7 +184,10 @@ python3 ./test.py -f 1-insert/rowlength64k_4.py -R python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 2 python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 3 python3 ./test.py -f 1-insert/rowlength64k_4.py -Q 4 +python3 ./test.py -f 1-insert/precisionUS.py +python3 ./test.py -f 1-insert/precisionNS.py python3 ./test.py -f 0-others/show.py +python3 ./test.py -f 0-others/show_tag_index.py python3 ./test.py -f 0-others/information_schema.py python3 ./test.py -f 2-query/abs.py python3 ./test.py -f 2-query/abs.py -R @@ -286,6 +300,7 @@ python3 ./test.py -f 2-query/mode.py python3 ./test.py -f 2-query/mode.py -R python3 ./test.py -f 2-query/Now.py python3 ./test.py -f 2-query/Now.py -R +python3 
./test.py -f 2-query/orderBy.py -N 5 python3 ./test.py -f 2-query/percentile.py python3 ./test.py -f 2-query/percentile.py -R python3 ./test.py -f 2-query/pow.py @@ -303,6 +318,8 @@ python3 ./test.py -f 2-query/sin.py -R python3 ./test.py -f 2-query/smaBasic.py -N 3 python3 ./test.py -f 2-query/smaTest.py python3 ./test.py -f 2-query/smaTest.py -R +python3 ./test.py -f 0-others/sma_index.py +python3 ./test.py -f 2-query/sml_TS-3724.py python3 ./test.py -f 2-query/sml.py python3 ./test.py -f 2-query/sml.py -R python3 ./test.py -f 2-query/spread.py @@ -377,9 +394,7 @@ python3 ./test.py -f 2-query/csum.py python3 ./test.py -f 2-query/function_diff.py python3 ./test.py -f 2-query/tagFilter.py python3 ./test.py -f 2-query/projectionDesc.py -python3 ./test.py -f 2-query/ts_3398.py -N 3 -n 3 -python3 ./test.py -f 2-query/ts_3405.py -N 3 -n 3 -python3 ./test.py -f 2-query/ts_3423.py -N 3 -n 3 +python3 ./test.py -f 2-query/ts_3405_3398_3423.py -N 3 -n 3 python3 ./test.py -f 2-query/queryQnode.py python3 ./test.py -f 6-cluster/5dnode1mnode.py python3 ./test.py -f 6-cluster/5dnode2mnode.py -N 5 @@ -408,7 +423,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 - python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 -python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 5 -M 3 +python3 ./test.py -f 6-cluster/5dnode3mnodeRecreateMnode.py -N 6 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStopFollowerLeader.py -N 5 -M 3 python3 ./test.py -f 6-cluster/5dnode3mnodeStop2Follower.py -N 5 -M 3 python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1 @@ -709,3 +724,5 @@ python3 ./test.py -f 2-query/projectionDesc.py -Q 4 python3 ./test.py -f 2-query/odbc.py python3 ./test.py -f 99-TDcase/TD-21561.py -Q 4 python3 ./test.py 
-f 99-TDcase/TD-20582.py +python3 ./test.py -f 5-taos-tools/taosbenchmark/insertMix.py -N 3 +python3 ./test.py -f 5-taos-tools/taosbenchmark/stt.py -N 3 From 2da67392c6af1838a4bb649e4066936335282f56 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 7 Aug 2023 19:30:23 +0800 Subject: [PATCH 073/123] fix(stream): commit the update to make sure the new state of tasks will be persistent to disk. --- source/libs/stream/src/streamRecover.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index df45ff2759..35612eb180 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -495,11 +495,13 @@ int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) { taosWLockLatch(&pMeta->lock); streamMetaSaveTask(pMeta, pTask); + streamMetaCommit(pMeta); taosWUnLockLatch(&pMeta->lock); // history data scan in the stream time window finished, now let's enable the pause streamTaskEnablePause(pTask); + // for source tasks, let's continue execute. 
if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { streamSchedExec(pTask); } From b598c2651df65f5c91b5bfad301ce78d3777a096 Mon Sep 17 00:00:00 2001 From: kailixu Date: Mon, 7 Aug 2023 19:30:54 +0800 Subject: [PATCH 074/123] fix: use mktime after 19700101 on windows --- source/os/src/osTime.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/source/os/src/osTime.c b/source/os/src/osTime.c index e758055529..05233065fa 100644 --- a/source/os/src/osTime.c +++ b/source/os/src/osTime.c @@ -447,6 +447,10 @@ time_t taosMktime(struct tm *timep) { t.QuadPart -= offset.QuadPart; return (time_t)(t.QuadPart / 10000000); #else + time_t result = mktime(timep); + if (result != -1) { + return result; + } #ifdef _MSC_VER #if _MSC_VER >= 1900 int64_t tz = _timezone; From 9f83719559c1e926dc44f79e053cb93a2ff35c71 Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Tue, 8 Aug 2023 09:34:58 +0800 Subject: [PATCH 075/123] docs: add upgrade notes to releases page (#22354) --- docs/en/28-releases/01-tdengine.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md index 6eaa395087..31484dc1c5 100644 --- a/docs/en/28-releases/01-tdengine.md +++ b/docs/en/28-releases/01-tdengine.md @@ -12,6 +12,11 @@ import Release from "/components/ReleaseV3"; ## 3.1.0.0 +:::note IMPORTANT +- Once you upgrade to TDengine 3.1.0.0, you cannot roll back to any previous version of TDengine. Upgrading to 3.1.0.0 will alter your data such that it cannot be read by previous versions. +- You must remove all streams before upgrading to TDengine 3.1.0.0. If you upgrade a deployment that contains streams, the upgrade will fail and your deployment will become nonoperational. +::: + ## 3.0.7.1 From 51e0b903b0bdc806c39afe31454db19da2f5a4a9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 8 Aug 2023 09:36:42 +0800 Subject: [PATCH 076/123] refactor: do some internal refactor. 
--- source/libs/stream/src/streamExec.c | 31 ++++++++++++++++++++++++++--- 1 file changed, 28 insertions(+), 3 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 4ef7d6084d..239d6ed8e3 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -21,6 +21,7 @@ #define MAX_STREAM_RESULT_DUMP_THRESHOLD 100 static int32_t updateCheckPointInfo(SStreamTask* pTask); +static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask); bool streamTaskShouldStop(const SStreamStatus* pStatus) { int32_t status = atomic_load_8((int8_t*)&pStatus->taskStatus); @@ -357,6 +358,26 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) { } static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { + int32_t code = TSDB_CODE_SUCCESS; + if (!pTask->status.transferState) { + return code; + } + + int32_t level = pTask->info.taskLevel; + if (level == TASK_LEVEL__SOURCE) { + streamTaskFillHistoryFinished(pTask); + streamTaskEndScanWAL(pTask); + } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. 
+ code = streamDoTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { // todo handle this + return code; + } + } + + return code; +} + +static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { SStreamMeta* pMeta = pTask->pMeta; SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId); @@ -621,12 +642,16 @@ int32_t streamTryExec(SStreamTask* pTask) { return -1; } + // todo the task should be commit here // todo the task should be commit here if (taosQueueEmpty(pTask->inputQueue->queue)) { // fill-history WAL scan has completed - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && pTask->status.transferState == true) { - streamTaskRecoverSetAllStepFinished(pTask); - streamTaskEndScanWAL(pTask); + if (pTask->status.transferState) { + code = streamTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + streamSchedExec(pTask); } else { atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), From 82ab81810b0b2be294a0b710b2a81558a6ce1a7d Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 8 Aug 2023 09:40:21 +0800 Subject: [PATCH 077/123] refactor: do some internal refactor. 
--- source/libs/stream/inc/streamInt.h | 1 + source/libs/stream/src/streamRecover.c | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/source/libs/stream/inc/streamInt.h b/source/libs/stream/inc/streamInt.h index add893c8c7..32d6dc65d9 100644 --- a/source/libs/stream/inc/streamInt.h +++ b/source/libs/stream/inc/streamInt.h @@ -62,6 +62,7 @@ SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* int32_t streamAddEndScanHistoryMsg(SStreamTask* pTask, SRpcHandleInfo* pRpcInfo, SStreamScanHistoryFinishReq* pReq); int32_t streamNotifyUpstreamContinue(SStreamTask* pTask); +int32_t streamTaskFillHistoryFinished(SStreamTask* pTask); extern int32_t streamBackendId; extern int32_t streamBackendCfWrapperId; diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 35612eb180..5bb8b65b0b 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -485,6 +485,12 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory return 0; } +int32_t streamTaskFillHistoryFinished(SStreamTask* pTask) { + void* exec = pTask->exec.pExecutor; + qResetStreamInfoTimeWindow(exec); + return 0; +} + int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) { ASSERT(pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY); SStreamMeta* pMeta = pTask->pMeta; From 143f39b6f79b6c4105f7b2eaf44dc5fb5db7190a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 31 Jul 2023 14:22:46 +0800 Subject: [PATCH 078/123] fix(stream): set the correct end key of delete block. 
--- source/libs/executor/src/scanoperator.c | 90 ++++++++++++++++++++++++- 1 file changed, 89 insertions(+), 1 deletion(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 3d5e4a7d5f..6e6231a7e7 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1550,7 +1550,95 @@ static void checkUpdateData(SStreamScanInfo* pInfo, bool invertible, SSDataBlock } } -static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, bool filter) { +static void doBlockDataWindowFilter(SSDataBlock* pBlock, int32_t tsIndex, STimeWindow* pWindow, const char* id) { + if (pWindow->skey != INT64_MIN || pWindow->ekey != INT64_MAX) { + bool* p = taosMemoryCalloc(pBlock->info.rows, sizeof(bool)); + bool hasUnqualified = false; + + SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, tsIndex); + + if (pWindow->skey != INT64_MIN) { + qDebug("%s filter for additional history window, skey:%" PRId64, id, pWindow->skey); + + ASSERT(pCol->pData != NULL); + for (int32_t i = 0; i < pBlock->info.rows; ++i) { + int64_t* ts = (int64_t*)colDataGetData(pCol, i); + p[i] = (*ts >= pWindow->skey); + + if (!p[i]) { + hasUnqualified = true; + } + } + } else if (pWindow->ekey != INT64_MAX) { + qDebug("%s filter for additional history window, ekey:%" PRId64, id, pWindow->ekey); + for (int32_t i = 0; i < pBlock->info.rows; ++i) { + int64_t* ts = (int64_t*)colDataGetData(pCol, i); + p[i] = (*ts <= pWindow->ekey); + + if (!p[i]) { + hasUnqualified = true; + } + } + } + + if (hasUnqualified) { + trimDataBlock(pBlock, pBlock->info.rows, p); + } + + taosMemoryFree(p); + } +} + +// re-build the delete block, ONLY according to the split timestamp +static void rebuildDeleteBlockData(SSDataBlock* pBlock, STimeWindow* pWindow, const char* id) { + int32_t numOfRows = pBlock->info.rows; + bool* p = taosMemoryCalloc(numOfRows, sizeof(bool)); + bool hasUnqualified = false; + int64_t skey = pWindow->skey; + 
int64_t ekey = pWindow->ekey; + + SColumnInfoData* pSrcStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); + uint64_t* tsStartCol = (uint64_t*)pSrcStartCol->pData; + SColumnInfoData* pSrcEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX); + uint64_t* tsEndCol = (uint64_t*)pSrcEndCol->pData; + + if (pWindow->skey != INT64_MIN) { + for (int32_t i = 0; i < numOfRows; i++) { + if (tsStartCol[i] < skey) { + tsStartCol[i] = skey; + } + + if (tsEndCol[i] >= skey) { + p[i] = true; + } else { // this row should be removed, since it is not in this query time window, which is [skey, INT64_MAX] + hasUnqualified = true; + } + } + } else if (pWindow->ekey != INT64_MAX) { + for(int32_t i = 0; i < numOfRows; ++i) { + if (tsEndCol[i] > ekey) { + tsEndCol[i] = ekey; + } + + if (tsStartCol[i] <= ekey) { + p[i] = true; + } else { + hasUnqualified = true; + } + } + } + + if (hasUnqualified) { + trimDataBlock(pBlock, pBlock->info.rows, p); + qDebug("%s re-build delete datablock, start key revised to:%"PRId64", rows:%"PRId64, id, skey, pBlock->info.rows); + } else { + qDebug("%s not update the delete block", id); + } + + taosMemoryFree(p); +} + +static int32_t setBlockIntoRes(SStreamScanInfo* pInfo, const SSDataBlock* pBlock, STimeWindow* pTimeWindow, bool filter) { SDataBlockInfo* pBlockInfo = &pInfo->pRes->info; SOperatorInfo* pOperator = pInfo->pStreamScanOp; SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; From ca2183acfde0778cd67890f95e2dc4290234e394 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 31 Jul 2023 15:24:35 +0800 Subject: [PATCH 079/123] refactor: do some internal refactor. 
--- include/libs/executor/executor.h | 6 +- include/libs/stream/tstream.h | 2 - source/dnode/vnode/src/tq/tq.c | 2 +- source/libs/executor/inc/querytask.h | 4 +- source/libs/executor/src/executor.c | 36 ++----- source/libs/stream/src/streamExec.c | 132 +++++++------------------ source/libs/stream/src/streamRecover.c | 12 +-- 7 files changed, 46 insertions(+), 148 deletions(-) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index 3bef15f3a7..f90c38f341 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -221,13 +221,9 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan int32_t qStreamRecoverFinish(qTaskInfo_t tinfo); int32_t qRestoreStreamOperatorOption(qTaskInfo_t tinfo); bool qStreamRecoverScanFinished(qTaskInfo_t tinfo); -bool qStreamRecoverScanStep1Finished(qTaskInfo_t tinfo); -bool qStreamRecoverScanStep2Finished(qTaskInfo_t tinfo); -int32_t qStreamRecoverSetAllStepFinished(qTaskInfo_t tinfo); +int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo); void resetTaskInfo(qTaskInfo_t tinfo); -void qResetStreamInfoTimeWindow(qTaskInfo_t tinfo); - int32_t qStreamOperatorReleaseState(qTaskInfo_t tInfo); int32_t qStreamOperatorReloadState(qTaskInfo_t tInfo); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 066f83fbcb..9d3a42f502 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -607,8 +607,6 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask); int32_t streamStartRecoverTask(SStreamTask* pTask, int8_t igUntreated); bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer); -bool streamTaskRecoverScanStep1Finished(SStreamTask* pTask); -bool streamTaskRecoverScanStep2Finished(SStreamTask* pTask); int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask); // common diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 
ccdf0c88a5..373bd77c29 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1296,7 +1296,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { "s-task:%s scan-history in stream time window completed, no related fill-history task, reset the time " "window:%" PRId64 " - %" PRId64, id, pWindow->skey, pWindow->ekey); - qResetStreamInfoTimeWindow(pTask->exec.pExecutor); + qStreamInfoResetTimewindowFilter(pTask->exec.pExecutor); } else { // when related fill-history task exists, update the fill-history time window only when the // state transfer is completed. diff --git a/source/libs/executor/inc/querytask.h b/source/libs/executor/inc/querytask.h index cdf37bcc6b..7241b015a0 100644 --- a/source/libs/executor/inc/querytask.h +++ b/source/libs/executor/inc/querytask.h @@ -62,8 +62,8 @@ typedef struct { SSchemaWrapper* schema; char tbName[TSDB_TABLE_NAME_LEN]; // this is the current scan table: todo refactor int8_t recoverStep; - bool recoverStep1Finished; - bool recoverStep2Finished; +// bool recoverStep1Finished; +// bool recoverStep2Finished; int8_t recoverScanFinished; SQueryTableDataCond tableCond; SVersionRange fillHistoryVer; diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index b85305b32d..e4ddf9ca6c 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -116,17 +116,6 @@ void resetTaskInfo(qTaskInfo_t tinfo) { clearStreamBlock(pTaskInfo->pRoot); } -void qResetStreamInfoTimeWindow(qTaskInfo_t tinfo) { - SExecTaskInfo* pTaskInfo = (SExecTaskInfo*) tinfo; - if (pTaskInfo == NULL) { - return; - } - - qDebug("%s set stream fill-history window:%" PRId64"-%"PRId64, GET_TASKID(pTaskInfo), INT64_MIN, INT64_MAX); - pTaskInfo->streamInfo.fillHistoryWindow.skey = INT64_MIN; - pTaskInfo->streamInfo.fillHistoryWindow.ekey = INT64_MAX; -} - static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t numOfBlocks, int32_t type, const char* id) { 
if (pOperator->operatorType != QUERY_NODE_PHYSICAL_PLAN_STREAM_SCAN) { if (pOperator->numOfDownstream == 0) { @@ -341,6 +330,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v return NULL; } + qStreamInfoResetTimewindowFilter(pTaskInfo); return pTaskInfo; } @@ -890,8 +880,6 @@ int32_t qStreamSourceScanParamForHistoryScanStep1(qTaskInfo_t tinfo, SVersionRan pStreamInfo->fillHistoryVer = *pVerRange; pStreamInfo->fillHistoryWindow = *pWindow; pStreamInfo->recoverStep = STREAM_RECOVER_STEP__PREPARE1; - pStreamInfo->recoverStep1Finished = false; - pStreamInfo->recoverStep2Finished = false; qDebug("%s step 1. set param for stream scanner for scan-history data, verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64 " - %" PRId64, @@ -909,8 +897,6 @@ int32_t qStreamSourceScanParamForHistoryScanStep2(qTaskInfo_t tinfo, SVersionRan pStreamInfo->fillHistoryVer = *pVerRange; pStreamInfo->fillHistoryWindow = *pWindow; pStreamInfo->recoverStep = STREAM_RECOVER_STEP__PREPARE2; - pStreamInfo->recoverStep1Finished = true; - pStreamInfo->recoverStep2Finished = false; qDebug("%s step 2. 
set param for stream scanner for scan-history data, verRange:%" PRId64 " - %" PRId64 ", window:%" PRId64 " - %" PRId64, @@ -1049,23 +1035,15 @@ bool qStreamRecoverScanFinished(qTaskInfo_t tinfo) { return pTaskInfo->streamInfo.recoverScanFinished; } -bool qStreamRecoverScanStep1Finished(qTaskInfo_t tinfo) { +int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - return pTaskInfo->streamInfo.recoverStep1Finished; -} + STimeWindow* pWindow = &pTaskInfo->streamInfo.fillHistoryWindow; -bool qStreamRecoverScanStep2Finished(qTaskInfo_t tinfo) { - SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - return pTaskInfo->streamInfo.recoverStep2Finished; -} + qDebug("%s set remove scan-history filter window:%" PRId64 "-%" PRId64 ", new window:%" PRId64 "-%" PRId64, + GET_TASKID(pTaskInfo), pWindow->skey, pWindow->ekey, INT64_MIN, INT64_MAX); -int32_t qStreamRecoverSetAllStepFinished(qTaskInfo_t tinfo) { - SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; - pTaskInfo->streamInfo.recoverStep1Finished = true; - pTaskInfo->streamInfo.recoverStep2Finished = true; - - // reset the time window - pTaskInfo->streamInfo.fillHistoryWindow.skey = INT64_MIN; + pWindow->skey = INT64_MIN; + pWindow->ekey = INT64_MAX; return 0; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 239d6ed8e3..315a04b6bf 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -164,15 +164,14 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i } int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { - int32_t code = 0; - ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE); - void* exec = pTask->exec.pExecutor; + int32_t code = TSDB_CODE_SUCCESS; + void* exec = pTask->exec.pExecutor; + bool finished = false; qSetStreamOpOpen(exec); - bool finished = false; - while (1) { + while (!finished) { if (streamTaskShouldPause(&pTask->status)) { 
double el = (taosGetTimestampMs() - pTask->tsInfo.step1Start) / 1000.0; qDebug("s-task:%s paused from the scan-history task, elapsed time:%.2fsec", pTask->id.idStr, el); @@ -185,44 +184,30 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { return -1; } - int32_t batchCnt = 0; + int32_t numOfBlocks = 0; while (1) { if (streamTaskShouldStop(&pTask->status)) { taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); return 0; } + if (streamTaskShouldPause(&pTask->status)) { + break; + } + SSDataBlock* output = NULL; uint64_t ts = 0; if (qExecTask(exec, &output, &ts) < 0) { continue; } - if (output == NULL) { - if (qStreamRecoverScanFinished(exec)) { - finished = true; - } else { - qSetStreamOpOpen(exec); - if (streamTaskShouldPause(&pTask->status)) { - SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); - if (qRes == NULL) { - taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } - - qRes->type = STREAM_INPUT__DATA_BLOCK; - qRes->blocks = pRes; - code = streamTaskOutputResultBlock(pTask, qRes); - if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { - taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); - taosFreeQitem(qRes); - return code; - } - return 0; - } - } + if (output == NULL && qStreamRecoverScanFinished(exec)) { + finished = true; break; + } else { + if (output == NULL) { + ASSERT(0); + } } SSDataBlock block = {0}; @@ -230,86 +215,37 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { block.info.childId = pTask->info.selfChildId; taosArrayPush(pRes, &block); - batchCnt++; - - qDebug("s-task:%s scan exec numOfBlocks:%d, limit:%d", pTask->id.idStr, batchCnt, batchSz); - if (batchCnt >= batchSz) { + numOfBlocks++; + qDebug("s-task:%s scan exec numOfBlocks:%d, limit:%d", pTask->id.idStr, numOfBlocks, batchSz); + if (numOfBlocks >= batchSz) { break; } } - if (taosArrayGetSize(pRes) == 0) { - taosArrayDestroy(pRes); - - if (finished) { - qDebug("s-task:%s finish 
recover exec task ", pTask->id.idStr); - break; - } else { - qDebug("s-task:%s continue recover exec task ", pTask->id.idStr); - continue; + if (taosArrayGetSize(pRes) > 0) { + SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); + if (qRes == NULL) { + taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); + terrno = TSDB_CODE_OUT_OF_MEMORY; + return -1; } - } - SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM, 0); - if (qRes == NULL) { - taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); - terrno = TSDB_CODE_OUT_OF_MEMORY; - return -1; - } + qRes->type = STREAM_INPUT__DATA_BLOCK; + qRes->blocks = pRes; - qRes->type = STREAM_INPUT__DATA_BLOCK; - qRes->blocks = pRes; - code = streamTaskOutputResultBlock(pTask, qRes); - if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { - taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); - taosFreeQitem(qRes); - return code; - } - - if (finished) { - break; - } - } - return 0; -} - -#if 0 -int32_t streamBatchExec(SStreamTask* pTask, int32_t batchLimit) { - // fetch all queue item, merge according to batchLimit - int32_t numOfItems = taosReadAllQitems(pTask->inputQueue1, pTask->inputQall); - if (numOfItems == 0) { - qDebug("task: %d, stream task exec over, queue empty", pTask->id.taskId); - return 0; - } - SStreamQueueItem* pMerged = NULL; - SStreamQueueItem* pItem = NULL; - taosGetQitem(pTask->inputQall, (void**)&pItem); - if (pItem == NULL) { - if (pMerged != NULL) { - // process merged item + code = streamTaskOutputResultBlock(pTask, qRes); + if (code == TSDB_CODE_UTIL_QUEUE_OUT_OF_MEMORY) { + taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes); + taosFreeQitem(qRes); + return code; + } } else { - return 0; + taosArrayDestroy(pRes); } } - // if drop - if (pItem->type == STREAM_INPUT__DESTROY) { - // set status drop - return -1; - } - - if (pTask->info.taskLevel == TASK_LEVEL__SINK) { - ASSERT(((SStreamQueueItem*)pItem)->type == STREAM_INPUT__DATA_BLOCK); - 
streamTaskOutputResultBlock(pTask, (SStreamDataBlock*)pItem); - } - - // exec impl - - // output - // try dispatch return 0; } -#endif int32_t updateCheckPointInfo(SStreamTask* pTask) { int64_t ckId = 0; @@ -425,7 +361,7 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { // expand the query time window for stream scanner pTimeWindow->skey = INT64_MIN; - qResetStreamInfoTimeWindow(pStreamTask->exec.pExecutor); + qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor); // transfer the ownership of executor state streamTaskReleaseState(pTask); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 5bb8b65b0b..59caf31340 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -655,19 +655,9 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) { return 0; } -bool streamTaskRecoverScanStep1Finished(SStreamTask* pTask) { - void* exec = pTask->exec.pExecutor; - return qStreamRecoverScanStep1Finished(exec); -} - -bool streamTaskRecoverScanStep2Finished(SStreamTask* pTask) { - void* exec = pTask->exec.pExecutor; - return qStreamRecoverScanStep2Finished(exec); -} - int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask) { void* exec = pTask->exec.pExecutor; - return qStreamRecoverSetAllStepFinished(exec); + return qStreamInfoResetTimewindowFilter(exec); } bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) { From d43cb3fcdf2b1b86fbe18cf68ba2a365c952d20a Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 31 Jul 2023 18:07:37 +0800 Subject: [PATCH 080/123] fix(stream): kill task when pause the stream execution. 
--- include/libs/stream/tstream.h | 6 +-- source/dnode/vnode/src/tq/tq.c | 14 +++---- source/libs/stream/src/streamExec.c | 9 ++--- source/libs/stream/src/streamRecover.c | 51 +++++++++++++++----------- 4 files changed, 38 insertions(+), 42 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 9d3a42f502..db0509d81d 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -604,13 +604,10 @@ int32_t streamSendCheckRsp(const SStreamMeta* pMeta, const SStreamTaskCheckReq* int32_t streamProcessCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp); int32_t streamLaunchFillHistoryTask(SStreamTask* pTask); int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask); -int32_t streamStartRecoverTask(SStreamTask* pTask, int8_t igUntreated); +int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated); bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer); -int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask); - // common -int32_t streamSetParamForScanHistory(SStreamTask* pTask); int32_t streamRestoreParam(SStreamTask* pTask); int32_t streamSetStatusNormal(SStreamTask* pTask); const char* streamGetTaskStatusStr(int32_t status); @@ -624,7 +621,6 @@ void streamTaskEnablePause(SStreamTask* pTask); // source level int32_t streamSetParamForStreamScannerStep1(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow); int32_t streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange* pVerRange, STimeWindow* pWindow); -int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated); int32_t streamSourceScanHistoryData(SStreamTask* pTask); int32_t streamDispatchScanHistoryFinishMsg(SStreamTask* pTask); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 373bd77c29..0feaeb92c5 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1567,9 
+1567,8 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg SStreamMeta* pMeta = pTq->pStreamMeta; SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->taskId); if (pTask == NULL) { - tqError("vgId:%d failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId, + tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId, pReq->taskId); - // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active return TSDB_CODE_SUCCESS; } @@ -1581,9 +1580,8 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg if (pTask->historyTaskId.taskId != 0) { pHistoryTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.taskId); if (pHistoryTask == NULL) { - tqError("vgId:%d failed to acquire fill-history task:0x%x, it may have been dropped already. Pause success", + tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%x, it may have been dropped already", pMeta->vgId, pTask->historyTaskId.taskId); - streamMetaReleaseTask(pMeta, pTask); // since task is in [STOP|DROPPING] state, it is safe to assume the pause is active @@ -1591,14 +1589,12 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg } tqDebug("s-task:%s fill-history task handle paused along with related stream task", pHistoryTask->id.idStr); - streamTaskPause(pHistoryTask); - } - streamMetaReleaseTask(pMeta, pTask); - if (pHistoryTask != NULL) { + streamTaskPause(pHistoryTask); streamMetaReleaseTask(pMeta, pHistoryTask); } + streamMetaReleaseTask(pMeta, pTask); return TSDB_CODE_SUCCESS; } @@ -1627,7 +1623,7 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion, } if (level == TASK_LEVEL__SOURCE && pTask->info.fillHistory && pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY) { - streamStartRecoverTask(pTask, igUntreated); + streamStartScanHistoryAsync(pTask, igUntreated); } else if (level 
== TASK_LEVEL__SOURCE && (taosQueueItemSize(pTask->inputQueue->queue) == 0)) { tqStartStreamTasks(pTq); } else { diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 315a04b6bf..bab15bb31d 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -582,12 +582,9 @@ int32_t streamTryExec(SStreamTask* pTask) { // todo the task should be commit here if (taosQueueEmpty(pTask->inputQueue->queue)) { // fill-history WAL scan has completed - if (pTask->status.transferState) { - code = streamTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - streamSchedExec(pTask); + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && pTask->status.transferState == true) { + streamTaskFillHistoryFinished(pTask); + streamTaskEndScanWAL(pTask); } else { atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 59caf31340..7c308902f2 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -17,23 +17,30 @@ #include "ttimer.h" #include "wal.h" -static void launchFillHistoryTask(SStreamTask* pTask); -static void streamTaskSetRangeStreamCalc(SStreamTask* pTask); +typedef struct SStreamTaskRetryInfo { + SStreamMeta* pMeta; + int32_t taskId; +} SStreamTaskRetryInfo; -static void streamTaskSetForReady(SStreamTask* pTask, int32_t numOfReqs) { +static int32_t streamSetParamForScanHistory(SStreamTask* pTask); +static void launchFillHistoryTask(SStreamTask* pTask); +static void streamTaskSetRangeStreamCalc(SStreamTask* pTask); +static int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated); + +static void streamTaskSetReady(SStreamTask* pTask, int32_t numOfReqs) { 
ASSERT(pTask->status.downstreamReady == 0); pTask->status.downstreamReady = 1; - int64_t el = (taosGetTimestampMs() - pTask->tsInfo.init); + int64_t el = (taosGetTimestampMs() - pTask->tsInfo.init); qDebug("s-task:%s all %d downstream ready, init completed, elapsed time:%dms, task status:%s", pTask->id.idStr, numOfReqs, (int32_t) el, streamGetTaskStatusStr(pTask->status.taskStatus)); } -int32_t streamStartRecoverTask(SStreamTask* pTask, int8_t igUntreated) { +int32_t streamStartScanHistoryAsync(SStreamTask* pTask, int8_t igUntreated) { SStreamScanHistoryReq req; - streamBuildSourceRecover1Req(pTask, &req, igUntreated); - int32_t len = sizeof(SStreamScanHistoryReq); + initScanHistoryReq(pTask, &req, igUntreated); + int32_t len = sizeof(SStreamScanHistoryReq); void* serializedReq = rpcMallocCont(len); if (serializedReq == NULL) { return -1; @@ -65,9 +72,9 @@ static int32_t doLaunchScanHistoryTask(SStreamTask* pTask) { if (pTask->info.fillHistory) { streamSetParamForScanHistory(pTask); } - streamSetParamForStreamScannerStep1(pTask, pRange, &pTask->dataRange.window); - int32_t code = streamStartRecoverTask(pTask, 0); + streamSetParamForStreamScannerStep1(pTask, pRange, &pTask->dataRange.window); + int32_t code = streamStartScanHistoryAsync(pTask, 0); return code; } @@ -142,7 +149,7 @@ int32_t streamTaskDoCheckDownstreamTasks(SStreamTask* pTask) { } else { qDebug("s-task:%s (vgId:%d) set downstream ready, since no downstream", pTask->id.idStr, pTask->info.nodeId); - streamTaskSetForReady(pTask, 0); + streamTaskSetReady(pTask, 0); streamTaskSetRangeStreamCalc(pTask); streamTaskLaunchScanHistory(pTask); @@ -188,7 +195,7 @@ int32_t streamTaskCheckStatus(SStreamTask* pTask) { } static void doProcessDownstreamReadyRsp(SStreamTask* pTask, int32_t numOfReqs) { - streamTaskSetForReady(pTask, numOfReqs); + streamTaskSetReady(pTask, numOfReqs); const char* id = pTask->id.idStr; int8_t status = pTask->status.taskStatus; @@ -319,7 +326,7 @@ int32_t 
streamSetParamForStreamScannerStep2(SStreamTask* pTask, SVersionRange *p return qStreamSourceScanParamForHistoryScanStep2(pTask->exec.pExecutor, pVerRange, pWindow); } -int32_t streamBuildSourceRecover1Req(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated) { +int32_t initScanHistoryReq(SStreamTask* pTask, SStreamScanHistoryReq* pReq, int8_t igUntreated) { pReq->msgHead.vgId = pTask->info.nodeId; pReq->streamId = pTask->id.streamId; pReq->taskId = pTask->id.taskId; @@ -532,11 +539,6 @@ static void doCheckDownstreamStatus(SStreamTask* pTask, SStreamTask* pHTask) { streamTaskDoCheckDownstreamTasks(pHTask); } -typedef struct SStreamTaskRetryInfo { - SStreamMeta* pMeta; - int32_t taskId; -} SStreamTaskRetryInfo; - static void tryLaunchHistoryTask(void* param, void* tmrId) { SStreamTaskRetryInfo* pInfo = param; SStreamMeta* pMeta = pInfo->pMeta; @@ -646,7 +648,7 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) { } } - // dispatch recover finish req to all related downstream task + // dispatch scan-history finish req to all related downstream task code = streamDispatchScanHistoryFinishMsg(pTask); if (code < 0) { return -1; @@ -655,7 +657,7 @@ int32_t streamTaskScanHistoryDataComplete(SStreamTask* pTask) { return 0; } -int32_t streamTaskRecoverSetAllStepFinished(SStreamTask* pTask) { +int32_t streamTaskFillHistoryFinished(SStreamTask* pTask) { void* exec = pTask->exec.pExecutor; return qStreamInfoResetTimewindowFilter(exec); } @@ -667,7 +669,7 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) { int64_t nextStartVer = pRange->maxVer + 1; if (nextStartVer > latestVer - 1) { // no input data yet. 
no need to execute the secondardy scan while stream task halt - streamTaskRecoverSetAllStepFinished(pTask); + streamTaskFillHistoryFinished(pTask); qDebug( "s-task:%s no need to perform secondary scan-history data(step 2), since no data ingest during step1 scan, " "related stream task currentVer:%" PRId64, @@ -682,7 +684,6 @@ bool streamHistoryTaskSetVerRangeStep2(SStreamTask* pTask, int64_t latestVer) { } } - int32_t tEncodeStreamTaskCheckReq(SEncoder* pEncoder, const SStreamTaskCheckReq* pReq) { if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeI64(pEncoder, pReq->reqId) < 0) return -1; @@ -855,7 +856,7 @@ void streamTaskPause(SStreamTask* pTask) { taosMsleep(100); } - // todo: use the lock of the task. + // todo: use the task lock, stead of meta lock taosWLockLatch(&pMeta->lock); status = pTask->status.taskStatus; @@ -869,6 +870,12 @@ void streamTaskPause(SStreamTask* pTask) { atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__PAUSE); taosWUnLockLatch(&pMeta->lock); + // in case of fill-history task, stop the tsdb file scan operation. + if (pTask->info.fillHistory == 1) { + void* pExecutor = pTask->exec.pExecutor; + qKillTask(pExecutor, TSDB_CODE_SUCCESS); + } + int64_t el = taosGetTimestampMs() - st; qDebug("vgId:%d s-task:%s set pause flag, prev:%s, elapsed time:%dms", pMeta->vgId, pTask->id.idStr, streamGetTaskStatusStr(pTask->status.keepTaskStatus), (int32_t)el); From 3bcc7ed83e792c0f1367302352e06142815f1106 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 31 Jul 2023 19:12:23 +0800 Subject: [PATCH 081/123] fix(stream): transfer the state for agg tasks. 
--- source/libs/stream/src/streamExec.c | 76 +++++++++++++++-------------- 1 file changed, 39 insertions(+), 37 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index bab15bb31d..4f942b951d 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -293,32 +293,12 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) { } } -static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { - int32_t code = TSDB_CODE_SUCCESS; - if (!pTask->status.transferState) { - return code; - } - - int32_t level = pTask->info.taskLevel; - if (level == TASK_LEVEL__SOURCE) { - streamTaskFillHistoryFinished(pTask); - streamTaskEndScanWAL(pTask); - } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. - code = streamDoTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle this - return code; - } - } - - return code; -} - static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { SStreamMeta* pMeta = pTask->pMeta; SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId); if (pStreamTask == NULL) { - // todo: destroy this task here + // todo: destroy the fill-history task here qError("s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed", pTask->id.idStr, pTask->streamTaskId.taskId); return TSDB_CODE_STREAM_TASK_NOT_EXIST; @@ -359,34 +339,36 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { qDebug("s-task:%s no need to update time window for non-source task", pStreamTask->id.idStr); } - // expand the query time window for stream scanner + // 1. expand the query time window for stream task of WAL scanner pTimeWindow->skey = INT64_MIN; qStreamInfoResetTimewindowFilter(pStreamTask->exec.pExecutor); - // transfer the ownership of executor state + // 2. 
transfer the ownership of executor state streamTaskReleaseState(pTask); streamTaskReloadState(pStreamTask); - // clear the link between fill-history task and stream task info + // 3. clear the link between fill-history task and stream task info pStreamTask->historyTaskId.taskId = 0; + + // 4. resume the state of stream task, after this function, the stream task will run immidately. But it can not be + // pause, since the pause allowed attribute is not set yet. streamTaskResumeFromHalt(pStreamTask); qDebug("s-task:%s fill-history task set status to be dropping, save the state into disk", pTask->id.idStr); int32_t taskId = pTask->id.taskId; - // free it and remove it from disk meta-store + // 5. free it and remove fill-history task from disk meta-store streamMetaUnregisterTask(pMeta, taskId); - // save to disk + // 6. save to disk taosWLockLatch(&pMeta->lock); - streamMetaSaveTask(pMeta, pStreamTask); if (streamMetaCommit(pMeta) < 0) { // persist to disk } taosWUnLockLatch(&pMeta->lock); - // pause allowed + // 7. pause allowed. streamTaskEnablePause(pStreamTask); streamSchedExec(pStreamTask); @@ -394,6 +376,25 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { return TSDB_CODE_SUCCESS; } +static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { + int32_t code = TSDB_CODE_SUCCESS; + if (!pTask->status.transferState) { + return code; + } + + if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + streamTaskFillHistoryFinished(pTask); + streamTaskEndScanWAL(pTask); + } else { // do transfer task operator states. 
+ code = streamDoTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { // todo handle this + return code; + } + } + + return code; +} + static int32_t extractMsgFromInputQ(SStreamTask* pTask, SStreamQueueItem** pInput, int32_t* numOfBlocks, const char* id) { int32_t retryTimes = 0; @@ -547,17 +548,16 @@ int32_t streamTaskEndScanWAL(SStreamTask* pTask) { double el = (taosGetTimestampMs() - pTask->tsInfo.step2Start) / 1000.0; qDebug("s-task:%s scan-history from WAL stage(step 2) ended, elapsed time:%.2fs", id, el); - // 3. notify downstream tasks to transfer executor state after handle all history blocks. - pTask->status.transferState = true; - + // 1. notify all downstream tasks to transfer executor state after handle all history blocks. int32_t code = streamDispatchTransferStateMsg(pTask); if (code != TSDB_CODE_SUCCESS) { // todo handle error } - // the last execution of fill-history task, in order to transfer task operator states. - code = streamTransferStateToStreamTask(pTask); - if (code != TSDB_CODE_SUCCESS) { // todo handle this + // 2. do transfer stream task operator states. 
+ pTask->status.transferState = true; + code = streamDoTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { // todo handle error return code; } @@ -582,9 +582,11 @@ int32_t streamTryExec(SStreamTask* pTask) { // todo the task should be commit here if (taosQueueEmpty(pTask->inputQueue->queue)) { // fill-history WAL scan has completed - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE && pTask->status.transferState == true) { - streamTaskFillHistoryFinished(pTask); - streamTaskEndScanWAL(pTask); + if (pTask->status.transferState) { + code = streamTransferStateToStreamTask(pTask); + if (code != TSDB_CODE_SUCCESS) { + return code; + } } else { atomic_store_8(&pTask->status.schedStatus, TASK_SCHED_STATUS__INACTIVE); qDebug("s-task:%s exec completed, status:%s, sched-status:%d", id, streamGetTaskStatusStr(pTask->status.taskStatus), From 1e680d4df1ac6f22be2697aedf9751dd67619a04 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 31 Jul 2023 19:23:53 +0800 Subject: [PATCH 082/123] fix(stream): ignore the sink task transfer state. --- source/libs/stream/src/streamExec.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 4f942b951d..16341cdbbc 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -382,10 +382,11 @@ static int32_t streamTransferStateToStreamTask(SStreamTask* pTask) { return code; } - if (pTask->info.taskLevel == TASK_LEVEL__SOURCE) { + int32_t level = pTask->info.taskLevel; + if (level == TASK_LEVEL__SOURCE) { streamTaskFillHistoryFinished(pTask); streamTaskEndScanWAL(pTask); - } else { // do transfer task operator states. + } else if (level == TASK_LEVEL__AGG) { // do transfer task operator states. 
code = streamDoTransferStateToStreamTask(pTask); if (code != TSDB_CODE_SUCCESS) { // todo handle this return code; From 0e3fd5277d54dae82a6e5a10fbb29286f3136fda Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 1 Aug 2023 00:50:30 +0800 Subject: [PATCH 083/123] fix(stream): dump results to sink node before paused. --- source/libs/executor/src/executor.c | 5 ----- source/libs/stream/src/streamExec.c | 5 ++++- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index e4ddf9ca6c..231653c728 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -186,11 +186,6 @@ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId) { doSetTaskId(pTaskInfo->pRoot, &pTaskInfo->storageAPI); } -//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code) { -// SExecTaskInfo* pTaskInfo = tinfo; -// pTaskInfo->code = code; -//} - int32_t qSetStreamOpOpen(qTaskInfo_t tinfo) { if (tinfo == NULL) { return TSDB_CODE_APP_ERROR; diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 16341cdbbc..fb262a96d9 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -197,10 +197,13 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { SSDataBlock* output = NULL; uint64_t ts = 0; - if (qExecTask(exec, &output, &ts) < 0) { + code = qExecTask(exec, &output, &ts); + if (code != TSDB_CODE_TSC_QUERY_KILLED && code != TSDB_CODE_SUCCESS) { + qError("%s scan-history data error occurred code:%s, continue scan", pTask->id.idStr, tstrerror(code)); continue; } + // the generated results before fill-history task been paused, should be dispatched to sink node if (output == NULL && qStreamRecoverScanFinished(exec)) { finished = true; break; From ac00e1d520c1743f087b45c0e5f007ffa3c6488f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 1 Aug 2023 10:16:14 +0800 Subject: [PATCH 084/123] 
fix(stream): fetch all data before paused and dump to sink node. --- include/libs/stream/tstream.h | 2 +- source/libs/executor/src/executor.c | 30 ++++++++++++++++++++--------- source/libs/stream/src/streamExec.c | 21 +++++++------------- 3 files changed, 29 insertions(+), 24 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index db0509d81d..b4ae30910c 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -590,7 +590,7 @@ bool streamTaskIsIdle(const SStreamTask* pTask); int32_t streamTaskEndScanWAL(SStreamTask* pTask); SStreamChildEpInfo * streamTaskGetUpstreamTaskEpInfo(SStreamTask* pTask, int32_t taskId); -int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz); +int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize); char* createStreamTaskIdStr(int64_t streamId, int32_t taskId); diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 231653c728..05767db286 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -647,23 +647,33 @@ int32_t qExecTask(qTaskInfo_t tinfo, SSDataBlock** pRes, uint64_t* useconds) { *pRes = NULL; int64_t curOwner = 0; - if ((curOwner = atomic_val_compare_exchange_64(&pTaskInfo->owner, 0, threadId)) != 0) { + + // todo extract method + taosRLockLatch(&pTaskInfo->lock); + bool isKilled = isTaskKilled(pTaskInfo); + if (isKilled) { + clearStreamBlock(pTaskInfo->pRoot); + qDebug("%s already killed, abort", GET_TASKID(pTaskInfo)); + + taosRUnLockLatch(&pTaskInfo->lock); + return TSDB_CODE_SUCCESS; + } + + if (pTaskInfo->owner != 0) { qError("%s-%p execTask is now executed by thread:%p", GET_TASKID(pTaskInfo), pTaskInfo, (void*)curOwner); pTaskInfo->code = TSDB_CODE_QRY_IN_EXEC; + + taosRUnLockLatch(&pTaskInfo->lock); return pTaskInfo->code; } + pTaskInfo->owner = threadId; + taosRUnLockLatch(&pTaskInfo->lock); + if (pTaskInfo->cost.start == 0) { pTaskInfo->cost.start = taosGetTimestampUs(); } 
- if (isTaskKilled(pTaskInfo)) { - clearStreamBlock(pTaskInfo->pRoot); - atomic_store_64(&pTaskInfo->owner, 0); - qDebug("%s already killed, abort", GET_TASKID(pTaskInfo)); - return TSDB_CODE_SUCCESS; - } - // error occurs, record the error code and return to client int32_t ret = setjmp(pTaskInfo->env); if (ret != TSDB_CODE_SUCCESS) { @@ -767,11 +777,13 @@ int32_t qKillTask(qTaskInfo_t tinfo, int32_t rspCode) { qDebug("%s sync killed execTask", GET_TASKID(pTaskInfo)); setTaskKilled(pTaskInfo, TSDB_CODE_TSC_QUERY_KILLED); + taosWLockLatch(&pTaskInfo->lock); while (qTaskIsExecuting(pTaskInfo)) { taosMsleep(10); } - pTaskInfo->code = rspCode; + taosWUnLockLatch(&pTaskInfo->lock); + return TSDB_CODE_SUCCESS; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index fb262a96d9..cf6cab2401 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -163,7 +163,7 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, SStreamQueueItem* pItem, i return code; } -int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { +int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize) { ASSERT(pTask->info.taskLevel == TASK_LEVEL__SOURCE); int32_t code = TSDB_CODE_SUCCESS; void* exec = pTask->exec.pExecutor; @@ -175,7 +175,7 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { if (streamTaskShouldPause(&pTask->status)) { double el = (taosGetTimestampMs() - pTask->tsInfo.step1Start) / 1000.0; qDebug("s-task:%s paused from the scan-history task, elapsed time:%.2fsec", pTask->id.idStr, el); - return 0; + break; } SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock)); @@ -191,10 +191,6 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { return 0; } - if (streamTaskShouldPause(&pTask->status)) { - break; - } - SSDataBlock* output = NULL; uint64_t ts = 0; code = qExecTask(exec, &output, &ts); @@ -204,13 +200,9 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { } // the 
generated results before fill-history task been paused, should be dispatched to sink node - if (output == NULL && qStreamRecoverScanFinished(exec)) { - finished = true; + if (output == NULL) { + finished = qStreamRecoverScanFinished(exec); break; - } else { - if (output == NULL) { - ASSERT(0); - } } SSDataBlock block = {0}; @@ -219,8 +211,9 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz) { taosArrayPush(pRes, &block); numOfBlocks++; - qDebug("s-task:%s scan exec numOfBlocks:%d, limit:%d", pTask->id.idStr, numOfBlocks, batchSz); - if (numOfBlocks >= batchSz) { + if (numOfBlocks >= batchSize || code != TSDB_CODE_SUCCESS) { + qDebug("s-task:%s scan exec numOfBlocks:%d, limit:%d, code:%s", pTask->id.idStr, numOfBlocks, batchSize, + tstrerror(code)); break; } } From e89d3c18d63d65df7cc71fb50cb3b5c5ba0f46e9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 1 Aug 2023 10:30:24 +0800 Subject: [PATCH 085/123] fix(stream): remove invalid check. --- source/libs/stream/src/streamExec.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index cf6cab2401..9495dcd69c 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -210,10 +210,8 @@ int32_t streamScanExec(SStreamTask* pTask, int32_t batchSize) { block.info.childId = pTask->info.selfChildId; taosArrayPush(pRes, &block); - numOfBlocks++; - if (numOfBlocks >= batchSize || code != TSDB_CODE_SUCCESS) { - qDebug("s-task:%s scan exec numOfBlocks:%d, limit:%d, code:%s", pTask->id.idStr, numOfBlocks, batchSize, - tstrerror(code)); + if ((++numOfBlocks) >= batchSize) { + qDebug("s-task:%s scan exec numOfBlocks:%d, output limit:%d reached", pTask->id.idStr, numOfBlocks, batchSize); break; } } From d6d63ec54c48143acb5ae7209d200383a3543c35 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 1 Aug 2023 14:31:41 +0800 Subject: [PATCH 086/123] fix(stream): add more check. 
--- source/libs/stream/src/streamExec.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 9495dcd69c..fdb895ffc0 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -305,11 +305,12 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { STimeWindow* pTimeWindow = &pStreamTask->dataRange.window; + // todo. the dropping status should be append to the status after the halt completed. // It must be halted for a source stream task, since when the related scan-history-data task start scan the history - // for the step 2. For a agg task + // for the step 2. int8_t status = pStreamTask->status.taskStatus; if (pStreamTask->info.taskLevel == TASK_LEVEL__SOURCE) { - ASSERT(status == TASK_STATUS__HALT); + ASSERT(status == TASK_STATUS__HALT || status == TASK_STATUS__DROPPING); } else { ASSERT(status == TASK_STATUS__SCAN_HISTORY); pStreamTask->status.taskStatus = TASK_STATUS__HALT; From 327e13c3f7fd30e56961198360217ae6d18fa7d2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 8 Aug 2023 10:07:28 +0800 Subject: [PATCH 087/123] fix(stream): fix the syntax error. 
--- source/libs/executor/src/scanoperator.c | 81 ++----------------------- source/libs/stream/src/streamRecover.c | 6 -- 2 files changed, 4 insertions(+), 83 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 6e6231a7e7..065b876eaf 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -1754,7 +1754,8 @@ static SSDataBlock* doQueueScan(SOperatorInfo* pOperator) { qDebug("doQueueScan get data from log %" PRId64 " rows, version:%" PRId64, pRes->info.rows, pTaskInfo->streamInfo.currentOffset.version); blockDataCleanup(pInfo->pRes); - setBlockIntoRes(pInfo, pRes, true); + STimeWindow defaultWindow = {.skey = INT64_MIN, .ekey = INT64_MAX}; + setBlockIntoRes(pInfo, pRes, &defaultWindow, true); if (pInfo->pRes->info.rows > 0) { return pInfo->pRes; } @@ -1863,80 +1864,6 @@ void streamScanOperatorDecode(void* pBuff, int32_t len, SStreamScanInfo* pInfo) } } -static void doBlockDataWindowFilter(SSDataBlock* pBlock, int32_t tsIndex, STimeWindow* pWindow, const char* id) { - if (pWindow->skey != INT64_MIN || pWindow->ekey != INT64_MAX) { - bool* p = taosMemoryCalloc(pBlock->info.rows, sizeof(bool)); - bool hasUnqualified = false; - - SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, tsIndex); - - if (pWindow->skey != INT64_MIN) { - qDebug("%s filter for additional history window, skey:%" PRId64, id, pWindow->skey); - - for (int32_t i = 0; i < pBlock->info.rows; ++i) { - int64_t* ts = (int64_t*)colDataGetData(pCol, i); - p[i] = (*ts >= pWindow->skey); - - if (!p[i]) { - hasUnqualified = true; - } - } - } else if (pWindow->ekey != INT64_MAX) { - qDebug("%s filter for additional history window, ekey:%" PRId64, id, pWindow->ekey); - for (int32_t i = 0; i < pBlock->info.rows; ++i) { - int64_t* ts = (int64_t*)colDataGetData(pCol, i); - p[i] = (*ts <= pWindow->ekey); - - if (!p[i]) { - hasUnqualified = true; - } - } - } - - if (hasUnqualified) { - trimDataBlock(pBlock, 
pBlock->info.rows, p); - } - - taosMemoryFree(p); - } -} - -// re-build the delete block, ONLY according to the split timestamp -static void rebuildDeleteBlockData(SSDataBlock* pBlock, int64_t skey, const char* id) { - if (skey == INT64_MIN) { - return; - } - - int32_t numOfRows = pBlock->info.rows; - - bool* p = taosMemoryCalloc(numOfRows, sizeof(bool)); - bool hasUnqualified = false; - - SColumnInfoData* pSrcStartCol = taosArrayGet(pBlock->pDataBlock, START_TS_COLUMN_INDEX); - uint64_t* tsStartCol = (uint64_t*)pSrcStartCol->pData; - SColumnInfoData* pSrcEndCol = taosArrayGet(pBlock->pDataBlock, END_TS_COLUMN_INDEX); - uint64_t* tsEndCol = (uint64_t*)pSrcEndCol->pData; - - for (int32_t i = 0; i < numOfRows; i++) { - if (tsStartCol[i] < skey) { - tsStartCol[i] = skey; - } - - if (tsEndCol[i] >= skey) { - p[i] = true; - } else { // this row should be removed, since it is not in this query time window, which is [skey, INT64_MAX] - hasUnqualified = true; - } - } - - if (hasUnqualified) { - trimDataBlock(pBlock, pBlock->info.rows, p); - } - - qDebug("%s re-build delete datablock, start key revised to:%"PRId64", rows:%"PRId64, id, skey, pBlock->info.rows); - taosMemoryFree(p); -} - static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) { // NOTE: this operator does never check if current status is done or not SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo; @@ -2109,7 +2036,7 @@ FETCH_NEXT_BLOCK: } setBlockGroupIdByUid(pInfo, pDelBlock); - rebuildDeleteBlockData(pDelBlock, pStreamInfo->fillHistoryWindow.skey, id); + rebuildDeleteBlockData(pDelBlock, &pStreamInfo->fillHistoryWindow, id); printDataBlock(pDelBlock, "stream scan delete recv filtered"); if (pDelBlock->info.rows == 0) { if (pInfo->tqReader) { @@ -2254,7 +2181,7 @@ FETCH_NEXT_BLOCK: continue; } - setBlockIntoRes(pInfo, pRes, false); + setBlockIntoRes(pInfo, pRes, &pStreamInfo->fillHistoryWindow, false); if (pInfo->pCreateTbRes->info.rows > 0) { pInfo->scanMode = STREAM_SCAN_FROM_RES; qDebug("create table 
res exists, rows:%"PRId64" return from stream scan, %s", pInfo->pCreateTbRes->info.rows, id); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 7c308902f2..ad486c3f20 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -492,12 +492,6 @@ int32_t streamProcessScanHistoryFinishReq(SStreamTask* pTask, SStreamScanHistory return 0; } -int32_t streamTaskFillHistoryFinished(SStreamTask* pTask) { - void* exec = pTask->exec.pExecutor; - qResetStreamInfoTimeWindow(exec); - return 0; -} - int32_t streamProcessScanHistoryFinishRsp(SStreamTask* pTask) { ASSERT(pTask->status.taskStatus == TASK_STATUS__SCAN_HISTORY); SStreamMeta* pMeta = pTask->pMeta; From ff36f5f6cad624315529bf54bad38f2461dfba50 Mon Sep 17 00:00:00 2001 From: slzhou Date: Tue, 8 Aug 2023 13:23:35 +0800 Subject: [PATCH 088/123] fix: reset rowsize since it is set during blockDataAppendColInfo --- source/common/src/tdatablock.c | 1 + 1 file changed, 1 insertion(+) diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 887a110831..5188b1e27c 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -549,6 +549,7 @@ SSDataBlock* blockDataExtractBlock(SSDataBlock* pBlock, int32_t startIndex, int3 pDst->info = pBlock->info; pDst->info.rows = 0; pDst->info.capacity = 0; + pDst->info.rowSize = 0; size_t numOfCols = taosArrayGetSize(pBlock->pDataBlock); for (int32_t i = 0; i < numOfCols; ++i) { SColumnInfoData colInfo = {0}; From 9b4bdd819bc4f9a2d2ceef8ac2b7928bc9590980 Mon Sep 17 00:00:00 2001 From: slzhou Date: Thu, 27 Jul 2023 09:07:13 +0800 Subject: [PATCH 089/123] enhance: subquery can use expr primary key +/- value as primary key --- source/libs/parser/src/parTranslater.c | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 8ce68a5c8c..554dc7cce8 100644 --- 
a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -821,7 +821,19 @@ static bool isPrimaryKeyImpl(SNode* pExpr) { FUNCTION_TYPE_IROWTS == pFunc->funcType) { return true; } - } + } else if (QUERY_NODE_OPERATOR == nodeType(pExpr)) { + SOperatorNode* pOper = (SOperatorNode*)pExpr; + if (OP_TYPE_ADD != pOper->opType && OP_TYPE_SUB != pOper->opType) { + return false; + } + if (!isPrimaryKeyImpl(pOper->pLeft)) { + return false; + } + if (QUERY_NODE_VALUE != nodeType(pOper->pRight)) { + return false; + } + return true; + } return false; } From d43db6e8a8250dbfd87de01e32ead4ccf299b7b7 Mon Sep 17 00:00:00 2001 From: slzhou Date: Fri, 4 Aug 2023 14:17:18 +0800 Subject: [PATCH 090/123] fix: add test case --- tests/parallel_test/cases.task | 1 + tests/script/tsim/query/join_pk.sim | 42 +++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 tests/script/tsim/query/join_pk.sim diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 586425ec1d..7a1e8d61c8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -954,6 +954,7 @@ ,,n,script,./test.sh -f tsim/query/udfpy.sim ,,y,script,./test.sh -f tsim/query/udf_with_const.sim ,,y,script,./test.sh -f tsim/query/join_interval.sim +,,y,script,./test.sh -f tsim/query/join_pk.sim ,,y,script,./test.sh -f tsim/query/unionall_as_table.sim ,,y,script,./test.sh -f tsim/query/multi_order_by.sim ,,y,script,./test.sh -f tsim/query/sys_tbname.sim diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim new file mode 100644 index 0000000000..66bb20da24 --- /dev/null +++ b/tests/script/tsim/query/join_pk.sim @@ -0,0 +1,42 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database test; +sql use test; +sql create table st(ts timestamp, f int) tags(t int); +sql insert into ct1 using st tags(1) values(now, 0)(now+1s, 1) 
+sql insert into ct2 using st tags(2) values(now+2s, 2)(now+3s, 3) +sql select * from (select _wstart - 1s as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +if $rows != 3 then + return -1 +endi +if $data01 != 1 then + return -1 +endi +if $data11 != 1 then + return -1 +endi + +if $data21 != 1 then + return -1 +endi +if $data03 != 1 then + return -1 +endi + +if $data13 != 1 then + return -1 +endi +if $data23 != 1 then + return -1 +endi +sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts + +sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +#system sh/exec.sh -n dnode1 -s stop -x SIGINT + From 67d4647fd56962da5e5131061d0d690316a5daf3 Mon Sep 17 00:00:00 2001 From: slzhou Date: Mon, 7 Aug 2023 13:39:30 +0800 Subject: [PATCH 091/123] enhance: enhance test case --- tests/script/tsim/query/join_pk.sim | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/tests/script/tsim/query/join_pk.sim b/tests/script/tsim/query/join_pk.sim index 66bb20da24..da5c13e9c0 100644 --- a/tests/script/tsim/query/join_pk.sim +++ b/tests/script/tsim/query/join_pk.sim @@ -38,5 +38,18 @@ sql select * from (select _wstart - 1d as ts, count(*) as num1 from st interval( sql select * from (select _wstart + 1a as ts, count(*) as num1 from st interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts sql_error select * from (select _wstart * 3 as ts, count(*) as num1 from st 
interval(1s)) as t1 inner join (select _wstart as ts, count(*) as num2 from st interval(1s)) as t2 on t1.ts = t2.ts +sql create table sst(ts timestamp, ts2 timestamp, f int) tags(t int); +sql insert into sct1 using sst tags(1) values('2023-08-07 13:30:56', '2023-08-07 13:30:56', 0)('2023-08-07 13:30:57', '2023-08-07 13:30:57', 1) +sql insert into sct2 using sst tags(2) values('2023-08-07 13:30:58', '2023-08-07 13:30:58', 2)('2023-08-07 13:30:59', '2023-08-07 13:30:59', 3) +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts-1s as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 4 then + return -1 +endi +sql select * from (select ts - 1s as jts from sst) as t1 inner join (select ts as jts from sst) as t2 on t1.jts = t2.jts +if $rows != 3 then + return -1 +endi +sql_error select * from (select ts2 - 1s as jts from sst) as t1 inner join (select ts2 as jts from sst) as t2 on t1.jts = t2.jts + #system sh/exec.sh -n dnode1 -s stop -x SIGINT From c8f106106a0c43d453a1f701adbc621d2f30bfdd Mon Sep 17 00:00:00 2001 From: Hongze Cheng Date: Tue, 8 Aug 2023 14:49:05 +0800 Subject: [PATCH 092/123] fix: update assert issue --- source/dnode/vnode/src/tsdb/tsdbCommit2.c | 63 ++++++++++++----------- 1 file changed, 34 insertions(+), 29 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCommit2.c b/source/dnode/vnode/src/tsdb/tsdbCommit2.c index 0639cd91a5..d4fa4de510 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCommit2.c +++ b/source/dnode/vnode/src/tsdb/tsdbCommit2.c @@ -49,7 +49,7 @@ typedef struct { } ctx[1]; // reader - SSttFileReader *sttReader; + TSttFileReaderArray sttReaderArray[1]; // iter TTsdbIterArray dataIterArray[1]; @@ -226,7 +226,7 @@ static int32_t tsdbCommitOpenReader(SCommitter2 *committer) { int32_t code = 0; int32_t lino = 0; - ASSERT(committer->sttReader == NULL); + ASSERT(TARRAY2_SIZE(committer->sttReaderArray) == 0); if (committer->ctx->fset == NULL // || committer->sttTrigger > 1 // @@ -241,31 +241,32 @@ static int32_t 
tsdbCommitOpenReader(SCommitter2 *committer) { ASSERT(lvl->level == 0); - if (TARRAY2_SIZE(lvl->fobjArr) == 0) { - return 0; + STFileObj *fobj = NULL; + TARRAY2_FOREACH(lvl->fobjArr, fobj) { + SSttFileReader *sttReader; + + SSttFileReaderConfig config = { + .tsdb = committer->tsdb, + .szPage = committer->szPage, + .file = fobj->f[0], + }; + + code = tsdbSttFileReaderOpen(fobj->fname, &config, &sttReader); + TSDB_CHECK_CODE(code, lino, _exit); + + code = TARRAY2_APPEND(committer->sttReaderArray, sttReader); + TSDB_CHECK_CODE(code, lino, _exit); + + STFileOp op = { + .optype = TSDB_FOP_REMOVE, + .fid = fobj->f->fid, + .of = fobj->f[0], + }; + + code = TARRAY2_APPEND(committer->fopArray, op); + TSDB_CHECK_CODE(code, lino, _exit); } - ASSERT(TARRAY2_SIZE(lvl->fobjArr) == 1); - - STFileObj *fobj = TARRAY2_FIRST(lvl->fobjArr); - - SSttFileReaderConfig config = { - .tsdb = committer->tsdb, - .szPage = committer->szPage, - .file = fobj->f[0], - }; - code = tsdbSttFileReaderOpen(fobj->fname, &config, &committer->sttReader); - TSDB_CHECK_CODE(code, lino, _exit); - - STFileOp op = { - .optype = TSDB_FOP_REMOVE, - .fid = fobj->f->fid, - .of = fobj->f[0], - }; - - code = TARRAY2_APPEND(committer->fopArray, op); - TSDB_CHECK_CODE(code, lino, _exit); - _exit: if (code) { TSDB_ERROR_LOG(TD_VID(committer->tsdb->pVnode), lino, code); @@ -273,7 +274,10 @@ _exit: return code; } -static int32_t tsdbCommitCloseReader(SCommitter2 *committer) { return tsdbSttFileReaderClose(&committer->sttReader); } +static int32_t tsdbCommitCloseReader(SCommitter2 *committer) { + TARRAY2_CLEAR(committer->sttReaderArray, tsdbSttFileReaderClose); + return 0; +} static int32_t tsdbCommitOpenIter(SCommitter2 *committer) { int32_t code = 0; @@ -310,10 +314,11 @@ static int32_t tsdbCommitOpenIter(SCommitter2 *committer) { TSDB_CHECK_CODE(code, lino, _exit); // STT - if (committer->sttReader) { + SSttFileReader *sttReader; + TARRAY2_FOREACH(committer->sttReaderArray, sttReader) { // data iter config.type = 
TSDB_ITER_TYPE_STT; - config.sttReader = committer->sttReader; + config.sttReader = sttReader; code = tsdbIterOpen(&config, &iter); TSDB_CHECK_CODE(code, lino, _exit); @@ -323,7 +328,7 @@ static int32_t tsdbCommitOpenIter(SCommitter2 *committer) { // tomb iter config.type = TSDB_ITER_TYPE_STT_TOMB; - config.sttReader = committer->sttReader; + config.sttReader = sttReader; code = tsdbIterOpen(&config, &iter); TSDB_CHECK_CODE(code, lino, _exit); From 17c6143471b88c32d34f91a7fdb16815c6ff3e78 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 8 Aug 2023 15:08:11 +0800 Subject: [PATCH 093/123] fix(stream): check more status when handling the state transfer. --- source/dnode/vnode/src/tq/tq.c | 16 +++++++--------- source/libs/executor/src/executor.c | 2 +- source/libs/stream/src/streamExec.c | 6 ++++-- source/libs/stream/src/streamMeta.c | 3 ++- 4 files changed, 14 insertions(+), 13 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 4d433042ad..41e9268452 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1277,7 +1277,6 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { if (done) { pTask->tsInfo.step2Start = taosGetTimestampMs(); streamTaskEndScanWAL(pTask); - streamMetaReleaseTask(pMeta, pTask); } else { STimeWindow* pWindow = &pTask->dataRange.window; tqDebug("s-task:%s level:%d verRange:%" PRId64 " - %" PRId64 " window:%" PRId64 "-%" PRId64 @@ -1303,13 +1302,11 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { streamSetStatusNormal(pTask); } - // 4. 1) transfer the ownership of executor state, 2) update the scan data range for source task. - // 5. resume the related stream task. - streamMetaReleaseTask(pMeta, pTask); - streamMetaReleaseTask(pMeta, pStreamTask); - tqStartStreamTasks(pTq); } + + streamMetaReleaseTask(pMeta, pTask); + streamMetaReleaseTask(pMeta, pStreamTask); } else { // todo update the chkInfo version for current task. 
// this task has an associated history stream task, so we need to scan wal from the end version of @@ -1515,7 +1512,7 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { if (pTask != NULL) { // even in halt status, the data in inputQ must be processed int8_t st = pTask->status.taskStatus; - if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY/* || st == TASK_STATUS__SCAN_HISTORY_WAL*/) { + if (st == TASK_STATUS__NORMAL || st == TASK_STATUS__SCAN_HISTORY) { tqDebug("vgId:%d s-task:%s start to process block from inputQ, last chk point:%" PRId64, vgId, pTask->id.idStr, pTask->chkInfo.version); streamProcessRunReq(pTask); @@ -1528,8 +1525,9 @@ int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { streamMetaReleaseTask(pTq->pStreamMeta, pTask); tqStartStreamTasks(pTq); return 0; - } else { - tqError("vgId:%d failed to found s-task, taskId:%d", vgId, taskId); + } else { // NOTE: pTask->status.schedStatus is not updated since it is not be handled by the run exec. + // todo add one function to handle this + tqError("vgId:%d failed to found s-task, taskId:0x%x may have been dropped", vgId, taskId); return -1; } } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 05767db286..7832834cee 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -1046,7 +1046,7 @@ int32_t qStreamInfoResetTimewindowFilter(qTaskInfo_t tinfo) { SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo; STimeWindow* pWindow = &pTaskInfo->streamInfo.fillHistoryWindow; - qDebug("%s set remove scan-history filter window:%" PRId64 "-%" PRId64 ", new window:%" PRId64 "-%" PRId64, + qDebug("%s remove scan-history filter window:%" PRId64 "-%" PRId64 ", set new window:%" PRId64 "-%" PRId64, GET_TASKID(pTaskInfo), pWindow->skey, pWindow->ekey, INT64_MIN, INT64_MAX); pWindow->skey = INT64_MIN; diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 62296efe84..1fd2f7edf4 100644 --- 
a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -545,8 +545,11 @@ int32_t streamExecForAll(SStreamTask* pTask) { return 0; } +// the task may be set dropping/stopping, while it is still in the task queue, therefore, the sched-status can not +// be updated by tryExec function, therefore, the schedStatus will always be the TASK_SCHED_STATUS__WAITING. bool streamTaskIsIdle(const SStreamTask* pTask) { - return (pTask->status.schedStatus == TASK_SCHED_STATUS__INACTIVE); + return (pTask->status.schedStatus == TASK_SCHED_STATUS__INACTIVE || pTask->status.taskStatus == TASK_STATUS__STOP || + pTask->status.taskStatus == TASK_STATUS__DROPPING); } int32_t streamTaskEndScanWAL(SStreamTask* pTask) { @@ -584,7 +587,6 @@ int32_t streamTryExec(SStreamTask* pTask) { return -1; } - // todo the task should be commit here // todo the task should be commit here if (taosQueueEmpty(pTask->inputQueue->queue)) { // fill-history WAL scan has completed diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 7886091401..80b690e20d 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -330,7 +330,8 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId) { } taosWUnLockLatch(&pMeta->lock); - qDebug("s-task:0x%x set task status:%s", taskId, streamGetTaskStatusStr(TASK_STATUS__DROPPING)); + qDebug("s-task:0x%x set task status:%s and start to unregister it", taskId, + streamGetTaskStatusStr(TASK_STATUS__DROPPING)); while (1) { taosRLockLatch(&pMeta->lock); From db92546cf77424302f03533d7a457c097cd60b16 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Tue, 8 Aug 2023 16:09:33 +0800 Subject: [PATCH 094/123] fix(stream): remove invalid filter. 
--- source/libs/executor/src/scanoperator.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c index 3a2ccb2047..da4bd1e23c 100644 --- a/source/libs/executor/src/scanoperator.c +++ b/source/libs/executor/src/scanoperator.c @@ -2180,15 +2180,11 @@ FETCH_NEXT_BLOCK: continue; } - // filter the block extracted from WAL files, according to the time window - // apply additional time window filter - doBlockDataWindowFilter(pRes, pInfo->primaryTsIndex, &pStreamInfo->fillHistoryWindow, id); - blockDataUpdateTsWindow(pInfo->pRes, pInfo->primaryTsIndex); - if (pRes->info.rows == 0) { + setBlockIntoRes(pInfo, pRes, &pStreamInfo->fillHistoryWindow, false); + if (pInfo->pRes->info.rows == 0) { continue; } - setBlockIntoRes(pInfo, pRes, &pStreamInfo->fillHistoryWindow, false); if (pInfo->pCreateTbRes->info.rows > 0) { pInfo->scanMode = STREAM_SCAN_FROM_RES; qDebug("create table res exists, rows:%"PRId64" return from stream scan, %s", pInfo->pCreateTbRes->info.rows, id); From 0ce702f002da515a4d776c0231b0a55073b9bcac Mon Sep 17 00:00:00 2001 From: sunpeng Date: Tue, 8 Aug 2023 16:58:04 +0800 Subject: [PATCH 095/123] docs: fix connection param in taosws in python connector --- docs/en/14-reference/03-connector/07-python.mdx | 2 +- docs/zh/08-connector/30-python.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 831e79eeb7..5067c33e2d 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -373,7 +373,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat ```python -conn = taosws.connect(url="ws://localhost:6041") +conn = taosws.connect("taosws://localhost:6041") # Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. 
conn.execute("DROP DATABASE IF EXISTS test") conn.execute("CREATE DATABASE test") diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 15c11d05c3..ab98b5b8de 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -375,7 +375,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat ```python -conn = taosws.connect(url="ws://localhost:6041") +conn = taosws.connect("taosws://localhost:6041") # Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. conn.execute("DROP DATABASE IF EXISTS test") conn.execute("CREATE DATABASE test") From 90008bf17c7d8cc872657c8e4fe615599a2398ed Mon Sep 17 00:00:00 2001 From: sunpeng Date: Tue, 8 Aug 2023 16:58:04 +0800 Subject: [PATCH 096/123] docs: fix connection param in taosws in python connector --- docs/en/14-reference/03-connector/07-python.mdx | 2 +- docs/zh/08-connector/30-python.mdx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/en/14-reference/03-connector/07-python.mdx b/docs/en/14-reference/03-connector/07-python.mdx index 831e79eeb7..5067c33e2d 100644 --- a/docs/en/14-reference/03-connector/07-python.mdx +++ b/docs/en/14-reference/03-connector/07-python.mdx @@ -373,7 +373,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat ```python -conn = taosws.connect(url="ws://localhost:6041") +conn = taosws.connect("taosws://localhost:6041") # Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. 
conn.execute("DROP DATABASE IF EXISTS test") conn.execute("CREATE DATABASE test") diff --git a/docs/zh/08-connector/30-python.mdx b/docs/zh/08-connector/30-python.mdx index 15c11d05c3..ab98b5b8de 100644 --- a/docs/zh/08-connector/30-python.mdx +++ b/docs/zh/08-connector/30-python.mdx @@ -375,7 +375,7 @@ conn.execute("CREATE STABLE weather(ts TIMESTAMP, temperature FLOAT) TAGS (locat ```python -conn = taosws.connect(url="ws://localhost:6041") +conn = taosws.connect("taosws://localhost:6041") # Execute a sql, ignore the result set, just get affected rows. It's useful for DDL and DML statement. conn.execute("DROP DATABASE IF EXISTS test") conn.execute("CREATE DATABASE test") From f7b393562803b87298d1ab00f302502f7c8a5caa Mon Sep 17 00:00:00 2001 From: "chao.feng" Date: Tue, 8 Aug 2023 17:08:30 +0800 Subject: [PATCH 097/123] udpate user_privilege_show test case and add it to cases.task by charles --- tests/parallel_test/cases.task | 3 + .../0-others/user_privilege_all.py | 62 ++++++++++++++++++- 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 986d36e177..0e4894d741 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -155,6 +155,9 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_control.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_show.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_multi_users.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py diff --git 
a/tests/system-test/0-others/user_privilege_all.py b/tests/system-test/0-others/user_privilege_all.py index 2e796882c8..846b76317e 100644 --- a/tests/system-test/0-others/user_privilege_all.py +++ b/tests/system-test/0-others/user_privilege_all.py @@ -258,6 +258,66 @@ class TDTestCase: "insert into tb values(now, 20.0, 20);", "select * from tb;"], "res": [True, True, True, True, False, True, False] + }, + "test_db_all_childtable_none": { + "db_privilege": "all", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 20.2, 20)", + "insert into ct1 using stb tags('ct1') values(now, 21.21, 21)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 22.22, 22);", + "select * from tb;"], + "res": [True, True, True, True, True, True, True] + }, + "test_db_none_stable_all_childtable_none": { + "db_privilege": "none", + "stable_priviege": "all", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 23.23, 23)", + "insert into ct1 using stb tags('ct1') values(now, 24.24, 24)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 25.25, 25);", + "select * from tb;"], + "res": [True, True, True, True, True, False, False] + }, + "test_db_no_permission_childtable_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "all", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 28.28, 28);", + "select * from tb;"], + "res": [False, 
True, True, True, False, False, False] + }, + "test_db_none_stable_none_table_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "all", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 29.29, 29);", + "select * from tb;"], + "res": [False, False, False, False, False, True, True] } } @@ -361,7 +421,7 @@ class TDTestCase: data = res.fetch_all() tdLog.debug("query result: {}".format(data)) # check query results by cases - if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read"] and self.cases[case_name]["sql"][index] == "select * from ct2;": + if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read", "test_db_no_permission_childtable_all"] and self.cases[case_name]["sql"][index] == "select * from ct2;": if not self.cases[case_name]["res"][index]: if 0 == len(data): tdLog.debug("Query with sql {} successfully as expected with empty result".format(self.cases[case_name]["sql"][index])) From e4d16e594cd1f78e3a8938aba13d1b9b787a9947 Mon Sep 17 00:00:00 2001 From: Benguang Zhao Date: Tue, 8 Aug 2023 17:46:37 +0800 Subject: [PATCH 098/123] enh: check if disk space sufficient at primary dir with tfs --- include/libs/tfs/tfs.h | 19 ++++++ source/dnode/mgmt/mgmt_vnode/src/vmInt.c | 16 +----- source/dnode/mgmt/mgmt_vnode/src/vmWorker.c | 12 +++- source/dnode/mgmt/node_mgmt/inc/dmMgmt.h | 4 +- source/dnode/mgmt/node_mgmt/src/dmEnv.c | 64 +++++++++++++++------ source/dnode/mgmt/node_util/inc/dmUtil.h | 2 + source/libs/tfs/src/tfs.c | 34 +++++++++++ source/os/src/osEnv.c | 4 +- 8 files changed, 118 insertions(+), 37 deletions(-) diff --git a/include/libs/tfs/tfs.h b/include/libs/tfs/tfs.h index 
509f8dc9e8..2b90e3226c 100644 --- a/include/libs/tfs/tfs.h +++ b/include/libs/tfs/tfs.h @@ -300,6 +300,25 @@ void tfsClosedir(STfsDir *pDir); */ int32_t tfsGetMonitorInfo(STfs *pTfs, SMonDiskInfo *pInfo); +/** + * @brief Check if disk space available at level + * + * @param pTfs The fs object. + * #param level the level + * @return bool + */ +bool tfsDiskSpaceAvailable(STfs *pTfs, int32_t level); + +/** + * @brief Check if disk space sufficient at disk of level + * + * @param pTfs The fs object. + * @param level the level + * @param disk the disk + * @return bool + */ +bool tfsDiskSpaceSufficient(STfs *pTfs, int32_t level, int32_t disk); + #ifdef __cplusplus } #endif diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c index 94a753062c..0ff2537e4c 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c +++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c @@ -460,7 +460,6 @@ static void vmCleanup(SVnodeMgmt *pMgmt) { vmCloseVnodes(pMgmt); vmStopWorker(pMgmt); vnodeCleanup(); - tfsClose(pMgmt->pTfs); taosThreadRwlockDestroy(&pMgmt->lock); taosMemoryFree(pMgmt); } @@ -535,20 +534,9 @@ static int32_t vmInit(SMgmtInputOpt *pInput, SMgmtOutputOpt *pOutput) { pMgmt->msgCb.mgmt = pMgmt; taosThreadRwlockInit(&pMgmt->lock, NULL); - SDiskCfg dCfg = {0}; - tstrncpy(dCfg.dir, tsDataDir, TSDB_FILENAME_LEN); - dCfg.level = 0; - dCfg.primary = 1; - SDiskCfg *pDisks = tsDiskCfg; - int32_t numOfDisks = tsDiskCfgNum; - if (numOfDisks <= 0 || pDisks == NULL) { - pDisks = &dCfg; - numOfDisks = 1; - } - - pMgmt->pTfs = tfsOpen(pDisks, numOfDisks); + pMgmt->pTfs = pInput->pTfs; if (pMgmt->pTfs == NULL) { - dError("failed to init tfs since %s", terrstr()); + dError("tfs is null."); goto _OVER; } tmsgReportStartup("vnode-tfs", "initialized"); diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c index 247c1729a3..d567f1128e 100644 --- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c +++ 
b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c @@ -15,6 +15,7 @@ #define _DEFAULT_SOURCE #include "vmInt.h" +#include "vnodeInt.h" static inline void vmSendRsp(SRpcMsg *pMsg, int32_t code) { if (pMsg->info.handle == NULL) return; @@ -158,6 +159,15 @@ static void vmSendResponse(SRpcMsg *pMsg) { } } +static bool vmDataSpaceSufficient(SVnodeObj *pVnode) { + STfs *pTfs = pVnode->pImpl->pTfs; + if (pTfs) { + return tfsDiskSpaceSufficient(pTfs, 0, pVnode->diskPrimary); + } else { + return osDataSpaceSufficient(); + } +} + static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtype) { const STraceId *trace = &pMsg->info.traceId; if (pMsg->contLen < sizeof(SMsgHead)) { @@ -203,7 +213,7 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp taosWriteQitem(pVnode->pFetchQ, pMsg); break; case WRITE_QUEUE: - if (!osDataSpaceSufficient()) { + if (!vmDataSpaceSufficient(pVnode)) { terrno = TSDB_CODE_NO_ENOUGH_DISKSPACE; code = terrno; dError("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code)); diff --git a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h index 02cd678433..98489433b9 100644 --- a/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h +++ b/source/dnode/mgmt/node_mgmt/inc/dmMgmt.h @@ -20,6 +20,7 @@ #include "uv.h" #include "dmInt.h" +#include "tfs.h" #ifdef __cplusplus extern "C" { @@ -79,6 +80,7 @@ typedef struct SDnode { TdThreadMutex mutex; TdFilePtr lockfile; SDnodeData data; + STfs *pTfs; SMgmtWrapper wrappers[NODE_END]; } SDnode; @@ -124,4 +126,4 @@ void dmGetQnodeLoads(SQnodeLoad *pInfo); } #endif -#endif /*_TD_DND_MGMT_H_*/ \ No newline at end of file +#endif /*_TD_DND_MGMT_H_*/ diff --git a/source/dnode/mgmt/node_mgmt/src/dmEnv.c b/source/dnode/mgmt/node_mgmt/src/dmEnv.c index 3f9c5bbeaf..a34002161d 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmEnv.c +++ b/source/dnode/mgmt/node_mgmt/src/dmEnv.c @@ -96,28 +96,23 @@ _exit: return code; } 
-static bool dmCheckDiskSpace() { - osUpdate(); - // sufficiency - if (!osDataSpaceSufficient()) { - dWarn("free data disk size: %f GB, not sufficient, expected %f GB at least", - (double)tsDataSpace.size.avail / 1024.0 / 1024.0 / 1024.0, - (double)tsDataSpace.reserved / 1024.0 / 1024.0 / 1024.0); +static bool dmDataSpaceAvailable() { + SDnode *pDnode = dmInstance(); + if (pDnode->pTfs) { + return tfsDiskSpaceAvailable(pDnode->pTfs, 0); } - if (!osLogSpaceSufficient()) { - dWarn("free log disk size: %f GB, not sufficient, expected %f GB at least", - (double)tsLogSpace.size.avail / 1024.0 / 1024.0 / 1024.0, - (double)tsLogSpace.reserved / 1024.0 / 1024.0 / 1024.0); - } - if (!osTempSpaceSufficient()) { - dWarn("free temp disk size: %f GB, not sufficient, expected %f GB at least", - (double)tsTempSpace.size.avail / 1024.0 / 1024.0 / 1024.0, - (double)tsTempSpace.reserved / 1024.0 / 1024.0 / 1024.0); - } - // availability - bool ret = true; if (!osDataSpaceAvailable()) { dError("data disk space unavailable, i.e. 
%s", tsDataDir); + return false; + } + return true; +} + +static bool dmCheckDiskSpace() { + osUpdate(); + // availability + bool ret = true; + if (!dmDataSpaceAvailable()) { terrno = TSDB_CODE_NO_DISKSPACE; ret = false; } @@ -134,6 +129,34 @@ static bool dmCheckDiskSpace() { return ret; } +int32_t dmDiskInit() { + SDnode *pDnode = dmInstance(); + SDiskCfg dCfg = {0}; + tstrncpy(dCfg.dir, tsDataDir, TSDB_FILENAME_LEN); + dCfg.level = 0; + dCfg.primary = 1; + SDiskCfg *pDisks = tsDiskCfg; + int32_t numOfDisks = tsDiskCfgNum; + if (numOfDisks <= 0 || pDisks == NULL) { + pDisks = &dCfg; + numOfDisks = 1; + } + + pDnode->pTfs = tfsOpen(pDisks, numOfDisks); + if (pDnode->pTfs == NULL) { + dError("failed to init tfs since %s", terrstr()); + return -1; + } + return 0; +} + +int32_t dmDiskClose() { + SDnode *pDnode = dmInstance(); + tfsClose(pDnode->pTfs); + pDnode->pTfs = NULL; + return 0; +} + static bool dmCheckDataDirVersion() { char checkDataDirJsonFileName[PATH_MAX] = {0}; snprintf(checkDataDirJsonFileName, PATH_MAX, "%s/dnode/dnodeCfg.json", tsDataDir); @@ -147,6 +170,7 @@ static bool dmCheckDataDirVersion() { int32_t dmInit() { dInfo("start to init dnode env"); + if (dmDiskInit() != 0) return -1; if (!dmCheckDataDirVersion()) return -1; if (!dmCheckDiskSpace()) return -1; if (dmCheckRepeatInit(dmInstance()) != 0) return -1; @@ -177,6 +201,7 @@ void dmCleanup() { udfcClose(); udfStopUdfd(); taosStopCacheRefreshWorker(); + dmDiskClose(); dInfo("dnode env is cleaned up"); taosCleanupCfg(); @@ -367,6 +392,7 @@ SMgmtInputOpt dmBuildMgmtInputOpt(SMgmtWrapper *pWrapper) { SMgmtInputOpt opt = { .path = pWrapper->path, .name = pWrapper->name, + .pTfs = pWrapper->pDnode->pTfs, .pData = &pWrapper->pDnode->data, .processCreateNodeFp = dmProcessCreateNodeReq, .processAlterNodeTypeFp = dmProcessAlterNodeTypeReq, diff --git a/source/dnode/mgmt/node_util/inc/dmUtil.h b/source/dnode/mgmt/node_util/inc/dmUtil.h index 85057e5916..32c3d22506 100644 --- 
a/source/dnode/mgmt/node_util/inc/dmUtil.h +++ b/source/dnode/mgmt/node_util/inc/dmUtil.h @@ -37,6 +37,7 @@ #include "monitor.h" #include "qnode.h" #include "sync.h" +#include "tfs.h" #include "wal.h" #include "libs/function/tudf.h" @@ -111,6 +112,7 @@ typedef struct { typedef struct { const char *path; const char *name; + STfs *pTfs; SDnodeData *pData; SMsgCb msgCb; ProcessCreateNodeFp processCreateNodeFp; diff --git a/source/libs/tfs/src/tfs.c b/source/libs/tfs/src/tfs.c index 8adaab91a1..445c24159f 100644 --- a/source/libs/tfs/src/tfs.c +++ b/source/libs/tfs/src/tfs.c @@ -14,6 +14,7 @@ */ #define _DEFAULT_SOURCE +#include "osEnv.h" #include "tfsInt.h" static int32_t tfsMount(STfs *pTfs, SDiskCfg *pCfg); @@ -113,6 +114,39 @@ SDiskSize tfsGetSize(STfs *pTfs) { return size; } +bool tfsDiskSpaceAvailable(STfs *pTfs, int32_t level) { + if (level < 0 || level >= pTfs->nlevel) { + return false; + } + STfsTier *pTier = TFS_TIER_AT(pTfs, level); + for (int32_t id = 0; id < pTier->ndisk; id++) { + SDiskID diskId = {.level = level, .id = id}; + STfsDisk *pDisk = TFS_DISK_AT(pTfs, diskId); + if (pDisk == NULL) { + return false; + } + if (pDisk->size.avail <= 0) { + fError("tfs disk space unavailable. 
level:%d, disk:%d, path:%s", level, id, pDisk->path); + return false; + } + } + return true; +} + +bool tfsDiskSpaceSufficient(STfs *pTfs, int32_t level, int32_t disk) { + if (level < 0 || level >= pTfs->nlevel) { + return false; + } + + STfsTier *pTier = TFS_TIER_AT(pTfs, level); + if (disk < 0 || disk >= pTier->ndisk) { + return false; + } + SDiskID diskId = {.level = level, .id = disk}; + STfsDisk *pDisk = TFS_DISK_AT(pTfs, diskId); + return pDisk->size.avail >= tsDataSpace.reserved; +} + int32_t tfsGetDisksAtLevel(STfs *pTfs, int32_t level) { if (level < 0 || level >= pTfs->nlevel) { return 0; diff --git a/source/os/src/osEnv.c b/source/os/src/osEnv.c index 7f0e6d1dee..0fc136c693 100644 --- a/source/os/src/osEnv.c +++ b/source/os/src/osEnv.c @@ -95,10 +95,10 @@ void osCleanup() {} bool osLogSpaceAvailable() { return tsLogSpace.size.avail > 0; } -bool osDataSpaceAvailable() { return tsDataSpace.size.avail > 0; } - bool osTempSpaceAvailable() { return tsTempSpace.size.avail > 0; } +bool osDataSpaceAvailable() { return tsDataSpace.size.avail > 0; } + bool osLogSpaceSufficient() { return tsLogSpace.size.avail > tsLogSpace.reserved; } bool osDataSpaceSufficient() { return tsDataSpace.size.avail > tsDataSpace.reserved; } From bcc7df2dda43437e0ab0a99257a5c1bfeba45a74 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Tue, 8 Aug 2023 10:04:41 +0000 Subject: [PATCH 099/123] fix taosd cannot quit --- source/libs/transport/src/transSvr.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c index a546ee8159..c6c412022a 100644 --- a/source/libs/transport/src/transSvr.c +++ b/source/libs/transport/src/transSvr.c @@ -726,7 +726,7 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { tError("read error %s", uv_err_name(nread)); } // TODO(log other failure reason) - tWarn("failed to create connect:%p", q); + tWarn("failed to create connect:%p, reason: %s", 
q, uv_err_name(nread)); taosMemoryFree(buf->base); uv_close((uv_handle_t*)q, NULL); return; @@ -741,10 +741,17 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) { uv_pipe_t* pipe = (uv_pipe_t*)q; if (!uv_pipe_pending_count(pipe)) { tError("No pending count"); + uv_close((uv_handle_t*)q, NULL); + return; + } + if (pThrd->quit) { + tWarn("thread already received quit msg, ignore incoming conn"); + + uv_close((uv_handle_t*)q, NULL); return; } - uv_handle_type pending = uv_pipe_pending_type(pipe); + // uv_handle_type pending = uv_pipe_pending_type(pipe); SSvrConn* pConn = createConn(pThrd); From 5dbab16bf6e77242f0e2d4d9f5698acfb9edf790 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 8 Aug 2023 18:13:57 +0800 Subject: [PATCH 100/123] test:fix comatibility case --- tests/parallel_test/cases.task | 4 ++- tests/system-test/0-others/compatibility.py | 27 +++++++++++++++++++-- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 747eb909a0..1b43dc5ca1 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -794,9 +794,10 @@ ,,y,script,./test.sh -f tsim/user/basic.sim ,,y,script,./test.sh -f tsim/user/password.sim ,,y,script,./test.sh -f tsim/user/privilege_db.sim -#,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim +,,y,script,./test.sh -f tsim/user/privilege_sysinfo.sim ,,y,script,./test.sh -f tsim/user/privilege_topic.sim ,,y,script,./test.sh -f tsim/user/privilege_table.sim +,,y,script,./test.sh -f tsim/user/privilege_create_db.sim ,,y,script,./test.sh -f tsim/db/alter_option.sim ,,y,script,./test.sh -f tsim/db/alter_replica_31.sim ,,y,script,./test.sh -f tsim/db/basic1.sim @@ -969,6 +970,7 @@ ,,y,script,./test.sh -f tsim/query/tag_scan.sim ,,y,script,./test.sh -f tsim/query/nullColSma.sim ,,y,script,./test.sh -f tsim/query/bug3398.sim +,,y,script,./test.sh -f tsim/query/explain_tsorder.sim ,,y,script,./test.sh -f 
tsim/qnode/basic1.sim ,,y,script,./test.sh -f tsim/snode/basic1.sim ,,y,script,./test.sh -f tsim/mnode/basic1.sim diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index 98a0fbe18d..2d9b67977c 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -138,6 +138,8 @@ class TDTestCase: tdLog.printNoPrefix(f"==========step1:prepare and check data in old version-{BASEVERSION}") tdLog.info(f" LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ") os.system(f"LD_LIBRARY_PATH=/usr/lib taosBenchmark -t {tableNumbers} -n {recordNumbers1} -y ") + os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database test '") + # os.system(f"LD_LIBRARY_PATH=/usr/lib taos -s 'use test;create stream current_stream into current_stream_output_stb as select _wstart as `start`, _wend as wend, max(current) as max_current from meters where voltage <= 220 interval (5s);' ") # os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;create stream power_stream into power_stream_output_stb as select ts, concat_ws(\\".\\", location, tbname) as meter_location, current*voltage*cos(phase) as active_power, current*voltage*sin(phase) as reactive_power from meters partition by tbname;" ') # os.system('LD_LIBRARY_PATH=/usr/lib taos -s "use test;show streams;" ') @@ -151,6 +153,10 @@ class TDTestCase: os.system("LD_LIBRARY_PATH=/usr/lib taos -s 'flush database db4096 '") os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/TS-3131.tsql") + # add deleted data + os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/deletedData.sql") + + cmd = f" LD_LIBRARY_PATH={bPath}/build/lib {bPath}/build/bin/taos -h localhost ;" tdLog.info(f"new client version connect to old version taosd, commad return value:{cmd}") if os.system(cmd) == 0: @@ -185,11 +191,18 @@ class TDTestCase: # tdsql.query("show streams;") # tdsql.query(f"select count(*) from {stb}") # 
tdsql.checkData(0,0,tableNumbers*recordNumbers2) - tdsql.query(f"select count(*) from db4096.stb0") + + # checkout db4096 + tdsql.query("select count(*) from db4096.stb0") tdsql.checkData(0,0,50000) + + # checkout deleted data + tdsql.execute("insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );") + tdsql.query("flush database deldata;select avg(c1) from deldata.ct1;") + tdsql=tdCom.newTdSql() - tdLog.printNoPrefix(f"==========step4:verify backticks in taos Sql-TD18542") + tdLog.printNoPrefix("==========step4:verify backticks in taos Sql-TD18542") tdsql.execute("drop database if exists db") tdsql.execute("create database db") tdsql.execute("use db") @@ -203,6 +216,8 @@ class TDTestCase: tdsql.execute("insert into db.`ct4` using db.stb1 TAGS(4) values(now(),14);") tdsql.query("select * from db.ct4") tdsql.checkData(0,1,14) + + #check retentions tdsql=tdCom.newTdSql() tdsql.query("describe information_schema.ins_databases;") qRows=tdsql.queryRows @@ -222,8 +237,12 @@ class TDTestCase: caller = inspect.getframeinfo(inspect.stack()[0][0]) args = (caller.filename, caller.lineno) tdLog.exit("%s(%d) failed" % args) + + # check stream tdsql.query("show streams;") tdsql.checkRows(0) + + #check TS-3131 tdsql.query("select *,tbname from d0.almlog where mcid='m0103';") tdsql.checkRows(6) expectList = [0,3003,20031,20032,20033,30031] @@ -238,6 +257,8 @@ class TDTestCase: tdsql.execute("insert into test.d80 values (now+1s, 11, 103, 0.21);") tdsql.execute("insert into test.d9 values (now+5s, 4.3, 104, 0.4);") + + # check tmq conn = taos.connect() consumer = Consumer( @@ -265,6 +286,8 @@ class TDTestCase: print(block.fetchall()) tdsql.query("show topics;") tdsql.checkRows(1) + + def stop(self): 
tdSql.close() tdLog.success(f"{__file__} successfully executed") From 0b88efdee93fe69b7c66e3b449a57b069d99f962 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 8 Aug 2023 18:38:39 +0800 Subject: [PATCH 101/123] test:modify base version 3100 in rollingup cases --- tests/parallel_test/cases.task | 2 +- tests/system-test/0-others/compatibility.py | 3 ++- tests/system-test/6-cluster/5dnode3mnodeRoll.py | 10 +++++++++- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 4ff835b292..612ed86e41 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -452,7 +452,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertDataAsync.py -N 6 -M 3 -n 3 ,,n,system-test,python3 ./test.py -f 6-cluster/manually-test/6dnode3mnodeInsertLessDataAlterRep3to1to3.py -N 6 -M 3 -#,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 +,,n,system-test,python ./test.py -f 6-cluster/5dnode3mnodeRoll.py -N 3 -C 1 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 ,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeAdd1Ddnoe.py -N 7 -M 3 -C 6 -n 3 #,,y,system-test,./pytest.sh python3 ./test.py -f 6-cluster/5dnode3mnodeDrop.py -N 5 diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index 2d9b67977c..2e846c0128 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -198,7 +198,8 @@ class TDTestCase: # checkout deleted data tdsql.execute("insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 
2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );") - tdsql.query("flush database deldata;select avg(c1) from deldata.ct1;") + tdsql.execute("flush database deldata;") + tdsql.query("select avg(c1) from deldata.ct1;") tdsql=tdCom.newTdSql() diff --git a/tests/system-test/6-cluster/5dnode3mnodeRoll.py b/tests/system-test/6-cluster/5dnode3mnodeRoll.py index 8d7d4fb3e5..43a7c948e5 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRoll.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRoll.py @@ -27,7 +27,7 @@ import threading import time import json -BASEVERSION = "3.0.7.0" +BASEVERSION = "3.1.0.0" class TDTestCase: @@ -245,6 +245,9 @@ class TDTestCase: os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/TS-3131.tsql") # self.buildTaosd(bPath) + # add deleted data + os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/deletedData.sql") + threads=[] threads.append(threading.Thread(target=self.insertAllData, args=(cPath_temp,dbname,tableNumbers1,recordNumbers1))) for tr in threads: @@ -285,6 +288,11 @@ class TDTestCase: tdsql1.query(f"select count(*) from db4096.stb0") tdsql1.checkData(0,0,50000) + # checkout deleted data + tdsql.execute("insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a );") + tdsql.query("flush database deldata;select avg(c1) from deldata.ct1;") + + # tdsql1.query("show streams;") # tdsql1.checkRows(2) tdsql1.query("select *,tbname from d0.almlog where mcid='m0103';") From 8f7fbf154cdafb5cbd740c6a67e8fdd525873952 Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Tue, 8 Aug 2023 19:20:41 +0800 Subject: [PATCH 102/123] test:fix comatibility case --- tests/system-test/0-others/deletedData.sql 
| 11 +++++++++++ 1 file changed, 11 insertions(+) create mode 100644 tests/system-test/0-others/deletedData.sql diff --git a/tests/system-test/0-others/deletedData.sql b/tests/system-test/0-others/deletedData.sql new file mode 100644 index 0000000000..781b9562cf --- /dev/null +++ b/tests/system-test/0-others/deletedData.sql @@ -0,0 +1,11 @@ +drop database if exists deldata; +create database deldata duration 300; +use deldata; +create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int); +create table deldata.ct1 using deldata.stb1 tags ( 1 ); +insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); +select avg(c1) from deldata.ct1; +delete from deldata.stb1; +flush database deldata; +insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); +delete from deldata.ct1; From cfb5247e30c0d3f9cd9f962aa6abb6f652e0f992 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 9 Aug 2023 00:46:18 +0000 Subject: [PATCH 103/123] change var name --- include/libs/transport/trpc.h | 2 +- source/client/src/clientEnv.c | 2 +- source/dnode/mgmt/node_mgmt/src/dmTransport.c | 2 +- source/libs/transport/inc/transportInt.h | 6 +++--- source/libs/transport/src/trans.c | 2 +- source/libs/transport/src/transCli.c | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/include/libs/transport/trpc.h 
b/include/libs/transport/trpc.h index 93e4d72ad7..e5955aad54 100644 --- a/include/libs/transport/trpc.h +++ b/include/libs/transport/trpc.h @@ -89,7 +89,7 @@ typedef struct SRpcInit { int32_t retryMinInterval; // retry init interval int32_t retryStepFactor; // retry interval factor int32_t retryMaxInterval; // retry max interval - int64_t retryMaxTimouet; + int64_t retryMaxTimeout; int32_t failFastThreshold; int32_t failFastInterval; diff --git a/source/client/src/clientEnv.c b/source/client/src/clientEnv.c index 238b3613f5..40c27bf164 100644 --- a/source/client/src/clientEnv.c +++ b/source/client/src/clientEnv.c @@ -169,7 +169,7 @@ void *openTransporter(const char *user, const char *auth, int32_t numOfThread) { rpcInit.retryMinInterval = tsRedirectPeriod; rpcInit.retryStepFactor = tsRedirectFactor; rpcInit.retryMaxInterval = tsRedirectMaxPeriod; - rpcInit.retryMaxTimouet = tsMaxRetryWaitTime; + rpcInit.retryMaxTimeout = tsMaxRetryWaitTime; int32_t connLimitNum = tsNumOfRpcSessions / (tsNumOfRpcThreads * 3); connLimitNum = TMAX(connLimitNum, 10); diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c index df54f8abba..e0f7da3ac4 100644 --- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c +++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c @@ -299,7 +299,7 @@ int32_t dmInitClient(SDnode *pDnode) { rpcInit.retryMinInterval = tsRedirectPeriod; rpcInit.retryStepFactor = tsRedirectFactor; rpcInit.retryMaxInterval = tsRedirectMaxPeriod; - rpcInit.retryMaxTimouet = tsMaxRetryWaitTime; + rpcInit.retryMaxTimeout = tsMaxRetryWaitTime; rpcInit.failFastInterval = 5000; // interval threshold(ms) rpcInit.failFastThreshold = 3; // failed threshold diff --git a/source/libs/transport/inc/transportInt.h b/source/libs/transport/inc/transportInt.h index ca48da690b..cc2c0d4e84 100644 --- a/source/libs/transport/inc/transportInt.h +++ b/source/libs/transport/inc/transportInt.h @@ -46,14 +46,14 @@ typedef struct { int8_t connType; char 
label[TSDB_LABEL_LEN]; char user[TSDB_UNI_LEN]; // meter ID - int32_t compatibilityVer; + int32_t compatibilityVer; int32_t compressSize; // -1: no compress, 0 : all data compressed, size: compress data if larger than size int8_t encryption; // encrypt or not - + int32_t retryMinInterval; // retry init interval int32_t retryStepFactor; // retry interval factor int32_t retryMaxInterval; // retry max interval - int32_t retryMaxTimouet; + int32_t retryMaxTimeout; int32_t failFastThreshold; int32_t failFastInterval; diff --git a/source/libs/transport/src/trans.c b/source/libs/transport/src/trans.c index 08b0451982..ed94521df0 100644 --- a/source/libs/transport/src/trans.c +++ b/source/libs/transport/src/trans.c @@ -55,7 +55,7 @@ void* rpcOpen(const SRpcInit* pInit) { pRpc->retryMinInterval = pInit->retryMinInterval; // retry init interval pRpc->retryStepFactor = pInit->retryStepFactor; pRpc->retryMaxInterval = pInit->retryMaxInterval; - pRpc->retryMaxTimouet = pInit->retryMaxTimouet; + pRpc->retryMaxTimeout = pInit->retryMaxTimeout; pRpc->failFastThreshold = pInit->failFastThreshold; pRpc->failFastInterval = pInit->failFastInterval; diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c index 71379daa50..cfdc5b5e8b 100644 --- a/source/libs/transport/src/transCli.c +++ b/source/libs/transport/src/transCli.c @@ -2287,7 +2287,7 @@ bool cliGenRetryRule(SCliConn* pConn, STransMsg* pResp, SCliMsg* pMsg) { pCtx->retryMinInterval = pTransInst->retryMinInterval; pCtx->retryMaxInterval = pTransInst->retryMaxInterval; pCtx->retryStepFactor = pTransInst->retryStepFactor; - pCtx->retryMaxTimeout = pTransInst->retryMaxTimouet; + pCtx->retryMaxTimeout = pTransInst->retryMaxTimeout; pCtx->retryInitTimestamp = taosGetTimestampMs(); pCtx->retryNextInterval = pCtx->retryMinInterval; pCtx->retryStep = 0; From 2c65ffc33b691e7ba9dad89f33243cdd662271f0 Mon Sep 17 00:00:00 2001 From: "chao.feng" Date: Tue, 8 Aug 2023 17:08:30 +0800 Subject: [PATCH 104/123] 
remove the case user_privilege_multi_users.py from cases.task --- tests/parallel_test/cases.task | 2 + .../0-others/user_privilege_all.py | 62 ++++++++++++++++++- 2 files changed, 63 insertions(+), 1 deletion(-) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 986d36e177..c9204e9ce8 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -155,6 +155,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_control.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_show.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_all.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py diff --git a/tests/system-test/0-others/user_privilege_all.py b/tests/system-test/0-others/user_privilege_all.py index 2e796882c8..846b76317e 100644 --- a/tests/system-test/0-others/user_privilege_all.py +++ b/tests/system-test/0-others/user_privilege_all.py @@ -258,6 +258,66 @@ class TDTestCase: "insert into tb values(now, 20.0, 20);", "select * from tb;"], "res": [True, True, True, True, False, True, False] + }, + "test_db_all_childtable_none": { + "db_privilege": "all", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 20.2, 20)", + "insert into ct1 using stb tags('ct1') values(now, 21.21, 21)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 22.22, 22);", + "select * from tb;"], + "res": [True, True, True, True, True, True, True] + }, + 
"test_db_none_stable_all_childtable_none": { + "db_privilege": "none", + "stable_priviege": "all", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 23.23, 23)", + "insert into ct1 using stb tags('ct1') values(now, 24.24, 24)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 25.25, 25);", + "select * from tb;"], + "res": [True, True, True, True, True, False, False] + }, + "test_db_no_permission_childtable_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "all", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "none", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 28.28, 28);", + "select * from tb;"], + "res": [False, True, True, True, False, False, False] + }, + "test_db_none_stable_none_table_all": { + "db_privilege": "none", + "stable_priviege": "none", + "child_table_ct1_privilege": "none", + "child_table_ct2_privilege": "none", + "table_tb_privilege": "all", + "sql": ["insert into ct2 using stb tags('ct2') values(now, 26.26, 26)", + "insert into ct1 using stb tags('ct1') values(now, 27.27, 27)", + "select * from stb;", + "select * from ct1;", + "select * from ct2;", + "insert into tb values(now, 29.29, 29);", + "select * from tb;"], + "res": [False, False, False, False, False, True, True] } } @@ -361,7 +421,7 @@ class TDTestCase: data = res.fetch_all() tdLog.debug("query result: {}".format(data)) # check query results by cases - if case_name in ["test_db_no_permission_childtable_read", "test_db_write_childtable_read"] and self.cases[case_name]["sql"][index] == "select * from ct2;": + if case_name in ["test_db_no_permission_childtable_read", 
"test_db_write_childtable_read", "test_db_no_permission_childtable_all"] and self.cases[case_name]["sql"][index] == "select * from ct2;": if not self.cases[case_name]["res"][index]: if 0 == len(data): tdLog.debug("Query with sql {} successfully as expected with empty result".format(self.cases[case_name]["sql"][index])) From f02fd54298b7fb39f8f6c18d4c97dbcd4eb79f39 Mon Sep 17 00:00:00 2001 From: "chao.feng" Date: Tue, 1 Aug 2023 17:32:06 +0800 Subject: [PATCH 105/123] add test case to cases.task by charles --- tests/parallel_test/cases.task | 1 + .../0-others/user_privilege_multi_users.py | 126 ++++++++++++++++++ 2 files changed, 127 insertions(+) create mode 100644 tests/system-test/0-others/user_privilege_multi_users.py diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 89572d1c06..d8f178dcfa 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -154,6 +154,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_control.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_manage.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege.py +,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/user_privilege_multi_users.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/fsync.py ,,y,system-test,./pytest.sh python3 ./test.py -f 0-others/multilevel.py ,,n,system-test,python3 ./test.py -f 0-others/compatibility.py diff --git a/tests/system-test/0-others/user_privilege_multi_users.py b/tests/system-test/0-others/user_privilege_multi_users.py new file mode 100644 index 0000000000..8812f42e7b --- /dev/null +++ b/tests/system-test/0-others/user_privilege_multi_users.py @@ -0,0 +1,126 @@ +from itertools import product +import taos +import random +from taos.tmq import * +from util.cases import * +from util.common import * +from util.log import * +from util.sql import * +from util.sqlset import * + + +class TDTestCase: + def init(self, conn, logSql, 
replicaVar=1): + self.replicaVar = int(replicaVar) + tdLog.debug("start to execute %s" % __file__) + # init the tdsql + tdSql.init(conn.cursor()) + self.setsql = TDSetSql() + # user info + self.userNum = 100 + self.basic_username = "user" + self.password = "pwd" + + # db info + self.dbname = "user_privilege_multi_users" + self.stbname = 'stb' + self.ctbname_num = 100 + self.column_dict = { + 'ts': 'timestamp', + 'col1': 'float', + 'col2': 'int', + } + self.tag_dict = { + 'ctbname': 'binary(10)' + } + + self.privilege_list = [] + + def prepare_data(self): + """Create the db and data for test + """ + # create datebase + tdSql.execute(f"create database {self.dbname}") + tdLog.debug("sql:" + f"create database {self.dbname}") + tdSql.execute(f"use {self.dbname}") + tdLog.debug("sql:" + f"use {self.dbname}") + + # create super table + tdSql.execute(self.setsql.set_create_stable_sql(self.stbname, self.column_dict, self.tag_dict)) + tdLog.debug("Create stable {} successfully".format(self.stbname)) + for ctbIndex in range(self.ctbname_num): + ctname = f"ctb{ctbIndex}" + tdSql.execute(f"create table {ctname} using {self.stbname} tags('{ctname}')") + tdLog.debug("sql:" + f"create table {ctname} using {self.stbname} tags('{ctname}')") + + def create_multiusers(self): + """Create the user for test + """ + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + tdSql.execute(f'create user {username} pass "{self.password}"') + tdLog.debug("sql:" + f'create user {username} pass "{self.password}"') + + def grant_privilege(self): + """Add the privilege for the users + """ + try: + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + privilege = random.choice(["read", "write", "all"]) + condition = f"ctbname='ctb{userIndex}'" + self.privilege_list.append({ + "username": username, + "privilege": privilege, + "condition": condition + }) + tdSql.execute(f'grant {privilege} on {self.dbname}.{self.stbname} with 
{condition} to {username}') + tdLog.debug("sql:" + f'grant {privilege} on {self.dbname}.{self.stbname} with {condition} to {username}') + except Exception as ex: + tdLog.exit(ex) + + def remove_privilege(self): + """Remove the privilege for the users + """ + try: + for item in self.privilege_list: + username = item["username"] + privilege = item["privilege"] + condition = item["condition"] + tdSql.execute(f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}') + tdLog.debug("sql:" + f'revoke {privilege} on {self.dbname}.{self.stbname} with {condition} from {username}') + except Exception as ex: + tdLog.exit(ex) + + def run(self): + """ + Check the information from information_schema.ins_user_privileges + """ + self.create_multiusers() + self.prepare_data() + # grant privilege to users + self.grant_privilege() + # check information_schema.ins_user_privileges + tdSql.query("select * from information_schema.ins_user_privileges;") + tdLog.debug("Current information_schema.ins_user_privileges values: {}".format(tdSql.queryResult)) + if len(tdSql.queryResult) >= self.userNum: + tdLog.debug("case passed") + else: + tdLog.exit("The privilege number in information_schema.ins_user_privileges is incorrect") + + def stop(self): + # remove the privilege + self.remove_privilege() + # clear env + tdSql.execute(f"drop database {self.dbname}") + # remove the users + for userIndex in range(self.userNum): + username = f"{self.basic_username}{userIndex}" + tdSql.execute(f'drop user {username}') + # close the connection + tdSql.close() + tdLog.success("%s successfully executed" % __file__) + + +tdCases.addWindows(__file__, TDTestCase()) +tdCases.addLinux(__file__, TDTestCase()) From 50bf8c948a2610910bb1cb004cdf770aa99f7334 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 9 Aug 2023 02:11:27 +0000 Subject: [PATCH 106/123] change default session val --- source/common/src/tglobal.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/source/common/src/tglobal.c b/source/common/src/tglobal.c index d35f9a24a8..a772efc33c 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -47,7 +47,7 @@ bool tsPrintAuth = false; // queue & threads int32_t tsNumOfRpcThreads = 1; -int32_t tsNumOfRpcSessions = 10000; +int32_t tsNumOfRpcSessions = 30000; int32_t tsTimeToGetAvailableConn = 500000; int32_t tsKeepAliveIdle = 60; @@ -1281,9 +1281,9 @@ int32_t taosApplyLocalCfg(SConfig *pCfg, char *name) { // tsSmlDataFormat = cfgGetItem(pCfg, "smlDataFormat")->bval; // } else if (strcasecmp("smlBatchSize", name) == 0) { // tsSmlBatchSize = cfgGetItem(pCfg, "smlBatchSize")->i32; - } else if(strcasecmp("smlTsDefaultName", name) == 0) { + } else if (strcasecmp("smlTsDefaultName", name) == 0) { tstrncpy(tsSmlTsDefaultName, cfgGetItem(pCfg, "smlTsDefaultName")->str, TSDB_COL_NAME_LEN); - } else if(strcasecmp("smlDot2Underline", name) == 0) { + } else if (strcasecmp("smlDot2Underline", name) == 0) { tsSmlDot2Underline = cfgGetItem(pCfg, "smlDot2Underline")->bval; } else if (strcasecmp("shellActivityTimer", name) == 0) { tsShellActivityTimer = cfgGetItem(pCfg, "shellActivityTimer")->i32; From 2b1478f46e7edcbff839429f247903103c5120b9 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 7 Aug 2023 17:13:49 +0800 Subject: [PATCH 107/123] Revert "fix(tsdb/read2): reset stt reader when suspended" This reverts commit 079d7ff69ec525c69be84f30c4295662843cb547. 
--- source/dnode/vnode/src/tsdb/tsdbRead2.c | 133 +++++++++++++----------- 1 file changed, 70 insertions(+), 63 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index b68f82d847..1d9d067685 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -1731,41 +1731,45 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader // row in last file block TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex); - int64_t ts = getCurrentKeyInLastBlock(pLastBlockReader); - + int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader); if (ASCENDING_TRAVERSE(pReader->info.order)) { - if (key < ts) { // imem, mem are all empty, file blocks (data blocks and last block) exist + if (key < tsLast) { return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); - } else if (key == ts) { - SRow* pTSRow = NULL; - int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader); - - TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree); - tsdbRowMergerAdd(pMerger, pRow1, NULL); - - doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, ts, pMerger, &pReader->info.verRange, pReader->idStr); - - code = tsdbRowMergerGetRow(pMerger, &pTSRow); - if (code != TSDB_CODE_SUCCESS) { - return code; - } - - code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo); - - taosMemoryFree(pTSRow); - tsdbRowMergerClear(pMerger); - return code; - } else { // key > ts + } else if (key > tsLast) { + return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false); + } + } else { + if (key > tsLast) { + return mergeRowsInFileBlocks(pBlockData, pBlockScanInfo, key, pReader); + } else if (key < tsLast) { return doMergeFileBlockAndLastBlock(pLastBlockReader, 
pReader, pBlockScanInfo, NULL, false); } - } else { // desc order - return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, pBlockData, true); } + // the following for key == tsLast + SRow* pTSRow = NULL; + int32_t code = tsdbRowMergerAdd(pMerger, &fRow, pReader->info.pSchema); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader); + + TSDBROW* pRow1 = tMergeTreeGetRow(&pLastBlockReader->mergeTree); + tsdbRowMergerAdd(pMerger, pRow1, NULL); + + doMergeRowsInLastBlock(pLastBlockReader, pBlockScanInfo, tsLast, pMerger, &pReader->info.verRange, pReader->idStr); + + code = tsdbRowMergerGetRow(pMerger, &pTSRow); + if (code != TSDB_CODE_SUCCESS) { + return code; + } + + code = doAppendRowFromTSRow(pReader->resBlockInfo.pResBlock, pReader, pTSRow, pBlockScanInfo); + + taosMemoryFree(pTSRow); + tsdbRowMergerClear(pMerger); + return code; + } else { // only last block exists return doMergeFileBlockAndLastBlock(pLastBlockReader, pReader, pBlockScanInfo, NULL, false); } @@ -2192,7 +2196,8 @@ static int32_t buildComposedDataBlockImpl(STsdbReader* pReader, STableBlockScanI SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo; TSDBROW *pRow = NULL, *piRow = NULL; - int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : INT64_MIN; + int64_t key = (pBlockData->nRow > 0 && (!pDumpInfo->allDumped)) ? pBlockData->aTSKEY[pDumpInfo->rowIndex] : + (ASCENDING_TRAVERSE(pReader->info.order) ? 
INT64_MAX : INT64_MIN); if (pBlockScanInfo->iter.hasVal) { pRow = getValidMemRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader); } @@ -2566,9 +2571,8 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { // load the last data block of current table STableBlockScanInfo* pScanInfo = *(STableBlockScanInfo**)pStatus->pTableIter; - if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) { - // reset the index in last block when handing a new file - // doCleanupTableScanInfo(pScanInfo); + if (pScanInfo == NULL) { + tsdbError("table Iter is null, invalid pScanInfo, try next table %s", pReader->idStr); bool hasNexTable = moveToNextTable(pUidList, pStatus); if (!hasNexTable) { return TSDB_CODE_SUCCESS; @@ -2577,8 +2581,15 @@ static int32_t doLoadLastBlockSequentially(STsdbReader* pReader) { continue; } - // reset the index in last block when handing a new file - // doCleanupTableScanInfo(pScanInfo); + if (pReader->pIgnoreTables && taosHashGet(*pReader->pIgnoreTables, &pScanInfo->uid, sizeof(pScanInfo->uid))) { + // reset the index in last block when handing a new file + bool hasNexTable = moveToNextTable(pUidList, pStatus); + if (!hasNexTable) { + return TSDB_CODE_SUCCESS; + } + + continue; + } bool hasDataInLastFile = initLastBlockReader(pLastBlockReader, pScanInfo, pReader); if (!hasDataInLastFile) { @@ -2669,16 +2680,32 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { (ASCENDING_TRAVERSE(pReader->info.order)) ? pBlockInfo->record.firstKey : pBlockInfo->record.lastKey; code = buildDataBlockFromBuf(pReader, pScanInfo, endKey); } else { - if (hasDataInLastBlock(pLastBlockReader) && !ASCENDING_TRAVERSE(pReader->info.order)) { - // only return the rows in last block - int64_t tsLast = getCurrentKeyInLastBlock(pLastBlockReader); - ASSERT(tsLast >= pBlockInfo->record.lastKey); + bool bHasDataInLastBlock = hasDataInLastBlock(pLastBlockReader); + int64_t tsLast = bHasDataInLastBlock ? 
getCurrentKeyInLastBlock(pLastBlockReader) : INT64_MIN; + if (!bHasDataInLastBlock || ((ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.lastKey < tsLast) || + (!ASCENDING_TRAVERSE(pReader->info.order) && pBlockInfo->record.firstKey > tsLast))) { + // whole block is required, return it directly + SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info; + pInfo->rows = pBlockInfo->record.numRow; + pInfo->id.uid = pScanInfo->uid; + pInfo->dataLoad = 0; + pInfo->window = (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey}; + setComposedBlockFlag(pReader, false); + setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order); + // update the last key for the corresponding table + pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? pInfo->window.ekey : pInfo->window.skey; + tsdbDebug("%p uid:%" PRIu64 + " clean file block retrieved from file, global index:%d, " + "table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s", + pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow, + pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr); + } else { SBlockData* pBData = &pReader->status.fileBlockData; tBlockDataReset(pBData); SSDataBlock* pResBlock = pReader->resBlockInfo.pResBlock; - tsdbDebug("load data in last block firstly, due to desc scan data, %s", pReader->idStr); + tsdbDebug("load data in last block firstly %s", pReader->idStr); int64_t st = taosGetTimestampUs(); @@ -2709,23 +2736,8 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) { pReader, pResBlock->info.id.uid, pResBlock->info.window.skey, pResBlock->info.window.ekey, pResBlock->info.rows, el, pReader->idStr); } - } else { // whole block is required, return it directly - SDataBlockInfo* pInfo = &pReader->resBlockInfo.pResBlock->info; - pInfo->rows = pBlockInfo->record.numRow; - pInfo->id.uid = pScanInfo->uid; - pInfo->dataLoad = 0; - pInfo->window 
= (STimeWindow){.skey = pBlockInfo->record.firstKey, .ekey = pBlockInfo->record.lastKey}; - setComposedBlockFlag(pReader, false); - setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlockInfo->record.lastKey, pReader->info.order); - - // update the last key for the corresponding table - pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->info.order) ? pInfo->window.ekey : pInfo->window.skey; - tsdbDebug("%p uid:%" PRIu64 - " clean file block retrieved from file, global index:%d, " - "table index:%d, rows:%d, brange:%" PRId64 "-%" PRId64 ", %s", - pReader, pScanInfo->uid, pBlockIter->index, pBlockInfo->tbBlockIdx, pBlockInfo->record.numRow, - pBlockInfo->record.firstKey, pBlockInfo->record.lastKey, pReader->idStr); } + } return (pReader->code != TSDB_CODE_SUCCESS) ? pReader->code : code; @@ -4099,11 +4111,6 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { tsdbDataFileReaderClose(&pReader->pFileReader); - int64_t loadBlocks = 0; - double elapse = 0; - pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &loadBlocks, &elapse); - pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES); - // resetDataBlockScanInfo excluding lastKey STableBlockScanInfo** p = NULL; int32_t iter = 0; @@ -4174,7 +4181,7 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { } } - tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); + tsdbUntakeReadSnap(pReader, pReader->pReadSnap, false); pReader->pReadSnap = NULL; pReader->flag = READER_STATUS_SUSPEND; From e9e06d1eae55e9b537a0f2513dcdf064e46e4dab Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 7 Aug 2023 17:17:12 +0800 Subject: [PATCH 108/123] fix: restore stt block/data block merge back --- source/dnode/vnode/src/tsdb/tsdbRead2.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c index 1d9d067685..57a649d682 100644 --- a/source/dnode/vnode/src/tsdb/tsdbRead2.c +++ 
b/source/dnode/vnode/src/tsdb/tsdbRead2.c @@ -439,7 +439,7 @@ static int32_t tsdbReaderCreate(SVnode* pVnode, SQueryTableDataCond* pCond, void return code; _end: - tsdbReaderClose(pReader); + tsdbReaderClose2(pReader); *ppReader = NULL; return code; } @@ -4110,7 +4110,10 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { } tsdbDataFileReaderClose(&pReader->pFileReader); - + int64_t loadBlocks = 0; + double elapse = 0; + pReader->status.pLDataIterArray = destroySttBlockReader(pReader->status.pLDataIterArray, &loadBlocks, &elapse); + pReader->status.pLDataIterArray = taosArrayInit(4, POINTER_BYTES); // resetDataBlockScanInfo excluding lastKey STableBlockScanInfo** p = NULL; int32_t iter = 0; @@ -4181,7 +4184,7 @@ int32_t tsdbReaderSuspend2(STsdbReader* pReader) { } } - tsdbUntakeReadSnap(pReader, pReader->pReadSnap, false); + tsdbUntakeReadSnap2(pReader, pReader->pReadSnap, false); pReader->pReadSnap = NULL; pReader->flag = READER_STATUS_SUSPEND; From 2403db06b0241aa40874c5e3a0447d3d699d562b Mon Sep 17 00:00:00 2001 From: chenhaoran Date: Wed, 9 Aug 2023 11:19:56 +0800 Subject: [PATCH 109/123] test:fix comatibility case --- tests/system-test/0-others/compatibility.py | 12 ++++++++++-- tests/system-test/6-cluster/5dnode3mnodeRoll.py | 11 ++++++++++- 2 files changed, 20 insertions(+), 3 deletions(-) diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py index 2e846c0128..cb804aad0c 100644 --- a/tests/system-test/0-others/compatibility.py +++ b/tests/system-test/0-others/compatibility.py @@ -30,7 +30,15 @@ class TDTestCase: self.replicaVar = int(replicaVar) tdLog.debug(f"start to excute {__file__}") tdSql.init(conn.cursor()) - + self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300;use deldata; + create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int); + create table 
deldata.ct1 using deldata.stb1 tags ( 1 ); + insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); + select avg(c1) from deldata.ct1; + delete from deldata.stb1; + flush database deldata; + insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); + delete from deldata.ct1;''' def checkProcessPid(self,processName): i=0 while i<60: @@ -154,7 +162,7 @@ class TDTestCase: os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/TS-3131.tsql") # add deleted data - os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/deletedData.sql") + os.system(f'LD_LIBRARY_PATH=/usr/lib taos -s "{self.deletedDataSql}" ') cmd = f" LD_LIBRARY_PATH={bPath}/build/lib {bPath}/build/bin/taos -h localhost ;" diff --git a/tests/system-test/6-cluster/5dnode3mnodeRoll.py b/tests/system-test/6-cluster/5dnode3mnodeRoll.py index 43a7c948e5..38ac47f777 100644 --- a/tests/system-test/6-cluster/5dnode3mnodeRoll.py +++ b/tests/system-test/6-cluster/5dnode3mnodeRoll.py @@ -37,6 +37,15 @@ class TDTestCase: tdSql.init(conn.cursor()) self.host = socket.gethostname() self.replicaVar = int(replicaVar) + self.deletedDataSql= '''drop database if exists deldata;create database deldata duration 300;use deldata; + create table deldata.stb1 (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int); + create table deldata.ct1 using deldata.stb1 tags ( 1 ); + insert into 
deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); + select avg(c1) from deldata.ct1; + delete from deldata.stb1; + flush database deldata; + insert into deldata.ct1 values ( now()-0s, 0, 0, 0, 0, 0.0, 0.0, 0, 'binary0', 'nchar0', now()+0a ) ( now()-10s, 1, 11111, 111, 11, 1.11, 11.11, 1, 'binary1', 'nchar1', now()+1a ) ( now()-20s, 2, 22222, 222, 22, 2.22, 22.22, 0, 'binary2', 'nchar2', now()+2a ) ( now()-30s, 3, 33333, 333, 33, 3.33, 33.33, 1, 'binary3', 'nchar3', now()+3a ); + delete from deldata.ct1;''' def checkProcessPid(self,processName): i=0 @@ -246,7 +255,7 @@ class TDTestCase: # self.buildTaosd(bPath) # add deleted data - os.system("LD_LIBRARY_PATH=/usr/lib taos -f 0-others/deletedData.sql") + os.system(f'LD_LIBRARY_PATH=/usr/lib taos -s "{self.deletedDataSql}" ') threads=[] threads.append(threading.Thread(target=self.insertAllData, args=(cPath_temp,dbname,tableNumbers1,recordNumbers1))) From 3cdb16352cfa71a86183447011759b0a12c019d8 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 9 Aug 2023 12:03:14 +0800 Subject: [PATCH 110/123] fix(stream): avoid launching check downstream when failed to add stream task into stream meta. 
--- include/libs/executor/executor.h | 4 +--- source/dnode/snode/src/snode.c | 2 +- source/dnode/vnode/src/sma/smaRollup.c | 2 +- source/dnode/vnode/src/tq/tq.c | 27 +++++++++++--------------- source/libs/executor/src/executor.c | 4 ++-- source/libs/stream/src/streamTask.c | 2 +- 6 files changed, 17 insertions(+), 24 deletions(-) diff --git a/include/libs/executor/executor.h b/include/libs/executor/executor.h index f90c38f341..634d708260 100644 --- a/include/libs/executor/executor.h +++ b/include/libs/executor/executor.h @@ -74,7 +74,7 @@ typedef enum { * @param vgId * @return */ -qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId); +qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId, int32_t taskId); /** * Create the exec task for queue mode @@ -95,8 +95,6 @@ int32_t qGetTableList(int64_t suid, void* pVnode, void* node, SArray **tableList */ void qSetTaskId(qTaskInfo_t tinfo, uint64_t taskId, uint64_t queryId); -//void qSetTaskCode(qTaskInfo_t tinfo, int32_t code); - int32_t qSetStreamOpOpen(qTaskInfo_t tinfo); // todo refactor diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 7235a56691..558180a3c2 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -88,7 +88,7 @@ int32_t sndExpandTask(SSnode *pSnode, SStreamTask *pTask, int64_t ver) { SReadHandle handle = { .vnode = NULL, .numOfVgroups = numOfChildEp, .pStateBackend = pTask->pState, .fillHistory = pTask->info.fillHistory }; initStreamStateAPI(&handle.api); - pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, 0); + pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, 0, pTask->id.taskId); ASSERT(pTask->exec.pExecutor); taosThreadMutexInit(&pTask->lock, NULL); diff --git a/source/dnode/vnode/src/sma/smaRollup.c b/source/dnode/vnode/src/sma/smaRollup.c index 9fd4938448..1e7de3c526 100644 --- a/source/dnode/vnode/src/sma/smaRollup.c +++ 
b/source/dnode/vnode/src/sma/smaRollup.c @@ -267,7 +267,7 @@ static int32_t tdSetRSmaInfoItemParams(SSma *pSma, SRSmaParam *param, SRSmaStat SReadHandle handle = {.vnode = pVnode, .initTqReader = 1, .pStateBackend = pStreamState}; initStorageAPI(&handle.api); - pRSmaInfo->taskInfo[idx] = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle, TD_VID(pVnode)); + pRSmaInfo->taskInfo[idx] = qCreateStreamExecTaskInfo(param->qmsg[idx], &handle, TD_VID(pVnode), 0); if (!pRSmaInfo->taskInfo[idx]) { terrno = TSDB_CODE_RSMA_QTASKINFO_CREATE; return TSDB_CODE_FAILED; diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 41e9268452..af336adc6a 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -956,7 +956,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { .winRange = pTask->dataRange.window}; initStorageAPI(&handle.api); - pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId); + pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId, pTask->id.taskId); if (pTask->exec.pExecutor == NULL) { return -1; } @@ -983,7 +983,7 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) { .winRange = pTask->dataRange.window}; initStorageAPI(&handle.api); - pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId); + pTask->exec.pExecutor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle, vgId, pTask->id.taskId); if (pTask->exec.pExecutor == NULL) { return -1; } @@ -1149,32 +1149,27 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms taosWLockLatch(&pStreamMeta->lock); code = streamMetaRegisterTask(pStreamMeta, sversion, pTask, &added); int32_t numOfTasks = streamMetaGetNumOfTasks(pStreamMeta); + taosWUnLockLatch(&pStreamMeta->lock); if (code < 0) { tqError("vgId:%d failed to add s-task:0x%x, total:%d", vgId, pTask->id.taskId, numOfTasks); tFreeStreamTask(pTask); - 
taosWUnLockLatch(&pStreamMeta->lock); return -1; } // not added into meta store - if (!added) { + if (added) { + tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks); + SStreamTask* p = streamMetaAcquireTask(pStreamMeta, taskId); + if (p != NULL) { // reset the downstreamReady flag. + streamTaskCheckDownstreamTasks(p); + } + streamMetaReleaseTask(pStreamMeta, p); + } else { tqWarn("vgId:%d failed to add s-task:0x%x, already exists in meta store", vgId, taskId); tFreeStreamTask(pTask); - pTask = NULL; } - taosWUnLockLatch(&pStreamMeta->lock); - - tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks); - - // 3. It's an fill history task, do nothing. wait for the main task to start it - SStreamTask* p = streamMetaAcquireTask(pStreamMeta, taskId); - if (p != NULL) { // reset the downstreamReady flag. - streamTaskCheckDownstreamTasks(p); - } - - streamMetaReleaseTask(pStreamMeta, p); return 0; } diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c index 7832834cee..a6059c7c42 100644 --- a/source/libs/executor/src/executor.c +++ b/source/libs/executor/src/executor.c @@ -304,7 +304,7 @@ qTaskInfo_t qCreateQueueExecTaskInfo(void* msg, SReadHandle* pReaderHandle, int3 return pTaskInfo; } -qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId) { +qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t vgId, int32_t taskId) { if (msg == NULL) { return NULL; } @@ -317,7 +317,7 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, SReadHandle* readers, int32_t v } qTaskInfo_t pTaskInfo = NULL; - code = qCreateExecTask(readers, vgId, 0, pPlan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_STREAM); + code = qCreateExecTask(readers, vgId, taskId, pPlan, &pTaskInfo, NULL, NULL, OPTR_EXEC_MODEL_STREAM); if (code != TSDB_CODE_SUCCESS) { nodesDestroyNode((SNode*)pPlan); qDestroyTask(pTaskInfo); diff --git 
a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index dc4e5ff4a6..9056fa8d93 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -216,7 +216,7 @@ static void freeItem(void* p) { } void tFreeStreamTask(SStreamTask* pTask) { - qDebug("free s-task:%s, %p", pTask->id.idStr, pTask); + qDebug("free s-task:0x%x, %p", pTask->id.taskId, pTask); int32_t status = atomic_load_8((int8_t*)&(pTask->status.taskStatus)); if (pTask->inputQueue) { From 93bdefdcff06f1e7ebe330699a7cb27af08eb5f9 Mon Sep 17 00:00:00 2001 From: CityChen81 <39059674+CityChen81@users.noreply.github.com> Date: Wed, 9 Aug 2023 14:08:18 +0800 Subject: [PATCH 111/123] =?UTF-8?q?Update=2005-insert.md=20=E4=B8=AD?= =?UTF-8?q?=E6=96=87=E6=8B=AC=E5=8F=B7=E9=97=AE=E9=A2=98=20(#22377)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- docs/zh/12-taos-sql/05-insert.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/12-taos-sql/05-insert.md b/docs/zh/12-taos-sql/05-insert.md index b72754b154..c03ad9bd8f 100644 --- a/docs/zh/12-taos-sql/05-insert.md +++ b/docs/zh/12-taos-sql/05-insert.md @@ -82,7 +82,7 @@ INSERT INTO d1001 (ts, current, phase) VALUES ('2021-07-13 14:06:33.196', 10.27, ```sql INSERT INTO d1001 VALUES ('2021-07-13 14:06:34.630', 10.2, 219, 0.32) ('2021-07-13 14:06:35.779', 10.15, 217, 0.33) - d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); + d1002 (ts, current, phase) VALUES ('2021-07-13 14:06:34.255', 10.27, 0.31); ``` ## 插入记录时自动建表 From 70f03635a4b091c5ec55fde9d9f258983064f1a4 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 9 Aug 2023 07:39:42 +0000 Subject: [PATCH 112/123] rm duplicate para --- docs/zh/14-reference/12-config/index.md | 99 ++++++++++--------------- 1 file changed, 40 insertions(+), 59 deletions(-) diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index 
2f5f0fc3e8..d4560a644a 100755 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -95,30 +95,11 @@ taos -C ### maxShellConns | 属性 | 说明 | -| --------| ----------------------- | +| -------- | ----------------------- | | 适用范围 | 仅服务端适用 | -| 含义 | 一个 dnode 容许的连接数 | +| 含义 | 一个 dnode 容许的连接数 | | 取值范围 | 10-50000000 | -| 缺省值 | 5000 | - -### numOfRpcSessions - -| 属性 | 说明 | -| --------| ---------------------- | -| 适用范围 | 客户端和服务端都适用 | -| 含义 | 一个客户端能创建的最大连接数| -| 取值范围 | 100-100000 | -| 缺省值 | 10000 | - -### timeToGetAvailableConn - -| 属性 | 说明 | -| -------- | --------------------| -| 适用范围 | 客户端和服务端都适用 | -| 含义 |获得可用连接的最长等待时间| -| 取值范围 | 10-50000000(单位为毫秒)| -| 缺省值 | 500000 | - +| 缺省值 | 5000 | ### numOfRpcSessions @@ -392,12 +373,12 @@ charset 的有效值是 UTF-8。 ### metaCacheMaxSize -| 属性 | 说明 | -| -------- | ---------------------------------------------- | -| 适用范围 | 仅客户端适用 | -| 含义 | 指定单个客户端元数据缓存大小的最大值 | -| 单位 | MB | -| 缺省值 | -1 (无限制) | +| 属性 | 说明 | +| -------- | ------------------------------------ | +| 适用范围 | 仅客户端适用 | +| 含义 | 指定单个客户端元数据缓存大小的最大值 | +| 单位 | MB | +| 缺省值 | -1 (无限制) | ## 集群相关 @@ -479,13 +460,13 @@ charset 的有效值是 UTF-8。 ### slowLogScope -| 属性 | 说明 | -| -------- | --------------------------------------------------------------| -| 适用范围 | 仅客户端适用 | -| 含义 | 指定启动记录哪些类型的慢查询 | -| 可选值 | ALL, QUERY, INSERT, OTHERS, NONE | -| 缺省值 | ALL | -| 补充说明 | 默认记录所有类型的慢查询,可通过配置只记录某一类型的慢查询 | +| 属性 | 说明 | +| -------- | ---------------------------------------------------------- | +| 适用范围 | 仅客户端适用 | +| 含义 | 指定启动记录哪些类型的慢查询 | +| 可选值 | ALL, QUERY, INSERT, OTHERS, NONE | +| 缺省值 | ALL | +| 补充说明 | 默认记录所有类型的慢查询,可通过配置只记录某一类型的慢查询 | ### debugFlag @@ -685,16 +666,16 @@ charset 的有效值是 UTF-8。 | 适用范围 | 仅客户端适用 | | 含义 | schemaless 列数据是否顺序一致,从3.0.3.0开始,该配置废弃 | | 值域 | 0:不一致;1: 一致 | -| 缺省值 | 0 +| 缺省值 | 0 | ### smlTsDefaultName -| 属性 | 说明 | -| -------- | -------------------------------------------------------- | -| 适用范围 | 仅客户端适用 | +| 属性 | 说明 | +| -------- | 
-------------------------------------------- | +| 适用范围 | 仅客户端适用 | | 含义 | schemaless自动建表的时间列名字通过该配置设置 | | 类型 | 字符串 | -| 缺省值 | _ts | +| 缺省值 | _ts | ## 其他 @@ -728,31 +709,31 @@ charset 的有效值是 UTF-8。 ### ttlChangeOnWrite -| 属性 | 说明 | -| -------- | ------------------ | -| 适用范围 | 仅服务端适用 | -| 含义 | ttl 到期时间是否伴随表的修改操作改变 | -| 取值范围 | 0: 不改变;1:改变 | -| 缺省值 | 0 | +| 属性 | 说明 | +| -------- | ------------------------------------ | +| 适用范围 | 仅服务端适用 | +| 含义 | ttl 到期时间是否伴随表的修改操作改变 | +| 取值范围 | 0: 不改变;1:改变 | +| 缺省值 | 0 | ### keepTimeOffset -| 属性 | 说明 | -| -------- | ------------------ | -| 适用范围 | 仅服务端适用 | -| 含义 | 迁移操作的延时 | -| 单位 | 小时 | -| 取值范围 | 0-23 | -| 缺省值 | 0 | +| 属性 | 说明 | +| -------- | -------------- | +| 适用范围 | 仅服务端适用 | +| 含义 | 迁移操作的延时 | +| 单位 | 小时 | +| 取值范围 | 0-23 | +| 缺省值 | 0 | ### tmqMaxTopicNum -| 属性 | 说明 | -| -------- | ------------------ | -| 适用范围 | 仅服务端适用 | -| 含义 | 订阅最多可建立的 topic 数量 | -| 取值范围 | 1-10000| -| 缺省值 | 20 | +| 属性 | 说明 | +| -------- | --------------------------- | +| 适用范围 | 仅服务端适用 | +| 含义 | 订阅最多可建立的 topic 数量 | +| 取值范围 | 1-10000 | +| 缺省值 | 20 | ## 压缩参数 From b65336ab880021f15044cb29e8a3da39ff27134b Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Wed, 9 Aug 2023 07:41:05 +0000 Subject: [PATCH 113/123] rm duplicate para --- docs/zh/14-reference/12-config/index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md index d4560a644a..519b84ba71 100755 --- a/docs/zh/14-reference/12-config/index.md +++ b/docs/zh/14-reference/12-config/index.md @@ -108,7 +108,7 @@ taos -C | 适用范围 | 客户端和服务端都适用 | | 含义 | 一个客户端能创建的最大连接数 | | 取值范围 | 100-100000 | -| 缺省值 | 10000 | +| 缺省值 | 30000 | ### timeToGetAvailableConn From 4c92997328e070d64a189e868b5ecdd9a03de11e Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 9 Aug 2023 15:52:23 +0800 Subject: [PATCH 114/123] fix(stream): use streamId&taskId to identify the stream task in the stream meta. 
--- include/common/tmsg.h | 3 ++ include/libs/stream/tstream.h | 2 +- source/dnode/mnode/impl/src/mndStream.c | 6 ++++ source/dnode/snode/src/snode.c | 23 ++++++--------- source/dnode/vnode/src/tq/tq.c | 37 ++++++++++++------------- source/dnode/vnode/src/tq/tqRestore.c | 8 +++--- source/libs/stream/src/streamExec.c | 2 +- source/libs/stream/src/streamMeta.c | 23 ++++++++------- source/libs/stream/src/streamRecover.c | 8 ++++-- 9 files changed, 58 insertions(+), 54 deletions(-) diff --git a/include/common/tmsg.h b/include/common/tmsg.h index e772a47e3d..01923d2b30 100644 --- a/include/common/tmsg.h +++ b/include/common/tmsg.h @@ -2767,6 +2767,7 @@ typedef struct { typedef struct { SMsgHead head; int64_t leftForVer; + int64_t streamId; int32_t taskId; } SVDropStreamTaskReq; @@ -2958,6 +2959,7 @@ int32_t tDecodeMqVgOffset(SDecoder* pDecoder, SMqVgOffset* pOffset); typedef struct { SMsgHead head; + int64_t streamId; int32_t taskId; } SVPauseStreamTaskReq; @@ -2976,6 +2978,7 @@ int32_t tDeserializeSMPauseStreamReq(void* buf, int32_t bufLen, SMPauseStreamReq typedef struct { SMsgHead head; int32_t taskId; + int64_t streamId; int8_t igUntreated; } SVResumeStreamTaskReq; diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index b241ae9b41..add6660cef 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -646,7 +646,7 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded); int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId); int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); // todo remove it -SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId); +SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); int32_t streamMetaBegin(SStreamMeta* pMeta); diff --git 
a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index 4001202254..a0d53ec780 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -649,6 +649,8 @@ static int32_t mndPersistTaskDropReq(STrans *pTrans, SStreamTask *pTask) { pReq->head.vgId = htonl(pTask->info.nodeId); pReq->taskId = pTask->id.taskId; + pReq->streamId = pTask->id.streamId; + STransAction action = {0}; memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet)); action.pCont = pReq; @@ -1361,6 +1363,8 @@ static int32_t mndPauseStreamTask(STrans *pTrans, SStreamTask *pTask) { } pReq->head.vgId = htonl(pTask->info.nodeId); pReq->taskId = pTask->id.taskId; + pReq->streamId = pTask->id.streamId; + STransAction action = {0}; memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet)); action.pCont = pReq; @@ -1501,7 +1505,9 @@ static int32_t mndResumeStreamTask(STrans *pTrans, SStreamTask *pTask, int8_t ig } pReq->head.vgId = htonl(pTask->info.nodeId); pReq->taskId = pTask->id.taskId; + pReq->streamId = pTask->id.streamId; pReq->igUntreated = igUntreated; + STransAction action = {0}; memcpy(&action.epSet, &pTask->info.epSet, sizeof(SEpSet)); action.pCont = pReq; diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 558180a3c2..51b25b2476 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -35,9 +35,7 @@ void sndEnqueueStreamDispatch(SSnode *pSnode, SRpcMsg *pMsg) { tDecoderClear(&decoder); - int32_t taskId = req.taskId; - - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId); if (pTask) { SRpcMsg rsp = { .info = pMsg->info, @@ -181,7 +179,7 @@ int32_t sndProcessTaskDropReq(SSnode *pSnode, char *msg, int32_t msgLen) { SVDropStreamTaskReq *pReq = (SVDropStreamTaskReq *)msg; qDebug("snode:%d receive msg to drop stream task:0x%x", pSnode->pMeta->vgId, pReq->taskId); - 
SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->taskId); + SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->streamId, pReq->taskId); if (pTask == NULL) { qError("vgId:%d failed to acquire s-task:0x%x when dropping it", pSnode->pMeta->vgId, pReq->taskId); return 0; @@ -194,8 +192,7 @@ int32_t sndProcessTaskDropReq(SSnode *pSnode, char *msg, int32_t msgLen) { int32_t sndProcessTaskRunReq(SSnode *pSnode, SRpcMsg *pMsg) { SStreamTaskRunReq *pReq = pMsg->pCont; - int32_t taskId = pReq->taskId; - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->streamId, pReq->taskId); if (pTask) { streamProcessRunReq(pTask); streamMetaReleaseTask(pSnode->pMeta, pTask); @@ -213,9 +210,8 @@ int32_t sndProcessTaskDispatchReq(SSnode *pSnode, SRpcMsg *pMsg, bool exec) { SDecoder decoder; tDecoderInit(&decoder, (uint8_t *)msgBody, msgLen); tDecodeStreamDispatchReq(&decoder, &req); - int32_t taskId = req.taskId; - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.taskId); if (pTask) { SRpcMsg rsp = { .info = pMsg->info, .code = 0 }; streamProcessDispatchMsg(pTask, &req, &rsp, exec); @@ -235,8 +231,7 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) { tDecoderInit(&decoder, msgBody, msgLen); tDecodeStreamRetrieveReq(&decoder, &req); tDecoderClear(&decoder); - int32_t taskId = req.dstTaskId; - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.dstTaskId); if (pTask) { SRpcMsg rsp = { .info = pMsg->info, .code = 0}; @@ -252,7 +247,7 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) { int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) { SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t taskId = 
ntohl(pRsp->upstreamTaskId); - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pRsp->streamId, taskId); if (pTask) { streamProcessDispatchRsp(pTask, pRsp, pMsg->code); streamMetaReleaseTask(pSnode->pMeta, pTask); @@ -297,7 +292,7 @@ int32_t sndProcessStreamTaskScanHistoryFinishReq(SSnode *pSnode, SRpcMsg *pMsg) tDecoderClear(&decoder); // find task - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.downstreamTaskId); + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, req.downstreamTaskId); if (pTask == NULL) { return -1; } @@ -340,7 +335,7 @@ int32_t sndProcessStreamTaskCheckReq(SSnode *pSnode, SRpcMsg *pMsg) { .upstreamTaskId = req.upstreamTaskId, }; - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, taskId); + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, req.streamId, taskId); if (pTask != NULL) { rsp.status = streamTaskCheckStatus(pTask); @@ -400,7 +395,7 @@ int32_t sndProcessStreamTaskCheckRsp(SSnode* pSnode, SRpcMsg* pMsg) { qDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d", rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status); - SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, rsp.upstreamTaskId); + SStreamTask* pTask = streamMetaAcquireTask(pSnode->pMeta, rsp.streamId, rsp.upstreamTaskId); if (pTask == NULL) { qError("tq failed to locate the stream task:0x%x (vgId:%d), it may have been destroyed", rsp.upstreamTaskId, pSnode->pMeta->vgId); diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index af336adc6a..e80aa800bc 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1062,7 +1062,7 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { .upstreamTaskId = req.upstreamTaskId, }; - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, 
taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, taskId); if (pTask != NULL) { rsp.status = streamTaskCheckStatus(pTask); streamMetaReleaseTask(pTq->pStreamMeta, pTask); @@ -1099,7 +1099,7 @@ int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t sversion, SRpcMsg* pMsg) { tqDebug("tq task:0x%x (vgId:%d) recv check rsp(reqId:0x%" PRIx64 ") from 0x%x (vgId:%d) status %d", rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.status); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, rsp.upstreamTaskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, rsp.streamId, rsp.upstreamTaskId); if (pTask == NULL) { tqError("tq failed to locate the stream task:0x%x (vgId:%d), it may have been destroyed", rsp.upstreamTaskId, pTq->pStreamMeta->vgId); @@ -1160,7 +1160,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t sversion, char* msg, int32_t ms // not added into meta store if (added) { tqDebug("vgId:%d s-task:0x%x is deployed and add into meta, numOfTasks:%d", vgId, taskId, numOfTasks); - SStreamTask* p = streamMetaAcquireTask(pStreamMeta, taskId); + SStreamTask* p = streamMetaAcquireTask(pStreamMeta, pTask->id.streamId, taskId); if (p != NULL) { // reset the downstreamReady flag. streamTaskCheckDownstreamTasks(p); } @@ -1178,7 +1178,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { SStreamMeta* pMeta = pTq->pStreamMeta; int32_t code = TSDB_CODE_SUCCESS; - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->taskId); + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); if (pTask == NULL) { tqError("vgId:%d failed to acquire stream task:0x%x during stream recover, task may have been destroyed", pMeta->vgId, pReq->taskId); @@ -1234,7 +1234,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { bool done = false; // 1. 
get the related stream task - pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId); + pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId); if (pStreamTask == NULL) { // todo delete this task, if the related stream task is dropped qError("failed to find s-task:0x%x, it may have been destroyed, drop fill-history task:%s", @@ -1350,7 +1350,7 @@ int32_t tqProcessTaskTransferStateReq(STQ* pTq, SRpcMsg* pMsg) { tqDebug("vgId:%d start to process transfer state msg, from s-task:0x%x", pTq->pStreamMeta->vgId, req.downstreamTaskId); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.downstreamTaskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId); if (pTask == NULL) { tqError("failed to find task:0x%x, it may have been dropped already. process transfer state failed", req.downstreamTaskId); return -1; @@ -1386,7 +1386,7 @@ int32_t tqProcessTaskScanHistoryFinishReq(STQ* pTq, SRpcMsg* pMsg) { tDecodeStreamScanHistoryFinishReq(&decoder, &req); tDecoderClear(&decoder); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.downstreamTaskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.downstreamTaskId); if (pTask == NULL) { tqError("vgId:%d process scan history finish msg, failed to find task:0x%x, it may be destroyed", pTq->pStreamMeta->vgId, req.downstreamTaskId); @@ -1412,7 +1412,7 @@ int32_t tqProcessTaskScanHistoryFinishRsp(STQ* pTq, SRpcMsg* pMsg) { tDecodeCompleteHistoryDataMsg(&decoder, &req); tDecoderClear(&decoder); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.upstreamTaskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.upstreamTaskId); if (pTask == NULL) { tqError("vgId:%d process scan history finish rsp, failed to find task:0x%x, it may be destroyed", pTq->pStreamMeta->vgId, req.upstreamTaskId); @@ -1503,7 +1503,7 @@ int32_t 
tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg) { return 0; } - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, taskId); if (pTask != NULL) { // even in halt status, the data in inputQ must be processed int8_t st = pTask->status.taskStatus; @@ -1538,7 +1538,7 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) { tDecoderInit(&decoder, (uint8_t*)msgBody, msgLen); tDecodeStreamDispatchReq(&decoder, &req); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId); if (pTask) { SRpcMsg rsp = {.info = pMsg->info, .code = 0}; streamProcessDispatchMsg(pTask, &req, &rsp, exec); @@ -1553,7 +1553,7 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) { int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); int32_t taskId = ntohl(pRsp->upstreamTaskId); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pRsp->streamId, taskId); int32_t vgId = pTq->pStreamMeta->vgId; if (pTask) { @@ -1569,7 +1569,7 @@ int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { int32_t tqProcessTaskDropReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { SVDropStreamTaskReq* pReq = (SVDropStreamTaskReq*)msg; tqDebug("vgId:%d receive msg to drop stream task:0x%x", TD_VID(pTq->pVnode), pReq->taskId); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId); if (pTask == NULL) { tqError("vgId:%d failed to acquire s-task:0x%x when dropping it", pTq->pStreamMeta->vgId, pReq->taskId); return 0; @@ -1584,7 +1584,7 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, 
int32_t msg SVPauseStreamTaskReq* pReq = (SVPauseStreamTaskReq*)msg; SStreamMeta* pMeta = pTq->pStreamMeta; - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->taskId); + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pReq->streamId, pReq->taskId); if (pTask == NULL) { tqError("vgId:%d process pause req, failed to acquire task:0x%x, it may have been dropped already", pMeta->vgId, pReq->taskId); @@ -1597,7 +1597,7 @@ int32_t tqProcessTaskPauseReq(STQ* pTq, int64_t sversion, char* msg, int32_t msg SStreamTask* pHistoryTask = NULL; if (pTask->historyTaskId.taskId != 0) { - pHistoryTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.taskId); + pHistoryTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId); if (pHistoryTask == NULL) { tqError("vgId:%d process pause req, failed to acquire fill-history task:0x%x, it may have been dropped already", pMeta->vgId, pTask->historyTaskId.taskId); @@ -1656,13 +1656,13 @@ int32_t tqProcessTaskResumeImpl(STQ* pTq, SStreamTask* pTask, int64_t sversion, int32_t tqProcessTaskResumeReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgLen) { SVResumeStreamTaskReq* pReq = (SVResumeStreamTaskReq*)msg; - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId); int32_t code = tqProcessTaskResumeImpl(pTq, pTask, sversion, pReq->igUntreated); if (code != 0) { return code; } - SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pTask->historyTaskId.taskId); + SStreamTask* pHistoryTask = streamMetaAcquireTask(pTq->pStreamMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId); if (pHistoryTask) { code = tqProcessTaskResumeImpl(pTq, pHistoryTask, sversion, pReq->igUntreated); } @@ -1681,8 +1681,7 @@ int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg) { tDecodeStreamRetrieveReq(&decoder, &req); tDecoderClear(&decoder); - int32_t taskId = 
req.dstTaskId; - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.dstTaskId); if (pTask) { SRpcMsg rsp = {.info = pMsg->info, .code = 0}; @@ -1720,7 +1719,7 @@ int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) { tDecoderClear(&decoder); int32_t taskId = req.taskId; - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, taskId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, req.streamId, req.taskId); if (pTask != NULL) { SRpcMsg rsp = {.info = pMsg->info, .code = 0}; streamProcessDispatchMsg(pTask, &req, &rsp, false); diff --git a/source/dnode/vnode/src/tq/tqRestore.c b/source/dnode/vnode/src/tq/tqRestore.c index c3e7d03e43..3d9a91899c 100644 --- a/source/dnode/vnode/src/tq/tqRestore.c +++ b/source/dnode/vnode/src/tq/tqRestore.c @@ -72,8 +72,8 @@ int32_t tqStreamTasksStatusCheck(STQ* pTq) { taosWUnLockLatch(&pMeta->lock); for (int32_t i = 0; i < numOfTasks; ++i) { - int32_t* pTaskId = taosArrayGet(pTaskList, i); - SStreamTask* pTask = streamMetaAcquireTask(pMeta, *pTaskId); + SStreamId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pTaskId->streamId, pTaskId->taskId); if (pTask == NULL) { continue; } @@ -242,8 +242,8 @@ int32_t createStreamTaskRunReq(SStreamMeta* pStreamMeta, bool* pScanIdle) { numOfTasks = taosArrayGetSize(pTaskList); for (int32_t i = 0; i < numOfTasks; ++i) { - int32_t* pTaskId = taosArrayGet(pTaskList, i); - SStreamTask* pTask = streamMetaAcquireTask(pStreamMeta, *pTaskId); + SStreamId* pTaskId = taosArrayGet(pTaskList, i); + SStreamTask* pTask = streamMetaAcquireTask(pStreamMeta, pTaskId->streamId, pTaskId->taskId); if (pTask == NULL) { continue; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 1fd2f7edf4..2a35cb4978 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -290,7 
+290,7 @@ static void waitForTaskIdle(SStreamTask* pTask, SStreamTask* pStreamTask) { static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { SStreamMeta* pMeta = pTask->pMeta; - SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.taskId); + SStreamTask* pStreamTask = streamMetaAcquireTask(pMeta, pTask->streamTaskId.streamId, pTask->streamTaskId.taskId); if (pStreamTask == NULL) { // todo: destroy the fill-history task here qError("s-task:%s failed to find related stream task:0x%x, it may have been destroyed or closed", pTask->id.idStr, diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 80b690e20d..2bfad78ebf 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -66,14 +66,14 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF goto _err; } - _hash_fn_t fp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT); + _hash_fn_t fp = taosGetDefaultHashFunction(TSDB_DATA_TYPE_VARCHAR); pMeta->pTasks = taosHashInit(64, fp, true, HASH_NO_LOCK); if (pMeta->pTasks == NULL) { goto _err; } // task list - pMeta->pTaskList = taosArrayInit(4, sizeof(int32_t)); + pMeta->pTaskList = taosArrayInit(4, sizeof(SStreamId)); if (pMeta->pTaskList == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; goto _err; @@ -248,7 +248,7 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa return -1; } - taosArrayPush(pMeta->pTaskList, &pTask->id.taskId); + taosArrayPush(pMeta->pTaskList, &pTask->id); if (streamMetaSaveTask(pMeta, pTask) < 0) { tFreeStreamTask(pTask); @@ -274,10 +274,11 @@ int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta) { return (int32_t)size; } -SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int32_t taskId) { +SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { taosRLockLatch(&pMeta->lock); - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, 
sizeof(int32_t)); + int64_t keys[2] = {streamId, taskId}; + SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (ppTask != NULL) { if (!streamTaskShouldStop(&(*ppTask)->status)) { int32_t ref = atomic_add_fetch_32(&(*ppTask)->refCnt, 1); @@ -304,10 +305,10 @@ void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask) { } } -static void doRemoveIdFromList(SStreamMeta* pMeta, int32_t num, int32_t taskId) { +static void doRemoveIdFromList(SStreamMeta* pMeta, int32_t num, SStreamId* id) { for (int32_t i = 0; i < num; ++i) { - int32_t* pTaskId = taosArrayGet(pMeta->pTaskList, i); - if (*pTaskId == taskId) { + SStreamId* pTaskId = taosArrayGet(pMeta->pTaskList, i); + if (pTaskId->streamId == id->streamId && pTaskId->taskId == id->taskId) { taosArrayRemove(pMeta->pTaskList, i); break; } @@ -360,9 +361,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId) { atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__DROPPING); ASSERT(pTask->status.timerActive == 0); - - int32_t num = taosArrayGetSize(pMeta->pTaskList); - doRemoveIdFromList(pMeta, num, pTask->id.taskId); + doRemoveIdFromList(pMeta, (int32_t)taosArrayGetSize(pMeta->pTaskList), &pTask->id); // remove the ref by timer if (pTask->triggerParam != 0) { @@ -484,7 +483,7 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { return -1; } - taosArrayPush(pMeta->pTaskList, &pTask->id.taskId); + taosArrayPush(pMeta->pTaskList, &pTask->id); } else { tdbFree(pKey); tdbFree(pVal); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index ad486c3f20..830637adbc 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -19,7 +19,8 @@ typedef struct SStreamTaskRetryInfo { SStreamMeta* pMeta; - int32_t taskId; + int32_t taskId; + int64_t streamId; } SStreamTaskRetryInfo; static int32_t streamSetParamForScanHistory(SStreamTask* pTask); @@ -556,12 +557,12 @@ static void 
tryLaunchHistoryTask(void* param, void* tmrId) { } taosWUnLockLatch(&pMeta->lock); - SStreamTask* pTask = streamMetaAcquireTask(pMeta, pInfo->taskId); + SStreamTask* pTask = streamMetaAcquireTask(pMeta, pInfo->streamId, pInfo->taskId); if (pTask != NULL) { ASSERT(pTask->status.timerActive == 1); // abort the timer if intend to stop task - SStreamTask* pHTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.taskId); + SStreamTask* pHTask = streamMetaAcquireTask(pMeta, pTask->historyTaskId.streamId, pTask->historyTaskId.taskId); if (pHTask == NULL && (!streamTaskShouldStop(&pTask->status))) { const char* pStatus = streamGetTaskStatusStr(pTask->status.taskStatus); qWarn( @@ -603,6 +604,7 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { SStreamTaskRetryInfo* pInfo = taosMemoryCalloc(1, sizeof(SStreamTaskRetryInfo)); pInfo->taskId = pTask->id.taskId; + pInfo->streamId = pTask->id.streamId; pInfo->pMeta = pTask->pMeta; if (pTask->launchTaskTimer == NULL) { From 1e8579e8c5e8497a93ae00b621a4cab2bd004ed4 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 9 Aug 2023 16:04:48 +0800 Subject: [PATCH 115/123] fix(stream): fix other cases. 
--- include/libs/stream/tstream.h | 2 +- source/dnode/snode/src/snode.c | 2 +- source/dnode/vnode/src/tq/tq.c | 4 +- source/libs/stream/src/streamExec.c | 3 +- source/libs/stream/src/streamMeta.c | 57 ++++++-------------------- source/libs/stream/src/streamRecover.c | 7 +++- 6 files changed, 22 insertions(+), 53 deletions(-) diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index add6660cef..b9b24917f3 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -644,7 +644,7 @@ void streamMetaClose(SStreamMeta* streamMeta); int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask); int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId); int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded); -int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId); +int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); int32_t streamMetaGetNumOfTasks(SStreamMeta* pMeta); // todo remove it SStreamTask* streamMetaAcquireTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId); void streamMetaReleaseTask(SStreamMeta* pMeta, SStreamTask* pTask); diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 51b25b2476..91346e1d83 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -185,7 +185,7 @@ int32_t sndProcessTaskDropReq(SSnode *pSnode, char *msg, int32_t msgLen) { return 0; } - streamMetaUnregisterTask(pSnode->pMeta, pReq->taskId); + streamMetaUnregisterTask(pSnode->pMeta, pReq->streamId, pReq->taskId); streamMetaReleaseTask(pSnode->pMeta, pTask); return 0; } diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index e80aa800bc..bf52f77ce3 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1242,7 +1242,7 @@ int32_t tqProcessTaskScanHistory(STQ* pTq, SRpcMsg* pMsg) { tqDebug("s-task:%s fill-history task set status to 
be dropping", id); - streamMetaUnregisterTask(pMeta, pTask->id.taskId); + streamMetaUnregisterTask(pMeta, pTask->id.streamId, pTask->id.taskId); streamMetaReleaseTask(pMeta, pTask); return -1; } @@ -1575,7 +1575,7 @@ int32_t tqProcessTaskDropReq(STQ* pTq, int64_t sversion, char* msg, int32_t msgL return 0; } - streamMetaUnregisterTask(pTq->pStreamMeta, pReq->taskId); + streamMetaUnregisterTask(pTq->pStreamMeta, pReq->streamId, pReq->taskId); streamMetaReleaseTask(pTq->pStreamMeta, pTask); return 0; } diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c index 2a35cb4978..c7da80fdaf 100644 --- a/source/libs/stream/src/streamExec.c +++ b/source/libs/stream/src/streamExec.c @@ -350,10 +350,9 @@ static int32_t streamDoTransferStateToStreamTask(SStreamTask* pTask) { streamTaskResumeFromHalt(pStreamTask); qDebug("s-task:%s fill-history task set status to be dropping, save the state into disk", pTask->id.idStr); - int32_t taskId = pTask->id.taskId; // 5. free it and remove fill-history task from disk meta-store - streamMetaUnregisterTask(pMeta, taskId); + streamMetaUnregisterTask(pMeta, pTask->id.streamId, pTask->id.taskId); // 6. 
save to disk taosWLockLatch(&pMeta->lock); diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 2bfad78ebf..6d1dca0561 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -161,43 +161,6 @@ void streamMetaClose(SStreamMeta* pMeta) { taosMemoryFree(pMeta); } -#if 0 -int32_t streamMetaAddSerializedTask(SStreamMeta* pMeta, int64_t ver, char* msg, int32_t msgLen) { - SStreamTask* pTask = taosMemoryCalloc(1, sizeof(SStreamTask)); - if (pTask == NULL) { - return -1; - } - SDecoder decoder; - tDecoderInit(&decoder, (uint8_t*)msg, msgLen); - if (tDecodeStreamTask(&decoder, pTask) < 0) { - tDecoderClear(&decoder); - goto FAIL; - } - tDecoderClear(&decoder); - - if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { - ASSERT(0); - goto FAIL; - } - - if (taosHashPut(pMeta->pTasks, &pTask->id.taskId, sizeof(int32_t), &pTask, sizeof(void*)) < 0) { - goto FAIL; - } - - if (tdbTbUpsert(pMeta->pTaskDb, &pTask->id.taskId, sizeof(int32_t), msg, msgLen, pMeta->txn) < 0) { - taosHashRemove(pMeta->pTasks, &pTask->id.taskId, sizeof(int32_t)); - ASSERT(0); - goto FAIL; - } - - return 0; - -FAIL: - if (pTask) tFreeStreamTask(pTask); - return -1; -} -#endif - int32_t streamMetaSaveTask(SStreamMeta* pMeta, SStreamTask* pTask) { void* buf = NULL; int32_t len; @@ -241,7 +204,8 @@ int32_t streamMetaRemoveTask(SStreamMeta* pMeta, int32_t taskId) { int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTask, bool* pAdded) { *pAdded = false; - void* p = taosHashGet(pMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId)); + int64_t keys[2] = {pTask->id.streamId, pTask->id.taskId}; + void* p = taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (p == NULL) { if (pMeta->expandFunc(pMeta->ahandle, pTask, ver) < 0) { tFreeStreamTask(pTask); @@ -263,7 +227,7 @@ int32_t streamMetaRegisterTask(SStreamMeta* pMeta, int64_t ver, SStreamTask* pTa return 0; } - taosHashPut(pMeta->pTasks, 
&pTask->id.taskId, sizeof(pTask->id.taskId), &pTask, POINTER_BYTES); + taosHashPut(pMeta->pTasks, keys, sizeof(keys), &pTask, POINTER_BYTES); *pAdded = true; return 0; } @@ -315,12 +279,14 @@ static void doRemoveIdFromList(SStreamMeta* pMeta, int32_t num, SStreamId* id) { } } -int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId) { +int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t taskId) { SStreamTask* pTask = NULL; // pre-delete operation taosWLockLatch(&pMeta->lock); - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); + + int64_t keys[2] = {streamId, taskId}; + SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (ppTask) { pTask = *ppTask; atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__DROPPING); @@ -336,7 +302,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId) { while (1) { taosRLockLatch(&pMeta->lock); - ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); + ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (ppTask) { if ((*ppTask)->status.timerActive == 0) { @@ -355,7 +321,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int32_t taskId) { // let's do delete of stream task taosWLockLatch(&pMeta->lock); - ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &taskId, sizeof(int32_t)); + ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (ppTask) { taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t)); atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__DROPPING); @@ -472,7 +438,8 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { } // do duplicate task check. 
- void* p = taosHashGet(pMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId)); + int64_t keys[2] = {pTask->id.streamId, pTask->id.taskId}; + void* p = taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (p == NULL) { if (pMeta->expandFunc(pMeta->ahandle, pTask, pTask->chkInfo.version) < 0) { tdbFree(pKey); @@ -492,7 +459,7 @@ int32_t streamLoadTasks(SStreamMeta* pMeta, int64_t ver) { continue; } - if (taosHashPut(pMeta->pTasks, &pTask->id.taskId, sizeof(pTask->id.taskId), &pTask, sizeof(void*)) < 0) { + if (taosHashPut(pMeta->pTasks, keys, sizeof(keys), &pTask, sizeof(void*)) < 0) { tdbFree(pKey); tdbFree(pVal); tdbTbcClose(pCur); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 830637adbc..79f856ee0b 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -541,7 +541,9 @@ static void tryLaunchHistoryTask(void* param, void* tmrId) { qDebug("s-task:0x%x in timer to launch related history task", pInfo->taskId); taosWLockLatch(&pMeta->lock); - SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, &pInfo->taskId, sizeof(int32_t)); + int64_t keys[2] = {pInfo->streamId, pInfo->taskId}; + + SStreamTask** ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (ppTask) { ASSERT((*ppTask)->status.timerActive == 1); @@ -596,8 +598,9 @@ int32_t streamLaunchFillHistoryTask(SStreamTask* pTask) { SStreamMeta* pMeta = pTask->pMeta; int32_t hTaskId = pTask->historyTaskId.taskId; + int64_t keys[2] = {pTask->historyTaskId.streamId, pTask->historyTaskId.taskId}; // Set the execute conditions, including the query time window and the version range - SStreamTask** pHTask = taosHashGet(pMeta->pTasks, &hTaskId, sizeof(hTaskId)); + SStreamTask** pHTask = taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (pHTask == NULL) { qWarn("s-task:%s vgId:%d failed to launch history task:0x%x, since it is not built yet", pTask->id.idStr, pMeta->vgId, hTaskId); From 
9d698277d6e0143fdf84006f2a5ab70426280068 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 9 Aug 2023 18:35:41 +0800 Subject: [PATCH 116/123] avoid removing taosx and taos-explorer while uninstall taosd --- packaging/tools/remove.sh | 42 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index be2c26c309..eca0c5e973 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -123,8 +123,8 @@ function clean_bin() { ${csudo}rm -f ${bin_link_dir}/set_core || : ${csudo}rm -f ${bin_link_dir}/TDinsight.sh || : ${csudo}rm -f ${bin_link_dir}/${keeperName2} || : - ${csudo}rm -f ${bin_link_dir}/${xName2} || : - ${csudo}rm -f ${bin_link_dir}/${explorerName2} || : + # ${csudo}rm -f ${bin_link_dir}/${xName2} || : + # ${csudo}rm -f ${bin_link_dir}/${explorerName2} || : if [ "$verMode" == "cluster" ] && [ "$clientName" != "$clientName2" ]; then ${csudo}rm -f ${bin_link_dir}/${clientName2} || : @@ -194,26 +194,26 @@ function clean_service_on_systemd() { fi ${csudo}systemctl disable ${tarbitrator_service_name} &>/dev/null || echo &>/dev/null - x_service_config="${service_config_dir}/${xName2}.service" - if [ -e "$x_service_config" ]; then - if systemctl is-active --quiet ${xName2}; then - echo "${productName2} ${xName2} is running, stopping it..." - ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null - fi - ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null - ${csudo}rm -f ${x_service_config} - fi + # x_service_config="${service_config_dir}/${xName2}.service" + # if [ -e "$x_service_config" ]; then + # if systemctl is-active --quiet ${xName2}; then + # echo "${productName2} ${xName2} is running, stopping it..." 
+ # ${csudo}systemctl stop ${xName2} &>/dev/null || echo &>/dev/null + # fi + # ${csudo}systemctl disable ${xName2} &>/dev/null || echo &>/dev/null + # ${csudo}rm -f ${x_service_config} + # fi - explorer_service_config="${service_config_dir}/${explorerName2}.service" - if [ -e "$explorer_service_config" ]; then - if systemctl is-active --quiet ${explorerName2}; then - echo "${productName2} ${explorerName2} is running, stopping it..." - ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null - fi - ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null - ${csudo}rm -f ${explorer_service_config} - ${csudo}rm -f /etc/${clientName2}/explorer.toml - fi + # explorer_service_config="${service_config_dir}/${explorerName2}.service" + # if [ -e "$explorer_service_config" ]; then + # if systemctl is-active --quiet ${explorerName2}; then + # echo "${productName2} ${explorerName2} is running, stopping it..." + # ${csudo}systemctl stop ${explorerName2} &>/dev/null || echo &>/dev/null + # fi + # ${csudo}systemctl disable ${explorerName2} &>/dev/null || echo &>/dev/null + # ${csudo}rm -f ${explorer_service_config} + # ${csudo}rm -f /etc/${clientName2}/explorer.toml + # fi } function clean_service_on_sysvinit() { From d6ed5fe096f7f6e63b45f8a676d956f4db482fc5 Mon Sep 17 00:00:00 2001 From: kailixu Date: Wed, 9 Aug 2023 20:08:19 +0800 Subject: [PATCH 117/123] fix: timezone and qsort for windows --- source/os/src/osMath.c | 2 +- source/os/src/osTimezone.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/source/os/src/osMath.c b/source/os/src/osMath.c index 0cff0f78a6..10d02ab25c 100644 --- a/source/os/src/osMath.c +++ b/source/os/src/osMath.c @@ -25,7 +25,7 @@ int32_t qsortHelper(const void* p1, const void* p2, const void* param) { // todo refactor: 1) move away; 2) use merge sort instead; 3) qsort is not a stable sort actually. 
void taosSort(void* base, int64_t sz, int64_t width, __compar_fn_t compar) { -#if defined(WINDOWS) || defined(_ALPINE) +#if defined(WINDOWS_STASH) || defined(_ALPINE) void* param = compar; taosqsort(base, sz, width, param, qsortHelper); #else diff --git a/source/os/src/osTimezone.c b/source/os/src/osTimezone.c index cd6ad7cdb5..4280490c68 100644 --- a/source/os/src/osTimezone.c +++ b/source/os/src/osTimezone.c @@ -768,7 +768,7 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 keyValue[4] = (keyValue[4] == '+' ? '-' : '+'); keyValue[10] = 0; sprintf(winStr, "TZ=%s:00", &(keyValue[1])); - *tsTimezone = taosStr2Int32(&keyValue[4], NULL, 10); + *tsTimezone = -taosStr2Int32(&keyValue[4], NULL, 10); } break; } @@ -789,7 +789,7 @@ void taosSetSystemTimezone(const char *inTimezoneStr, char *outTimezoneStr, int8 indexStr = ppp - pp + 3; } sprintf(&winStr[indexStr], "%c%c%c:%c%c:00", (p[0] == '+' ? '-' : '+'), p[1], p[2], p[3], p[4]); - *tsTimezone = taosStr2Int32(p, NULL, 10); + *tsTimezone = -taosStr2Int32(p, NULL, 10); } else { *tsTimezone = 0; } From 458b3ce1378a13ce59adba32e7b84a4d183b63b5 Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Wed, 9 Aug 2023 21:09:33 +0800 Subject: [PATCH 118/123] docs: Rename 27-index.md to 27-indexing.md (#22385) the original name caused the document to not be displayed properly --- docs/en/12-taos-sql/{27-index.md => 27-indexing.md} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename docs/en/12-taos-sql/{27-index.md => 27-indexing.md} (100%) diff --git a/docs/en/12-taos-sql/27-index.md b/docs/en/12-taos-sql/27-indexing.md similarity index 100% rename from docs/en/12-taos-sql/27-index.md rename to docs/en/12-taos-sql/27-indexing.md From 0cf81449525e611d44e97cc1a7a3eed370bd4b3f Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 9 Aug 2023 22:28:23 +0800 Subject: [PATCH 119/123] fix(stream): fix the invalid key used by remove data in stream meta hash 
table. --- source/libs/stream/src/streamMeta.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 6d1dca0561..fe455c0190 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -323,7 +323,7 @@ int32_t streamMetaUnregisterTask(SStreamMeta* pMeta, int64_t streamId, int32_t t taosWLockLatch(&pMeta->lock); ppTask = (SStreamTask**)taosHashGet(pMeta->pTasks, keys, sizeof(keys)); if (ppTask) { - taosHashRemove(pMeta->pTasks, &taskId, sizeof(int32_t)); + taosHashRemove(pMeta->pTasks, keys, sizeof(keys)); atomic_store_8(&pTask->status.taskStatus, TASK_STATUS__DROPPING); ASSERT(pTask->status.timerActive == 0); From 0757e88ced222fa1e03f7045b3dc83582d2e3fda Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Wed, 9 Aug 2023 23:52:14 +0800 Subject: [PATCH 120/123] fix(stream): set the correct hash keys. --- source/dnode/vnode/src/tq/tq.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index bf52f77ce3..8454110e18 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1552,10 +1552,12 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) { int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) { SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); - int32_t taskId = ntohl(pRsp->upstreamTaskId); - SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, pRsp->streamId, taskId); - int32_t vgId = pTq->pStreamMeta->vgId; + int32_t vgId = pTq->pStreamMeta->vgId; + int32_t taskId = htonl(pRsp->upstreamTaskId); + int64_t streamId = htobe64(pRsp->streamId); + SStreamTask* pTask = streamMetaAcquireTask(pTq->pStreamMeta, streamId, taskId); + if (pTask) { streamProcessDispatchRsp(pTask, pRsp, pMsg->code); streamMetaReleaseTask(pTq->pStreamMeta, pTask); From 4393375e47423530e5f50f5765a687ad1fd98979 
Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 10 Aug 2023 02:16:04 +0800 Subject: [PATCH 121/123] fix(stream): set correct task id. --- source/dnode/mnode/impl/src/mndScheduler.c | 7 ++++--- source/dnode/snode/src/snode.c | 11 +++++++---- source/dnode/vnode/src/tq/tq.c | 5 +++-- source/libs/stream/src/streamRecover.c | 3 ++- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/source/dnode/mnode/impl/src/mndScheduler.c b/source/dnode/mnode/impl/src/mndScheduler.c index 2aac05b22d..36771147a9 100644 --- a/source/dnode/mnode/impl/src/mndScheduler.c +++ b/source/dnode/mnode/impl/src/mndScheduler.c @@ -232,7 +232,8 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, SArray* pTaskList, SStrea int32_t mndAddSinkTaskToStream(SStreamObj* pStream, SArray* pTaskList, SMnode* pMnode, int32_t vgId, SVgObj* pVgroup, int32_t fillHistory) { - SStreamTask* pTask = tNewStreamTask(pStream->uid, TASK_LEVEL__SINK, fillHistory, 0, pTaskList); + int64_t uid = (fillHistory == 0)? pStream->uid:pStream->hTaskUid; + SStreamTask* pTask = tNewStreamTask(uid, TASK_LEVEL__SINK, fillHistory, 0, pTaskList); if (pTask == NULL) { terrno = TSDB_CODE_OUT_OF_MEMORY; return -1; @@ -335,8 +336,8 @@ static void setHTasksId(SArray* pTaskList, const SArray* pHTaskList) { (*pHTask)->streamTaskId.taskId = (*pStreamTask)->id.taskId; (*pHTask)->streamTaskId.streamId = (*pStreamTask)->id.streamId; - mDebug("s-task:0x%x related history task:0x%x, level:%d", (*pStreamTask)->id.taskId, (*pHTask)->id.taskId, - (*pHTask)->info.taskLevel); + mDebug("s-task:0x%" PRIx64 "-0x%x related history task:0x%" PRIx64 "-0x%x, level:%d", (*pStreamTask)->id.streamId, + (*pStreamTask)->id.taskId, (*pHTask)->id.streamId, (*pHTask)->id.taskId, (*pHTask)->info.taskLevel); } } diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c index 91346e1d83..4000e72835 100644 --- a/source/dnode/snode/src/snode.c +++ b/source/dnode/snode/src/snode.c @@ -192,7 +192,8 @@ int32_t 
sndProcessTaskDropReq(SSnode *pSnode, char *msg, int32_t msgLen) { int32_t sndProcessTaskRunReq(SSnode *pSnode, SRpcMsg *pMsg) { SStreamTaskRunReq *pReq = pMsg->pCont; - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->streamId, pReq->taskId); + + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pReq->streamId, pReq->taskId); if (pTask) { streamProcessRunReq(pTask); streamMetaReleaseTask(pSnode->pMeta, pTask); @@ -246,8 +247,11 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) { int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) { SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)); - int32_t taskId = ntohl(pRsp->upstreamTaskId); - SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, pRsp->streamId, taskId); + + int32_t taskId = htonl(pRsp->upstreamTaskId); + int64_t streamId = htobe64(pRsp->streamId); + + SStreamTask *pTask = streamMetaAcquireTask(pSnode->pMeta, streamId, taskId); if (pTask) { streamProcessDispatchRsp(pTask, pRsp, pMsg->code); streamMetaReleaseTask(pSnode->pMeta, pTask); @@ -255,7 +259,6 @@ int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) { } else { return -1; } - return 0; } int32_t sndProcessTaskRetrieveRsp(SSnode *pSnode, SRpcMsg *pMsg) { diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c index 8454110e18..ad1af080fd 100644 --- a/source/dnode/vnode/src/tq/tq.c +++ b/source/dnode/vnode/src/tq/tq.c @@ -1072,8 +1072,9 @@ int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) { pTask->id.idStr, pStatus, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status); } else { rsp.status = 0; - tqDebug("tq recv task check(taskId:0x%x not built yet) req(reqId:0x%" PRIx64 ") from task:0x%x (vgId:%d), rsp status %d", - taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status); + tqDebug("tq recv task check(taskId:0x%" PRIx64 "-0x%x not built yet) req(reqId:0x%" PRIx64 + ") from task:0x%x (vgId:%d), rsp status %d", 
+ req.streamId, taskId, rsp.reqId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status); } return streamSendCheckRsp(pTq->pStreamMeta, &req, &rsp, &pMsg->info, taskId); diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c index 79f856ee0b..e59b3f682d 100644 --- a/source/libs/stream/src/streamRecover.c +++ b/source/libs/stream/src/streamRecover.c @@ -802,7 +802,8 @@ void launchFillHistoryTask(SStreamTask* pTask) { } ASSERT(pTask->status.downstreamReady == 1); - qDebug("s-task:%s start to launch related fill-history task:0x%x", pTask->id.idStr, tId); + qDebug("s-task:%s start to launch related fill-history task:0x%" PRIx64 "-0x%x", pTask->id.idStr, + pTask->historyTaskId.streamId, tId); // launch associated fill history task streamLaunchFillHistoryTask(pTask); From c73ac53666fb4bed9e98c7301913c699646770b3 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Thu, 10 Aug 2023 08:52:32 +0800 Subject: [PATCH 122/123] fix: add max_speed as keyword --- docs/en/12-taos-sql/20-keywords.md | 1 + docs/zh/12-taos-sql/20-keywords.md | 1 + 2 files changed, 2 insertions(+) diff --git a/docs/en/12-taos-sql/20-keywords.md b/docs/en/12-taos-sql/20-keywords.md index 3c441ed8d4..d563181b87 100644 --- a/docs/en/12-taos-sql/20-keywords.md +++ b/docs/en/12-taos-sql/20-keywords.md @@ -178,6 +178,7 @@ The following list shows all reserved keywords: - MATCH - MAX_DELAY +- MAX_SPEED - MAXROWS - MERGE - META diff --git a/docs/zh/12-taos-sql/20-keywords.md b/docs/zh/12-taos-sql/20-keywords.md index 35dafc52ef..f52af2f282 100644 --- a/docs/zh/12-taos-sql/20-keywords.md +++ b/docs/zh/12-taos-sql/20-keywords.md @@ -178,6 +178,7 @@ description: TDengine 保留关键字的详细列表 - MATCH - MAX_DELAY +- MAX_SPEED - MAXROWS - MERGE - META From 1ce8d06032f94f2b1c1ce01a4f455057eef1eebd Mon Sep 17 00:00:00 2001 From: kailixu Date: Thu, 10 Aug 2023 09:57:57 +0800 Subject: [PATCH 123/123] fix: proj col compare func --- source/libs/parser/src/parTranslater.c | 5 ++++- 1 file changed, 4
insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 554dc7cce8..38118c03f8 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -6596,7 +6596,10 @@ typedef struct SProjColPos { } SProjColPos; static int32_t projColPosCompar(const void* l, const void* r) { - return ((SProjColPos*)l)->colId > ((SProjColPos*)r)->colId; + if (((SProjColPos*)l)->colId < ((SProjColPos*)r)->colId) { + return -1; + } + return ((SProjColPos*)l)->colId == ((SProjColPos*)r)->colId ? 0 : 1; } static void projColPosDelete(void* p) { nodesDestroyNode(((SProjColPos*)p)->pProj); }