diff --git a/Jenkinsfile2 b/Jenkinsfile2
index e25be162e8..9e89e5a2f8 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -175,6 +175,7 @@ def pre_test_build_mac() {
cd ${WK}/debug
cmake .. -DBUILD_TEST=true -DBUILD_HTTPS=false
make -j10
+ ctest -j10 || exit 7
'''
sh '''
date
diff --git a/cmake/taosadapter_CMakeLists.txt.in b/cmake/taosadapter_CMakeLists.txt.in
index 5b8192831e..c507ae2536 100644
--- a/cmake/taosadapter_CMakeLists.txt.in
+++ b/cmake/taosadapter_CMakeLists.txt.in
@@ -2,7 +2,7 @@
# taosadapter
ExternalProject_Add(taosadapter
GIT_REPOSITORY https://github.com/taosdata/taosadapter.git
- GIT_TAG a11131c
+ GIT_TAG 8c3d57d
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taosadapter"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE
diff --git a/docs/en/07-develop/09-udf.md b/docs/en/07-develop/09-udf.md
index deb9c4cdb5..253f1270f5 100644
--- a/docs/en/07-develop/09-udf.md
+++ b/docs/en/07-develop/09-udf.md
@@ -218,9 +218,9 @@ After compiling your function into a DLL, you add it to TDengine. For more infor
## Sample Code
-### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/bit_and.c)
+### Sample scalar function: [bit_and](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/bit_and.c)
-The bit_add function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The bit_add function ignores null values.
+The bit_and function implements bitwise addition for multiple columns. If there is only one column, the column is returned. The bit_and function ignores null values.
bit_and.c
@@ -231,7 +231,7 @@ The bit_add function implements bitwise addition for multiple columns. If there
-### Sample aggregate function: [l2norm](https://github.com/taosdata/TDengine/blob/develop/tests/script/sh/l2norm.c)
+### Sample aggregate function: [l2norm](https://github.com/taosdata/TDengine/blob/3.0/tests/script/sh/l2norm.c)
The l2norm function finds the second-order norm for all data in the input column. This squares the values, takes a cumulative sum, and finds the square root.
diff --git a/docs/en/14-reference/05-taosbenchmark.md b/docs/en/14-reference/05-taosbenchmark.md
index d21f1829b0..6e08671e34 100644
--- a/docs/en/14-reference/05-taosbenchmark.md
+++ b/docs/en/14-reference/05-taosbenchmark.md
@@ -217,7 +217,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
The parameters listed in this section apply to all function modes.
- **filetype** : The function to be tested, with optional values `insert`, `query` and `subscribe`. These correspond to the insert, query, and subscribe functions, respectively. Users can specify only one of these in each configuration file.
-**cfgdir**: specify the TDengine cluster configuration file's directory. The default path is /etc/taos.
+**cfgdir**: specify the TDengine client configuration file's directory. The default path is /etc/taos.
- **host**: Specify the FQDN of the TDengine server to connect. The default value is `localhost`.
diff --git a/docs/en/14-reference/12-config/index.md b/docs/en/14-reference/12-config/index.md
index 8053c4d25d..aadf563a63 100644
--- a/docs/en/14-reference/12-config/index.md
+++ b/docs/en/14-reference/12-config/index.md
@@ -106,7 +106,7 @@ The parameters described in this document by the effect that they have on the sy
| Applicable | Server only |
| Meaning | The switch for monitoring inside server. The main object of monitoring is to collect information about load on physical nodes, including CPU usage, memory usage, disk usage, and network bandwidth. Monitoring information is sent over HTTP to the taosKeeper service specified by `monitorFqdn` and `monitorProt`.
| Value Range | 0: monitoring disabled, 1: monitoring enabled |
-| Default | 1 |
+| Default | 0 |
### monitorFqdn
@@ -735,19 +735,17 @@ To prevent system resource from being exhausted by multiple concurrent streams,
| 44 | numOfVnodeQueryThreads | No | Yes |
| 45 | numOfVnodeStreamThreads | No | Yes |
| 46 | numOfVnodeFetchThreads | No | Yes |
-| 47 | numOfVnodeWriteThreads | No | Yes |
-| 48 | numOfVnodeSyncThreads | No | Yes |
-| 49 | numOfVnodeRsmaThreads | No | Yes |
-| 50 | numOfQnodeQueryThreads | No | Yes |
-| 51 | numOfQnodeFetchThreads | No | Yes |
-| 52 | numOfSnodeSharedThreads | No | Yes |
-| 53 | numOfSnodeUniqueThreads | No | Yes |
-| 54 | rpcQueueMemoryAllowed | No | Yes |
-| 55 | logDir | Yes | Yes |
-| 56 | minimalLogDirGB | Yes | Yes |
-| 57 | numOfLogLines | Yes | Yes |
-| 58 | asyncLog | Yes | Yes |
-| 59 | logKeepDays | Yes | Yes |
+| 47 | numOfVnodeRsmaThreads | No | Yes |
+| 48 | numOfQnodeQueryThreads | No | Yes |
+| 49 | numOfQnodeFetchThreads | No | Yes |
+| 50 | numOfSnodeSharedThreads | No | Yes |
+| 51 | numOfSnodeUniqueThreads | No | Yes |
+| 52 | rpcQueueMemoryAllowed | No | Yes |
+| 53 | logDir | Yes | Yes |
+| 54 | minimalLogDirGB | Yes | Yes |
+| 55 | numOfLogLines | Yes | Yes |
+| 56 | asyncLog | Yes | Yes |
+| 57 | logKeepDays | Yes | Yes |
| 60 | debugFlag | Yes | Yes |
| 61 | tmrDebugFlag | Yes | Yes |
| 62 | uDebugFlag | Yes | Yes |
diff --git a/docs/en/28-releases/01-tdengine.md b/docs/en/28-releases/01-tdengine.md
index 14096bd400..74eeeb5efb 100644
--- a/docs/en/28-releases/01-tdengine.md
+++ b/docs/en/28-releases/01-tdengine.md
@@ -6,6 +6,10 @@ description: TDengine release history, Release Notes and download links.
import Release from "/components/ReleaseV3";
+## 3.0.1.6
+
+
+
## 3.0.1.5
diff --git a/docs/en/28-releases/02-tools.md b/docs/en/28-releases/02-tools.md
index a7446be4e3..0a96c776e0 100644
--- a/docs/en/28-releases/02-tools.md
+++ b/docs/en/28-releases/02-tools.md
@@ -6,6 +6,10 @@ description: taosTools release history, Release Notes, download links.
import Release from "/components/ReleaseV3";
+## 2.2.7
+
+
+
## 2.2.6
diff --git a/docs/examples/csharp/wsConnect/Program.cs b/docs/examples/csharp/wsConnect/Program.cs
index 2e89372c3e..f9a56c842f 100644
--- a/docs/examples/csharp/wsConnect/Program.cs
+++ b/docs/examples/csharp/wsConnect/Program.cs
@@ -9,9 +9,10 @@ namespace Examples
{
string DSN = "ws://root:taosdata@127.0.0.1:6041/test";
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
+
if (wsConn == IntPtr.Zero)
{
- throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ throw new Exception("get WS connection failed");
}
else
{
diff --git a/docs/examples/csharp/wsInsert/Program.cs b/docs/examples/csharp/wsInsert/Program.cs
index 4cd812cda9..1f2d0a6725 100644
--- a/docs/examples/csharp/wsInsert/Program.cs
+++ b/docs/examples/csharp/wsInsert/Program.cs
@@ -13,7 +13,7 @@ namespace Examples
// Assert if connection is validate
if (wsConn == IntPtr.Zero)
{
- throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ throw new Exception("get WS connection failed");
}
else
{
diff --git a/docs/examples/csharp/wsQuery/Program.cs b/docs/examples/csharp/wsQuery/Program.cs
index de5591aa53..a220cae903 100644
--- a/docs/examples/csharp/wsQuery/Program.cs
+++ b/docs/examples/csharp/wsQuery/Program.cs
@@ -13,7 +13,7 @@ namespace Examples
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
{
- throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ throw new Exception("get WS connection failed");
}
else
{
diff --git a/docs/examples/csharp/wsStmt/Program.cs b/docs/examples/csharp/wsStmt/Program.cs
index 54de77ec1f..8af807ec39 100644
--- a/docs/examples/csharp/wsStmt/Program.cs
+++ b/docs/examples/csharp/wsStmt/Program.cs
@@ -21,7 +21,7 @@ namespace Examples
IntPtr wsConn = LibTaosWS.WSConnectWithDSN(DSN);
if (wsConn == IntPtr.Zero)
{
- throw new Exception($"get WS connection failed,reason:{LibTaosWS.WSErrorStr(IntPtr.Zero)} code:{LibTaosWS.WSErrorNo(IntPtr.Zero)}");
+ throw new Exception($"get WS connection failed");
}
else
{
diff --git a/docs/zh/10-deployment/01-deploy.md b/docs/zh/10-deployment/01-deploy.md
index eecb86ce41..b68bf7b743 100644
--- a/docs/zh/10-deployment/01-deploy.md
+++ b/docs/zh/10-deployment/01-deploy.md
@@ -190,3 +190,16 @@ DROP DNODE dnodeId;
dnodeID 是集群自动分配的,不得人工指定。它在生成时是递增的,不会重复。
:::
+
+## 常见问题
+
+1、建立集群时使用 CREATE DNODE 增加新节点后,新节点始终显示 offline 状态?
+```sql
+ 1)首先要检查增加的新节点上的 taosd 服务是否已经正常启动
+
+ 2)如果已经启动,再检查到新节点的网络是否通畅,可以使用 ping fqdn 验证下
+
+ 3)如果前面两步都没有问题,这一步要检查新节点做为独立集群在运行了,可以使用 taos -h fqdn 连接上后,show dnodes; 命令查看.
+ 如果显示的列表与你主节点上显示的不一致,说明此节点自己单独成立了一个集群,解决的方法是停止新节点上的服务,然后清空新节点上
+ taos.cfg 中配置的 dataDir 目录下的所有文件,重新启动新节点服务即可解决。
+```
diff --git a/docs/zh/14-reference/05-taosbenchmark.md b/docs/zh/14-reference/05-taosbenchmark.md
index 13530923b8..76dd5f12d8 100644
--- a/docs/zh/14-reference/05-taosbenchmark.md
+++ b/docs/zh/14-reference/05-taosbenchmark.md
@@ -217,7 +217,7 @@ taosBenchmark -A INT,DOUBLE,NCHAR,BINARY\(16\)
本节所列参数适用于所有功能模式。
- **filetype** : 要测试的功能,可选值为 `insert`, `query` 和 `subscribe`。分别对应插入、查询和订阅功能。每个配置文件中只能指定其中之一。
-- **cfgdir** : TDengine 集群配置文件所在的目录,默认路径是 /etc/taos 。
+- **cfgdir** : TDengine 客户端配置文件所在的目录,默认路径是 /etc/taos 。
- **host** : 指定要连接的 TDengine 服务端的 FQDN,默认值为 localhost。
diff --git a/docs/zh/14-reference/12-config/index.md b/docs/zh/14-reference/12-config/index.md
index 74af793f9f..54c362de95 100644
--- a/docs/zh/14-reference/12-config/index.md
+++ b/docs/zh/14-reference/12-config/index.md
@@ -106,7 +106,7 @@ taos --dump-config
| 适用范围 | 仅服务端适用 |
| 含义 | 服务器内部的系统监控开关。监控主要负责收集物理节点的负载状况,包括 CPU、内存、硬盘、网络带宽的监控记录,监控信息将通过 HTTP 协议发送给由 `monitorFqdn` 和 `monitorProt` 指定的 TaosKeeper 监控服务 |
| 取值范围 | 0:关闭监控服务, 1:激活监控服务。 |
-| 缺省值 | 1 |
+| 缺省值 | 0 |
### monitorFqdn
@@ -711,19 +711,17 @@ charset 的有效值是 UTF-8。
| 44 | numOfVnodeQueryThreads | 否 | 是 | |
| 45 | numOfVnodeStreamThreads | 否 | 是 | |
| 46 | numOfVnodeFetchThreads | 否 | 是 | |
-| 47 | numOfVnodeWriteThreads | 否 | 是 | |
-| 48 | numOfVnodeSyncThreads | 否 | 是 | |
-| 49 | numOfVnodeRsmaThreads | 否 | 是 | |
-| 50 | numOfQnodeQueryThreads | 否 | 是 | |
-| 51 | numOfQnodeFetchThreads | 否 | 是 | |
-| 52 | numOfSnodeSharedThreads | 否 | 是 | |
-| 53 | numOfSnodeUniqueThreads | 否 | 是 | |
-| 54 | rpcQueueMemoryAllowed | 否 | 是 | |
-| 55 | logDir | 是 | 是 | |
-| 56 | minimalLogDirGB | 是 | 是 | |
-| 57 | numOfLogLines | 是 | 是 | |
-| 58 | asyncLog | 是 | 是 | |
-| 59 | logKeepDays | 是 | 是 | |
+| 47 | numOfVnodeRsmaThreads | 否 | 是 | |
+| 48 | numOfQnodeQueryThreads | 否 | 是 | |
+| 49 | numOfQnodeFetchThreads | 否 | 是 | |
+| 50 | numOfSnodeSharedThreads | 否 | 是 | |
+| 51 | numOfSnodeUniqueThreads | 否 | 是 | |
+| 52 | rpcQueueMemoryAllowed | 否 | 是 | |
+| 53 | logDir | 是 | 是 | |
+| 54 | minimalLogDirGB | 是 | 是 | |
+| 55 | numOfLogLines | 是 | 是 | |
+| 56 | asyncLog | 是 | 是 | |
+| 57 | logKeepDays | 是 | 是 | |
| 60 | debugFlag | 是 | 是 | |
| 61 | tmrDebugFlag | 是 | 是 | |
| 62 | uDebugFlag | 是 | 是 | |
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
index a5b3534250..31093ce557 100644
--- a/docs/zh/28-releases/01-tdengine.md
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -6,6 +6,10 @@ description: TDengine 发布历史、Release Notes 及下载链接
import Release from "/components/ReleaseV3";
+## 3.0.1.6
+
+
+
## 3.0.1.5
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
index e86481435c..2623391fb9 100644
--- a/docs/zh/28-releases/02-tools.md
+++ b/docs/zh/28-releases/02-tools.md
@@ -6,6 +6,10 @@ description: taosTools 的发布历史、Release Notes 和下载链接
import Release from "/components/ReleaseV3";
+## 2.2.7
+
+
+
## 2.2.6
diff --git a/include/common/tcommon.h b/include/common/tcommon.h
index 3bfbb85958..674bdcf171 100644
--- a/include/common/tcommon.h
+++ b/include/common/tcommon.h
@@ -225,13 +225,13 @@ typedef struct SVarColAttr {
// pBlockAgg->numOfNull == info.rows, all data are null
// pBlockAgg->numOfNull == 0, no data are null.
typedef struct SColumnInfoData {
- SColumnInfo info; // column info
- bool hasNull; // if current column data has null value.
- char* pData; // the corresponding block data in memory
+ char* pData; // the corresponding block data in memory
union {
char* nullbitmap; // bitmap, one bit for each item in the list
SVarColAttr varmeta;
};
+ SColumnInfo info; // column info
+ bool hasNull; // if current column data has null value.
} SColumnInfoData;
typedef struct SQueryTableDataCond {
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index 99bbfde3e1..681d1beb79 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -55,8 +55,6 @@ extern int32_t tsNumOfMnodeReadThreads;
extern int32_t tsNumOfVnodeQueryThreads;
extern int32_t tsNumOfVnodeStreamThreads;
extern int32_t tsNumOfVnodeFetchThreads;
-extern int32_t tsNumOfVnodeWriteThreads;
-extern int32_t tsNumOfVnodeSyncThreads;
extern int32_t tsNumOfVnodeRsmaThreads;
extern int32_t tsNumOfQnodeQueryThreads;
extern int32_t tsNumOfQnodeFetchThreads;
diff --git a/include/common/tmsg.h b/include/common/tmsg.h
index 76b13579c1..7bc56daab0 100644
--- a/include/common/tmsg.h
+++ b/include/common/tmsg.h
@@ -297,7 +297,6 @@ typedef struct {
typedef struct {
int32_t code;
- int8_t hashMeta;
int64_t uid;
char* tblFName;
int32_t numOfRows;
@@ -1128,6 +1127,7 @@ typedef struct {
SQnodeLoad qload;
SClusterCfg clusterCfg;
SArray* pVloads; // array of SVnodeLoad
+ int32_t statusSeq;
} SStatusReq;
int32_t tSerializeSStatusReq(void* buf, int32_t bufLen, SStatusReq* pReq);
@@ -1149,6 +1149,7 @@ typedef struct {
int64_t dnodeVer;
SDnodeCfg dnodeCfg;
SArray* pDnodeEps; // Array of SDnodeEp
+ int32_t statusSeq;
} SStatusRsp;
int32_t tSerializeSStatusRsp(void* buf, int32_t bufLen, SStatusRsp* pRsp);
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index 7f7d7ea22a..58e7b71bec 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -241,6 +241,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_STREAM_UNUSED1, "stream-unused1", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_RETRIEVE, "stream-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_RECOVER_FINISH, "vnode-stream-finish", NULL, NULL)
+ TD_DEF_MSG_TYPE(TDMT_STREAM_TASK_CHECK, "vnode-stream-task-check", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_STREAM_MAX_MSG, "stream-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MON_MSG)
diff --git a/include/libs/catalog/catalog.h b/include/libs/catalog/catalog.h
index b957be4267..b310ec8080 100644
--- a/include/libs/catalog/catalog.h
+++ b/include/libs/catalog/catalog.h
@@ -203,13 +203,11 @@ int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
-int32_t catalogGetCachedTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableMeta** pTableMeta);
+int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
-int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
- STableMeta** pTableMeta);
+int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
-int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
- SVgroupInfo* pVgroup, bool* exists);
+int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);
/**
* Force refresh DB's local cached vgroup info.
@@ -309,7 +307,7 @@ int32_t catalogGetUdfInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char* f
int32_t catalogChkAuth(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type,
bool* pass);
-int32_t catalogChkAuthFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type,
+int32_t catalogChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type,
bool* pass, bool* exists);
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
@@ -326,6 +324,10 @@ SMetaData* catalogCloneMetaData(SMetaData* pData);
void catalogFreeMetaData(SMetaData* pData);
+int32_t ctgdEnableDebug(char *option, bool enable);
+
+int32_t ctgdHandleDbgCommand(char *command);
+
/**
* Destroy catalog and relase all resources
*/
diff --git a/include/libs/qcom/query.h b/include/libs/qcom/query.h
index 3334186d2c..651b379851 100644
--- a/include/libs/qcom/query.h
+++ b/include/libs/qcom/query.h
@@ -248,7 +248,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
(_code) == TSDB_CODE_PAR_INVALID_DROP_COL || ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID))
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
-#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
+#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER)
#define NEED_CLIENT_HANDLE_ERROR(_code) \
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h
index e2616567f5..1c3f905e23 100644
--- a/include/libs/stream/tstream.h
+++ b/include/libs/stream/tstream.h
@@ -46,7 +46,7 @@ enum {
TASK_STATUS__DROPPING,
TASK_STATUS__FAIL,
TASK_STATUS__STOP,
- TASK_STATUS__RECOVER_DOWNSTREAM,
+ TASK_STATUS__WAIT_DOWNSTREAM,
TASK_STATUS__RECOVER_PREPARE,
TASK_STATUS__RECOVER1,
TASK_STATUS__RECOVER2,
@@ -332,7 +332,10 @@ typedef struct SStreamTask {
SStreamState* pState;
// do not serialize
- int32_t recoverWaitingChild;
+ int32_t recoverTryingDownstream;
+ int32_t recoverWaitingUpstream;
+ int64_t checkReqId;
+ SArray* checkReqIds; // shuffle
} SStreamTask;
@@ -418,7 +421,10 @@ typedef struct {
typedef struct {
int64_t streamId;
- int32_t taskId;
+ int32_t upstreamNodeId;
+ int32_t upstreamTaskId;
+ int32_t downstreamNodeId;
+ int32_t downstreamTaskId;
int8_t inputStatus;
} SStreamDispatchRsp;
@@ -440,6 +446,27 @@ typedef struct {
int32_t rspToTaskId;
} SStreamRetrieveRsp;
+typedef struct {
+ int64_t reqId;
+ int64_t streamId;
+ int32_t upstreamNodeId;
+ int32_t upstreamTaskId;
+ int32_t downstreamNodeId;
+ int32_t downstreamTaskId;
+ int32_t childId;
+} SStreamTaskCheckReq;
+
+typedef struct {
+ int64_t reqId;
+ int64_t streamId;
+ int32_t upstreamNodeId;
+ int32_t upstreamTaskId;
+ int32_t downstreamNodeId;
+ int32_t downstreamTaskId;
+ int32_t childId;
+ int8_t status;
+} SStreamTaskCheckRsp;
+
typedef struct {
SMsgHead msgHead;
int64_t streamId;
@@ -455,47 +482,6 @@ typedef struct {
int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq);
int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishReq* pReq);
-#if 0
-typedef struct {
- int64_t streamId;
- int32_t taskId;
- int32_t upstreamTaskId;
- int32_t upstreamNodeId;
-} SStreamTaskRecoverReq;
-
-typedef struct {
- int64_t streamId;
- int32_t rspTaskId;
- int32_t reqTaskId;
- int8_t inputStatus;
-} SStreamTaskRecoverRsp;
-
-int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecoverReq* pReq);
-int32_t tDecodeStreamTaskRecoverReq(SDecoder* pDecoder, SStreamTaskRecoverReq* pReq);
-
-int32_t tEncodeStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamTaskRecoverRsp* pRsp);
-int32_t tDecodeStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamTaskRecoverRsp* pRsp);
-
-typedef struct {
- int64_t streamId;
- int32_t taskId;
-} SMStreamTaskRecoverReq;
-
-typedef struct {
- int64_t streamId;
- int32_t taskId;
-} SMStreamTaskRecoverRsp;
-
-int32_t tEncodeSMStreamTaskRecoverReq(SEncoder* pEncoder, const SMStreamTaskRecoverReq* pReq);
-int32_t tDecodeSMStreamTaskRecoverReq(SDecoder* pDecoder, SMStreamTaskRecoverReq* pReq);
-
-int32_t tEncodeSMStreamTaskRecoverRsp(SEncoder* pEncoder, const SMStreamTaskRecoverRsp* pRsp);
-int32_t tDecodeSMStreamTaskRecoverRsp(SDecoder* pDecoder, SMStreamTaskRecoverRsp* pRsp);
-
-int32_t streamProcessRecoverReq(SStreamTask* pTask, SStreamTaskRecoverReq* pReq, SRpcMsg* pMsg);
-int32_t streamProcessRecoverRsp(SStreamTask* pTask, SStreamTaskRecoverRsp* pRsp);
-#endif
-
typedef struct {
int64_t streamId;
int32_t downstreamTaskId;
@@ -509,20 +495,18 @@ typedef struct {
SArray* checkpointVer; // SArray
} SStreamRecoverDownstreamRsp;
+int32_t tEncodeSStreamTaskCheckReq(SEncoder* pEncoder, const SStreamTaskCheckReq* pReq);
+int32_t tDecodeSStreamTaskCheckReq(SDecoder* pDecoder, SStreamTaskCheckReq* pReq);
+
+int32_t tEncodeSStreamTaskCheckRsp(SEncoder* pEncoder, const SStreamTaskCheckRsp* pRsp);
+int32_t tDecodeSStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp);
+
int32_t tEncodeSStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq);
int32_t tDecodeSStreamTaskRecoverReq(SDecoder* pDecoder, SStreamRecoverDownstreamReq* pReq);
int32_t tEncodeSStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamRecoverDownstreamRsp* pRsp);
int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstreamRsp* pRsp);
-typedef struct {
- int64_t streamId;
- int32_t taskId;
- int32_t waitingRspCnt;
- int32_t totReq;
- SArray* info; // SArray*>
-} SStreamRecoverStatus;
-
int32_t tDecodeStreamDispatchReq(SDecoder* pDecoder, SStreamDispatchReq* pReq);
int32_t tDecodeStreamRetrieveReq(SDecoder* pDecoder, SStreamRetrieveReq* pReq);
void tDeleteStreamRetrieveReq(SStreamRetrieveReq* pReq);
@@ -533,7 +517,7 @@ int32_t streamSetupTrigger(SStreamTask* pTask);
int32_t streamProcessRunReq(SStreamTask* pTask);
int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, SRpcMsg* pMsg, bool exec);
-int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp);
+int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code);
int32_t streamProcessRetrieveReq(SStreamTask* pTask, SStreamRetrieveReq* pReq, SRpcMsg* pMsg);
int32_t streamProcessRetrieveRsp(SStreamTask* pTask, SStreamRetrieveRsp* pRsp);
@@ -544,6 +528,10 @@ int32_t streamSchedExec(SStreamTask* pTask);
int32_t streamScanExec(SStreamTask* pTask, int32_t batchSz);
// recover and fill history
+int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version);
+int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version);
+int32_t streamProcessTaskCheckReq(SStreamTask* pTask, const SStreamTaskCheckReq* pReq);
+int32_t streamProcessTaskCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp, int64_t version);
// common
int32_t streamSetParamForRecover(SStreamTask* pTask);
int32_t streamRestoreParam(SStreamTask* pTask);
diff --git a/include/util/taoserror.h b/include/util/taoserror.h
index 7350a8c390..092ede2281 100644
--- a/include/util/taoserror.h
+++ b/include/util/taoserror.h
@@ -340,7 +340,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_TABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0618)
#define TSDB_CODE_TDB_STB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0619)
#define TSDB_CODE_TDB_STB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x061A)
-#define TSDB_CODE_TDB_TABLE_RECREATED TAOS_DEF_ERROR_CODE(0, 0x061B)
+#define TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER TAOS_DEF_ERROR_CODE(0, 0x061B)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x061C)
// query
@@ -612,9 +612,12 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TMQ_CONSUMER_MISMATCH TAOS_DEF_ERROR_CODE(0, 0x4001)
#define TSDB_CODE_TMQ_CONSUMER_CLOSED TAOS_DEF_ERROR_CODE(0, 0x4002)
+// stream
+#define TSDB_CODE_STREAM_TASK_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x4100)
+
// TDLite
-#define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x4100)
-#define TSDB_CODE_TDLITE_IVLD_OPEN_DIR TAOS_DEF_ERROR_CODE(0, 0x4101)
+#define TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS TAOS_DEF_ERROR_CODE(0, 0x5100)
+#define TSDB_CODE_TDLITE_IVLD_OPEN_DIR TAOS_DEF_ERROR_CODE(0, 0x5101)
#ifdef __cplusplus
}
diff --git a/include/util/tqueue.h b/include/util/tqueue.h
index da409a90bb..8b46bbd064 100644
--- a/include/util/tqueue.h
+++ b/include/util/tqueue.h
@@ -59,6 +59,47 @@ typedef enum {
typedef void (*FItem)(SQueueInfo *pInfo, void *pItem);
typedef void (*FItems)(SQueueInfo *pInfo, STaosQall *qall, int32_t numOfItems);
+typedef struct STaosQnode STaosQnode;
+
+typedef struct STaosQnode {
+ STaosQnode *next;
+ STaosQueue *queue;
+ int64_t timestamp;
+ int32_t size;
+ int8_t itype;
+ int8_t reserved[3];
+ char item[];
+} STaosQnode;
+
+typedef struct STaosQueue {
+ STaosQnode *head;
+ STaosQnode *tail;
+ STaosQueue *next; // for queue set
+ STaosQset *qset; // for queue set
+ void *ahandle; // for queue set
+ FItem itemFp;
+ FItems itemsFp;
+ TdThreadMutex mutex;
+ int64_t memOfItems;
+ int32_t numOfItems;
+ int64_t threadId;
+} STaosQueue;
+
+typedef struct STaosQset {
+ STaosQueue *head;
+ STaosQueue *current;
+ TdThreadMutex mutex;
+ tsem_t sem;
+ int32_t numOfQueues;
+ int32_t numOfItems;
+} STaosQset;
+
+typedef struct STaosQall {
+ STaosQnode *current;
+ STaosQnode *start;
+ int32_t numOfItems;
+} STaosQall;
+
STaosQueue *taosOpenQueue();
void taosCloseQueue(STaosQueue *queue);
void taosSetQueueFp(STaosQueue *queue, FItem itemFp, FItems itemsFp);
diff --git a/include/util/tworker.h b/include/util/tworker.h
index 3545aeed89..8766f87a08 100644
--- a/include/util/tworker.h
+++ b/include/util/tworker.h
@@ -26,8 +26,9 @@ typedef struct SQWorkerPool SQWorkerPool;
typedef struct SWWorkerPool SWWorkerPool;
typedef struct SQWorker {
- int32_t id; // worker ID
- TdThread thread; // thread
+ int32_t id; // worker id
+ int64_t pid; // thread pid
+ TdThread thread; // thread id
SQWorkerPool *pool;
} SQWorker;
@@ -43,9 +44,10 @@ typedef struct SQWorkerPool {
typedef struct SWWorker {
int32_t id; // worker id
- TdThread thread; // thread
+ int64_t pid; // thread pid
+ TdThread thread; // thread id
STaosQall *qall;
- STaosQset *qset; // queue set
+ STaosQset *qset;
SWWorkerPool *pool;
} SWWorker;
diff --git a/packaging/release.bat b/packaging/release.bat
index b87ae68e2b..4c82c5ead5 100644
--- a/packaging/release.bat
+++ b/packaging/release.bat
@@ -40,7 +40,7 @@ if not exist %work_dir%\debug\ver-%2-x86 (
)
cd %work_dir%\debug\ver-%2-x64
call vcvarsall.bat x64
-cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DBUILD_HTTP=false -DBUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64
+cmake ../../ -G "NMake Makefiles JOM" -DCMAKE_MAKE_PROGRAM=jom -DBUILD_TOOLS=true -DWEBSOCKET=true -DBUILD_HTTP=false -DBUILD_TEST=false -DVERNUMBER=%2 -DCPUTYPE=x64
cmake --build .
rd /s /Q C:\TDengine
cmake --install .
@@ -63,4 +63,4 @@ exit /b
:RUNFAILED
echo %*
cd %package_dir%
-goto :eof
\ No newline at end of file
+goto :eof
diff --git a/packaging/testpackage.sh b/packaging/testpackage.sh
index 20f93ecaec..5b9a12179d 100755
--- a/packaging/testpackage.sh
+++ b/packaging/testpackage.sh
@@ -67,6 +67,7 @@ fi
}
+
function wgetFile {
file=$1
@@ -75,7 +76,10 @@ if [ ! -f ${file} ];then
echoColor BD "wget https://www.taosdata.com/assets-download/3.0/${file}"
wget https://www.taosdata.com/assets-download/3.0/${file}
else
- echoColor YD "${file} already exists "
+ echoColor YD "${file} already exists and use new file "
+ rm -rf ${file}
+ echoColor BD "wget https://www.taosdata.com/assets-download/3.0/${file}"
+ wget https://www.taosdata.com/assets-download/3.0/${file}
fi
}
@@ -211,7 +215,8 @@ elif [[ ${packgeName} =~ "tar" ]];then
if [ ${diffNumbers} != 0 ];then
echoColor R "The number and names of files is different from the previous installation package"
- echoColor Y `cat ${installPath}/diffFile.log`
+ diffLog=`cat ${installPath}/diffFile.log`
+ echoColor Y "${diffLog}"
exit -1
else
echoColor G "The number and names of files are the same as previous installation packages"
diff --git a/packaging/tools/make_install.bat b/packaging/tools/make_install.bat
index f777d10918..f5d1e45690 100644
--- a/packaging/tools/make_install.bat
+++ b/packaging/tools/make_install.bat
@@ -34,7 +34,6 @@ if exist %binary_dir%\\test\\cfg\\taosadapter.toml (
copy %binary_dir%\\test\\cfg\\taosadapter.toml %tagert_dir%\\cfg\\taosadapter.toml > nul
)
)
-
copy %source_dir%\\include\\client\\taos.h %tagert_dir%\\include > nul
copy %source_dir%\\include\\util\\taoserror.h %tagert_dir%\\include > nul
copy %source_dir%\\include\\libs\\function\\taosudf.h %tagert_dir%\\include > nul
@@ -52,6 +51,7 @@ if exist %binary_dir%\\build\\lib\\taosws.dll.lib (
)
if exist %binary_dir%\\build\\lib\\taosws.dll (
copy %binary_dir%\\build\\lib\\taosws.dll %tagert_dir%\\driver > nul
+ copy %source_dir%\\tools\\taosws-rs\\target\\release\\taosws.h %tagert_dir%\\include > nul
)
if exist %binary_dir%\\build\\bin\\taosdump.exe (
copy %binary_dir%\\build\\bin\\taosdump.exe %tagert_dir% > nul
@@ -67,3 +67,4 @@ if exist C:\\TDengine\\driver\\taosws.dll (
copy /y C:\\TDengine\\driver\\taosws.dll C:\\Windows\\System32 > nul
)
sc query "taosd" >nul || sc create "taosd" binPath= "C:\\TDengine\\taosd.exe --win_service" start= DEMAND
+sc query "taosadapter" >nul || sc create "taosadapter" binPath= "C:\\TDengine\\taosadapter.exe" start= DEMAND
diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh
index e9546ba103..2776683a24 100755
--- a/packaging/tools/makepkg.sh
+++ b/packaging/tools/makepkg.sh
@@ -285,10 +285,14 @@ if [[ $dbName == "taos" ]]; then
fi
# Add web files
- if [ -d "${web_dir}/admin" ]; then
- mkdir -p ${install_dir}/share/
- cp ${web_dir}/admin ${install_dir}/share/ -r
- cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
+ if [ "$verMode" == "cluster" ] || [ "$verMode" == "cloud" ]; then
+ if [ -d "${web_dir}/admin" ] ; then
+ mkdir -p ${install_dir}/share/
+ cp ${web_dir}/admin ${install_dir}/share/ -r
+ cp ${web_dir}/png/taos.png ${install_dir}/share/admin/images/taos.png
+ else
+ echo "directory not found for enterprise release: ${web_dir}/admin"
+ fi
fi
fi
@@ -375,4 +379,4 @@ if [ -n "${taostools_bin_files}" ]; then
fi
fi
-cd ${curr_dir}
+cd ${curr_dir}
\ No newline at end of file
diff --git a/packaging/tools/tdengine.iss b/packaging/tools/tdengine.iss
index 272a0dfb5c..ec9c432092 100644
--- a/packaging/tools/tdengine.iss
+++ b/packaging/tools/tdengine.iss
@@ -15,10 +15,10 @@
#define MyAppExeName "\*.exe"
#define MyAppTaosExeName "\taos.bat"
#define MyAppTaosdemoExeName "\taosBenchmark.exe"
-#define MyAppDLLName "\driver\taos.dll"
+#define MyAppDLLName "\driver\*.dll"
;#define MyAppVersion "3.0"
;#define MyAppInstallName "TDengine"
-
+;#define MyAppInstallName "TDengine"
[Setup]
VersionInfoVersion={#MyAppVersion}
AppId={{A0F7A93C-79C4-485D-B2B8-F0D03DF42FAB}
diff --git a/source/client/inc/clientInt.h b/source/client/inc/clientInt.h
index cf8b0babdd..33af862528 100644
--- a/source/client/inc/clientInt.h
+++ b/source/client/inc/clientInt.h
@@ -318,6 +318,7 @@ void* createTscObj(const char* user, const char* auth, const char* db, int32_
void destroyTscObj(void* pObj);
STscObj* acquireTscObj(int64_t rid);
int32_t releaseTscObj(int64_t rid);
+void destroyAppInst(SAppInstInfo *pAppInfo);
uint64_t generateRequestId();
diff --git a/source/client/inc/clientStmt.h b/source/client/inc/clientStmt.h
index 2261a6becb..ef4c05afae 100644
--- a/source/client/inc/clientStmt.h
+++ b/source/client/inc/clientStmt.h
@@ -39,6 +39,7 @@ typedef enum {
STMT_BIND_COL,
STMT_ADD_BATCH,
STMT_EXECUTE,
+ STMT_MAX,
} STMT_STATUS;
typedef struct SStmtTableCache {
@@ -94,12 +95,18 @@ typedef struct STscStmt {
STscObj *taos;
SCatalog *pCatalog;
int32_t affectedRows;
+ uint32_t seqId;
+ uint32_t seqIds[STMT_MAX];
SStmtSQLInfo sql;
SStmtExecInfo exec;
SStmtBindInfo bInfo;
} STscStmt;
+extern char *gStmtStatusStr[];
+
+#define STMT_LOG_SEQ(n) do { (pStmt)->seqId++; (pStmt)->seqIds[n]++; STMT_DLOG("the %dth:%d %s", (pStmt)->seqIds[n], (pStmt)->seqId, gStmtStatusStr[n]); } while (0)
+
#define STMT_STATUS_NE(S) (pStmt->sql.status != STMT_##S)
#define STMT_STATUS_EQ(S) (pStmt->sql.status == STMT_##S)
@@ -128,6 +135,12 @@ typedef struct STscStmt {
} \
} while (0)
+#define STMT_ELOG(param, ...) qError("stmt:%p " param, pStmt, __VA_ARGS__)
+#define STMT_DLOG(param, ...) qDebug("stmt:%p " param, pStmt, __VA_ARGS__)
+
+#define STMT_ELOG_E(param) qError("stmt:%p " param, pStmt)
+#define STMT_DLOG_E(param) qDebug("stmt:%p " param, pStmt)
+
TAOS_STMT *stmtInit(STscObj *taos);
int stmtClose(TAOS_STMT *stmt);
int stmtExec(TAOS_STMT *stmt);
diff --git a/source/client/src/clientImpl.c b/source/client/src/clientImpl.c
index 0c12d78c18..e9e30a0be2 100644
--- a/source/client/src/clientImpl.c
+++ b/source/client/src/clientImpl.c
@@ -133,6 +133,12 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas
taosThreadMutexInit(&p->qnodeMutex, NULL);
p->pTransporter = openTransporter(user, secretEncrypt, tsNumOfCores);
p->pAppHbMgr = appHbMgrInit(p, key);
+ if (NULL == p->pAppHbMgr) {
+ destroyAppInst(p);
+ taosThreadMutexUnlock(&appInfo.mutex);
+ taosMemoryFreeClear(key);
+ return NULL;
+ }
taosHashPut(appInfo.pInstMap, key, strlen(key), &p, POINTER_BYTES);
p->instKey = key;
key = NULL;
@@ -1266,7 +1272,9 @@ static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest) {
pMsgSendInfo->requestObjRefId = pRequest->self;
pMsgSendInfo->requestId = pRequest->requestId;
pMsgSendInfo->fp = getMsgRspHandle(pMsgSendInfo->msgType);
- pMsgSendInfo->param = pRequest;
+ pMsgSendInfo->param = taosMemoryCalloc(1, sizeof(pRequest->self));
+
+ *(int64_t*)pMsgSendInfo->param = pRequest->self;
SConnectReq connectReq = {0};
STscObj* pObj = pRequest->pTscObj;
diff --git a/source/client/src/clientMain.c b/source/client/src/clientMain.c
index b03576ff01..a08eab3a29 100644
--- a/source/client/src/clientMain.c
+++ b/source/client/src/clientMain.c
@@ -286,7 +286,8 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) {
return NULL;
} else {
// assert to avoid un-initialization error
- ASSERT(0);
+ tscError("invalid result passed to taos_fetch_row");
+ return NULL;
}
return NULL;
}
diff --git a/source/client/src/clientMsgHandler.c b/source/client/src/clientMsgHandler.c
index e586018c19..938cc4e41d 100644
--- a/source/client/src/clientMsgHandler.c
+++ b/source/client/src/clientMsgHandler.c
@@ -45,8 +45,13 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) {
}
int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
- SRequestObj* pRequest = param;
-
+ SRequestObj *pRequest = acquireRequest(*(int64_t*)param);
+ if (NULL == pRequest) {
+ setErrno(pRequest, TSDB_CODE_TSC_DISCONNECTED);
+ tsem_post(&pRequest->body.rspSem);
+ goto End;
+ }
+
if (code != TSDB_CODE_SUCCESS) {
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
@@ -55,6 +60,12 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
STscObj* pTscObj = pRequest->pTscObj;
+ if (NULL == pTscObj->pAppInfo || NULL == pTscObj->pAppInfo->pAppHbMgr) {
+ setErrno(pRequest, TSDB_CODE_TSC_DISCONNECTED);
+ tsem_post(&pRequest->body.rspSem);
+ goto End;
+ }
+
SConnectRsp connectRsp = {0};
if (tDeserializeSConnectRsp(pMsg->pData, pMsg->len, &connectRsp) != 0) {
code = TSDB_CODE_TSC_INVALID_VERSION;
@@ -115,10 +126,15 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
pTscObj->pAppInfo->numOfConns);
-
+
tsem_post(&pRequest->body.rspSem);
End:
+ if (pRequest) {
+ releaseRequest(pRequest->self);
+ }
+
+ taosMemoryFree(param);
taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
return code;
diff --git a/source/client/src/clientStmt.c b/source/client/src/clientStmt.c
index 81d0d616c9..7f8d857a0f 100644
--- a/source/client/src/clientStmt.c
+++ b/source/client/src/clientStmt.c
@@ -5,6 +5,8 @@
#include "clientStmt.h"
+char *gStmtStatusStr[] = {"unknown", "init", "prepare", "settbname", "settags", "fetchFields", "bind", "bindCol", "addBatch", "exec"};
+
static int32_t stmtCreateRequest(STscStmt* pStmt) {
int32_t code = 0;
@@ -21,6 +23,10 @@ static int32_t stmtCreateRequest(STscStmt* pStmt) {
int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) {
int32_t code = 0;
+ if (newStatus >= STMT_INIT && newStatus < STMT_MAX) {
+ STMT_LOG_SEQ(newStatus);
+ }
+
switch (newStatus) {
case STMT_PREPARE:
break;
@@ -528,13 +534,17 @@ TAOS_STMT* stmtInit(STscObj* taos) {
pStmt->bInfo.needParse = true;
pStmt->sql.status = STMT_INIT;
+ STMT_LOG_SEQ(STMT_INIT);
+
+ tscDebug("stmt:%p initialized", pStmt);
+
return pStmt;
}
int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
STscStmt* pStmt = (STscStmt*)stmt;
- tscDebug("stmt start to prepare");
+ STMT_DLOG_E("start to prepare");
if (pStmt->sql.status >= STMT_PREPARE) {
STMT_ERR_RET(stmtResetStmt(pStmt));
@@ -555,7 +565,7 @@ int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
STscStmt* pStmt = (STscStmt*)stmt;
- tscDebug("stmt start to set tbName: %s", tbName);
+ STMT_DLOG("start to set tbName: %s", tbName);
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTBNAME));
@@ -587,7 +597,7 @@ int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
STscStmt* pStmt = (STscStmt*)stmt;
- tscDebug("stmt start to set tbTags");
+ STMT_DLOG_E("start to set tbTags");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTAGS));
@@ -649,7 +659,7 @@ int stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields
int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
STscStmt* pStmt = (STscStmt*)stmt;
- tscDebug("start to bind stmt data, colIdx: %d", colIdx);
+ STMT_DLOG("start to bind stmt data, colIdx: %d", colIdx);
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_BIND));
@@ -743,7 +753,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
int stmtAddBatch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
- tscDebug("stmt start to add batch");
+ STMT_DLOG_E("start to add batch");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_ADD_BATCH));
@@ -756,8 +766,7 @@ int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
tscDebug("stmt start to update tbUid, blockNum: %d", pRsp->nBlocks);
if (pRsp->nBlocks <= 0) {
- tscError("invalid submit resp block number %d", pRsp->nBlocks);
- STMT_ERR_RET(TSDB_CODE_TSC_APP_ERROR);
+ return TSDB_CODE_SUCCESS;
}
size_t keyLen = 0;
@@ -810,7 +819,7 @@ int stmtExec(TAOS_STMT* stmt) {
SSubmitRsp* pRsp = NULL;
bool autoCreateTbl = pStmt->exec.autoCreateTbl;
- tscDebug("stmt start to exec");
+ STMT_DLOG_E("start to exec");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE));
@@ -885,6 +894,8 @@ int stmtAffectedRowsOnce(TAOS_STMT* stmt) { return ((STscStmt*)stmt)->exec.affec
int stmtIsInsert(TAOS_STMT* stmt, int* insert) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_DLOG_E("start is insert");
+
if (pStmt->sql.type) {
*insert = (STMT_TYPE_INSERT == pStmt->sql.type || STMT_TYPE_MULTI_INSERT == pStmt->sql.type);
} else {
@@ -897,6 +908,8 @@ int stmtIsInsert(TAOS_STMT* stmt, int* insert) {
int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_DLOG_E("start to get tag fields");
+
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
@@ -927,6 +940,8 @@ int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_DLOG_E("start to get col fields");
+
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
@@ -957,6 +972,8 @@ int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_DLOG_E("start to get param num");
+
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
@@ -986,6 +1003,8 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_DLOG_E("start to get param");
+
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
@@ -1028,6 +1047,8 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
TAOS_RES* stmtUseResult(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
+ STMT_DLOG_E("start to use result");
+
if (STMT_TYPE_QUERY != pStmt->sql.type) {
tscError("useResult only for query statement");
return NULL;
diff --git a/source/client/src/clientTmq.c b/source/client/src/clientTmq.c
index ba639476d8..8e7faf48f6 100644
--- a/source/client/src/clientTmq.c
+++ b/source/client/src/clientTmq.c
@@ -1756,7 +1756,7 @@ TAOS_RES* tmq_consumer_poll(tmq_t* tmq, int64_t timeout) {
rspObj = tmqHandleAllRsp(tmq, timeout, false);
if (rspObj) {
- tscDebug("consumer:%" PRId64 ", return rsp", tmq->consumerId);
+ tscDebug("consumer:%" PRId64 ", return rsp %p", tmq->consumerId, rspObj);
return (TAOS_RES*)rspObj;
} else if (terrno == TSDB_CODE_TQ_NO_COMMITTED_OFFSET) {
tscDebug("consumer:%" PRId64 ", return null since no committed offset", tmq->consumerId);
diff --git a/source/client/test/clientTests.cpp b/source/client/test/clientTests.cpp
index 21a52a4b57..85814305bd 100644
--- a/source/client/test/clientTests.cpp
+++ b/source/client/test/clientTests.cpp
@@ -112,7 +112,7 @@ void createNewTable(TAOS* pConn, int32_t index) {
}
taos_free_result(pRes);
- for(int32_t i = 0; i < 10000; i += 20) {
+ for(int32_t i = 0; i < 20; i += 20) {
char sql[1024] = {0};
sprintf(sql,
"insert into tu%d values(now+%da, %d)(now+%da, %d)(now+%da, %d)(now+%da, %d)"
@@ -692,6 +692,7 @@ TEST(testCase, insert_test) {
taos_free_result(pRes);
taos_close(pConn);
}
+#endif
TEST(testCase, projection_query_tables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
@@ -703,7 +704,7 @@ TEST(testCase, projection_query_tables) {
// }
// taos_free_result(pRes);
- TAOS_RES* pRes = taos_query(pConn, "use benchmarkcpu");
+ TAOS_RES* pRes = taos_query(pConn, "use abc2");
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable st1 (ts timestamp, k int) tags(a int)");
@@ -725,7 +726,7 @@ TEST(testCase, projection_query_tables) {
}
taos_free_result(pRes);
- for (int32_t i = 0; i < 2; ++i) {
+ for (int32_t i = 0; i < 200000; ++i) {
printf("create table :%d\n", i);
createNewTable(pConn, i);
}
@@ -750,7 +751,9 @@ TEST(testCase, projection_query_tables) {
taos_free_result(pRes);
taos_close(pConn);
}
-#endif
+
+
+#if 0
TEST(testCase, tsbs_perf_test) {
TdThread qid[20] = {0};
@@ -761,7 +764,7 @@ TEST(testCase, tsbs_perf_test) {
getchar();
}
-#if 0
+
TEST(testCase, projection_query_stables) {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_NE(pConn, nullptr);
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index 7f4a826c5e..064249d754 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -50,8 +50,6 @@ int32_t tsNumOfMnodeReadThreads = 1;
int32_t tsNumOfVnodeQueryThreads = 4;
int32_t tsNumOfVnodeStreamThreads = 2;
int32_t tsNumOfVnodeFetchThreads = 4;
-int32_t tsNumOfVnodeWriteThreads = 2;
-int32_t tsNumOfVnodeSyncThreads = 2;
int32_t tsNumOfVnodeRsmaThreads = 2;
int32_t tsNumOfQnodeQueryThreads = 4;
int32_t tsNumOfQnodeFetchThreads = 1;
@@ -374,14 +372,6 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
tsNumOfVnodeFetchThreads = TMAX(tsNumOfVnodeFetchThreads, 4);
if (cfgAddInt32(pCfg, "numOfVnodeFetchThreads", tsNumOfVnodeFetchThreads, 4, 1024, 0) != 0) return -1;
- tsNumOfVnodeWriteThreads = tsNumOfCores;
- tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
- if (cfgAddInt32(pCfg, "numOfVnodeWriteThreads", tsNumOfVnodeWriteThreads, 1, 1024, 0) != 0) return -1;
-
- tsNumOfVnodeSyncThreads = tsNumOfCores * 2;
- tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
- if (cfgAddInt32(pCfg, "numOfVnodeSyncThreads", tsNumOfVnodeSyncThreads, 1, 1024, 0) != 0) return -1;
-
tsNumOfVnodeRsmaThreads = tsNumOfCores;
tsNumOfVnodeRsmaThreads = TMAX(tsNumOfVnodeRsmaThreads, 4);
if (cfgAddInt32(pCfg, "numOfVnodeRsmaThreads", tsNumOfVnodeRsmaThreads, 1, 1024, 0) != 0) return -1;
@@ -506,22 +496,6 @@ static int32_t taosUpdateServerCfg(SConfig *pCfg) {
pItem->stype = stype;
}
- pItem = cfgGetItem(tsCfg, "numOfVnodeWriteThreads");
- if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
- tsNumOfVnodeWriteThreads = numOfCores;
- tsNumOfVnodeWriteThreads = TMAX(tsNumOfVnodeWriteThreads, 1);
- pItem->i32 = tsNumOfVnodeWriteThreads;
- pItem->stype = stype;
- }
-
- pItem = cfgGetItem(tsCfg, "numOfVnodeSyncThreads");
- if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
- tsNumOfVnodeSyncThreads = numOfCores * 2;
- tsNumOfVnodeSyncThreads = TMAX(tsNumOfVnodeSyncThreads, 16);
- pItem->i32 = tsNumOfVnodeSyncThreads;
- pItem->stype = stype;
- }
-
pItem = cfgGetItem(tsCfg, "numOfVnodeRsmaThreads");
if (pItem != NULL && pItem->stype == CFG_STYPE_DEFAULT) {
tsNumOfVnodeRsmaThreads = numOfCores;
@@ -699,8 +673,6 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsNumOfVnodeQueryThreads = cfgGetItem(pCfg, "numOfVnodeQueryThreads")->i32;
tsNumOfVnodeStreamThreads = cfgGetItem(pCfg, "numOfVnodeStreamThreads")->i32;
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
- tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
- tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
tsNumOfQnodeQueryThreads = cfgGetItem(pCfg, "numOfQnodeQueryThreads")->i32;
// tsNumOfQnodeFetchThreads = cfgGetItem(pCfg, "numOfQnodeFetchThreads")->i32;
@@ -943,10 +915,6 @@ int32_t taosSetCfg(SConfig *pCfg, char *name) {
} else if (strcasecmp("numOfVnodeFetchThreads", name) == 0) {
tsNumOfVnodeFetchThreads = cfgGetItem(pCfg, "numOfVnodeFetchThreads")->i32;
*/
- } else if (strcasecmp("numOfVnodeWriteThreads", name) == 0) {
- tsNumOfVnodeWriteThreads = cfgGetItem(pCfg, "numOfVnodeWriteThreads")->i32;
- } else if (strcasecmp("numOfVnodeSyncThreads", name) == 0) {
- tsNumOfVnodeSyncThreads = cfgGetItem(pCfg, "numOfVnodeSyncThreads")->i32;
} else if (strcasecmp("numOfVnodeRsmaThreads", name) == 0) {
tsNumOfVnodeRsmaThreads = cfgGetItem(pCfg, "numOfVnodeRsmaThreads")->i32;
} else if (strcasecmp("numOfQnodeQueryThreads", name) == 0) {
diff --git a/source/common/src/tmsg.c b/source/common/src/tmsg.c
index 073e6bc8c6..4eaa934676 100644
--- a/source/common/src/tmsg.c
+++ b/source/common/src/tmsg.c
@@ -1020,6 +1020,7 @@ int32_t tSerializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tEncodeI64(&encoder, pReq->qload.timeInQueryQueue) < 0) return -1;
if (tEncodeI64(&encoder, pReq->qload.timeInFetchQueue) < 0) return -1;
+ if (tEncodeI32(&encoder, pReq->statusSeq) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -1095,6 +1096,7 @@ int32_t tDeserializeSStatusReq(void *buf, int32_t bufLen, SStatusReq *pReq) {
if (tDecodeI64(&decoder, &pReq->qload.timeInQueryQueue) < 0) return -1;
if (tDecodeI64(&decoder, &pReq->qload.timeInFetchQueue) < 0) return -1;
+ if (tDecodeI32(&decoder, &pReq->statusSeq) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@@ -1126,6 +1128,7 @@ int32_t tSerializeSStatusRsp(void *buf, int32_t bufLen, SStatusRsp *pRsp) {
if (tEncodeU16(&encoder, pDnodeEp->ep.port) < 0) return -1;
}
+ if (tEncodeI32(&encoder, pRsp->statusSeq) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@@ -1167,6 +1170,7 @@ int32_t tDeserializeSStatusRsp(void *buf, int32_t bufLen, SStatusRsp *pRsp) {
}
}
+ if (tDecodeI32(&decoder, &pRsp->statusSeq) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
@@ -5432,9 +5436,12 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pBlock->code) < 0) return -1;
- if (tEncodeI8(pEncoder, pBlock->hashMeta) < 0) return -1;
if (tEncodeI64(pEncoder, pBlock->uid) < 0) return -1;
- if (tEncodeCStr(pEncoder, pBlock->tblFName) < 0) return -1;
+ if (pBlock->tblFName) {
+ if (tEncodeCStr(pEncoder, pBlock->tblFName) < 0) return -1;
+ } else {
+ if (tEncodeCStr(pEncoder, "") < 0) return -1;
+ }
if (tEncodeI32v(pEncoder, pBlock->numOfRows) < 0) return -1;
if (tEncodeI32v(pEncoder, pBlock->affectedRows) < 0) return -1;
if (tEncodeI64v(pEncoder, pBlock->sver) < 0) return -1;
@@ -5451,7 +5458,6 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
- if (tDecodeI8(pDecoder, &pBlock->hashMeta) < 0) return -1;
if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
if (NULL == pBlock->tblFName) return -1;
diff --git a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h
index dc4412b77b..c776beb3f0 100644
--- a/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h
+++ b/source/dnode/mgmt/mgmt_dnode/inc/dmInt.h
@@ -36,6 +36,7 @@ typedef struct SDnodeMgmt {
GetVnodeLoadsFp getVnodeLoadsFp;
GetMnodeLoadsFp getMnodeLoadsFp;
GetQnodeLoadsFp getQnodeLoadsFp;
+ int32_t statusSeq;
} SDnodeMgmt;
// dmHandle.c
diff --git a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
index f12dce5149..85a09b79fd 100644
--- a/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
+++ b/source/dnode/mgmt/mgmt_dnode/src/dmHandle.c
@@ -32,9 +32,13 @@ static void dmUpdateDnodeCfg(SDnodeMgmt *pMgmt, SDnodeCfg *pCfg) {
}
static void dmProcessStatusRsp(SDnodeMgmt *pMgmt, SRpcMsg *pRsp) {
+ const STraceId *trace = &pRsp->info.traceId;
+ dGTrace("status rsp received from mnode, statusSeq:%d code:0x%x", pMgmt->statusSeq, pRsp->code);
+
if (pRsp->code != 0) {
if (pRsp->code == TSDB_CODE_MND_DNODE_NOT_EXIST && !pMgmt->pData->dropped && pMgmt->pData->dnodeId > 0) {
- dInfo("dnode:%d, set to dropped since not exist in mnode", pMgmt->pData->dnodeId);
+ dGInfo("dnode:%d, set to dropped since not exist in mnode, statusSeq:%d", pMgmt->pData->dnodeId,
+ pMgmt->statusSeq);
pMgmt->pData->dropped = 1;
dmWriteEps(pMgmt->pData);
}
@@ -42,9 +46,9 @@ static void dmProcessStatusRsp(SDnodeMgmt *pMgmt, SRpcMsg *pRsp) {
SStatusRsp statusRsp = {0};
if (pRsp->pCont != NULL && pRsp->contLen > 0 &&
tDeserializeSStatusRsp(pRsp->pCont, pRsp->contLen, &statusRsp) == 0) {
- dTrace("status msg received from mnode, dnodeVer:%" PRId64 " saved:%" PRId64, statusRsp.dnodeVer,
- pMgmt->pData->dnodeVer);
if (pMgmt->pData->dnodeVer != statusRsp.dnodeVer) {
+ dGInfo("status rsp received from mnode, statusSeq:%d:%d dnodeVer:%" PRId64 ":%" PRId64, pMgmt->statusSeq,
+ statusRsp.statusSeq, pMgmt->pData->dnodeVer, statusRsp.dnodeVer);
pMgmt->pData->dnodeVer = statusRsp.dnodeVer;
dmUpdateDnodeCfg(pMgmt, &statusRsp.dnodeCfg);
dmUpdateEps(pMgmt->pData, statusRsp.pDnodeEps);
@@ -91,6 +95,9 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
(*pMgmt->getQnodeLoadsFp)(&req.qload);
+ pMgmt->statusSeq++;
+ req.statusSeq = pMgmt->statusSeq;
+
int32_t contLen = tSerializeSStatusReq(NULL, 0, &req);
void *pHead = rpcMallocCont(contLen);
tSerializeSStatusReq(pHead, contLen, &req);
@@ -99,13 +106,13 @@ void dmSendStatusReq(SDnodeMgmt *pMgmt) {
SRpcMsg rpcMsg = {.pCont = pHead, .contLen = contLen, .msgType = TDMT_MND_STATUS, .info.ahandle = (void *)0x9527};
SRpcMsg rpcRsp = {0};
- dTrace("send status msg to mnode, dnodeVer:%" PRId64, req.dnodeVer);
+ dTrace("send status req to mnode, dnodeVer:%" PRId64 " statusSeq:%d", req.dnodeVer, req.statusSeq);
SEpSet epSet = {0};
dmGetMnodeEpSet(pMgmt->pData, &epSet);
rpcSendRecv(pMgmt->msgCb.clientRpc, &epSet, &rpcMsg, &rpcRsp);
if (rpcRsp.code != 0) {
- dError("failed to send status msg since %s, numOfEps:%d inUse:%d", tstrerror(rpcRsp.code), epSet.numOfEps,
+ dError("failed to send status req since %s, numOfEps:%d inUse:%d", tstrerror(rpcRsp.code), epSet.numOfEps,
epSet.inUse);
for (int32_t i = 0; i < epSet.numOfEps; ++i) {
dDebug("index:%d, mnode ep:%s:%u", i, epSet.eps[i].fqdn, epSet.eps[i].port);
diff --git a/source/dnode/mgmt/mgmt_snode/src/smWorker.c b/source/dnode/mgmt/mgmt_snode/src/smWorker.c
index 6a70527541..dbd081338a 100644
--- a/source/dnode/mgmt/mgmt_snode/src/smWorker.c
+++ b/source/dnode/mgmt/mgmt_snode/src/smWorker.c
@@ -58,7 +58,11 @@ static void smProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
dTrace("msg:%p, get from snode-stream queue", pMsg);
int32_t code = sndProcessStreamMsg(pMgmt->pSnode, pMsg);
if (code < 0) {
- dGError("snd, msg:%p failed to process stream since %s", pMsg, terrstr(code));
+ if (pMsg) {
+ dGError("snd, msg:%p failed to process stream msg %s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr(code));
+ } else {
+ dGError("snd, msg:%p failed to process stream empty msg since %s", pMsg, terrstr(code));
+ }
smSendRsp(pMsg, terrno);
}
diff --git a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
index bf1ccc1a7b..b38dc19361 100644
--- a/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
+++ b/source/dnode/mgmt/mgmt_vnode/inc/vmInt.h
@@ -33,10 +33,6 @@ typedef struct SVnodeMgmt {
SQWorkerPool queryPool;
SQWorkerPool streamPool;
SWWorkerPool fetchPool;
- SWWorkerPool syncPool;
- SWWorkerPool syncCtrlPool;
- SWWorkerPool writePool;
- SWWorkerPool applyPool;
SSingleWorker mgmtWorker;
SHashObj *hash;
TdThreadRwlock lock;
@@ -52,19 +48,19 @@ typedef struct {
} SWrapperCfg;
typedef struct {
- int32_t vgId;
- int32_t vgVersion;
- int32_t refCount;
- int8_t dropped;
- char *path;
- SVnode *pImpl;
- STaosQueue *pWriteQ;
- STaosQueue *pSyncQ;
- STaosQueue *pSyncCtrlQ;
- STaosQueue *pApplyQ;
- STaosQueue *pQueryQ;
- STaosQueue *pStreamQ;
- STaosQueue *pFetchQ;
+ int32_t vgId;
+ int32_t vgVersion;
+ int32_t refCount;
+ int8_t dropped;
+ char *path;
+ SVnode *pImpl;
+ SMultiWorker pWriteW;
+ SMultiWorker pSyncW;
+ SMultiWorker pSyncCtrlW;
+ SMultiWorker pApplyW;
+ STaosQueue *pQueryQ;
+ STaosQueue *pStreamQ;
+ STaosQueue *pFetchQ;
} SVnodeObj;
typedef struct {
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
index 4d6596b9d5..51213a6ab3 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmHandle.c
@@ -403,7 +403,6 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_COMMIT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_SCH_QUERY_HEARTBEAT, vmPutMsgToFetchQueue, 0) == NULL) goto _OVER;
- if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DROP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_DEPLOY, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_RUN, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
@@ -412,6 +411,9 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RETRIEVE_RSP, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_STREAM_RECOVER_FINISH, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_STREAM_TASK_CHECK_RSP, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
+ if (dmSetMgmtHandle(pArray, TDMT_VND_STREAM_TRIGGER, vmPutMsgToStreamQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_REPLICA, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIG, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
index f36604eb27..07ebd72379 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmInt.c
@@ -79,29 +79,49 @@ int32_t vmOpenVnode(SVnodeMgmt *pMgmt, SWrapperCfg *pCfg, SVnode *pImpl) {
void vmCloseVnode(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
char path[TSDB_FILENAME_LEN] = {0};
- vnodePreClose(pVnode->pImpl);
-
taosThreadRwlockWrlock(&pMgmt->lock);
taosHashRemove(pMgmt->hash, &pVnode->vgId, sizeof(int32_t));
taosThreadRwlockUnlock(&pMgmt->lock);
vmReleaseVnode(pMgmt, pVnode);
- dTrace("vgId:%d, wait for vnode ref become 0", pVnode->vgId);
- while (pVnode->refCount > 0) taosMsleep(10);
- dTrace("vgId:%d, wait for vnode queue is empty", pVnode->vgId);
+ dInfo("vgId:%d, pre close", pVnode->vgId);
+ vnodePreClose(pVnode->pImpl);
- while (!taosQueueEmpty(pVnode->pWriteQ)) taosMsleep(10);
- while (!taosQueueEmpty(pVnode->pSyncQ)) taosMsleep(10);
- while (!taosQueueEmpty(pVnode->pApplyQ)) taosMsleep(10);
+ dInfo("vgId:%d, wait for vnode ref become 0", pVnode->vgId);
+ while (pVnode->refCount > 0) taosMsleep(10);
+
+ dInfo("vgId:%d, wait for vnode write queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
+ pVnode->pWriteW.queue->threadId);
+ tMultiWorkerCleanup(&pVnode->pWriteW);
+
+ dInfo("vgId:%d, wait for vnode sync queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
+ pVnode->pSyncW.queue->threadId);
+ tMultiWorkerCleanup(&pVnode->pSyncW);
+
+ dInfo("vgId:%d, wait for vnode sync ctrl queue:%p is empty, thread:%08" PRId64, pVnode->vgId,
+ pVnode->pSyncCtrlW.queue, pVnode->pSyncCtrlW.queue->threadId);
+ tMultiWorkerCleanup(&pVnode->pSyncCtrlW);
+
+ dInfo("vgId:%d, wait for vnode apply queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
+ pVnode->pApplyW.queue->threadId);
+ tMultiWorkerCleanup(&pVnode->pApplyW);
+
+ dInfo("vgId:%d, wait for vnode query queue:%p is empty", pVnode->vgId, pVnode->pQueryQ);
while (!taosQueueEmpty(pVnode->pQueryQ)) taosMsleep(10);
+
+ dInfo("vgId:%d, wait for vnode fetch queue:%p is empty, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
+ pVnode->pFetchQ->threadId);
while (!taosQueueEmpty(pVnode->pFetchQ)) taosMsleep(10);
+
+ dInfo("vgId:%d, wait for vnode stream queue:%p is empty", pVnode->vgId, pVnode->pStreamQ);
while (!taosQueueEmpty(pVnode->pStreamQ)) taosMsleep(10);
- dTrace("vgId:%d, vnode queue is empty", pVnode->vgId);
+
+ dInfo("vgId:%d, all vnode queues is empty", pVnode->vgId);
vmFreeQueue(pMgmt, pVnode);
vnodeClose(pVnode->pImpl);
pVnode->pImpl = NULL;
- dDebug("vgId:%d, vnode is closed", pVnode->vgId);
+ dInfo("vgId:%d, vnode is closed", pVnode->vgId);
if (pVnode->dropped) {
dInfo("vgId:%d, vnode is destroyed, dropped:%d", pVnode->vgId, pVnode->dropped);
diff --git a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
index abef82dbd8..d4815e4843 100644
--- a/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
+++ b/source/dnode/mgmt/mgmt_vnode/src/vmWorker.c
@@ -86,7 +86,12 @@ static void vmProcessStreamQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
int32_t code = vnodeProcessFetchMsg(pVnode->pImpl, pMsg, pInfo);
if (code != 0) {
if (terrno != 0) code = terrno;
- dGError("vgId:%d, msg:%p failed to process stream since %s", pVnode->vgId, pMsg, terrstr(code));
+ if (pMsg) {
+ dGError("vgId:%d, msg:%p failed to process stream msg %s since %s", pVnode->vgId, pMsg, TMSG_INFO(pMsg->msgType),
+ terrstr(code));
+ } else {
+ dGError("vgId:%d, msg:%p failed to process stream empty msg since %s", pVnode->vgId, pMsg, terrstr(code));
+ }
vmSendRsp(pMsg, code);
}
@@ -183,30 +188,20 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
dDebug("vgId:%d, msg:%p put into vnode-write queue failed since %s", pVnode->vgId, pMsg, terrstr(code));
} else {
dGTrace("vgId:%d, msg:%p put into vnode-write queue", pVnode->vgId, pMsg);
- taosWriteQitem(pVnode->pWriteQ, pMsg);
-#if 0 // tests for batch writes
- if (pMsg->msgType == TDMT_VND_CREATE_TABLE) {
- SRpcMsg *pDup = taosAllocateQitem(sizeof(SRpcMsg), RPC_QITEM);
- memcpy(pDup, pMsg, sizeof(SRpcMsg));
- pDup->pCont = rpcMallocCont(pMsg->contLen);
- memcpy(pDup->pCont, pMsg->pCont, pMsg->contLen);
- pDup->info.handle = NULL;
- taosWriteQitem(pVnode->pWriteQ, pDup);
- }
-#endif
+ taosWriteQitem(pVnode->pWriteW.queue, pMsg);
}
break;
case SYNC_QUEUE:
dGTrace("vgId:%d, msg:%p put into vnode-sync queue", pVnode->vgId, pMsg);
- taosWriteQitem(pVnode->pSyncQ, pMsg);
+ taosWriteQitem(pVnode->pSyncW.queue, pMsg);
break;
case SYNC_CTRL_QUEUE:
dGTrace("vgId:%d, msg:%p put into vnode-sync-ctrl queue", pVnode->vgId, pMsg);
- taosWriteQitem(pVnode->pSyncCtrlQ, pMsg);
+ taosWriteQitem(pVnode->pSyncCtrlW.queue, pMsg);
break;
case APPLY_QUEUE:
dGTrace("vgId:%d, msg:%p put into vnode-apply queue", pVnode->vgId, pMsg);
- taosWriteQitem(pVnode->pApplyQ, pMsg);
+ taosWriteQitem(pVnode->pApplyW.queue, pMsg);
break;
default:
code = -1;
@@ -218,7 +213,9 @@ static int32_t vmPutMsgToQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg, EQueueType qtyp
return code;
}
-int32_t vmPutMsgToSyncCtrlQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_CTRL_QUEUE); }
+int32_t vmPutMsgToSyncCtrlQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) {
+ return vmPutMsgToQueue(pMgmt, pMsg, SYNC_CTRL_QUEUE);
+}
int32_t vmPutMsgToSyncQueue(SVnodeMgmt *pMgmt, SRpcMsg *pMsg) { return vmPutMsgToQueue(pMgmt, pMsg, SYNC_QUEUE); }
@@ -269,13 +266,13 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
if (pVnode != NULL) {
switch (qtype) {
case WRITE_QUEUE:
- size = taosQueueItemSize(pVnode->pWriteQ);
+ size = taosQueueItemSize(pVnode->pWriteW.queue);
break;
case SYNC_QUEUE:
- size = taosQueueItemSize(pVnode->pSyncQ);
+ size = taosQueueItemSize(pVnode->pSyncW.queue);
break;
case APPLY_QUEUE:
- size = taosQueueItemSize(pVnode->pApplyQ);
+ size = taosQueueItemSize(pVnode->pApplyW.queue);
break;
case QUERY_QUEUE:
size = taosQueueItemSize(pVnode->pQueryQ);
@@ -299,40 +296,44 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
}
int32_t vmAllocQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
- pVnode->pWriteQ = tWWorkerAllocQueue(&pMgmt->writePool, pVnode->pImpl, (FItems)vnodeProposeWriteMsg);
- pVnode->pSyncQ = tWWorkerAllocQueue(&pMgmt->syncPool, pVnode, (FItems)vmProcessSyncQueue);
- pVnode->pSyncCtrlQ = tWWorkerAllocQueue(&pMgmt->syncCtrlPool, pVnode, (FItems)vmProcessSyncQueue);
- pVnode->pApplyQ = tWWorkerAllocQueue(&pMgmt->applyPool, pVnode->pImpl, (FItems)vnodeApplyWriteMsg);
+ SMultiWorkerCfg wcfg = {.max = 1, .name = "vnode-write", .fp = (FItems)vnodeProposeWriteMsg, .param = pVnode->pImpl};
+ SMultiWorkerCfg scfg = {.max = 1, .name = "vnode-sync", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
+ SMultiWorkerCfg sccfg = {.max = 1, .name = "vnode-sync-ctrl", .fp = (FItems)vmProcessSyncQueue, .param = pVnode};
+ SMultiWorkerCfg acfg = {.max = 1, .name = "vnode-apply", .fp = (FItems)vnodeApplyWriteMsg, .param = pVnode->pImpl};
+ (void)tMultiWorkerInit(&pVnode->pWriteW, &wcfg);
+ (void)tMultiWorkerInit(&pVnode->pSyncW, &scfg);
+ (void)tMultiWorkerInit(&pVnode->pSyncCtrlW, &sccfg);
+ (void)tMultiWorkerInit(&pVnode->pApplyW, &acfg);
+
pVnode->pQueryQ = tQWorkerAllocQueue(&pMgmt->queryPool, pVnode, (FItem)vmProcessQueryQueue);
pVnode->pStreamQ = tQWorkerAllocQueue(&pMgmt->streamPool, pVnode, (FItem)vmProcessStreamQueue);
pVnode->pFetchQ = tWWorkerAllocQueue(&pMgmt->fetchPool, pVnode, (FItems)vmProcessFetchQueue);
- if (pVnode->pWriteQ == NULL || pVnode->pSyncQ == NULL || pVnode->pApplyQ == NULL || pVnode->pQueryQ == NULL ||
- pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL) {
+ if (pVnode->pWriteW.queue == NULL || pVnode->pSyncW.queue == NULL || pVnode->pSyncCtrlW.queue == NULL ||
+ pVnode->pApplyW.queue == NULL || pVnode->pQueryQ == NULL || pVnode->pStreamQ == NULL || pVnode->pFetchQ == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
- dDebug("vgId:%d, write-queue:%p is alloced", pVnode->vgId, pVnode->pWriteQ);
- dDebug("vgId:%d, sync-queue:%p is alloced", pVnode->vgId, pVnode->pSyncQ);
- dDebug("vgId:%d, apply-queue:%p is alloced", pVnode->vgId, pVnode->pApplyQ);
- dDebug("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
- dDebug("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ);
- dDebug("vgId:%d, fetch-queue:%p is alloced", pVnode->vgId, pVnode->pFetchQ);
+ dInfo("vgId:%d, write-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pWriteW.queue,
+ pVnode->pWriteW.queue->threadId);
+ dInfo("vgId:%d, sync-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncW.queue,
+ pVnode->pSyncW.queue->threadId);
+ dInfo("vgId:%d, sync-ctrl-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pSyncCtrlW.queue,
+ pVnode->pSyncCtrlW.queue->threadId);
+ dInfo("vgId:%d, apply-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pApplyW.queue,
+ pVnode->pApplyW.queue->threadId);
+ dInfo("vgId:%d, query-queue:%p is alloced", pVnode->vgId, pVnode->pQueryQ);
+ dInfo("vgId:%d, fetch-queue:%p is alloced, thread:%08" PRId64, pVnode->vgId, pVnode->pFetchQ,
+ pVnode->pFetchQ->threadId);
+ dInfo("vgId:%d, stream-queue:%p is alloced", pVnode->vgId, pVnode->pStreamQ);
return 0;
}
void vmFreeQueue(SVnodeMgmt *pMgmt, SVnodeObj *pVnode) {
- tWWorkerFreeQueue(&pMgmt->writePool, pVnode->pWriteQ);
- tWWorkerFreeQueue(&pMgmt->applyPool, pVnode->pApplyQ);
- tWWorkerFreeQueue(&pMgmt->syncPool, pVnode->pSyncQ);
- tWWorkerFreeQueue(&pMgmt->syncCtrlPool, pVnode->pSyncCtrlQ);
tQWorkerFreeQueue(&pMgmt->queryPool, pVnode->pQueryQ);
tQWorkerFreeQueue(&pMgmt->streamPool, pVnode->pStreamQ);
tWWorkerFreeQueue(&pMgmt->fetchPool, pVnode->pFetchQ);
- pVnode->pWriteQ = NULL;
- pVnode->pSyncQ = NULL;
- pVnode->pApplyQ = NULL;
pVnode->pQueryQ = NULL;
pVnode->pStreamQ = NULL;
pVnode->pFetchQ = NULL;
@@ -357,26 +358,6 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
pFPool->max = tsNumOfVnodeFetchThreads;
if (tWWorkerInit(pFPool) != 0) return -1;
- SWWorkerPool *pWPool = &pMgmt->writePool;
- pWPool->name = "vnode-write";
- pWPool->max = tsNumOfVnodeWriteThreads;
- if (tWWorkerInit(pWPool) != 0) return -1;
-
- SWWorkerPool *pAPool = &pMgmt->applyPool;
- pAPool->name = "vnode-apply";
- pAPool->max = tsNumOfVnodeWriteThreads;
- if (tWWorkerInit(pAPool) != 0) return -1;
-
- SWWorkerPool *pSPool = &pMgmt->syncPool;
- pSPool->name = "vnode-sync";
- pSPool->max = tsNumOfVnodeSyncThreads;
- if (tWWorkerInit(pSPool) != 0) return -1;
-
- SWWorkerPool *pSCPool = &pMgmt->syncCtrlPool;
- pSCPool->name = "vnode-sync-ctrl";
- pSCPool->max = tsNumOfVnodeSyncThreads;
- if (tWWorkerInit(pSCPool) != 0) return -1;
-
SSingleWorkerCfg mgmtCfg = {
.min = 1,
.max = 1,
@@ -391,10 +372,6 @@ int32_t vmStartWorker(SVnodeMgmt *pMgmt) {
}
void vmStopWorker(SVnodeMgmt *pMgmt) {
- tWWorkerCleanup(&pMgmt->writePool);
- tWWorkerCleanup(&pMgmt->applyPool);
- tWWorkerCleanup(&pMgmt->syncPool);
- tWWorkerCleanup(&pMgmt->syncCtrlPool);
tQWorkerCleanup(&pMgmt->queryPool);
tQWorkerCleanup(&pMgmt->streamPool);
tWWorkerCleanup(&pMgmt->fetchPool);
diff --git a/source/dnode/mgmt/node_mgmt/src/dmTransport.c b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
index 80df01303a..1e5f3139aa 100644
--- a/source/dnode/mgmt/node_mgmt/src/dmTransport.c
+++ b/source/dnode/mgmt/node_mgmt/src/dmTransport.c
@@ -157,7 +157,11 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
_OVER:
if (code != 0) {
if (terrno != 0) code = terrno;
- dGTrace("msg:%p, failed to process since %s", pMsg, terrstr());
+ if (pMsg) {
+ dGTrace("msg:%p, failed to process %s since %s", pMsg, TMSG_INFO(pMsg->msgType), terrstr());
+ } else {
+ dGTrace("msg:%p, failed to process empty msg since %s", pMsg, terrstr());
+ }
if (IsReq(pRpc)) {
SRpcMsg rsp = {.code = code, .info = pRpc->info};
diff --git a/source/dnode/mnode/impl/src/mndDnode.c b/source/dnode/mnode/impl/src/mndDnode.c
index fa7f406cdf..04f340b0ff 100644
--- a/source/dnode/mnode/impl/src/mndDnode.c
+++ b/source/dnode/mnode/impl/src/mndDnode.c
@@ -345,6 +345,19 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
}
}
+ int64_t dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE);
+ int64_t curMs = taosGetTimestampMs();
+ bool online = mndIsDnodeOnline(pDnode, curMs);
+ bool dnodeChanged = (statusReq.dnodeVer == 0) || (statusReq.dnodeVer != dnodeVer);
+ bool reboot = (pDnode->rebootTime != statusReq.rebootTime);
+ bool needCheck = !online || dnodeChanged || reboot;
+
+ pDnode->accessTimes++;
+ pDnode->lastAccessTime = curMs;
+ const STraceId *trace = &pReq->info.traceId;
+ mGTrace("dnode:%d, status received, accessTimes:%d check:%d online:%d reboot:%d changed:%d statusSeq:%d", pDnode->id,
+ pDnode->accessTimes, needCheck, online, reboot, dnodeChanged, statusReq.statusSeq);
+
for (int32_t v = 0; v < taosArrayGetSize(statusReq.pVloads); ++v) {
SVnodeLoad *pVload = taosArrayGet(statusReq.pVloads, v);
@@ -396,17 +409,6 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
mndReleaseQnode(pMnode, pQnode);
}
- int64_t dnodeVer = sdbGetTableVer(pMnode->pSdb, SDB_DNODE) + sdbGetTableVer(pMnode->pSdb, SDB_MNODE);
- int64_t curMs = taosGetTimestampMs();
- bool online = mndIsDnodeOnline(pDnode, curMs);
- bool dnodeChanged = (statusReq.dnodeVer == 0) || (statusReq.dnodeVer != dnodeVer);
- bool reboot = (pDnode->rebootTime != statusReq.rebootTime);
- bool needCheck = !online || dnodeChanged || reboot;
-
- pDnode->accessTimes++;
- mTrace("dnode:%d, status received, access times:%d check:%d online:%d reboot:%d changed:%d", pDnode->id,
- pDnode->accessTimes, needCheck, online, reboot, dnodeChanged);
-
if (needCheck) {
if (statusReq.sver != tsVersion) {
if (pDnode != NULL) {
@@ -454,6 +456,7 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
pDnode->memTotal = statusReq.memTotal;
SStatusRsp statusRsp = {0};
+ statusRsp.statusSeq++;
statusRsp.dnodeVer = dnodeVer;
statusRsp.dnodeCfg.dnodeId = pDnode->id;
statusRsp.dnodeCfg.clusterId = pMnode->clusterId;
@@ -474,8 +477,6 @@ static int32_t mndProcessStatusReq(SRpcMsg *pReq) {
pReq->info.rsp = pHead;
}
- pDnode->lastAccessTime = curMs;
- pDnode->accessTimes++;
code = 0;
_OVER:
diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c
index 9f433f3322..36ba0aaf87 100644
--- a/source/dnode/mnode/impl/src/mndStream.c
+++ b/source/dnode/mnode/impl/src/mndStream.c
@@ -705,7 +705,8 @@ static int32_t mndProcessDropStreamReq(SRpcMsg *pReq) {
return -1;
}
- STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_NOTHING, pReq, "drop-stream");
+ STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_RETRY, TRN_CONFLICT_DB_INSIDE, pReq, "drop-stream");
+ mndTransSetDbName(pTrans, pStream->sourceDb, pStream->targetDb);
if (pTrans == NULL) {
mError("stream:%s, failed to drop since %s", dropReq.name, terrstr());
sdbRelease(pMnode->pSdb, pStream);
diff --git a/source/dnode/mnode/impl/src/mndSubscribe.c b/source/dnode/mnode/impl/src/mndSubscribe.c
index 74f2b1288e..a62f15f978 100644
--- a/source/dnode/mnode/impl/src/mndSubscribe.c
+++ b/source/dnode/mnode/impl/src/mndSubscribe.c
@@ -612,14 +612,14 @@ static int32_t mndProcessRebalanceReq(SRpcMsg *pMsg) {
mndReleaseSubscribe(pMnode, pSub);
}
- // TODO replace assert with error check
- ASSERT(mndDoRebalance(pMnode, &rebInput, &rebOutput) == 0);
+ if (mndDoRebalance(pMnode, &rebInput, &rebOutput) < 0) {
+ mError("mq rebalance internal error");
+ }
// if add more consumer to balanced subscribe,
// possibly no vg is changed
/*ASSERT(taosArrayGetSize(rebOutput.rebVgs) != 0);*/
- // TODO replace assert with error check
if (mndPersistRebResult(pMnode, pMsg, &rebOutput) < 0) {
mError("mq rebalance persist rebalance output error, possibly vnode splitted or dropped");
}
diff --git a/source/dnode/snode/src/snode.c b/source/dnode/snode/src/snode.c
index d34159d312..f53350c10b 100644
--- a/source/dnode/snode/src/snode.c
+++ b/source/dnode/snode/src/snode.c
@@ -231,10 +231,10 @@ int32_t sndProcessTaskRetrieveReq(SSnode *pSnode, SRpcMsg *pMsg) {
int32_t sndProcessTaskDispatchRsp(SSnode *pSnode, SRpcMsg *pMsg) {
SStreamDispatchRsp *pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
- int32_t taskId = pRsp->taskId;
+ int32_t taskId = ntohl(pRsp->upstreamTaskId);
SStreamTask *pTask = streamMetaGetTask(pSnode->pMeta, taskId);
if (pTask) {
- streamProcessDispatchRsp(pTask, pRsp);
+ streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
return 0;
} else {
return -1;
diff --git a/source/dnode/vnode/inc/vnode.h b/source/dnode/vnode/inc/vnode.h
index 020d2b6049..370103c222 100644
--- a/source/dnode/vnode/inc/vnode.h
+++ b/source/dnode/vnode/inc/vnode.h
@@ -158,7 +158,7 @@ int32_t tsdbReaderOpen(SVnode *pVnode, SQueryTableDataCond *pCond, void *pTableL
void tsdbReaderClose(STsdbReader *pReader);
bool tsdbNextDataBlock(STsdbReader *pReader);
bool tsdbTableNextDataBlock(STsdbReader *pReader, uint64_t uid);
-void tsdbRetrieveDataBlockInfo(const STsdbReader* pReader, int32_t* rows, uint64_t* uid, STimeWindow* pWindow);
+void tsdbRetrieveDataBlockInfo(const STsdbReader *pReader, int32_t *rows, uint64_t *uid, STimeWindow *pWindow);
int32_t tsdbRetrieveDatablockSMA(STsdbReader *pReader, SColumnDataAgg ***pBlockStatis, bool *allHave);
SArray *tsdbRetrieveDataBlock(STsdbReader *pTsdbReadHandle, SArray *pColumnIdList);
int32_t tsdbReaderReset(STsdbReader *pReader, SQueryTableDataCond *pCond);
@@ -240,7 +240,7 @@ bool tqNextDataBlock(STqReader *pReader);
bool tqNextDataBlockFilterOut(STqReader *pReader, SHashObj *filterOutUids);
int32_t tqRetrieveDataBlock(SSDataBlock *pBlock, STqReader *pReader);
-void vnodeEnqueueStreamMsg(SVnode *pVnode, SRpcMsg *pMsg);
+int32_t vnodeEnqueueStreamMsg(SVnode *pVnode, SRpcMsg *pMsg);
// sma
int32_t smaGetTSmaDays(SVnodeCfg *pCfg, void *pCont, uint32_t contLen, int32_t *days);
diff --git a/source/dnode/vnode/src/inc/tsdb.h b/source/dnode/vnode/src/inc/tsdb.h
index c4e7ff92b7..afd53b8dda 100644
--- a/source/dnode/vnode/src/inc/tsdb.h
+++ b/source/dnode/vnode/src/inc/tsdb.h
@@ -643,6 +643,7 @@ typedef struct SSttBlockLoadInfo {
STSchema *pSchema;
int16_t *colIds;
int32_t numOfCols;
+ bool sttBlockLoaded;
} SSttBlockLoadInfo;
typedef struct SMergeTree {
diff --git a/source/dnode/vnode/src/inc/vnd.h b/source/dnode/vnode/src/inc/vnd.h
index d5ad500fdb..8f8691cfc2 100644
--- a/source/dnode/vnode/src/inc/vnd.h
+++ b/source/dnode/vnode/src/inc/vnd.h
@@ -97,6 +97,7 @@ bool vnodeShouldRollback(SVnode* pVnode);
// vnodeSync.c
int32_t vnodeSyncOpen(SVnode* pVnode, char* path);
void vnodeSyncStart(SVnode* pVnode);
+void vnodeSyncPreClose(SVnode* pVnode);
void vnodeSyncClose(SVnode* pVnode);
void vnodeRedirectRpcMsg(SVnode* pVnode, SRpcMsg* pMsg);
bool vnodeIsLeader(SVnode* pVnode);
diff --git a/source/dnode/vnode/src/inc/vnodeInt.h b/source/dnode/vnode/src/inc/vnodeInt.h
index 9ec8bb2cfd..44ecf64419 100644
--- a/source/dnode/vnode/src/inc/vnodeInt.h
+++ b/source/dnode/vnode/src/inc/vnodeInt.h
@@ -150,19 +150,19 @@ typedef struct {
int32_t metaGetStbStats(SMeta* pMeta, int64_t uid, SMetaStbStats* pInfo);
// tsdb
-int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg, int8_t rollback);
-int tsdbClose(STsdb** pTsdb);
-int32_t tsdbBegin(STsdb* pTsdb);
-int32_t tsdbCommit(STsdb* pTsdb);
-int32_t tsdbFinishCommit(STsdb* pTsdb);
-int32_t tsdbRollbackCommit(STsdb* pTsdb);
-int32_t tsdbDoRetention(STsdb* pTsdb, int64_t now);
-int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq* pMsg);
-int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
-int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock,
- SSubmitBlkRsp* pRsp);
-int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
-int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg);
+int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepCfg* pKeepCfg, int8_t rollback);
+int tsdbClose(STsdb** pTsdb);
+int32_t tsdbBegin(STsdb* pTsdb);
+int32_t tsdbCommit(STsdb* pTsdb);
+int32_t tsdbFinishCommit(STsdb* pTsdb);
+int32_t tsdbRollbackCommit(STsdb* pTsdb);
+int32_t tsdbDoRetention(STsdb* pTsdb, int64_t now);
+int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq* pMsg);
+int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
+int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock,
+ SSubmitBlkRsp* pRsp);
+int32_t tsdbDeleteTableData(STsdb* pTsdb, int64_t version, tb_uid_t suid, tb_uid_t uid, TSKEY sKey, TSKEY eKey);
+int32_t tsdbSetKeepCfg(STsdb* pTsdb, STsdbCfg* pCfg);
// tq
int tqInit();
@@ -183,13 +183,13 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
// tq-stream
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessTaskDropReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
+int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg);
+int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t version, char* msg, int32_t msgLen);
int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* data, int64_t ver);
int32_t tqProcessDelReq(STQ* pTq, void* pReq, int32_t len, int64_t ver);
int32_t tqProcessTaskRunReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec);
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg);
-// int32_t tqProcessTaskRecoverReq(STQ* pTq, SRpcMsg* pMsg);
-// int32_t tqProcessTaskRecoverRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessTaskRecover1Req(STQ* pTq, SRpcMsg* pMsg);
diff --git a/source/dnode/vnode/src/tq/tq.c b/source/dnode/vnode/src/tq/tq.c
index 094db9ebd0..9c377fe7f5 100644
--- a/source/dnode/vnode/src/tq/tq.c
+++ b/source/dnode/vnode/src/tq/tq.c
@@ -896,6 +896,10 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
pTask->startVer = ver;
// expand executor
+ if (pTask->fillHistory) {
+ pTask->taskStatus = TASK_STATUS__WAIT_DOWNSTREAM;
+ }
+
if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
if (pTask->pState == NULL) {
@@ -911,9 +915,6 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);
ASSERT(pTask->exec.executor);
- if (pTask->fillHistory) {
- pTask->taskStatus = TASK_STATUS__RECOVER_PREPARE;
- }
} else if (pTask->taskLevel == TASK_LEVEL__AGG) {
pTask->pState = streamStateOpen(pTq->pStreamMeta->path, pTask, false, -1, -1);
if (pTask->pState == NULL) {
@@ -947,11 +948,90 @@ int32_t tqExpandTask(STQ* pTq, SStreamTask* pTask, int64_t ver) {
streamSetupTrigger(pTask);
- tqInfo("expand stream task on vg %d, task id %d, child id %d", TD_VID(pTq->pVnode), pTask->taskId,
- pTask->selfChildId);
+ tqInfo("expand stream task on vg %d, task id %d, child id %d, level %d", TD_VID(pTq->pVnode), pTask->taskId,
+ pTask->selfChildId, pTask->taskLevel);
return 0;
}
+int32_t tqProcessStreamTaskCheckReq(STQ* pTq, SRpcMsg* pMsg) {
+ char* msgStr = pMsg->pCont;
+ char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
+ int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
+ SStreamTaskCheckReq req;
+ SDecoder decoder;
+ tDecoderInit(&decoder, msgBody, msgLen);
+ tDecodeSStreamTaskCheckReq(&decoder, &req);
+ tDecoderClear(&decoder);
+ int32_t taskId = req.downstreamTaskId;
+ SStreamTaskCheckRsp rsp = {
+ .reqId = req.reqId,
+ .streamId = req.streamId,
+ .childId = req.childId,
+ .downstreamNodeId = req.downstreamNodeId,
+ .downstreamTaskId = req.downstreamTaskId,
+ .upstreamNodeId = req.upstreamNodeId,
+ .upstreamTaskId = req.upstreamTaskId,
+ };
+ SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
+ if (pTask && atomic_load_8(&pTask->taskStatus) == TASK_STATUS__NORMAL) {
+ rsp.status = 1;
+ } else {
+ rsp.status = 0;
+ }
+
+ tqDebug("tq recv task check req(reqId: %" PRId64 ") %d at node %d check req from task %d at node %d, status %d",
+ rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
+
+ SEncoder encoder;
+ int32_t code;
+ int32_t len;
+ tEncodeSize(tEncodeSStreamTaskCheckRsp, &rsp, len, code);
+ if (code < 0) {
+ ASSERT(0);
+ }
+ void* buf = rpcMallocCont(sizeof(SMsgHead) + len);
+ ((SMsgHead*)buf)->vgId = htonl(req.upstreamNodeId);
+
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
+ tEncoderInit(&encoder, (uint8_t*)abuf, len);
+ tEncodeSStreamTaskCheckRsp(&encoder, &rsp);
+ tEncoderClear(&encoder);
+
+ SRpcMsg rspMsg = {
+ .code = 0,
+ .pCont = buf,
+ .contLen = sizeof(SMsgHead) + len,
+ .info = pMsg->info,
+ };
+
+ tmsgSendRsp(&rspMsg);
+ return 0;
+}
+
+int32_t tqProcessStreamTaskCheckRsp(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
+ int32_t code;
+ SStreamTaskCheckRsp rsp;
+
+ SDecoder decoder;
+ tDecoderInit(&decoder, (uint8_t*)msg, msgLen);
+ code = tDecodeSStreamTaskCheckRsp(&decoder, &rsp);
+ if (code < 0) {
+ tDecoderClear(&decoder);
+ return -1;
+ }
+ tDecoderClear(&decoder);
+
+ tqDebug("tq recv task check rsp(reqId: %" PRId64 ") %d at node %d check req from task %d at node %d, status %d",
+ rsp.reqId, rsp.downstreamTaskId, rsp.downstreamNodeId, rsp.upstreamTaskId, rsp.upstreamNodeId, rsp.status);
+
+ SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, rsp.upstreamTaskId);
+ if (pTask == NULL) {
+ return -1;
+ }
+
+ return streamProcessTaskCheckRsp(pTask, &rsp, version);
+}
+
int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msgLen) {
int32_t code;
#if 0
@@ -982,37 +1062,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, int64_t version, char* msg, int32_t msg
// 3.go through recover steps to fill history
if (pTask->fillHistory) {
- if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
- streamSetParamForRecover(pTask);
- streamSourceRecoverPrepareStep1(pTask, version);
-
- SStreamRecoverStep1Req req;
- streamBuildSourceRecover1Req(pTask, &req);
- int32_t len = sizeof(SStreamRecoverStep1Req);
-
- void* serializedReq = rpcMallocCont(len);
- if (serializedReq == NULL) {
- return -1;
- }
-
- memcpy(serializedReq, &req, len);
-
- SRpcMsg rpcMsg = {
- .contLen = len,
- .pCont = serializedReq,
- .msgType = TDMT_VND_STREAM_RECOVER_STEP1,
- };
-
- if (tmsgPutToQueue(&pTq->pVnode->msgCb, STREAM_QUEUE, &rpcMsg) < 0) {
- /*ASSERT(0);*/
- }
-
- } else if (pTask->taskLevel == TASK_LEVEL__AGG) {
- streamSetParamForRecover(pTask);
- streamAggRecoverPrepare(pTask);
- } else if (pTask->taskLevel == TASK_LEVEL__SINK) {
- // do nothing
- }
+ streamTaskCheckDownstream(pTask, version);
}
return 0;
@@ -1268,7 +1318,7 @@ int32_t tqProcessSubmitReq(STQ* pTq, SSubmitReq* pReq, int64_t ver) {
if (pIter == NULL) break;
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->taskLevel != TASK_LEVEL__SOURCE) continue;
- if (pTask->taskStatus == TASK_STATUS__RECOVER_PREPARE) {
+ if (pTask->taskStatus == TASK_STATUS__RECOVER_PREPARE || pTask->taskStatus == TASK_STATUS__WAIT_DOWNSTREAM) {
tqDebug("skip push task %d, task status %d", pTask->taskId, pTask->taskStatus);
continue;
}
@@ -1335,10 +1385,11 @@ int32_t tqProcessTaskDispatchReq(STQ* pTq, SRpcMsg* pMsg, bool exec) {
int32_t tqProcessTaskDispatchRsp(STQ* pTq, SRpcMsg* pMsg) {
SStreamDispatchRsp* pRsp = POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead));
- int32_t taskId = pRsp->taskId;
+ int32_t taskId = ntohl(pRsp->upstreamTaskId);
SStreamTask* pTask = streamMetaGetTask(pTq->pStreamMeta, taskId);
+ tqDebug("recv dispatch rsp, code: %x", pMsg->code);
if (pTask) {
- streamProcessDispatchRsp(pTask, pRsp);
+ streamProcessDispatchRsp(pTask, pRsp, pMsg->code);
return 0;
} else {
return -1;
@@ -1379,12 +1430,12 @@ int32_t tqProcessTaskRetrieveRsp(STQ* pTq, SRpcMsg* pMsg) {
return 0;
}
-void vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
- STQ* pTq = pVnode->pTq;
- char* msgStr = pMsg->pCont;
- char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
- int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
- int32_t code = 0;
+int32_t vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
+ STQ* pTq = pVnode->pTq;
+ SMsgHead* msgStr = pMsg->pCont;
+ char* msgBody = POINTER_SHIFT(msgStr, sizeof(SMsgHead));
+ int32_t msgLen = pMsg->contLen - sizeof(SMsgHead);
+ int32_t code = 0;
SStreamDispatchReq req;
SDecoder decoder;
@@ -1407,16 +1458,45 @@ void vnodeEnqueueStreamMsg(SVnode* pVnode, SRpcMsg* pMsg) {
streamProcessDispatchReq(pTask, &req, &rsp, false);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
- return;
+ return 0;
}
+ code = TSDB_CODE_STREAM_TASK_NOT_EXIST;
+
FAIL:
- if (pMsg->info.handle == NULL) return;
+ if (pMsg->info.handle == NULL) return -1;
+
+ SMsgHead* pRspHead = rpcMallocCont(sizeof(SMsgHead) + sizeof(SStreamDispatchRsp));
+ if (pRspHead == NULL) {
+ SRpcMsg rsp = {
+ .code = TSDB_CODE_OUT_OF_MEMORY,
+ .info = pMsg->info,
+ };
+ tqDebug("send dispatch error rsp, code: %x", code);
+ tmsgSendRsp(&rsp);
+ rpcFreeCont(pMsg->pCont);
+ taosFreeQitem(pMsg);
+ return -1;
+ }
+
+ pRspHead->vgId = htonl(req.upstreamNodeId);
+ SStreamDispatchRsp* pRsp = POINTER_SHIFT(pRspHead, sizeof(SMsgHead));
+ pRsp->streamId = htobe64(req.streamId);
+ pRsp->upstreamTaskId = htonl(req.upstreamTaskId);
+ pRsp->upstreamNodeId = htonl(req.upstreamNodeId);
+ pRsp->downstreamNodeId = htonl(pVnode->config.vgId);
+ pRsp->downstreamTaskId = htonl(req.taskId);
+ pRsp->inputStatus = TASK_OUTPUT_STATUS__NORMAL;
+
SRpcMsg rsp = {
.code = code,
.info = pMsg->info,
+ .contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp),
+ .pCont = pRspHead,
};
+ tqDebug("send dispatch error rsp, code: %x", code);
tmsgSendRsp(&rsp);
rpcFreeCont(pMsg->pCont);
taosFreeQitem(pMsg);
+ return -1;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
index dbc02363ea..2ae3115c0a 100644
--- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c
@@ -333,11 +333,12 @@ int32_t tsdbRetrieveCacheRows(void* pReader, SSDataBlock* pResBlock, const int32
code = TSDB_CODE_INVALID_PARA;
}
- _end:
+_end:
tsdbDataFReaderClose(&pr->pDataFReaderLast);
tsdbDataFReaderClose(&pr->pDataFReader);
tsdbUntakeReadSnap(pr->pVnode->pTsdb, pr->pReadSnap, "cache-l");
+ resetLastBlockLoadInfo(pr->pLoadInfo);
for (int32_t j = 0; j < pr->numOfCols; ++j) {
taosMemoryFree(pRes[j]);
diff --git a/source/dnode/vnode/src/tsdb/tsdbMemTable.c b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
index 0189ced3c6..48b3e9ff77 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMemTable.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMemTable.c
@@ -116,6 +116,13 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgI
if (info.suid) {
metaGetInfo(pTsdb->pVnode->pMeta, info.suid, &info);
}
+ if (pMsgIter->sversion != info.skmVer) {
+ tsdbError("vgId:%d, req sver:%d, skmVer:%d suid:%" PRId64 " uid:%" PRId64,
+ TD_VID(pTsdb->pVnode), pMsgIter->sversion, info.skmVer, suid, uid);
+ code = TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
+ goto _err;
+ }
+
pRsp->sver = info.skmVer;
// create/get STbData to op
@@ -133,6 +140,7 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgI
return code;
_err:
+ terrno = code;
return code;
}
diff --git a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
index 745b877f09..01fbcf657f 100644
--- a/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
+++ b/source/dnode/vnode/src/tsdb/tsdbMergeTree.c
@@ -72,6 +72,7 @@ void resetLastBlockLoadInfo(SSttBlockLoadInfo *pLoadInfo) {
pLoadInfo[i].elapsedTime = 0;
pLoadInfo[i].loadBlocks = 0;
+ pLoadInfo[i].sttBlockLoaded = false;
}
}
@@ -278,9 +279,9 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
(*pIter)->pBlockLoadInfo = pBlockLoadInfo;
- size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
- if (size == 0) {
+ if (!pBlockLoadInfo->sttBlockLoaded) {
int64_t st = taosGetTimestampUs();
+ pBlockLoadInfo->sttBlockLoaded = true;
code = tsdbReadSttBlk(pReader, iStt, pBlockLoadInfo->aSttBlk);
if (code) {
@@ -288,7 +289,7 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
}
// only apply to the child tables, ordinary tables will not incur this filter procedure.
- size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
+ size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
if (size >= 1) {
SSttBlk *pStart = taosArrayGet(pBlockLoadInfo->aSttBlk, 0);
@@ -296,10 +297,10 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
// all identical
if (pStart->suid == pEnd->suid) {
- if (pStart->suid == suid) {
- // do nothing
- } else if (pStart->suid != suid) {
+ if (pStart->suid != suid) {
// no qualified stt block existed
+ taosArrayClear(pBlockLoadInfo->aSttBlk);
+
(*pIter)->iSttBlk = -1;
double el = (taosGetTimestampUs() - st) / 1000.0;
tsdbDebug("load the last file info completed, elapsed time:%.2fms, %s", el, idStr);
@@ -330,7 +331,7 @@ int32_t tLDataIterOpen(struct SLDataIter **pIter, SDataFReader *pReader, int32_t
tsdbDebug("load the last file info completed, elapsed time:%.2fms, %s", el, idStr);
}
- size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
+ size_t size = taosArrayGetSize(pBlockLoadInfo->aSttBlk);
// find the start block
(*pIter)->iSttBlk = binarySearchForStartBlock(pBlockLoadInfo->aSttBlk->pData, size, uid, backward);
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index 2b12968f2e..4099dafa26 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -185,11 +185,11 @@ static int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STabl
SRowMerger* pMerger, SVersionRange* pVerRange);
static int32_t doMergeRowsInBuf(SIterInfo* pIter, uint64_t uid, int64_t ts, SArray* pDelList, SRowMerger* pMerger,
STsdbReader* pReader);
-static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid);
+static int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, STableBlockScanInfo* pInfo);
static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pReader, SBlockData* pBlockData,
int32_t rowIndex);
static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
-static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order, SVersionRange* pRange);
+static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order, SVersionRange* pVerRange);
static int32_t doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList,
STSRow** pTSRow, STsdbReader* pReader, bool* freeTSRow);
@@ -208,7 +208,6 @@ static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
static int32_t doBuildDataBlock(STsdbReader* pReader);
static TSDBKEY getCurrentKeyInBuf(STableBlockScanInfo* pScanInfo, STsdbReader* pReader);
static bool hasDataInFileBlock(const SBlockData* pBlockData, const SFileBlockDumpInfo* pDumpInfo);
-static bool hasDataInLastBlock(SLastBlockReader* pLastBlockReader);
static bool outOfTimeWindow(int64_t ts, STimeWindow* pWindow) { return (ts > pWindow->ekey) || (ts < pWindow->skey); }
@@ -534,7 +533,7 @@ static SSDataBlock* createResBlock(SQueryTableDataCond* pCond, int32_t capacity)
}
for (int32_t i = 0; i < pCond->numOfCols; ++i) {
- SColumnInfoData colInfo = {{0}, 0};
+ SColumnInfoData colInfo = {0, {0}};
colInfo.info = pCond->colList[i];
blockDataAppendColInfo(pResBlock, &colInfo);
}
@@ -1529,8 +1528,8 @@ static bool tryCopyDistinctRowFromFileBlock(STsdbReader* pReader, SBlockData* pB
// opt version
// 1. it is not a border point
// 2. the direct next point is not an duplicated timestamp
- if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && pReader->order == TSDB_ORDER_ASC) ||
- (pDumpInfo->rowIndex > 0 && pReader->order == TSDB_ORDER_DESC)) {
+ bool asc = (pReader->order == TSDB_ORDER_ASC);
+ if ((pDumpInfo->rowIndex < pDumpInfo->totalRows - 1 && asc) || (pDumpInfo->rowIndex > 0 && (!asc))) {
int32_t step = pReader->order == TSDB_ORDER_ASC ? 1 : -1;
int64_t nextKey = pBlockData->aTSKEY[pDumpInfo->rowIndex + step];
@@ -1749,7 +1748,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
return code;
}
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
@@ -1770,6 +1769,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
// only last block exists
if ((!mergeBlockData) || (tsLastBlock != pBlockData->aTSKEY[pDumpInfo->rowIndex])) {
if (tryCopyDistinctRowFromSttBlock(&fRow, pLastBlockReader, pBlockScanInfo, tsLastBlock, pReader)) {
+ pBlockScanInfo->lastKey = tsLastBlock;
return TSDB_CODE_SUCCESS;
} else {
int32_t code = tRowMergerInit(&merge, &fRow, pReader->pSchema);
@@ -1786,7 +1786,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
return code;
}
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
@@ -1810,7 +1810,7 @@ static int32_t doMergeFileBlockAndLastBlock(SLastBlockReader* pLastBlockReader,
return code;
}
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
@@ -1858,7 +1858,7 @@ static int32_t mergeFileBlockAndLastBlock(STsdbReader* pReader, SLastBlockReader
return code;
}
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
@@ -2082,7 +2082,7 @@ static int32_t doMergeMultiLevelRows(STsdbReader* pReader, STableBlockScanInfo*
return code;
}
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
@@ -2186,7 +2186,7 @@ static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDum
static bool initLastBlockReader(SLastBlockReader* pLBlockReader, STableBlockScanInfo* pScanInfo, STsdbReader* pReader) {
// the last block reader has been initialized for this table.
if (pLBlockReader->uid == pScanInfo->uid) {
- return true;
+ return hasDataInLastBlock(pLBlockReader);
}
if (pLBlockReader->uid != 0) {
@@ -2233,6 +2233,7 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
STsdbReader* pReader) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
+ pBlockScanInfo->lastKey = key;
return TSDB_CODE_SUCCESS;
} else {
TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
@@ -2251,7 +2252,7 @@ int32_t mergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pBloc
return code;
}
- doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, pBlockScanInfo);
taosMemoryFree(pTSRow);
tRowMergerClear(&merge);
@@ -2299,26 +2300,32 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
SFileDataBlockInfo* pBlockInfo = getCurrentBlockInfo(&pReader->status.blockIter);
SLastBlockReader* pLastBlockReader = pReader->status.fileIter.pLastBlockReader;
+ bool asc = ASCENDING_TRAVERSE(pReader->order);
int64_t st = taosGetTimestampUs();
+ int32_t step = asc ? 1 : -1;
STableBlockScanInfo* pBlockScanInfo = NULL;
if (pBlockInfo != NULL) {
- pBlockScanInfo = *(STableBlockScanInfo**)taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
- if (pBlockScanInfo == NULL) {
+ void* p = taosHashGet(pReader->status.pTableMap, &pBlockInfo->uid, sizeof(pBlockInfo->uid));
+ if (p == NULL) {
code = TSDB_CODE_INVALID_PARA;
tsdbError("failed to locate the uid:%" PRIu64 " in query table uid list, total tables:%d, %s", pBlockInfo->uid,
taosHashGetSize(pReader->status.pTableMap), pReader->idStr);
goto _end;
}
+ pBlockScanInfo = *(STableBlockScanInfo**) p;
+
SDataBlk* pBlock = getCurrentBlock(&pReader->status.blockIter);
TSDBKEY keyInBuf = getCurrentKeyInBuf(pBlockScanInfo, pReader);
// it is a clean block, load it directly
if (isCleanFileDataBlock(pReader, pBlockInfo, pBlock, pBlockScanInfo, keyInBuf, pLastBlockReader)) {
- if (pReader->order == TSDB_ORDER_ASC ||
- (pReader->order == TSDB_ORDER_DESC && (!hasDataInLastBlock(pLastBlockReader)))) {
+ if (asc || ((!asc) && (!hasDataInLastBlock(pLastBlockReader)))) {
copyBlockDataToSDataBlock(pReader, pBlockScanInfo);
+
+ // record the last key value
+ pBlockScanInfo->lastKey = asc? pBlock->maxKey.ts:pBlock->minKey.ts;
goto _end;
}
}
@@ -2328,7 +2335,6 @@ static int32_t buildComposedDataBlock(STsdbReader* pReader) {
SFileBlockDumpInfo* pDumpInfo = &pReader->status.fBlockDumpInfo;
SBlockData* pBlockData = &pReader->status.fileBlockData;
- int32_t step = ASCENDING_TRAVERSE(pReader->order) ? 1 : -1;
while (1) {
bool hasBlockData = false;
@@ -2710,6 +2716,9 @@ static int32_t doBuildDataBlock(STsdbReader* pReader) {
pInfo->window = (STimeWindow){.skey = pBlock->minKey.ts, .ekey = pBlock->maxKey.ts};
setComposedBlockFlag(pReader, false);
setBlockAllDumped(&pStatus->fBlockDumpInfo, pBlock->maxKey.ts, pReader->order);
+
+ // update the last key for the corresponding table
+ pScanInfo->lastKey = ASCENDING_TRAVERSE(pReader->order)? pInfo->window.ekey:pInfo->window.skey;
}
}
@@ -3214,7 +3223,6 @@ int32_t doMergeRowsInFileBlocks(SBlockData* pBlockData, STableBlockScanInfo* pSc
int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockScanInfo* pScanInfo, int64_t ts,
SRowMerger* pMerger, SVersionRange* pVerRange) {
- pScanInfo->lastKey = ts;
while (nextRowFromLastBlocks(pLastBlockReader, pScanInfo, pVerRange)) {
int64_t next1 = getCurrentKeyInLastBlock(pLastBlockReader);
if (next1 == ts) {
@@ -3407,9 +3415,10 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
return TSDB_CODE_SUCCESS;
}
-int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, uint64_t uid) {
+int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow* pTSRow, STableBlockScanInfo* pScanInfo) {
int32_t numOfRows = pBlock->info.rows;
int32_t numOfCols = (int32_t)taosArrayGetSize(pBlock->pDataBlock);
+ int64_t uid = pScanInfo->uid;
SBlockLoadSuppInfo* pSupInfo = &pReader->suppInfo;
STSchema* pSchema = doGetSchemaForTSRow(pTSRow->sver, pReader, uid);
@@ -3448,6 +3457,7 @@ int32_t doAppendRowFromTSRow(SSDataBlock* pBlock, STsdbReader* pReader, STSRow*
}
pBlock->info.rows += 1;
+ pScanInfo->lastKey = pTSRow->ts;
return TSDB_CODE_SUCCESS;
}
@@ -3511,7 +3521,8 @@ int32_t buildDataBlockFromBufImpl(STableBlockScanInfo* pBlockScanInfo, int64_t e
break;
}
- doAppendRowFromTSRow(pBlock, pReader, pTSRow, pBlockScanInfo->uid);
+ doAppendRowFromTSRow(pBlock, pReader, pTSRow, pBlockScanInfo);
+
if (freeTSRow) {
taosMemoryFree(pTSRow);
}
diff --git a/source/dnode/vnode/src/vnd/vnodeOpen.c b/source/dnode/vnode/src/vnd/vnodeOpen.c
index f7164c4ac3..8c2036b97b 100644
--- a/source/dnode/vnode/src/vnd/vnodeOpen.c
+++ b/source/dnode/vnode/src/vnd/vnodeOpen.c
@@ -242,12 +242,7 @@ _err:
return NULL;
}
-void vnodePreClose(SVnode *pVnode) {
- if (pVnode) {
- syncLeaderTransfer(pVnode->sync);
- syncPreStop(pVnode->sync);
- }
-}
+void vnodePreClose(SVnode *pVnode) { vnodeSyncPreClose(pVnode); }
void vnodeClose(SVnode *pVnode) {
if (pVnode) {
diff --git a/source/dnode/vnode/src/vnd/vnodeSvr.c b/source/dnode/vnode/src/vnd/vnodeSvr.c
index 435dbde30a..78d95cf0d7 100644
--- a/source/dnode/vnode/src/vnd/vnodeSvr.c
+++ b/source/dnode/vnode/src/vnd/vnodeSvr.c
@@ -229,8 +229,7 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
break;
/* TQ */
case TDMT_VND_TMQ_SUBSCRIBE:
- if (tqProcessSubscribeReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
- pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ if (tqProcessSubscribeReq(pVnode->pTq, version, pReq, len) < 0) {
goto _err;
}
break;
@@ -240,26 +239,22 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
}
break;
case TDMT_VND_TMQ_COMMIT_OFFSET:
- if (tqProcessOffsetCommitReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
- pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ if (tqProcessOffsetCommitReq(pVnode->pTq, version, pReq, pMsg->contLen - sizeof(SMsgHead)) < 0) {
goto _err;
}
break;
case TDMT_VND_TMQ_ADD_CHECKINFO:
- if (tqProcessAddCheckInfoReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
- pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ if (tqProcessAddCheckInfoReq(pVnode->pTq, version, pReq, len) < 0) {
goto _err;
}
break;
case TDMT_VND_TMQ_DEL_CHECKINFO:
- if (tqProcessDelCheckInfoReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
- pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ if (tqProcessDelCheckInfoReq(pVnode->pTq, version, pReq, len) < 0) {
goto _err;
}
break;
case TDMT_STREAM_TASK_DEPLOY: {
- if (tqProcessTaskDeployReq(pVnode->pTq, version, POINTER_SHIFT(pMsg->pCont, sizeof(SMsgHead)),
- pMsg->contLen - sizeof(SMsgHead)) < 0) {
+ if (tqProcessTaskDeployReq(pVnode->pTq, version, pReq, len) < 0) {
goto _err;
}
} break;
@@ -273,6 +268,11 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
goto _err;
}
} break;
+ case TDMT_STREAM_TASK_CHECK_RSP: {
+ if (tqProcessStreamTaskCheckRsp(pVnode->pTq, version, pReq, len) < 0) {
+ goto _err;
+ }
+ } break;
case TDMT_VND_ALTER_CONFIRM:
vnodeProcessAlterConfirmReq(pVnode, version, pReq, len, pRsp);
break;
@@ -390,10 +390,12 @@ int32_t vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
return tqProcessPollReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_RUN:
return tqProcessTaskRunReq(pVnode->pTq, pMsg);
-#if 0
+#if 1
case TDMT_STREAM_TASK_DISPATCH:
return tqProcessTaskDispatchReq(pVnode->pTq, pMsg, true);
#endif
+ case TDMT_STREAM_TASK_CHECK:
+ return tqProcessStreamTaskCheckReq(pVnode->pTq, pMsg);
case TDMT_STREAM_TASK_DISPATCH_RSP:
return tqProcessTaskDispatchRsp(pVnode->pTq, pMsg);
case TDMT_STREAM_RETRIEVE:
@@ -863,6 +865,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
SEncoder encoder = {0};
SArray *newTbUids = NULL;
SVStatis statis = {0};
+ bool tbCreated = false;
terrno = TSDB_CODE_SUCCESS;
pRsp->code = 0;
@@ -896,11 +899,10 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
if (pBlock == NULL) break;
SSubmitBlkRsp submitBlkRsp = {0};
+ tbCreated = false;
// create table for auto create table mode
if (msgIter.schemaLen > 0) {
- submitBlkRsp.hashMeta = 1;
-
tDecoderInit(&decoder, pBlock->data, msgIter.schemaLen);
if (tDecodeSVCreateTbReq(&decoder, &createTbReq) < 0) {
pRsp->code = TSDB_CODE_INVALID_MSG;
@@ -937,12 +939,13 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
}
taosArrayPush(newTbUids, &createTbReq.uid);
+
+ submitBlkRsp.uid = createTbReq.uid;
+ submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
+ sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name);
+ tbCreated = true;
}
-
- submitBlkRsp.uid = createTbReq.uid;
- submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
- sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name);
-
+
msgIter.uid = createTbReq.uid;
if (createTbReq.type == TSDB_CHILD_TABLE) {
msgIter.suid = createTbReq.ctb.suid;
@@ -955,10 +958,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
#endif
tDecoderClear(&decoder);
taosArrayDestroy(createTbReq.ctb.tagName);
- } else {
- submitBlkRsp.tblFName = taosMemoryMalloc(TSDB_TABLE_FNAME_LEN);
- sprintf(submitBlkRsp.tblFName, "%s.", pVnode->config.dbname);
- }
+ }
if (tsdbInsertTableData(pVnode->pTsdb, version, &msgIter, pBlock, &submitBlkRsp) < 0) {
submitBlkRsp.code = terrno;
@@ -966,7 +966,9 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
submitRsp.numOfRows += submitBlkRsp.numOfRows;
submitRsp.affectedRows += submitBlkRsp.affectedRows;
- taosArrayPush(submitRsp.pArray, &submitBlkRsp);
+ if (tbCreated || submitBlkRsp.code) {
+ taosArrayPush(submitRsp.pArray, &submitBlkRsp);
+ }
}
if (taosArrayGetSize(newTbUids) > 0) {
diff --git a/source/dnode/vnode/src/vnd/vnodeSync.c b/source/dnode/vnode/src/vnd/vnodeSync.c
index e27ae07460..3913561ae7 100644
--- a/source/dnode/vnode/src/vnd/vnodeSync.c
+++ b/source/dnode/vnode/src/vnd/vnodeSync.c
@@ -342,52 +342,26 @@ static void vnodeSyncRollBackMsg(const SSyncFSM *pFsm, const SRpcMsg *pMsg, cons
TMSG_INFO(pMsg->msgType));
}
-#define USE_TSDB_SNAPSHOT
-
static int32_t vnodeSnapshotStartRead(const SSyncFSM *pFsm, void *pParam, void **ppReader) {
-#ifdef USE_TSDB_SNAPSHOT
SVnode *pVnode = pFsm->data;
SSnapshotParam *pSnapshotParam = pParam;
int32_t code = vnodeSnapReaderOpen(pVnode, pSnapshotParam->start, pSnapshotParam->end, (SVSnapReader **)ppReader);
return code;
-#else
- *ppReader = taosMemoryMalloc(32);
- return 0;
-#endif
}
static int32_t vnodeSnapshotStopRead(const SSyncFSM *pFsm, void *pReader) {
-#ifdef USE_TSDB_SNAPSHOT
SVnode *pVnode = pFsm->data;
int32_t code = vnodeSnapReaderClose(pReader);
return code;
-#else
- taosMemoryFree(pReader);
- return 0;
-#endif
}
static int32_t vnodeSnapshotDoRead(const SSyncFSM *pFsm, void *pReader, void **ppBuf, int32_t *len) {
-#ifdef USE_TSDB_SNAPSHOT
SVnode *pVnode = pFsm->data;
int32_t code = vnodeSnapRead(pReader, (uint8_t **)ppBuf, len);
return code;
-#else
- static int32_t times = 0;
- if (times++ < 5) {
- *len = 64;
- *ppBuf = taosMemoryMalloc(*len);
- snprintf(*ppBuf, *len, "snapshot block %d", times);
- } else {
- *len = 0;
- *ppBuf = NULL;
- }
- return 0;
-#endif
}
static int32_t vnodeSnapshotStartWrite(const SSyncFSM *pFsm, void *pParam, void **ppWriter) {
-#ifdef USE_TSDB_SNAPSHOT
SVnode *pVnode = pFsm->data;
SSnapshotParam *pSnapshotParam = pParam;
@@ -404,14 +378,9 @@ static int32_t vnodeSnapshotStartWrite(const SSyncFSM *pFsm, void *pParam, void
int32_t code = vnodeSnapWriterOpen(pVnode, pSnapshotParam->start, pSnapshotParam->end, (SVSnapWriter **)ppWriter);
return code;
-#else
- *ppWriter = taosMemoryMalloc(32);
- return 0;
-#endif
}
static int32_t vnodeSnapshotStopWrite(const SSyncFSM *pFsm, void *pWriter, bool isApply, SSnapshot *pSnapshot) {
-#ifdef USE_TSDB_SNAPSHOT
SVnode *pVnode = pFsm->data;
vInfo("vgId:%d, stop write vnode snapshot, apply:%d, index:%" PRId64 " term:%" PRIu64 " config:%" PRId64,
pVnode->config.vgId, isApply, pSnapshot->lastApplyIndex, pSnapshot->lastApplyTerm, pSnapshot->lastConfigIndex);
@@ -419,22 +388,14 @@ static int32_t vnodeSnapshotStopWrite(const SSyncFSM *pFsm, void *pWriter, bool
int32_t code = vnodeSnapWriterClose(pWriter, !isApply, pSnapshot);
vInfo("vgId:%d, apply vnode snapshot finished, code:0x%x", pVnode->config.vgId, code);
return code;
-#else
- taosMemoryFree(pWriter);
- return 0;
-#endif
}
static int32_t vnodeSnapshotDoWrite(const SSyncFSM *pFsm, void *pWriter, void *pBuf, int32_t len) {
-#ifdef USE_TSDB_SNAPSHOT
SVnode *pVnode = pFsm->data;
vDebug("vgId:%d, continue write vnode snapshot, len:%d", pVnode->config.vgId, len);
int32_t code = vnodeSnapWrite(pWriter, pBuf, len);
vDebug("vgId:%d, continue write vnode snapshot finished, len:%d", pVnode->config.vgId, len);
return code;
-#else
- return 0;
-#endif
}
static void vnodeRestoreFinish(const SSyncFSM *pFsm) {
@@ -461,7 +422,6 @@ static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
SVnode *pVnode = pFsm->data;
vDebug("vgId:%d, become follower", pVnode->config.vgId);
- // clear old leader resource
taosThreadMutexLock(&pVnode->lock);
if (pVnode->blocked) {
pVnode->blocked = false;
@@ -474,15 +434,6 @@ static void vnodeBecomeFollower(const SSyncFSM *pFsm) {
static void vnodeBecomeLeader(const SSyncFSM *pFsm) {
SVnode *pVnode = pFsm->data;
vDebug("vgId:%d, become leader", pVnode->config.vgId);
-
-#if 0
- taosThreadMutexLock(&pVnode->lock);
- if (pVnode->blocked) {
- pVnode->blocked = false;
- tsem_post(&pVnode->syncSem);
- }
- taosThreadMutexUnlock(&pVnode->lock);
-#endif
}
static SSyncFSM *vnodeSyncMakeFsm(SVnode *pVnode) {
@@ -543,12 +494,25 @@ int32_t vnodeSyncOpen(SVnode *pVnode, char *path) {
}
void vnodeSyncStart(SVnode *pVnode) {
- vDebug("vgId:%d, start sync", pVnode->config.vgId);
+ vInfo("vgId:%d, start sync", pVnode->config.vgId);
syncStart(pVnode->sync);
}
+void vnodeSyncPreClose(SVnode *pVnode) {
+ vInfo("vgId:%d, pre close sync", pVnode->config.vgId);
+ syncLeaderTransfer(pVnode->sync);
+ syncPreStop(pVnode->sync);
+ taosThreadMutexLock(&pVnode->lock);
+ if (pVnode->blocked) {
+ vInfo("vgId:%d, post block after close sync", pVnode->config.vgId);
+ pVnode->blocked = false;
+ tsem_post(&pVnode->syncSem);
+ }
+ taosThreadMutexUnlock(&pVnode->lock);
+}
+
void vnodeSyncClose(SVnode *pVnode) {
- vDebug("vgId:%d, close sync", pVnode->config.vgId);
+ vInfo("vgId:%d, close sync", pVnode->config.vgId);
syncStop(pVnode->sync);
}
diff --git a/source/libs/catalog/inc/catalogInt.h b/source/libs/catalog/inc/catalogInt.h
index 0786321686..8c699bb59b 100644
--- a/source/libs/catalog/inc/catalogInt.h
+++ b/source/libs/catalog/inc/catalogInt.h
@@ -33,6 +33,7 @@ extern "C" {
#define CTG_DEFAULT_MAX_RETRY_TIMES 3
#define CTG_DEFAULT_BATCH_NUM 64
#define CTG_DEFAULT_FETCH_NUM 8
+#define CTG_MAX_COMMAND_LEN 512
#define CTG_RENT_SLOT_SECOND 1.5
@@ -223,6 +224,7 @@ typedef struct SCtgUserAuth {
typedef struct SCatalog {
uint64_t clusterId;
+ bool stopUpdate;
SHashObj* userCache; // key:user, value:SCtgUserAuth
SHashObj* dbCache; // key:dbname, value:SCtgDBCache
SCtgRentMgmt dbRent;
@@ -671,7 +673,7 @@ void ctgdShowClusterCache(SCatalog* pCtg);
int32_t ctgdShowCacheInfo(void);
int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq);
-int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
+int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetasCtx* ctx, int32_t dbIdx,
int32_t* fetchIdx, int32_t baseResIdx, SArray* pList);
@@ -786,6 +788,7 @@ void ctgFreeTbCacheImpl(SCtgTbCache* pCache);
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName);
int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);
SName* ctgGetFetchName(SArray* pNames, SCtgFetch* pFetch);
+int32_t ctgdGetOneHandle(SCatalog **pHandle);
extern SCatalogMgmt gCtgMgmt;
extern SCtgDebug gCTGDebug;
diff --git a/source/libs/catalog/src/catalog.c b/source/libs/catalog/src/catalog.c
index e66cdb14ce..1f87066c82 100644
--- a/source/libs/catalog/src/catalog.c
+++ b/source/libs/catalog/src/catalog.c
@@ -202,7 +202,7 @@ int32_t ctgGetTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* ctx
int32_t code = 0;
STableMetaOutput* output = NULL;
- CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, pConn, ctx, pTableMeta));
+ CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, ctx, pTableMeta));
if (*pTableMeta || (ctx->flag & CTG_FLAG_ONLY_CACHE)) {
goto _return;
}
@@ -959,14 +959,14 @@ int32_t catalogGetTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName
CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
}
-int32_t catalogGetCachedTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableMeta** pTableMeta) {
+int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta) {
CTG_API_ENTER();
SCtgTbMetaCtx ctx = {0};
ctx.pName = (SName*)pTableName;
ctx.flag = CTG_FLAG_UNKNOWN_STB | CTG_FLAG_ONLY_CACHE;
- CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
+ CTG_API_LEAVE(ctgGetTbMeta(pCtg, NULL, &ctx, pTableMeta));
}
@@ -981,15 +981,14 @@ int32_t catalogGetSTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SNam
CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
}
-int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
- STableMeta** pTableMeta) {
+int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta) {
CTG_API_ENTER();
SCtgTbMetaCtx ctx = {0};
ctx.pName = (SName*)pTableName;
ctx.flag = CTG_FLAG_STB | CTG_FLAG_ONLY_CACHE;
- CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
+ CTG_API_LEAVE(ctgGetTbMeta(pCtg, NULL, &ctx, pTableMeta));
}
@@ -1114,11 +1113,10 @@ int32_t catalogGetTableHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const
CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup, NULL));
}
-int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
- SVgroupInfo* pVgroup, bool* exists) {
+int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists) {
CTG_API_ENTER();
- CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup, exists));
+ CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, NULL, pTableName, pVgroup, exists));
}
#if 0
@@ -1387,16 +1385,16 @@ _return:
CTG_API_LEAVE(code);
}
-int32_t catalogChkAuthFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type,
+int32_t catalogChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type,
bool* pass, bool* exists) {
CTG_API_ENTER();
- if (NULL == pCtg || NULL == pConn || NULL == user || NULL == dbFName || NULL == pass || NULL == exists) {
+ if (NULL == pCtg || NULL == user || NULL == dbFName || NULL == pass || NULL == exists) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
int32_t code = 0;
- CTG_ERR_JRET(ctgChkAuth(pCtg, pConn, user, dbFName, type, pass, exists));
+ CTG_ERR_JRET(ctgChkAuth(pCtg, NULL, user, dbFName, type, pass, exists));
_return:
diff --git a/source/libs/catalog/src/ctgAsync.c b/source/libs/catalog/src/ctgAsync.c
index 1f1a210699..b601865306 100644
--- a/source/libs/catalog/src/ctgAsync.c
+++ b/source/libs/catalog/src/ctgAsync.c
@@ -1204,11 +1204,15 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
stbCtx.flag = flag;
stbCtx.pName = &stbName;
- taosMemoryFreeClear(pOut->tbMeta);
- CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
- if (pOut->tbMeta) {
+ STableMeta *stbMeta = NULL;
+ ctgReadTbMetaFromCache(pCtg, &stbCtx, &stbMeta);
+ if (stbMeta && stbMeta->sversion >= pOut->tbMeta->sversion) {
ctgDebug("use cached stb meta, tbName:%s", tNameGetTableName(pName));
exist = 1;
+ } else {
+ ctgDebug("need to get/update stb meta, tbName:%s", tNameGetTableName(pName));
+ taosMemoryFreeClear(pOut->tbMeta);
+ taosMemoryFreeClear(stbMeta);
}
}
@@ -1641,7 +1645,7 @@ int32_t ctgLaunchGetTbMetaTask(SCtgTask* pTask) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
- CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, pConn, (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res));
+ CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res));
if (pTask->res) {
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/catalog/src/ctgCache.c b/source/libs/catalog/src/ctgCache.c
index 61da0724c5..51807a145a 100644
--- a/source/libs/catalog/src/ctgCache.c
+++ b/source/libs/catalog/src/ctgCache.c
@@ -248,6 +248,8 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog *pCtg, char *dbFName, uint64_t suid,
goto _return;
}
+ taosHashRelease(dbCache->stbCache, stName);
+
CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
@@ -1550,7 +1552,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
char *dbFName = msg->dbFName;
SCatalog *pCtg = msg->pCtg;
- if (NULL == dbInfo->vgHash) {
+ if (pCtg->stopUpdate || NULL == dbInfo->vgHash) {
goto _return;
}
@@ -1620,6 +1622,10 @@ int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) {
SCtgDropDBMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@@ -1646,6 +1652,10 @@ int32_t ctgOpDropDbVgroup(SCtgCacheOperation *operation) {
SCtgDropDbVgroupMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@@ -1675,6 +1685,10 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
STableMetaOutput *pMeta = msg->pMeta;
SCtgDBCache *dbCache = NULL;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
if ((!CTG_IS_META_CTABLE(pMeta->metaType)) && NULL == pMeta->tbMeta) {
ctgError("no valid tbmeta got from meta rsp, dbFName:%s, tbName:%s", pMeta->dbFName, pMeta->tbName);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
@@ -1723,6 +1737,10 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
SCtgDropStbMetaMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@@ -1776,6 +1794,10 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
SCtgDropTblMetaMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@@ -1819,6 +1841,10 @@ int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) {
SCtgUpdateUserMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user));
if (NULL == pUser) {
SCtgUserAuth userAuth = {0};
@@ -1872,8 +1898,12 @@ int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
int32_t code = 0;
SCtgUpdateEpsetMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
-
SCtgDBCache *dbCache = NULL;
+
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
CTG_ERR_JRET(ctgGetDBCache(pCtg, msg->dbFName, &dbCache));
if (NULL == dbCache) {
ctgDebug("db %s not exist, ignore epset update", msg->dbFName);
@@ -1920,6 +1950,10 @@ int32_t ctgOpUpdateTbIndex(SCtgCacheOperation *operation) {
STableIndex *pIndex = msg->pIndex;
SCtgDBCache *dbCache = NULL;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pIndex->dbFName, 0, &dbCache));
CTG_ERR_JRET(ctgWriteTbIndexToCache(pCtg, dbCache, pIndex->dbFName, pIndex->tbName, &pIndex));
@@ -1942,6 +1976,10 @@ int32_t ctgOpDropTbIndex(SCtgCacheOperation *operation) {
SCatalog *pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
+ if (pCtg->stopUpdate) {
+ goto _return;
+ }
+
CTG_ERR_JRET(ctgGetDBCache(pCtg, msg->dbFName, &dbCache));
if (NULL == dbCache) {
return TSDB_CODE_SUCCESS;
@@ -2154,7 +2192,7 @@ int32_t ctgStartUpdateThread() {
return TSDB_CODE_SUCCESS;
}
-int32_t ctgGetTbMetaFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx *ctx, STableMeta **pTableMeta) {
+int32_t ctgGetTbMetaFromCache(SCatalog *pCtg, SCtgTbMetaCtx *ctx, STableMeta **pTableMeta) {
if (IS_SYS_DBNAME(ctx->pName->dbname)) {
CTG_FLAG_SET_SYS_DB(ctx->flag);
}
diff --git a/source/libs/catalog/src/ctgDbg.c b/source/libs/catalog/src/ctgDbg.c
index dbca2ad977..26b5903bb0 100644
--- a/source/libs/catalog/src/ctgDbg.c
+++ b/source/libs/catalog/src/ctgDbg.c
@@ -226,28 +226,45 @@ _return:
CTG_RET(code);
}
-int32_t ctgdEnableDebug(char *option) {
+int32_t ctgdEnableDebug(char *option, bool enable) {
if (0 == strcasecmp(option, "lock")) {
- gCTGDebug.lockEnable = true;
- qDebug("lock debug enabled");
+ gCTGDebug.lockEnable = enable;
+ qDebug("catalog lock debug set to %d", enable);
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "cache")) {
- gCTGDebug.cacheEnable = true;
- qDebug("cache debug enabled");
+ gCTGDebug.cacheEnable = enable;
+ qDebug("catalog cache debug set to %d", enable);
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "api")) {
- gCTGDebug.apiEnable = true;
- qDebug("api debug enabled");
+ gCTGDebug.apiEnable = enable;
+ qDebug("catalog api debug set to %d", enable);
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "meta")) {
- gCTGDebug.metaEnable = true;
- qDebug("api debug enabled");
+ gCTGDebug.metaEnable = enable;
+ qDebug("catalog meta debug set to %d", enable);
+ return TSDB_CODE_SUCCESS;
+ }
+
+ if (0 == strcasecmp(option, "stopUpdate")) {
+ SCatalog *pCtg = NULL;
+
+ void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
+ while (pIter) {
+ pCtg = *(SCatalog **)pIter;
+
+ pCtg->stopUpdate = enable;
+
+ pIter = taosHashIterate(gCtgMgmt.pCluster, pIter);
+ }
+
+ qDebug("catalog stopUpdate set to %d", enable);
+
return TSDB_CODE_SUCCESS;
}
@@ -256,6 +273,77 @@ int32_t ctgdEnableDebug(char *option) {
return TSDB_CODE_CTG_INTERNAL_ERROR;
}
+int32_t ctgdHandleDbgCommand(char *command) {
+ if (NULL == command) {
+ CTG_RET(TSDB_CODE_INVALID_PARA);
+ }
+
+ if (strlen(command) > CTG_MAX_COMMAND_LEN) {
+ CTG_RET(TSDB_CODE_INVALID_PARA);
+ }
+
+ char *dup = strdup(command);
+ char *option = NULL;
+ char *param = NULL;
+
+ int32_t i = 0;
+ bool newItem = true;
+ while (*(dup + i)) {
+ if (isspace(*(dup + i))) {
+ *(dup + i) = 0;
+ ++i;
+ newItem = true;
+ continue;
+ }
+
+ if (!newItem) {
+ ++i;
+ continue;
+ }
+
+ newItem = false;
+ if (NULL == option) {
+ option = dup + i;
+ ++i;
+ continue;
+ }
+
+ if (NULL == param) {
+ param = dup + i;
+ ++i;
+ continue;
+ }
+
+ taosMemoryFree(dup);
+ CTG_RET(TSDB_CODE_INVALID_PARA);
+ }
+
+ bool enable = atoi(param);
+
+ int32_t code = ctgdEnableDebug(option, enable);
+
+ taosMemoryFree(dup);
+
+ CTG_RET(code);
+}
+
+int32_t ctgdGetOneHandle(SCatalog **pHandle) {
+ SCatalog *pCtg = NULL;
+
+ void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
+ while (pIter) {
+ pCtg = *(SCatalog **)pIter;
+
+ taosHashCancelIterate(gCtgMgmt.pCluster, pIter);
+ break;
+ }
+
+ *pHandle = pCtg;
+
+ return TSDB_CODE_SUCCESS;
+}
+
+
int32_t ctgdGetStatNum(char *option, void *res) {
if (0 == strcasecmp(option, "runtime.numOfOpDequeue")) {
*(uint64_t *)res = atomic_load_64(&gCtgMgmt.stat.runtime.numOfOpDequeue);
diff --git a/source/libs/catalog/test/catalogTests.cpp b/source/libs/catalog/test/catalogTests.cpp
index 489d174e17..ebf7c7baeb 100644
--- a/source/libs/catalog/test/catalogTests.cpp
+++ b/source/libs/catalog/test/catalogTests.cpp
@@ -41,7 +41,6 @@
namespace {
extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog *pCatalog, int32_t type);
-extern "C" int32_t ctgdEnableDebug(char *option);
extern "C" int32_t ctgdGetStatNum(char *option, void *res);
void ctgTestSetRspTableMeta();
@@ -49,6 +48,8 @@ void ctgTestSetRspCTableMeta();
void ctgTestSetRspSTableMeta();
void ctgTestSetRspMultiSTableMeta();
+extern int32_t clientConnRefPool;
+
enum {
CTGT_RSP_VGINFO = 1,
CTGT_RSP_TBMETA,
@@ -151,10 +152,10 @@ void ctgTestInitLogFile() {
qDebugFlag = 159;
strcpy(tsLogDir, TD_LOG_DIR_PATH);
- ctgdEnableDebug("api");
- ctgdEnableDebug("meta");
- ctgdEnableDebug("cache");
- ctgdEnableDebug("lock");
+ ctgdEnableDebug("api", true);
+ ctgdEnableDebug("meta", true);
+ ctgdEnableDebug("cache", true);
+ ctgdEnableDebug("lock", true);
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
@@ -1204,6 +1205,34 @@ void *ctgTestSetCtableMetaThread(void *param) {
}
+void ctgTestFetchRows(TAOS_RES *result, int32_t *rows) {
+ TAOS_ROW row;
+ int num_fields = taos_num_fields(result);
+ TAOS_FIELD *fields = taos_fetch_fields(result);
+ char temp[256];
+
+ // fetch the records row by row
+ while ((row = taos_fetch_row(result))) {
+ (*rows)++;
+ memset(temp, 0, sizeof(temp));
+ taos_print_row(temp, row, fields, num_fields);
+ printf("\t[%s]\n", temp);
+ }
+}
+
+void ctgTestExecQuery(TAOS * taos, char* sql, bool fetch, int32_t *rows) {
+ TAOS_RES *result = taos_query(taos, sql);
+ int code = taos_errno(result);
+ ASSERT_EQ(code, 0);
+
+ if (fetch) {
+ ctgTestFetchRows(result, rows);
+ }
+
+ taos_free_result(result);
+}
+
+
TEST(tableMeta, normalTable) {
struct SCatalog *pCtg = NULL;
SVgroupInfo vgInfo = {0};
@@ -1245,7 +1274,7 @@ TEST(tableMeta, normalTable) {
memset(&vgInfo, 0, sizeof(vgInfo));
bool exists = false;
- code = catalogGetCachedTableHashVgroup(pCtg, mockPointer, &n, &vgInfo, &exists);
+ code = catalogGetCachedTableHashVgroup(pCtg, &n, &vgInfo, &exists);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
@@ -1292,7 +1321,7 @@ TEST(tableMeta, normalTable) {
taosMemoryFree(tableMeta);
tableMeta = NULL;
- catalogGetCachedTableMeta(pCtg, mockPointer, &n, &tableMeta);
+ catalogGetCachedTableMeta(pCtg, &n, &tableMeta);
ASSERT_EQ(code, 0);
ASSERT_EQ(tableMeta->vgId, 8);
ASSERT_EQ(tableMeta->tableType, TSDB_NORMAL_TABLE);
@@ -1500,7 +1529,7 @@ TEST(tableMeta, superTableCase) {
}
tableMeta = NULL;
- code = catalogGetCachedSTableMeta(pCtg, mockPointer, &n, &tableMeta);
+ code = catalogGetCachedSTableMeta(pCtg, &n, &tableMeta);
ASSERT_EQ(code, 0);
ASSERT_EQ(tableMeta->vgId, 0);
ASSERT_EQ(tableMeta->tableType, TSDB_SUPER_TABLE);
@@ -2772,7 +2801,7 @@ TEST(apiTest, catalogChkAuth_test) {
bool pass = false;
bool exists = false;
- code = catalogChkAuthFromCache(pCtg, mockPointer, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
+ code = catalogChkAuthFromCache(pCtg, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
ASSERT_EQ(code, 0);
ASSERT_EQ(exists, false);
@@ -2790,7 +2819,7 @@ TEST(apiTest, catalogChkAuth_test) {
}
}
- code = catalogChkAuthFromCache(pCtg, mockPointer, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
+ code = catalogChkAuthFromCache(pCtg, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
ASSERT_EQ(code, 0);
ASSERT_EQ(pass, true);
ASSERT_EQ(exists, true);
@@ -3063,6 +3092,58 @@ TEST(apiTest, catalogGetDnodeList_test) {
catalogDestroy();
}
+#ifdef INTEGRATION_TEST
+TEST(intTest, autoCreateTableTest) {
+ struct SCatalog *pCtg = NULL;
+
+ TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
+ ASSERT_TRUE(NULL != taos);
+
+ ctgdEnableDebug("api", true);
+ ctgdEnableDebug("meta", true);
+ ctgdEnableDebug("cache", true);
+ ctgdEnableDebug("lock", true);
+
+ ctgTestExecQuery(taos, "drop database if exists db1", false, NULL);
+ ctgTestExecQuery(taos, "create database db1", false, NULL);
+ ctgTestExecQuery(taos, "create stable db1.st1 (ts timestamp, f1 int) tags(tg1 int)", false, NULL);
+ ctgTestExecQuery(taos, "insert into db1.tb1 using db1.st1 tags(1) values(now, 1)", false, NULL);
+
+ ctgdGetOneHandle(&pCtg);
+
+ while (true) {
+ uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
+ if (2 != n) {
+ taosMsleep(50);
+ } else {
+ break;
+ }
+ }
+
+ uint64_t n = 0, m = 0;
+ ctgdGetStatNum("runtime.numOfOpDequeue", (void *)&n);
+
+ ctgTestExecQuery(taos, "insert into db1.tb1 using db1.st1 tags(1) values(now, 2)", false, NULL);
+
+ ctgTestExecQuery(taos, "insert into db1.tb1 values(now, 3)", false, NULL);
+
+ taosMsleep(1000);
+ ctgdGetStatNum("runtime.numOfOpDequeue", (void *)&m);
+
+ ASSERT_EQ(n, m);
+
+ ctgdEnableDebug("stopUpdate", true);
+ ctgTestExecQuery(taos, "alter table db1.st1 add column f2 double", false, NULL);
+
+ ctgdEnableDebug("stopUpdate", false);
+
+ ctgTestExecQuery(taos, "insert into db1.tb1 (ts, f1) values(now, 4)", false, NULL);
+
+ taos_close(taos);
+}
+
+#endif
+
int main(int argc, char **argv) {
testing::InitGoogleTest(&argc, argv);
diff --git a/source/libs/command/inc/commandInt.h b/source/libs/command/inc/commandInt.h
index ce4c4cf09b..4d0c5389e1 100644
--- a/source/libs/command/inc/commandInt.h
+++ b/source/libs/command/inc/commandInt.h
@@ -96,6 +96,7 @@ extern "C" {
#define COMMAND_RESET_LOG "resetLog"
#define COMMAND_SCHEDULE_POLICY "schedulePolicy"
#define COMMAND_ENABLE_RESCHEDULE "enableReSchedule"
+#define COMMAND_CATALOG_DEBUG "catalogDebug"
typedef struct SExplainGroup {
int32_t nodeNum;
diff --git a/source/libs/command/src/command.c b/source/libs/command/src/command.c
index 47a904bba2..76c84b3be9 100644
--- a/source/libs/command/src/command.c
+++ b/source/libs/command/src/command.c
@@ -571,6 +571,8 @@ static int32_t execAlterCmd(char* cmd, char* value, bool* processed) {
code = schedulerUpdatePolicy(atoi(value));
} else if (0 == strcasecmp(cmd, COMMAND_ENABLE_RESCHEDULE)) {
code = schedulerEnableReSchedule(atoi(value));
+ } else if (0 == strcasecmp(cmd, COMMAND_CATALOG_DEBUG)) {
+ code = ctgdHandleDbgCommand(value);
} else {
goto _return;
}
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 32386a72fd..6f8d60f538 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -30,10 +30,10 @@
// The numOfOutputGroups is specified by physical plan. and will not be affect by numOfGroups
struct STableListInfo {
bool oneTableForEachGroup;
- int32_t numOfOuputGroups; // the data block will be generated one by one
- int32_t* groupOffset; // keep the offset value for each group in the tableList
+ int32_t numOfOuputGroups; // the data block will be generated one by one
+ int32_t* groupOffset; // keep the offset value for each group in the tableList
SArray* pTableList;
- SHashObj* map; // speedup acquire the tableQueryInfo by table uid
+ SHashObj* map; // speedup acquire the tableQueryInfo by table uid
uint64_t suid;
};
@@ -421,7 +421,7 @@ static SColumnInfoData* getColInfoResult(void* metaHandle, int64_t suid, SArray*
}
for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
- SColumnInfoData colInfo = {{0}, 0};
+ SColumnInfoData colInfo = {0, {0}};
colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
@@ -582,7 +582,7 @@ int32_t getColInfoResultForGroupby(void* metaHandle, SNodeList* group, STableLis
}
for (int32_t i = 0; i < taosArrayGetSize(ctx.cInfoList); ++i) {
- SColumnInfoData colInfo = {{0}, 0};
+ SColumnInfoData colInfo = {0, {0}};
colInfo.info = *(SColumnInfo*)taosArrayGet(ctx.cInfoList, i);
blockDataAppendColInfo(pResBlock, &colInfo);
}
@@ -925,6 +925,15 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray*
SArray* pTbList = getTableNameList(pList);
int32_t numOfTables = taosArrayGetSize(pTbList);
+ SHashObj *uHash = NULL;
+ size_t listlen = taosArrayGetSize(list); // len > 0 means there already have uids
+ if (listlen > 0) {
+ uHash = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false, HASH_NO_LOCK);
+ for (int i = 0; i < listlen; i++) {
+ int64_t *uid = taosArrayGet(list, i);
+ taosHashPut(uHash, uid, sizeof(int64_t), &i, sizeof(i));
+ }
+ }
for (int i = 0; i < numOfTables; i++) {
char* name = taosArrayGetP(pTbList, i);
@@ -933,9 +942,12 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray*
if (metaGetTableUidByName(metaHandle, name, &uid) == 0) {
ETableType tbType = TSDB_TABLE_MAX;
if (metaGetTableTypeByName(metaHandle, name, &tbType) == 0 && tbType == TSDB_CHILD_TABLE) {
- taosArrayPush(list, &uid);
+ if (NULL == uHash || taosHashGet(uHash, &uid, sizeof(uid)) == NULL) {
+ taosArrayPush(list, &uid);
+ }
} else {
taosArrayDestroy(pTbList);
+ taosHashCleanup(uHash);
return -1;
}
} else {
@@ -944,6 +956,7 @@ static int32_t optimizeTbnameInCondImpl(void* metaHandle, int64_t suid, SArray*
}
}
+ taosHashCleanup(uHash);
taosArrayDestroy(pTbList);
return 0;
}
@@ -1482,10 +1495,6 @@ void relocateColumnData(SSDataBlock* pBlock, const SArray* pColMatchInfo, SArray
while (i < numOfSrcCols && j < taosArrayGetSize(pColMatchInfo)) {
SColumnInfoData* p = taosArrayGet(pCols, i);
SColMatchItem* pmInfo = taosArrayGet(pColMatchInfo, j);
- /* if (!outputEveryColumn && pmInfo->reserved) {
- j++;
- continue;
- }*/
if (p->info.colId == pmInfo->colId) {
SColumnInfoData* pDst = taosArrayGet(pBlock->pDataBlock, pmInfo->dstSlotId);
@@ -1678,9 +1687,7 @@ uint64_t tableListGetSize(const STableListInfo* pTableList) {
return taosArrayGetSize(pTableList->pTableList);
}
-uint64_t tableListGetSuid(const STableListInfo* pTableList) {
- return pTableList->suid;
-}
+uint64_t tableListGetSuid(const STableListInfo* pTableList) { return pTableList->suid; }
STableKeyInfo* tableListGetInfo(const STableListInfo* pTableList, int32_t index) {
if (taosArrayGetSize(pTableList->pTableList) == 0) {
@@ -1718,7 +1725,7 @@ int32_t tableListAddTableInfo(STableListInfo* pTableList, uint64_t uid, uint64_t
}
int32_t tableListGetGroupList(const STableListInfo* pTableList, int32_t ordinalGroupIndex, STableKeyInfo** pKeyInfo,
- int32_t* size) {
+ int32_t* size) {
int32_t total = tableListGetOutputGroups(pTableList);
if (ordinalGroupIndex < 0 || ordinalGroupIndex >= total) {
return TSDB_CODE_INVALID_PARA;
@@ -1728,7 +1735,7 @@ int32_t tableListGetGroupList(const STableListInfo* pTableList, int32_t ordinalG
// 1. only one group exists, and 2. one table exists for each group.
if (total == 1) {
*size = tableListGetSize(pTableList);
- *pKeyInfo = (*size == 0)? NULL:taosArrayGet(pTableList->pTableList, 0);
+ *pKeyInfo = (*size == 0) ? NULL : taosArrayGet(pTableList->pTableList, 0);
return TSDB_CODE_SUCCESS;
} else if (total == tableListGetSize(pTableList)) {
*size = 1;
@@ -1806,13 +1813,13 @@ void tableListClear(STableListInfo* pTableListInfo) {
}
static int32_t orderbyGroupIdComparFn(const void* p1, const void* p2) {
- STableKeyInfo* pInfo1 = (STableKeyInfo*) p1;
- STableKeyInfo* pInfo2 = (STableKeyInfo*) p2;
+ STableKeyInfo* pInfo1 = (STableKeyInfo*)p1;
+ STableKeyInfo* pInfo2 = (STableKeyInfo*)p2;
if (pInfo1->groupId == pInfo2->groupId) {
return 0;
} else {
- return pInfo1->groupId < pInfo2->groupId? -1:1;
+ return pInfo1->groupId < pInfo2->groupId ? -1 : 1;
}
}
@@ -1825,12 +1832,12 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
SArray* pList = taosArrayInit(4, sizeof(int32_t));
STableKeyInfo* pInfo = taosArrayGet(pTableListInfo->pTableList, 0);
- uint64_t gid = pInfo->groupId;
+ uint64_t gid = pInfo->groupId;
int32_t start = 0;
taosArrayPush(pList, &start);
- for(int32_t i = 1; i < size; ++i) {
+ for (int32_t i = 1; i < size; ++i) {
pInfo = taosArrayGet(pTableListInfo->pTableList, i);
if (pInfo->groupId != gid) {
taosArrayPush(pList, &i);
@@ -1845,16 +1852,17 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
return TDB_CODE_SUCCESS;
}
-int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* group, bool groupSort) {
+int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* group,
+ bool groupSort) {
int32_t code = TSDB_CODE_SUCCESS;
ASSERT(pTableListInfo->map != NULL);
- bool groupByTbname = groupbyTbname(group);
+ bool groupByTbname = groupbyTbname(group);
size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList);
if (group == NULL || groupByTbname) {
for (int32_t i = 0; i < numOfTables; i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
- info->groupId = groupByTbname? info->uid:0;
+ info->groupId = groupByTbname ? info->uid : 0;
}
pTableListInfo->oneTableForEachGroup = groupByTbname;
@@ -1878,7 +1886,7 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
// add all table entry in the hash map
size_t size = taosArrayGetSize(pTableListInfo->pTableList);
- for(int32_t i = 0; i < size; ++i) {
+ for (int32_t i = 0; i < size; ++i) {
STableKeyInfo* p = taosArrayGet(pTableListInfo->pTableList, i);
taosHashPut(pTableListInfo->map, &p->uid, sizeof(uint64_t), &i, sizeof(int32_t));
}
@@ -1889,7 +1897,7 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
struct SExecTaskInfo* pTaskInfo) {
- int64_t st = taosGetTimestampUs();
+ int64_t st = taosGetTimestampUs();
const char* idStr = GET_TASKID(pTaskInfo);
if (pHandle == NULL) {
@@ -1919,7 +1927,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return code;
}
- pTaskInfo->cost.groupIdMapTime = (taosGetTimestampUs() - st1)/1000.0;
+ pTaskInfo->cost.groupIdMapTime = (taosGetTimestampUs() - st1) / 1000.0;
qDebug("generate group id map completed, elapsed time:%.2f ms %s", pTaskInfo->cost.groupIdMapTime, idStr);
return TSDB_CODE_SUCCESS;
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index 2964878a2c..1cf01c8661 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -321,7 +321,7 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
- SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
+ SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (isAdd) {
qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), pTaskInfo->id.str);
@@ -473,7 +473,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
qDebug("subplan task create completed, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId);
- _error:
+_error:
// if failed to add ref for all tables in this query, abort current query
return code;
}
@@ -1027,10 +1027,11 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
if (pTableScanInfo->dataReader == NULL) {
STableKeyInfo* pList = tableListGetInfo(pTaskInfo->pTableInfoList, 0);
- int32_t num = tableListGetSize(pTaskInfo->pTableInfoList);
+ int32_t num = tableListGetSize(pTaskInfo->pTableInfoList);
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond, pList, num,
- &pTableScanInfo->dataReader, NULL) < 0 || pTableScanInfo->dataReader == NULL) {
+ &pTableScanInfo->dataReader, NULL) < 0 ||
+ pTableScanInfo->dataReader == NULL) {
ASSERT(0);
}
}
@@ -1071,14 +1072,14 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, &mtInfo);
pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts;
- if (pTaskInfo->pTableInfoList == NULL) {
+ if (pTaskInfo->pTableInfoList == NULL) {
pTaskInfo->pTableInfoList = tableListCreate();
}
tableListAddTableInfo(pTaskInfo->pTableInfoList, mtInfo.uid, 0);
STableKeyInfo* pList = tableListGetInfo(pTaskInfo->pTableInfoList, 0);
- int32_t size = tableListGetSize(pTaskInfo->pTableInfoList);
+ int32_t size = tableListGetSize(pTaskInfo->pTableInfoList);
ASSERT(size == 1);
tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pList, size, &pInfo->dataReader, NULL);
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 13a3712f0c..26abc2b90b 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -2783,8 +2783,10 @@ int32_t getBufferPgSize(int32_t rowSize, uint32_t* defaultPgsz, uint32_t* defaul
*defaultPgsz <<= 1u;
}
+ // The default buffer for each operator in query is 10MB.
// at least four pages need to be in buffer
- *defaultBufsz = 4096 * 256;
+ // TODO: make this variable to be configurable.
+ *defaultBufsz = 4096 * 2560;
if ((*defaultBufsz) <= (*defaultPgsz)) {
(*defaultBufsz) = (*defaultPgsz) * 4;
}
@@ -2971,7 +2973,7 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
return pOperator;
- _error:
+_error:
if (pInfo != NULL) {
destroyAggOperatorInfo(pInfo);
}
@@ -3187,11 +3189,12 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
- _error:
+_error:
if (pInfo != NULL) {
destroyFillOperatorInfo(pInfo);
}
+ pTaskInfo->code = code;
taosMemoryFreeClear(pOperator);
return NULL;
}
diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c
index 98f8d57fc6..e07a3475e0 100644
--- a/source/libs/executor/src/groupoperator.c
+++ b/source/libs/executor/src/groupoperator.c
@@ -750,6 +750,14 @@ static void destroyPartitionOperatorInfo(void* param) {
taosArrayDestroy(pInfo->pGroupColVals);
taosMemoryFree(pInfo->keyBuf);
taosArrayDestroy(pInfo->sortedGroupArray);
+
+ void* pGroupIter = taosHashIterate(pInfo->pGroupSet, NULL);
+ while (pGroupIter != NULL) {
+ SDataGroupInfo* pGroupInfo = pGroupIter;
+ taosArrayDestroy(pGroupInfo->pPageList);
+ pGroupIter = taosHashIterate(pInfo->pGroupSet, pGroupIter);
+ }
+
taosHashCleanup(pInfo->pGroupSet);
taosMemoryFree(pInfo->columnOffset);
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index e54b889a7a..3352b2685a 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -1062,15 +1062,18 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
int32_t code = initTableblockDistQueryCond(pBlockScanNode->suid, &cond);
if (code != TSDB_CODE_SUCCESS) {
- return NULL;
+ goto _error;
}
STableListInfo* pTableListInfo = pTaskInfo->pTableInfoList;
size_t num = tableListGetSize(pTableListInfo);
void* pList = tableListGetInfo(pTableListInfo, 0);
- tsdbReaderOpen(readHandle->vnode, &cond, pList, num, &pInfo->pHandle, pTaskInfo->id.str);
+ code = tsdbReaderOpen(readHandle->vnode, &cond, pList, num, &pInfo->pHandle, pTaskInfo->id.str);
cleanupQueryTableDataCond(&cond);
+ if (code != 0) {
+ goto _error;
+ }
}
pInfo->readHandle = *readHandle;
@@ -1164,6 +1167,7 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
+ T_LONG_JMP(pTaskInfo->env, code);
return NULL;
}
@@ -1884,11 +1888,15 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
memcpy(&pTSInfo->cond, &pTaskInfo->streamInfo.tableCond, sizeof(SQueryTableDataCond));
if (pTaskInfo->streamInfo.recoverStep == STREAM_RECOVER_STEP__PREPARE1) {
- pTSInfo->cond.startVersion = -1;
+ pTSInfo->cond.startVersion = 0;
pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer1;
+ qDebug("stream recover step 1, from %" PRId64 " to %" PRId64, pTSInfo->cond.startVersion,
+ pTSInfo->cond.endVersion);
} else {
pTSInfo->cond.startVersion = pTaskInfo->streamInfo.fillHistoryVer1 + 1;
pTSInfo->cond.endVersion = pTaskInfo->streamInfo.fillHistoryVer2;
+ qDebug("stream recover step 2, from %" PRId64 " to %" PRId64, pTSInfo->cond.startVersion,
+ pTSInfo->cond.endVersion);
}
/*resetTableScanInfo(pTSInfo, pWin);*/
@@ -1905,11 +1913,15 @@ static SSDataBlock* doStreamScan(SOperatorInfo* pOperator) {
if (pBlock != NULL) {
calBlockTbName(&pInfo->tbnameCalSup, pBlock);
updateInfoFillBlockData(pInfo->pUpdateInfo, pBlock, pInfo->primaryTsIndex);
+ qDebug("stream recover scan get block, rows %d", pBlock->info.rows);
return pBlock;
}
pTaskInfo->streamInfo.recoverStep = STREAM_RECOVER_STEP__NONE;
STableScanInfo* pTSInfo = pInfo->pTableScanOp->info;
- pTSInfo->cond.startVersion = 0;
+ tsdbReaderClose(pTSInfo->dataReader);
+ pTSInfo->dataReader = NULL;
+
+ pTSInfo->cond.startVersion = -1;
pTSInfo->cond.endVersion = -1;
return NULL;
@@ -2418,8 +2430,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (pHandle->initTableReader) {
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
pTSInfo->dataReader = NULL;
- if (tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, pList, num, &pTSInfo->dataReader, NULL) < 0) {
- terrno = TSDB_CODE_OUT_OF_MEMORY;
+ int32_t code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, pList, num, &pTSInfo->dataReader, NULL);
+ if (code != 0) {
+ terrno = code;
destroyTableScanOperatorInfo(pTableScanOp);
goto _error;
}
@@ -4284,131 +4297,6 @@ int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle*
return TSDB_CODE_SUCCESS;
}
-int32_t createMultipleDataReaders2(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle,
- STableListInfo* pTableListInfo, int32_t tableStartIdx, int32_t tableEndIdx,
- STsdbReader** ppReader, const char* idstr) {
- STsdbReader* pReader = NULL;
- void* pStart = tableListGetInfo(pTableListInfo, tableStartIdx);
- int32_t num = tableEndIdx - tableStartIdx + 1;
-
- int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, pStart, num, &pReader, idstr);
- if (code != 0) {
- return code;
- }
-
- *ppReader = pReader;
- return TSDB_CODE_SUCCESS;
-}
-
-static int32_t loadDataBlockFromOneTable2(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
- SSDataBlock* pBlock, uint32_t* status) {
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
- STableMergeScanInfo* pInfo = pOperator->info;
-
- uint64_t uid = pBlock->info.uid;
-
- SFileBlockLoadRecorder* pCost = &pTableScanInfo->readRecorder;
-
- pCost->totalBlocks += 1;
- pCost->totalRows += pBlock->info.rows;
-
- *status = pInfo->dataBlockLoadFlag;
- if (pTableScanInfo->pFilterNode != NULL ||
- overlapWithTimeWindow(&pTableScanInfo->interval, &pBlock->info, pTableScanInfo->cond.order)) {
- (*status) = FUNC_DATA_REQUIRED_DATA_LOAD;
- }
-
- SDataBlockInfo* pBlockInfo = &pBlock->info;
- taosMemoryFreeClear(pBlock->pBlockAgg);
-
- if (*status == FUNC_DATA_REQUIRED_FILTEROUT) {
- qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
- pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
- pCost->filterOutBlocks += 1;
- return TSDB_CODE_SUCCESS;
- } else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
- qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
- pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
- pCost->skipBlocks += 1;
-
- // clear all data in pBlock that are set when handing the previous block
- for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) {
- SColumnInfoData* pcol = taosArrayGet(pBlock->pDataBlock, i);
- pcol->pData = NULL;
- }
-
- return TSDB_CODE_SUCCESS;
- } else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) {
- pCost->loadBlockStatis += 1;
-
- bool allColumnsHaveAgg = true;
- SColumnDataAgg** pColAgg = NULL;
- STsdbReader* reader = pTableScanInfo->pReader;
- tsdbRetrieveDatablockSMA(reader, &pColAgg, &allColumnsHaveAgg);
-
- if (allColumnsHaveAgg == true) {
- int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
-
- // todo create this buffer during creating operator
- if (pBlock->pBlockAgg == NULL) {
- pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
- }
-
- for (int32_t i = 0; i < numOfCols; ++i) {
- SColMatchItem* pColMatchInfo = taosArrayGet(pTableScanInfo->matchInfo.pList, i);
- if (!pColMatchInfo->needOutput) {
- continue;
- }
- pBlock->pBlockAgg[pColMatchInfo->dstSlotId] = pColAgg[i];
- }
-
- return TSDB_CODE_SUCCESS;
- } else { // failed to load the block sma data, data block statistics does not exist, load data block instead
- *status = FUNC_DATA_REQUIRED_DATA_LOAD;
- }
- }
-
- ASSERT(*status == FUNC_DATA_REQUIRED_DATA_LOAD);
-
- pCost->totalCheckedRows += pBlock->info.rows;
- pCost->loadBlocks += 1;
-
- STsdbReader* reader = pTableScanInfo->pReader;
- SArray* pCols = tsdbRetrieveDataBlock(reader, NULL);
- if (pCols == NULL) {
- return terrno;
- }
-
- relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true);
-
- // currently only the tbname pseudo column
- if (pTableScanInfo->pseudoSup.numOfExprs > 0) {
- int32_t code =
- addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pseudoSup.pExprInfo,
- pTableScanInfo->pseudoSup.numOfExprs, pBlock, pBlock->info.rows, GET_TASKID(pTaskInfo));
- if (code != TSDB_CODE_SUCCESS) {
- T_LONG_JMP(pTaskInfo->env, code);
- }
- }
-
- if (pTableScanInfo->pFilterNode != NULL) {
- int64_t st = taosGetTimestampMs();
- doFilter(pTableScanInfo->pFilterNode, pBlock, &pTableScanInfo->matchInfo, NULL);
-
- double el = (taosGetTimestampUs() - st) / 1000.0;
- pTableScanInfo->readRecorder.filterTime += el;
-
- if (pBlock->info.rows == 0) {
- pCost->filterOutBlocks += 1;
- qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d, elapsed time:%.2f ms",
- GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, el);
- } else {
- qDebug("%s data block filter applied, elapsed time:%.2f ms", GET_TASKID(pTaskInfo), el);
- }
- }
- return TSDB_CODE_SUCCESS;
-}
-
// todo refactor
static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
int32_t readerIdx, SSDataBlock* pBlock, uint32_t* status) {
@@ -4535,7 +4423,7 @@ typedef struct STableMergeScanSortSourceParam {
SSDataBlock* inputBlock;
} STableMergeScanSortSourceParam;
-static SSDataBlock* getTableDataBlockTemp(void* param) {
+static SSDataBlock* getTableDataBlockImpl(void* param) {
STableMergeScanSortSourceParam* source = param;
SOperatorInfo* pOperator = source->pOperator;
STableMergeScanInfo* pInfo = pOperator->info;
@@ -4552,7 +4440,11 @@ static SSDataBlock* getTableDataBlockTemp(void* param) {
void* p = tableListGetInfo(pInfo->tableListInfo, readIdx + pInfo->tableStartIndex);
SReadHandle* pHandle = &pInfo->readHandle;
- tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->pReader, GET_TASKID(pTaskInfo));
+
+ int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->pReader, GET_TASKID(pTaskInfo));
+ if (code != 0) {
+ T_LONG_JMP(pOperator->pTaskInfo->env, code);
+ }
STsdbReader* reader = pInfo->pReader;
while (tsdbNextDataBlock(reader)) {
@@ -4603,55 +4495,6 @@ static SSDataBlock* getTableDataBlockTemp(void* param) {
pInfo->pReader = NULL;
return NULL;
}
-static SSDataBlock* getTableDataBlock2(void* param) {
- STableMergeScanSortSourceParam* source = param;
- SOperatorInfo* pOperator = source->pOperator;
- int64_t uid = source->uid;
- SSDataBlock* pBlock = source->inputBlock;
- STableMergeScanInfo* pTableScanInfo = pOperator->info;
-
- int64_t st = taosGetTimestampUs();
-
- blockDataCleanup(pBlock);
-
- STsdbReader* reader = pTableScanInfo->pReader;
- while (tsdbTableNextDataBlock(reader, uid)) {
- if (isTaskKilled(pOperator->pTaskInfo)) {
- T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
- }
-
- // process this data block based on the probabilities
- bool processThisBlock = processBlockWithProbability(&pTableScanInfo->sample);
- if (!processThisBlock) {
- continue;
- }
-
- blockDataCleanup(pBlock);
-
- int32_t rows = 0;
- tsdbRetrieveDataBlockInfo(reader, &rows, &pBlock->info.uid, &pBlock->info.window);
- blockDataEnsureCapacity(pBlock, rows);
- pBlock->info.rows = rows;
-
- uint32_t status = 0;
- int32_t code = loadDataBlockFromOneTable2(pOperator, pTableScanInfo, pBlock, &status);
- if (code != TSDB_CODE_SUCCESS) {
- T_LONG_JMP(pOperator->pTaskInfo->env, code);
- }
-
- // current block is filter out according to filter condition, continue load the next block
- if (status == FUNC_DATA_REQUIRED_FILTEROUT || pBlock->info.rows == 0) {
- continue;
- }
-
- pBlock->info.groupId = getTableGroupId(pOperator->pTaskInfo->pTableInfoList, pBlock->info.uid);
- pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
- pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
-
- return pBlock;
- }
- return NULL;
-}
static SSDataBlock* getTableDataBlock(void* param) {
STableMergeScanSortSourceParam* source = param;
@@ -4761,7 +4604,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
pInfo->pSortInputBlock, pTaskInfo->id.str);
- tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlockTemp, NULL, NULL);
+ tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlockImpl, NULL, NULL);
// one table has one data block
int32_t numOfTable = tableEndIdx - tableStartIdx + 1;
diff --git a/source/libs/executor/src/sortoperator.c b/source/libs/executor/src/sortoperator.c
index 26f1932b12..5b05b3b2ed 100644
--- a/source/libs/executor/src/sortoperator.c
+++ b/source/libs/executor/src/sortoperator.c
@@ -579,21 +579,8 @@ int32_t doOpenMultiwayMergeOperator(SOperatorInfo* pOperator) {
return TSDB_CODE_SUCCESS;
}
-SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity,
- SArray* pColMatchInfo, SOperatorInfo* pOperator) {
- SMultiwayMergeOperatorInfo* pInfo = pOperator->info;
- SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+static void doGetSortedBlockData(SMultiwayMergeOperatorInfo* pInfo, SSortHandle* pHandle, int32_t capacity, SSDataBlock* p) {
- blockDataCleanup(pDataBlock);
-
- SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
- if (p == NULL) {
- return NULL;
- }
-
- blockDataEnsureCapacity(p, capacity);
-
-_retry:
while (1) {
STupleHandle* pTupleHandle = NULL;
if (pInfo->groupSort) {
@@ -638,22 +625,48 @@ _retry:
pInfo->hasGroupId = false;
}
- if (p->info.rows > 0) { // todo extract method
- applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator);
+}
+
+SSDataBlock* getMultiwaySortedBlockData(SSortHandle* pHandle, SSDataBlock* pDataBlock, int32_t capacity,
+ SArray* pColMatchInfo, SOperatorInfo* pOperator) {
+ SMultiwayMergeOperatorInfo* pInfo = pOperator->info;
+
+ SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
+ blockDataCleanup(pDataBlock);
+
+ SSDataBlock* p = tsortGetSortedDataBlock(pHandle);
+ if (p == NULL) {
+ return NULL;
+ }
+
+ blockDataEnsureCapacity(p, capacity);
+
+ while (1) {
+ doGetSortedBlockData(pInfo, pHandle, capacity, p);
if (p->info.rows == 0) {
- goto _retry;
+ break;
}
+ if (p->info.rows > 0) {
+ applyLimitOffset(&pInfo->limitInfo, p, pTaskInfo, pOperator);
+ if (p->info.rows > 0) {
+ break;
+ }
+ }
+ }
+
+ if (p->info.rows > 0) {
blockDataEnsureCapacity(pDataBlock, p->info.rows);
+
int32_t numOfCols = taosArrayGetSize(pColMatchInfo);
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchItem* pmInfo = taosArrayGet(pColMatchInfo, i);
- // ASSERT(pColMatchInfo-> == COL_MATCH_FROM_SLOT_ID);
SColumnInfoData* pSrc = taosArrayGet(p->pDataBlock, pmInfo->srcSlotId);
SColumnInfoData* pDst = taosArrayGet(pDataBlock->pDataBlock, pmInfo->dstSlotId);
colDataAssign(pDst, pSrc, p->info.rows, &pDataBlock->info);
}
+
pInfo->limitInfo.numOfOutputRows += p->info.rows;
pDataBlock->info.rows = p->info.rows;
pDataBlock->info.groupId = pInfo->groupId;
diff --git a/source/libs/function/src/tudf.c b/source/libs/function/src/tudf.c
index 85b14d6017..1dedfe8364 100644
--- a/source/libs/function/src/tudf.c
+++ b/source/libs/function/src/tudf.c
@@ -122,14 +122,14 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
taosGetCpuCores(&numCpuCores);
snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int)numCpuCores * 2);
- char pathTaosdLdLib[512] = {0};
+ char pathTaosdLdLib[512] = {0};
size_t taosdLdLibPathLen = sizeof(pathTaosdLdLib);
int ret = uv_os_getenv("LD_LIBRARY_PATH", pathTaosdLdLib, &taosdLdLibPathLen);
if (ret != UV_ENOBUFS) {
taosdLdLibPathLen = strlen(pathTaosdLdLib);
}
- char udfdPathLdLib[1024] = {0};
+ char udfdPathLdLib[1024] = {0};
size_t udfdLdLibPathLen = strlen(tsUdfdLdLibPath);
strncpy(udfdPathLdLib, tsUdfdLdLibPath, udfdLdLibPathLen);
udfdPathLdLib[udfdLdLibPathLen] = ':';
@@ -362,7 +362,7 @@ typedef struct SUdfcProxy {
SArray *udfStubs; // SUdfcFuncStub
uv_mutex_t udfcUvMutex;
- int8_t initialized;
+ int8_t initialized;
} SUdfcProxy;
SUdfcProxy gUdfcProxy = {0};
diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c
index 001a1972a0..a7cd3db824 100644
--- a/source/libs/qworker/src/qworker.c
+++ b/source/libs/qworker/src/qworker.c
@@ -644,11 +644,13 @@ _return:
input.msgType = qwMsg->msgType;
code = qwHandlePostPhaseEvents(QW_FPARAMS(), QW_PHASE_POST_QUERY, &input, NULL);
- if (ctx != NULL && QW_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) {
+ if (QUERY_RSP_POLICY_QUICK == tsQueryRspPolicy && ctx != NULL && QW_EVENT_RECEIVED(ctx, QW_EVENT_FETCH)) {
void *rsp = NULL;
int32_t dataLen = 0;
SOutputData sOutput = {0};
- QW_ERR_JRET(qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput));
+ if (qwGetQueryResFromSink(QW_FPARAMS(), ctx, &dataLen, &rsp, &sOutput)) {
+ return TSDB_CODE_SUCCESS;
+ }
if (rsp) {
bool qComplete = (DS_BUF_EMPTY == sOutput.bufStatus && sOutput.queryEnd);
diff --git a/source/libs/scheduler/src/schRemote.c b/source/libs/scheduler/src/schRemote.c
index d1d0680e65..47de2528fa 100644
--- a/source/libs/scheduler/src/schRemote.c
+++ b/source/libs/scheduler/src/schRemote.c
@@ -280,7 +280,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
}
atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows);
- SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows);
+ SCH_TASK_DLOG("submit succeed, affectedRows:%d, blocks:%d", rsp->affectedRows, rsp->nBlocks);
SCH_LOCK(SCH_WRITE, &pJob->resLock);
if (pJob->execRes.res) {
diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h
index a8f7184bb2..6a3bdb59c9 100644
--- a/source/libs/stream/inc/streamInc.h
+++ b/source/libs/stream/inc/streamInc.h
@@ -43,6 +43,8 @@ int32_t streamBroadcastToChildren(SStreamTask* pTask, const SSDataBlock* pBlock)
int32_t tEncodeStreamRetrieveReq(SEncoder* pEncoder, const SStreamRetrieveReq* pReq);
+int32_t streamDispatchOneCheckReq(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet);
+
int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecoverFinishReq* pReq, int32_t vgId,
SEpSet* pEpSet);
diff --git a/source/libs/stream/src/stream.c b/source/libs/stream/src/stream.c
index ee317d0751..b71562cf45 100644
--- a/source/libs/stream/src/stream.c
+++ b/source/libs/stream/src/stream.c
@@ -135,8 +135,11 @@ int32_t streamTaskEnqueue(SStreamTask* pTask, const SStreamDispatchReq* pReq, SR
((SMsgHead*)buf)->vgId = htonl(pReq->upstreamNodeId);
SStreamDispatchRsp* pCont = POINTER_SHIFT(buf, sizeof(SMsgHead));
pCont->inputStatus = status;
- pCont->streamId = pReq->streamId;
- pCont->taskId = pReq->upstreamTaskId;
+ pCont->streamId = htobe64(pReq->streamId);
+ pCont->upstreamNodeId = htonl(pReq->upstreamNodeId);
+ pCont->upstreamTaskId = htonl(pReq->upstreamTaskId);
+ pCont->downstreamNodeId = htonl(pTask->nodeId);
+ pCont->downstreamTaskId = htonl(pTask->taskId);
pRsp->pCont = buf;
pRsp->contLen = sizeof(SMsgHead) + sizeof(SStreamDispatchRsp);
tmsgSendRsp(pRsp);
@@ -203,10 +206,10 @@ int32_t streamProcessDispatchReq(SStreamTask* pTask, SStreamDispatchReq* pReq, S
return 0;
}
-int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp) {
+int32_t streamProcessDispatchRsp(SStreamTask* pTask, SStreamDispatchRsp* pRsp, int32_t code) {
ASSERT(pRsp->inputStatus == TASK_OUTPUT_STATUS__NORMAL || pRsp->inputStatus == TASK_OUTPUT_STATUS__BLOCKED);
- qDebug("task %d receive dispatch rsp", pTask->taskId);
+ qDebug("task %d receive dispatch rsp, code: %x", pTask->taskId, code);
if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
int32_t leftRsp = atomic_sub_fetch_32(&pTask->shuffleDispatcher.waitingRspCnt, 1);
diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c
index d2876a22c6..ad342edfa0 100644
--- a/source/libs/stream/src/streamDispatch.c
+++ b/source/libs/stream/src/streamDispatch.c
@@ -210,6 +210,46 @@ static int32_t streamAddBlockToDispatchMsg(const SSDataBlock* pBlock, SStreamDis
return 0;
}
+int32_t streamDispatchOneCheckReq(SStreamTask* pTask, const SStreamTaskCheckReq* pReq, int32_t nodeId, SEpSet* pEpSet) {
+ void* buf = NULL;
+ int32_t code = -1;
+ SRpcMsg msg = {0};
+
+ int32_t tlen;
+ tEncodeSize(tEncodeSStreamTaskCheckReq, pReq, tlen, code);
+ if (code < 0) {
+ return -1;
+ }
+
+ buf = rpcMallocCont(sizeof(SMsgHead) + tlen);
+ if (buf == NULL) {
+ return -1;
+ }
+
+ ((SMsgHead*)buf)->vgId = htonl(nodeId);
+ void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
+
+ SEncoder encoder;
+ tEncoderInit(&encoder, abuf, tlen);
+ if ((code = tEncodeSStreamTaskCheckReq(&encoder, pReq)) < 0) {
+ goto FAIL;
+ }
+ tEncoderClear(&encoder);
+
+ msg.contLen = tlen + sizeof(SMsgHead);
+ msg.pCont = buf;
+ msg.msgType = TDMT_STREAM_TASK_CHECK;
+
+ qDebug("dispatch from task %d to task %d node %d: check msg", pTask->taskId, pReq->downstreamTaskId, nodeId);
+
+ tmsgSendReq(pEpSet, &msg);
+
+ return 0;
+FAIL:
+ if (buf) rpcFreeCont(buf);
+ return code;
+}
+
int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecoverFinishReq* pReq, int32_t vgId,
SEpSet* pEpSet) {
void* buf = NULL;
@@ -243,7 +283,8 @@ int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecov
tmsgSendReq(pEpSet, &msg);
- code = 0;
+ qDebug("dispatch from task %d to task %d node %d: recover finish msg", pTask->taskId, pReq->taskId, vgId);
+
return 0;
FAIL:
if (buf) rpcFreeCont(buf);
@@ -279,7 +320,7 @@ int32_t streamDispatchOneDataReq(SStreamTask* pTask, const SStreamDispatchReq* p
msg.pCont = buf;
msg.msgType = pTask->dispatchMsgType;
- qDebug("dispatch from task %d to task %d node %d", pTask->taskId, pReq->taskId, vgId);
+ qDebug("dispatch from task %d to task %d node %d: data msg", pTask->taskId, pReq->taskId, vgId);
tmsgSendReq(pEpSet, &msg);
diff --git a/source/libs/stream/src/streamExec.c b/source/libs/stream/src/streamExec.c
index 629333b4b4..46fab53659 100644
--- a/source/libs/stream/src/streamExec.c
+++ b/source/libs/stream/src/streamExec.c
@@ -202,83 +202,83 @@ int32_t streamPipelineExec(SStreamTask* pTask, int32_t batchNum, bool dispatch)
int32_t streamExecForAll(SStreamTask* pTask) {
while (1) {
int32_t batchCnt = 1;
- void* data = NULL;
+ void* input = NULL;
while (1) {
SStreamQueueItem* qItem = streamQueueNextItem(pTask->inputQueue);
if (qItem == NULL) {
qDebug("stream task exec over, queue empty, task: %d", pTask->taskId);
break;
}
- if (data == NULL) {
- data = qItem;
+ if (input == NULL) {
+ input = qItem;
streamQueueProcessSuccess(pTask->inputQueue);
if (pTask->taskLevel == TASK_LEVEL__SINK) {
break;
}
} else {
void* newRet;
- if ((newRet = streamMergeQueueItem(data, qItem)) == NULL) {
+ if ((newRet = streamMergeQueueItem(input, qItem)) == NULL) {
streamQueueProcessFail(pTask->inputQueue);
break;
} else {
batchCnt++;
- data = newRet;
+ input = newRet;
streamQueueProcessSuccess(pTask->inputQueue);
}
}
}
if (pTask->taskStatus == TASK_STATUS__DROPPING) {
- if (data) streamFreeQitem(data);
+ if (input) streamFreeQitem(input);
return 0;
}
- if (data == NULL) {
+ if (input == NULL) {
break;
}
if (pTask->taskLevel == TASK_LEVEL__SINK) {
- ASSERT(((SStreamQueueItem*)data)->type == STREAM_INPUT__DATA_BLOCK);
- streamTaskOutput(pTask, data);
+ ASSERT(((SStreamQueueItem*)input)->type == STREAM_INPUT__DATA_BLOCK);
+ streamTaskOutput(pTask, input);
continue;
}
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
qDebug("stream task %d exec begin, msg batch: %d", pTask->taskId, batchCnt);
- streamTaskExecImpl(pTask, data, pRes);
+ streamTaskExecImpl(pTask, input, pRes);
qDebug("stream task %d exec end", pTask->taskId);
if (taosArrayGetSize(pRes) != 0) {
SStreamDataBlock* qRes = taosAllocateQitem(sizeof(SStreamDataBlock), DEF_QITEM);
if (qRes == NULL) {
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
- streamFreeQitem(data);
+ streamFreeQitem(input);
return -1;
}
qRes->type = STREAM_INPUT__DATA_BLOCK;
qRes->blocks = pRes;
- if (((SStreamQueueItem*)data)->type == STREAM_INPUT__DATA_SUBMIT) {
- SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
+ if (((SStreamQueueItem*)input)->type == STREAM_INPUT__DATA_SUBMIT) {
+ SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)input;
qRes->childId = pTask->selfChildId;
qRes->sourceVer = pSubmit->ver;
- } else if (((SStreamQueueItem*)data)->type == STREAM_INPUT__MERGED_SUBMIT) {
- SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)data;
+ } else if (((SStreamQueueItem*)input)->type == STREAM_INPUT__MERGED_SUBMIT) {
+ SStreamMergedSubmit* pMerged = (SStreamMergedSubmit*)input;
qRes->childId = pTask->selfChildId;
qRes->sourceVer = pMerged->ver;
}
if (streamTaskOutput(pTask, qRes) < 0) {
taosArrayDestroyEx(pRes, (FDelete)blockDataFreeRes);
- streamFreeQitem(data);
+ streamFreeQitem(input);
taosFreeQitem(qRes);
return -1;
}
} else {
taosArrayDestroy(pRes);
}
- streamFreeQitem(data);
+ streamFreeQitem(input);
}
return 0;
}
diff --git a/source/libs/stream/src/streamRecover.c b/source/libs/stream/src/streamRecover.c
index adeb797721..2a2784afea 100644
--- a/source/libs/stream/src/streamRecover.c
+++ b/source/libs/stream/src/streamRecover.c
@@ -15,6 +15,148 @@
#include "streamInc.h"
+int32_t streamTaskLaunchRecover(SStreamTask* pTask, int64_t version) {
+ qDebug("task %d at node %d launch recover", pTask->taskId, pTask->nodeId);
+ if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
+ atomic_store_8(&pTask->taskStatus, TASK_STATUS__RECOVER_PREPARE);
+ streamSetParamForRecover(pTask);
+ streamSourceRecoverPrepareStep1(pTask, version);
+
+ SStreamRecoverStep1Req req;
+ streamBuildSourceRecover1Req(pTask, &req);
+ int32_t len = sizeof(SStreamRecoverStep1Req);
+
+ void* serializedReq = rpcMallocCont(len);
+ if (serializedReq == NULL) {
+ return -1;
+ }
+
+ memcpy(serializedReq, &req, len);
+
+ SRpcMsg rpcMsg = {
+ .contLen = len,
+ .pCont = serializedReq,
+ .msgType = TDMT_VND_STREAM_RECOVER_STEP1,
+ };
+
+ if (tmsgPutToQueue(pTask->pMsgCb, STREAM_QUEUE, &rpcMsg) < 0) {
+ /*ASSERT(0);*/
+ }
+
+ } else if (pTask->taskLevel == TASK_LEVEL__AGG) {
+ atomic_store_8(&pTask->taskStatus, TASK_STATUS__NORMAL);
+ streamSetParamForRecover(pTask);
+ streamAggRecoverPrepare(pTask);
+ } else if (pTask->taskLevel == TASK_LEVEL__SINK) {
+ atomic_store_8(&pTask->taskStatus, TASK_STATUS__NORMAL);
+ }
+ return 0;
+}
+
+// checkstatus
+int32_t streamTaskCheckDownstream(SStreamTask* pTask, int64_t version) {
+ SStreamTaskCheckReq req = {
+ .streamId = pTask->streamId,
+ .upstreamTaskId = pTask->taskId,
+ .upstreamNodeId = pTask->nodeId,
+ .childId = pTask->selfChildId,
+ };
+ // serialize
+ if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
+ req.reqId = tGenIdPI64();
+ req.downstreamNodeId = pTask->fixedEpDispatcher.nodeId;
+ req.downstreamTaskId = pTask->fixedEpDispatcher.taskId;
+ pTask->checkReqId = req.reqId;
+
+ qDebug("task %d at node %d check downstream task %d at node %d", pTask->taskId, pTask->nodeId, req.downstreamTaskId,
+ req.downstreamNodeId);
+ streamDispatchOneCheckReq(pTask, &req, pTask->fixedEpDispatcher.nodeId, &pTask->fixedEpDispatcher.epSet);
+ } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+ int32_t vgSz = taosArrayGetSize(vgInfo);
+ pTask->recoverTryingDownstream = vgSz;
+ pTask->checkReqIds = taosArrayInit(vgSz, sizeof(int64_t));
+
+ for (int32_t i = 0; i < vgSz; i++) {
+ SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
+ req.reqId = tGenIdPI64();
+ taosArrayPush(pTask->checkReqIds, &req.reqId);
+ req.downstreamNodeId = pVgInfo->vgId;
+ req.downstreamTaskId = pVgInfo->taskId;
+ qDebug("task %d at node %d check downstream task %d at node %d (shuffle)", pTask->taskId, pTask->nodeId,
+ req.downstreamTaskId, req.downstreamNodeId);
+ streamDispatchOneCheckReq(pTask, &req, pVgInfo->vgId, &pVgInfo->epSet);
+ }
+ } else {
+ qDebug("task %d at node %d direct launch recover since no downstream", pTask->taskId, pTask->nodeId);
+ streamTaskLaunchRecover(pTask, version);
+ }
+ return 0;
+}
+
+int32_t streamRecheckOneDownstream(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp) {
+ SStreamTaskCheckReq req = {
+ .reqId = pRsp->reqId,
+ .streamId = pRsp->streamId,
+ .upstreamTaskId = pRsp->upstreamTaskId,
+ .upstreamNodeId = pRsp->upstreamNodeId,
+ .downstreamTaskId = pRsp->downstreamTaskId,
+ .downstreamNodeId = pRsp->downstreamNodeId,
+ .childId = pRsp->childId,
+ };
+ qDebug("task %d at node %d check downstream task %d at node %d (recheck)", pTask->taskId, pTask->nodeId,
+ req.downstreamTaskId, req.downstreamNodeId);
+ if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
+ streamDispatchOneCheckReq(pTask, &req, pRsp->downstreamNodeId, &pTask->fixedEpDispatcher.epSet);
+ } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+ SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos;
+ int32_t vgSz = taosArrayGetSize(vgInfo);
+ for (int32_t i = 0; i < vgSz; i++) {
+ SVgroupInfo* pVgInfo = taosArrayGet(vgInfo, i);
+ if (pVgInfo->taskId == req.downstreamTaskId) {
+ streamDispatchOneCheckReq(pTask, &req, pRsp->downstreamNodeId, &pVgInfo->epSet);
+ }
+ }
+ }
+ return 0;
+}
+
+int32_t streamProcessTaskCheckReq(SStreamTask* pTask, const SStreamTaskCheckReq* pReq) {
+ return atomic_load_8(&pTask->taskStatus) == TASK_STATUS__NORMAL;
+}
+
+int32_t streamProcessTaskCheckRsp(SStreamTask* pTask, const SStreamTaskCheckRsp* pRsp, int64_t version) {
+ qDebug("task %d at node %d recv check rsp from task %d at node %d: status %d", pRsp->upstreamTaskId,
+ pRsp->upstreamNodeId, pRsp->downstreamTaskId, pRsp->downstreamNodeId, pRsp->status);
+ if (pRsp->status == 1) {
+ if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
+ bool found = false;
+ for (int32_t i = 0; i < taosArrayGetSize(pTask->checkReqIds); i++) {
+ int64_t reqId = *(int64_t*)taosArrayGet(pTask->checkReqIds, i);
+ if (reqId == pRsp->reqId) {
+ found = true;
+ break;
+ }
+ }
+ if (!found) return -1;
+ int32_t left = atomic_sub_fetch_32(&pTask->recoverTryingDownstream, 1);
+ ASSERT(left >= 0);
+ if (left == 0) {
+ taosArrayDestroy(pTask->checkReqIds);
+ streamTaskLaunchRecover(pTask, version);
+ }
+ } else if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
+ if (pRsp->reqId != pTask->checkReqId) return -1;
+ streamTaskLaunchRecover(pTask, version);
+ } else {
+ ASSERT(0);
+ }
+ } else {
+ streamRecheckOneDownstream(pTask, pRsp);
+ }
+ return 0;
+}
+
// common
int32_t streamSetParamForRecover(SStreamTask* pTask) {
void* exec = pTask->exec.executor;
@@ -86,10 +228,7 @@ int32_t streamDispatchRecoverFinishReq(SStreamTask* pTask) {
// agg
int32_t streamAggRecoverPrepare(SStreamTask* pTask) {
void* exec = pTask->exec.executor;
- /*if (qStreamSetParamForRecover(exec) < 0) {*/
- /*return -1;*/
- /*}*/
- pTask->recoverWaitingChild = taosArrayGetSize(pTask->childEpInfo);
+ pTask->recoverWaitingUpstream = taosArrayGetSize(pTask->childEpInfo);
return 0;
}
@@ -107,7 +246,7 @@ int32_t streamAggChildrenRecoverFinish(SStreamTask* pTask) {
int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId) {
if (pTask->taskLevel == TASK_LEVEL__AGG) {
- int32_t left = atomic_sub_fetch_32(&pTask->recoverWaitingChild, 1);
+ int32_t left = atomic_sub_fetch_32(&pTask->recoverWaitingUpstream, 1);
ASSERT(left >= 0);
if (left == 0) {
streamAggChildrenRecoverFinish(pTask);
@@ -116,6 +255,60 @@ int32_t streamProcessRecoverFinishReq(SStreamTask* pTask, int32_t childId) {
return 0;
}
+int32_t tEncodeSStreamTaskCheckReq(SEncoder* pEncoder, const SStreamTaskCheckReq* pReq) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI64(pEncoder, pReq->reqId) < 0) return -1;
+ if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->downstreamNodeId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->downstreamTaskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pReq->childId) < 0) return -1;
+ tEndEncode(pEncoder);
+ return pEncoder->pos;
+}
+
+int32_t tDecodeSStreamTaskCheckReq(SDecoder* pDecoder, SStreamTaskCheckReq* pReq) {
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pReq->reqId) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->downstreamNodeId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->downstreamTaskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pReq->childId) < 0) return -1;
+ tEndDecode(pDecoder);
+ return 0;
+}
+
+int32_t tEncodeSStreamTaskCheckRsp(SEncoder* pEncoder, const SStreamTaskCheckRsp* pRsp) {
+ if (tStartEncode(pEncoder) < 0) return -1;
+ if (tEncodeI64(pEncoder, pRsp->reqId) < 0) return -1;
+ if (tEncodeI64(pEncoder, pRsp->streamId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->upstreamNodeId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->upstreamTaskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->downstreamNodeId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->downstreamTaskId) < 0) return -1;
+ if (tEncodeI32(pEncoder, pRsp->childId) < 0) return -1;
+ if (tEncodeI8(pEncoder, pRsp->status) < 0) return -1;
+ tEndEncode(pEncoder);
+ return pEncoder->pos;
+}
+
+int32_t tDecodeSStreamTaskCheckRsp(SDecoder* pDecoder, SStreamTaskCheckRsp* pRsp) {
+ if (tStartDecode(pDecoder) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pRsp->reqId) < 0) return -1;
+ if (tDecodeI64(pDecoder, &pRsp->streamId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->upstreamNodeId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->upstreamTaskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->downstreamNodeId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->downstreamTaskId) < 0) return -1;
+ if (tDecodeI32(pDecoder, &pRsp->childId) < 0) return -1;
+ if (tDecodeI8(pDecoder, &pRsp->status) < 0) return -1;
+ tEndDecode(pDecoder);
+ return 0;
+}
+
int32_t tEncodeSStreamRecoverFinishReq(SEncoder* pEncoder, const SStreamRecoverFinishReq* pReq) {
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
@@ -132,79 +325,6 @@ int32_t tDecodeSStreamRecoverFinishReq(SDecoder* pDecoder, SStreamRecoverFinishR
tEndDecode(pDecoder);
return 0;
}
-#if 0
-int32_t tEncodeStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamTaskRecoverReq* pReq) {
- if (tStartEncode(pEncoder) < 0) return -1;
- if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
- if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
- if (tEncodeI32(pEncoder, pReq->upstreamTaskId) < 0) return -1;
- if (tEncodeI32(pEncoder, pReq->upstreamNodeId) < 0) return -1;
- tEndEncode(pEncoder);
- return pEncoder->pos;
-}
-
-int32_t tDecodeStreamTaskRecoverReq(SDecoder* pDecoder, SStreamTaskRecoverReq* pReq) {
- if (tStartDecode(pDecoder) < 0) return -1;
- if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->upstreamTaskId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->upstreamNodeId) < 0) return -1;
- tEndDecode(pDecoder);
- return 0;
-}
-
-int32_t tEncodeStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamTaskRecoverRsp* pRsp) {
- if (tStartEncode(pEncoder) < 0) return -1;
- if (tEncodeI64(pEncoder, pRsp->streamId) < 0) return -1;
- if (tEncodeI32(pEncoder, pRsp->reqTaskId) < 0) return -1;
- if (tEncodeI32(pEncoder, pRsp->rspTaskId) < 0) return -1;
- if (tEncodeI8(pEncoder, pRsp->inputStatus) < 0) return -1;
- tEndEncode(pEncoder);
- return pEncoder->pos;
-}
-
-int32_t tDecodeStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamTaskRecoverRsp* pReq) {
- if (tStartDecode(pDecoder) < 0) return -1;
- if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->reqTaskId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->rspTaskId) < 0) return -1;
- if (tDecodeI8(pDecoder, &pReq->inputStatus) < 0) return -1;
- tEndDecode(pDecoder);
- return 0;
-}
-
-int32_t tEncodeSMStreamTaskRecoverReq(SEncoder* pEncoder, const SMStreamTaskRecoverReq* pReq) {
- if (tStartEncode(pEncoder) < 0) return -1;
- if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
- if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
- tEndEncode(pEncoder);
- return pEncoder->pos;
-}
-
-int32_t tDecodeSMStreamTaskRecoverReq(SDecoder* pDecoder, SMStreamTaskRecoverReq* pReq) {
- if (tStartDecode(pDecoder) < 0) return -1;
- if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
- tEndDecode(pDecoder);
- return 0;
-}
-
-int32_t tEncodeSMStreamTaskRecoverRsp(SEncoder* pEncoder, const SMStreamTaskRecoverRsp* pRsp) {
- if (tStartEncode(pEncoder) < 0) return -1;
- if (tEncodeI64(pEncoder, pRsp->streamId) < 0) return -1;
- if (tEncodeI32(pEncoder, pRsp->taskId) < 0) return -1;
- tEndEncode(pEncoder);
- return pEncoder->pos;
-}
-
-int32_t tDecodeSMStreamTaskRecoverRsp(SDecoder* pDecoder, SMStreamTaskRecoverRsp* pReq) {
- if (tStartDecode(pDecoder) < 0) return -1;
- if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
- tEndDecode(pDecoder);
- return 0;
-}
-#endif
int32_t tEncodeSStreamCheckpointInfo(SEncoder* pEncoder, const SStreamCheckpointInfo* pCheckpoint) {
if (tEncodeI32(pEncoder, pCheckpoint->srcNodeId) < 0) return -1;
@@ -248,308 +368,3 @@ int32_t tDecodeSStreamMultiVgCheckpointInfo(SDecoder* pDecoder, SStreamMultiVgCh
}
return 0;
}
-
-#if 0
-int32_t tEncodeSStreamTaskRecoverReq(SEncoder* pEncoder, const SStreamRecoverDownstreamReq* pReq) {
- if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1;
- if (tEncodeI32(pEncoder, pReq->downstreamTaskId) < 0) return -1;
- if (tEncodeI32(pEncoder, pReq->taskId) < 0) return -1;
- return 0;
-}
-
-int32_t tDecodeSStreamTaskRecoverReq(SDecoder* pDecoder, SStreamRecoverDownstreamReq* pReq) {
- if (tDecodeI64(pDecoder, &pReq->streamId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->downstreamTaskId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pReq->taskId) < 0) return -1;
- return 0;
-}
-
-int32_t tEncodeSStreamTaskRecoverRsp(SEncoder* pEncoder, const SStreamRecoverDownstreamRsp* pRsp) {
- if (tEncodeI64(pEncoder, pRsp->streamId) < 0) return -1;
- if (tEncodeI32(pEncoder, pRsp->downstreamTaskId) < 0) return -1;
- if (tEncodeI32(pEncoder, pRsp->taskId) < 0) return -1;
- int32_t sz = taosArrayGetSize(pRsp->checkpointVer);
- if (tEncodeI32(pEncoder, sz) < 0) return -1;
- for (int32_t i = 0; i < sz; i++) {
- SStreamCheckpointInfo* pInfo = taosArrayGet(pRsp->checkpointVer, i);
- if (tEncodeSStreamCheckpointInfo(pEncoder, pInfo) < 0) return -1;
- }
- return 0;
-}
-
-int32_t tDecodeSStreamTaskRecoverRsp(SDecoder* pDecoder, SStreamRecoverDownstreamRsp* pRsp) {
- if (tDecodeI64(pDecoder, &pRsp->streamId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pRsp->downstreamTaskId) < 0) return -1;
- if (tDecodeI32(pDecoder, &pRsp->taskId) < 0) return -1;
- int32_t sz;
- if (tDecodeI32(pDecoder, &sz) < 0) return -1;
- pRsp->checkpointVer = taosArrayInit(sz, sizeof(SStreamCheckpointInfo));
- if (pRsp->checkpointVer == NULL) return -1;
- for (int32_t i = 0; i < sz; i++) {
- SStreamCheckpointInfo info;
- if (tDecodeSStreamCheckpointInfo(pDecoder, &info) < 0) return -1;
- taosArrayPush(pRsp->checkpointVer, &info);
- }
- return 0;
-}
-#endif
-
-int32_t streamSaveStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
-#if 0
- void* buf = NULL;
-
- ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
-
- SStreamMultiVgCheckpointInfo checkpoint;
- checkpoint.checkpointId = atomic_fetch_add_32(&pTask->nextCheckId, 1);
- checkpoint.checkTs = taosGetTimestampMs();
- checkpoint.streamId = pTask->streamId;
- checkpoint.taskId = pTask->taskId;
- checkpoint.checkpointVer = pTask->checkpointInfo;
-
- int32_t len;
- int32_t code;
- tEncodeSize(tEncodeSStreamMultiVgCheckpointInfo, &checkpoint, len, code);
- if (code < 0) {
- return -1;
- }
-
- buf = taosMemoryCalloc(1, len);
- if (buf == NULL) {
- return -1;
- }
- SEncoder encoder;
- tEncoderInit(&encoder, buf, len);
- tEncodeSStreamMultiVgCheckpointInfo(&encoder, &checkpoint);
- tEncoderClear(&encoder);
-
- SStreamCheckpointKey key = {
- .taskId = pTask->taskId,
- .checkpointId = checkpoint.checkpointId,
- };
-
- if (tdbTbUpsert(pMeta->pStateDb, &key, sizeof(SStreamCheckpointKey), buf, len, &pMeta->txn) < 0) {
- ASSERT(0);
- goto FAIL;
- }
-
- int32_t sz = taosArrayGetSize(pTask->checkpointInfo);
- for (int32_t i = 0; i < sz; i++) {
- SStreamCheckpointInfo* pCheck = taosArrayGet(pTask->checkpointInfo, i);
- pCheck->stateSaveVer = pCheck->stateProcessedVer;
- }
-
- taosMemoryFree(buf);
- return 0;
-FAIL:
- if (buf) taosMemoryFree(buf);
- return -1;
-#endif
- return 0;
-}
-
-int32_t streamLoadStateInfo(SStreamMeta* pMeta, SStreamTask* pTask) {
-#if 0
- void* pVal = NULL;
- int32_t vLen = 0;
- if (tdbTbGet(pMeta->pStateDb, &pTask->taskId, sizeof(void*), &pVal, &vLen) < 0) {
- return -1;
- }
- SDecoder decoder;
- tDecoderInit(&decoder, pVal, vLen);
- SStreamMultiVgCheckpointInfo aggCheckpoint;
- tDecodeSStreamMultiVgCheckpointInfo(&decoder, &aggCheckpoint);
- tDecoderClear(&decoder);
-
- pTask->nextCheckId = aggCheckpoint.checkpointId + 1;
- pTask->checkpointInfo = aggCheckpoint.checkpointVer;
-#endif
- return 0;
-}
-
-int32_t streamSaveSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
- ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
- return streamSaveStateInfo(pMeta, pTask);
-}
-
-int32_t streamRecoverSinkLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
- ASSERT(pTask->taskLevel == TASK_LEVEL__SINK);
- return streamLoadStateInfo(pMeta, pTask);
-}
-
-int32_t streamSaveAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
- ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
- // TODO save and copy state
-
- // save state info
- if (streamSaveStateInfo(pMeta, pTask) < 0) {
- return -1;
- }
- return 0;
-}
-
-#if 0
-int32_t streamFetchRecoverStatus(SStreamTask* pTask, const SVgroupInfo* pVgInfo) {
- int32_t taskId = pVgInfo->taskId;
- int32_t nodeId = pVgInfo->vgId;
- SStreamRecoverDownstreamReq req = {
- .streamId = pTask->taskId,
- .downstreamTaskId = taskId,
- .taskId = pTask->taskId,
- };
- int32_t tlen;
- int32_t code;
- tEncodeSize(tEncodeSStreamTaskRecoverReq, &req, tlen, code);
- if (code < 0) {
- return -1;
- }
- void* buf = taosMemoryCalloc(1, sizeof(SMsgHead) + tlen);
- if (buf == NULL) {
- return -1;
- }
- void* abuf = POINTER_SHIFT(buf, sizeof(SMsgHead));
- SEncoder encoder;
- tEncoderInit(&encoder, abuf, tlen);
- if (tEncodeSStreamTaskRecoverReq(&encoder, &req) < 0) {
- tEncoderClear(&encoder);
- taosMemoryFree(buf);
- return -1;
- }
- tEncoderClear(&encoder);
-
- ((SMsgHead*)buf)->vgId = htonl(nodeId);
- SRpcMsg msg = {
- .pCont = buf, .contLen = sizeof(SMsgHead) + tlen,
- /*.msgType = */
- };
- tmsgSendReq(&pVgInfo->epSet, &msg);
-
- return 0;
-}
-
-int32_t streamFetchDownstreamStatus(SStreamMeta* pMeta, SStreamTask* pTask) {
- // set self status to recover_phase1
- SStreamRecoverStatus* pRecover;
- atomic_store_8(&pTask->taskStatus, TASK_STATUS__RECOVER_DOWNSTREAM);
- pRecover = taosHashGet(pMeta->pRecoverStatus, &pTask->taskId, sizeof(int32_t));
- if (pRecover == NULL) {
- pRecover = taosMemoryCalloc(1, sizeof(SStreamRecoverStatus));
- if (pRecover == NULL) {
- return -1;
- }
- pRecover->info = taosArrayInit(0, sizeof(void*));
- if (pRecover->info == NULL) {
- taosMemoryFree(pRecover);
- return -1;
- }
- taosHashPut(pMeta->pRecoverStatus, &pTask->taskId, sizeof(int32_t), &pRecover, sizeof(void*));
- }
-
- if (pTask->outputType == TASK_OUTPUT__FIXED_DISPATCH) {
- pRecover->totReq = 1;
- } else if (pTask->outputType == TASK_OUTPUT__SHUFFLE_DISPATCH) {
- int32_t numOfDownstream = taosArrayGetSize(pTask->shuffleDispatcher.dbInfo.pVgroupInfos);
- pRecover->totReq = numOfDownstream;
- for (int32_t i = 0; i < numOfDownstream; i++) {
- SVgroupInfo* pVgInfo = taosArrayGet(pTask->shuffleDispatcher.dbInfo.pVgroupInfos, i);
- streamFetchRecoverStatus(pTask, pVgInfo);
- }
- } else {
- ASSERT(0);
- }
- return 0;
-}
-#endif
-
-#if 0
-int32_t streamProcessFetchStatusRsp(SStreamMeta* pMeta, SStreamTask* pTask, SStreamRecoverDownstreamRsp* pRsp) {
- // if failed, set timer and retry
- // if successful
- int32_t taskId = pTask->taskId;
- SStreamRecoverStatus* pRecover = taosHashGet(pMeta->pRecoverStatus, &taskId, sizeof(int32_t));
- if (pRecover == NULL) {
- return -1;
- }
-
- taosArrayPush(pRecover->info, &pRsp->checkpointVer);
-
- int32_t leftRsp = atomic_sub_fetch_32(&pRecover->waitingRspCnt, 1);
- ASSERT(leftRsp >= 0);
-
- if (leftRsp == 0) {
- ASSERT(taosArrayGetSize(pRecover->info) == pRecover->totReq);
-
- // srcNodeId -> SStreamCheckpointInfo*
- SHashObj* pFinalChecks = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
- if (pFinalChecks == NULL) return -1;
-
- for (int32_t i = 0; i < pRecover->totReq; i++) {
- SArray* pChecks = taosArrayGetP(pRecover->info, i);
- int32_t sz = taosArrayGetSize(pChecks);
- for (int32_t j = 0; j < sz; j++) {
- SStreamCheckpointInfo* pOneCheck = taosArrayGet(pChecks, j);
- SStreamCheckpointInfo* pCheck = taosHashGet(pFinalChecks, &pOneCheck->srcNodeId, sizeof(int32_t));
- if (pCheck == NULL) {
- pCheck = taosMemoryCalloc(1, sizeof(SStreamCheckpointInfo));
- pCheck->srcNodeId = pOneCheck->srcNodeId;
- pCheck->srcChildId = pOneCheck->srcChildId;
- pCheck->stateProcessedVer = pOneCheck->stateProcessedVer;
- taosHashPut(pFinalChecks, &pCheck->srcNodeId, sizeof(int32_t), &pCheck, sizeof(void*));
- } else {
- pCheck->stateProcessedVer = TMIN(pCheck->stateProcessedVer, pOneCheck->stateProcessedVer);
- }
- }
- }
- // load local state
- //
- // recover
- //
- if (pTask->taskLevel == TASK_LEVEL__SOURCE) {
- qStreamPrepareRecover(pTask->exec.executor, pTask->startVer, pTask->recoverSnapVer);
- if (streamPipelineExec(pTask, 10000, true) < 0) {
- return -1;
- }
- }
- taosHashCleanup(pFinalChecks);
- taosHashRemove(pMeta->pRecoverStatus, &taskId, sizeof(int32_t));
- atomic_store_8(&pTask->taskStatus, TASK_STATUS__NORMAL);
- }
- return 0;
-}
-
-int32_t streamRecoverAggLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
- ASSERT(pTask->taskLevel == TASK_LEVEL__AGG);
- // recover sink level
- // after all sink level recovered
- // choose suitable state to recover
- return 0;
-}
-
-int32_t streamSaveSourceLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
- ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
- // TODO: save and copy state
- return 0;
-}
-
-int32_t streamRecoverSourceLevel(SStreamMeta* pMeta, SStreamTask* pTask) {
- ASSERT(pTask->taskLevel == TASK_LEVEL__SOURCE);
- // if totLevel == 3
- // fetch agg state
- // recover from local state to agg state, not send msg
- // recover from agg state to most recent log v1
- // enable input queue, set status recover_phase2
- // recover from v1 to queue msg v2, set status normal
-
- // if totLevel == 2
- // fetch sink state
- // recover from local state to sink state v1, send msg
- // enable input queue, set status recover_phase2
- // recover from v1 to queue msg v2, set status normal
- return 0;
-}
-
-int32_t streamRecoverTask(SStreamTask* pTask) {
- //
- return 0;
-}
-#endif
diff --git a/source/libs/transport/src/transCli.c b/source/libs/transport/src/transCli.c
index 21444018cd..e0821b8ca6 100644
--- a/source/libs/transport/src/transCli.c
+++ b/source/libs/transport/src/transCli.c
@@ -427,7 +427,7 @@ void cliHandleExceptImpl(SCliConn* pConn, int32_t code) {
if (pCtx == NULL || pCtx->pSem == NULL) {
if (transMsg.info.ahandle == NULL) {
- if (REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) destroyCmsg(pMsg);
+ if (pMsg == NULL || REQUEST_NO_RESP(&pMsg->msg) || pMsg->type == Release) destroyCmsg(pMsg);
once = true;
continue;
}
diff --git a/source/libs/transport/src/transSvr.c b/source/libs/transport/src/transSvr.c
index ac7c23501a..944995c892 100644
--- a/source/libs/transport/src/transSvr.c
+++ b/source/libs/transport/src/transSvr.c
@@ -127,7 +127,7 @@ static void uvFreeCb(uv_handle_t* handle);
static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg);
-static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
+static int uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSvrMsg* msg);
static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
@@ -384,7 +384,7 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
taosMemoryFree(req);
}
-static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
+static int uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
SSvrConn* pConn = smsg->pConn;
STransMsg* pMsg = &smsg->msg;
if (pMsg->pCont == 0) {
@@ -397,6 +397,13 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
pHead->hasEpSet = pMsg->info.hasEpSet;
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
+ // handle invalid drop_task resp, TD-20098
+ if (pMsg->msgType == TDMT_SCH_DROP_TASK && pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
+ transQueuePop(&pConn->srvMsgs);
+ destroySmsg(smsg);
+ return -1;
+ }
+
if (pConn->status == ConnNormal) {
pHead->msgType = (0 == pMsg->msgType ? pConn->inType + 1 : pMsg->msgType);
if (smsg->type == Release) pHead->msgType = 0;
@@ -431,6 +438,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
wb->base = (char*)pHead;
wb->len = len;
+ return 0;
}
static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg) {
@@ -440,7 +448,9 @@ static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg) {
}
uv_buf_t wb;
- uvPrepareSendData(smsg, &wb);
+ if (uvPrepareSendData(smsg, &wb) < 0) {
+ return;
+ }
transRefSrvHandle(pConn);
uv_write_t* req = transReqQueuePush(&pConn->wreqQueue);
@@ -451,8 +461,9 @@ static void uvStartSendResp(SSvrMsg* smsg) {
SSvrConn* pConn = smsg->pConn;
if (pConn->broken == true) {
// persist by
- transFreeMsg(smsg->msg.pCont);
- taosMemoryFree(smsg);
+ destroySmsg(smsg);
+ // transFreeMsg(smsg->msg.pCont);
+ // taosMemoryFree(smsg);
transUnrefSrvHandle(pConn);
return;
}
@@ -748,10 +759,11 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
return;
}
transSockInfo2Str(&sockname, pConn->src);
- struct sockaddr_in addr = *(struct sockaddr_in*)&sockname;
+ struct sockaddr_in addr = *(struct sockaddr_in*)&peername;
pConn->clientIp = addr.sin_addr.s_addr;
pConn->port = ntohs(addr.sin_port);
+
uv_read_start((uv_stream_t*)(pConn->pTcp), uvAllocRecvBufferCb, uvOnRecvCb);
} else {
diff --git a/source/util/src/terror.c b/source/util/src/terror.c
index 88ce8e5c13..f7e56f372f 100644
--- a/source/util/src/terror.c
+++ b/source/util/src/terror.c
@@ -340,7 +340,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last ro
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
-TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECREATED, "Table re-created")
+TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, "Table schema is old")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR, "TDB env open error")
// query
@@ -609,6 +609,9 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_INVALID_MSG, "Invalid message")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_MISMATCH, "Consumer mismatch")
TAOS_DEFINE_ERROR(TSDB_CODE_TMQ_CONSUMER_CLOSED, "Consumer closed")
+// stream
+TAOS_DEFINE_ERROR(TSDB_CODE_STREAM_TASK_NOT_EXIST, "Stream task not exist")
+
// TDLite
TAOS_DEFINE_ERROR(TSDB_CODE_TDLITE_IVLD_OPEN_FLAGS, "Invalid TDLite open flags")
TAOS_DEFINE_ERROR(TSDB_CODE_TDLITE_IVLD_OPEN_DIR, "Invalid TDLite open directory")
diff --git a/source/util/src/tqueue.c b/source/util/src/tqueue.c
index 19b9b89cab..c8f128e666 100644
--- a/source/util/src/tqueue.c
+++ b/source/util/src/tqueue.c
@@ -21,46 +21,6 @@
int64_t tsRpcQueueMemoryAllowed = 0;
int64_t tsRpcQueueMemoryUsed = 0;
-typedef struct STaosQnode STaosQnode;
-
-typedef struct STaosQnode {
- STaosQnode *next;
- STaosQueue *queue;
- int64_t timestamp;
- int32_t size;
- int8_t itype;
- int8_t reserved[3];
- char item[];
-} STaosQnode;
-
-typedef struct STaosQueue {
- STaosQnode *head;
- STaosQnode *tail;
- STaosQueue *next; // for queue set
- STaosQset *qset; // for queue set
- void *ahandle; // for queue set
- FItem itemFp;
- FItems itemsFp;
- TdThreadMutex mutex;
- int64_t memOfItems;
- int32_t numOfItems;
-} STaosQueue;
-
-typedef struct STaosQset {
- STaosQueue *head;
- STaosQueue *current;
- TdThreadMutex mutex;
- tsem_t sem;
- int32_t numOfQueues;
- int32_t numOfItems;
-} STaosQset;
-
-typedef struct STaosQall {
- STaosQnode *current;
- STaosQnode *start;
- int32_t numOfItems;
-} STaosQall;
-
STaosQueue *taosOpenQueue() {
STaosQueue *queue = taosMemoryCalloc(1, sizeof(STaosQueue));
if (queue == NULL) {
diff --git a/source/util/src/tworker.c b/source/util/src/tworker.c
index f7d4173d3f..5971033bf8 100644
--- a/source/util/src/tworker.c
+++ b/source/util/src/tworker.c
@@ -36,14 +36,13 @@ int32_t tQWorkerInit(SQWorkerPool *pool) {
worker->pool = pool;
}
- uInfo("worker:%s is initialized, min:%d max:%d", pool->name, pool->min, pool->max);
+ uDebug("worker:%s is initialized, min:%d max:%d", pool->name, pool->min, pool->max);
return 0;
}
void tQWorkerCleanup(SQWorkerPool *pool) {
for (int32_t i = 0; i < pool->max; ++i) {
SQWorker *worker = pool->workers + i;
- // if (worker == NULL) continue;
if (taosCheckPthreadValid(worker->thread)) {
taosQsetThreadResume(pool->qset);
}
@@ -51,7 +50,6 @@ void tQWorkerCleanup(SQWorkerPool *pool) {
for (int32_t i = 0; i < pool->max; ++i) {
SQWorker *worker = pool->workers + i;
- // if (worker == NULL) continue;
if (taosCheckPthreadValid(worker->thread)) {
taosThreadJoin(worker->thread, NULL);
taosThreadClear(&worker->thread);
@@ -73,11 +71,13 @@ static void *tQWorkerThreadFp(SQWorker *worker) {
taosBlockSIGPIPE();
setThreadName(pool->name);
- uDebug("worker:%s:%d is running", pool->name, worker->id);
+ worker->pid = taosGetSelfPthreadId();
+ uInfo("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid);
while (1) {
if (taosReadQitemFromQset(pool->qset, (void **)&msg, &qinfo) == 0) {
- uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, pool->qset);
+ uInfo("worker:%s:%d qset:%p, got no message and exiting, thread:%08" PRId64, pool->name, worker->id, pool->qset,
+ worker->pid);
break;
}
@@ -124,7 +124,7 @@ STaosQueue *tQWorkerAllocQueue(SQWorkerPool *pool, void *ahandle, FItem fp) {
}
taosThreadMutexUnlock(&pool->mutex);
- uDebug("worker:%s, queue:%p is allocated, ahandle:%p", pool->name, queue, ahandle);
+ uInfo("worker:%s, queue:%p is allocated, ahandle:%p", pool->name, queue, ahandle);
return queue;
}
@@ -191,12 +191,14 @@ static void *tWWorkerThreadFp(SWWorker *worker) {
taosBlockSIGPIPE();
setThreadName(pool->name);
- uDebug("worker:%s:%d is running", pool->name, worker->id);
+ worker->pid = taosGetSelfPthreadId();
+ uInfo("worker:%s:%d is running, thread:%08" PRId64, pool->name, worker->id, worker->pid);
while (1) {
numOfMsgs = taosReadAllQitemsFromQset(worker->qset, worker->qall, &qinfo);
if (numOfMsgs == 0) {
- uDebug("worker:%s:%d qset:%p, got no message and exiting", pool->name, worker->id, worker->qset);
+ uInfo("worker:%s:%d qset:%p, got no message and exiting, thread:%08" PRId64, pool->name, worker->id, worker->qset,
+ worker->pid);
break;
}
@@ -244,7 +246,9 @@ STaosQueue *tWWorkerAllocQueue(SWWorkerPool *pool, void *ahandle, FItems fp) {
pool->nextId = (pool->nextId + 1) % pool->max;
}
- uDebug("worker:%s, queue:%p is allocated, ahandle:%p", pool->name, queue, ahandle);
+ while (worker->pid <= 0) taosMsleep(10);
+ queue->threadId = worker->pid;
+ uInfo("worker:%s, queue:%p is allocated, ahandle:%p thread:%08" PRId64, pool->name, queue, ahandle, queue->threadId);
code = 0;
_OVER:
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py
index 23b6341e66..4e3c9d07ce 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/auto_create_table_json.py
@@ -25,7 +25,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py
index 4473ff2c2d..163cdd0055 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/commandline.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py b/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py
index 4f8e94a887..0fb39c210c 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/custom_col_tag.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py
index 95afdc413e..7599c82483 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/default_json.py
@@ -23,7 +23,8 @@ class TDTestCase:
[TD-11510] taosBenchmark test cases
"""
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py
index 90d0ffd8b1..6be5117b08 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/demo.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/demo.py
@@ -27,7 +27,8 @@ class TDTestCase:
"""
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py
index 7437b46353..e63908ce33 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/insert_alltypes_json.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py
index a706967bc1..73894d5e33 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/invalid_commandline.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py
index 3b9123974f..afefabef66 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/json_tag.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py
index dbddb840ca..d95e6741f8 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/limit_offset_json.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py
index 7f0b082e33..375113c3ef 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/query_json.py
@@ -29,7 +29,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py
index 5b59d84b79..fd79f1e01b 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/sample_csv_json.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py
index ee7bb8a925..200a3c0e69 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_interlace.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py
index d7ccdc2716..2c6d09b0f5 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_json_alltypes.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py
index 7b0f5d1672..241c6d6359 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/sml_telnet_alltypes.py
@@ -24,7 +24,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py
index 1588b4077c..4d0adde192 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/taosadapter_json.py
@@ -25,7 +25,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py
index b8d4c35e58..78cb49846e 100644
--- a/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py
+++ b/tests/develop-test/5-taos-tools/taosbenchmark/telnet_tcp.py
@@ -25,7 +25,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py
index 09976953e1..a6c2062d6c 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestInspect.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -52,7 +53,7 @@ class TDTestCase:
return paths[0]
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBigInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBigInt.py
index 7eaddd9e2d..d10734abd7 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBigInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBigInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBinary.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBinary.py
index c70748e9c2..98a128b489 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBinary.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBinary.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBool.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBool.py
index eb4b8a364a..1342295d7d 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBool.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeBool.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeDouble.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeDouble.py
index 70501965fe..f3de1c7628 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeDouble.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeDouble.py
@@ -28,7 +28,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -51,7 +52,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeFloat.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeFloat.py
index 5e551a373c..1133c1d1a5 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeFloat.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeFloat.py
@@ -28,7 +28,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -51,7 +52,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeInt.py
index f2d4c4f814..933bc41bae 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py
index 5971902cb6..77dd3d3938 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeJson.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py
index f2dca23488..5568c3679f 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeSmallInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -49,7 +50,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py
index 6f2781d645..852aa17dc5 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeTinyInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -49,7 +50,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py
index b226204654..ab61b48c8f 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedBigInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py
index 766110d74e..ae895712c0 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py
index 242681b0fc..86bcfd92f2 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedSmallInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py
index d512cba97c..d67dfea408 100644
--- a/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py
+++ b/tests/develop-test/5-taos-tools/taosdump/taosdumpTestTypeUnsignedTinyInt.py
@@ -27,7 +27,8 @@ class TDTestCase:
'''
return
- def init(self, conn, logSql, replicaVarl=1):
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")
diff --git a/tests/docs-examples-test/csharp.sh b/tests/docs-examples-test/csharp.sh
index 8d1031ab8f..21c19b9b3d 100644
--- a/tests/docs-examples-test/csharp.sh
+++ b/tests/docs-examples-test/csharp.sh
@@ -27,11 +27,11 @@ dotnet run --project optsTelnet/optstelnet.csproj
taos -s "drop database if exists test"
dotnet run --project optsJSON/optsJSON.csproj
-taos -s "create database if exists test"
-dotnet run --project wsConnect/wsConnect.csproj
-dotnet run --project wsInsert/wsInsert.csproj
-dotnet run --project wsStmt/wsStmt.csproj
-dotnet run --project wsQuery/wsQuery.csproj
+taos -s "create database if not exists test"
+# dotnet run --project wsConnect/wsConnect.csproj
+# dotnet run --project wsInsert/wsInsert.csproj
+# dotnet run --project wsStmt/wsStmt.csproj
+# dotnet run --project wsQuery/wsQuery.csproj
taos -s "drop database if exists test"
taos -s "drop database if exists power"
\ No newline at end of file
diff --git a/tests/pytest/concurrent_inquiry.py b/tests/pytest/concurrent_inquiry.py
index 1bb2081d7f..0d281674f5 100644
--- a/tests/pytest/concurrent_inquiry.py
+++ b/tests/pytest/concurrent_inquiry.py
@@ -21,43 +21,52 @@ import argparse
import datetime
import string
from requests.auth import HTTPBasicAuth
-func_list=['avg','count','twa','sum','stddev','leastsquares','min',
-'max','first','last','top','bottom','percentile','apercentile',
-'last_row','diff','spread','distinct']
-condition_list=[
+
+func_list = ['abs', 'acos', 'asin', 'atan', 'ceil', 'cos', 'floor', 'log', 'pow', 'round', 'sin', 'sqrt', 'tan',
+ 'char_length', 'concat', 'concat_ws', 'length', 'lower', 'ltrim', 'rtrim', 'substr', 'upper',
+ 'cast', 'to_iso8601', 'to_json', 'to_unixtimestamp', 'now', 'timediff', 'timetruncate', 'timezone', 'today',
+ 'apercentile', 'avg', 'count', 'elapsed', 'leastsquares', 'spread', 'stddev', 'sum', 'hyperloglog', 'histogram', 'percentile',
+ 'bottom', 'first', 'interp', 'last', 'last_row', 'max', 'min', 'mode', 'sample', 'tail', 'top', 'unique',
+ 'csum', 'derivative', 'diff', 'irate', 'mavg', 'statecount', 'stateduration', 'twa',
+ 'database', 'client_version', 'server_version', 'server_status']
+
+condition_list = [
"where _c0 > now -10d ",
'interval(10s)',
'limit 10',
'group by',
+ 'partition by',
'order by',
'fill(null)'
-
]
-where_list = ['_c0>now-10d',' <50','like',' is null','in']
+
+where_list = ['_c0>now-10d', ' <50', 'like', ' is null', 'in']
+
+
class ConcurrentInquiry:
# def __init__(self,ts=1500000001000,host='127.0.0.1',user='root',password='taosdata',dbname='test',
# stb_prefix='st',subtb_prefix='t',n_Therads=10,r_Therads=10,probabilities=0.05,loop=5,
- # stableNum = 2,subtableNum = 1000,insertRows = 100):
- def __init__(self,ts,host,user,password,dbname,
- stb_prefix,subtb_prefix,n_Therads,r_Therads,probabilities,loop,
- stableNum ,subtableNum ,insertRows ,mix_table, replay):
+ # stableNum = 2,subtableNum = 1000,insertRows = 100):
+ def __init__(self, ts, host, user, password, dbname,
+ stb_prefix, subtb_prefix, n_Therads, r_Therads, probabilities, loop,
+ stableNum, subtableNum, insertRows, mix_table, replay):
self.n_numOfTherads = n_Therads
self.r_numOfTherads = r_Therads
- self.ts=ts
+ self.ts = ts
self.host = host
self.user = user
self.password = password
- self.dbname=dbname
+ self.dbname = dbname
self.stb_prefix = stb_prefix
self.subtb_prefix = subtb_prefix
- self.stb_list=[]
- self.subtb_list=[]
- self.stb_stru_list=[]
- self.subtb_stru_list=[]
- self.stb_tag_list=[]
- self.subtb_tag_list=[]
- self.probabilities = [1-probabilities,probabilities]
- self.ifjoin = [1,0]
+ self.stb_list = []
+ self.subtb_list = []
+ self.stb_stru_list = []
+ self.subtb_stru_list = []
+ self.stb_tag_list = []
+ self.subtb_tag_list = []
+ self.probabilities = [1-probabilities, probabilities]
+ self.ifjoin = [1, 0]
self.loop = loop
self.stableNum = stableNum
self.subtableNum = subtableNum
@@ -66,253 +75,276 @@ class ConcurrentInquiry:
self.max_ts = datetime.datetime.now()
self.min_ts = datetime.datetime.now() - datetime.timedelta(days=5)
self.replay = replay
- def SetThreadsNum(self,num):
- self.numOfTherads=num
- def ret_fcol(self,cl,sql): #返回结果的第一列
+ def SetThreadsNum(self, num):
+ self.numOfTherads = num
+
+ def ret_fcol(self, cl, sql): # 返回结果的第一列
cl.execute(sql)
- fcol_list=[]
+ fcol_list = []
for data in cl:
fcol_list.append(data[0])
return fcol_list
- def r_stb_list(self,cl): #返回超级表列表
- sql='show '+self.dbname+'.stables'
- self.stb_list=self.ret_fcol(cl,sql)
+ def r_stb_list(self, cl): # 返回超级表列表
+ sql = 'show '+self.dbname+'.stables'
+ self.stb_list = self.ret_fcol(cl, sql)
- def r_subtb_list(self,cl,stablename): #每个超级表返回2个子表
- sql='select tbname from '+self.dbname+'.'+stablename+' limit 2;'
- self.subtb_list+=self.ret_fcol(cl,sql)
+ def r_subtb_list(self, cl, stablename): # 每个超级表返回2个子表
+ sql = 'select tbname from '+self.dbname+'.'+stablename+' limit 2;'
+ self.subtb_list += self.ret_fcol(cl, sql)
- def cal_struct(self,cl,tbname): #查看表结构
- tb=[]
- tag=[]
- sql='describe '+self.dbname+'.'+tbname+';'
+ def cal_struct(self, cl, tbname): # 查看表结构
+ tb = []
+ tag = []
+ sql = 'describe '+self.dbname+'.'+tbname+';'
cl.execute(sql)
for data in cl:
if data[3]:
tag.append(data[0])
else:
tb.append(data[0])
- return tb,tag
+ return tb, tag
- def r_stb_stru(self,cl): #获取所有超级表的表结构
+ def r_stb_stru(self, cl): # 获取所有超级表的表结构
for i in self.stb_list:
- tb,tag=self.cal_struct(cl,i)
+ tb, tag = self.cal_struct(cl, i)
self.stb_stru_list.append(tb)
self.stb_tag_list.append(tag)
- def r_subtb_stru(self,cl): #返回所有子表的表结构
+ def r_subtb_stru(self, cl): # 返回所有子表的表结构
for i in self.subtb_list:
- tb,tag=self.cal_struct(cl,i)
+ tb, tag = self.cal_struct(cl, i)
self.subtb_stru_list.append(tb)
self.subtb_tag_list.append(tag)
- def get_timespan(self,cl): #获取时间跨度(仅第一个超级表)
- sql = 'select first(_c0),last(_c0) from ' + self.dbname + '.' + self.stb_list[0] + ';'
+ def get_timespan(self, cl): # 获取时间跨度(仅第一个超级表)
+ sql = 'select first(_c0),last(_c0) from ' + \
+ self.dbname + '.' + self.stb_list[0] + ';'
print(sql)
cl.execute(sql)
for data in cl:
self.max_ts = data[1]
self.min_ts = data[0]
- def get_full(self): #获取所有的表、表结构
+ def get_full(self): # 获取所有的表、表结构
host = self.host
user = self.user
password = self.password
conn = taos.connect(
- host,
- user,
- password,
- )
+ host='%s' % host,
+ user='%s' % user,
+ password='%s' % password,
+ )
cl = conn.cursor()
self.r_stb_list(cl)
for i in self.stb_list:
- self.r_subtb_list(cl,i)
+ self.r_subtb_list(cl, i)
self.r_stb_stru(cl)
self.r_subtb_stru(cl)
self.get_timespan(cl)
cl.close()
- conn.close()
-
- #query condition
- def con_where(self,tlist,col_list,tag_list):
- l=[]
- for i in range(random.randint(0,len(tlist))):
+ conn.close()
+
+ # query condition
+ def con_where(self, tlist, col_list, tag_list):
+ l = []
+ for i in range(random.randint(0, len(tlist))):
c = random.choice(where_list)
if c == '_c0>now-10d':
- rdate = self.min_ts + (self.max_ts - self.min_ts)/10 * random.randint(-11,11)
- conlist = ' _c0 ' + random.choice(['<','>','>=','<=','<>']) + "'" + str(rdate) + "'"
+ rdate = self.min_ts + \
+ (self.max_ts - self.min_ts)/10 * random.randint(-11, 11)
+ conlist = ' _c0 ' + \
+ random.choice(['<', '>', '>=', '<=', '<>']
+ ) + "'" + str(rdate) + "'"
if self.random_pick():
l.append(conlist)
- else: l.append(c)
+ else:
+ l.append(c)
elif '<50' in c:
- conlist = ' ' + random.choice(tlist) + random.choice(['<','>','>=','<=','<>']) + str(random.randrange(-100,100))
- l.append(conlist)
+ conlist = ' ' + random.choice(tlist) + random.choice(
+ ['<', '>', '>=', '<=', '<>']) + str(random.randrange(-100, 100))
+ l.append(conlist)
elif 'is null' in c:
- conlist = ' ' + random.choice(tlist) + random.choice([' is null',' is not null'])
- l.append(conlist)
+ conlist = ' ' + \
+ random.choice(tlist) + \
+ random.choice([' is null', ' is not null'])
+ l.append(conlist)
elif 'in' in c:
in_list = []
temp = []
- for i in range(random.randint(0,100)):
- temp.append(random.randint(-10000,10000))
+ for i in range(random.randint(0, 100)):
+ temp.append(random.randint(-10000, 10000))
temp = (str(i) for i in temp)
in_list.append(temp)
temp1 = []
- for i in range(random.randint(0,100)):
- temp1.append("'" + ''.join(random.sample(string.ascii_letters, random.randint(0,10))) + "'")
- in_list.append(temp1)
- in_list.append(['NULL','NULL'])
- conlist = ' ' + random.choice(tlist) + ' in (' + ','.join(random.choice(in_list)) + ')'
+ for i in range(random.randint(0, 100)):
+ temp1.append(
+ "'" + ''.join(random.sample(string.ascii_letters, random.randint(0, 10))) + "'")
+ in_list.append(temp1)
+ in_list.append(['NULL', 'NULL'])
+ conlist = ' ' + \
+ random.choice(tlist) + ' in (' + \
+ ','.join(random.choice(in_list)) + ')'
l.append(conlist)
else:
s_all = string.ascii_letters
- conlist = ' ' + random.choice(tlist) + " like \'%" + random.choice(s_all) + "%\' "
+ conlist = ' ' + \
+ random.choice(tlist) + " like \'%" + \
+ random.choice(s_all) + "%\' "
l.append(conlist)
- return 'where '+random.choice([' and ',' or ']).join(l)
+ return 'where '+random.choice([' and ', ' or ']).join(l)
- def con_interval(self,tlist,col_list,tag_list):
- interval = 'interval(' + str(random.randint(0,20)) + random.choice(['a','s','d','w','n','y']) + ')'
+ def con_interval(self, tlist, col_list, tag_list):
+ interval = 'interval(' + str(random.randint(0, 20)) + \
+ random.choice(['a', 's', 'd', 'w', 'n', 'y']) + ')'
return interval
- def con_limit(self,tlist,col_list,tag_list):
- rand1 = str(random.randint(0,1000))
- rand2 = str(random.randint(0,1000))
- return random.choice(['limit ' + rand1,'limit ' + rand1 + ' offset '+rand2,
- ' slimit ' + rand1,' slimit ' + rand1 + ' offset ' + rand2,'limit '+rand1 + ' slimit '+ rand2,
- 'limit '+ rand1 + ' offset' + rand2 + ' slimit '+ rand1 + ' soffset ' + rand2 ])
-
- def con_fill(self,tlist,col_list,tag_list):
- return random.choice(['fill(null)','fill(prev)','fill(none)','fill(LINEAR)'])
-
- def con_group(self,tlist,col_list,tag_list):
- rand_tag = random.randint(0,5)
- rand_col = random.randint(0,1)
- if len(tag_list):
- return 'group by '+','.join(random.sample(col_list,rand_col) + random.sample(tag_list,rand_tag))
- else:
- return 'group by '+','.join(random.sample(col_list,rand_col))
+ def con_limit(self, tlist, col_list, tag_list):
+ rand1 = str(random.randint(0, 1000))
+ rand2 = str(random.randint(0, 1000))
+ return random.choice(['limit ' + rand1, 'limit ' + rand1 + ' offset '+rand2,
+ ' slimit ' + rand1, ' slimit ' + rand1 + ' offset ' +
+ rand2, 'limit '+rand1 + ' slimit ' + rand2,
+ 'limit ' + rand1 + ' offset' + rand2 + ' slimit ' + rand1 + ' soffset ' + rand2])
- def con_order(self,tlist,col_list,tag_list):
+ def con_fill(self, tlist, col_list, tag_list):
+ return random.choice(['fill(null)', 'fill(prev)', 'fill(none)', 'fill(LINEAR)'])
+
+ def con_group(self, tlist, col_list, tag_list):
+ rand_tag = random.randint(0, 5)
+ rand_col = random.randint(0, 1)
+ if len(tag_list):
+ return 'group by '+','.join(random.sample(col_list, rand_col) + random.sample(tag_list, rand_tag))
+ else:
+ return 'group by '+','.join(random.sample(col_list, rand_col))
+
+ def con_order(self, tlist, col_list, tag_list):
return 'order by '+random.choice(tlist)
- def con_state_window(self,tlist,col_list,tag_list):
+ def con_state_window(self, tlist, col_list, tag_list):
return 'state_window(' + random.choice(tlist + tag_list) + ')'
- def con_session_window(self,tlist,col_list,tag_list):
- session_window = 'session_window(' + random.choice(tlist + tag_list) + ',' + str(random.randint(0,20)) + random.choice(['a','s','d','w','n','y']) + ')'
+ def con_session_window(self, tlist, col_list, tag_list):
+ session_window = 'session_window(' + random.choice(tlist + tag_list) + ',' + str(
+ random.randint(0, 20)) + random.choice(['a', 's', 'd', 'w', 'n', 'y']) + ')'
return session_window
def gen_subquery_sql(self):
- subsql ,col_num = self.gen_query_sql(1)
+ subsql, col_num = self.gen_query_sql(1)
if col_num == 0:
return 0
- col_list=[]
- tag_list=[]
+ col_list = []
+ tag_list = []
for i in range(col_num):
- col_list.append("taosd%d"%i)
+ col_list.append("taosd%d" % i)
- tlist=col_list+['abc'] #增加不存在的域'abc',是否会引起新bug
- con_rand=random.randint(0,len(condition_list))
- func_rand=random.randint(0,len(func_list))
- col_rand=random.randint(0,len(col_list))
- t_rand=random.randint(0,len(tlist))
- sql='select ' #select
+ tlist = col_list+['abc'] # 增加不存在的域'abc',是否会引起新bug
+ con_rand = random.randint(0, len(condition_list))
+ func_rand = random.randint(0, len(func_list))
+ col_rand = random.randint(0, len(col_list))
+ t_rand = random.randint(0, len(tlist))
+ sql = 'select ' # select
random.shuffle(col_list)
random.shuffle(func_list)
- sel_col_list=[]
- col_rand=random.randint(0,len(col_list))
+ sel_col_list = []
+ col_rand = random.randint(0, len(col_list))
loop = 0
- for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数
- alias = ' as '+ 'sub%d ' % loop
+ for i, j in zip(col_list[0:col_rand], func_list): # 决定每个被查询col的函数
+ alias = ' as ' + 'sub%d ' % loop
loop += 1
pick_func = ''
if j == 'leastsquares':
- pick_func=j+'('+i+',1,1)'
+ pick_func = j+'('+i+',1,1)'
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
- pick_func=j+'('+i+',1)'
+ pick_func = j+'('+i+',1)'
else:
- pick_func=j+'('+i+')'
- if bool(random.getrandbits(1)) :
- pick_func+=alias
+ pick_func = j+'('+i+')'
+ if bool(random.getrandbits(1)):
+ pick_func += alias
sel_col_list.append(pick_func)
if col_rand == 0:
- sql = sql + '*'
- else:
- sql=sql+','.join(sel_col_list) #select col & func
- sql = sql + ' from ('+ subsql +') '
- con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window]
- sel_con=random.sample(con_func,random.randint(0,len(con_func)))
- sel_con_list=[]
- for i in sel_con:
- sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
- sql+=' '.join(sel_con_list) # condition
- #print(sql)
- return sql
-
- def gen_query_sql(self,subquery=0): #生成查询语句
- tbi=random.randint(0,len(self.subtb_list)+len(self.stb_list)) #随机决定查询哪张表
- tbname=''
- col_list=[]
- tag_list=[]
- is_stb=0
- if tbi>len(self.stb_list) :
- tbi=tbi-len(self.stb_list)
- tbname=self.subtb_list[tbi-1]
- col_list=self.subtb_stru_list[tbi-1]
- tag_list=self.subtb_tag_list[tbi-1]
+ sql = sql + '*'
else:
- tbname=self.stb_list[tbi-1]
- col_list=self.stb_stru_list[tbi-1]
- tag_list=self.stb_tag_list[tbi-1]
- is_stb=1
- tlist=col_list+tag_list+['abc'] #增加不存在的域'abc',是否会引起新bug
- con_rand=random.randint(0,len(condition_list))
- func_rand=random.randint(0,len(func_list))
- col_rand=random.randint(0,len(col_list))
- tag_rand=random.randint(0,len(tag_list))
- t_rand=random.randint(0,len(tlist))
- sql='select ' #select
+ sql = sql+','.join(sel_col_list) # select col & func
+ sql = sql + ' from (' + subsql + ') '
+ con_func = [self.con_where, self.con_interval, self.con_limit, self.con_group,
+ self.con_order, self.con_fill, self.con_state_window, self.con_session_window]
+ sel_con = random.sample(con_func, random.randint(0, len(con_func)))
+ sel_con_list = []
+ for i in sel_con:
+ sel_con_list.append(i(tlist, col_list, tag_list)) # 获取对应的条件函数
+ # condition
+ sql += ' '.join(sel_con_list)
+ # print(sql)
+ return sql
+
+ def gen_query_sql(self, subquery=0): # 生成查询语句
+ tbi = random.randint(0, len(self.subtb_list) +
+ len(self.stb_list)) # 随机决定查询哪张表
+ tbname = ''
+ col_list = []
+ tag_list = []
+ is_stb = 0
+ if tbi > len(self.stb_list):
+ tbi = tbi-len(self.stb_list)
+ tbname = self.subtb_list[tbi-1]
+ col_list = self.subtb_stru_list[tbi-1]
+ tag_list = self.subtb_tag_list[tbi-1]
+ else:
+ tbname = self.stb_list[tbi-1]
+ col_list = self.stb_stru_list[tbi-1]
+ tag_list = self.stb_tag_list[tbi-1]
+ is_stb = 1
+ tlist = col_list+tag_list+['abc'] # 增加不存在的域'abc',是否会引起新bug
+ con_rand = random.randint(0, len(condition_list))
+ func_rand = random.randint(0, len(func_list))
+ col_rand = random.randint(0, len(col_list))
+ tag_rand = random.randint(0, len(tag_list))
+ t_rand = random.randint(0, len(tlist))
+ sql = 'select ' # select
random.shuffle(col_list)
random.shuffle(func_list)
- sel_col_list=[]
- col_rand=random.randint(0,len(col_list))
+ sel_col_list = []
+ col_rand = random.randint(0, len(col_list))
loop = 0
- for i,j in zip(col_list[0:col_rand],func_list): #决定每个被查询col的函数
- alias = ' as '+ 'taos%d ' % loop
+ for i, j in zip(col_list[0:col_rand], func_list): # 决定每个被查询col的函数
+ alias = ' as ' + 'taos%d ' % loop
loop += 1
pick_func = ''
if j == 'leastsquares':
- pick_func=j+'('+i+',1,1)'
+ pick_func = j+'('+i+',1,1)'
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
- pick_func=j+'('+i+',1)'
+ pick_func = j+'('+i+',1)'
else:
- pick_func=j+'('+i+')'
- if bool(random.getrandbits(1)) | subquery :
- pick_func+=alias
+ pick_func = j+'('+i+')'
+ if bool(random.getrandbits(1)) | subquery:
+ pick_func += alias
sel_col_list.append(pick_func)
- if col_rand == 0 & subquery :
- sql = sql + '*'
- else:
- sql=sql+','.join(sel_col_list) #select col & func
+ if col_rand == 0 & subquery:
+ sql = sql + '*'
+ else:
+ sql = sql+','.join(sel_col_list) # select col & func
if self.mix_table == 0:
- sql = sql + ' from '+random.choice(self.stb_list+self.subtb_list)+' '
+ sql = sql + ' from ' + \
+ random.choice(self.stb_list+self.subtb_list)+' '
elif self.mix_table == 1:
sql = sql + ' from '+random.choice(self.subtb_list)+' '
else:
- sql = sql + ' from '+random.choice(self.stb_list)+' '
- con_func=[self.con_where,self.con_interval,self.con_limit,self.con_group,self.con_order,self.con_fill,self.con_state_window,self.con_session_window]
- sel_con=random.sample(con_func,random.randint(0,len(con_func)))
- sel_con_list=[]
+ sql = sql + ' from '+random.choice(self.stb_list)+' '
+ con_func = [self.con_where, self.con_interval, self.con_limit, self.con_group,
+ self.con_order, self.con_fill, self.con_state_window, self.con_session_window]
+ sel_con = random.sample(con_func, random.randint(0, len(con_func)))
+ sel_con_list = []
for i in sel_con:
- sel_con_list.append(i(tlist,col_list,tag_list)) #获取对应的条件函数
- sql+=' '.join(sel_con_list) # condition
- #print(sql)
- return (sql,loop)
+ sel_con_list.append(i(tlist, col_list, tag_list)) # 获取对应的条件函数
+ # condition
+ sql += ' '.join(sel_con_list)
+ # print(sql)
+ return (sql, loop)
- def gen_query_join(self): #生成join查询语句
- tbname = []
+ def gen_query_join(self): # 生成join查询语句
+ tbname = []
col_list = []
tag_list = []
col_intersection = []
@@ -321,88 +353,105 @@ class ConcurrentInquiry:
if self.mix_table == 0:
if bool(random.getrandbits(1)):
subtable = True
- tbname = random.sample(self.subtb_list,2)
+ tbname = random.sample(self.subtb_list, 2)
for i in tbname:
- col_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
- tag_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
- col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
- tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
+ col_list.append(
+ self.subtb_stru_list[self.subtb_list.index(i)])
+ tag_list.append(
+ self.subtb_stru_list[self.subtb_list.index(i)])
+ col_intersection = list(
+ set(col_list[0]).intersection(set(col_list[1])))
+ tag_intersection = list(
+ set(tag_list[0]).intersection(set(tag_list[1])))
else:
- tbname = random.sample(self.stb_list,2)
+ tbname = random.sample(self.stb_list, 2)
for i in tbname:
col_list.append(self.stb_stru_list[self.stb_list.index(i)])
tag_list.append(self.stb_stru_list[self.stb_list.index(i)])
- col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
- tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
+ col_intersection = list(
+ set(col_list[0]).intersection(set(col_list[1])))
+ tag_intersection = list(
+ set(tag_list[0]).intersection(set(tag_list[1])))
elif self.mix_table == 1:
subtable = True
- tbname = random.sample(self.subtb_list,2)
+ tbname = random.sample(self.subtb_list, 2)
for i in tbname:
col_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
tag_list.append(self.subtb_stru_list[self.subtb_list.index(i)])
- col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
- tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
+ col_intersection = list(
+ set(col_list[0]).intersection(set(col_list[1])))
+ tag_intersection = list(
+ set(tag_list[0]).intersection(set(tag_list[1])))
else:
- tbname = random.sample(self.stb_list,2)
+ tbname = random.sample(self.stb_list, 2)
for i in tbname:
col_list.append(self.stb_stru_list[self.stb_list.index(i)])
tag_list.append(self.stb_stru_list[self.stb_list.index(i)])
- col_intersection = list(set(col_list[0]).intersection(set(col_list[1])))
- tag_intersection = list(set(tag_list[0]).intersection(set(tag_list[1])))
- con_rand=random.randint(0,len(condition_list))
- col_rand=random.randint(0,len(col_list))
- tag_rand=random.randint(0,len(tag_list))
- sql='select ' #select
-
- sel_col_tag=[]
- col_rand=random.randint(0,len(col_list))
+ col_intersection = list(
+ set(col_list[0]).intersection(set(col_list[1])))
+ tag_intersection = list(
+ set(tag_list[0]).intersection(set(tag_list[1])))
+ con_rand = random.randint(0, len(condition_list))
+ col_rand = random.randint(0, len(col_list))
+ tag_rand = random.randint(0, len(tag_list))
+ sql = 'select ' # select
+
+ sel_col_tag = []
+ col_rand = random.randint(0, len(col_list))
if bool(random.getrandbits(1)):
sql += '*'
else:
- sel_col_tag.append('t1.' + str(random.choice(col_list[0] + tag_list[0])))
- sel_col_tag.append('t2.' + str(random.choice(col_list[1] + tag_list[1])))
+ sel_col_tag.append(
+ 't1.' + str(random.choice(col_list[0] + tag_list[0])))
+ sel_col_tag.append(
+ 't2.' + str(random.choice(col_list[1] + tag_list[1])))
sel_col_list = []
random.shuffle(func_list)
if self.random_pick():
loop = 0
- for i,j in zip(sel_col_tag,func_list): #决定每个被查询col的函数
- alias = ' as '+ 'taos%d ' % loop
+ for i, j in zip(sel_col_tag, func_list): # 决定每个被查询col的函数
+ alias = ' as ' + 'taos%d ' % loop
loop += 1
pick_func = ''
if j == 'leastsquares':
- pick_func=j+'('+i+',1,1)'
+ pick_func = j+'('+i+',1,1)'
elif j == 'top' or j == 'bottom' or j == 'percentile' or j == 'apercentile':
- pick_func=j+'('+i+',1)'
+ pick_func = j+'('+i+',1)'
else:
- pick_func=j+'('+i+')'
+ pick_func = j+'('+i+')'
if bool(random.getrandbits(1)):
- pick_func+=alias
+ pick_func += alias
sel_col_list.append(pick_func)
sql += ','.join(sel_col_list)
else:
sql += ','.join(sel_col_tag)
- sql = sql + ' from '+ str(tbname[0]) +' t1,' + str(tbname[1]) + ' t2 ' #select col & func
+ sql = sql + ' from ' + \
+ str(tbname[0]) + ' t1,' + str(tbname[1]) + \
+ ' t2 ' # select col & func
join_section = None
temp = None
if subtable:
temp = random.choices(col_intersection)
join_section = temp.pop()
- sql += 'where t1._c0 = t2._c0 and ' + 't1.' + str(join_section) + '=t2.' + str(join_section)
+ sql += 'where t1._c0 = t2._c0 and ' + 't1.' + \
+ str(join_section) + '=t2.' + str(join_section)
else:
temp = random.choices(col_intersection+tag_intersection)
join_section = temp.pop()
- sql += 'where t1._c0 = t2._c0 and ' + 't1.' + str(join_section) + '=t2.' + str(join_section)
+ sql += 'where t1._c0 = t2._c0 and ' + 't1.' + \
+ str(join_section) + '=t2.' + str(join_section)
return sql
- def random_pick(self):
- x = random.uniform(0,1)
- cumulative_probability = 0.0
- for item, item_probability in zip(self.ifjoin, self.probabilities):
- cumulative_probability += item_probability
- if x < cumulative_probability:break
+ def random_pick(self):
+ x = random.uniform(0, 1)
+ cumulative_probability = 0.0
+ for item, item_probability in zip(self.ifjoin, self.probabilities):
+ cumulative_probability += item_probability
+ if x < cumulative_probability:
+ break
return item
-
+
def gen_data(self):
stableNum = self.stableNum
subtableNum = self.subtableNum
@@ -412,52 +461,54 @@ class ConcurrentInquiry:
user = self.user
password = self.password
conn = taos.connect(
- host,
- user,
- password,
- )
+ host='%s' % host,
+ user='%s' % user,
+ password='%s' % password,
+ )
cl = conn.cursor()
- cl.execute("drop database if exists %s;" %self.dbname)
- cl.execute("create database if not exists %s;" %self.dbname)
+ cl.execute("drop database if exists %s;" % self.dbname)
+ cl.execute("create database if not exists %s;" % self.dbname)
cl.execute("use %s" % self.dbname)
for k in range(stableNum):
- sql="create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20),c11 int unsigned,c12 smallint unsigned,c13 tinyint unsigned,c14 bigint unsigned) \
+ sql = "create table %s (ts timestamp, c1 int, c2 float, c3 bigint, c4 smallint, c5 tinyint, c6 double, c7 bool,c8 binary(20),c9 nchar(20),c11 int unsigned,c12 smallint unsigned,c13 tinyint unsigned,c14 bigint unsigned) \
tags(t1 int, t2 float, t3 bigint, t4 smallint, t5 tinyint, t6 double, t7 bool,t8 binary(20),t9 nchar(20), t11 int unsigned , t12 smallint unsigned , t13 tinyint unsigned , t14 bigint unsigned)" % (self.stb_prefix+str(k))
cl.execute(sql)
for j in range(subtableNum):
if j % 100 == 0:
sql = "create table %s using %s tags(NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" % \
- (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k))
+ (self.subtb_prefix+str(k)+'_' +
+ str(j), self.stb_prefix+str(k))
else:
sql = "create table %s using %s tags(%d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" % \
- (self.subtb_prefix+str(k)+'_'+str(j),self.stb_prefix+str(k),j,j/2.0,j%41,j%51,j%53,j*1.0,j%2,'taos'+str(j),'涛思'+str(j), j%43, j%23 , j%17 , j%3167)
+ (self.subtb_prefix+str(k)+'_'+str(j), self.stb_prefix+str(k), j, j/2.0, j % 41, j %
+ 51, j % 53, j*1.0, j % 2, 'taos'+str(j), '涛思'+str(j), j % 43, j % 23, j % 17, j % 3167)
print(sql)
cl.execute(sql)
for i in range(insertRows):
- if i % 100 == 0 :
+ if i % 100 == 0:
ret = cl.execute(
- "insert into %s values (%d , NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" %
- (self.subtb_prefix+str(k)+'_'+str(j), t0+i))
+ "insert into %s values (%d , NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL)" %
+ (self.subtb_prefix+str(k)+'_'+str(j), t0+i))
else:
ret = cl.execute(
"insert into %s values (%d , %d,%d,%d,%d,%d,%d,%d,'%s','%s',%d,%d,%d,%d)" %
- (self.subtb_prefix+str(k)+'_'+str(j), t0+i, i%100, i/2.0, i%41, i%51, i%53, i*1.0, i%2,'taos'+str(i),'涛思'+str(i), i%43, i%23 , i%17 , i%3167))
+ (self.subtb_prefix+str(k)+'_'+str(j), t0+i, i % 100, i/2.0, i % 41, i % 51, i % 53, i*1.0, i % 2, 'taos'+str(i), '涛思'+str(i), i % 43, i % 23, i % 17, i % 3167))
cl.close()
conn.close()
-
- def rest_query(self,sql): #rest 接口
+
+ def rest_query(self, sql): # rest 接口
host = self.host
user = self.user
password = self.password
- port =6041
- url = "http://{}:{}/rest/sql".format(host, port )
+ port = 6041
+ url = "http://{}:{}/rest/sql".format(host, port)
try:
- r = requests.post(url,
- data = 'use %s' % self.dbname,
- auth = HTTPBasicAuth('root', 'taosdata'))
- r = requests.post(url,
- data = sql,
- auth = HTTPBasicAuth('root', 'taosdata'))
+ r = requests.post(url,
+ data='use %s' % self.dbname,
+ auth=HTTPBasicAuth('root', 'taosdata'))
+ r = requests.post(url,
+ data=sql,
+ auth=HTTPBasicAuth('root', 'taosdata'))
except:
print("REST API Failure (TODO: more info here)")
raise
@@ -481,165 +532,171 @@ class ConcurrentInquiry:
nRows = rj['rows'] if ('rows' in rj) else 0
return nRows
-
- def query_thread_n(self,threadID): #使用原生python接口查询
+ def query_thread_n(self, threadID): # 使用原生python接口查询
host = self.host
user = self.user
password = self.password
conn = taos.connect(
- host,
- user,
- password,
- )
+ host='%s' % host,
+ user='%s' % user,
+ password='%s' % password,
+ )
cl = conn.cursor()
cl.execute("use %s;" % self.dbname)
- fo = open('bak_sql_n_%d'%threadID,'w+')
+ fo = open('bak_sql_n_%d' % threadID, 'w+')
print("Thread %d: starting" % threadID)
loop = self.loop
while loop:
-
- try:
- if self.random_pick():
- if self.random_pick():
- sql,temp=self.gen_query_sql()
- else:
- sql = self.gen_subquery_sql()
- else:
- sql = self.gen_query_join()
- print("sql is ",sql)
- fo.write(sql+'\n')
- start = time.time()
- cl.execute(sql)
- cl.fetchall()
- end = time.time()
- print("time cost :",end-start)
- except Exception as e:
- print('-'*40)
- print(
- "Failure thread%d, sql: %s \nexception: %s" %
- (threadID, str(sql),str(e)))
- err_uec='Unable to establish connection'
- if err_uec in str(e) and loop >0:
- exit(-1)
- loop -= 1
- if loop == 0: break
- fo.close()
- cl.close()
- conn.close()
- print("Thread %d: finishing" % threadID)
- def query_thread_nr(self,threadID): #使用原生python接口进行重放
- host = self.host
- user = self.user
- password = self.password
- conn = taos.connect(
- host,
- user,
- password,
- )
- cl = conn.cursor()
- cl.execute("use %s;" % self.dbname)
- replay_sql = []
- with open('bak_sql_n_%d'%threadID,'r') as f:
- replay_sql = f.readlines()
- print("Replay Thread %d: starting" % threadID)
- for sql in replay_sql:
try:
- print("sql is ",sql)
+ if self.random_pick():
+ if self.random_pick():
+ sql, temp = self.gen_query_sql()
+ else:
+ sql = self.gen_subquery_sql()
+ else:
+ sql = self.gen_query_join()
+ print("sql is ", sql)
+ fo.write(sql+'\n')
start = time.time()
cl.execute(sql)
cl.fetchall()
end = time.time()
- print("time cost :",end-start)
+ print("time cost :", end-start)
except Exception as e:
print('-'*40)
print(
- "Failure thread%d, sql: %s \nexception: %s" %
- (threadID, str(sql),str(e)))
- err_uec='Unable to establish connection'
- if err_uec in str(e) and loop >0:
- exit(-1)
+ "Failure thread%d, sql: %s \nexception: %s" %
+ (threadID, str(sql), str(e)))
+ err_uec = 'Unable to establish connection'
+ if err_uec in str(e) and loop > 0:
+ exit(-1)
+ loop -= 1
+ if loop == 0:
+ break
+ fo.close()
cl.close()
- conn.close()
+ conn.close()
+ print("Thread %d: finishing" % threadID)
+
+ def query_thread_nr(self, threadID): # 使用原生python接口进行重放
+ host = self.host
+ user = self.user
+ password = self.password
+ conn = taos.connect(
+ host='%s' % host,
+ user='%s' % user,
+ password='%s' % password,
+ )
+ cl = conn.cursor()
+ cl.execute("use %s;" % self.dbname)
+ replay_sql = []
+ with open('bak_sql_n_%d' % threadID, 'r') as f:
+ replay_sql = f.readlines()
+ print("Replay Thread %d: starting" % threadID)
+ for sql in replay_sql:
+ try:
+ print("sql is ", sql)
+ start = time.time()
+ cl.execute(sql)
+ cl.fetchall()
+ end = time.time()
+ print("time cost :", end-start)
+ except Exception as e:
+ print('-'*40)
+ print(
+ "Failure thread%d, sql: %s \nexception: %s" %
+ (threadID, str(sql), str(e)))
+ err_uec = 'Unable to establish connection'
+ if err_uec in str(e) and loop > 0:
+ exit(-1)
+ cl.close()
+ conn.close()
print("Replay Thread %d: finishing" % threadID)
-
- def query_thread_r(self,threadID): #使用rest接口查询
+
+ def query_thread_r(self, threadID): # 使用rest接口查询
print("Thread %d: starting" % threadID)
- fo = open('bak_sql_r_%d'%threadID,'w+')
+ fo = open('bak_sql_r_%d' % threadID, 'w+')
loop = self.loop
while loop:
try:
if self.random_pick():
if self.random_pick():
- sql,temp=self.gen_query_sql()
+ sql, temp = self.gen_query_sql()
else:
sql = self.gen_subquery_sql()
else:
sql = self.gen_query_join()
- print("sql is ",sql)
+ print("sql is ", sql)
fo.write(sql+'\n')
start = time.time()
self.rest_query(sql)
end = time.time()
- print("time cost :",end-start)
+ print("time cost :", end-start)
except Exception as e:
print('-'*40)
print(
- "Failure thread%d, sql: %s \nexception: %s" %
- (threadID, str(sql),str(e)))
- err_uec='Unable to establish connection'
- if err_uec in str(e) and loop >0:
+ "Failure thread%d, sql: %s \nexception: %s" %
+ (threadID, str(sql), str(e)))
+ err_uec = 'Unable to establish connection'
+ if err_uec in str(e) and loop > 0:
exit(-1)
- loop -= 1
- if loop == 0: break
- fo.close()
- print("Thread %d: finishing" % threadID)
+ loop -= 1
+ if loop == 0:
+ break
+ fo.close()
+ print("Thread %d: finishing" % threadID)
- def query_thread_rr(self,threadID): #使用rest接口重放
+ def query_thread_rr(self, threadID): # 使用rest接口重放
print("Replay Thread %d: starting" % threadID)
replay_sql = []
- with open('bak_sql_r_%d'%threadID,'r') as f:
+ with open('bak_sql_r_%d' % threadID, 'r') as f:
replay_sql = f.readlines()
for sql in replay_sql:
try:
- print("sql is ",sql)
+ print("sql is ", sql)
start = time.time()
self.rest_query(sql)
end = time.time()
- print("time cost :",end-start)
+ print("time cost :", end-start)
except Exception as e:
print('-'*40)
print(
- "Failure thread%d, sql: %s \nexception: %s" %
- (threadID, str(sql),str(e)))
- err_uec='Unable to establish connection'
- if err_uec in str(e) and loop >0:
- exit(-1)
- print("Replay Thread %d: finishing" % threadID)
+ "Failure thread%d, sql: %s \nexception: %s" %
+ (threadID, str(sql), str(e)))
+ err_uec = 'Unable to establish connection'
+ if err_uec in str(e) and loop > 0:
+ exit(-1)
+ print("Replay Thread %d: finishing" % threadID)
def run(self):
- print(self.n_numOfTherads,self.r_numOfTherads)
+ print(self.n_numOfTherads, self.r_numOfTherads)
threads = []
- if self.replay: #whether replay
+ if self.replay: # whether replay
for i in range(self.n_numOfTherads):
- thread = threading.Thread(target=self.query_thread_nr, args=(i,))
+ thread = threading.Thread(
+ target=self.query_thread_nr, args=(i,))
threads.append(thread)
- thread.start()
+ thread.start()
for i in range(self.r_numOfTherads):
- thread = threading.Thread(target=self.query_thread_rr, args=(i,))
+ thread = threading.Thread(
+ target=self.query_thread_rr, args=(i,))
threads.append(thread)
thread.start()
else:
for i in range(self.n_numOfTherads):
- thread = threading.Thread(target=self.query_thread_n, args=(i,))
- threads.append(thread)
- thread.start()
- for i in range(self.r_numOfTherads):
- thread = threading.Thread(target=self.query_thread_r, args=(i,))
+ thread = threading.Thread(
+ target=self.query_thread_n, args=(i,))
threads.append(thread)
thread.start()
-
+ for i in range(self.r_numOfTherads):
+ thread = threading.Thread(
+ target=self.query_thread_r, args=(i,))
+ threads.append(thread)
+ thread.start()
+
+
parser = argparse.ArgumentParser()
parser.add_argument(
'-H',
@@ -721,17 +778,17 @@ parser.add_argument(
parser.add_argument(
'-u',
'--user',
- action='store',
+ action='store',
default='root',
type=str,
help='user name')
parser.add_argument(
'-w',
'--password',
- action='store',
- default='root',
+ action='store',
+ default='taosdata',
type=str,
- help='user name')
+ help='password')
parser.add_argument(
'-n',
'--number-of-tables',
@@ -763,15 +820,14 @@ parser.add_argument(
args = parser.parse_args()
q = ConcurrentInquiry(
- args.ts,args.host_name,args.user,args.password,args.db_name,
- args.stb_name_prefix,args.subtb_name_prefix,args.number_of_native_threads,args.number_of_rest_threads,
- args.probabilities,args.loop_per_thread,args.number_of_stables,args.number_of_tables ,args.number_of_records,
- args.mix_stable_subtable, args.replay )
+ args.ts, args.host_name, args.user, args.password, args.db_name,
+ args.stb_name_prefix, args.subtb_name_prefix, args.number_of_native_threads, args.number_of_rest_threads,
+ args.probabilities, args.loop_per_thread, args.number_of_stables, args.number_of_tables, args.number_of_records,
+ args.mix_stable_subtable, args.replay)
-if args.create_table:
+if args.create_table:
q.gen_data()
q.get_full()
-#q.gen_query_sql()
+# q.gen_query_sql()
q.run()
-
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index 1a80aa0681..3b00c6bf24 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -235,6 +235,8 @@
./test.sh -f tsim/stream/basic2.sim
./test.sh -f tsim/stream/drop_stream.sim
./test.sh -f tsim/stream/fillHistoryBasic1.sim
+./test.sh -f tsim/stream/fillHistoryBasic2.sim
+./test.sh -f tsim/stream/fillHistoryBasic3.sim
./test.sh -f tsim/stream/distributeInterval0.sim
./test.sh -f tsim/stream/distributeIntervalRetrive0.sim
./test.sh -f tsim/stream/distributeSession0.sim
@@ -449,6 +451,7 @@
./test.sh -f tsim/tag/smallint.sim
./test.sh -f tsim/tag/tinyint.sim
./test.sh -f tsim/tag/drop_tag.sim
+./test.sh -f tsim/tag/tbNameIn.sim
./test.sh -f tmp/monitor.sim
#======================b1-end===============
diff --git a/tests/script/tsim/stream/fillHistoryBasic1.sim b/tests/script/tsim/stream/fillHistoryBasic1.sim
index 5bbaf1b712..772a09c017 100644
--- a/tests/script/tsim/stream/fillHistoryBasic1.sim
+++ b/tests/script/tsim/stream/fillHistoryBasic1.sim
@@ -26,7 +26,7 @@ sql insert into t1 values(1648791243003,4,2,3,3.1);
sql insert into t1 values(1648791213004,4,2,3,4.1);
-sleep 1000
+sleep 5000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
if $rows != 4 then
@@ -139,7 +139,7 @@ if $data35 != 3 then
endi
sql insert into t1 values(1648791223001,12,14,13,11.1);
-sleep 500
+sleep 1000
sql select * from streamt;
print count(*) , count(d) , sum(a) , max(b) , min(c)
@@ -256,7 +256,7 @@ if $data35 != 3 then
endi
sql insert into t1 values(1648791223002,12,14,13,11.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
@@ -286,7 +286,7 @@ if $data15 != 13 then
endi
sql insert into t1 values(1648791223003,12,14,13,11.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
@@ -318,7 +318,7 @@ endi
sql insert into t1 values(1648791223001,1,1,1,1.1);
sql insert into t1 values(1648791223002,2,2,2,2.1);
sql insert into t1 values(1648791223003,3,3,3,3.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
@@ -350,7 +350,7 @@ endi
sql insert into t1 values(1648791233003,3,2,3,2.1);
sql insert into t1 values(1648791233002,5,6,7,8.1);
sql insert into t1 values(1648791233002,3,2,3,2.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 2
@@ -380,7 +380,7 @@ if $data25 != 3 then
endi
sql insert into t1 values(1648791213004,4,2,3,4.1) (1648791213006,5,4,7,9.1) (1648791213004,40,20,30,40.1) (1648791213005,4,2,3,4.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 0
@@ -410,7 +410,7 @@ if $data05 != 3 then
endi
sql insert into t1 values(1648791223004,4,2,3,4.1) (1648791233006,5,4,7,9.1) (1648791223004,40,20,30,40.1) (1648791233005,4,2,3,4.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
@@ -466,9 +466,6 @@ if $data25 != 3 then
endi
-
-
-
sql create database test2 vgroups 1;
sql select * from information_schema.ins_databases;
@@ -484,7 +481,7 @@ sql insert into t1 values(1648791213004,4,2,3,4.1);
sql create stream stream2 trigger at_once fill_history 1 into streamt as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from t1 interval(10s);
-sleep 1000
+sleep 5000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
if $rows != 4 then
@@ -597,7 +594,7 @@ if $data35 != 3 then
endi
sql insert into t1 values(1648791223001,12,14,13,11.1);
-sleep 500
+sleep 1000
sql select * from streamt;
print count(*) , count(d) , sum(a) , max(b) , min(c)
@@ -714,7 +711,7 @@ if $data35 != 3 then
endi
sql insert into t1 values(1648791223002,12,14,13,11.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
@@ -744,7 +741,7 @@ if $data15 != 13 then
endi
sql insert into t1 values(1648791223003,12,14,13,11.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
@@ -776,7 +773,7 @@ endi
sql insert into t1 values(1648791223001,1,1,1,1.1);
sql insert into t1 values(1648791223002,2,2,2,2.1);
sql insert into t1 values(1648791223003,3,3,3,3.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
@@ -808,7 +805,7 @@ endi
sql insert into t1 values(1648791233003,3,2,3,2.1);
sql insert into t1 values(1648791233002,5,6,7,8.1);
sql insert into t1 values(1648791233002,3,2,3,2.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 2
@@ -838,7 +835,7 @@ if $data25 != 3 then
endi
sql insert into t1 values(1648791213004,4,2,3,4.1) (1648791213006,5,4,7,9.1) (1648791213004,40,20,30,40.1) (1648791213005,4,2,3,4.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 0
@@ -868,7 +865,7 @@ if $data05 != 3 then
endi
sql insert into t1 values(1648791223004,4,2,3,4.1) (1648791233006,5,4,7,9.1) (1648791223004,40,20,30,40.1) (1648791233005,4,2,3,4.1);
-sleep 100
+sleep 1000
sql select `_wstart`, c1, c2 ,c3 ,c4, c5 from streamt;
# row 1
diff --git a/tests/script/tsim/stream/fillHistoryBasic2.sim b/tests/script/tsim/stream/fillHistoryBasic2.sim
new file mode 100644
index 0000000000..3af198259d
--- /dev/null
+++ b/tests/script/tsim/stream/fillHistoryBasic2.sim
@@ -0,0 +1,277 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/deploy.sh -n dnode2 -i 2
+
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql create dnode $hostname2 port 7200
+
+system sh/exec.sh -n dnode2 -s start
+
+print ===== step1
+$x = 0
+step1:
+ $x = $x + 1
+ sleep 1000
+ if $x == 10 then
+ print ====> dnode not ready!
+ return -1
+ endi
+sql select * from information_schema.ins_dnodes
+print ===> $data00 $data01 $data02 $data03 $data04 $data05
+print ===> $data10 $data11 $data12 $data13 $data14 $data15
+if $rows != 2 then
+ return -1
+endi
+if $data(1)[4] != ready then
+ goto step1
+endi
+if $data(2)[4] != ready then
+ goto step1
+endi
+
+print ===== step2
+sql drop stream if exists stream_t1;
+sql drop database if exists test;
+sql create database test vgroups 4;
+sql use test;
+sql create stable st(ts timestamp, a int, b int , c int, d double) tags(ta int,tb int,tc int);
+sql create table ts1 using st tags(1,1,1);
+sql create table ts2 using st tags(2,2,2);
+sql create table ts3 using st tags(3,2,2);
+sql create table ts4 using st tags(4,2,2);
+
+sql insert into ts1 values(1648791213001,1,12,3,1.0);
+sql insert into ts2 values(1648791213001,1,12,3,1.0);
+
+sql insert into ts3 values(1648791213001,1,12,3,1.0);
+sql insert into ts4 values(1648791213001,1,12,3,1.0);
+
+sql insert into ts1 values(1648791213002,NULL,NULL,NULL,NULL);
+sql insert into ts2 values(1648791213002,NULL,NULL,NULL,NULL);
+
+sql insert into ts3 values(1648791213002,NULL,NULL,NULL,NULL);
+sql insert into ts4 values(1648791213002,NULL,NULL,NULL,NULL);
+
+sql insert into ts1 values(1648791223002,2,2,3,1.1);
+sql insert into ts1 values(1648791233003,3,2,3,2.1);
+sql insert into ts2 values(1648791243004,4,2,43,73.1);
+sql insert into ts1 values(1648791213002,24,22,23,4.1);
+sql insert into ts1 values(1648791243005,4,20,3,3.1);
+sql insert into ts2 values(1648791243006,4,2,3,3.1) (1648791243007,4,2,3,3.1) ;
+sql insert into ts1 values(1648791243008,4,2,30,3.1) (1648791243009,4,2,3,3.1) (1648791243010,4,2,3,3.1) ;
+sql insert into ts2 values(1648791243011,4,2,3,3.1) (1648791243012,34,32,33,3.1) (1648791243013,4,2,3,3.1) (1648791243014,4,2,13,3.1);
+sql insert into ts1 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ;
+sql insert into ts2 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) (1648791233004,13,12,13,2.1) ;
+sql insert into ts1 values(1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ;
+
+sql insert into ts3 values(1648791223002,2,2,3,1.1);
+sql insert into ts4 values(1648791233003,3,2,3,2.1);
+sql insert into ts3 values(1648791243004,4,2,43,73.1);
+sql insert into ts4 values(1648791213002,24,22,23,4.1);
+sql insert into ts3 values(1648791243005,4,20,3,3.1);
+sql insert into ts4 values(1648791243006,4,2,3,3.1) (1648791243007,4,2,3,3.1) ;
+sql insert into ts3 values(1648791243008,4,2,30,3.1) (1648791243009,4,2,3,3.1) (1648791243010,4,2,3,3.1) ;
+sql insert into ts4 values(1648791243011,4,2,3,3.1) (1648791243012,34,32,33,3.1) (1648791243013,4,2,3,3.1) (1648791243014,4,2,13,3.1);
+sql insert into ts3 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ;
+sql insert into ts4 values(1648791243005,4,42,3,3.1) (1648791243003,4,2,33,3.1) (1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) (1648791233004,13,12,13,2.1) ;
+sql insert into ts3 values(1648791243006,4,2,3,3.1) (1648791213001,1,52,13,1.0) (1648791223001,22,22,83,1.1) ;
+
+sql create stream stream_t1 trigger at_once fill_history 1 watermark 1d into streamtST1 as select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5 from st interval(10s);
+
+sleep 1000
+
+$loop_count = 0
+loop1:
+sleep 300
+sql select * from streamtST1;
+
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+# row 0
+if $data01 != 8 then
+ print =====data01=$data01
+ goto loop1
+endi
+
+if $data02 != 6 then
+ print =====data02=$data02
+ goto loop1
+endi
+
+if $data03 != 52 then
+ print ======data03=$data03
+ goto loop1
+endi
+
+if $data04 != 52 then
+ print ======data04=$data04
+ goto loop1
+endi
+
+if $data05 != 13 then
+ print ======data05=$data05
+ goto loop1
+endi
+
+# row 1
+if $data11 != 6 then
+ print =====data11=$data11
+ goto loop1
+endi
+
+if $data12 != 6 then
+ print =====data12=$data12
+ goto loop1
+endi
+
+if $data13 != 92 then
+ print ======$data13
+ return -1
+endi
+
+if $data14 != 22 then
+ print ======$data14
+ return -1
+endi
+
+if $data15 != 3 then
+ print ======$data15
+ return -1
+endi
+
+# row 2
+if $data21 != 4 then
+ print =====data21=$data21
+ goto loop1
+endi
+
+if $data22 != 4 then
+ print =====data22=$data22
+ goto loop1
+endi
+
+if $data23 != 32 then
+ print ======$data23
+ return -1
+endi
+
+if $data24 != 12 then
+ print ======$data24
+ return -1
+endi
+
+if $data25 != 3 then
+ print ======$data25
+ return -1
+endi
+
+# row 3
+if $data31 != 30 then
+ print =====data31=$data31
+ goto loop1
+endi
+
+if $data32 != 30 then
+ print =====data32=$data32
+ goto loop1
+endi
+
+if $data33 != 180 then
+ print ======$data33
+ return -1
+endi
+
+if $data34 != 42 then
+ print ======$data34
+ return -1
+endi
+
+if $data35 != 3 then
+ print ======$data35
+ return -1
+endi
+
+sql select _wstart, count(*) c1, count(d) c2 , sum(a) c3 , max(b) c4, min(c) c5, avg(d) from st interval(10s);
+
+
+sql create database test1 vgroups 4;
+sql use test1;
+sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
+sql create table ts1 using st tags(1,1,1);
+sql create table ts2 using st tags(2,2,2);
+
+sql insert into ts1 values(1648791211000,1,2,3);
+sql insert into ts1 values(1648791222001,2,2,3);
+sql insert into ts2 values(1648791211000,1,2,3);
+sql insert into ts2 values(1648791222001,2,2,3);
+
+sql create stream stream_t2 trigger at_once fill_history 1 watermark 20s into streamtST1 as select _wstart, count(*) c1, count(a) c2 , sum(a) c3 , max(b) c5, min(c) c6 from st interval(10s) ;
+
+$loop_count = 0
+loop2:
+sql select * from streamtST1;
+
+sleep 300
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+# row 0
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop2
+endi
+
+#rows 1
+if $data11 != 2 then
+ print =====data11=$data11
+ goto loop2
+endi
+
+#max,min selectivity
+sql create database test3 vgroups 4;
+sql use test3;
+sql create stable st(ts timestamp, a int, b int , c int) tags(ta int,tb int,tc int);
+sql create table ts1 using st tags(1,1,1);
+sql create table ts2 using st tags(2,2,2);
+sql create stream stream_t3 trigger at_once into streamtST3 as select ts, min(a) c6, a, b, c, ta, tb, tc from st interval(10s) ;
+
+sql insert into ts1 values(1648791211000,1,2,3);
+sleep 50
+sql insert into ts1 values(1648791222001,2,2,3);
+sleep 50
+sql insert into ts2 values(1648791211000,1,2,3);
+sleep 50
+sql insert into ts2 values(1648791222001,2,2,3);
+sleep 50
+
+$loop_count = 0
+loop3:
+sql select * from streamtST3;
+
+sleep 300
+$loop_count = $loop_count + 1
+if $loop_count == 10 then
+ return -1
+endi
+
+# row 0
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop3
+endi
+
+# row 1
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop3
+endi
+
+system sh/stop_dnodes.sh
diff --git a/tests/script/tsim/stream/fillHistoryBasic3.sim b/tests/script/tsim/stream/fillHistoryBasic3.sim
new file mode 100644
index 0000000000..db8d6bc2d0
--- /dev/null
+++ b/tests/script/tsim/stream/fillHistoryBasic3.sim
@@ -0,0 +1,203 @@
+$loop_all = 0
+looptest:
+
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sleep 50
+sql connect
+
+sql create database test vgroups 1;
+sql create database test2 vgroups 4;
+sql use test2;
+sql create stable st(ts timestamp, a int, b int, c int, d double) tags(ta int,tb int,tc int);
+sql create table t1 using st tags(1,1,1);
+sql create table t2 using st tags(2,2,2);
+
+sql insert into t1 values(1648791213000,NULL,NULL,NULL,NULL);
+sql insert into t2 values(1648791213000,NULL,NULL,NULL,NULL);
+
+sql create stream streams2 trigger at_once fill_history 1 into test.streamt2 as select _wstart c1, count(*) c2, max(a) c3 from st partition by a interval(10s);
+
+sleep 3000
+
+$loop_count = 0
+
+loop7:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop7
+endi
+
+if $data02 != NULL then
+ print =====data02=$data02
+ goto loop7
+endi
+
+sql insert into t1 values(1648791213000,1,2,3,1.0);
+sql insert into t2 values(1648791213000,1,2,3,1.0);
+
+$loop_count = 0
+
+loop8:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop8
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop8
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+
+$loop_count = 0
+
+loop9:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop9
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop9
+endi
+
+sql insert into t1 values(1648791213000,2,2,3,1.0);
+sql insert into t1 values(1648791213001,2,2,3,1.0);
+sql insert into t1 values(1648791213002,2,2,3,1.0);
+sql insert into t1 values(1648791213002,1,2,3,1.0);
+sql insert into t2 values(1648791213000,2,2,3,1.0);
+sql insert into t2 values(1648791213001,2,2,3,1.0);
+sql insert into t2 values(1648791213002,2,2,3,1.0);
+sql insert into t2 values(1648791213002,1,2,3,1.0);
+
+$loop_count = 0
+
+loop10:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop10
+endi
+
+if $data02 != 1 then
+ print =====data02=$data02
+ goto loop10
+endi
+
+if $data11 != 4 thenloop4
+ print =====data11=$data11
+ goto loop10
+endi
+
+if $data12 != 2 then
+ print =====data12=$data12
+ goto loop10
+endi
+
+sql insert into t1 values(1648791223000,1,2,3,1.0);
+sql insert into t1 values(1648791223001,1,2,3,1.0);
+sql insert into t1 values(1648791223002,3,2,3,1.0);
+sql insert into t1 values(1648791223003,3,2,3,1.0);
+sql insert into t1 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+sql insert into t2 values(1648791223000,1,2,3,1.0);
+sql insert into t2 values(1648791223001,1,2,3,1.0);
+sql insert into t2 values(1648791223002,3,2,3,1.0);
+sql insert into t2 values(1648791223003,3,2,3,1.0);
+sql insert into t2 values(1648791213001,1,2,3,1.0) (1648791223001,2,2,3,1.0) (1648791223003,1,2,3,1.0);
+
+$loop_count = 0
+
+loop11:
+sleep 50
+sql select * from test.streamt2 order by c1, c2, c3;
+
+$loop_count = $loop_count + 1
+if $loop_count == 20 then
+ return -1
+endi
+
+if $data01 != 2 then
+ print =====data01=$data01
+ goto loop11
+endi
+
+if $data02 != 2 then
+ print =====data02=$data02
+ goto loop11
+endi
+
+if $data11 != 4 then
+ print =====data11=$data11
+ goto loop11
+endi
+
+if $data12 != 1 then
+ print =====data12=$data12
+ goto loop11
+endi
+
+if $data21 != 2 then
+ print =====data21=$data21
+ goto loop11
+endi
+
+if $data22 != 2 then
+ print =====data22=$data22
+ goto loop11
+endi
+
+if $data31 != 2 then
+ print =====data31=$data31
+ goto loop11
+endi
+
+if $data32 != 3 then
+ print =====data32=$data32
+ goto loop11
+endi
+
+if $data41 != 4 then
+ print =====data41=$data41
+ goto loop11
+endi
+
+if $data42 != 1 then
+ print =====data42=$data42
+ goto loop11
+endi
diff --git a/tests/script/tsim/tag/tbNameIn.sim b/tests/script/tsim/tag/tbNameIn.sim
new file mode 100644
index 0000000000..1af4bd6a9e
--- /dev/null
+++ b/tests/script/tsim/tag/tbNameIn.sim
@@ -0,0 +1,102 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/exec.sh -n dnode1 -s start
+sql connect
+
+print ======== step1
+sql drop database if exists db1;
+sql create database db1 vgroups 3;
+sql use db1;
+sql create stable st1 (ts timestamp, f1 int) tags(tg1 int);
+sql create table tb1 using st1 tags(1);
+sql create table tb2 using st1 tags(2);
+sql create table tb3 using st1 tags(3);
+sql create table tb4 using st1 tags(4);
+sql create table tb5 using st1 tags(5);
+sql create table tb6 using st1 tags(6);
+sql create table tb7 using st1 tags(7);
+sql create table tb8 using st1 tags(8);
+
+sql insert into tb1 values ('2022-07-10 16:31:01', 1);
+sql insert into tb2 values ('2022-07-10 16:31:02', 2);
+sql insert into tb3 values ('2022-07-10 16:31:03', 3);
+sql insert into tb4 values ('2022-07-10 16:31:04', 4);
+sql insert into tb5 values ('2022-07-10 16:31:05', 5);
+sql insert into tb6 values ('2022-07-10 16:31:06', 6);
+sql insert into tb7 values ('2022-07-10 16:31:07', 7);
+sql insert into tb8 values ('2022-07-10 16:31:08', 8);
+
+sql select * from tb1 where tbname in ('tb1');
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from tb1 where tbname in ('tb1','tb1');
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from tb1 where tbname in ('tb1','tb2','tb1');
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from tb1 where tbname in ('tb1','tb2','st1');
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from tb1 where tbname = 'tb1';
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from tb1 where tbname > 'tb1';
+if $rows != 0 then
+ return -1
+endi
+sql select * from st1 where tbname in ('tb1');
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from st1 where tbname in ('tb1','tb1');
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from st1 where tbname in ('tb1','tb2','tb1');
+if $rows != 2 then
+ return -1
+endi
+sql select * from st1 where tbname in ('tb1','tb2','st1');
+if $rows != 2 then
+ return -1
+endi
+sql select * from st1 where tbname = 'tb1';
+if $rows != 1 then
+ return -1
+endi
+if $data01 != 1 then
+ return -1
+endi
+sql select * from st1 where tbname > 'tb1';
+if $rows != 7 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
diff --git a/tests/system-test/0-others/cachemodel.py b/tests/system-test/0-others/cachemodel.py
index 53834f792d..a8efd0d719 100644
--- a/tests/system-test/0-others/cachemodel.py
+++ b/tests/system-test/0-others/cachemodel.py
@@ -15,6 +15,7 @@ class TDTestCase:
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
diff --git a/tests/system-test/0-others/compatibility.py b/tests/system-test/0-others/compatibility.py
index 88fbcd179d..619031fc29 100644
--- a/tests/system-test/0-others/compatibility.py
+++ b/tests/system-test/0-others/compatibility.py
@@ -23,6 +23,7 @@ class TDTestCase:
return
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/fsync.py b/tests/system-test/0-others/fsync.py
index fe470c442e..04a9f5c941 100644
--- a/tests/system-test/0-others/fsync.py
+++ b/tests/system-test/0-others/fsync.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/show.py b/tests/system-test/0-others/show.py
index 6ce8872cda..b5d6a0d1a3 100644
--- a/tests/system-test/0-others/show.py
+++ b/tests/system-test/0-others/show.py
@@ -21,6 +21,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
diff --git a/tests/system-test/0-others/sysinfo.py b/tests/system-test/0-others/sysinfo.py
index b9ea39fc84..4ddae42ac5 100644
--- a/tests/system-test/0-others/sysinfo.py
+++ b/tests/system-test/0-others/sysinfo.py
@@ -21,6 +21,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.dbname = 'db'
diff --git a/tests/system-test/0-others/taosShell.py b/tests/system-test/0-others/taosShell.py
index accb49dbfb..e3095e8b93 100644
--- a/tests/system-test/0-others/taosShell.py
+++ b/tests/system-test/0-others/taosShell.py
@@ -109,6 +109,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/taosShellError.py b/tests/system-test/0-others/taosShellError.py
index e8271387a8..5e6a590806 100644
--- a/tests/system-test/0-others/taosShellError.py
+++ b/tests/system-test/0-others/taosShellError.py
@@ -111,6 +111,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/taosShellNetChk.py b/tests/system-test/0-others/taosShellNetChk.py
index 72c06f71be..781fcae638 100644
--- a/tests/system-test/0-others/taosShellNetChk.py
+++ b/tests/system-test/0-others/taosShellNetChk.py
@@ -111,6 +111,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/taosdMonitor.py b/tests/system-test/0-others/taosdMonitor.py
index a9907419e4..944ff52d5b 100644
--- a/tests/system-test/0-others/taosdMonitor.py
+++ b/tests/system-test/0-others/taosdMonitor.py
@@ -285,6 +285,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/taosdShell.py b/tests/system-test/0-others/taosdShell.py
index 83b4956c9b..b743783a4f 100644
--- a/tests/system-test/0-others/taosdShell.py
+++ b/tests/system-test/0-others/taosdShell.py
@@ -45,6 +45,7 @@ class TDTestCase:
# print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
@@ -90,7 +91,7 @@ class TDTestCase:
break
else:
tdLog.info( "wait start taosd ,times: %d "%i)
- sleep
+ time.sleep(1)
i+= 1
else :
tdLog.exit("taosd %s is not running "%startAction)
diff --git a/tests/system-test/0-others/taosdlog.py b/tests/system-test/0-others/taosdlog.py
index ba265abc2d..d4698960cd 100644
--- a/tests/system-test/0-others/taosdlog.py
+++ b/tests/system-test/0-others/taosdlog.py
@@ -11,6 +11,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/telemetry.py b/tests/system-test/0-others/telemetry.py
index 021b9aed2c..bc5d276faa 100644
--- a/tests/system-test/0-others/telemetry.py
+++ b/tests/system-test/0-others/telemetry.py
@@ -174,6 +174,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/0-others/udfTest.py b/tests/system-test/0-others/udfTest.py
index a58e22ba1c..1f6096dd5a 100644
--- a/tests/system-test/0-others/udfTest.py
+++ b/tests/system-test/0-others/udfTest.py
@@ -14,6 +14,7 @@ import subprocess
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/0-others/udf_cfg1.py b/tests/system-test/0-others/udf_cfg1.py
index 9b1874d576..35d43ea9b7 100644
--- a/tests/system-test/0-others/udf_cfg1.py
+++ b/tests/system-test/0-others/udf_cfg1.py
@@ -16,6 +16,7 @@ class TDTestCase:
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":0}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/0-others/udf_cfg2.py b/tests/system-test/0-others/udf_cfg2.py
index 67848a9430..b49c99de34 100644
--- a/tests/system-test/0-others/udf_cfg2.py
+++ b/tests/system-test/0-others/udf_cfg2.py
@@ -16,6 +16,7 @@ class TDTestCase:
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":1}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/0-others/udf_cluster.py b/tests/system-test/0-others/udf_cluster.py
index a20bc128a2..90b6df16ff 100644
--- a/tests/system-test/0-others/udf_cluster.py
+++ b/tests/system-test/0-others/udf_cluster.py
@@ -20,6 +20,7 @@ class MyDnodes(TDDnodes):
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
self.depoly_cluster(3)
diff --git a/tests/system-test/0-others/udf_create.py b/tests/system-test/0-others/udf_create.py
index 736943cc56..9426e7eacb 100644
--- a/tests/system-test/0-others/udf_create.py
+++ b/tests/system-test/0-others/udf_create.py
@@ -16,6 +16,7 @@ import threading
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/0-others/udf_restart_taosd.py b/tests/system-test/0-others/udf_restart_taosd.py
index 02c59c21e1..f6134909b4 100644
--- a/tests/system-test/0-others/udf_restart_taosd.py
+++ b/tests/system-test/0-others/udf_restart_taosd.py
@@ -13,6 +13,7 @@ import subprocess
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/0-others/user_control.py b/tests/system-test/0-others/user_control.py
index c6a1a7eac2..3918828646 100644
--- a/tests/system-test/0-others/user_control.py
+++ b/tests/system-test/0-others/user_control.py
@@ -155,6 +155,7 @@ class User:
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/1-insert/alter_database.py b/tests/system-test/1-insert/alter_database.py
index 6d9e04ec42..1918c0ef76 100644
--- a/tests/system-test/1-insert/alter_database.py
+++ b/tests/system-test/1-insert/alter_database.py
@@ -12,6 +12,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),logSql)
self.buffer_boundary = [3,4097,8193,12289,16384]
diff --git a/tests/system-test/1-insert/alter_stable.py b/tests/system-test/1-insert/alter_stable.py
index 77ff6ae0f8..3e82b573b1 100644
--- a/tests/system-test/1-insert/alter_stable.py
+++ b/tests/system-test/1-insert/alter_stable.py
@@ -21,6 +21,7 @@ from util import constant
from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
diff --git a/tests/system-test/1-insert/alter_table.py b/tests/system-test/1-insert/alter_table.py
index aed4089cfa..6a437d4601 100644
--- a/tests/system-test/1-insert/alter_table.py
+++ b/tests/system-test/1-insert/alter_table.py
@@ -22,6 +22,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
diff --git a/tests/system-test/1-insert/block_wise.py b/tests/system-test/1-insert/block_wise.py
index bb35154746..0e17a01d05 100644
--- a/tests/system-test/1-insert/block_wise.py
+++ b/tests/system-test/1-insert/block_wise.py
@@ -143,6 +143,7 @@ class BSMAschema:
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.precision = "ms"
diff --git a/tests/system-test/1-insert/create_retentions.py b/tests/system-test/1-insert/create_retentions.py
index d16e506b53..e673815c73 100644
--- a/tests/system-test/1-insert/create_retentions.py
+++ b/tests/system-test/1-insert/create_retentions.py
@@ -45,6 +45,7 @@ NTBNAME = "nt1"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
diff --git a/tests/system-test/1-insert/database_pre_suf.py b/tests/system-test/1-insert/database_pre_suf.py
index c921619e56..862edbdde9 100755
--- a/tests/system-test/1-insert/database_pre_suf.py
+++ b/tests/system-test/1-insert/database_pre_suf.py
@@ -29,6 +29,7 @@ class TDTestCase:
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/1-insert/db_tb_name_check.py b/tests/system-test/1-insert/db_tb_name_check.py
index fd0bd43b01..23bb539620 100644
--- a/tests/system-test/1-insert/db_tb_name_check.py
+++ b/tests/system-test/1-insert/db_tb_name_check.py
@@ -32,6 +32,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.special_name = ['!','@','#','$','%','^','&','*','(',')','[',']','{','}',\
diff --git a/tests/system-test/1-insert/delete_data.py b/tests/system-test/1-insert/delete_data.py
index d935a08133..c085d2763a 100644
--- a/tests/system-test/1-insert/delete_data.py
+++ b/tests/system-test/1-insert/delete_data.py
@@ -1,302 +1,312 @@
-###################################################################
-# Copyright (c) 2016 by TAOS Technologies, Inc.
-# All rights reserved.
-#
-# This file is proprietary and confidential to TAOS Technologies.
-# No part of this file may be reproduced, stored, transmitted,
-# disclosed or used in any form or by any means other than as
-# expressly provided by the written permission from Jianhui Tao
-#
-###################################################################
-
-# -*- coding: utf-8 -*-
-
-import random
-import string
-
-from numpy import logspace
-from util import constant
-from util.log import *
-from util.cases import *
-from util.sql import *
-from util.common import *
-from util.sqlset import TDSetSql
-
-class TDTestCase:
- def init(self, conn, logSql, replicaVar=1):
- tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
- self.dbname = 'db_test'
- self.setsql = TDSetSql()
- self.stbname = 'stb'
- self.ntbname = 'ntb'
- self.rowNum = 5
- self.tbnum = 2
- self.ts = 1537146000000
- self.binary_str = 'taosdata'
- self.nchar_str = '涛思数据'
- self.str_length = 20
- self.column_dict = {
- 'col1': 'tinyint',
- 'col2': 'smallint',
- 'col3': 'int',
- 'col4': 'bigint',
- 'col5': 'tinyint unsigned',
- 'col6': 'smallint unsigned',
- 'col7': 'int unsigned',
- 'col8': 'bigint unsigned',
- 'col9': 'float',
- 'col10': 'double',
- 'col11': 'bool',
- 'col12': f'binary({self.str_length})',
- 'col13': f'nchar({self.str_length})',
-
- }
-
- self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
- self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
- self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX)
- self.bigint_val = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX)
- self.untingint_val = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX)
- self.unsmallint_val = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX)
- self.unint_val = random.randint(constant.INT_UN_MIN,constant.INT_MAX)
- self.unbigint_val = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX)
- self.float_val = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX)
- self.double_val = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300))
- self.bool_val = random.randint(0,100)%2
- self.binary_val = tdCom.getLongName(random.randint(0,self.str_length))
- self.nchar_val = tdCom.getLongName(random.randint(0,self.str_length))
- self.base_data = {
- 'tinyint':self.tinyint_val,
- 'smallint':self.smallint_val,
- 'int':self.int_val,
- 'bigint':self.bigint_val,
- 'tinyint unsigned':self.untingint_val,
- 'smallint unsigned':self.unsmallint_val,
- 'int unsigned':self.unint_val,
- 'bigint unsigned':self.unbigint_val,
- 'bool':self.bool_val,
- 'float':self.float_val,
- 'double':self.double_val,
- 'binary':self.binary_val,
- 'nchar':self.nchar_val
- }
-
- def insert_base_data(self,col_type,tbname,rows,base_data):
- for i in range(rows):
- if col_type.lower() == 'tinyint':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint"]})')
- elif col_type.lower() == 'smallint':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint"]})')
- elif col_type.lower() == 'int':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int"]})')
- elif col_type.lower() == 'bigint':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint"]})')
- elif col_type.lower() == 'tinyint unsigned':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint unsigned"]})')
- elif col_type.lower() == 'smallint unsigned':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint unsigned"]})')
- elif col_type.lower() == 'int unsigned':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int unsigned"]})')
- elif col_type.lower() == 'bigint unsigned':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint unsigned"]})')
- elif col_type.lower() == 'bool':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})')
- elif col_type.lower() == 'float':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})')
- elif col_type.lower() == 'double':
- tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["double"]})')
- elif 'binary' in col_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''')
- elif 'nchar' in col_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')
- def delete_all_data(self,tbname,col_type,row_num,base_data,dbname,tb_type,tb_num=1):
- tdSql.execute(f'delete from {tbname}')
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select * from {tbname}')
- tdSql.checkRows(0)
- if tb_type == 'ntb' or tb_type == 'ctb':
- self.insert_base_data(col_type,tbname,row_num,base_data)
- elif tb_type == 'stb':
- for i in range(tb_num):
- self.insert_base_data(col_type,f'{tbname}_{i}',row_num,base_data)
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select * from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(row_num)
- elif tb_type =='stb':
- tdSql.checkRows(row_num*tb_num)
- def delete_one_row(self,tbname,column_type,column_name,base_data,row_num,dbname,tb_type,tb_num=1):
- tdSql.execute(f'delete from {tbname} where ts={self.ts}')
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select {column_name} from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(row_num-1)
- elif tb_type == 'stb':
- tdSql.checkRows((row_num-1)*tb_num)
- tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
- tdSql.checkRows(0)
- if tb_type == 'ntb' or tb_type == 'ctb':
- if 'binary' in column_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''')
- elif 'nchar' in column_type.lower():
- tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['nchar']}")''')
- else:
- tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})')
- elif tb_type == 'stb':
- for i in range(tb_num):
- if 'binary' in column_type.lower():
- tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['binary']}")''')
- elif 'nchar' in column_type.lower():
- tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['nchar']}")''')
- else:
- tdSql.execute(f'insert into {tbname}_{i} values({self.ts},{base_data[column_type]})')
- tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
- if column_type.lower() == 'float' or column_type.lower() == 'double':
- if abs(tdSql.queryResult[0][0] - base_data[column_type]) / base_data[column_type] <= 0.0001:
- tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
- else:
- tdLog.exit(f'{column_type} data check failure')
- elif 'binary' in column_type.lower():
- tdSql.checkEqual(tdSql.queryResult[0][0],base_data['binary'])
- elif 'nchar' in column_type.lower():
- tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar'])
- else:
- tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
- def delete_rows(self,dbname,tbname,col_name,col_type,base_data,row_num,tb_type,tb_num=1):
- for i in range(row_num):
- tdSql.execute(f'delete from {tbname} where ts>{self.ts+i}')
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select {col_name} from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(i+1)
- self.insert_base_data(col_type,tbname,row_num,base_data)
- elif tb_type == 'stb':
- tdSql.checkRows((i+1)*tb_num)
- for j in range(tb_num):
- self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
- for i in range(row_num):
- tdSql.execute(f'delete from {tbname} where ts>={self.ts+i}')
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select {col_name} from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(i)
- self.insert_base_data(col_type,tbname,row_num,base_data)
- elif tb_type == 'stb':
- tdSql.checkRows(i*tb_num)
- for j in range(tb_num):
- self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
- for i in range(row_num):
- tdSql.execute(f'delete from {tbname} where ts<={self.ts+i}')
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select {col_name} from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(row_num-i-1)
- self.insert_base_data(col_type,tbname,row_num,base_data)
- elif tb_type == 'stb':
- tdSql.checkRows((row_num-i-1)*tb_num)
- for j in range(tb_num):
- self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
- for i in range(row_num):
- tdSql.execute(f'delete from {tbname} where ts<{self.ts+i}')
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select {col_name} from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(row_num-i)
- self.insert_base_data(col_type,tbname,row_num,base_data)
- elif tb_type == 'stb':
- tdSql.checkRows((row_num-i)*tb_num)
- for j in range(tb_num):
- self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
- for i in range(row_num):
- tdSql.execute(f'delete from {tbname} where ts between {self.ts} and {self.ts+i}')
- tdSql.execute(f'flush database {dbname}')
- tdSql.execute('reset query cache')
- tdSql.query(f'select {col_name} from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(row_num - i-1)
- self.insert_base_data(col_type,tbname,row_num,base_data)
- elif tb_type == 'stb':
- tdSql.checkRows(tb_num*(row_num - i-1))
- for j in range(tb_num):
- self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
- tdSql.execute(f'delete from {tbname} where ts between {self.ts+i+1} and {self.ts}')
- tdSql.query(f'select {col_name} from {tbname}')
- if tb_type == 'ntb' or tb_type == 'ctb':
- tdSql.checkRows(row_num)
- elif tb_type == 'stb':
- tdSql.checkRows(tb_num*row_num)
- def delete_error(self,tbname,column_name,column_type,base_data):
- for error_list in ['',f'ts = {self.ts} and',f'ts = {self.ts} or']:
- if 'binary' in column_type.lower():
- tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['binary']}"''')
- elif 'nchar' in column_type.lower():
- tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''')
- else:
- tdSql.error(f'delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
-
- def delete_data_ntb(self):
- tdSql.execute(f'create database if not exists {self.dbname}')
- tdSql.execute(f'use {self.dbname}')
- for col_name,col_type in self.column_dict.items():
- tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})')
- self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
- self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'ntb')
- self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname,'ntb')
- self.delete_error(self.ntbname,col_name,col_type,self.base_data)
- self.delete_rows(self.dbname,self.ntbname,col_name,col_type,self.base_data,self.rowNum,'ntb')
- for func in ['first','last']:
- tdSql.query(f'select {func}(*) from {self.ntbname}')
- tdSql.execute(f'drop table {self.ntbname}')
- tdSql.execute(f'drop database {self.dbname}')
- def delete_data_ctb(self):
- tdSql.execute(f'create database if not exists {self.dbname}')
- tdSql.execute(f'use {self.dbname}')
- for col_name,col_type in self.column_dict.items():
- tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
- for i in range(self.tbnum):
- tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
- self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
- self.delete_one_row(f'{self.stbname}_{i}',col_type,col_name,self.base_data,self.rowNum,self.dbname,'ctb')
- self.delete_all_data(f'{self.stbname}_{i}',col_type,self.rowNum,self.base_data,self.dbname,'ctb')
- self.delete_error(f'{self.stbname}_{i}',col_name,col_type,self.base_data)
- self.delete_rows(self.dbname,f'{self.stbname}_{i}',col_name,col_type,self.base_data,self.rowNum,'ctb')
- for func in ['first','last']:
- tdSql.query(f'select {func}(*) from {self.stbname}_{i}')
- tdSql.execute(f'drop table {self.stbname}')
- def delete_data_stb(self):
- tdSql.execute(f'create database if not exists {self.dbname}')
- tdSql.execute(f'use {self.dbname}')
- for col_name,col_type in self.column_dict.items():
- tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
- for i in range(self.tbnum):
- tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
- self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
- self.delete_error(self.stbname,col_name,col_type,self.base_data)
- self.delete_one_row(self.stbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'stb',self.tbnum)
- self.delete_all_data(self.stbname,col_type,self.rowNum,self.base_data,self.dbname,'stb',self.tbnum)
- self.delete_rows(self.dbname,self.stbname,col_name,col_type,self.base_data,self.rowNum,'stb',self.tbnum)
- for func in ['first','last']:
- tdSql.query(f'select {func}(*) from {self.stbname}')
- tdSql.execute(f'drop table {self.stbname}')
- tdSql.execute(f'drop database {self.dbname}')
- def run(self):
- self.delete_data_ntb()
- self.delete_data_ctb()
- self.delete_data_stb()
- tdDnodes.stoptaosd(1)
- tdDnodes.starttaosd(1)
- self.delete_data_ntb()
- def stop(self):
- tdSql.close()
- tdLog.success("%s successfully executed" % __file__)
-
-tdCases.addWindows(__file__, TDTestCase())
-tdCases.addLinux(__file__, TDTestCase())
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import random
+import string
+
+from numpy import logspace
+from util import constant
+from util.log import *
+from util.cases import *
+from util.sql import *
+from util.common import *
+from util.sqlset import TDSetSql
+
+class TDTestCase:
+ def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), True)
+ self.dbname = 'db_test'
+ self.setsql = TDSetSql()
+ self.stbname = 'stb'
+ self.ntbname = 'ntb'
+ self.rowNum = 10
+ self.tbnum = 3
+ self.ts = 1537146000000
+ self.binary_str = 'taosdata'
+ self.nchar_str = '涛思数据'
+ self.str_length = 20
+ self.column_dict = {
+ 'col1': 'tinyint',
+ 'col2': 'smallint',
+ 'col3': 'int',
+ 'col4': 'bigint',
+ 'col5': 'tinyint unsigned',
+ 'col6': 'smallint unsigned',
+ 'col7': 'int unsigned',
+ 'col8': 'bigint unsigned',
+ 'col9': 'float',
+ 'col10': 'double',
+ 'col11': 'bool',
+ 'col12': f'binary({self.str_length})',
+ 'col13': f'nchar({self.str_length})',
+
+ }
+
+ self.tinyint_val = random.randint(constant.TINYINT_MIN,constant.TINYINT_MAX)
+ self.smallint_val = random.randint(constant.SMALLINT_MIN,constant.SMALLINT_MAX)
+ self.int_val = random.randint(constant.INT_MIN,constant.INT_MAX)
+ self.bigint_val = random.randint(constant.BIGINT_MIN,constant.BIGINT_MAX)
+ self.untingint_val = random.randint(constant.TINYINT_UN_MIN,constant.TINYINT_UN_MAX)
+ self.unsmallint_val = random.randint(constant.SMALLINT_UN_MIN,constant.SMALLINT_UN_MAX)
+ self.unint_val = random.randint(constant.INT_UN_MIN,constant.INT_MAX)
+ self.unbigint_val = random.randint(constant.BIGINT_UN_MIN,constant.BIGINT_UN_MAX)
+ self.float_val = random.uniform(constant.FLOAT_MIN,constant.FLOAT_MAX)
+ self.double_val = random.uniform(constant.DOUBLE_MIN*(1E-300),constant.DOUBLE_MAX*(1E-300))
+ self.bool_val = random.randint(0,100)%2
+ self.binary_val = tdCom.getLongName(random.randint(0,self.str_length))
+ self.nchar_val = tdCom.getLongName(random.randint(0,self.str_length))
+ self.base_data = {
+ 'tinyint':self.tinyint_val,
+ 'smallint':self.smallint_val,
+ 'int':self.int_val,
+ 'bigint':self.bigint_val,
+ 'tinyint unsigned':self.untingint_val,
+ 'smallint unsigned':self.unsmallint_val,
+ 'int unsigned':self.unint_val,
+ 'bigint unsigned':self.unbigint_val,
+ 'bool':self.bool_val,
+ 'float':self.float_val,
+ 'double':self.double_val,
+ 'binary':self.binary_val,
+ 'nchar':self.nchar_val
+ }
+
+ def insert_base_data(self,col_type,tbname,rows,base_data):
+ for i in range(rows):
+ if col_type.lower() == 'tinyint':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint"]})')
+ elif col_type.lower() == 'smallint':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint"]})')
+ elif col_type.lower() == 'int':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int"]})')
+ elif col_type.lower() == 'bigint':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint"]})')
+ elif col_type.lower() == 'tinyint unsigned':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["tinyint unsigned"]})')
+ elif col_type.lower() == 'smallint unsigned':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["smallint unsigned"]})')
+ elif col_type.lower() == 'int unsigned':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["int unsigned"]})')
+ elif col_type.lower() == 'bigint unsigned':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bigint unsigned"]})')
+ elif col_type.lower() == 'bool':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["bool"]})')
+ elif col_type.lower() == 'float':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["float"]})')
+ elif col_type.lower() == 'double':
+ tdSql.execute(f'insert into {tbname} values({self.ts+i},{base_data["double"]})')
+ elif 'binary' in col_type.lower():
+ tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['binary']}")''')
+ elif 'nchar' in col_type.lower():
+ tdSql.execute(f'''insert into {tbname} values({self.ts+i},"{base_data['nchar']}")''')
+ def delete_all_data(self,tbname,col_type,row_num,base_data,dbname,tb_type,tb_num=1,stbname=''):
+ tdSql.query(f'select count(*) from {tbname}')
+ tdSql.execute(f'delete from {tbname}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select * from {tbname}')
+ tdSql.checkRows(0)
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ if tb_type == 'ctb':
+ tdSql.query(f'select count(*) from {stbname}')
+ if tb_num <= 1:
+ if len(tdSql.queryResult) != 0:
+ tdLog.exit('delete case failure!')
+ else:
+ tdSql.checkEqual(tdSql.queryResult[0][0],(tb_num-1)*row_num)
+
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ for i in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{i}',row_num,base_data)
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select * from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num)
+ elif tb_type =='stb':
+ tdSql.checkRows(row_num*tb_num)
+ def delete_one_row(self,tbname,column_type,column_name,base_data,row_num,dbname,tb_type,tb_num=1):
+ tdSql.execute(f'delete from {tbname} where ts={self.ts}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {column_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num-1)
+ elif tb_type == 'stb':
+ tdSql.checkRows((row_num-1)*tb_num)
+ tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
+ tdSql.checkRows(0)
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ if 'binary' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['binary']}")''')
+ elif 'nchar' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname} values({self.ts},"{base_data['nchar']}")''')
+ else:
+ tdSql.execute(f'insert into {tbname} values({self.ts},{base_data[column_type]})')
+ elif tb_type == 'stb':
+ for i in range(tb_num):
+ if 'binary' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['binary']}")''')
+ elif 'nchar' in column_type.lower():
+ tdSql.execute(f'''insert into {tbname}_{i} values({self.ts},"{base_data['nchar']}")''')
+ else:
+ tdSql.execute(f'insert into {tbname}_{i} values({self.ts},{base_data[column_type]})')
+ tdSql.query(f'select {column_name} from {tbname} where ts={self.ts}')
+ if column_type.lower() == 'float' or column_type.lower() == 'double':
+ if abs(tdSql.queryResult[0][0] - base_data[column_type]) / base_data[column_type] <= 0.0001:
+ tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
+ else:
+ tdLog.exit(f'{column_type} data check failure')
+ elif 'binary' in column_type.lower():
+ tdSql.checkEqual(tdSql.queryResult[0][0],base_data['binary'])
+ elif 'nchar' in column_type.lower():
+ tdSql.checkEqual(tdSql.queryResult[0][0],base_data['nchar'])
+ else:
+ tdSql.checkEqual(tdSql.queryResult[0][0],base_data[column_type])
+ def delete_rows(self,dbname,tbname,col_name,col_type,base_data,row_num,tb_type,tb_num=1):
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts>{self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(i+1)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows((i+1)*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts>={self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(i)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows(i*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts<={self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num-i-1)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows((row_num-i-1)*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts<{self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num-i)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows((row_num-i)*tb_num)
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ for i in range(row_num):
+ tdSql.execute(f'delete from {tbname} where ts between {self.ts} and {self.ts+i}')
+ tdSql.execute(f'flush database {dbname}')
+ tdSql.execute('reset query cache')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num - i-1)
+ self.insert_base_data(col_type,tbname,row_num,base_data)
+ elif tb_type == 'stb':
+ tdSql.checkRows(tb_num*(row_num - i-1))
+ for j in range(tb_num):
+ self.insert_base_data(col_type,f'{tbname}_{j}',row_num,base_data)
+ tdSql.execute(f'delete from {tbname} where ts between {self.ts+i+1} and {self.ts}')
+ tdSql.query(f'select {col_name} from {tbname}')
+ if tb_type == 'ntb' or tb_type == 'ctb':
+ tdSql.checkRows(row_num)
+ elif tb_type == 'stb':
+ tdSql.checkRows(tb_num*row_num)
+ def delete_error(self,tbname,column_name,column_type,base_data):
+ for error_list in ['',f'ts = {self.ts} and',f'ts = {self.ts} or']:
+ if 'binary' in column_type.lower():
+ tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['binary']}"''')
+ elif 'nchar' in column_type.lower():
+ tdSql.error(f'''delete from {tbname} where {error_list} {column_name} ="{base_data['nchar']}"''')
+ else:
+ tdSql.error(f'delete from {tbname} where {error_list} {column_name} = {base_data[column_type]}')
+
+ def delete_data_ntb(self):
+ tdSql.execute(f'create database if not exists {self.dbname}')
+ tdSql.execute(f'use {self.dbname}')
+ for col_name,col_type in self.column_dict.items():
+ tdSql.execute(f'create table {self.ntbname} (ts timestamp,{col_name} {col_type})')
+ self.insert_base_data(col_type,self.ntbname,self.rowNum,self.base_data)
+ self.delete_one_row(self.ntbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'ntb')
+ self.delete_all_data(self.ntbname,col_type,self.rowNum,self.base_data,self.dbname,'ntb')
+ self.delete_error(self.ntbname,col_name,col_type,self.base_data)
+ self.delete_rows(self.dbname,self.ntbname,col_name,col_type,self.base_data,self.rowNum,'ntb')
+ for func in ['first','last']:
+ tdSql.query(f'select {func}(*) from {self.ntbname}')
+ tdSql.execute(f'drop table {self.ntbname}')
+ tdSql.execute(f'drop database {self.dbname}')
+ def delete_data_ctb(self):
+ tdSql.execute(f'create database if not exists {self.dbname}')
+ tdSql.execute(f'use {self.dbname}')
+ for col_name,col_type in self.column_dict.items():
+ tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
+ for i in range(self.tbnum):
+ tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
+ self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
+ self.delete_one_row(f'{self.stbname}_{i}',col_type,col_name,self.base_data,self.rowNum,self.dbname,'ctb')
+ self.delete_all_data(f'{self.stbname}_{i}',col_type,self.rowNum,self.base_data,self.dbname,'ctb',i+1,self.stbname)
+ self.delete_error(f'{self.stbname}_{i}',col_name,col_type,self.base_data)
+ self.delete_rows(self.dbname,f'{self.stbname}_{i}',col_name,col_type,self.base_data,self.rowNum,'ctb')
+ for func in ['first','last']:
+ tdSql.query(f'select {func}(*) from {self.stbname}_{i}')
+ tdSql.execute(f'drop table {self.stbname}')
+ def delete_data_stb(self):
+ tdSql.execute(f'create database if not exists {self.dbname}')
+ tdSql.execute(f'use {self.dbname}')
+ for col_name,col_type in self.column_dict.items():
+ tdSql.execute(f'create table {self.stbname} (ts timestamp,{col_name} {col_type}) tags(t1 int)')
+ for i in range(self.tbnum):
+ tdSql.execute(f'create table {self.stbname}_{i} using {self.stbname} tags(1)')
+ self.insert_base_data(col_type,f'{self.stbname}_{i}',self.rowNum,self.base_data)
+ self.delete_error(self.stbname,col_name,col_type,self.base_data)
+ self.delete_one_row(self.stbname,col_type,col_name,self.base_data,self.rowNum,self.dbname,'stb',self.tbnum)
+ self.delete_all_data(self.stbname,col_type,self.rowNum,self.base_data,self.dbname,'stb',self.tbnum)
+ self.delete_rows(self.dbname,self.stbname,col_name,col_type,self.base_data,self.rowNum,'stb',self.tbnum)
+ for func in ['first','last']:
+ tdSql.query(f'select {func}(*) from {self.stbname}')
+ tdSql.execute(f'drop table {self.stbname}')
+ tdSql.execute(f'drop database {self.dbname}')
+ def run(self):
+ self.delete_data_ntb()
+ self.delete_data_ctb()
+ self.delete_data_stb()
+ tdDnodes.stoptaosd(1)
+ tdDnodes.starttaosd(1)
+ self.delete_data_ntb()
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/system-test/1-insert/influxdb_line_taosc_insert.py b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
index 82db318c2b..6372502484 100644
--- a/tests/system-test/1-insert/influxdb_line_taosc_insert.py
+++ b/tests/system-test/1-insert/influxdb_line_taosc_insert.py
@@ -30,6 +30,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), False)
self._conn = conn
diff --git a/tests/system-test/1-insert/insertWithMoreVgroup.py b/tests/system-test/1-insert/insertWithMoreVgroup.py
index 9d033ff6e5..2356835ce8 100644
--- a/tests/system-test/1-insert/insertWithMoreVgroup.py
+++ b/tests/system-test/1-insert/insertWithMoreVgroup.py
@@ -63,6 +63,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# tdSql.prepare()
diff --git a/tests/system-test/1-insert/insert_drop.py b/tests/system-test/1-insert/insert_drop.py
index d063501484..029d013d5b 100644
--- a/tests/system-test/1-insert/insert_drop.py
+++ b/tests/system-test/1-insert/insert_drop.py
@@ -9,6 +9,7 @@ import threading
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/1-insert/keep_expired.py b/tests/system-test/1-insert/keep_expired.py
index 00cbc771c5..0cb6d841ed 100644
--- a/tests/system-test/1-insert/keep_expired.py
+++ b/tests/system-test/1-insert/keep_expired.py
@@ -8,6 +8,7 @@ import time
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.dbname = "test"
diff --git a/tests/system-test/1-insert/mutil_stage.py b/tests/system-test/1-insert/mutil_stage.py
index 08f0ba8313..3e2bec130e 100644
--- a/tests/system-test/1-insert/mutil_stage.py
+++ b/tests/system-test/1-insert/mutil_stage.py
@@ -58,6 +58,7 @@ DATA_PRE2 = f"data2"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.taos_cfg_path = tdDnodes.dnodes[0].cfgPath
diff --git a/tests/system-test/1-insert/mutipythonnodebugtaosd.py b/tests/system-test/1-insert/mutipythonnodebugtaosd.py
index 1f568fdea1..042d881658 100644
--- a/tests/system-test/1-insert/mutipythonnodebugtaosd.py
+++ b/tests/system-test/1-insert/mutipythonnodebugtaosd.py
@@ -61,6 +61,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
# tdSql.init(conn.cursor())
# tdSql.prepare()
diff --git a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
index 9be51eb445..44243fe029 100644
--- a/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_json_taosc_insert.py
@@ -25,6 +25,7 @@ import json
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self._conn = conn
diff --git a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
index ab11691cf8..f588827206 100644
--- a/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
+++ b/tests/system-test/1-insert/opentsdb_telnet_line_taosc_insert.py
@@ -29,6 +29,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), False)
self._conn = conn
diff --git a/tests/system-test/1-insert/table_comment.py b/tests/system-test/1-insert/table_comment.py
index 2a8aa9b26a..270e0ca424 100644
--- a/tests/system-test/1-insert/table_comment.py
+++ b/tests/system-test/1-insert/table_comment.py
@@ -21,6 +21,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# prepare data
diff --git a/tests/system-test/1-insert/table_param_ttl.py b/tests/system-test/1-insert/table_param_ttl.py
index a184c51178..51d7d596cd 100644
--- a/tests/system-test/1-insert/table_param_ttl.py
+++ b/tests/system-test/1-insert/table_param_ttl.py
@@ -19,6 +19,7 @@ from util.common import *
class TDTestCase:
updatecfgDict = {'ttlUnit':5,'ttlPushInterval':3}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.ntbname = 'ntb'
diff --git a/tests/system-test/1-insert/tb_100w_data_order.py b/tests/system-test/1-insert/tb_100w_data_order.py
index 85fe559cc2..ec166f5f6c 100644
--- a/tests/system-test/1-insert/tb_100w_data_order.py
+++ b/tests/system-test/1-insert/tb_100w_data_order.py
@@ -6,6 +6,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.ts = 1537146000000
diff --git a/tests/system-test/1-insert/test_stmt_muti_insert_query.py b/tests/system-test/1-insert/test_stmt_muti_insert_query.py
index 89beb30dc9..d75e04d2db 100644
--- a/tests/system-test/1-insert/test_stmt_muti_insert_query.py
+++ b/tests/system-test/1-insert/test_stmt_muti_insert_query.py
@@ -58,6 +58,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# tdSql.prepare()
diff --git a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
index 535a65ffed..afd9d45b56 100644
--- a/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
+++ b/tests/system-test/1-insert/test_stmt_set_tbname_tag.py
@@ -58,6 +58,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# tdSql.prepare()
diff --git a/tests/system-test/1-insert/time_range_wise.py b/tests/system-test/1-insert/time_range_wise.py
index 1499c178d4..3d5c9197d1 100644
--- a/tests/system-test/1-insert/time_range_wise.py
+++ b/tests/system-test/1-insert/time_range_wise.py
@@ -135,6 +135,7 @@ class TDTestCase:
updatecfgDict = {"querySmaOptimize": 1}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.precision = "ms"
diff --git a/tests/system-test/1-insert/update_data.py b/tests/system-test/1-insert/update_data.py
index d6b37db8b1..6fcd987ea4 100644
--- a/tests/system-test/1-insert/update_data.py
+++ b/tests/system-test/1-insert/update_data.py
@@ -21,6 +21,7 @@ from util.common import *
from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),logSql)
self.setsql = TDSetSql()
diff --git a/tests/system-test/1-insert/update_data_muti_rows.py b/tests/system-test/1-insert/update_data_muti_rows.py
index af78ba5d19..67406f327e 100644
--- a/tests/system-test/1-insert/update_data_muti_rows.py
+++ b/tests/system-test/1-insert/update_data_muti_rows.py
@@ -24,6 +24,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.dbname = 'db_test'
diff --git a/tests/system-test/2-query/Now.py b/tests/system-test/2-query/Now.py
index 0e514cf43b..db97cdad64 100644
--- a/tests/system-test/2-query/Now.py
+++ b/tests/system-test/2-query/Now.py
@@ -8,6 +8,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
diff --git a/tests/system-test/2-query/Timediff.py b/tests/system-test/2-query/Timediff.py
index c126ce926a..4e72c07b30 100644
--- a/tests/system-test/2-query/Timediff.py
+++ b/tests/system-test/2-query/Timediff.py
@@ -5,6 +5,7 @@ from util.gettime import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.get_time = GetTime()
diff --git a/tests/system-test/2-query/To_iso8601.py b/tests/system-test/2-query/To_iso8601.py
index bf4fe404aa..92aacbb350 100644
--- a/tests/system-test/2-query/To_iso8601.py
+++ b/tests/system-test/2-query/To_iso8601.py
@@ -11,6 +11,7 @@ import os
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.rowNum = 10
diff --git a/tests/system-test/2-query/To_unixtimestamp.py b/tests/system-test/2-query/To_unixtimestamp.py
index df99b08862..8ee2007450 100644
--- a/tests/system-test/2-query/To_unixtimestamp.py
+++ b/tests/system-test/2-query/To_unixtimestamp.py
@@ -11,6 +11,7 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
diff --git a/tests/system-test/2-query/Today.py b/tests/system-test/2-query/Today.py
index 0f89a378be..08f6ba6baf 100644
--- a/tests/system-test/2-query/Today.py
+++ b/tests/system-test/2-query/Today.py
@@ -11,6 +11,7 @@ import pandas as pd
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.today_date = datetime.datetime.strptime(datetime.datetime.now().strftime("%Y-%m-%d"), "%Y-%m-%d")
diff --git a/tests/system-test/2-query/abs.py b/tests/system-test/2-query/abs.py
index 6bc9457264..d7478a55a8 100644
--- a/tests/system-test/2-query/abs.py
+++ b/tests/system-test/2-query/abs.py
@@ -15,12 +15,14 @@ class TDTestCase:
# "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.tb_nums = 10
self.row_nums = 20
self.ts = 1434938400000
self.time_step = 1000
+ self.replicaVar = int(replicaVar)
def insert_datas_and_check_abs(self ,tbnums , rownums , time_step ):
tdLog.info(" prepare datas for auto check abs function ")
@@ -28,7 +30,7 @@ class TDTestCase:
stbname = f"{dbname}.stb"
ctbname_pre = f"{dbname}.sub_tb_"
- tdSql.execute(f" create database {dbname} ")
+ tdSql.execute(f" create database {dbname} replica {self.replicaVar} ")
tdSql.execute(f" use {dbname} ")
tdSql.execute(f" create stable {stbname} (ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint,\
c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp) tags (t1 int)")
@@ -125,7 +127,7 @@ class TDTestCase:
def prepare_tag_datas(self, dbname="testdb"):
# prepare datas
tdSql.execute(
- f"create database if not exists {dbname} keep 3650 duration 1000")
+ f"create database if not exists {dbname} keep 3650 duration 1000 replica {self.replicaVar} ")
tdSql.execute(" use testdb ")
tdSql.execute(
f'''create table {dbname}.stb1
@@ -456,7 +458,7 @@ class TDTestCase:
dbname = "bound_test"
tdSql.execute(f"drop database if exists {dbname}")
- tdSql.execute(f"create database if not exists {dbname}")
+ tdSql.execute(f"create database if not exists {dbname} replica {self.replicaVar} ")
time.sleep(3)
tdSql.execute(f"use {dbname}")
tdSql.execute(
@@ -589,7 +591,7 @@ class TDTestCase:
def run(self): # sourcery skip: extract-duplicate-method, remove-redundant-fstring
- tdSql.prepare()
+ tdSql.prepare(replica=f"{self.replicaVar}")
tdLog.printNoPrefix("==========step1:create table ==============")
diff --git a/tests/system-test/2-query/and_or_for_byte.py b/tests/system-test/2-query/and_or_for_byte.py
index 682d729f0e..479918f2f9 100644
--- a/tests/system-test/2-query/and_or_for_byte.py
+++ b/tests/system-test/2-query/and_or_for_byte.py
@@ -15,6 +15,7 @@ class TDTestCase:
# "wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "udfDebugFlag": 143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.tb_nums = 10
diff --git a/tests/system-test/2-query/apercentile.py b/tests/system-test/2-query/apercentile.py
index 8997238d7b..e190bf9374 100644
--- a/tests/system-test/2-query/apercentile.py
+++ b/tests/system-test/2-query/apercentile.py
@@ -19,6 +19,7 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),False)
self.rowNum = 10
diff --git a/tests/system-test/2-query/arccos.py b/tests/system-test/2-query/arccos.py
index 69e42a3911..ed717741c5 100644
--- a/tests/system-test/2-query/arccos.py
+++ b/tests/system-test/2-query/arccos.py
@@ -13,6 +13,7 @@ class TDTestCase:
# "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
# "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/arcsin.py b/tests/system-test/2-query/arcsin.py
index 57d08d0587..71de088979 100644
--- a/tests/system-test/2-query/arcsin.py
+++ b/tests/system-test/2-query/arcsin.py
@@ -13,6 +13,7 @@ class TDTestCase:
# "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
# "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/arctan.py b/tests/system-test/2-query/arctan.py
index 64a29f7ccb..9780f9855b 100644
--- a/tests/system-test/2-query/arctan.py
+++ b/tests/system-test/2-query/arctan.py
@@ -13,6 +13,7 @@ class TDTestCase:
# "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
# "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/avg.py b/tests/system-test/2-query/avg.py
index e7086744f8..ec7ec34ed3 100644
--- a/tests/system-test/2-query/avg.py
+++ b/tests/system-test/2-query/avg.py
@@ -11,6 +11,7 @@ class TDTestCase:
# "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
# "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.setsql = TDSetSql()
diff --git a/tests/system-test/2-query/between.py b/tests/system-test/2-query/between.py
index dd7dda668b..719852ff27 100644
--- a/tests/system-test/2-query/between.py
+++ b/tests/system-test/2-query/between.py
@@ -10,6 +10,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/bottom.py b/tests/system-test/2-query/bottom.py
index 4c352b619f..76052301d9 100644
--- a/tests/system-test/2-query/bottom.py
+++ b/tests/system-test/2-query/bottom.py
@@ -22,6 +22,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.dbname = 'db_test'
diff --git a/tests/system-test/2-query/cast.py b/tests/system-test/2-query/cast.py
index b5881221f4..b3969a9c45 100644
--- a/tests/system-test/2-query/cast.py
+++ b/tests/system-test/2-query/cast.py
@@ -13,6 +13,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.dbname = "db"
diff --git a/tests/system-test/2-query/ceil.py b/tests/system-test/2-query/ceil.py
index 221b571e8e..fffd484720 100644
--- a/tests/system-test/2-query/ceil.py
+++ b/tests/system-test/2-query/ceil.py
@@ -14,6 +14,7 @@ class TDTestCase:
# "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/char_length.py b/tests/system-test/2-query/char_length.py
index e8546ca72e..e42e5e4f2c 100644
--- a/tests/system-test/2-query/char_length.py
+++ b/tests/system-test/2-query/char_length.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [TS_COL]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/check_tsdb.py b/tests/system-test/2-query/check_tsdb.py
index 0cbbc60171..b3ff259cc5 100644
--- a/tests/system-test/2-query/check_tsdb.py
+++ b/tests/system-test/2-query/check_tsdb.py
@@ -13,6 +13,7 @@ class TDTestCase:
# "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
# "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/concat.py b/tests/system-test/2-query/concat.py
index dc05a17185..326f6940f6 100644
--- a/tests/system-test/2-query/concat.py
+++ b/tests/system-test/2-query/concat.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/concat2.py b/tests/system-test/2-query/concat2.py
index 1316a3a228..a7e61444fe 100644
--- a/tests/system-test/2-query/concat2.py
+++ b/tests/system-test/2-query/concat2.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/concat_ws.py b/tests/system-test/2-query/concat_ws.py
index 8443d41818..26731715c1 100644
--- a/tests/system-test/2-query/concat_ws.py
+++ b/tests/system-test/2-query/concat_ws.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/concat_ws2.py b/tests/system-test/2-query/concat_ws2.py
index 38b93660e1..bf340826b6 100644
--- a/tests/system-test/2-query/concat_ws2.py
+++ b/tests/system-test/2-query/concat_ws2.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/cos.py b/tests/system-test/2-query/cos.py
index 2675c34266..d6bddc4e84 100644
--- a/tests/system-test/2-query/cos.py
+++ b/tests/system-test/2-query/cos.py
@@ -13,6 +13,7 @@ class TDTestCase:
# "jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
# "wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/count.py b/tests/system-test/2-query/count.py
index 6362974d0b..254a8792af 100644
--- a/tests/system-test/2-query/count.py
+++ b/tests/system-test/2-query/count.py
@@ -4,6 +4,7 @@ from util.cases import *
from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),False)
self.setsql = TDSetSql()
diff --git a/tests/system-test/2-query/count_partition.py b/tests/system-test/2-query/count_partition.py
index 4ab51c4efe..f59376a979 100644
--- a/tests/system-test/2-query/count_partition.py
+++ b/tests/system-test/2-query/count_partition.py
@@ -5,6 +5,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/csum.py b/tests/system-test/2-query/csum.py
index fb828b85df..e933eb54cb 100644
--- a/tests/system-test/2-query/csum.py
+++ b/tests/system-test/2-query/csum.py
@@ -27,6 +27,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/2-query/db.py b/tests/system-test/2-query/db.py
index aea16dd162..7fe8b96157 100644
--- a/tests/system-test/2-query/db.py
+++ b/tests/system-test/2-query/db.py
@@ -12,6 +12,7 @@ import random
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/diff.py b/tests/system-test/2-query/diff.py
index 74dae9d0fd..d48a01db6a 100644
--- a/tests/system-test/2-query/diff.py
+++ b/tests/system-test/2-query/diff.py
@@ -7,6 +7,7 @@ import numpy as np
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/distinct.py b/tests/system-test/2-query/distinct.py
index c7c6e1c9b0..5c07544d5d 100644
--- a/tests/system-test/2-query/distinct.py
+++ b/tests/system-test/2-query/distinct.py
@@ -10,6 +10,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/distribute_agg_apercentile.py b/tests/system-test/2-query/distribute_agg_apercentile.py
index d2364df65a..897580fbcc 100644
--- a/tests/system-test/2-query/distribute_agg_apercentile.py
+++ b/tests/system-test/2-query/distribute_agg_apercentile.py
@@ -9,6 +9,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/distribute_agg_avg.py b/tests/system-test/2-query/distribute_agg_avg.py
index 4b5d3d8c5a..1cd24103f8 100644
--- a/tests/system-test/2-query/distribute_agg_avg.py
+++ b/tests/system-test/2-query/distribute_agg_avg.py
@@ -10,6 +10,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/distribute_agg_count.py b/tests/system-test/2-query/distribute_agg_count.py
index e6cf718c05..7d131cd77d 100644
--- a/tests/system-test/2-query/distribute_agg_count.py
+++ b/tests/system-test/2-query/distribute_agg_count.py
@@ -9,6 +9,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/distribute_agg_max.py b/tests/system-test/2-query/distribute_agg_max.py
index 92188bd765..fb91216c3e 100644
--- a/tests/system-test/2-query/distribute_agg_max.py
+++ b/tests/system-test/2-query/distribute_agg_max.py
@@ -10,6 +10,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/distribute_agg_min.py b/tests/system-test/2-query/distribute_agg_min.py
index afc42755b3..2667798640 100644
--- a/tests/system-test/2-query/distribute_agg_min.py
+++ b/tests/system-test/2-query/distribute_agg_min.py
@@ -10,6 +10,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/distribute_agg_spread.py b/tests/system-test/2-query/distribute_agg_spread.py
index ab073c76a2..0247a91861 100644
--- a/tests/system-test/2-query/distribute_agg_spread.py
+++ b/tests/system-test/2-query/distribute_agg_spread.py
@@ -9,6 +9,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/distribute_agg_stddev.py b/tests/system-test/2-query/distribute_agg_stddev.py
index ae9479dcdd..80bab3082d 100644
--- a/tests/system-test/2-query/distribute_agg_stddev.py
+++ b/tests/system-test/2-query/distribute_agg_stddev.py
@@ -10,6 +10,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/distribute_agg_sum.py b/tests/system-test/2-query/distribute_agg_sum.py
index 59804b7dc5..da26fd58f9 100644
--- a/tests/system-test/2-query/distribute_agg_sum.py
+++ b/tests/system-test/2-query/distribute_agg_sum.py
@@ -10,6 +10,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/elapsed.py b/tests/system-test/2-query/elapsed.py
index d31df49a8e..dc89cd513d 100644
--- a/tests/system-test/2-query/elapsed.py
+++ b/tests/system-test/2-query/elapsed.py
@@ -21,6 +21,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/2-query/explain.py b/tests/system-test/2-query/explain.py
index 1126e23f06..f164d3aedf 100644
--- a/tests/system-test/2-query/explain.py
+++ b/tests/system-test/2-query/explain.py
@@ -31,6 +31,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/2-query/first.py b/tests/system-test/2-query/first.py
index 9523527076..5b5b596a57 100644
--- a/tests/system-test/2-query/first.py
+++ b/tests/system-test/2-query/first.py
@@ -24,6 +24,7 @@ import numpy as np
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/floor.py b/tests/system-test/2-query/floor.py
index 65974ff2e9..6a75872bcf 100644
--- a/tests/system-test/2-query/floor.py
+++ b/tests/system-test/2-query/floor.py
@@ -13,6 +13,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/function_diff.py b/tests/system-test/2-query/function_diff.py
index 7f11b12e89..4abef03037 100644
--- a/tests/system-test/2-query/function_diff.py
+++ b/tests/system-test/2-query/function_diff.py
@@ -27,6 +27,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/function_null.py b/tests/system-test/2-query/function_null.py
index 3a74ca268f..44b86ee543 100644
--- a/tests/system-test/2-query/function_null.py
+++ b/tests/system-test/2-query/function_null.py
@@ -12,6 +12,7 @@ import random
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.tb_nums = 10
diff --git a/tests/system-test/2-query/function_stateduration.py b/tests/system-test/2-query/function_stateduration.py
index 8f25595713..ad9b8b0d79 100644
--- a/tests/system-test/2-query/function_stateduration.py
+++ b/tests/system-test/2-query/function_stateduration.py
@@ -6,6 +6,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/histogram.py b/tests/system-test/2-query/histogram.py
index ecbb7fe927..cc6e8fe7e9 100644
--- a/tests/system-test/2-query/histogram.py
+++ b/tests/system-test/2-query/histogram.py
@@ -145,6 +145,7 @@ class Hsgschema:
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/hyperloglog.py b/tests/system-test/2-query/hyperloglog.py
index eff687cba7..d2cbe07b65 100644
--- a/tests/system-test/2-query/hyperloglog.py
+++ b/tests/system-test/2-query/hyperloglog.py
@@ -32,6 +32,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py
index 780565c778..ce6b85c1cf 100644
--- a/tests/system-test/2-query/interp.py
+++ b/tests/system-test/2-query/interp.py
@@ -10,6 +10,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/2-query/irate.py b/tests/system-test/2-query/irate.py
index 7b3ec10a8d..cee595d186 100644
--- a/tests/system-test/2-query/irate.py
+++ b/tests/system-test/2-query/irate.py
@@ -12,6 +12,7 @@ import random ,math
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.tb_nums = 10
diff --git a/tests/system-test/2-query/join.py b/tests/system-test/2-query/join.py
index 04b4fa89a2..120cc2eb30 100644
--- a/tests/system-test/2-query/join.py
+++ b/tests/system-test/2-query/join.py
@@ -64,6 +64,7 @@ class DataSet:
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/join2.py b/tests/system-test/2-query/join2.py
index 79cc62e4bc..7ddc5e57ba 100644
--- a/tests/system-test/2-query/join2.py
+++ b/tests/system-test/2-query/join2.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/json_tag.py b/tests/system-test/2-query/json_tag.py
index c6f1e5076d..1c2a6055bc 100644
--- a/tests/system-test/2-query/json_tag.py
+++ b/tests/system-test/2-query/json_tag.py
@@ -14,6 +14,7 @@ class TDTestCase:
return
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/json_tag_large_tables.py b/tests/system-test/2-query/json_tag_large_tables.py
index 06170e0adf..ea3963640a 100644
--- a/tests/system-test/2-query/json_tag_large_tables.py
+++ b/tests/system-test/2-query/json_tag_large_tables.py
@@ -31,6 +31,7 @@ class TDTestCase:
return
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
self.testcasePath = os.path.split(__file__)[0]
self.testcaseFilename = os.path.split(__file__)[-1]
# os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
diff --git a/tests/system-test/2-query/last.py b/tests/system-test/2-query/last.py
index 0561dc9695..f4a1ab1790 100644
--- a/tests/system-test/2-query/last.py
+++ b/tests/system-test/2-query/last.py
@@ -9,6 +9,7 @@ import numpy as np
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/last_row.py b/tests/system-test/2-query/last_row.py
index 333c1516b3..f8d6ce4c6c 100644
--- a/tests/system-test/2-query/last_row.py
+++ b/tests/system-test/2-query/last_row.py
@@ -12,6 +12,7 @@ import random
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)
self.tb_nums = 10
diff --git a/tests/system-test/2-query/leastsquares.py b/tests/system-test/2-query/leastsquares.py
index 8870e416f8..8ece4c46f0 100644
--- a/tests/system-test/2-query/leastsquares.py
+++ b/tests/system-test/2-query/leastsquares.py
@@ -30,6 +30,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/length.py b/tests/system-test/2-query/length.py
index 44c951dd4a..161ee20a9a 100644
--- a/tests/system-test/2-query/length.py
+++ b/tests/system-test/2-query/length.py
@@ -24,6 +24,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/log.py b/tests/system-test/2-query/log.py
index 11a0a9c317..7305a44f56 100644
--- a/tests/system-test/2-query/log.py
+++ b/tests/system-test/2-query/log.py
@@ -12,6 +12,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/lower.py b/tests/system-test/2-query/lower.py
index f8ac1ab217..4499047a04 100644
--- a/tests/system-test/2-query/lower.py
+++ b/tests/system-test/2-query/lower.py
@@ -23,6 +23,7 @@ TS_TYPE_COL = [TS_COL]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/ltrim.py b/tests/system-test/2-query/ltrim.py
index ba769ba350..193aa45198 100644
--- a/tests/system-test/2-query/ltrim.py
+++ b/tests/system-test/2-query/ltrim.py
@@ -28,6 +28,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/mavg.py b/tests/system-test/2-query/mavg.py
index 7f545f2048..ec4a087a8e 100644
--- a/tests/system-test/2-query/mavg.py
+++ b/tests/system-test/2-query/mavg.py
@@ -28,6 +28,7 @@ from util.dnodes import *
dbname = 'db'
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/max.py b/tests/system-test/2-query/max.py
index ee75afb501..b8da02b9a6 100644
--- a/tests/system-test/2-query/max.py
+++ b/tests/system-test/2-query/max.py
@@ -7,6 +7,7 @@ import numpy as np
class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/max_partition.py b/tests/system-test/2-query/max_partition.py
index f50b9fb2bb..a9b7a14eb0 100644
--- a/tests/system-test/2-query/max_partition.py
+++ b/tests/system-test/2-query/max_partition.py
@@ -5,6 +5,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/min.py b/tests/system-test/2-query/min.py
index 8849b14a33..bf9993da64 100644
--- a/tests/system-test/2-query/min.py
+++ b/tests/system-test/2-query/min.py
@@ -7,6 +7,7 @@ import numpy as np
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/nestedQuery.py b/tests/system-test/2-query/nestedQuery.py
index 4fbd840efe..b1008efa86 100755
--- a/tests/system-test/2-query/nestedQuery.py
+++ b/tests/system-test/2-query/nestedQuery.py
@@ -29,6 +29,7 @@ class TDTestCase:
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/2-query/percentile.py b/tests/system-test/2-query/percentile.py
index 58660eec6a..935f55a8c2 100644
--- a/tests/system-test/2-query/percentile.py
+++ b/tests/system-test/2-query/percentile.py
@@ -22,6 +22,7 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/pow.py b/tests/system-test/2-query/pow.py
index f803da6176..a067d66547 100644
--- a/tests/system-test/2-query/pow.py
+++ b/tests/system-test/2-query/pow.py
@@ -11,6 +11,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/qnodeCluster.py b/tests/system-test/2-query/qnodeCluster.py
index 6f3856146d..d15ef89eb8 100644
--- a/tests/system-test/2-query/qnodeCluster.py
+++ b/tests/system-test/2-query/qnodeCluster.py
@@ -27,6 +27,7 @@ class TDTestCase:
updatecfgDict["clientCfg"] = clientCfgDict
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/queryQnode.py b/tests/system-test/2-query/queryQnode.py
index e3f4732c08..fec553436c 100644
--- a/tests/system-test/2-query/queryQnode.py
+++ b/tests/system-test/2-query/queryQnode.py
@@ -65,6 +65,7 @@ class TDTestCase:
# init
def init(self, conn, logSql=True, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# tdSql.prepare()
diff --git a/tests/system-test/2-query/query_cols_tags_and_or.py b/tests/system-test/2-query/query_cols_tags_and_or.py
index 9fc016cb40..b8d4e3c314 100644
--- a/tests/system-test/2-query/query_cols_tags_and_or.py
+++ b/tests/system-test/2-query/query_cols_tags_and_or.py
@@ -17,6 +17,7 @@ from util.common import tdCom
import random
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
## add for TD-6672
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/round.py b/tests/system-test/2-query/round.py
index 8f969a71d7..e3d98d6986 100644
--- a/tests/system-test/2-query/round.py
+++ b/tests/system-test/2-query/round.py
@@ -10,6 +10,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/rtrim.py b/tests/system-test/2-query/rtrim.py
index e4835fbd8e..effb596e1d 100644
--- a/tests/system-test/2-query/rtrim.py
+++ b/tests/system-test/2-query/rtrim.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/sample.py b/tests/system-test/2-query/sample.py
index d1890d8623..a09d6a2b85 100644
--- a/tests/system-test/2-query/sample.py
+++ b/tests/system-test/2-query/sample.py
@@ -25,6 +25,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.ts = 1537146000000
diff --git a/tests/system-test/2-query/sin.py b/tests/system-test/2-query/sin.py
index 46b1ac50ae..4fdec8fd73 100644
--- a/tests/system-test/2-query/sin.py
+++ b/tests/system-test/2-query/sin.py
@@ -11,6 +11,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/smaTest.py b/tests/system-test/2-query/smaTest.py
index 20ed103f8e..0390bae114 100644
--- a/tests/system-test/2-query/smaTest.py
+++ b/tests/system-test/2-query/smaTest.py
@@ -32,6 +32,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.prepare()
diff --git a/tests/system-test/2-query/sml.py b/tests/system-test/2-query/sml.py
index 64fe92438c..676bc0c127 100644
--- a/tests/system-test/2-query/sml.py
+++ b/tests/system-test/2-query/sml.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/2-query/spread.py b/tests/system-test/2-query/spread.py
index 79881d7293..61fe6793f9 100644
--- a/tests/system-test/2-query/spread.py
+++ b/tests/system-test/2-query/spread.py
@@ -31,6 +31,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/sqrt.py b/tests/system-test/2-query/sqrt.py
index 4784b71fc6..9229444f74 100644
--- a/tests/system-test/2-query/sqrt.py
+++ b/tests/system-test/2-query/sqrt.py
@@ -11,6 +11,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/stablity.py b/tests/system-test/2-query/stablity.py
index 3c8d18e9e6..569bee62ec 100755
--- a/tests/system-test/2-query/stablity.py
+++ b/tests/system-test/2-query/stablity.py
@@ -29,6 +29,7 @@ class TDTestCase:
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/2-query/statecount.py b/tests/system-test/2-query/statecount.py
index 1d9a04cc73..2aa9194d37 100644
--- a/tests/system-test/2-query/statecount.py
+++ b/tests/system-test/2-query/statecount.py
@@ -13,6 +13,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.ts = 1420041600000 # 2015-01-01 00:00:00 this is begin time for first record
diff --git a/tests/system-test/2-query/stateduration.py b/tests/system-test/2-query/stateduration.py
index 009ec37764..9c37dc68e7 100644
--- a/tests/system-test/2-query/stateduration.py
+++ b/tests/system-test/2-query/stateduration.py
@@ -18,6 +18,7 @@ from util.sql import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.ts = 1537146000000
diff --git a/tests/system-test/2-query/stddev.py b/tests/system-test/2-query/stddev.py
index d044fb0dd7..1a2118a966 100644
--- a/tests/system-test/2-query/stddev.py
+++ b/tests/system-test/2-query/stddev.py
@@ -21,6 +21,7 @@ from util.common import *
from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.dbname = 'db_test'
diff --git a/tests/system-test/2-query/substr.py b/tests/system-test/2-query/substr.py
index 9b317aeab6..302711b13d 100644
--- a/tests/system-test/2-query/substr.py
+++ b/tests/system-test/2-query/substr.py
@@ -30,6 +30,7 @@ LENS = 6
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(),False)
diff --git a/tests/system-test/2-query/sum.py b/tests/system-test/2-query/sum.py
index ec1f60a720..27096fe5ad 100644
--- a/tests/system-test/2-query/sum.py
+++ b/tests/system-test/2-query/sum.py
@@ -25,6 +25,7 @@ DBNAME = "db"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/tail.py b/tests/system-test/2-query/tail.py
index 3d5ecfaa9a..f925380c09 100644
--- a/tests/system-test/2-query/tail.py
+++ b/tests/system-test/2-query/tail.py
@@ -12,6 +12,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/tan.py b/tests/system-test/2-query/tan.py
index 203f149712..27e6efb475 100644
--- a/tests/system-test/2-query/tan.py
+++ b/tests/system-test/2-query/tan.py
@@ -11,6 +11,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/timetruncate.py b/tests/system-test/2-query/timetruncate.py
index 917b98daa9..32d6ef98e9 100644
--- a/tests/system-test/2-query/timetruncate.py
+++ b/tests/system-test/2-query/timetruncate.py
@@ -8,6 +8,7 @@ from datetime import datetime
from util.gettime import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
print(conn)
diff --git a/tests/system-test/2-query/timezone.py b/tests/system-test/2-query/timezone.py
index 0a2da114de..316e776cb1 100644
--- a/tests/system-test/2-query/timezone.py
+++ b/tests/system-test/2-query/timezone.py
@@ -12,6 +12,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
diff --git a/tests/system-test/2-query/top.py b/tests/system-test/2-query/top.py
index 1cba18c2e2..d85e4dc768 100644
--- a/tests/system-test/2-query/top.py
+++ b/tests/system-test/2-query/top.py
@@ -20,6 +20,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()
diff --git a/tests/system-test/2-query/tsbsQuery.py b/tests/system-test/2-query/tsbsQuery.py
index c4604799a1..106d43ea38 100644
--- a/tests/system-test/2-query/tsbsQuery.py
+++ b/tests/system-test/2-query/tsbsQuery.py
@@ -21,6 +21,7 @@ class TDTestCase:
updatecfgDict["clientCfg"] = clientCfgDict
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/ttl_comment.py b/tests/system-test/2-query/ttl_comment.py
index fabd641da7..3ae602fa23 100644
--- a/tests/system-test/2-query/ttl_comment.py
+++ b/tests/system-test/2-query/ttl_comment.py
@@ -25,6 +25,7 @@ class TDTestCase:
return
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/2-query/twa.py b/tests/system-test/2-query/twa.py
index 1fd3b8fdda..16b9779fa8 100644
--- a/tests/system-test/2-query/twa.py
+++ b/tests/system-test/2-query/twa.py
@@ -10,6 +10,7 @@ class TDTestCase:
updatecfgDict = {"maxTablesPerVnode":2 ,"minTablesPerVnode":2,"tableIncStepPerVnode":2 }
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.vnode_disbutes = None
diff --git a/tests/system-test/2-query/union.py b/tests/system-test/2-query/union.py
index 737817f262..82dcfe12e6 100644
--- a/tests/system-test/2-query/union.py
+++ b/tests/system-test/2-query/union.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/union1.py b/tests/system-test/2-query/union1.py
index 1ca25e7844..8db5ce01f3 100644
--- a/tests/system-test/2-query/union1.py
+++ b/tests/system-test/2-query/union1.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/union2.py b/tests/system-test/2-query/union2.py
index c063b5c383..b30d699482 100644
--- a/tests/system-test/2-query/union2.py
+++ b/tests/system-test/2-query/union2.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/union3.py b/tests/system-test/2-query/union3.py
index 3322f30359..7e2bdf6143 100644
--- a/tests/system-test/2-query/union3.py
+++ b/tests/system-test/2-query/union3.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/union4.py b/tests/system-test/2-query/union4.py
index 2da83d8190..efb5b71c4d 100644
--- a/tests/system-test/2-query/union4.py
+++ b/tests/system-test/2-query/union4.py
@@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/unique.py b/tests/system-test/2-query/unique.py
index b954fd004b..6c51854b43 100644
--- a/tests/system-test/2-query/unique.py
+++ b/tests/system-test/2-query/unique.py
@@ -13,6 +13,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/upper.py b/tests/system-test/2-query/upper.py
index cc5347774c..e28483dc56 100644
--- a/tests/system-test/2-query/upper.py
+++ b/tests/system-test/2-query/upper.py
@@ -23,6 +23,7 @@ TS_TYPE_COL = [TS_COL]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/2-query/varchar.py b/tests/system-test/2-query/varchar.py
index 3d5f443b3c..f0849010c6 100644
--- a/tests/system-test/2-query/varchar.py
+++ b/tests/system-test/2-query/varchar.py
@@ -10,6 +10,7 @@ from util.cases import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
diff --git a/tests/system-test/5-taos-tools/TD-12478.py b/tests/system-test/5-taos-tools/TD-12478.py
index 576e59f339..66f960338e 100644
--- a/tests/system-test/5-taos-tools/TD-12478.py
+++ b/tests/system-test/5-taos-tools/TD-12478.py
@@ -37,6 +37,7 @@ class TDTestCase:
return
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py b/tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py
index 8cb58b8d70..cbc9e83ba8 100644
--- a/tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py
+++ b/tests/system-test/5-taos-tools/taosdump/taosdumpTestColTag.py
@@ -29,6 +29,7 @@ class TDTestCase:
return
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/6-cluster/5dnode3mnodeDrop.py b/tests/system-test/6-cluster/5dnode3mnodeDrop.py
index 52f734b534..73c19b8cea 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeDrop.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeDrop.py
@@ -25,6 +25,7 @@ class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
+ self.replicaVar = int(replicaVar)
def buildcluster(self,dnodenumber):
self.depoly_cluster(dnodenumber)
diff --git a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py
index 62f4b248f9..00f0472db3 100644
--- a/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py
+++ b/tests/system-test/6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py
@@ -172,7 +172,7 @@ class TDTestCase:
def run(self):
# print(self.master_dnode.cfgDict)
- self.fiveDnodeThreeMnode(dnodeNumbers=6,mnodeNums=3,restartNumbers=2,stopRole='dnode')
+ self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=2,stopRole='dnode')
def stop(self):
tdSql.close()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py
index 435701bd8f..9bdc0a2cf4 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py
index f0d5e8328a..d33a1b0d27 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py
index 8d0801500e..75e01977fd 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas_querys.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py
index 379615d358..77dcab90bf 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py
index 240169dbb6..1a2c31a311 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_force_stop_all_dnodes.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py
index 25b5de5afa..a9fb9555e8 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py
index ab5359601a..2eb631d433 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_all_vnode.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py
index 4eec3c348d..6102a82b04 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_follower.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py
index 0fe18ab705..d87ec3d35e 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_querys_loop_restart_leader.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py
index 75a7ca51b6..fc6d3c0683 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_all_dnodes.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py
index e9c63151f3..00b808b8b4 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_sync.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py
index 5dd57542b3..e64649189d 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py
index fd00e89216..b633887009 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_follower_unsync_force_stop.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
index f98e2c07b5..6415da94b4 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader.py
@@ -20,6 +20,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py
index 5be2b67c31..1b99c1e92b 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_insertdatas_stop_leader_forece_stop.py
@@ -20,6 +20,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py
index e8fa3099e9..ea5c4679a9 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_mnode3_insertdatas_querys.py
@@ -19,6 +19,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py
index 21d0c2d2b8..1dcaae452e 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py
index ff6ce98c81..945fcf2990 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_follower_force_stop.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py
index e36f488c2b..6b87bee5a3 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py
index 050b98024d..8ef151a385 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_querydatas_stop_leader_force_stop.py
@@ -22,6 +22,7 @@ sys.path.append(os.path.dirname(__file__))
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
index d086743306..49e5cafe96 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups.py
@@ -18,6 +18,7 @@ import subprocess
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
index b9d3afa765..20cf7c583a 100644
--- a/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
+++ b/tests/system-test/6-cluster/vnode/4dnode1mnode_basic_replica3_vgroups_stopOne.py
@@ -20,6 +20,7 @@ import subprocess
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
self.host = socket.gethostname()
diff --git a/tests/system-test/7-tmq/basic5.py b/tests/system-test/7-tmq/basic5.py
index d1985c6567..7d42a3e81f 100644
--- a/tests/system-test/7-tmq/basic5.py
+++ b/tests/system-test/7-tmq/basic5.py
@@ -27,6 +27,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/create_wrong_topic.py b/tests/system-test/7-tmq/create_wrong_topic.py
index dbdcedcbe8..39d519fec1 100644
--- a/tests/system-test/7-tmq/create_wrong_topic.py
+++ b/tests/system-test/7-tmq/create_wrong_topic.py
@@ -14,6 +14,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),logSql)
self.setsql = TDSetSql()
diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
index 484969010c..808a4935e3 100644
--- a/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
+++ b/tests/system-test/7-tmq/dataFromTsdbNWal-multiCtb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/dataFromTsdbNWal.py b/tests/system-test/7-tmq/dataFromTsdbNWal.py
index ed22df2b07..8386c22cd0 100644
--- a/tests/system-test/7-tmq/dataFromTsdbNWal.py
+++ b/tests/system-test/7-tmq/dataFromTsdbNWal.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/db.py b/tests/system-test/7-tmq/db.py
index 17668d8d03..04f5aac559 100644
--- a/tests/system-test/7-tmq/db.py
+++ b/tests/system-test/7-tmq/db.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
index af6c51d947..4371a909c2 100644
--- a/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
+++ b/tests/system-test/7-tmq/dropDbR3ConflictTransaction.py
@@ -27,6 +27,7 @@ class TDTestCase:
self.rowsPerTbl = 2
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/schema.py b/tests/system-test/7-tmq/schema.py
index 2a67cd1bc1..04224fbc7e 100644
--- a/tests/system-test/7-tmq/schema.py
+++ b/tests/system-test/7-tmq/schema.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/stbFilter.py b/tests/system-test/7-tmq/stbFilter.py
index 0f0e7c5287..6b48a6d570 100644
--- a/tests/system-test/7-tmq/stbFilter.py
+++ b/tests/system-test/7-tmq/stbFilter.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/stbTagFilter-1ctb.py b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
index 0ef454f3e2..c4a7c8cae5 100644
--- a/tests/system-test/7-tmq/stbTagFilter-1ctb.py
+++ b/tests/system-test/7-tmq/stbTagFilter-1ctb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py
index 0ec114d3bd..c380d201b2 100644
--- a/tests/system-test/7-tmq/stbTagFilter-multiCtb.py
+++ b/tests/system-test/7-tmq/stbTagFilter-multiCtb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/subscribeDb.py b/tests/system-test/7-tmq/subscribeDb.py
index 3790bd1690..fd06eedefd 100644
--- a/tests/system-test/7-tmq/subscribeDb.py
+++ b/tests/system-test/7-tmq/subscribeDb.py
@@ -21,6 +21,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeDb0.py b/tests/system-test/7-tmq/subscribeDb0.py
index 81d824156d..d4c5e2f87f 100644
--- a/tests/system-test/7-tmq/subscribeDb0.py
+++ b/tests/system-test/7-tmq/subscribeDb0.py
@@ -21,6 +21,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeDb1.py b/tests/system-test/7-tmq/subscribeDb1.py
index 02e72d0475..ea78c90abd 100644
--- a/tests/system-test/7-tmq/subscribeDb1.py
+++ b/tests/system-test/7-tmq/subscribeDb1.py
@@ -21,6 +21,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeDb2.py b/tests/system-test/7-tmq/subscribeDb2.py
index edf1a8ec80..d045842e45 100644
--- a/tests/system-test/7-tmq/subscribeDb2.py
+++ b/tests/system-test/7-tmq/subscribeDb2.py
@@ -22,6 +22,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeDb3.py b/tests/system-test/7-tmq/subscribeDb3.py
index 34df61dc09..819588badc 100644
--- a/tests/system-test/7-tmq/subscribeDb3.py
+++ b/tests/system-test/7-tmq/subscribeDb3.py
@@ -21,6 +21,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeDb4.py b/tests/system-test/7-tmq/subscribeDb4.py
index 27efbee016..7f5169361c 100644
--- a/tests/system-test/7-tmq/subscribeDb4.py
+++ b/tests/system-test/7-tmq/subscribeDb4.py
@@ -52,6 +52,7 @@ class TDTestCase:
hostname = socket.gethostname()
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
logSql = False
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/7-tmq/subscribeStb.py b/tests/system-test/7-tmq/subscribeStb.py
index 213f9aa89c..3ff0b25ff6 100644
--- a/tests/system-test/7-tmq/subscribeStb.py
+++ b/tests/system-test/7-tmq/subscribeStb.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeStb0.py b/tests/system-test/7-tmq/subscribeStb0.py
index 0c188754b2..1463cad627 100644
--- a/tests/system-test/7-tmq/subscribeStb0.py
+++ b/tests/system-test/7-tmq/subscribeStb0.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeStb1.py b/tests/system-test/7-tmq/subscribeStb1.py
index 4d5407e927..edbe1bc3c6 100644
--- a/tests/system-test/7-tmq/subscribeStb1.py
+++ b/tests/system-test/7-tmq/subscribeStb1.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeStb2.py b/tests/system-test/7-tmq/subscribeStb2.py
index bb38a981d3..6c3e122902 100644
--- a/tests/system-test/7-tmq/subscribeStb2.py
+++ b/tests/system-test/7-tmq/subscribeStb2.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeStb3.py b/tests/system-test/7-tmq/subscribeStb3.py
index 32272491c6..025f403282 100644
--- a/tests/system-test/7-tmq/subscribeStb3.py
+++ b/tests/system-test/7-tmq/subscribeStb3.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/subscribeStb4.py b/tests/system-test/7-tmq/subscribeStb4.py
index e347d27ac1..6aa3da66a4 100644
--- a/tests/system-test/7-tmq/subscribeStb4.py
+++ b/tests/system-test/7-tmq/subscribeStb4.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmq3mnodeSwitch.py b/tests/system-test/7-tmq/tmq3mnodeSwitch.py
index 837317e5d7..a6bf01aa06 100644
--- a/tests/system-test/7-tmq/tmq3mnodeSwitch.py
+++ b/tests/system-test/7-tmq/tmq3mnodeSwitch.py
@@ -33,6 +33,7 @@ class TDTestCase:
self.dnodeOfLeader = 0
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqAlterSchema.py b/tests/system-test/7-tmq/tmqAlterSchema.py
index 3bff0e4754..a70678219f 100644
--- a/tests/system-test/7-tmq/tmqAlterSchema.py
+++ b/tests/system-test/7-tmq/tmqAlterSchema.py
@@ -33,6 +33,7 @@ class TDTestCase:
self.dnodeOfLeader = 0
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqAutoCreateTbl.py b/tests/system-test/7-tmq/tmqAutoCreateTbl.py
index 568e49388b..41073d83ae 100644
--- a/tests/system-test/7-tmq/tmqAutoCreateTbl.py
+++ b/tests/system-test/7-tmq/tmqAutoCreateTbl.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqCheckData.py b/tests/system-test/7-tmq/tmqCheckData.py
index 9995af15c2..04d0744ab5 100644
--- a/tests/system-test/7-tmq/tmqCheckData.py
+++ b/tests/system-test/7-tmq/tmqCheckData.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqCheckData1.py b/tests/system-test/7-tmq/tmqCheckData1.py
index 5b055cf725..b9dac62833 100644
--- a/tests/system-test/7-tmq/tmqCheckData1.py
+++ b/tests/system-test/7-tmq/tmqCheckData1.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqCommon.py b/tests/system-test/7-tmq/tmqCommon.py
index e71e4d257d..c153e94caa 100644
--- a/tests/system-test/7-tmq/tmqCommon.py
+++ b/tests/system-test/7-tmq/tmqCommon.py
@@ -38,6 +38,7 @@ from util.common import *
class TMQCom:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdSql.init(conn.cursor())
# tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
index 655097c924..f372a2b742 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb-funcNFilter.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 100000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py
index 709464fc1e..c7f95f6f41 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-1ctb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 100000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
index 09ad6e38c9..26dacf514d 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb-funcNFilter.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 150
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
index 95e060d581..d6f100041b 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg-mutilCtb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 70
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py
index fc3437613b..9bfc01529f 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb-mutilVg.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb.py b/tests/system-test/7-tmq/tmqConsFromTsdb.py
index 73af4d196f..975f89cbd7 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
index 20db44ef19..6a03f0f751 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb-funcNFilter.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py
index bd42f74c03..009862137f 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-1ctb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 100000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
index 2eb1a7c52a..95ce03e653 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb-funcNFilter.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 150
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
index 351a10ed1f..8ff01444f8 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg-mutilCtb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 70
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
index 6504274993..528b3a8088 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1-mutilVg.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsFromTsdb1.py b/tests/system-test/7-tmq/tmqConsFromTsdb1.py
index 2cfb6581d8..c0c459d315 100644
--- a/tests/system-test/7-tmq/tmqConsFromTsdb1.py
+++ b/tests/system-test/7-tmq/tmqConsFromTsdb1.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqConsumerGroup.py b/tests/system-test/7-tmq/tmqConsumerGroup.py
index 5c7d21069b..02093a2d88 100644
--- a/tests/system-test/7-tmq/tmqConsumerGroup.py
+++ b/tests/system-test/7-tmq/tmqConsumerGroup.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqDelete-1ctb.py b/tests/system-test/7-tmq/tmqDelete-1ctb.py
index 7509e43af7..4b45b1a834 100644
--- a/tests/system-test/7-tmq/tmqDelete-1ctb.py
+++ b/tests/system-test/7-tmq/tmqDelete-1ctb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqDelete-multiCtb.py b/tests/system-test/7-tmq/tmqDelete-multiCtb.py
index ded6fe94d7..3b72b4aea5 100644
--- a/tests/system-test/7-tmq/tmqDelete-multiCtb.py
+++ b/tests/system-test/7-tmq/tmqDelete-multiCtb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqDnode.py b/tests/system-test/7-tmq/tmqDnode.py
index 921e543e2d..e1d6d91e2d 100644
--- a/tests/system-test/7-tmq/tmqDnode.py
+++ b/tests/system-test/7-tmq/tmqDnode.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqDnodeRestart.py b/tests/system-test/7-tmq/tmqDnodeRestart.py
index 205f513351..a44ff916e5 100644
--- a/tests/system-test/7-tmq/tmqDnodeRestart.py
+++ b/tests/system-test/7-tmq/tmqDnodeRestart.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py
index 0d75df58f9..6c49fae299 100644
--- a/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py
+++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot0.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
index 79cb79a83c..3fc5a2fdc7 100644
--- a/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqDropNtb-snapshot1.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqDropStb.py b/tests/system-test/7-tmq/tmqDropStb.py
index 7679f0ca44..3789632984 100644
--- a/tests/system-test/7-tmq/tmqDropStb.py
+++ b/tests/system-test/7-tmq/tmqDropStb.py
@@ -52,6 +52,7 @@ class TDTestCase:
hostname = socket.gethostname()
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
logSql = False
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/7-tmq/tmqDropStbCtb.py b/tests/system-test/7-tmq/tmqDropStbCtb.py
index 95780538d1..c9e34136cc 100644
--- a/tests/system-test/7-tmq/tmqDropStbCtb.py
+++ b/tests/system-test/7-tmq/tmqDropStbCtb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqError.py b/tests/system-test/7-tmq/tmqError.py
index 5c858d0dce..164e7f1c8c 100644
--- a/tests/system-test/7-tmq/tmqError.py
+++ b/tests/system-test/7-tmq/tmqError.py
@@ -30,6 +30,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqModule.py b/tests/system-test/7-tmq/tmqModule.py
index 0063edce56..d6b4aff938 100644
--- a/tests/system-test/7-tmq/tmqModule.py
+++ b/tests/system-test/7-tmq/tmqModule.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqShow.py b/tests/system-test/7-tmq/tmqShow.py
index 0a21680e78..406fd9f7f3 100644
--- a/tests/system-test/7-tmq/tmqShow.py
+++ b/tests/system-test/7-tmq/tmqShow.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py
index 7e14f2568d..01f1ca5b15 100644
--- a/tests/system-test/7-tmq/tmqSubscribeStb-r3.py
+++ b/tests/system-test/7-tmq/tmqSubscribeStb-r3.py
@@ -16,8 +16,11 @@ from util.dnodes import TDDnodes
from util.dnodes import TDDnode
from util.cluster import *
from util.common import *
+sys.path.append("./6-cluster")
sys.path.append("./7-tmq")
from tmqCommon import *
+from clusterCommonCreate import *
+from clusterCommonCheck import clusterComCheck
class TDTestCase:
def __init__(self):
@@ -26,8 +29,10 @@ class TDTestCase:
self.vgroups = 4
self.ctbNum = 1000
self.rowsPerTbl = 100
+ self.dnodeNumbers = 5
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
@@ -118,15 +123,19 @@ class TDTestCase:
tdLog.info("================= restart dnode 2===========================")
cluster.dnodes[1].stoptaosd()
cluster.dnodes[1].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 3===========================")
cluster.dnodes[2].stoptaosd()
cluster.dnodes[2].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 4===========================")
cluster.dnodes[3].stoptaosd()
cluster.dnodes[3].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 5===========================")
cluster.dnodes[4].stoptaosd()
cluster.dnodes[4].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
pThread.join()
# tdLog.info("restart taosd to ensure that the data falls into the disk")
@@ -192,9 +201,9 @@ class TDTestCase:
tdSql.query(queryString)
totalRowsFromQuery = tdSql.getRows()
- tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
+ tdLog.info("act consume rows: %d, act query rows: %d "%(totalConsumeRows, totalRowsFromQuery))
- if totalConsumeRows != totalRowsFromQuery:
+ if totalConsumeRows < totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
@@ -223,7 +232,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
- 'pollDelay': 15,
+ 'pollDelay': 30,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
@@ -237,7 +246,10 @@ class TDTestCase:
queryString = "select ts, c1, c2 from %s.%s where t4 == 'beijing' or t4 == 'changsha' "%(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicFromStb1, queryString)
tdLog.info("create topic sql: %s"%sqlString)
- tdSql.execute(sqlString)
+ tdSql.execute(sqlString)
+
+ tdSql.query(queryString)
+ totalRowsFromQuery = tdSql.getRows()
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
@@ -256,15 +268,19 @@ class TDTestCase:
tdLog.info("================= restart dnode 2===========================")
cluster.dnodes[1].stoptaosd()
cluster.dnodes[1].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 3===========================")
cluster.dnodes[2].stoptaosd()
cluster.dnodes[2].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 4===========================")
cluster.dnodes[3].stoptaosd()
cluster.dnodes[3].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("================= restart dnode 5===========================")
cluster.dnodes[4].stoptaosd()
cluster.dnodes[4].starttaosd()
+ clusterComCheck.checkDnodes(self.dnodeNumbers)
tdLog.info("start to check consume result")
expectRows = 1
@@ -273,12 +289,9 @@ class TDTestCase:
for i in range(expectRows):
totalConsumeRows += resultList[i]
- tdSql.query(queryString)
- totalRowsFromQuery = tdSql.getRows()
+ tdLog.info("act consume rows: %d, act query rows: %d "%(totalConsumeRows, totalRowsFromQuery))
- tdLog.info("act consume rows: %d, act query rows: %d, expect consume rows: %d, "%(totalConsumeRows, totalRowsFromQuery, expectrowcnt))
-
- if totalConsumeRows != totalRowsFromQuery:
+ if totalConsumeRows < totalRowsFromQuery:
tdLog.exit("tmq consume rows error!")
# tmqCom.checkFileContent(consumerId, queryString)
@@ -289,8 +302,8 @@ class TDTestCase:
tdLog.printNoPrefix("======== test case 2 end ...... ")
def run(self):
- self.prepareTestEnv()
- self.tmqCase1()
+ #self.prepareTestEnv()
+ #self.tmqCase1()
self.prepareTestEnv()
self.tmqCase2()
diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py
index 8158249fa6..297429b495 100644
--- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py
+++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot0.py
@@ -24,6 +24,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py
index 97e50cf654..9c139b50de 100644
--- a/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqUdf-multCtb-snapshot1.py
@@ -24,6 +24,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqUdf.py b/tests/system-test/7-tmq/tmqUdf.py
index d081423142..8593fd4f1e 100644
--- a/tests/system-test/7-tmq/tmqUdf.py
+++ b/tests/system-test/7-tmq/tmqUdf.py
@@ -24,6 +24,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/7-tmq/tmqUpdate-1ctb.py b/tests/system-test/7-tmq/tmqUpdate-1ctb.py
index a95920b7c4..b974e4a41a 100644
--- a/tests/system-test/7-tmq/tmqUpdate-1ctb.py
+++ b/tests/system-test/7-tmq/tmqUpdate-1ctb.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py
index 0ced8fc34b..d5df88cf43 100644
--- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py
+++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot0.py
@@ -23,6 +23,7 @@ class TDTestCase:
self.autoCtbPrefix = 'aCtb'
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py
index bdcbb9578e..6b8c10de27 100644
--- a/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py
+++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb-snapshot1.py
@@ -23,6 +23,7 @@ class TDTestCase:
self.autoCtbPrefix = 'aCtb'
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py
index df150130c0..3975013e74 100644
--- a/tests/system-test/7-tmq/tmqUpdate-multiCtb.py
+++ b/tests/system-test/7-tmq/tmqUpdate-multiCtb.py
@@ -23,6 +23,7 @@ class TDTestCase:
self.autoCtbPrefix = 'aCtb'
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmqUpdateWithConsume.py b/tests/system-test/7-tmq/tmqUpdateWithConsume.py
index dbeac427ac..49a475ff16 100644
--- a/tests/system-test/7-tmq/tmqUpdateWithConsume.py
+++ b/tests/system-test/7-tmq/tmqUpdateWithConsume.py
@@ -22,6 +22,7 @@ class TDTestCase:
self.rowsPerTbl = 1000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/7-tmq/tmq_taosx.py b/tests/system-test/7-tmq/tmq_taosx.py
index fd7e1b7bb1..f2fbd84865 100644
--- a/tests/system-test/7-tmq/tmq_taosx.py
+++ b/tests/system-test/7-tmq/tmq_taosx.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/99-TDcase/TD-15517.py b/tests/system-test/99-TDcase/TD-15517.py
index e45a54c272..db06700284 100644
--- a/tests/system-test/99-TDcase/TD-15517.py
+++ b/tests/system-test/99-TDcase/TD-15517.py
@@ -21,6 +21,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/99-TDcase/TD-15554.py b/tests/system-test/99-TDcase/TD-15554.py
index 02654b5703..51934ccd44 100644
--- a/tests/system-test/99-TDcase/TD-15554.py
+++ b/tests/system-test/99-TDcase/TD-15554.py
@@ -20,6 +20,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/99-TDcase/TD-15557.py b/tests/system-test/99-TDcase/TD-15557.py
index ca29e1282f..884c028a65 100644
--- a/tests/system-test/99-TDcase/TD-15557.py
+++ b/tests/system-test/99-TDcase/TD-15557.py
@@ -21,6 +21,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/99-TDcase/TD-15563.py b/tests/system-test/99-TDcase/TD-15563.py
index 6b37f25bb6..5ea652b4fb 100644
--- a/tests/system-test/99-TDcase/TD-15563.py
+++ b/tests/system-test/99-TDcase/TD-15563.py
@@ -21,6 +21,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/99-TDcase/TD-16025.py b/tests/system-test/99-TDcase/TD-16025.py
index 0adc313429..8c9fa9319f 100644
--- a/tests/system-test/99-TDcase/TD-16025.py
+++ b/tests/system-test/99-TDcase/TD-16025.py
@@ -28,6 +28,7 @@ class TDTestCase:
#print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/99-TDcase/TD-16821.py b/tests/system-test/99-TDcase/TD-16821.py
index b2b5e3f425..f57fae752f 100644
--- a/tests/system-test/99-TDcase/TD-16821.py
+++ b/tests/system-test/99-TDcase/TD-16821.py
@@ -16,6 +16,7 @@ from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/99-TDcase/TD-17255.py b/tests/system-test/99-TDcase/TD-17255.py
index bcf02b654f..0f83468754 100644
--- a/tests/system-test/99-TDcase/TD-17255.py
+++ b/tests/system-test/99-TDcase/TD-17255.py
@@ -21,6 +21,7 @@ class TDTestCase:
self.rowsPerTbl = 10000
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
diff --git a/tests/system-test/99-TDcase/TD-17699.py b/tests/system-test/99-TDcase/TD-17699.py
index d4c4a4cc32..2862f4a78d 100644
--- a/tests/system-test/99-TDcase/TD-17699.py
+++ b/tests/system-test/99-TDcase/TD-17699.py
@@ -53,6 +53,7 @@ class TDTestCase:
hostname = socket.gethostname()
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
logSql = False
tdSql.init(conn.cursor(), logSql)
diff --git a/tests/system-test/99-TDcase/TD-19201.py b/tests/system-test/99-TDcase/TD-19201.py
index 0fd86bf6b4..2a25d1ad7e 100644
--- a/tests/system-test/99-TDcase/TD-19201.py
+++ b/tests/system-test/99-TDcase/TD-19201.py
@@ -14,6 +14,7 @@ class TDTestCase:
hostname = socket.gethostname()
def init(self, conn, logSql, replicaVar=1):
+ self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
#tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql) # output sql.txt file
diff --git a/tests/system-test/fulltest.sh b/tests/system-test/fulltest.sh
index c65c574e7c..febcc4b728 100755
--- a/tests/system-test/fulltest.sh
+++ b/tests/system-test/fulltest.sh
@@ -247,7 +247,7 @@ python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 6 -
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopDnodeCreateStb.py -N 6 -M 3 -n 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 6 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopMnodeCreateStb.py -N 6 -M 3 -n 3
-#python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 6 -M 3
+python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 6 -M 3
python3 ./test.py -f 6-cluster/5dnode3mnodeSep1VnodeStopVnodeCreateStb.py -N 6 -M 3 -n 3
python3 ./test.py -f 6-cluster/5dnode3mnodeRestartDnodeInsertData.py -N 6 -M 3