Merge branch '3.0' into test/cluster_case

commit 26a1dd58c4
@@ -2,7 +2,7 @@
# zlib
ExternalProject_Add(taos-tools
        GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
        GIT_TAG 817cb6a
        GIT_TAG 2.1.1
        SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
        BINARY_DIR ""
        #BUILD_IN_SOURCE TRUE
File diff suppressed because it is too large.
@@ -594,6 +594,24 @@ INSERT INTO tb_name VALUES (TODAY(), ...);

TDengine supports aggregate queries over data and provides the following aggregate functions.

### APERCENTILE

```sql
SELECT APERCENTILE(field_name, P[, algo_type]) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns an approximate percentile of the values of the specified column in a table or supertable. It is similar to PERCENTILE, but returns an approximate result.

**Return type**: DOUBLE.

**Applicable data types**: numeric types.

**Applicable to**: tables and supertables.

**Notes**:
- P is in the range [0,100]; a value of 0 is equivalent to MIN and 100 is equivalent to MAX.
- algo_type is either "default" or "t-digest". With "default" the function uses a histogram-based algorithm; with "t-digest" it uses the t-digest algorithm to compute an approximate percentile. If algo_type is not specified, "default" is used.

### AVG

```sql
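For illustration only, a query against a hypothetical supertable `meters` with a numeric column `current` (names assumed, not taken from this change) might look like:

```sql
-- Approximate median using the default histogram-based algorithm
SELECT APERCENTILE(current, 50) FROM meters;

-- Approximate 95th percentile using the t-digest algorithm
SELECT APERCENTILE(current, 95, "t-digest") FROM meters;
```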
@@ -656,6 +674,7 @@ SELECT ELAPSED(ts_primary_key [, time_unit]) FROM { tb_name | stb_name } [WHERE

- For nested queries, ELAPSED is valid only when the inner query outputs an implicit timestamp column. For example, in `select elapsed(ts) from (select diff(value) from sub1)`, the diff function makes the inner query output an implicit timestamp column, which is the primary key column and can be used as the first argument of elapsed. Conversely, in `select elapsed(ts) from (select * from sub1)`, the ts column loses its meaning as the primary key column once it reaches the outer query, so elapsed cannot be used. In addition, because elapsed strongly depends on the timeline, a statement such as `select elapsed(ts) from (select diff(value) from st group by tbname)` returns a result but has no practical meaning, and this usage will be restricted in the future.
- Cannot be used together with the leastsquares, diff, derivative, top, bottom, last_row, or interp functions.


### LEASTSQUARES

```sql
@@ -671,21 +690,6 @@ SELECT LEASTSQUARES(field_name, start_val, step_val) FROM tb_name [WHERE clause]

**Applicable to**: tables.


### MODE

```sql
SELECT MODE(field_name) FROM tb_name [WHERE clause];
```

**Description**: Returns the value with the highest frequency of occurrence; if several values share the highest frequency, NULL is returned.

**Return type**: same as the input data type.

**Applicable data types**: all field types.

**Applicable to**: tables and supertables.


### SPREAD

```sql
@@ -778,28 +782,26 @@ SELECT HISTOGRAM(field_name,bin_type, bin_description, normalized) FROM tb_nam

3. normalized: whether to normalize the returned result to the range 0–1. Valid inputs are 0 and 1.
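As a sketch of how the normalized flag reads in practice (the table name `meters`, column `voltage`, and the "user_input" bin specification below are assumptions for illustration, not part of this change):

```sql
-- Raw counts per user-defined bin
SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 0) FROM meters;

-- Same bins, counts normalized to the 0–1 range
SELECT HISTOGRAM(voltage, "user_input", "[1,3,5,7]", 1) FROM meters;
```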

## Selection Functions

Selection functions return one or more rows from the query result set according to their semantics. The user can also request the ts column or other columns (including tbname and tag columns) in the same query, which makes it easy to tell which data row each selected value comes from.

### APERCENTILE
### PERCENTILE

```sql
SELECT APERCENTILE(field_name, P[, algo_type])
FROM { tb_name | stb_name } [WHERE clause]
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
```

**Description**: Returns an approximate percentile of the values of the specified column in a table or supertable. It is similar to PERCENTILE, but returns an approximate result.
**Description**: Returns the percentile of the values of a column in a table.

**Return type**: DOUBLE.

**Applicable data types**: numeric types.
**Applicable fields**: numeric types.

**Applicable to**: tables and supertables.
**Applicable to**: tables.

**Notes**:
- P is in the range [0,100]; a value of 0 is equivalent to MIN and 100 is equivalent to MAX.
- algo_type is either "default" or "t-digest". With "default" the function uses a histogram-based algorithm; with "t-digest" it uses the t-digest algorithm to compute an approximate percentile. If algo_type is not specified, "default" is used.
**Usage note**: *P* ranges over 0 ≤ *P* ≤ 100; a value of 0 is equivalent to MIN and 100 is equivalent to MAX.


## Selection Functions

Selection functions return one or more rows from the query result set according to their semantics. The user can also request the ts column or other columns (including tbname and tag columns) in the same query, which makes it easy to tell which data row each selected value comes from.

### BOTTOM
@@ -935,21 +937,41 @@ SELECT MIN(field_name) FROM {tb_name | stb_name} [WHERE clause];

**Applicable to**: tables and supertables.


### PERCENTILE
### MODE

```sql
SELECT PERCENTILE(field_name, P) FROM { tb_name } [WHERE clause];
SELECT MODE(field_name) FROM tb_name [WHERE clause];
```

**Description**: Returns the percentile of the values of a column in a table.
**Description**: Returns the value with the highest frequency of occurrence; if several values share the highest frequency, NULL is returned.

**Return type**: DOUBLE.
**Return type**: same as the input data type.

**Applicable fields**: numeric types.
**Applicable data types**: all field types.

**Applicable to**: tables.
**Applicable to**: tables and supertables.

**Usage note**: *P* ranges over 0 ≤ *P* ≤ 100; a value of 0 is equivalent to MIN and 100 is equivalent to MAX.

### SAMPLE

```sql
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns k sampled values of the data. The valid range of k is 1 ≤ k ≤ 1000.

**Return type**: same as the original data type; the result also carries the timestamp of each sampled row.

**Applicable data types**: when used in a supertable query, it cannot be applied to tag columns.

**Nested subquery support**: can be used in both the inner and the outer query.

**Applicable to**: tables and supertables.

**Usage notes**:

- Cannot take part in expression calculation; the function can be applied to regular tables and supertables.
- When used on a supertable, it must be combined with PARTITION BY tbname to force the result down to a single timeline.
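A minimal sketch of the PARTITION BY tbname usage (the supertable `meters` and column `current` are assumed names for illustration):

```sql
-- Up to 5 sampled rows per child table, i.e. one timeline per tbname
SELECT SAMPLE(current, 5) FROM meters PARTITION BY tbname;
```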

### TAIL
@@ -1016,7 +1038,7 @@ SELECT CSUM(field_name) FROM { tb_name | stb_name } [WHERE clause]

**Description**: Cumulative sum; the number of output rows equals the number of input rows.

**Return type**: For integer input columns the return value is a 64-bit signed integer (int64_t); for floating-point input it is a double-precision float (double); for unsigned integer input it is a 64-bit unsigned integer (uint64_t). The result also carries the timestamp of each row.
**Return type**: For integer input columns the return value is a 64-bit signed integer (int64_t); for floating-point input it is a double-precision float (double); for unsigned integer input it is a 64-bit unsigned integer (uint64_t).

**Applicable data types**: numeric types.
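As an illustration of the cumulative sum (the child table name `d1001` and column `current` are assumptions):

```sql
-- Running total of current over the selected rows
SELECT CSUM(current) FROM d1001;
```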
@@ -1045,8 +1067,10 @@ SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHER

**Applicable to**: tables and supertables.

**Usage note**: DERIVATIVE can be used on a supertable when PARTITION BY splits the data into separate timelines (that is, PARTITION BY tbname).

**Usage notes**:

- DERIVATIVE can be used on a supertable when PARTITION BY splits the data into separate timelines (that is, PARTITION BY tbname).
- It can be used together with the associated selected columns, for example: select \_rowts, DERIVATIVE() from.
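A sketch of the second point under assumed names (supertable `meters`, column `current`, a 10-minute interval):

```sql
-- Per-timeline rate of change of current, ignoring negative deltas
SELECT _rowts, DERIVATIVE(current, 10m, 1) FROM meters PARTITION BY tbname;
```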

### DIFF
@@ -1062,7 +1086,10 @@ SELECT {DIFF(field_name, ignore_negative) | DIFF(field_name)} FROM tb_name [WHER

**Applicable to**: tables and supertables.

**Usage note**: The number of output rows is the total number of rows in the range minus one; the first row has no output.
**Usage notes**:

- The number of output rows is the total number of rows in the range minus one; the first row has no output.
- It can be used together with the associated selected columns, for example: select \_rowts, DIFF() from.
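For illustration (the table name `d1001` and column `current` are assumptions):

```sql
-- Difference between consecutive values of current, alongside the row timestamp
SELECT _rowts, DIFF(current) FROM d1001;
```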

### IRATE
@@ -1102,26 +1129,6 @@ SELECT MAVG(field_name, K) FROM { tb_name | stb_name } [WHERE clause]

- Can only be used together with regular columns and selection and projection functions, not with aggregation functions;
- When used on a supertable, it must be combined with PARTITION BY tbname to force the result down to a single timeline.

### SAMPLE

```sql
SELECT SAMPLE(field_name, K) FROM { tb_name | stb_name } [WHERE clause]
```

**Description**: Returns k sampled values of the data. The valid range of k is 1 ≤ k ≤ 1000.

**Return type**: same as the original data type; the result also carries the timestamp of each sampled row.

**Applicable data types**: when used in a supertable query, it cannot be applied to tag columns.

**Nested subquery support**: can be used in both the inner and the outer query.

**Applicable to**: tables and supertables.

**Usage notes**:

- Cannot take part in expression calculation; the function can be applied to regular tables and supertables.
- When used on a supertable, it must be combined with PARTITION BY tbname to force the result down to a single timeline.

### STATECOUNT
@@ -1162,7 +1169,7 @@ SELECT stateDuration(field_name, oper, val, unit) FROM { tb_name | stb_name } [W

- oper : "LT" (less than), "GT" (greater than), "LE" (less than or equal), "GE" (greater than or equal), "NE" (not equal), "EQ" (equal); case-insensitive.
- val : a numeric value.
- unit : the unit of the time length; valid values are [1s, 1m, 1h]; any remainder smaller than one unit is discarded. The default is 1s.
- unit : the unit of the time length; valid time units are 1b (nanoseconds), 1u (microseconds), 1a (milliseconds), 1s (seconds), 1m (minutes), 1h (hours), 1d (days), 1w (weeks). If omitted, the default is the precision of the current database.

**Return type**: INTEGER.
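A sketch of stateDuration with the units listed above (the child table `d1001`, column `current`, and the threshold of 10 are assumed for illustration):

```sql
-- How long, counted in minutes, current has stayed above 10
SELECT STATEDURATION(current, "GT", 10, 1m) FROM d1001;
```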
@@ -257,14 +257,13 @@ static const SSysTableMeta infosMeta[] = {
    {TSDB_INS_TABLE_MNODES, mnodesSchema, tListLen(mnodesSchema)},
    {TSDB_INS_TABLE_MODULES, modulesSchema, tListLen(modulesSchema)},
    {TSDB_INS_TABLE_QNODES, qnodesSchema, tListLen(qnodesSchema)},
    {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
    {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
    // {TSDB_INS_TABLE_SNODES, snodesSchema, tListLen(snodesSchema)},
    // {TSDB_INS_TABLE_BNODES, bnodesSchema, tListLen(bnodesSchema)},
    {TSDB_INS_TABLE_CLUSTER, clusterSchema, tListLen(clusterSchema)},
    {TSDB_INS_TABLE_USER_DATABASES, userDBSchema, tListLen(userDBSchema)},
    {TSDB_INS_TABLE_USER_FUNCTIONS, userFuncSchema, tListLen(userFuncSchema)},
    {TSDB_INS_TABLE_USER_INDEXES, userIdxSchema, tListLen(userIdxSchema)},
    {TSDB_INS_TABLE_USER_STABLES, userStbsSchema, tListLen(userStbsSchema)},
    {TSDB_PERFS_TABLE_STREAMS, streamSchema, tListLen(streamSchema)},
    {TSDB_INS_TABLE_USER_TABLES, userTblsSchema, tListLen(userTblsSchema)},
    {TSDB_INS_TABLE_USER_TAGS, userTagsSchema, tListLen(userTagsSchema)},
    // {TSDB_INS_TABLE_USER_TABLE_DISTRIBUTED, userTblDistSchema, tListLen(userTblDistSchema)},
@@ -413,7 +413,7 @@ int32_t vnodeProcessSyncMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
    SyncClientRequestBatch *pSyncMsg = syncClientRequestBatchFromRpcMsg(pMsg);
    ASSERT(pSyncMsg != NULL);
    code = syncNodeOnClientRequestBatchCb(pSyncNode, pSyncMsg);
    syncClientRequestBatchDestroyDeep(pSyncMsg);
    syncClientRequestBatchDestroy(pSyncMsg);
  } else if (pMsg->msgType == TDMT_SYNC_REQUEST_VOTE) {
    SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pMsg);
    ASSERT(pSyncMsg != NULL);
@@ -1091,6 +1091,8 @@ static int32_t doOpenIntervalAgg(SOperatorInfo* pOperator) {

    // the pDataBlock are always the same one, no need to call this again
    setInputDataBlock(pOperator, pSup->pCtx, pBlock, pInfo->order, scanFlag, true);
    blockDataUpdateTsWindow(pBlock, pInfo->primaryTsIndex);

    hashIntervalAgg(pOperator, &pInfo->binfo.resultRowInfo, pBlock, scanFlag, NULL);
  }
@@ -2271,7 +2271,7 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp
  FOREACH(pAggTarget, pAgg->pTargets) {
    SNode* pScanTarget = NULL;
    FOREACH(pScanTarget, pScanNode->node.pTargets) {
      if (0 == strcmp(((SColumnNode*)pAggTarget)->colName, ((SColumnNode*)pAggTarget)->colName)) {
      if (0 == strcmp(((SColumnNode*)pAggTarget)->colName, ((SColumnNode*)pScanTarget)->colName)) {
        nodesListAppend(pScanTargets, nodesCloneNode(pScanTarget));
        break;
      }
@@ -168,20 +168,20 @@ int32_t schUpdateTaskHandle(SSchJob *pJob, SSchTask *pTask, bool dropExecNode, v
  return TSDB_CODE_SUCCESS;
}

// Note: no more task error processing, handled in function internal
int32_t schProcessOnTaskFailure(SSchJob *pJob, SSchTask *pTask, int32_t errCode) {
  if (TSDB_CODE_SCH_IGNORE_ERROR == errCode) {
    return TSDB_CODE_SCH_IGNORE_ERROR;
  }

  int8_t status = 0;
  if (schJobNeedToStop(pJob, &status)) {
    SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(status));
  int8_t jobStatus = 0;
  if (schJobNeedToStop(pJob, &jobStatus)) {
    SCH_TASK_DLOG("no more task failure processing cause of job status %s", jobTaskStatusStr(jobStatus));
    SCH_ERR_RET(TSDB_CODE_SCH_IGNORE_ERROR);
  }

  if (SCH_GET_TASK_STATUS(pTask) != JOB_TASK_STATUS_EXEC) {
    SCH_TASK_ELOG("task already not in EXEC status, status:%s", SCH_GET_TASK_STATUS_STR(pTask));
  int8_t taskStatus = SCH_GET_TASK_STATUS(pTask);
  if (taskStatus == JOB_TASK_STATUS_FAIL || taskStatus == JOB_TASK_STATUS_SUCC) {
    SCH_TASK_ELOG("task already done, status:%s", jobTaskStatusStr(taskStatus));
    SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
  }
@ -736,6 +736,13 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg** pMsgPArr, bool* pIs
|
|||
|
||||
SRaftMeta raftArr[SYNC_MAX_BATCH_SIZE];
|
||||
for (int i = 0; i < arrSize; ++i) {
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d, batch:%d", TMSG_INFO(pMsgPArr[i]->msgType),
|
||||
pMsgPArr[i]->msgType, arrSize);
|
||||
syncNodeEventLog(pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
SRespStub stub;
|
||||
stub.createTime = taosGetTimestampMs();
|
||||
stub.rpcMsg = *(pMsgPArr[i]);
|
||||
|
@ -790,9 +797,11 @@ int32_t syncNodeProposeBatch(SSyncNode* pSyncNode, SRpcMsg** pMsgPArr, bool* pIs
|
|||
int32_t syncNodePropose(SSyncNode* pSyncNode, SRpcMsg* pMsg, bool isWeak) {
|
||||
int32_t ret = 0;
|
||||
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d", TMSG_INFO(pMsg->msgType), pMsg->msgType);
|
||||
syncNodeEventLog(pSyncNode, eventLog);
|
||||
do {
|
||||
char eventLog[128];
|
||||
snprintf(eventLog, sizeof(eventLog), "propose type:%s,%d", TMSG_INFO(pMsg->msgType), pMsg->msgType);
|
||||
syncNodeEventLog(pSyncNode, eventLog);
|
||||
} while (0);
|
||||
|
||||
if (pSyncNode->state == TAOS_SYNC_STATE_LEADER) {
|
||||
if (pSyncNode->changing && pMsg->msgType != TDMT_SYNC_CONFIG_CHANGE_FINISH) {
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
add_executable(transportTest "")
|
||||
add_executable(transUT "")
|
||||
add_executable(pushServer "")
|
||||
add_executable(svrBench "")
|
||||
add_executable(cliBench "")
|
||||
|
||||
target_sources(transUT
|
||||
PRIVATE
|
||||
|
@ -12,9 +13,13 @@ target_sources(transportTest
|
|||
"transportTests.cpp"
|
||||
)
|
||||
|
||||
target_sources(pushServer
|
||||
target_sources(svrBench
|
||||
PRIVATE
|
||||
"pushServer.c"
|
||||
"svrBench.c"
|
||||
)
|
||||
target_sources(cliBench
|
||||
PRIVATE
|
||||
"cliBench.c"
|
||||
)
|
||||
|
||||
target_include_directories(transportTest
|
||||
|
@ -45,13 +50,37 @@ target_include_directories(transUT
|
|||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
|
||||
target_include_directories(pushServer
|
||||
target_include_directories(svrBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(svrBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
|
||||
target_link_libraries (pushServer
|
||||
target_link_libraries (svrBench
|
||||
os
|
||||
util
|
||||
common
|
||||
gtest_main
|
||||
transport
|
||||
)
|
||||
|
||||
target_include_directories(cliBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
target_include_directories(cliBench
|
||||
PUBLIC
|
||||
"${TD_SOURCE_DIR}/include/libs/transport"
|
||||
"${CMAKE_CURRENT_SOURCE_DIR}/../inc"
|
||||
)
|
||||
|
||||
target_link_libraries (cliBench
|
||||
os
|
||||
util
|
||||
common
|
||||
|
|
|
@ -0,0 +1,182 @@
|
|||
/*
|
||||
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
|
||||
*
|
||||
* This program is free software: you can use, redistribute, and/or modify
|
||||
* it under the terms of the GNU Affero General Public License, version 3
|
||||
* or later ("AGPL"), as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE.
|
||||
*
|
||||
* You should have received a copy of the GNU Affero General Public License
|
||||
* along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "os.h"
|
||||
#include "taoserror.h"
|
||||
#include "tglobal.h"
|
||||
#include "transLog.h"
|
||||
#include "trpc.h"
|
||||
#include "tutil.h"
|
||||
|
||||
typedef struct {
|
||||
int index;
|
||||
SEpSet epSet;
|
||||
int num;
|
||||
int numOfReqs;
|
||||
int msgSize;
|
||||
tsem_t rspSem;
|
||||
tsem_t *pOverSem;
|
||||
TdThread thread;
|
||||
void *pRpc;
|
||||
} SInfo;
|
||||
|
||||
static void processResponse(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
||||
SInfo *pInfo = (SInfo *)pMsg->info.ahandle;
|
||||
tDebug("thread:%d, response is received, type:%d contLen:%d code:0x%x", pInfo->index, pMsg->msgType, pMsg->contLen,
|
||||
pMsg->code);
|
||||
|
||||
if (pEpSet) pInfo->epSet = *pEpSet;
|
||||
|
||||
rpcFreeCont(pMsg->pCont);
|
||||
tsem_post(&pInfo->rspSem);
|
||||
}
|
||||
|
||||
static int tcount = 0;
|
||||
|
||||
static void *sendRequest(void *param) {
|
||||
SInfo *pInfo = (SInfo *)param;
|
||||
SRpcMsg rpcMsg = {0};
|
||||
|
||||
tDebug("thread:%d, start to send request", pInfo->index);
|
||||
|
||||
while (pInfo->numOfReqs == 0 || pInfo->num < pInfo->numOfReqs) {
|
||||
pInfo->num++;
|
||||
rpcMsg.pCont = rpcMallocCont(pInfo->msgSize);
|
||||
rpcMsg.contLen = pInfo->msgSize;
|
||||
rpcMsg.info.ahandle = pInfo;
|
||||
rpcMsg.msgType = 1;
|
||||
tDebug("thread:%d, send request, contLen:%d num:%d", pInfo->index, pInfo->msgSize, pInfo->num);
|
||||
rpcSendRequest(pInfo->pRpc, &pInfo->epSet, &rpcMsg, NULL);
|
||||
if (pInfo->num % 20000 == 0) tInfo("thread:%d, %d requests have been sent", pInfo->index, pInfo->num);
|
||||
tsem_wait(&pInfo->rspSem);
|
||||
}
|
||||
|
||||
tDebug("thread:%d, it is over", pInfo->index);
|
||||
tcount++;
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[]) {
|
||||
SRpcInit rpcInit;
|
||||
SEpSet epSet;
|
||||
int msgSize = 128;
|
||||
int numOfReqs = 0;
|
||||
int appThreads = 1;
|
||||
char serverIp[40] = "127.0.0.1";
|
||||
struct timeval systemTime;
|
||||
int64_t startTime, endTime;
|
||||
|
||||
// server info
|
||||
epSet.numOfEps = 1;
|
||||
epSet.inUse = 0;
|
||||
epSet.eps[0].port = 7000;
|
||||
epSet.eps[1].port = 7000;
|
||||
strcpy(epSet.eps[0].fqdn, serverIp);
|
||||
strcpy(epSet.eps[1].fqdn, "192.168.0.1");
|
||||
|
||||
// client info
|
||||
memset(&rpcInit, 0, sizeof(rpcInit));
|
||||
rpcInit.localPort = 0;
|
||||
rpcInit.label = "APP";
|
||||
rpcInit.numOfThreads = 1;
|
||||
rpcInit.cfp = processResponse;
|
||||
rpcInit.sessions = 100;
|
||||
rpcInit.idleTime = tsShellActivityTimer * 1000;
|
||||
rpcInit.user = "michael";
|
||||
rpcInit.connType = TAOS_CONN_CLIENT;
|
||||
|
||||
rpcDebugFlag = 131;
|
||||
for (int i = 1; i < argc; ++i) {
|
||||
if (strcmp(argv[i], "-p") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-i") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-t") == 0 && i < argc - 1) {
|
||||
rpcInit.numOfThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-m") == 0 && i < argc - 1) {
|
||||
msgSize = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-s") == 0 && i < argc - 1) {
|
||||
rpcInit.sessions = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-n") == 0 && i < argc - 1) {
|
||||
numOfReqs = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-a") == 0 && i < argc - 1) {
|
||||
appThreads = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-o") == 0 && i < argc - 1) {
|
||||
tsCompressMsgSize = atoi(argv[++i]);
|
||||
} else if (strcmp(argv[i], "-u") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-k") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-spi") == 0 && i < argc - 1) {
|
||||
} else if (strcmp(argv[i], "-d") == 0 && i < argc - 1) {
|
||||
rpcDebugFlag = atoi(argv[++i]);
|
||||
} else {
|
||||
printf("\nusage: %s [options] \n", argv[0]);
|
||||
printf(" [-i ip]: first server IP address, default is:%s\n", serverIp);
|
||||
printf(" [-t threads]: number of rpc threads, default is:%d\n", rpcInit.numOfThreads);
|
||||
printf(" [-m msgSize]: message body size, default is:%d\n", msgSize);
|
||||
printf(" [-a threads]: number of app threads, default is:%d\n", appThreads);
|
||||
printf(" [-n requests]: number of requests per thread, default is:%d\n", numOfReqs);
|
||||
printf(" [-u user]: user name for the connection, default is:%s\n", rpcInit.user);
|
||||
printf(" [-d debugFlag]: debug flag, default:%d\n", rpcDebugFlag);
|
||||
printf(" [-h help]: print out this help\n\n");
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
taosInitLog("client.log", 100000);
|
||||
|
||||
void *pRpc = rpcOpen(&rpcInit);
|
||||
if (pRpc == NULL) {
|
||||
tError("failed to initialize RPC");
|
||||
return -1;
|
||||
}
|
||||
|
||||
tInfo("client is initialized");
|
||||
tInfo("threads:%d msgSize:%d requests:%d", appThreads, msgSize, numOfReqs);
|
||||
|
||||
int64_t now = taosGetTimestampUs();
|
||||
|
||||
SInfo *pInfo = (SInfo *)taosMemoryCalloc(1, sizeof(SInfo) * appThreads);
|
||||
SInfo *p = pInfo;
|
||||
for (int i = 0; i < appThreads; ++i) {
|
||||
pInfo->index = i;
|
||||
pInfo->epSet = epSet;
|
||||
pInfo->numOfReqs = numOfReqs;
|
||||
pInfo->msgSize = msgSize;
|
||||
tsem_init(&pInfo->rspSem, 0, 0);
|
||||
pInfo->pRpc = pRpc;
|
||||
|
||||
taosThreadCreate(&pInfo->thread, NULL, sendRequest, pInfo);
|
||||
pInfo++;
|
||||
}
|
||||
|
||||
do {
|
||||
taosUsleep(1);
|
||||
} while (tcount < appThreads);
|
||||
|
||||
float usedTime = (taosGetTimestampUs() - now) / 1000.0f;
|
||||
|
||||
tInfo("it takes %.3f mseconds to send %d requests to server", usedTime, numOfReqs * appThreads);
|
||||
tInfo("Performance: %.3f requests per second, msgSize:%d bytes", 1000.0 * numOfReqs * appThreads / usedTime, msgSize);
|
||||
|
||||
for (int i = 0; i < appThreads; i++) {
|
||||
SInfo *pInfo = p;
|
||||
taosThreadJoin(pInfo->thread, NULL);
|
||||
p++;
|
||||
}
|
||||
int ch = getchar();
|
||||
UNUSED(ch);
|
||||
|
||||
taosCloseLog();
|
||||
|
||||
return 0;
|
||||
}
|
|
@ -24,12 +24,12 @@ int msgSize = 128;
|
|||
int commit = 0;
|
||||
TdFilePtr pDataFile = NULL;
|
||||
STaosQueue *qhandle = NULL;
|
||||
STaosQset * qset = NULL;
|
||||
STaosQset *qset = NULL;
|
||||
|
||||
void processShellMsg() {
|
||||
static int num = 0;
|
||||
STaosQall *qall;
|
||||
SRpcMsg * pRpcMsg, rpcMsg;
|
||||
SRpcMsg *pRpcMsg, rpcMsg;
|
||||
int type;
|
||||
SQueueInfo qinfo = {0};
|
||||
|
||||
|
@ -77,7 +77,6 @@ void processShellMsg() {
|
|||
taosFreeQitem(pRpcMsg);
|
||||
|
||||
{
|
||||
// taosSsleep(1);
|
||||
SRpcMsg nRpcMsg = {0};
|
||||
nRpcMsg.pCont = rpcMallocCont(msgSize);
|
||||
nRpcMsg.contLen = msgSize;
|
||||
|
@ -93,26 +92,6 @@ void processShellMsg() {
|
|||
taosFreeQall(qall);
|
||||
}
|
||||
|
||||
int retrieveAuthInfo(void *parent, char *meterId, char *spi, char *encrypt, char *secret, char *ckey) {
|
||||
// app shall retrieve the auth info based on meterID from DB or a data file
|
||||
// demo code here only for simple demo
|
||||
int ret = 0;
|
||||
|
||||
if (strcmp(meterId, "michael") == 0) {
|
||||
*spi = 1;
|
||||
*encrypt = 0;
|
||||
strcpy(secret, "mypassword");
|
||||
strcpy(ckey, "key");
|
||||
} else if (strcmp(meterId, "jeff") == 0) {
|
||||
*spi = 0;
|
||||
*encrypt = 0;
|
||||
} else {
|
||||
ret = -1; // user not there
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
void processRequestMsg(void *pParent, SRpcMsg *pMsg, SEpSet *pEpSet) {
|
||||
SRpcMsg *pTemp;
|
||||
|
||||
|
@ -131,11 +110,12 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
memset(&rpcInit, 0, sizeof(rpcInit));
|
||||
rpcInit.localPort = 7000;
|
||||
memcpy(rpcInit.localFqdn, "localhost", strlen("localhost"));
|
||||
rpcInit.label = "SER";
|
||||
rpcInit.numOfThreads = 1;
|
||||
rpcInit.cfp = processRequestMsg;
|
||||
rpcInit.sessions = 1000;
|
||||
rpcInit.idleTime = 2 * 1500;
|
||||
rpcDebugFlag = 131;
|
||||
|
||||
for (int i = 1; i < argc; ++i) {
|
||||
if (strcmp(argv[i], "-p") == 0 && i < argc - 1) {
|
||||
|
@ -170,7 +150,7 @@ int main(int argc, char *argv[]) {
|
|||
|
||||
tsAsyncLog = 0;
|
||||
rpcInit.connType = TAOS_CONN_SERVER;
|
||||
taosInitLog("server.log", 10);
|
||||
taosInitLog("server.log", 100000);
|
||||
|
||||
void *pRpc = rpcOpen(&rpcInit);
|
||||
if (pRpc == NULL) {
|
|
@ -16,8 +16,8 @@
|
|||
#define _DEFAULT_SOURCE
|
||||
#include "tlog.h"
|
||||
#include "os.h"
|
||||
#include "tutil.h"
|
||||
#include "tconfig.h"
|
||||
#include "tutil.h"
|
||||
|
||||
#define LOG_MAX_LINE_SIZE (1024)
|
||||
#define LOG_MAX_LINE_BUFFER_SIZE (LOG_MAX_LINE_SIZE + 3)
|
||||
|
@ -40,7 +40,7 @@
|
|||
#define LOG_BUF_MUTEX(x) ((x)->buffMutex)
|
||||
|
||||
typedef struct {
|
||||
char * buffer;
|
||||
char *buffer;
|
||||
int32_t buffStart;
|
||||
int32_t buffEnd;
|
||||
int32_t buffSize;
|
||||
|
@ -59,15 +59,15 @@ typedef struct {
|
|||
int32_t openInProgress;
|
||||
pid_t pid;
|
||||
char logName[LOG_FILE_NAME_LEN];
|
||||
SLogBuff * logHandle;
|
||||
SLogBuff *logHandle;
|
||||
TdThreadMutex logMutex;
|
||||
} SLogObj;
|
||||
|
||||
extern SConfig *tsCfg;
|
||||
static int8_t tsLogInited = 0;
|
||||
static SLogObj tsLogObj = {.fileNum = 1};
|
||||
static int64_t tsAsyncLogLostLines = 0;
|
||||
static int32_t tsWriteInterval = LOG_DEFAULT_INTERVAL;
|
||||
static int8_t tsLogInited = 0;
|
||||
static SLogObj tsLogObj = {.fileNum = 1};
|
||||
static int64_t tsAsyncLogLostLines = 0;
|
||||
static int32_t tsWriteInterval = LOG_DEFAULT_INTERVAL;
|
||||
|
||||
bool tsLogEmbedded = 0;
|
||||
bool tsAsyncLog = true;
|
||||
|
@ -106,7 +106,7 @@ int64_t dbgSmallWN = 0;
|
|||
int64_t dbgBigWN = 0;
|
||||
int64_t dbgWSize = 0;
|
||||
|
||||
static void * taosAsyncOutputLog(void *param);
|
||||
static void *taosAsyncOutputLog(void *param);
|
||||
static int32_t taosPushLogBuffer(SLogBuff *pLogBuf, const char *msg, int32_t msgLen);
|
||||
static SLogBuff *taosLogBuffNew(int32_t bufSize);
|
||||
static void taosCloseLogByFd(TdFilePtr pFile);
|
||||
|
@ -128,7 +128,11 @@ int32_t taosInitLog(const char *logName, int32_t maxFiles) {
|
|||
osUpdate();
|
||||
|
||||
char fullName[PATH_MAX] = {0};
|
||||
snprintf(fullName, PATH_MAX, "%s" TD_DIRSEP "%s", tsLogDir, logName);
|
||||
if (strlen(tsLogDir) != 0) {
|
||||
snprintf(fullName, PATH_MAX, "%s" TD_DIRSEP "%s", tsLogDir, logName);
|
||||
} else {
|
||||
snprintf(fullName, PATH_MAX, "%s", logName);
|
||||
}
|
||||
|
||||
tsLogObj.logHandle = taosLogBuffNew(LOG_DEFAULT_BUF_SIZE);
|
||||
if (tsLogObj.logHandle == NULL) return -1;
|
||||
|
@ -704,7 +708,7 @@ int32_t taosCompressFile(char *srcFileName, char *destFileName) {
|
|||
int32_t compressSize = 163840;
|
||||
int32_t ret = 0;
|
||||
int32_t len = 0;
|
||||
char * data = taosMemoryMalloc(compressSize);
|
||||
char *data = taosMemoryMalloc(compressSize);
|
||||
// gzFile dstFp = NULL;
|
||||
|
||||
// srcFp = fopen(srcFileName, "r");
|
||||
|
|
|
@ -489,7 +489,7 @@ class TDDnode:
|
|||
onlyKillOnceWindows = 0
|
||||
while(processID):
|
||||
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
killCmd = "kill -4 %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
onlyKillOnceWindows = 1
|
||||
time.sleep(1)
|
||||
|
@ -503,7 +503,7 @@ class TDDnode:
|
|||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index))
|
||||
|
||||
|
||||
def stoptaosd(self):
|
||||
|
@ -527,7 +527,7 @@ class TDDnode:
|
|||
onlyKillOnceWindows = 0
|
||||
while(processID):
|
||||
if not platform.system().lower() == 'windows' or (onlyKillOnceWindows == 0 and platform.system().lower() == 'windows'):
|
||||
killCmd = "kill -INT %s > /dev/null 2>&1" % processID
|
||||
killCmd = "kill -4 %s > /dev/null 2>&1" % processID
|
||||
os.system(killCmd)
|
||||
onlyKillOnceWindows = 1
|
||||
time.sleep(1)
|
||||
|
@ -537,7 +537,7 @@ class TDDnode:
|
|||
time.sleep(2)
|
||||
|
||||
self.running = 0
|
||||
tdLog.debug("dnode:%d is stopped by kill -INT" % (self.index))
|
||||
tdLog.debug("dnode:%d is stopped by kill -4" % (self.index))
|
||||
|
||||
def forcestop(self):
|
||||
if (not self.remoteIP == ""):
|
||||
|
|
|
@ -298,8 +298,8 @@
|
|||
./test.sh -f tsim/sma/drop_sma.sim
|
||||
./test.sh -f tsim/sma/tsmaCreateInsertQuery.sim
|
||||
# temp disable
|
||||
#./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
#./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
|
||||
./test.sh -f tsim/sma/rsmaCreateInsertQuery.sim
|
||||
./test.sh -f tsim/sma/rsmaPersistenceRecovery.sim
|
||||
|
||||
# --- valgrind
|
||||
./test.sh -f tsim/valgrind/checkError1.sim
|
||||
|
@ -325,7 +325,7 @@
|
|||
|
||||
# --- sync
|
||||
./test.sh -f tsim/sync/3Replica1VgElect.sim
|
||||
./test.sh -f tsim/sync/3Replica5VgElect.sim
|
||||
#./test.sh -f tsim/sync/3Replica5VgElect.sim
|
||||
./test.sh -f tsim/sync/oneReplica1VgElect.sim
|
||||
./test.sh -f tsim/sync/oneReplica5VgElect.sim
|
||||
|
||||
|
|
|
@ -75,61 +75,61 @@ if $data02 != leader then
|
|||
return -1
|
||||
endi
|
||||
|
||||
print =============== create drop bnode 1
|
||||
sql create bnode on dnode 1
|
||||
sql show bnodes
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 1 then
|
||||
return -1
|
||||
endi
|
||||
sql_error create bnode on dnode 1
|
||||
#print =============== create drop bnode 1
|
||||
#sql create bnode on dnode 1
|
||||
#sql show bnodes
|
||||
#if $rows != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error create bnode on dnode 1
|
||||
#
|
||||
#sql drop bnode on dnode 1
|
||||
#sql show bnodes
|
||||
#if $rows != 0 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error drop bnode on dnode 1
|
||||
#
|
||||
#print =============== create drop bnode 2
|
||||
#sql create bnode on dnode 2
|
||||
#sql show bnodes
|
||||
#if $rows != 1 then
|
||||
# return -1
|
||||
#endi
|
||||
#if $data00 != 2 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error create bnode on dnode 2
|
||||
#
|
||||
#sql drop bnode on dnode 2
|
||||
#sql show bnodes
|
||||
#if $rows != 0 then
|
||||
# return -1
|
||||
#endi
|
||||
#sql_error drop bnode on dnode 2
|
||||
#
|
||||
#print =============== create drop bnodes
|
||||
#sql create bnode on dnode 1
|
||||
#sql create bnode on dnode 2
|
||||
#sql show bnodes
|
||||
#if $rows != 2 then
|
||||
# return -1
|
||||
#endi
|
||||
|
||||
sql drop bnode on dnode 1
|
||||
sql show bnodes
|
||||
if $rows != 0 then
|
||||
return -1
|
||||
endi
|
||||
sql_error drop bnode on dnode 1
|
||||
|
||||
print =============== create drop bnode 2
|
||||
sql create bnode on dnode 2
|
||||
sql show bnodes
|
||||
if $rows != 1 then
|
||||
return -1
|
||||
endi
|
||||
if $data00 != 2 then
|
||||
return -1
|
||||
endi
|
||||
sql_error create bnode on dnode 2
|
||||
|
||||
sql drop bnode on dnode 2
|
||||
sql show bnodes
|
||||
if $rows != 0 then
|
||||
return -1
|
||||
endi
|
||||
sql_error drop bnode on dnode 2
|
||||
|
||||
print =============== create drop bnodes
|
||||
sql create bnode on dnode 1
|
||||
sql create bnode on dnode 2
|
||||
sql show bnodes
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
print =============== restart
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode1 -s start
|
||||
system sh/exec.sh -n dnode2 -s start
|
||||
|
||||
sleep 2000
|
||||
sql show bnodes
|
||||
if $rows != 2 then
|
||||
return -1
|
||||
endi
|
||||
#print =============== restart
|
||||
#system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
#system sh/exec.sh -n dnode1 -s start
|
||||
#system sh/exec.sh -n dnode2 -s start
|
||||
#
|
||||
#sleep 2000
|
||||
#sql show bnodes
|
||||
#if $rows != 2 then
|
||||
# return -1
|
||||
#endi
|
||||
|
||||
system sh/exec.sh -n dnode1 -s stop -x SIGINT
|
||||
system sh/exec.sh -n dnode2 -s stop -x SIGINT
|
||||
|
|
|
@ -360,8 +360,9 @@ endi
|
|||
if $data04 != @abc0@ then
|
||||
return -1
|
||||
endi
|
||||
|
||||
sql select distinct tbname,t1,t2 from select_tags_mt0;
|
||||
print "really this line"
|
||||
sql select distinct tbname,t1,t2 from select_tags_mt0 order by tbname;
|
||||
print $data00 $data01 $data02 $data10 $data111 $data12
|
||||
if $row != 16 then
|
||||
return -1
|
||||
endi
|
||||
|
@ -390,7 +391,7 @@ if $data12 != @abc1@ then
|
|||
return -1
|
||||
endi
|
||||
|
||||
sql select tbname,ts from select_tags_mt0;
|
||||
sql select tbname,ts from select_tags_mt0 order by ts;
|
||||
if $row != 12800 then
|
||||
return -1
|
||||
endi
|
||||
|
|
|
@ -99,7 +99,7 @@ if $rows != 1 then
|
|||
endi
|
||||
#sql select * from information_schema.`streams`
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 31 then
|
||||
if $rows <= 0 then
|
||||
return -1
|
||||
endi
|
||||
#sql select * from information_schema.user_table_distributed
|
||||
|
@ -197,7 +197,7 @@ if $rows != 1 then
|
|||
endi
|
||||
#sql select * from performance_schema.`streams`
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 31 then
|
||||
if $rows <= 0 then
|
||||
return -1
|
||||
endi
|
||||
#sql select * from information_schema.user_table_distributed
|
||||
|
|
|
@ -105,7 +105,7 @@ if $rows != 1 then
|
|||
endi
|
||||
|
||||
sql select * from information_schema.user_tables
|
||||
if $rows != 31 then
|
||||
if $rows <= 0 then
|
||||
return -1
|
||||
endi
|
||||
|
||||
|
|
|
@ -0,0 +1,246 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
from numpy import row_stack
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# fisrt add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db0_0',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'stbNumbers': 2,
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 200,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
"rowsPerTbl": 100,
|
||||
"batchNum": 5000
|
||||
}
|
||||
username="user1"
|
||||
passwd="123"
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
# recreate mnode
|
||||
tdSql.execute("drop dnode 2;")
|
||||
tdSql.execute('create dnode "%s:6130";'%self.host)
|
||||
tdDnodes=cluster.dnodes
|
||||
tdDnodes[1].stoptaosd()
|
||||
tdDnodes[1].deploy()
|
||||
|
||||
tdDnodes[1].starttaosd()
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
tdSql.execute("create mnode on dnode 6")
|
||||
tdSql.error("drop dnode 1;")
|
||||
|
||||
# check status of clusters
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
tdSql.execute("create user %s pass '%s' ;"%(username,passwd))
|
||||
tdSql.query("show users")
|
||||
for i in range(tdSql.queryRows):
|
||||
if tdSql.queryResult[i][0] == "%s"%username :
|
||||
tdLog.info("create user:%s successfully"%username)
|
||||
|
||||
# # create database and stable
|
||||
# clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
# tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
# tdDnodes=cluster.dnodes
|
||||
# stopcount =0
|
||||
# threads=[]
|
||||
|
||||
# # create stable:stb_0
|
||||
# stableName= paraDict['stbName']
|
||||
# newTdSql=tdCom.newTdSql()
|
||||
# clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||
# #create child table:ctb_0
|
||||
# for i in range(paraDict['stbNumbers']):
|
||||
# stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# newTdSql=tdCom.newTdSql()
|
||||
# clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||
# #insert date
|
||||
# for i in range(paraDict['stbNumbers']):
|
||||
# stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# newTdSql=tdCom.newTdSql()
|
||||
# threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||
# for tr in threads:
|
||||
# tr.start()
|
||||
# for tr in threads:
|
||||
# tr.join()
|
||||
|
||||
# while stopcount < restartNumbers:
|
||||
# tdLog.info(" restart loop: %d"%stopcount )
|
||||
# if stopRole == "mnode":
|
||||
# for i in range(mnodeNums):
|
||||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
# elif stopRole == "vnode":
|
||||
# for i in range(vnodeNumbers):
|
||||
# tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i+mnodeNums].starttaosd()
|
||||
# # sleep(10)
|
||||
# elif stopRole == "dnode":
|
||||
# for i in range(dnodeNumbers):
|
||||
# tdDnodes[i].stoptaosd()
|
||||
# # sleep(10)
|
||||
# tdDnodes[i].starttaosd()
|
||||
# # sleep(10)
|
||||
|
||||
# # dnodeNumbers don't include database of schema
|
||||
# if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
# tdLog.info("dnode is ready")
|
||||
# else:
|
||||
# print("dnodes is not ready")
|
||||
# self.stopThread(threads)
|
||||
# tdLog.exit("one or more of dnodes failed to start ")
|
||||
# # self.check3mnode()
|
||||
# stopcount+=1
|
||||
|
||||
|
||||
# clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
# clusterComCheck.checkDbRows(dbNumbers)
|
||||
# # clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||
|
||||
# tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
# tdSql.query("show stables")
|
||||
# tdSql.checkRows(paraDict["stbNumbers"])
|
||||
# # for i in range(paraDict['stbNumbers']):
|
||||
# # stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# # tdSql.query("select * from %s"%stableName)
|
||||
# # tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -190,10 +190,9 @@ class TDTestCase:
|
|||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("123")
|
||||
tdLog.info("dnode is ready")
|
||||
else:
|
||||
print("456")
|
||||
|
||||
print("dnodes is not ready")
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
|
@ -207,10 +206,11 @@ class TDTestCase:
|
|||
tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
tdSql.query("show stables")
|
||||
tdSql.checkRows(paraDict["stbNumbers"])
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
tdSql.query("select * from %s"%stableName)
|
||||
tdSql.checkRows(rowsPerStb)
|
||||
# for i in range(paraDict['stbNumbers']):
|
||||
# stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# tdSql.query("select * from %s"%stableName)
|
||||
# tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
|
|
@ -0,0 +1,224 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
from numpy import row_stack
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
sys.path.append("./6-cluster")
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import clusterComCheck
|
||||
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
import threading
|
||||
import time
|
||||
import inspect
|
||||
import ctypes
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
self.TDDnodes = None
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def _async_raise(self, tid, exctype):
|
||||
"""raises the exception, performs cleanup if needed"""
|
||||
if not inspect.isclass(exctype):
|
||||
exctype = type(exctype)
|
||||
res = ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, ctypes.py_object(exctype))
|
||||
if res == 0:
|
||||
raise ValueError("invalid thread id")
|
||||
elif res != 1:
|
||||
# """if it returns a number greater than one, you're in trouble,
|
||||
# and you should call it again with exc=NULL to revert the effect"""
|
||||
ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None)
|
||||
raise SystemError("PyThreadState_SetAsyncExc failed")
|
||||
|
||||
def stopThread(self,thread):
|
||||
self._async_raise(thread.ident, SystemExit)
|
||||
|
||||
|
||||
def insertData(self,countstart,countstop):
|
||||
# fisrt add data : db\stable\childtable\general table
|
||||
|
||||
for couti in range(countstart,countstop):
|
||||
tdLog.debug("drop database if exists db%d" %couti)
|
||||
tdSql.execute("drop database if exists db%d" %couti)
|
||||
print("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("create database if not exists db%d replica 1 duration 300" %couti)
|
||||
tdSql.execute("use db%d" %couti)
|
||||
tdSql.execute(
|
||||
'''create table stb1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
tags (t1 int)
|
||||
'''
|
||||
)
|
||||
tdSql.execute(
|
||||
'''
|
||||
create table t1
|
||||
(ts timestamp, c1 int, c2 bigint, c3 smallint, c4 tinyint, c5 float, c6 double, c7 bool, c8 binary(16),c9 nchar(32), c10 timestamp)
|
||||
'''
|
||||
)
|
||||
for i in range(4):
|
||||
tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')
|
||||
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodeNumbers,mnodeNums,restartNumbers,stopRole):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db0_0',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'stbNumbers': 2,
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 200,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
"rowsPerTbl": 100,
|
||||
"batchNum": 5000
|
||||
}
|
||||
|
||||
dnodeNumbers=int(dnodeNumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
vnodeNumbers = int(dnodeNumbers-mnodeNums)
|
||||
allctbNumbers=(paraDict['stbNumbers']*paraDict["ctbNum"])
|
||||
rowsPerStb=paraDict["ctbNum"]*paraDict["rowsPerTbl"]
|
||||
rowsall=rowsPerStb*paraDict['stbNumbers']
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
|
||||
# create database and stable
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
tdLog.info("Take turns stopping Mnodes ")
|
||||
|
||||
tdDnodes=cluster.dnodes
|
||||
stopcount =0
|
||||
threads=[]
|
||||
|
||||
# create stable:stb_0
|
||||
stableName= paraDict['stbName']
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_stables(newTdSql, paraDict["dbName"],stableName,paraDict['stbNumbers'])
|
||||
#create child table:ctb_0
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
clusterComCreate.create_ctable(newTdSql, paraDict["dbName"],stableName,stableName, paraDict['ctbNum'])
|
||||
#insert date
|
||||
for i in range(paraDict['stbNumbers']):
|
||||
stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
newTdSql=tdCom.newTdSql()
|
||||
threads.append(threading.Thread(target=clusterComCreate.insert_data, args=(newTdSql, paraDict["dbName"],stableName,paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])))
|
||||
for tr in threads:
|
||||
tr.start()
|
||||
|
||||
|
||||
while stopcount < restartNumbers:
|
||||
tdLog.info(" restart loop: %d"%stopcount )
|
||||
if stopRole == "mnode":
|
||||
for i in range(mnodeNums):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "vnode":
|
||||
for i in range(vnodeNumbers):
|
||||
tdDnodes[i+mnodeNums].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i+mnodeNums].starttaosd()
|
||||
# sleep(10)
|
||||
elif stopRole == "dnode":
|
||||
for i in range(dnodeNumbers):
|
||||
tdDnodes[i].stoptaosd()
|
||||
# sleep(10)
|
||||
tdDnodes[i].starttaosd()
|
||||
# sleep(10)
|
||||
|
||||
# dnodeNumbers don't include database of schema
|
||||
if clusterComCheck.checkDnodes(dnodeNumbers):
|
||||
tdLog.info("dnode is ready")
|
||||
else:
|
||||
print("dnodes is not ready")
|
||||
self.stopThread(threads)
|
||||
tdLog.exit("one or more of dnodes failed to start ")
|
||||
# self.check3mnode()
|
||||
stopcount+=1
|
||||
|
||||
for tr in threads:
|
||||
tr.join()
|
||||
|
||||
clusterComCheck.checkDnodes(dnodeNumbers)
|
||||
clusterComCheck.checkDbRows(dbNumbers)
|
||||
# clusterComCheck.checkDb(dbNumbers,1,paraDict["dbName"])
|
||||
|
||||
tdSql.execute("use %s" %(paraDict["dbName"]))
|
||||
tdSql.query("show stables")
|
||||
tdSql.checkRows(paraDict["stbNumbers"])
|
||||
# for i in range(paraDict['stbNumbers']):
|
||||
# stableName= '%s_%d'%(paraDict['stbName'],i)
|
||||
# tdSql.query("select * from %s"%stableName)
|
||||
# tdSql.checkRows(rowsPerStb)
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodeNumbers=5,mnodeNums=3,restartNumbers=1,stopRole='dnode')
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -98,8 +98,10 @@ class TDTestCase:
|
|||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.info("create mnode on dnode 2")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.info("create mnode on dnode 3")
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
|
|
|
@ -68,7 +68,7 @@ class TDTestCase:
|
|||
'showRow': 1}
|
||||
dnodenumbers=int(dnodenumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
dbNumbers = int(dnodenumbers * restartNumber)
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
|
@ -104,7 +104,7 @@ class TDTestCase:
|
|||
tdDnodes[1].starttaosd()
|
||||
tdDnodes[2].starttaosd()
|
||||
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
|
||||
def run(self):
|
||||
|
|
|
@ -111,14 +111,14 @@ class TDTestCase:
|
|||
# seperate vnode and mnode in different dnodes.
|
||||
# create database and stable
|
||||
stopcount =0
|
||||
while stopcount <= 2:
|
||||
while stopcount < restartNumber:
|
||||
tdLog.info("first restart loop")
|
||||
for i in range(dnodenumbers):
|
||||
tdDnodes[i].stoptaosd()
|
||||
tdDnodes[i].starttaosd()
|
||||
stopcount+=1
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
clusterComCheck.checkMnodeStatus(mnodeNums)
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
|
|
|
@ -0,0 +1,122 @@
|
|||
from ssl import ALERT_DESCRIPTION_CERTIFICATE_UNOBTAINABLE
|
||||
import taos
|
||||
import sys
|
||||
import time
|
||||
import os
|
||||
|
||||
from util.log import *
|
||||
from util.sql import *
|
||||
from util.cases import *
|
||||
from util.dnodes import *
|
||||
from util.dnodes import TDDnodes
|
||||
from util.dnodes import TDDnode
|
||||
from util.cluster import *
|
||||
from test import tdDnodes
|
||||
sys.path.append("./6-cluster")
|
||||
|
||||
from clusterCommonCreate import *
|
||||
from clusterCommonCheck import *
|
||||
import time
|
||||
import socket
|
||||
import subprocess
|
||||
from multiprocessing import Process
|
||||
|
||||
|
||||
class TDTestCase:
|
||||
|
||||
def init(self,conn ,logSql):
|
||||
tdLog.debug(f"start to excute {__file__}")
|
||||
tdSql.init(conn.cursor())
|
||||
self.host = socket.gethostname()
|
||||
|
||||
|
||||
def getBuildPath(self):
|
||||
selfPath = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
if ("community" in selfPath):
|
||||
projPath = selfPath[:selfPath.find("community")]
|
||||
else:
|
||||
projPath = selfPath[:selfPath.find("tests")]
|
||||
|
||||
for root, dirs, files in os.walk(projPath):
|
||||
if ("taosd" in files):
|
||||
rootRealPath = os.path.dirname(os.path.realpath(root))
|
||||
if ("packaging" not in rootRealPath):
|
||||
buildPath = root[:len(root) - len("/build/bin")]
|
||||
break
|
||||
return buildPath
|
||||
|
||||
def fiveDnodeThreeMnode(self,dnodenumbers,mnodeNums,restartNumber):
|
||||
tdLog.printNoPrefix("======== test case 1: ")
|
||||
paraDict = {'dbName': 'db0_0',
|
||||
'dropFlag': 1,
|
||||
'event': '',
|
||||
'vgroups': 4,
|
||||
'replica': 1,
|
||||
'stbName': 'stb',
|
||||
'colPrefix': 'c',
|
||||
'tagPrefix': 't',
|
||||
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
|
||||
'ctbPrefix': 'ctb',
|
||||
'ctbNum': 1,
|
||||
'rowsPerTbl': 10000,
|
||||
'batchNum': 10,
|
||||
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
|
||||
'pollDelay': 10,
|
||||
'showMsg': 1,
|
||||
'showRow': 1}
|
||||
dnodenumbers=int(dnodenumbers)
|
||||
mnodeNums=int(mnodeNums)
|
||||
dbNumbers = 1
|
||||
|
||||
tdLog.info("first check dnode and mnode")
|
||||
tdSql.query("show dnodes;")
|
||||
tdSql.checkData(0,1,'%s:6030'%self.host)
|
||||
tdSql.checkData(4,1,'%s:6430'%self.host)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
clusterComCheck.checkMnodeStatus(1)
|
||||
|
||||
# fisr add three mnodes;
|
||||
tdLog.info("fisr add three mnodes and check mnode status")
|
||||
tdSql.execute("create mnode on dnode 2")
|
||||
clusterComCheck.checkMnodeStatus(2)
|
||||
tdSql.execute("create mnode on dnode 3")
|
||||
clusterComCheck.checkMnodeStatus(3)
|
||||
|
||||
# add some error operations and
|
||||
tdLog.info("Confirm the status of the dnode again")
|
||||
tdSql.error("create mnode on dnode 2")
|
||||
tdSql.query("show dnodes;")
|
||||
# print(tdSql.queryResult)
|
||||
clusterComCheck.checkDnodes(dnodenumbers)
|
||||
# restart all taosd
|
||||
tdDnodes=cluster.dnodes
|
||||
tdLog.info("stop two mnode ")
|
||||
|
||||
tdDnodes[0].stoptaosd()
|
||||
tdDnodes[1].stoptaosd()
|
||||
|
||||
# tdLog.info("check whether 2 mnode status is offline")
|
||||
# clusterComCheck.check3mnode2off()
|
||||
# tdSql.error("create user user1 pass '123';")
|
||||
|
||||
tdLog.info("start one mnode" )
|
||||
tdDnodes[0].starttaosd()
|
||||
clusterComCheck.check3mnodeoff(2)
|
||||
|
||||
clusterComCreate.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], paraDict["vgroups"],paraDict['replica'])
|
||||
clusterComCheck.checkDb(dbNumbers,1,'db0')
|
||||
|
||||
|
||||
|
||||
def run(self):
|
||||
# print(self.master_dnode.cfgDict)
|
||||
self.fiveDnodeThreeMnode(dnodenumbers=5,mnodeNums=3,restartNumber=1)
|
||||
|
||||
def stop(self):
|
||||
tdSql.close()
|
||||
tdLog.success(f"{__file__} successfully executed")
|
||||
|
||||
tdCases.addLinux(__file__, TDTestCase())
|
||||
tdCases.addWindows(__file__, TDTestCase())
|
|
@ -55,6 +55,7 @@ class ClusterComCheck:
|
|||
count+=1
|
||||
time.sleep(1)
|
||||
else:
|
||||
tdSql.query("show dnodes")
|
||||
tdLog.debug(tdSql.queryResult)
|
||||
tdLog.exit("it find cluster with %d dnodes but check that there dnodes are not ready within 30s ! "%dnodeNumbers)
|
||||
|
||||
|
@ -111,7 +112,7 @@ class ClusterComCheck:
|
|||
def checkMnodeStatus(self,mnodeNums):
|
||||
self.mnodeNums=int(mnodeNums)
|
||||
# self.leaderDnode=int(leaderDnode)
|
||||
|
||||
tdLog.debug("start to check status of mnodes")
|
||||
count=0
|
||||
|
||||
while count < 10:
|
||||
|
|
|
@ -43,9 +43,9 @@ class TDTestCase:
|
|||
tdLog.exit("compare error: %s != %s"%src, dst)
|
||||
else:
|
||||
break
|
||||
|
||||
|
||||
tdSql.execute('use db_taosx')
|
||||
tdSql.query("select * from ct3 order by c1 desc")
|
||||
tdSql.query("select * from ct3 order by c1 desc")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 51)
|
||||
tdSql.checkData(0, 4, 940)
|
||||
|
@ -58,17 +58,17 @@ class TDTestCase:
|
|||
tdSql.query("select * from ct2")
|
||||
tdSql.checkRows(0)
|
||||
|
||||
tdSql.query("select * from ct0 order by c1")
|
||||
tdSql.query("select * from ct0 order by c1 ")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 3, "a")
|
||||
tdSql.checkData(1, 4, None)
|
||||
|
||||
tdSql.query("select * from n1 order by cc3 desc")
|
||||
tdSql.query("select * from n1 order by ts")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, "eeee")
|
||||
tdSql.checkData(1, 2, 940)
|
||||
|
||||
tdSql.query("select * from jt order by i desc")
|
||||
tdSql.query("select * from jt order by i desc;")
|
||||
tdSql.checkRows(2)
|
||||
tdSql.checkData(0, 1, 11)
|
||||
tdSql.checkData(0, 2, None)
|
||||
|
|
|
@ -2,6 +2,7 @@
|
|||
set -e
|
||||
set -x
|
||||
|
||||
|
||||
# test case of vnode
|
||||
python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_createDb_replica1.py -N 4 -M 1
|
||||
python3 ./test.py -f 6-cluster/vnode/4dnode1mnode_basic_replica1_insertdatas.py -N 4 -M 1
|
||||
|