Merge branch 'develop' into coverity_scan

Shuduo Sang 2020-07-25 14:22:37 +08:00
commit b17672cd0d
76 changed files with 1202 additions and 432 deletions

View File

@ -358,7 +358,7 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) {
}
case TSDB_SQL_CFG_DNODE: {
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:1-dnode:2' / monitor 1 ";
const char* msg2 = "invalid configure options or values, such as resetlog / debugFlag 135 / balance 'vnode:2-dnode:2' / monitor 1 ";
const char* msg3 = "invalid dnode ep";
/* validate the ip address */
@ -4700,10 +4700,10 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
} else if ((strncasecmp(cfgOptions[tokenBalance].name, pOptionToken->z, pOptionToken->n) == 0) &&
(cfgOptions[tokenBalance].len == pOptionToken->n)) {
SSQLToken* pValToken = &pOptions->a[2];
int32_t vnodeIndex = 0;
int32_t dnodeIndex = 0;
int32_t vnodeId = 0;
int32_t dnodeId = 0;
strdequote(pValToken->z);
bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeIndex, &dnodeIndex);
bool parseOk = taosCheckBalanceCfgOptions(pValToken->z, &vnodeId, &dnodeId);
if (!parseOk) {
return TSDB_CODE_TSC_INVALID_SQL; // options value is invalid
}

View File

@ -106,13 +106,14 @@ static void tscUpdateVgroupInfo(SSqlObj *pObj, SRpcEpSet *pEpSet) {
SCMCorVgroupInfo *pVgroupInfo = &pTableMetaInfo->pTableMeta->corVgroupInfo;
taosCorBeginWrite(&pVgroupInfo->version);
//TODO(dengyihao), dont care vgid
tscDebug("before: Endpoint in use: %d", pVgroupInfo->inUse);
pVgroupInfo->inUse = pEpSet->inUse;
pVgroupInfo->numOfEps = pEpSet->numOfEps;
for (int32_t i = 0; pVgroupInfo->numOfEps; i++) {
for (int32_t i = 0; i < pVgroupInfo->numOfEps; i++) {
strncpy(pVgroupInfo->epAddr[i].fqdn, pEpSet->fqdn[i], TSDB_FQDN_LEN);
pVgroupInfo->epAddr[i].port = pEpSet->port[i];
}
tscDebug("after: EndPoint in use: %d", pVgroupInfo->inUse);
taosCorEndWrite(&pVgroupInfo->version);
}
void tscPrintMgmtEp() {
@ -283,9 +284,7 @@ void tscProcessMsgFromServer(SRpcMsg *rpcMsg, SRpcEpSet *pEpSet) {
}
if (pEpSet) {
//SRpcEpSet dump;
tscEpSetHtons(pEpSet);
if (tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if (!tscEpSetIsEqual(&pSql->epSet, pEpSet)) {
if(pCmd->command < TSDB_SQL_MGMT) {
tscUpdateVgroupInfo(pSql, pEpSet);
} else {

View File

@ -256,11 +256,12 @@ static int tscUpdateSubscription(STscObj* pObj, SSub* pSub) {
}
size_t numOfTables = taosArrayGetSize(tables);
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(pCmd, 0);
SArray* progress = taosArrayInit(numOfTables, sizeof(SSubscriptionProgress));
for( size_t i = 0; i < numOfTables; i++ ) {
STidTags* tt = taosArrayGet( tables, i );
SSubscriptionProgress p = { .uid = tt->uid };
p.key = tscGetSubscriptionProgress(pSub, tt->uid, INT64_MIN);
p.key = tscGetSubscriptionProgress(pSub, tt->uid, pQueryInfo->window.skey);
taosArrayPush(progress, &p);
}
taosArraySort(progress, tscCompareSubscriptionProgress);

View File

@ -174,7 +174,7 @@ bool taosCheckGlobalCfg();
void taosSetAllDebugFlag();
bool taosCfgDynamicOptions(char *msg);
int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex);
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId);
#ifdef __cplusplus
}

View File

@ -1315,7 +1315,7 @@ int taosGetFqdnPortFromEp(const char *ep, char *fqdn, uint16_t *port) {
* alter dnode 1 balance "vnode:1-dnode:2"
*/
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t *dnodeIndex) {
bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeId, int32_t *dnodeId) {
int len = strlen(option);
if (strncasecmp(option, "vnode:", 6) != 0) {
return false;
@ -1331,9 +1331,9 @@ bool taosCheckBalanceCfgOptions(const char *option, int32_t *vnodeIndex, int32_t
return false;
}
*vnodeIndex = strtol(option + 6, NULL, 10);
*dnodeIndex = strtol(option + pos + 6, NULL, 10);
if (*vnodeIndex <= 1 || *dnodeIndex <= 0) {
*vnodeId = strtol(option + 6, NULL, 10);
*dnodeId = strtol(option + pos + 6, NULL, 10);
if (*vnodeId <= 1 || *dnodeId <= 0) {
return false;
}
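
The option string accepted here has the form vnode:<id>-dnode:<id>, as in the msg2 example earlier in this diff. Below is a minimal standalone sketch of that parsing; checkBalanceOption() is a hypothetical stand-in for taosCheckBalanceCfgOptions(), and the scan for the '-' separator is an assumption since the hunk omits that part.

/* Sketch only, not the TDengine implementation. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>

static bool checkBalanceOption(const char *option, int32_t *vnodeId, int32_t *dnodeId) {
  int len = (int)strlen(option);
  if (strncasecmp(option, "vnode:", 6) != 0) return false;

  int pos = 0;
  while (pos < len && option[pos] != '-') pos++;   /* find the separator (assumed) */
  pos++;                                           /* step past '-' */
  if (pos + 6 >= len) return false;                /* need "dnode:" plus at least one digit */
  if (strncasecmp(option + pos, "dnode:", 6) != 0) return false;

  *vnodeId = (int32_t)strtol(option + 6, NULL, 10);        /* digits after "vnode:" */
  *dnodeId = (int32_t)strtol(option + pos + 6, NULL, 10);  /* digits after "dnode:" */
  return *vnodeId > 1 && *dnodeId > 0;             /* same rejection rule as the hunk above */
}

int main(void) {
  int32_t vnodeId = 0, dnodeId = 0;
  if (checkBalanceOption("vnode:2-dnode:2", &vnodeId, &dnodeId)) {
    printf("move vnode %d to dnode %d\n", vnodeId, dnodeId);  /* prints 2 and 2 */
  }
  return 0;
}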

View File

@ -120,12 +120,17 @@ TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_OBJ_NOT_THERE, 0, 0x0323, "sdb object
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_META_ROW, 0, 0x0324, "sdb invalid meta row")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_SDB_INVAID_KEY_TYPE, 0, 0x0325, "sdb invalid key type")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "mnode dnode already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "mnode dnode not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "mnode vgroup not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "mnode cant not remove master")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "mnode no enough dnodes")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "mnode cluster cfg inconsistent")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_ALREADY_EXIST, 0, 0x0330, "dnode already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_EXIST, 0, 0x0331, "dnode not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_EXIST, 0, 0x0332, "vgroup not exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_REMOVE_MASTER, 0, 0x0333, "cant not remove master")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_NO_ENOUGH_DNODES, 0, 0x0334, "no enough dnodes")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_CLUSTER_CFG_INCONSISTENT, 0, 0x0335, "cluster cfg inconsistent")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION, 0, 0x0336, "invalid dnode cfg option")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_BALANCE_ENABLED, 0, 0x0337, "balance already enabled")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_NOT_IN_DNODE, 0, 0x0338, "vgroup not in dnode")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_VGROUP_ALREADY_IN_DNODE, 0, 0x0339, "vgroup already in dnode")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_DNODE_NOT_FREE, 0, 0x033A, "dnode not avaliable")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_ACCT_ALREADY_EXIST, 0, 0x0340, "mnode accounts already exist")
TAOS_DEFINE_ERROR(TSDB_CODE_MND_INVALID_ACCT, 0, 0x0341, "mnode invalid account")

View File

@ -149,6 +149,7 @@ enum _mgmt_table {
#define TSDB_ALTER_TABLE_ADD_COLUMN 5
#define TSDB_ALTER_TABLE_DROP_COLUMN 6
#define TSDB_ALTER_TABLE_CHANGE_COLUMN 7
#define TSDB_FILL_NONE 0
#define TSDB_FILL_NULL 1

View File

@ -29,7 +29,7 @@ void balanceAsyncNotify();
void balanceSyncNotify();
void balanceReset();
int32_t balanceAllocVnodes(struct SVgObj *pVgroup);
int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option);
int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId);
int32_t balanceDropDnode(struct SDnodeObj *pDnode);
#ifdef __cplusplus

View File

@ -53,14 +53,12 @@ typedef struct {
int32_t tsdbId;
int32_t cacheBlockSize;
int32_t totalBlocks;
int32_t maxTables; // maximum number of tables this repository can have
int32_t daysPerFile; // day per file sharding policy
int32_t keep; // day of data to keep
int32_t keep1;
int32_t keep2;
int32_t minRowsPerFileBlock; // minimum rows per file block
int32_t maxRowsPerFileBlock; // maximum rows per file block
int32_t commitTime;
int8_t precision;
int8_t compression;
} STsdbCfg;

View File

@ -37,14 +37,12 @@ static int32_t saveVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnode->cfgVersion);
len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnode->tsdbCfg.cacheBlockSize);
len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnode->tsdbCfg.totalBlocks);
len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnode->tsdbCfg.maxTables);
len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnode->tsdbCfg.daysPerFile);
len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnode->tsdbCfg.keep);
len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnode->tsdbCfg.keep1);
len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnode->tsdbCfg.keep2);
len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.minRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnode->tsdbCfg.maxRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnode->tsdbCfg.commitTime);
len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnode->tsdbCfg.precision);
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnode->tsdbCfg.compression);
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnode->walCfg.walLevel);
@ -136,12 +134,12 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
}
pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
if (!maxTables || maxTables->type != cJSON_Number) {
printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.maxTables = maxTables->valueint;
// cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
// if (!maxTables || maxTables->type != cJSON_Number) {
// printf("vgId:%d, failed to read vnode cfg, maxTables not found\n", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.maxTables = maxTables->valueint;
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
@ -185,12 +183,12 @@ static int32_t readVnodeCfg(SVnodeObj *pVnode, char* cfgFile)
}
pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
if (!commitTime || commitTime->type != cJSON_Number) {
printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
// cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
// if (!commitTime || commitTime->type != cJSON_Number) {
// printf("vgId:%d, failed to read vnode cfg, commitTime not found\n", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
cJSON *precision = cJSON_GetObjectItem(root, "precision");
if (!precision || precision->type != cJSON_Number) {

View File

@ -28,7 +28,7 @@ void balanceCleanUp() {}
void balanceAsyncNotify() {}
void balanceSyncNotify() {}
void balanceReset() {}
int32_t balanceCfgDnode(struct SDnodeObj *pDnode, const char *option) { return TSDB_CODE_SYN_NOT_ENABLED; }
int32_t balanceAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId) { return TSDB_CODE_SYN_NOT_ENABLED; }
int32_t balanceAllocVnodes(SVgObj *pVgroup) {
void * pIter = NULL;

View File

@ -295,10 +295,19 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
}
SRpcEpSet epSet = mnodeGetEpSetFromIp(pDnode->dnodeEp);
mnodeDecDnodeRef(pDnode);
if (strncasecmp(pCmCfgDnode->config, "balance", 7) == 0) {
return balanceCfgDnode(pDnode, pCmCfgDnode->config + 8);
int32_t vnodeId = 0;
int32_t dnodeId = 0;
bool parseOk = taosCheckBalanceCfgOptions(pCmCfgDnode->config + 8, &vnodeId, &dnodeId);
if (!parseOk) {
mnodeDecDnodeRef(pDnode);
return TSDB_CODE_MND_INVALID_DNODE_CFG_OPTION;
}
int32_t code = balanceAlterDnode(pDnode, vnodeId, dnodeId);
mnodeDecDnodeRef(pDnode);
return code;
} else {
SMDCfgDnodeMsg *pMdCfgDnode = rpcMallocCont(sizeof(SMDCfgDnodeMsg));
strcpy(pMdCfgDnode->ep, pCmCfgDnode->ep);
@ -314,6 +323,7 @@ static int32_t mnodeProcessCfgDnodeMsg(SMnodeMsg *pMsg) {
mInfo("dnode:%s, is configured by %s", pCmCfgDnode->ep, pMsg->pUser->user);
dnodeSendMsgToDnode(&epSet, &rpcMdCfgDnodeMsg);
mnodeDecDnodeRef(pDnode);
return TSDB_CODE_SUCCESS;
}
}

View File

@ -1223,6 +1223,55 @@ static int32_t mnodeDropSuperTableColumn(SMnodeMsg *pMsg, char *colName) {
return code;
}
static int32_t mnodeChangeSuperTableColumnCb(SMnodeMsg *pMsg, int32_t code) {
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
mLInfo("app:%p:%p, stable %s, change column result:%s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
tstrerror(code));
return code;
}
static int32_t mnodeChangeSuperTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
SSuperTableObj *pStable = (SSuperTableObj *)pMsg->pTable;
int32_t col = mnodeFindSuperTableColumnIndex(pStable, oldName);
if (col < 0) {
mError("app:%p:%p, stable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
pStable->info.tableId, oldName, newName);
return TSDB_CODE_MND_FIELD_NOT_EXIST;
}
// int32_t rowSize = 0;
uint32_t len = strlen(newName);
if (len >= TSDB_COL_NAME_LEN) {
return TSDB_CODE_MND_COL_NAME_TOO_LONG;
}
if (mnodeFindSuperTableColumnIndex(pStable, newName) >= 0) {
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
}
// update
SSchema *schema = (SSchema *) (pStable->schema + col);
tstrncpy(schema->name, newName, sizeof(schema->name));
mInfo("app:%p:%p, stable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pStable->info.tableId,
oldName, newName);
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
.table = tsSuperTableSdb,
.pObj = pStable,
.pMsg = pMsg,
.cb = mnodeChangeSuperTableColumnCb
};
int32_t code = sdbUpdateRow(&oper);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
}
return code;
}
// show super tables
static int32_t mnodeGetShowSuperTableMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn) {
SDbObj *pDb = mnodeGetDb(pShow->db);
@ -1405,6 +1454,9 @@ static int32_t mnodeSetSchemaFromSuperTable(SSchema *pSchema, SSuperTableObj *pT
static int32_t mnodeGetSuperTableMeta(SMnodeMsg *pMsg) {
SSuperTableObj *pTable = (SSuperTableObj *)pMsg->pTable;
STableMetaMsg *pMeta = rpcMallocCont(sizeof(STableMetaMsg) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16));
if (pMeta == NULL) {
return TSDB_CODE_MND_OUT_OF_MEMORY;
}
pMeta->uid = htobe64(pTable->uid);
pMeta->sversion = htons(pTable->sversion);
pMeta->tversion = htons(pTable->tversion);
@ -1977,6 +2029,48 @@ static int32_t mnodeDropNormalTableColumn(SMnodeMsg *pMsg, char *colName) {
return code;
}
static int32_t mnodeChangeNormalTableColumn(SMnodeMsg *pMsg, char *oldName, char *newName) {
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
int32_t col = mnodeFindNormalTableColumnIndex(pTable, oldName);
if (col < 0) {
mError("app:%p:%p, ctable:%s, change column, oldName: %s, newName: %s", pMsg->rpcMsg.ahandle, pMsg,
pTable->info.tableId, oldName, newName);
return TSDB_CODE_MND_FIELD_NOT_EXIST;
}
// int32_t rowSize = 0;
uint32_t len = strlen(newName);
if (len >= TSDB_COL_NAME_LEN) {
return TSDB_CODE_MND_COL_NAME_TOO_LONG;
}
if (mnodeFindNormalTableColumnIndex(pTable, newName) >= 0) {
return TSDB_CODE_MND_FIELD_ALREAY_EXIST;
}
// update
SSchema *schema = (SSchema *) (pTable->schema + col);
tstrncpy(schema->name, newName, sizeof(schema->name));
mInfo("app:%p:%p, ctable %s, start to modify column %s to %s", pMsg->rpcMsg.ahandle, pMsg, pTable->info.tableId,
oldName, newName);
SSdbOper oper = {
.type = SDB_OPER_GLOBAL,
.table = tsChildTableSdb,
.pObj = pTable,
.pMsg = pMsg,
.cb = mnodeAlterNormalTableColumnCb
};
int32_t code = sdbUpdateRow(&oper);
if (code == TSDB_CODE_SUCCESS) {
code = TSDB_CODE_MND_ACTION_IN_PROGRESS;
}
return code;
}
static int32_t mnodeSetSchemaFromNormalTable(SSchema *pSchema, SChildTableObj *pTable) {
int32_t numOfCols = pTable->numOfColumns;
for (int32_t i = 0; i < numOfCols; ++i) {
@ -2596,6 +2690,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
code = mnodeAddSuperTableColumn(pMsg, pAlter->schema, 1);
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
code = mnodeDropSuperTableColumn(pMsg, pAlter->schema[0].name);
} else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
code = mnodeChangeSuperTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
} else {
}
} else {
@ -2606,6 +2702,8 @@ static int32_t mnodeProcessAlterTableMsg(SMnodeMsg *pMsg) {
code = mnodeAddNormalTableColumn(pMsg, pAlter->schema, 1);
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
code = mnodeDropNormalTableColumn(pMsg, pAlter->schema[0].name);
} else if (pAlter->type == TSDB_ALTER_TABLE_CHANGE_COLUMN) {
code = mnodeChangeNormalTableColumn(pMsg, pAlter->schema[0].name, pAlter->schema[1].name);
} else {
}
}

View File

@ -592,7 +592,7 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 12 + VARSTR_HEADER_SIZE;
pShow->bytes[cols] = 8 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "status");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
@ -619,12 +619,6 @@ static int32_t mnodeGetVgroupMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *p
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 40 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "end_point");
pSchema[cols].bytes = htons(pShow->bytes[cols]);
cols++;
pShow->bytes[cols] = 9 + VARSTR_HEADER_SIZE;
pSchema[cols].type = TSDB_DATA_TYPE_BINARY;
strcpy(pSchema[cols].name, "vstatus");
@ -716,27 +710,15 @@ static int32_t mnodeRetrieveVgroups(SShowObj *pShow, char *data, int32_t rows, v
*(int16_t *) pWrite = pVgroup->vnodeGid[i].dnodeId;
cols++;
SDnodeObj *pDnode = pVgroup->vnodeGid[i].pDnode;
SDnodeObj * pDnode = pVgroup->vnodeGid[i].pDnode;
const char *role = "NULL";
if (pDnode != NULL) {
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, pDnode->dnodeEp, pShow->bytes[cols]);
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
char *role = mnodeGetMnodeRoleStr(pVgroup->vnodeGid[i].role);
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, role, pShow->bytes[cols]);
cols++;
} else {
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
const char *src = "NULL";
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
cols++;
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_SIZE_TO_VARSTR(pWrite, src, strlen(src));
cols++;
role = mnodeGetMnodeRoleStr(pVgroup->vnodeGid[i].role);
}
pWrite = data + pShow->offset[cols] * rows + pShow->bytes[cols] * numOfRows;
STR_WITH_MAXSIZE_TO_VARSTR(pWrite, role, pShow->bytes[cols]);
cols++;
}
mnodeDecVgroupRef(pVgroup);

View File

@ -5913,8 +5913,10 @@ _cleanup_qinfo:
tsdbDestroyTableGroup(pTableGroupInfo);
_cleanup_query:
taosArrayDestroy(pGroupbyExpr->columnInfo);
tfree(pGroupbyExpr);
if (pGroupbyExpr != NULL) {
taosArrayDestroy(pGroupbyExpr->columnInfo);
free(pGroupbyExpr);
}
tfree(pTagCols);
for (int32_t i = 0; i < numOfOutput; ++i) {
SExprInfo* pExprInfo = &pExprs[i];

View File

@ -41,6 +41,9 @@ int32_t initWindowResInfo(SWindowResInfo *pWindowResInfo, SQueryRuntimeEnv *pRun
pWindowResInfo->type = type;
_hash_fn_t fn = taosGetDefaultHashFunction(type);
pWindowResInfo->hashList = taosHashInit(threshold, fn, false);
if (pWindowResInfo->hashList == NULL) {
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
pWindowResInfo->curIndex = -1;
pWindowResInfo->size = 0;

View File

@ -660,7 +660,7 @@ static SRpcConn *rpcAllocateClientConn(SRpcInfo *pRpc) {
pConn->spi = pRpc->spi;
pConn->encrypt = pRpc->encrypt;
if (pConn->spi) memcpy(pConn->secret, pRpc->secret, TSDB_KEY_LEN);
tDebug("%s %p client connection is allocated", pRpc->label, pConn);
tDebug("%s %p client connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
}
return pConn;
@ -721,7 +721,7 @@ static SRpcConn *rpcAllocateServerConn(SRpcInfo *pRpc, SRecvInfo *pRecv) {
}
taosHashPut(pRpc->hash, hashstr, size, (char *)&pConn, POINTER_BYTES);
tDebug("%s %p server connection is allocated", pRpc->label, pConn);
tDebug("%s %p server connection is allocated, uid:0x%x", pRpc->label, pConn, pConn->linkUid);
}
return pConn;
@ -848,6 +848,16 @@ static int rpcProcessRspHead(SRpcConn *pConn, SRpcHead *pHead) {
return TSDB_CODE_RPC_ALREADY_PROCESSED;
}
if (pHead->code == TSDB_CODE_RPC_MISMATCHED_LINK_ID) {
tDebug("%s, mismatched linkUid, link shall be restarted", pConn->info);
pConn->secured = 0;
((SRpcHead *)pConn->pReqMsg)->destId = 0;
rpcSendMsgToPeer(pConn, pConn->pReqMsg, pConn->reqMsgLen);
if (pConn->connType != RPC_CONN_TCPC)
pConn->pTimer = taosTmrStart(rpcProcessRetryTimer, tsRpcTimer, pConn, pRpc->tmrCtrl);
return TSDB_CODE_RPC_ALREADY_PROCESSED;
}
if (pHead->code == TSDB_CODE_RPC_ACTION_IN_PROGRESS) {
if (pConn->tretry <= tsRpcMaxRetry) {
tDebug("%s, peer is still processing the transaction, retry:%d", pConn->info, pConn->tretry);

View File

@ -70,6 +70,7 @@ typedef struct {
pthread_rwlock_t rwLock;
int32_t nTables;
int32_t maxTables;
STable** tables;
SList* superList;
SHashObj* uidMap;
@ -111,9 +112,11 @@ typedef struct {
typedef struct {
T_REF_DECLARE();
SRWLatch latch;
TSKEY keyFirst;
TSKEY keyLast;
int64_t numOfRows;
int32_t maxTables;
STableData** tData;
SList* actList;
SList* bufBlockList;
@ -304,6 +307,7 @@ typedef struct {
// Operations
// ------------------ tsdbMeta.c
#define TSDB_INIT_NTABLES 1024
#define TABLE_TYPE(t) (t)->type
#define TABLE_NAME(t) (t)->name
#define TABLE_CHAR_NAME(t) TABLE_NAME(t)->data
@ -395,6 +399,7 @@ int tsdbInsertRowToMem(STsdbRepo* pRepo, SDataRow row, STable* pTable);
int tsdbRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbUnRefMemTable(STsdbRepo* pRepo, SMemTable* pMemTable);
int tsdbTakeMemSnapshot(STsdbRepo* pRepo, SMemTable** pMem, SMemTable** pIMem);
void tsdbUnTakeMemSnapShot(STsdbRepo* pRepo, SMemTable* pMem, SMemTable* pIMem);
void* tsdbAllocBytes(STsdbRepo* pRepo, int bytes);
int tsdbAsyncCommit(STsdbRepo* pRepo);
int tsdbLoadDataFromCache(STable* pTable, SSkipListIterator* pIter, TSKEY maxKey, int maxRowsToRead, SDataCols* pCols,
@ -429,7 +434,7 @@ STsdbFileH* tsdbNewFileH(STsdbCfg* pCfg);
void tsdbFreeFileH(STsdbFileH* pFileH);
int tsdbOpenFileH(STsdbRepo* pRepo);
void tsdbCloseFileH(STsdbRepo* pRepo);
SFileGroup* tsdbCreateFGroupIfNeed(STsdbRepo* pRepo, char* dataDir, int fid, int maxTables);
SFileGroup* tsdbCreateFGroupIfNeed(STsdbRepo* pRepo, char* dataDir, int fid);
void tsdbInitFileGroupIter(STsdbFileH* pFileH, SFileGroupIter* pIter, int direction);
void tsdbSeekFileGroupIter(SFileGroupIter* pIter, int fid);
SFileGroup* tsdbGetFileGroupNext(SFileGroupIter* pIter);
@ -511,6 +516,7 @@ void tsdbGetDataFileName(STsdbRepo* pRepo, int fid, int type, char* fname
int tsdbLockRepo(STsdbRepo* pRepo);
int tsdbUnlockRepo(STsdbRepo* pRepo);
char* tsdbGetDataDirName(char* rootDir);
int tsdbGetNextMaxTables(int tid);
STsdbMeta* tsdbGetMeta(TSDB_REPO_T* pRepo);
STsdbFileH* tsdbGetFile(TSDB_REPO_T* pRepo);

View File

@ -149,7 +149,7 @@ void tsdbCloseFileH(STsdbRepo *pRepo) {
}
}
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid, int maxTables) {
SFileGroup *tsdbCreateFGroupIfNeed(STsdbRepo *pRepo, char *dataDir, int fid) {
STsdbFileH *pFileH = pRepo->tsdbFileH;
if (pFileH->nFGroups >= pFileH->maxFGroups) return NULL;

View File

@ -62,7 +62,6 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo);
static int tsdbInitSubmitBlkIter(SSubmitBlk *pBlock, SSubmitBlkIter *pIter);
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression);
static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep);
static int tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables);
static int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks);
static int keyFGroupCompFunc(const void *key, const void *fgroup);
static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg);
@ -85,10 +84,10 @@ int32_t tsdbCreateRepo(char *rootDir, STsdbCfg *pCfg) {
if (tsdbSetRepoEnv(rootDir, pCfg) < 0) return -1;
tsdbDebug(
"vgId:%d tsdb env create succeed! cacheBlockSize %d totalBlocks %d maxTables %d daysPerFile %d keep "
"vgId:%d tsdb env create succeed! cacheBlockSize %d totalBlocks %d daysPerFile %d keep "
"%d minRowsPerFileBlock %d maxRowsPerFileBlock %d precision %d compression %d",
pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->maxTables, pCfg->daysPerFile, pCfg->keep,
pCfg->minRowsPerFileBlock, pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression);
pCfg->tsdbId, pCfg->cacheBlockSize, pCfg->totalBlocks, pCfg->daysPerFile, pCfg->keep, pCfg->minRowsPerFileBlock,
pCfg->maxRowsPerFileBlock, pCfg->precision, pCfg->compression);
return 0;
}
@ -307,13 +306,6 @@ int32_t tsdbConfigRepo(TSDB_REPO_T *repo, STsdbCfg *pCfg) {
tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks);
configChanged = true;
}
if (pRCfg->maxTables != pCfg->maxTables) {
if (tsdbAlterMaxTables(pRepo, pCfg->maxTables) < 0) {
tsdbError("vgId:%d failed to configure repo when alter maxTables since %s", REPO_ID(pRepo), tstrerror(terrno));
return -1;
}
configChanged = true;
}
if (configChanged) {
if (tsdbSaveConfig(pRepo->rootDir, &pRepo->config) < 0) {
@ -385,6 +377,18 @@ char *tsdbGetDataDirName(char *rootDir) {
return fname;
}
int tsdbGetNextMaxTables(int tid) {
ASSERT(tid >= 1 && tid <= TSDB_MAX_TABLES);
int maxTables = TSDB_INIT_NTABLES;
while (true) {
maxTables = MIN(maxTables, TSDB_MAX_TABLES);
if (tid <= maxTables) break;
maxTables *= 2;
}
return maxTables + 1;
}
STsdbMeta * tsdbGetMeta(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbMeta; }
STsdbFileH * tsdbGetFile(TSDB_REPO_T *pRepo) { return ((STsdbRepo *)pRepo)->tsdbFileH; }
STsdbRepoInfo *tsdbGetStatus(TSDB_REPO_T *pRepo) { return NULL; }
@ -417,17 +421,6 @@ static int32_t tsdbCheckAndSetDefaultCfg(STsdbCfg *pCfg) {
goto _err;
}
// Check maxTables
if (pCfg->maxTables == -1) {
pCfg->maxTables = TSDB_DEFAULT_TABLES+1;
} else {
if (pCfg->maxTables - 1 < TSDB_MIN_TABLES || pCfg->maxTables - 1 > TSDB_MAX_TABLES) {
tsdbError("vgId:%d invalid maxTables configuration! maxTables %d TSDB_MIN_TABLES %d TSDB_MAX_TABLES %d",
pCfg->tsdbId, pCfg->maxTables - 1, TSDB_MIN_TABLES, TSDB_MAX_TABLES);
goto _err;
}
}
// Check daysPerFile
if (pCfg->daysPerFile == -1) {
pCfg->daysPerFile = TSDB_DEFAULT_DAYS_PER_FILE;
@ -713,6 +706,7 @@ static int32_t tsdbInsertDataToTable(STsdbRepo *pRepo, SSubmitBlk *pBlock, TSKEY
STsdbMeta *pMeta = pRepo->tsdbMeta;
int64_t points = 0;
ASSERT(pBlock->tid < pMeta->maxTables);
STable *pTable = pMeta->tables[pBlock->tid];
ASSERT(pTable != NULL && TABLE_UID(pTable) == pBlock->uid);
@ -779,7 +773,6 @@ static SDataRow tsdbGetSubmitBlkNext(SSubmitBlkIter *pIter) {
}
static int tsdbRestoreInfo(STsdbRepo *pRepo) {
// TODO
STsdbMeta * pMeta = pRepo->tsdbMeta;
STsdbFileH *pFileH = pRepo->tsdbFileH;
SFileGroup *pFGroup = NULL;
@ -792,7 +785,7 @@ static int tsdbRestoreInfo(STsdbRepo *pRepo) {
tsdbInitFileGroupIter(pFileH, &iter, TSDB_ORDER_DESC);
while ((pFGroup = tsdbGetFileGroupNext(&iter)) != NULL) {
if (tsdbSetAndOpenHelperFile(&rhelper, pFGroup) < 0) goto _err;
for (int i = 1; i < pRepo->config.maxTables; i++) {
for (int i = 1; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable == NULL) continue;
tsdbSetHelperTable(&rhelper, pTable, pRepo);
@ -868,36 +861,6 @@ static int tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) {
return 0;
}
static int tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables) {
// TODO
int oldMaxTables = pRepo->config.maxTables;
if (oldMaxTables < pRepo->config.maxTables) {
terrno = TSDB_CODE_TDB_INVALID_ACTION;
return -1;
}
STsdbMeta *pMeta = pRepo->tsdbMeta;
pMeta->tables = realloc(pMeta->tables, maxTables * sizeof(STable *));
memset(&pMeta->tables[oldMaxTables], 0, sizeof(STable *) * (maxTables - oldMaxTables));
pRepo->config.maxTables = maxTables;
if (pRepo->mem) {
pRepo->mem->tData = realloc(pRepo->mem->tData, maxTables * sizeof(STableData *));
memset(POINTER_SHIFT(pRepo->mem->tData, sizeof(STableData *) * oldMaxTables), 0,
sizeof(STableData *) * (maxTables - oldMaxTables));
}
if (pRepo->imem) {
pRepo->imem->tData = realloc(pRepo->imem->tData, maxTables * sizeof(STableData *));
memset(POINTER_SHIFT(pRepo->imem->tData, sizeof(STableData *) * oldMaxTables), 0,
sizeof(STableData *) * (maxTables - oldMaxTables));
}
tsdbDebug("vgId:%d, tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables);
return 0;
}
static int keyFGroupCompFunc(const void *key, const void *fgroup) {
int fid = *(int *)key;
SFileGroup *pFGroup = (SFileGroup *)fgroup;
@ -914,7 +877,6 @@ static int tsdbEncodeCfg(void **buf, STsdbCfg *pCfg) {
tlen += taosEncodeVariantI32(buf, pCfg->tsdbId);
tlen += taosEncodeFixedI32(buf, pCfg->cacheBlockSize);
tlen += taosEncodeVariantI32(buf, pCfg->totalBlocks);
tlen += taosEncodeVariantI32(buf, pCfg->maxTables);
tlen += taosEncodeVariantI32(buf, pCfg->daysPerFile);
tlen += taosEncodeVariantI32(buf, pCfg->keep);
tlen += taosEncodeVariantI32(buf, pCfg->keep1);
@ -931,7 +893,6 @@ static void *tsdbDecodeCfg(void *buf, STsdbCfg *pCfg) {
buf = taosDecodeVariantI32(buf, &(pCfg->tsdbId));
buf = taosDecodeFixedI32(buf, &(pCfg->cacheBlockSize));
buf = taosDecodeVariantI32(buf, &(pCfg->totalBlocks));
buf = taosDecodeVariantI32(buf, &(pCfg->maxTables));
buf = taosDecodeVariantI32(buf, &(pCfg->daysPerFile));
buf = taosDecodeVariantI32(buf, &(pCfg->keep));
buf = taosDecodeVariantI32(buf, &(pCfg->keep1));
@ -1037,7 +998,7 @@ static int tsdbScanAndConvertSubmitMsg(STsdbRepo *pRepo, SSubmitMsg *pMsg) {
pBlock->schemaLen = htonl(pBlock->schemaLen);
pBlock->numOfRows = htons(pBlock->numOfRows);
if (pBlock->tid <= 0 || pBlock->tid >= pRepo->config.maxTables) {
if (pBlock->tid <= 0 || pBlock->tid >= pMeta->maxTables) {
tsdbError("vgId:%d failed to get table to insert data, uid %" PRIu64 " tid %d", REPO_ID(pRepo), pBlock->uid,
pBlock->tid);
terrno = TSDB_CODE_TDB_INVALID_TABLE_ID;
@ -1120,7 +1081,7 @@ TSKEY tsdbGetTableLastKey(TSDB_REPO_T *repo, uint64_t uid) {
static void tsdbStartStream(STsdbRepo *pRepo) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
for (int i = 0; i < pRepo->config.maxTables; i++) {
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
pTable->cqhandle = (*pRepo->appH.cqCreateFunc)(pRepo->appH.cqH, TABLE_UID(pTable), TABLE_TID(pTable), pTable->sql,
@ -1133,7 +1094,7 @@ static void tsdbStartStream(STsdbRepo *pRepo) {
static void tsdbStopStream(STsdbRepo *pRepo) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
for (int i = 0; i < pRepo->config.maxTables; i++) {
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable && pTable->type == TSDB_STREAM_TABLE) {
(*pRepo->appH.cqDropFunc)(pTable->cqhandle);
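
With maxTables removed from STsdbCfg in this file, the table array now grows on demand: tsdbGetNextMaxTables() above starts at TSDB_INIT_NTABLES and doubles until the requested tid fits, capped at TSDB_MAX_TABLES. Below is a self-contained sketch of that growth rule; the two constants are local placeholders, not the values from the tsdb headers.

/* Sketch of the doubling rule in tsdbGetNextMaxTables(); constants are placeholders. */
#include <stdio.h>

#define INIT_NTABLES 1024          /* mirrors TSDB_INIT_NTABLES in the diff */
#define MAX_TABLES   (1024 * 1024) /* placeholder; the real TSDB_MAX_TABLES may differ */

static int nextMaxTables(int tid) {
  if (tid < 1 || tid > MAX_TABLES) return -1;   /* the original ASSERTs this range */
  int maxTables = INIT_NTABLES;
  while (1) {
    if (maxTables > MAX_TABLES) maxTables = MAX_TABLES;
    if (tid <= maxTables) break;
    maxTables *= 2;                             /* geometric growth until tid fits */
  }
  return maxTables + 1;                         /* one extra slot, since tids start at 1 */
}

int main(void) {
  int tids[] = {1, 1024, 1025, 5000};
  for (int i = 0; i < 4; ++i) {
    printf("tid %d -> maxTables %d\n", tids[i], nextMaxTables(tids[i]));
  }
  /* expected: 1 -> 1025, 1024 -> 1025, 1025 -> 2049, 5000 -> 8193 */
  return 0;
}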

View File

@ -21,7 +21,7 @@
static FORCE_INLINE STsdbBufBlock *tsdbGetCurrBufBlock(STsdbRepo *pRepo);
static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes);
static SMemTable * tsdbNewMemTable(STsdbCfg *pCfg);
static SMemTable * tsdbNewMemTable(STsdbRepo *pRepo);
static void tsdbFreeMemTable(SMemTable *pMemTable);
static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable);
static void tsdbFreeTableData(STableData *pTableData);
@ -30,13 +30,15 @@ static void * tsdbCommitData(void *arg);
static int tsdbCommitMeta(STsdbRepo *pRepo);
static void tsdbEndCommit(STsdbRepo *pRepo);
static int tsdbHasDataToCommit(SCommitIter *iters, int nIters, TSKEY minKey, TSKEY maxKey);
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHelper *pHelper, SDataCols *pDataCols);
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo);
static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables);
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables);
// ---------------- INTERNAL FUNCTIONS ----------------
int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
STsdbCfg * pCfg = &pRepo->config;
STsdbMeta * pMeta = pRepo->tsdbMeta;
int32_t level = 0;
int32_t headSize = 0;
TSKEY key = dataRowKey(row);
@ -45,7 +47,7 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
SSkipList * pSList = NULL;
int bytes = 0;
if (pMemTable != NULL && pMemTable->tData[TABLE_TID(pTable)] != NULL &&
if (pMemTable != NULL && TABLE_TID(pTable) < pMemTable->maxTables && pMemTable->tData[TABLE_TID(pTable)] != NULL &&
pMemTable->tData[TABLE_TID(pTable)]->uid == TABLE_UID(pTable)) {
pTableData = pMemTable->tData[TABLE_TID(pTable)];
pSList = pTableData->pData;
@ -66,13 +68,20 @@ int tsdbInsertRowToMem(STsdbRepo *pRepo, SDataRow row, STable *pTable) {
// Operations above may change pRepo->mem, retake those values
ASSERT(pRepo->mem != NULL);
pMemTable = pRepo->mem;
if (TABLE_TID(pTable) >= pMemTable->maxTables) {
if (tsdbAdjustMemMaxTables(pMemTable, pMeta->maxTables) < 0) return -1;;
}
pTableData = pMemTable->tData[TABLE_TID(pTable)];
if (pTableData == NULL || pTableData->uid != TABLE_UID(pTable)) {
if (pTableData != NULL) { // destroy the table skiplist (may have race condition problem)
taosWLockLatch(&(pMemTable->latch));
pMemTable->tData[TABLE_TID(pTable)] = NULL;
tsdbFreeTableData(pTableData);
taosWUnLockLatch(&(pMemTable->latch));
}
pTableData = tsdbNewTableData(pCfg, pTable);
if (pTableData == NULL) {
tsdbError("vgId:%d failed to insert row with key %" PRId64
@ -122,7 +131,6 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
int ref = T_REF_DEC(pMemTable);
tsdbDebug("vgId:%d unref memtable %p ref %d", REPO_ID(pRepo), pMemTable, ref);
if (ref == 0) {
STsdbCfg * pCfg = &pRepo->config;
STsdbBufPool *pBufPool = pRepo->pPool;
SListNode *pNode = NULL;
@ -139,7 +147,7 @@ int tsdbUnRefMemTable(STsdbRepo *pRepo, SMemTable *pMemTable) {
}
if (tsdbUnlockRepo(pRepo) < 0) return -1;
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMemTable->maxTables; i++) {
if (pMemTable->tData[i] != NULL) {
tsdbFreeTableData(pMemTable->tData[i]);
}
@ -161,11 +169,24 @@ int tsdbTakeMemSnapshot(STsdbRepo *pRepo, SMemTable **pMem, SMemTable **pIMem) {
tsdbRefMemTable(pRepo, *pIMem);
if (tsdbUnlockRepo(pRepo) < 0) return -1;
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
if (*pMem != NULL) taosRLockLatch(&((*pMem)->latch));
tsdbDebug("vgId:%d take memory snapshot, pMem %p pIMem %p", REPO_ID(pRepo), *pMem, *pIMem);
return 0;
}
void tsdbUnTakeMemSnapShot(STsdbRepo *pRepo, SMemTable *pMem, SMemTable *pIMem) {
if (pMem != NULL) {
taosRUnLockLatch(&(pMem->latch));
tsdbUnRefMemTable(pRepo, pMem);
}
if (pIMem != NULL) {
tsdbUnRefMemTable(pRepo, pIMem);
}
}
void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
STsdbCfg * pCfg = &pRepo->config;
STsdbBufBlock *pBufBlock = tsdbGetCurrBufBlock(pRepo);
@ -182,7 +203,7 @@ void *tsdbAllocBytes(STsdbRepo *pRepo, int bytes) {
}
if (pRepo->mem == NULL) {
SMemTable *pMemTable = tsdbNewMemTable(&pRepo->config);
SMemTable *pMemTable = tsdbNewMemTable(pRepo);
if (pMemTable == NULL) return NULL;
if (tsdbLockRepo(pRepo) < 0) {
@ -329,7 +350,9 @@ static void tsdbFreeBytes(STsdbRepo *pRepo, void *ptr, int bytes) {
listNEles(pRepo->mem->bufBlockList), pBufBlock->offset, pBufBlock->remain);
}
static SMemTable* tsdbNewMemTable(STsdbCfg* pCfg) {
static SMemTable* tsdbNewMemTable(STsdbRepo *pRepo) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
SMemTable *pMemTable = (SMemTable *)calloc(1, sizeof(*pMemTable));
if (pMemTable == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@ -340,7 +363,8 @@ static SMemTable* tsdbNewMemTable(STsdbCfg* pCfg) {
pMemTable->keyLast = 0;
pMemTable->numOfRows = 0;
pMemTable->tData = (STableData**)calloc(pCfg->maxTables, sizeof(STableData*));
pMemTable->maxTables = pMeta->maxTables;
pMemTable->tData = (STableData **)calloc(pMemTable->maxTables, sizeof(STableData *));
if (pMemTable->tData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
@ -398,9 +422,6 @@ static STableData *tsdbNewTableData(STsdbCfg *pCfg, STable *pTable) {
goto _err;
}
// TODO: operation here should not be here, remove it
pTableData->pData->level = 1;
return pTableData;
_err:
@ -473,7 +494,7 @@ static void *tsdbCommitData(void *arg) {
_exit:
tdFreeDataCols(pDataCols);
tsdbDestroyCommitIters(iters, pCfg->maxTables);
tsdbDestroyCommitIters(iters, pMem->maxTables);
tsdbDestroyHelper(&whelper);
tsdbEndCommit(pRepo);
tsdbInfo("vgId:%d commit over", pRepo->config.tsdbId);
@ -552,12 +573,13 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
STsdbCfg * pCfg = &pRepo->config;
STsdbFileH *pFileH = pRepo->tsdbFileH;
SFileGroup *pGroup = NULL;
SMemTable * pMem = pRepo->imem;
TSKEY minKey = 0, maxKey = 0;
tsdbGetFidKeyRange(pCfg->daysPerFile, pCfg->precision, fid, &minKey, &maxKey);
// Check if there are data to commit to this file
int hasDataToCommit = tsdbHasDataToCommit(iters, pCfg->maxTables, minKey, maxKey);
int hasDataToCommit = tsdbHasDataToCommit(iters, pMem->maxTables, minKey, maxKey);
if (!hasDataToCommit) {
tsdbDebug("vgId:%d no data to commit to file %d", REPO_ID(pRepo), fid);
return 0;
@ -570,7 +592,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
return -1;
}
if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid, pCfg->maxTables)) == NULL) {
if ((pGroup = tsdbCreateFGroupIfNeed(pRepo, dataDir, fid)) == NULL) {
tsdbError("vgId:%d failed to create file group %d since %s", REPO_ID(pRepo), fid, tstrerror(terrno));
goto _err;
}
@ -582,7 +604,7 @@ static int tsdbCommitToFile(STsdbRepo *pRepo, int fid, SCommitIter *iters, SRWHe
}
// Loop to commit data in each table
for (int tid = 1; tid < pCfg->maxTables; tid++) {
for (int tid = 1; tid < pMem->maxTables; tid++) {
SCommitIter *pIter = iters + tid;
if (pIter->pTable == NULL) continue;
@ -643,11 +665,10 @@ _err:
}
static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
STsdbCfg * pCfg = &(pRepo->config);
SMemTable *pMem = pRepo->imem;
STsdbMeta *pMeta = pRepo->tsdbMeta;
SCommitIter *iters = (SCommitIter *)calloc(pCfg->maxTables, sizeof(SCommitIter));
SCommitIter *iters = (SCommitIter *)calloc(pMem->maxTables, sizeof(SCommitIter));
if (iters == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
@ -656,7 +677,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
if (tsdbRLockRepoMeta(pRepo) < 0) goto _err;
// reference all tables
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMem->maxTables; i++) {
if (pMeta->tables[i] != NULL) {
tsdbRefTable(pMeta->tables[i]);
iters[i].pTable = pMeta->tables[i];
@ -665,7 +686,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
if (tsdbUnlockRepoMeta(pRepo) < 0) goto _err;
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMem->maxTables; i++) {
if ((iters[i].pTable != NULL) && (pMem->tData[i] != NULL) && (TABLE_UID(iters[i].pTable) == pMem->tData[i]->uid)) {
if ((iters[i].pIter = tSkipListCreateIter(pMem->tData[i]->pData)) == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
@ -679,7 +700,7 @@ static SCommitIter *tsdbCreateCommitIters(STsdbRepo *pRepo) {
return iters;
_err:
tsdbDestroyCommitIters(iters, pCfg->maxTables);
tsdbDestroyCommitIters(iters, pMem->maxTables);
return NULL;
}
@ -694,4 +715,26 @@ static void tsdbDestroyCommitIters(SCommitIter *iters, int maxTables) {
}
free(iters);
}
static int tsdbAdjustMemMaxTables(SMemTable *pMemTable, int maxTables) {
ASSERT(pMemTable->maxTables < maxTables);
STableData **pTableData = (STableData **)calloc(maxTables, sizeof(STableData *));
if (pTableData == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
memcpy((void *)pTableData, (void *)pMemTable->tData, sizeof(STableData *) * pMemTable->maxTables);
STableData **tData = pMemTable->tData;
taosWLockLatch(&(pMemTable->latch));
pMemTable->maxTables = maxTables;
pMemTable->tData = pTableData;
taosWUnLockLatch(&(pMemTable->latch));
tfree(tData);
return 0;
}
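
tsdbAdjustMemMaxTables() above grows the per-table array with a copy-then-swap: the larger array is prepared outside the latch, and only the pointer and size are swapped under taosWLockLatch(), matching the read latch taken in tsdbTakeMemSnapshot(). The following is a hedged illustration of the same pattern, with a POSIX rwlock standing in for SRWLatch; it is not TDengine code.

/* Illustration of grow-by-copy-then-swap under a write lock; not TDengine code. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
  pthread_rwlock_t latch;      /* stand-in for the SRWLatch used in the diff */
  int              maxTables;
  void           **tData;
} MemTable;

/* Grow tData to maxTables slots: build the copy outside the lock, swap under it. */
static int adjustMaxTables(MemTable *mem, int maxTables) {
  if (maxTables <= mem->maxTables) return 0;

  void **bigger = calloc(maxTables, sizeof(void *));
  if (bigger == NULL) return -1;
  memcpy(bigger, mem->tData, sizeof(void *) * mem->maxTables);

  void **old = mem->tData;
  pthread_rwlock_wrlock(&mem->latch);   /* readers hold the read side while scanning tData */
  mem->maxTables = maxTables;
  mem->tData     = bigger;
  pthread_rwlock_unlock(&mem->latch);

  free(old);                            /* old array is unreachable after the swap */
  return 0;
}

int main(void) {
  MemTable mem = { .maxTables = 4 };
  pthread_rwlock_init(&mem.latch, NULL);
  mem.tData = calloc(mem.maxTables, sizeof(void *));

  adjustMaxTables(&mem, 16);            /* e.g. grow from 4 to 16 slots */
  printf("maxTables is now %d\n", mem.maxTables);

  free(mem.tData);
  pthread_rwlock_destroy(&mem.latch);
  return 0;
}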

View File

@ -49,6 +49,7 @@ static int tsdbGetTableEncodeSize(int8_t act, STable *pTable);
static void * tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STable *pTable);
static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable);
static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable);
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid);
// ------------------ OUTER FUNCTIONS ------------------
int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
@ -60,13 +61,13 @@ int tsdbCreateTable(TSDB_REPO_T *repo, STableCfg *pCfg) {
int tid = pCfg->tableId.tid;
STable * pTable = NULL;
if (tid < 0 || tid >= pRepo->config.maxTables) {
if (tid < 1 || tid > TSDB_MAX_TABLES) {
tsdbError("vgId:%d failed to create table since invalid tid %d", REPO_ID(pRepo), tid);
terrno = TSDB_CODE_TDB_IVD_CREATE_TABLE_INFO;
goto _err;
}
if (pMeta->tables[tid] != NULL) {
if (tid < pMeta->maxTables && pMeta->tables[tid] != NULL) {
if (TABLE_UID(pMeta->tables[tid]) == pCfg->tableId.uid) {
tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, REPO_ID(pRepo), TABLE_CHAR_NAME(pTable),
TABLE_TID(pTable), TABLE_UID(pTable));
@ -422,7 +423,8 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
goto _err;
}
pMeta->tables = (STable **)calloc(pCfg->maxTables, sizeof(STable *));
pMeta->maxTables = TSDB_INIT_NTABLES + 1;
pMeta->tables = (STable **)calloc(pMeta->maxTables, sizeof(STable *));
if (pMeta->tables == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
@ -434,7 +436,7 @@ STsdbMeta *tsdbNewMeta(STsdbCfg *pCfg) {
goto _err;
}
pMeta->uidMap = taosHashInit(pCfg->maxTables, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
pMeta->uidMap = taosHashInit(TSDB_INIT_NTABLES * 1.1, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), false);
if (pMeta->uidMap == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
goto _err;
@ -484,14 +486,13 @@ _err:
}
int tsdbCloseMeta(STsdbRepo *pRepo) {
STsdbCfg * pCfg = &pRepo->config;
STsdbMeta *pMeta = pRepo->tsdbMeta;
SListNode *pNode = NULL;
STable * pTable = NULL;
if (pMeta == NULL) return 0;
tdCloseKVStore(pMeta->pStore);
for (int i = 1; i < pCfg->maxTables; i++) {
for (int i = 1; i < pMeta->maxTables; i++) {
tsdbFreeTable(pMeta->tables[i]);
}
@ -624,9 +625,8 @@ static int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
static void tsdbOrgMeta(void *pHandle) {
STsdbRepo *pRepo = (STsdbRepo *)pHandle;
STsdbMeta *pMeta = pRepo->tsdbMeta;
STsdbCfg * pCfg = &pRepo->config;
for (int i = 1; i < pCfg->maxTables; i++) {
for (int i = 1; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL && pTable->type == TSDB_CHILD_TABLE) {
tsdbAddTableIntoIndex(pMeta, pTable, true);
@ -781,6 +781,9 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
goto _err;
}
} else {
if (TABLE_TID(pTable) >= pMeta->maxTables) {
if (tsdbAdjustMetaTables(pRepo, TABLE_TID(pTable)) < 0) goto _err;
}
if (TABLE_TYPE(pTable) == TSDB_CHILD_TABLE && addIdx) { // add STABLE to the index
if (tsdbAddTableIntoIndex(pMeta, pTable, true) < 0) {
tsdbDebug("vgId:%d failed to add table %s to meta while add table to index since %s", REPO_ID(pRepo),
@ -788,6 +791,7 @@ static int tsdbAddTableToMeta(STsdbRepo *pRepo, STable *pTable, bool addIdx, boo
goto _err;
}
}
ASSERT(TABLE_TID(pTable) < pMeta->maxTables);
pMeta->tables[TABLE_TID(pTable)] = pTable;
pMeta->nTables++;
}
@ -827,7 +831,6 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
SListIter lIter = {0};
SListNode *pNode = NULL;
STable * tTable = NULL;
STsdbCfg * pCfg = &(pRepo->config);
STSchema *pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
int maxCols = schemaNCols(pSchema);
@ -860,7 +863,7 @@ static void tsdbRemoveTableFromMeta(STsdbRepo *pRepo, STable *pTable, bool rmFro
if (maxCols == pMeta->maxCols || maxRowBytes == pMeta->maxRowBytes) {
maxCols = 0;
maxRowBytes = 0;
for (int i = 0; i < pCfg->maxTables; i++) {
for (int i = 0; i < pMeta->maxTables; i++) {
STable *pTable = pMeta->tables[i];
if (pTable != NULL) {
pSchema = tsdbGetTableSchemaImpl(pTable, false, false, -1);
@ -1215,7 +1218,9 @@ static void *tsdbInsertTableAct(STsdbRepo *pRepo, int8_t act, void *buf, STable
static int tsdbRemoveTableFromStore(STsdbRepo *pRepo, STable *pTable) {
int tlen = tsdbGetTableEncodeSize(TSDB_DROP_META, pTable);
void *buf = tsdbAllocBytes(pRepo, tlen);
ASSERT(buf != NULL);
if (buf == NULL) {
return -1;
}
void *pBuf = buf;
if (TABLE_TYPE(pTable) == TSDB_SUPER_TABLE) {
@ -1266,5 +1271,28 @@ static int tsdbRmTableFromMeta(STsdbRepo *pRepo, STable *pTable) {
tsdbRemoveTableFromMeta(pRepo, pTable, true, true);
}
return 0;
}
static int tsdbAdjustMetaTables(STsdbRepo *pRepo, int tid) {
STsdbMeta *pMeta = pRepo->tsdbMeta;
ASSERT(tid >= pMeta->maxTables);
int maxTables = tsdbGetNextMaxTables(tid);
STable **tables = (STable **)calloc(maxTables, sizeof(STable *));
if (tables == NULL) {
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return -1;
}
memcpy((void *)tables, (void *)pMeta->tables, sizeof(STable *) * pMeta->maxTables);
pMeta->maxTables = maxTables;
STable **tTables = pMeta->tables;
pMeta->tables = tables;
tfree(tTables);
tsdbDebug("vgId:%d tsdb meta maxTables is adjusted as %d", REPO_ID(pRepo), maxTables);
return 0;
}

View File

@ -147,6 +147,7 @@ int tsdbSetAndOpenHelperFile(SRWHelper *pHelper, SFileGroup *pGroup) {
if (tsdbOpenFile(pFile, O_WRONLY | O_CREAT) < 0) goto _err;
pFile->info.size = TSDB_FILE_HEAD_SIZE;
pFile->info.magic = TSDB_FILE_INIT_MAGIC;
pFile->info.len = 0;
if (tsdbUpdateFileHeader(pFile, 0) < 0) return -1;
}
} else {
@ -302,6 +303,10 @@ void tsdbSetHelperTable(SRWHelper *pHelper, STable *pTable, STsdbRepo *pRepo) {
memset(&(pHelper->curCompIdx), 0, sizeof(SCompIdx));
}
if (helperType(pHelper) == TSDB_WRITE_HELPER && pHelper->curCompIdx.hasLast) {
pHelper->hasOldLastBlock = true;
}
helperSetState(pHelper, TSDB_HELPER_TABLE_SET);
ASSERT(pHelper->state == ((TSDB_HELPER_TABLE_SET << 1) - 1));
}
@ -555,10 +560,6 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
}
helperSetState(pHelper, TSDB_HELPER_IDX_LOAD);
if (helperType(pHelper) == TSDB_WRITE_HELPER) {
pFile->info.len = 0;
}
// Copy the memory for outside usage
if (target && pHelper->idxH.numOfIdx > 0)
memcpy(target, pHelper->idxH.pIdxArray, sizeof(SCompIdx) * pHelper->idxH.numOfIdx);
@ -1231,8 +1232,8 @@ static int tsdbLoadColData(SRWHelper *pHelper, SFile *pFile, SCompBlock *pCompBl
if (tsdbCheckAndDecodeColumnData(pDataCol, pHelper->pBuffer, pCompCol->len, pCompBlock->algorithm,
pCompBlock->numOfRows, pHelper->pRepo->config.maxRowsPerFileBlock,
pHelper->compBuffer, tsizeof(pHelper->compBuffer)) < 0) {
tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pHelper->pRepo), pFile->fname, pCompCol->colId,
(int64_t)pCompCol->offset);
tsdbError("vgId:%d file %s is broken at column %d offset %" PRId64, REPO_ID(pHelper->pRepo), pFile->fname,
pCompCol->colId, offset);
return -1;
}
@ -1517,8 +1518,8 @@ static int tsdbProcessMergeCommit(SRWHelper *pHelper, SCommitIter *pCommitIter,
if (rows2 == 0) { // all data filtered out
*(pCommitIter->pIter) = slIter;
} else {
if (rows1 + rows2 < pCfg->minRowsPerFileBlock && pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS &&
!TSDB_NLAST_FILE_OPENED(pHelper)) {
if (pCompBlock->numOfRows + rows2 < pCfg->minRowsPerFileBlock &&
pCompBlock->numOfSubBlocks < TSDB_MAX_SUBBLOCKS && !TSDB_NLAST_FILE_OPENED(pHelper)) {
tdResetDataCols(pDataCols);
int rowsRead = tsdbLoadDataFromCache(pTable, pCommitIter->pIter, maxKey, rows1, pDataCols,
pDataCols0->cols[0].pData, pDataCols0->numOfRows);

View File

@ -175,6 +175,9 @@ static SArray* getDefaultLoadColumns(STsdbQueryHandle* pQueryHandle, bool loadTS
TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STableGroupInfo* groupList, void* qinfo) {
STsdbQueryHandle* pQueryHandle = calloc(1, sizeof(STsdbQueryHandle));
if (pQueryHandle == NULL) {
goto out_of_memory;
}
pQueryHandle->order = pCond->order;
pQueryHandle->window = pCond->twindow;
pQueryHandle->pTsdb = tsdb;
@ -260,8 +263,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
return (TsdbQueryHandleT) pQueryHandle;
out_of_memory:
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
tsdbCleanupQueryHandle(pQueryHandle);
terrno = TSDB_CODE_TDB_OUT_OF_MEMORY;
return NULL;
}
@ -317,17 +320,20 @@ static bool initTableMemIterator(STsdbQueryHandle* pHandle, STableCheckInfo* pCh
}
assert(pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL);
if (pHandle->mem && pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
// TODO: add uid check
if (pHandle->mem && pCheckInfo->tableId.tid < pHandle->mem->maxTables &&
pHandle->mem->tData[pCheckInfo->tableId.tid] != NULL) {
pCheckInfo->iter = tSkipListCreateIterFromVal(pHandle->mem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
(const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
if (pHandle->imem && pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
if (pHandle->imem && pCheckInfo->tableId.tid < pHandle->imem->maxTables &&
pHandle->imem->tData[pCheckInfo->tableId.tid] != NULL) {
pCheckInfo->iiter = tSkipListCreateIterFromVal(pHandle->imem->tData[pCheckInfo->tableId.tid]->pData,
(const char*) &pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
(const char*)&pCheckInfo->lastKey, TSDB_DATA_TYPE_TIMESTAMP, order);
}
// both iterators are NULL, no data in buffer right now
if (pCheckInfo->iter == NULL && pCheckInfo->iiter == NULL) {
return false;
@ -1529,7 +1535,6 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
static bool doHasDataInBuffer(STsdbQueryHandle* pQueryHandle) {
size_t numOfTables = taosArrayGetSize(pQueryHandle->pTableCheckInfo);
assert(numOfTables <= ((STsdbRepo*)pQueryHandle->pTsdb)->config.maxTables);
while (pQueryHandle->activeIndex < numOfTables) {
if (hasMoreDataInCache(pQueryHandle)) {
@ -2418,8 +2423,7 @@ void tsdbCleanupQueryHandle(TsdbQueryHandleT queryHandle) {
tfree(pQueryHandle->statis);
// todo check error
tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->mem);
tsdbUnRefMemTable(pQueryHandle->pTsdb, pQueryHandle->imem);
tsdbUnTakeMemSnapShot(pQueryHandle->pTsdb, pQueryHandle->mem, pQueryHandle->imem);
tsdbDestroyHelper(&pQueryHandle->rhelper);

View File

@ -98,7 +98,7 @@ static void tsdbSetCfg(STsdbCfg *pCfg, int32_t tsdbId, int32_t cacheBlockSize, i
pCfg->tsdbId = tsdbId;
pCfg->cacheBlockSize = cacheBlockSize;
pCfg->totalBlocks = totalBlocks;
pCfg->maxTables = maxTables;
// pCfg->maxTables = maxTables;
pCfg->daysPerFile = daysPerFile;
pCfg->keep = keep;
pCfg->minRowsPerFileBlock = minRows;

View File

@ -123,7 +123,7 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
tsdbCfg.tsdbId = pVnodeCfg->cfg.vgId;
tsdbCfg.cacheBlockSize = pVnodeCfg->cfg.cacheBlockSize;
tsdbCfg.totalBlocks = pVnodeCfg->cfg.totalBlocks;
tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
// tsdbCfg.maxTables = pVnodeCfg->cfg.maxTables;
tsdbCfg.daysPerFile = pVnodeCfg->cfg.daysPerFile;
tsdbCfg.keep = pVnodeCfg->cfg.daysToKeep;
tsdbCfg.minRowsPerFileBlock = pVnodeCfg->cfg.minRowsPerFileBlock;
@ -630,14 +630,14 @@ static int32_t vnodeSaveCfg(SMDCreateVnodeMsg *pVnodeCfg) {
len += snprintf(content + len, maxLen - len, " \"cfgVersion\": %d,\n", pVnodeCfg->cfg.cfgVersion);
len += snprintf(content + len, maxLen - len, " \"cacheBlockSize\": %d,\n", pVnodeCfg->cfg.cacheBlockSize);
len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pVnodeCfg->cfg.totalBlocks);
len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
// len += snprintf(content + len, maxLen - len, " \"maxTables\": %d,\n", pVnodeCfg->cfg.maxTables);
len += snprintf(content + len, maxLen - len, " \"daysPerFile\": %d,\n", pVnodeCfg->cfg.daysPerFile);
len += snprintf(content + len, maxLen - len, " \"daysToKeep\": %d,\n", pVnodeCfg->cfg.daysToKeep);
len += snprintf(content + len, maxLen - len, " \"daysToKeep1\": %d,\n", pVnodeCfg->cfg.daysToKeep1);
len += snprintf(content + len, maxLen - len, " \"daysToKeep2\": %d,\n", pVnodeCfg->cfg.daysToKeep2);
len += snprintf(content + len, maxLen - len, " \"minRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.minRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"maxRowsPerFileBlock\": %d,\n", pVnodeCfg->cfg.maxRowsPerFileBlock);
len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime);
// len += snprintf(content + len, maxLen - len, " \"commitTime\": %d,\n", pVnodeCfg->cfg.commitTime);
len += snprintf(content + len, maxLen - len, " \"precision\": %d,\n", pVnodeCfg->cfg.precision);
len += snprintf(content + len, maxLen - len, " \"compression\": %d,\n", pVnodeCfg->cfg.compression);
len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pVnodeCfg->cfg.walLevel);
@ -729,12 +729,12 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
pVnode->tsdbCfg.totalBlocks = totalBlocks->valueint;
cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
if (!maxTables || maxTables->type != cJSON_Number) {
vError("vgId:%d, failed to read vnode cfg, maxTables not found", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.maxTables = maxTables->valueint;
// cJSON *maxTables = cJSON_GetObjectItem(root, "maxTables");
// if (!maxTables || maxTables->type != cJSON_Number) {
// vError("vgId:%d, failed to read vnode cfg, maxTables not found", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.maxTables = maxTables->valueint;
cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile");
if (!daysPerFile || daysPerFile->type != cJSON_Number) {
@ -778,12 +778,12 @@ static int32_t vnodeReadCfg(SVnodeObj *pVnode) {
}
pVnode->tsdbCfg.maxRowsPerFileBlock = maxRowsPerFileBlock->valueint;
cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
if (!commitTime || commitTime->type != cJSON_Number) {
vError("vgId:%d, failed to read vnode cfg, commitTime not found", pVnode->vgId);
goto PARSE_OVER;
}
pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
// cJSON *commitTime = cJSON_GetObjectItem(root, "commitTime");
// if (!commitTime || commitTime->type != cJSON_Number) {
// vError("vgId:%d, failed to read vnode cfg, commitTime not found", pVnode->vgId);
// goto PARSE_OVER;
// }
// pVnode->tsdbCfg.commitTime = (int8_t)commitTime->valueint;
cJSON *precision = cJSON_GetObjectItem(root, "precision");
if (!precision || precision->type != cJSON_Number) {

View File

@ -0,0 +1,88 @@
#!/bin/bash
DATA_DIR=/mnt/root/testdata
NUM_LOOP=1
NUM_OF_FILES=100
OUT_FILE=cassandraWrite.out
rowsPerRequest=(1 10 50 100 500 1000 2000)
function printTo {
if $verbose ; then
echo $1
fi
}
function runTest {
for c in `seq 1 $clients`; do
avgRPR[$c]=0
done
printf "R/R, "
for c in `seq 1 $clients`; do
if [ "$c" == "1" ]; then
printf "$c client, "
else
printf "$c clients, "
fi
done
printf "\n"
for r in ${rowsPerRequest[@]}; do
printf "$r, "
for c in `seq 1 $clients`; do
totalRPR=0
for i in `seq 1 $NUM_LOOP`; do
printTo "loop i:$i, java -jar $CAS_TEST_DIR/cassandratest/target/cassandratest-1.0-SNAPSHOT-jar-with-dependencies.jar \
-datadir $DATA_DIR \
-numofFiles $NUM_OF_FILES \
-rowsperrequest $r \
-writeclients $c \
-conf $CAS_TEST_DIR/application.conf"
java -jar $CAS_TEST_DIR/cassandratest/target/cassandratest-1.0-SNAPSHOT-jar-with-dependencies.jar \
-datadir $DATA_DIR \
-numofFiles $NUM_OF_FILES \
-rowsperrequest $r \
-writeclients $c \
-conf $CAS_TEST_DIR/application.conf \
2>&1 > $OUT_FILE
RPR=`cat $OUT_FILE | grep "insertation speed:" | awk '{print $(NF-1)}'`
totalRPR=`echo "scale=4; $totalRPR + $RPR" | bc`
printTo "rows:$r, clients:$c, i:$i RPR:$RPR"
done
avgRPR[$c]=`echo "scale=4; $totalRPR / $NUM_LOOP" | bc`
done
for c in `seq 1 $clients`; do
printf "${avgRPR[$c]}, "
done
printf "\n"
done
}
################ Main ################
verbose=false
clients=1
while : ; do
case $1 in
-v)
verbose=true
shift ;;
-c)
clients=$2
shift 2;;
*)
break ;;
esac
done
printTo "Cassandra Test begin.."
WORK_DIR=/mnt/root/TDengine
CAS_TEST_DIR=$WORK_DIR/tests/comparisonTest/cassandra
runTest
printTo "Cassandra Test done!"

View File

@ -162,6 +162,7 @@ python3 ./test.py -f client/client.py
# Misc
python3 testCompress.py
python3 testNoCompress.py
python3 testMinTablesPerVnode.py
# functions
python3 ./test.py -f functions/function_avg.py
@ -180,4 +181,4 @@ python3 ./test.py -f functions/function_spread.py
python3 ./test.py -f functions/function_stddev.py
python3 ./test.py -f functions/function_sum.py
python3 ./test.py -f functions/function_top.py
python3 ./test.py -f functions/function_twa.py
python3 ./test.py -f functions/function_twa.py

View File

View File

@ -0,0 +1,79 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *
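# Subscription test on a normal table: verify initial consumption, incremental consumption, and the keep/remove-progress and restart semantics of the subscription.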
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.conn = conn
def run(self):
sqlstr = "select * from t0"
topic = "test"
now = int(time.time() * 1000)
tdSql.prepare()
tdLog.info("create a table and insert 10 rows.")
tdSql.execute("create table t0(ts timestamp, a int, b int);")
for i in range(0, 10):
tdSql.execute("insert into t0 values (%d, %d, %d);" % (now + i, i, i))
tdLog.info("consumption 01.")
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(10)
tdLog.info("consumption 02: no new rows inserted")
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 03: after one new rows inserted")
tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 04: keep progress and continue previous subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 05: remove progress and continue previous subscription")
tdSub.close(False)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(11)
tdLog.info("consumption 06: keep progress and restart the subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(11)
tdSub.close(True)
def stop(self):
tdSub.close(False)
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

View File

@ -0,0 +1,114 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import taos
import time
from util.log import *
from util.cases import *
from util.sql import *
from util.sub import *
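# Subscription test on a super table: verify consumption across sub-tables, behavior after a sub-table is dropped, timestamp and field filter criteria, and data spread across two vnodes.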
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.conn = conn
def run(self):
sqlstr = "select * from meters"
topic = "test"
now = int(time.time() * 1000)
tdSql.prepare()
tdLog.info("create a super table and 10 sub-tables, then insert 5 rows into each sub-table.")
tdSql.execute("create table meters(ts timestamp, a int, b int) tags(area int, loc binary(20));")
for i in range(0, 10):
for j in range(0, 5):
tdSql.execute("insert into t%d using meters tags(%d, 'area%d') values (%d, %d, %d);" % (i, i, i, now + j, j, j))
tdLog.info("consumption 01.")
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(50)
tdLog.info("consumption 02: no new rows inserted")
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 03: after one new rows inserted")
tdSql.execute("insert into t0 values (%d, 10, 10);" % (now + 10))
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 04: keep progress and continue previous subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(0)
tdLog.info("consumption 05: remove progress and continue previous subscription")
tdSub.close(False)
tdSub.init(self.conn.subscribe(False, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(51)
tdLog.info("consumption 06: keep progress and restart the subscription")
tdSub.close(True)
tdSub.init(self.conn.subscribe(True, topic, sqlstr, 0))
tdSub.consume()
tdSub.checkRows(51)
tdLog.info("consumption 07: insert one row to two table then remove one table")
tdSql.execute("insert into t0 values (%d, 11, 11);" % (now + 11))
tdSql.execute("insert into t1 values (%d, 11, 11);" % (now + 11))
tdSql.execute("drop table t0")
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 08: check timestamp criteria")
tdSub.close(False)
tdSub.init(self.conn.subscribe(True, topic, sqlstr + " where ts > %d" % now, 0))
tdSub.consume()
tdSub.checkRows(37)
tdLog.info("consumption 09: insert large timestamp to t2 then insert smaller timestamp to t1")
tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 100))
tdSub.consume()
tdSub.checkRows(1)
tdSql.execute("insert into t1 values (%d, 12, 12);" % (now + 12))
tdSub.consume()
tdSub.checkRows(1)
tdLog.info("consumption 10: field criteria")
tdSub.close(True)
tdSub.init(self.conn.subscribe(False, topic, sqlstr + " where a > 100", 0))
tdSql.execute("insert into t2 values (%d, 101, 100);" % (now + 101))
tdSql.execute("insert into t2 values (%d, 100, 100);" % (now + 102))
tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 103))
tdSub.consume()
tdSub.checkRows(2)
tdLog.info("consumption 11: two vnodes")
tdSql.execute("insert into t2 values (%d, 102, 100);" % (now + 104))
tdSql.execute("insert into t9 values (%d, 102, 100);" % (now + 104))
tdSub.consume()
tdSub.checkRows(2)
def stop(self):
tdSub.close(False)
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())

tests/pytest/test.sh Executable file
View File

@ -0,0 +1,23 @@
EXEC_DIR=`dirname "$0"`
if [[ $EXEC_DIR != "." ]]
then
echo "ERROR: Please execute `basename "$0"` in its own directory (for now anyway, pardon the dust)"
exit -1
fi
CURR_DIR=`pwd`
IN_TDINTERNAL="community"
if [[ "$CURR_DIR" == *"$IN_TDINTERNAL"* ]]; then
TAOS_DIR=$CURR_DIR/../../..
else
TAOS_DIR=$CURR_DIR/../..
fi
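# Locate the taosd binary under the build tree and derive the client library path from its location.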
TAOSD_DIR=`find $TAOS_DIR -name "taosd"|grep bin|head -n1`
LIB_DIR=`echo $TAOSD_DIR|rev|cut -d '/' -f 3,4,5,6|rev`/lib
export PYTHONPATH=$(pwd)/../../src/connector/python/linux/python3
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$LIB_DIR
if [[ "$1" == *"test.py"* ]]; then
python3 ./test.py $@
else
python3 $1 $@
fi

View File

@ -0,0 +1,131 @@
#!/usr/bin/python
###################################################################
# Copyright (c) 2016 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# install pip
# pip install src/connector/python/linux/python2/
# -*- coding: utf-8 -*-
import sys
import getopt
import subprocess
from distutils.log import warn as printf
from util.log import *
from util.dnodes import *
from util.cases import *
from util.sql import *
import taos
if __name__ == "__main__":
fileName = "all"
deployPath = ""
testCluster = False
valgrind = 0
logSql = True
stop = 0
opts, args = getopt.gnu_getopt(sys.argv[1:], 'l:sgh', [
'logSql', 'stop', 'valgrind', 'help'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
'A collection of test cases written using Python')
tdLog.printNoPrefix('-l <True:False> logSql Flag')
tdLog.printNoPrefix('-s stop All dnodes')
tdLog.printNoPrefix('-g valgrind Test Flag')
sys.exit(0)
if key in ['-l', '--logSql']:
if (value.upper() == "TRUE"):
logSql = True
elif (value.upper() == "FALSE"):
logSql = False
else:
tdLog.printNoPrefix("logSql value %s is invalid" % logSql)
sys.exit(0)
if key in ['-g', '--valgrind']:
valgrind = 1
if key in ['-s', '--stop']:
stop = 1
if (stop != 0):
if (valgrind == 0):
toBeKilled = "taosd"
else:
toBeKilled = "valgrind.bin"
killCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}' | xargs kill -HUP > /dev/null 2>&1" % toBeKilled
psCmd = "ps -ef|grep -w %s| grep -v grep | awk '{print $2}'" % toBeKilled
processID = subprocess.check_output(psCmd, shell=True)
while(processID):
os.system(killCmd)
time.sleep(1)
processID = subprocess.check_output(psCmd, shell=True)
for port in range(6030, 6041):
usePortPID = "lsof -i tcp:%d | grep LISTEn | awk '{print $2}'" % port
processID = subprocess.check_output(usePortPID, shell=True)
if processID:
killCmd = "kill -TERM %s" % processID
os.system(killCmd)
fuserCmd = "fuser -k -n tcp %d" % port
os.system(fuserCmd)
if valgrind:
time.sleep(2)
tdLog.info('stop All dnodes')
sys.exit(0)
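# Deploy and start a single dnode with minTablesPerVnode set to 100 for this test.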
tdDnodes.init(deployPath)
tdDnodes.setTestCluster(testCluster)
tdDnodes.setValgrind(valgrind)
tdDnodes.stopAll()
tdDnodes.addSimExtraCfg("minTablesPerVnode", "100")
tdDnodes.deploy(1)
tdDnodes.start(1)
host = '127.0.0.1'
tdLog.info("Procedures for tdengine deployed in %s" % (host))
tdCases.logSql(logSql)
conn = taos.connect(
host,
config=tdDnodes.getSimCfgPath())
tdSql.init(conn.cursor(), True)
tdSql.execute("DROP DATABASE IF EXISTS db")
tdSql.execute("CREATE DATABASE IF NOT EXISTS db")
tdSql.execute("USE db")
for i in range(0, 100):
tdSql.execute(
"CREATE TABLE IF NOT EXISTS tb%d (ts TIMESTAMP, temperature INT, humidity FLOAT)" % i)
for i in range(1, 6):
tdSql.execute("INSERT INTO tb99 values (now + %da, %d, %f)" % (i, i, i * 1.0))
tdSql.execute("DROP TABLE tb99")
tdSql.execute(
"CREATE TABLE IF NOT EXISTS tb99 (ts TIMESTAMP, temperature INT, humidity FLOAT)")
tdSql.query("SELECT * FROM tb99")
tdSql.checkRows(0)
conn.close()

tests/pytest/util/sub.py Normal file
View File

@ -0,0 +1,43 @@
###################################################################
# Copyright (c) 2020 by TAOS Technologies, Inc.
# All rights reserved.
#
# This file is proprietary and confidential to TAOS Technologies.
# No part of this file may be reproduced, stored, transmitted,
# disclosed or used in any form or by any means other than as
# expressly provided by the written permission from Jianhui Tao
#
###################################################################
# -*- coding: utf-8 -*-
import sys
import os
import time
import datetime
from util.log import *
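# TDSub: thin wrapper around a TDengine subscription object, used by the subscription test cases.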
class TDSub:
def __init__(self):
self.consumedRows = 0
self.consumedCols = 0
def init(self, sub):
self.sub = sub
def close(self, keepProgress):
self.sub.close(keepProgress)
def consume(self):
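# Fetch the rows published since the previous consume() call and record the row/column counts for checkRows().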
self.data = self.sub.consume()
self.consumedRows = len(self.data)
self.consumedCols = len(self.sub.fields)
return self.consumedRows
def checkRows(self, expectRows):
if self.consumedRows != expectRows:
tdLog.exit("consumed rows:%d != expect:%d" % (self.consumedRows, expectRows))
tdLog.info("consumed rows:%d == expect:%d" % (self.consumedRows, expectRows))
tdSub = TDSub()

View File

@ -28,7 +28,7 @@ sql create table diskstrm as select count(*), avg(disk_used), last(disk_used), a
sql create table bandstrm as select count(*), avg(band_speed), last(band_speed) from log.dn1 interval(4s) sliding(2s)
sql create table reqstrm as select count(*), avg(req_http), last(req_http), avg(req_select), last(req_select), avg(req_insert), last(req_insert) from log.dn1 interval(4s) sliding(2s)
sql create table iostrm as select count(*), avg(io_read), last(io_read), avg(io_write), last(io_write) from log.dn1 interval(4s) sliding(2s)
sleep 20000
sleep 120000
sql select * from cpustrm
if $rows <= 0 then
return -1

View File

@ -249,6 +249,7 @@ cd ../../../debug; make
./test.sh -f unique/big/maxvnodes.sim
./test.sh -f unique/big/tcp.sim
./test.sh -f unique/cluster/alter.sim
./test.sh -f unique/cluster/balance1.sim
./test.sh -f unique/cluster/balance2.sim
./test.sh -f unique/cluster/balance3.sim
@ -309,13 +310,10 @@ cd ../../../debug; make
./test.sh -f unique/vnode/replica3_repeat.sim
./test.sh -f unique/vnode/replica3_vgroup.sim
# stream still has bugs
#./test.sh -f general/parser/stream_on_sys.sim
#./test.sh -f general/parser/repeatStream.sim
#./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/parser/stream_on_sys.sim
./test.sh -f general/stream/metrics_del.sim
./test.sh -f general/stream/metrics_n.sim
#./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/metrics_replica1_vnoden.sim
./test.sh -f general/stream/restart_stream.sim
./test.sh -f general/stream/stream_3.sim
./test.sh -f general/stream/stream_restart.sim
@ -327,13 +325,12 @@ cd ../../../debug; make
./test.sh -f unique/arbitrator/check_cluster_cfg_para.sim
#./test.sh -f unique/arbitrator/dn2_mn1_cache_file_sync.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_createTableFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_full_dropDnodeFail.sim
./test.sh -f unique/arbitrator/dn3_mn1_multiCreateDropTable.sim
./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_nw_disable_timeout_autoDropDnode.sim
#./test.sh -f unique/arbitrator/dn3_mn1_replica2_wal1_AddDelDnode.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change_dropDnod.sim
./test.sh -f unique/arbitrator/dn3_mn1_replica_change.sim
./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
#./test.sh -f unique/arbitrator/dn3_mn1_stopDnode_timeout.sim
# lower the priority while file corruption
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_change.sim
#./test.sh -f unique/arbitrator/dn3_mn1_vnode_corruptFile_offline.sim

View File

@ -125,7 +125,7 @@ echo "mqttDebugFlag 135" >> $TAOS_CFG
echo "qdebugFlag 135" >> $TAOS_CFG
echo "rpcDebugFlag 135" >> $TAOS_CFG
echo "tmrDebugFlag 131" >> $TAOS_CFG
echo "udebugFlag 143" >> $TAOS_CFG
echo "udebugFlag 135" >> $TAOS_CFG
echo "sdebugFlag 135" >> $TAOS_CFG
echo "wdebugFlag 135" >> $TAOS_CFG
echo "monitor 0" >> $TAOS_CFG

View File

@ -4,10 +4,6 @@ system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c walLevel -v 2
system sh/cfg.sh -n dnode2 -c walLevel -v 2
system sh/cfg.sh -n dnode3 -c walLevel -v 2
system sh/cfg.sh -n dnode1 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode2 -c numOfMnodes -v 3
system sh/cfg.sh -n dnode3 -c numOfMnodes -v 3
@ -16,64 +12,28 @@ system sh/cfg.sh -n dnode1 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode2 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode3 -c mnodeEqualVnodeNum -v 4
system sh/cfg.sh -n dnode1 -c http -v 1
system sh/cfg.sh -n dnode2 -c http -v 1
system sh/cfg.sh -n dnode3 -c http -v 1
system sh/cfg.sh -n dnode1 -c http -v 0
system sh/cfg.sh -n dnode2 -c http -v 0
system sh/cfg.sh -n dnode3 -c http -v 0
system sh/cfg.sh -n dnode1 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c mDebugFlag -v 143
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 20000
system sh/cfg.sh -n dnode1 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode2 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode3 -c sdbDebugFlag -v 143
system sh/cfg.sh -n dnode1 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode2 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode3 -c sdebugFlag -v 143
system sh/cfg.sh -n dnode1 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c rpcDebugFlag -v 135
system sh/cfg.sh -n dnode1 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c tsdbDebugFlag -v 131
system sh/cfg.sh -n dnode1 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode2 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode3 -c mqttDebugFlag -v 131
system sh/cfg.sh -n dnode1 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c qdebugFlag -v 131
system sh/cfg.sh -n dnode1 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode2 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode3 -c cDebugFlag -v 135
system sh/cfg.sh -n dnode1 -c udebugFlag -v 131
system sh/cfg.sh -n dnode2 -c udebugFlag -v 131
system sh/cfg.sh -n dnode3 -c udebugFlag -v 131
system sh/cfg.sh -n dnode1 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode2 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode3 -c wdebugFlag -v 131
system sh/cfg.sh -n dnode1 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode2 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode3 -c maxTablesPerVnode -v 1000000
system sh/cfg.sh -n dnode1 -c replica -v 3
system sh/cfg.sh -n dnode2 -c replica -v 3
system sh/cfg.sh -n dnode3 -c replica -v 3
print ============== deploy
system sh/exec.sh -n dnode1 -s start
sleep 2001
system sh/exec.sh -n dnode1 -s start
sleep 5001
sql connect
sql create dnode $hostname2
sql create dnode $hostname3
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
system sh/exec.sh -n dnode2 -s start
system sh/exec.sh -n dnode3 -s start
print =============== step1
$x = 0
@ -112,8 +72,8 @@ print $data0_3 $data2_3
$x = $x + 1
sleep 2000
if $x == 1000 then
sleep 5000
if $x == 100000 then
return -1
endi

View File

@ -137,8 +137,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data5_2
$dnode2Vtatus = $data7_2
if $dnode3Vtatus != offline then
sleep 2000
@ -204,8 +204,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data5_2
$dnode2Vtatus = $data7_2
print dnode2Vtatus: $dnode3Vtatus
print dnode3Vtatus: $dnode3Vtatus
@ -319,8 +319,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data5_2
$dnode2Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

View File

@ -139,8 +139,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000
@ -206,8 +206,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
@ -325,8 +325,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
@ -386,8 +386,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

View File

@ -358,10 +358,10 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4 $data10_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$thirdDnode_2 = $data10_1
$thirdDnode_3 = $data10_2
$thirdDnode_4 = $data10_3
$thirdDnode_5 = $data10_4
$thirdDnode_2 = $data8_1
$thirdDnode_3 = $data8_2
$thirdDnode_4 = $data8_3
$thirdDnode_5 = $data8_4
if $thirdDnode_2 != null then
sleep 2000
@ -405,10 +405,10 @@ print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $dat
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4
#print $data0_5 $data1_5 $data2_5 $data3_5 $data4_5
#print $data0_6 $data1_6 $data2_6 $data3_6 $data4_6
$sencodDnode_2 = $data7_1
$sencodDnode_3 = $data7_2
$sencodDnode_4 = $data7_3
$sencodDnode_5 = $data7_4
$sencodDnode_2 = $data6_1
$sencodDnode_3 = $data6_2
$sencodDnode_4 = $data6_3
$sencodDnode_5 = $data6_4
if $sencodDnode_2 != null then
sleep 2000

View File

@ -138,8 +138,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000
@ -213,8 +213,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
@ -287,8 +287,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

View File

@ -144,8 +144,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data5_2
$dnode2Vtatus = $data7_2
if $dnode3Vtatus != offline then
sleep 2000
@ -234,8 +234,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode2Vtatus != master then
sleep 2000
@ -313,8 +313,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != offline then
sleep 2000
@ -393,8 +393,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus == offline then
sleep 2000

View File

@ -196,8 +196,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != offline then
sleep 2000
@ -269,8 +269,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != offline then
sleep 2000
@ -325,8 +325,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != slave then
sleep 2000

View File

@ -158,8 +158,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != offline then
sleep 2000
@ -231,8 +231,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != offline then
sleep 2000
@ -287,8 +287,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != slave then
sleep 2000

View File

@ -141,8 +141,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000
@ -201,8 +201,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
@ -301,8 +301,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus
@ -417,8 +417,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
print dnode4Vtatus: $dnode4Vtatus
print dnode3Vtatus: $dnode3Vtatus

View File

@ -144,8 +144,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data5_2
$dnode2Vtatus = $data7_2
if $dnode3Vtatus != offline then
sleep 2000
@ -234,8 +234,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != master then
sleep 2000
@ -313,8 +313,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus != offline then
sleep 2000
@ -392,8 +392,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode2Vtatus = $data9_2
$dnode3Vtatus = $data6_2
$dnode2Vtatus = $data7_2
$dnode3Vtatus = $data5_2
if $dnode2Vtatus == offline then
sleep 2000

View File

@ -151,8 +151,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000
@ -259,8 +259,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode3Vtatus = $data6_3
$dnode2Vtatus = $data9_3
$dnode3Vtatus = $data5_3
$dnode2Vtatus = $data7_3
if $dnode3Vtatus != offline then
sleep 2000

View File

@ -139,8 +139,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -144,8 +144,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -139,8 +139,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000
@ -207,8 +207,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != slave then
sleep 2000
@ -240,8 +240,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != master then
sleep 2000
@ -314,8 +314,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != slave then
sleep 2000
@ -347,8 +347,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != master then
sleep 2000
@ -437,8 +437,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != slave then
sleep 2000
@ -470,8 +470,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != master then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -143,8 +143,8 @@ print show vgroups:
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
$dnode4Vtatus = $data6_2
$dnode3Vtatus = $data9_2
$dnode4Vtatus = $data5_2
$dnode3Vtatus = $data7_2
if $dnode4Vtatus != offline then
sleep 2000

View File

@ -0,0 +1,174 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/deploy.sh -n dnode4 -i 4
system sh/cfg.sh -n dnode1 -c role -v 1
system sh/cfg.sh -n dnode2 -c role -v 2
system sh/cfg.sh -n dnode3 -c role -v 2
system sh/cfg.sh -n dnode4 -c role -v 2
system sh/cfg.sh -n dnode1 -c balanceInterval -v 10
system sh/cfg.sh -n dnode2 -c balanceInterval -v 10
system sh/cfg.sh -n dnode3 -c balanceInterval -v 10
system sh/cfg.sh -n dnode4 -c balanceInterval -v 10
system sh/cfg.sh -n dnode1 -c wallevel -v 2
system sh/cfg.sh -n dnode2 -c wallevel -v 2
system sh/cfg.sh -n dnode3 -c wallevel -v 2
system sh/cfg.sh -n dnode4 -c wallevel -v 2
system sh/cfg.sh -n dnode1 -c balance -v 0
system sh/cfg.sh -n dnode2 -c balance -v 0
system sh/cfg.sh -n dnode3 -c balance -v 0
system sh/cfg.sh -n dnode4 -c balance -v 0
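# Manual vnode migration test: automatic balancing is disabled (balance 0), so vnode 2 is moved between dnodes only via "alter dnode ... balance" commands.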
print ========== step1
system sh/exec.sh -n dnode1 -s start
sql connect
sleep 3000
sql create dnode $hostname2
system sh/exec.sh -n dnode2 -s start
sleep 3000
print ========== step2
sql create database d1
sql create table d1.t1 (t timestamp, i int)
sql insert into d1.t1 values(now+1s, 15)
sql insert into d1.t1 values(now+2s, 14)
sql insert into d1.t1 values(now+3s, 13)
sql insert into d1.t1 values(now+4s, 12)
sql insert into d1.t1 values(now+5s, 11)
print ========== step3
sleep 2000
sql create dnode $hostname3
system sh/exec.sh -n dnode3 -s start
sql create dnode $hostname4
system sh/exec.sh -n dnode4 -s start
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_1 != 0 then
return -1
endi
if $data2_2 != 1 then
return -1
endi
if $data2_3 != 0 then
return -1
endi
if $data2_4 != 0 then
return -1
endi
print ========== step4
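# Move vnode 2 from dnode 2 to dnode 3 and wait until the open-vnode counts reflect the migration.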
sql alter dnode 2 balance "vnode:2-dnode:3"
$x = 0
show4:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
goto show4
endi
if $data2_3 != 1 then
goto show4
endi
if $data2_4 != 0 then
goto show4
endi
print ========== step5
sql alter dnode 3 balance "vnode:2-dnode:4"
$x = 0
show5:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 0 then
goto show5
endi
if $data2_3 != 0 then
goto show5
endi
if $data2_4 != 1 then
goto show5
endi
print ========== step6
sql alter dnode 4 balance "vnode:2-dnode:2"
$x = 0
show6:
$x = $x + 1
sleep 2000
if $x == 10 then
return -1
endi
sql show dnodes
print dnode1 openVnodes $data2_1
print dnode2 openVnodes $data2_2
print dnode3 openVnodes $data2_3
print dnode4 openVnodes $data2_4
if $data2_2 != 1 then
goto show6
endi
if $data2_3 != 0 then
goto show6
endi
if $data2_4 != 0 then
goto show6
endi
print ========== step7
sql select * from d1.t1 order by t desc
print $data01 $data11 $data21 $data31 $data41
if $data01 != 11 then
return -1
endi
if $data11 != 12 then
return -1
endi
if $data21 != 13 then
return -1
endi
if $data31 != 14 then
return -1
endi
if $data41 != 15 then
return -1
endi
print ========== step8
sql_error alter dnode 4 balance "vnode:2-dnode:5"
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
system sh/exec.sh -n dnode4 -s stop -x SIGINT

View File

@ -194,13 +194,13 @@ print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $dat
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data6_4
$d2v3status = $data6_2
$d2v4status = $data6_3
$d2v2status = $data5_4
$d2v3status = $data5_2
$d2v4status = $data5_3
$d1v2status = $data9_4
$d1v3status = $data9_2
$d1v4status = $data9_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000

View File

@ -196,13 +196,13 @@ print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $dat
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data6_4
$d2v3status = $data6_2
$d2v4status = $data6_3
$d2v2status = $data5_4
$d2v3status = $data5_2
$d2v4status = $data5_3
$d1v2status = $data9_4
$d1v3status = $data9_2
$d1v4status = $data9_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000

View File

@ -161,13 +161,13 @@ print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $dat
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data6_4
$d2v3status = $data6_2
$d2v4status = $data6_3
$d2v2status = $data5_4
$d2v3status = $data5_2
$d2v4status = $data5_3
$d1v2status = $data9_4
$d1v3status = $data9_2
$d1v4status = $data9_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000

View File

@ -194,13 +194,13 @@ print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $dat
print $data0_2 $data1_2 $data2_2 $data3_2 $data4_2 $data5_2 $data6_2 $data7_2 $data8_2 $data9_2
print $data0_3 $data1_3 $data2_3 $data3_3 $data4_3 $data5_3 $data6_3 $data7_3 $data8_3 $data9_3
print $data0_4 $data1_4 $data2_4 $data3_4 $data4_4 $data5_4 $data6_4 $data7_4 $data8_4 $data9_4
$d2v2status = $data6_4
$d2v3status = $data6_2
$d2v4status = $data6_3
$d2v2status = $data5_4
$d2v3status = $data5_2
$d2v4status = $data5_3
$d1v2status = $data9_4
$d1v3status = $data9_2
$d1v4status = $data9_3
$d1v2status = $data7_4
$d1v3status = $data7_2
$d1v4status = $data7_3
if $d2v2status != master then
sleep 2000

View File

@ -10,8 +10,8 @@ IF ((TD_LINUX_64) OR (TD_LINUX_32 AND TD_ARM))
#add_executable(insertPerTable insertPerTable.c)
#target_link_libraries(insertPerTable taos_static pthread)
#add_executable(insertPerRow insertPerRow.c)
#target_link_libraries(insertPerRow taos_static pthread)
add_executable(insertPerRow insertPerRow.c)
target_link_libraries(insertPerRow taos_static pthread)
#add_executable(importOneRow importOneRow.c)
#target_link_libraries(importOneRow taos_static pthread)

View File

@ -44,14 +44,16 @@ void createDbAndTable();
void insertData();
int32_t randomData[MAX_RANDOM_POINTS];
int64_t rowsPerTable = 10000;
int64_t rowsPerTable = 1000000000;
int64_t pointsPerTable = 1;
int64_t numOfThreads = 1;
int64_t numOfTablesPerThread = 200;
int64_t numOfThreads = 10;
int64_t numOfTablesPerThread = 100;
char dbName[32] = "db";
char stableName[64] = "st";
int32_t cache = 16;
int32_t tables = 5000;
int32_t cache = 1;
int32_t replica = 3;
int32_t days = 10;
int32_t interval = 1000;
int main(int argc, char *argv[]) {
shellParseArgument(argc, argv);
@ -77,7 +79,7 @@ void createDbAndTable() {
exit(1);
}
sprintf(qstr, "create database if not exists %s cache %d maxtables %d", dbName, cache, tables);
sprintf(qstr, "create database if not exists %s cache %d replica %d days %d", dbName, cache, replica, days);
pSql = taos_query(con, qstr);
int32_t code = taos_errno(pSql);
if (code != 0) {
@ -239,7 +241,7 @@ void *syncTest(void *param) {
st = systemTime.tv_sec * 1000000 + systemTime.tv_usec;
int64_t start = 1430000000000;
int64_t interval = 1000; // 1000 ms
interval = 1000; // 1000 ms
char *sql = qstr;
char inserStr[] = "insert into";
@ -309,10 +311,14 @@ void printHelp() {
printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of threads to be used, default is ", numOfThreads);
printf("%s%s\n", indent, "-n");
printf("%s%s%s%" PRId64 "\n", indent, indent, "Number of tables per thread, default is ", numOfTablesPerThread);
printf("%s%s\n", indent, "-tables");
printf("%s%s%s%d\n", indent, indent, "Database parameters tables, default is ", tables);
printf("%s%s\n", indent, "-replica");
printf("%s%s%s%d\n", indent, indent, "Database parameters replica, default is ", replica);
printf("%s%s\n", indent, "-cache");
printf("%s%s%s%d\n", indent, indent, "Database parameters cache, default is ", cache);
printf("%s%s%s%d\n", indent, indent, "Database parameters replica, default is ", cache);
printf("%s%s\n", indent, "-days");
printf("%s%s%s%d\n", indent, indent, "Database parameters days, default is ", days);
printf("%s%s\n", indent, "-interval");
printf("%s%s%s%d\n", indent, indent, "Interval of each rows in ms, default is ", interval);
exit(EXIT_SUCCESS);
}
@ -336,10 +342,14 @@ void shellParseArgument(int argc, char *argv[]) {
numOfThreads = atoi(argv[++i]);
} else if (strcmp(argv[i], "-n") == 0) {
numOfTablesPerThread = atoi(argv[++i]);
} else if (strcmp(argv[i], "-tables") == 0) {
tables = atoi(argv[++i]);
} else if (strcmp(argv[i], "-replica") == 0) {
replica = atoi(argv[++i]);
} else if (strcmp(argv[i], "-cache") == 0) {
cache = atoi(argv[++i]);
} else if (strcmp(argv[i], "-days") == 0) {
days = atoi(argv[++i]);
} else if (strcmp(argv[i], "-interval") == 0) {
interval = atoi(argv[++i]);
} else {
}
}
@ -349,7 +359,7 @@ void shellParseArgument(int argc, char *argv[]) {
pPrint("%snumOfThreads:%" PRId64 "%s", GREEN, numOfThreads, NC);
pPrint("%snumOfTablesPerThread:%" PRId64 "%s", GREEN, numOfTablesPerThread, NC);
pPrint("%scache:%" PRId32 "%s", GREEN, cache, NC);
pPrint("%stables:%" PRId32 "%s", GREEN, tables, NC);
pPrint("%stables:%" PRId32 "%s", GREEN, replica, NC);
pPrint("%sdbName:%s%s", GREEN, dbName, NC);
pPrint("%stableName:%s%s", GREEN, stableName, NC);
pPrint("%sstart to run%s", GREEN, NC);

View File

@ -670,12 +670,12 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
ret = taos_errno(pSql);
if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST || ret == TSDB_CODE_MND_DB_ALREADY_EXIST) {
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret));
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
ret = 0;
break;
} else if (ret != 0) {
simDebug("script:%s, taos:%p, %s failed, ret:%d:%s, error:%s",
script->fileName, script->taos, rest, ret, tstrerror(ret), taos_errstr(pSql));
script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret), taos_errstr(pSql));
if (line->errorJump == SQL_JUMP_TRUE) {
script->linePos = line->jump;
@ -691,7 +691,7 @@ bool simExecuteNativeSqlCommand(SScript *script, char *rest, bool isSlow) {
}
if (ret) {
sprintf(script->error, "lineNum:%d. sql:%s failed, ret:%d:%s", line->lineNum, rest, ret, tstrerror(ret));
sprintf(script->error, "lineNum:%d. sql:%s failed, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret));
return false;
}
@ -821,7 +821,7 @@ bool simExecuteRestFulSqlCommand(SScript *script, char *rest) {
ret = simExecuteRestFulCommand(script, command);
if (ret == TSDB_CODE_MND_TABLE_ALREADY_EXIST ||
ret == TSDB_CODE_MND_DB_ALREADY_EXIST) {
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret, tstrerror(ret));
simDebug("script:%s, taos:%p, %s success, ret:%d:%s", script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
ret = 0;
break;
} else if (ret != 0) {
@ -957,12 +957,12 @@ bool simExecuteSqlErrorCmd(SScript *script, char *rest) {
if (ret != TSDB_CODE_SUCCESS) {
simDebug("script:%s, taos:%p, %s execute, expect failed, so success, ret:%d:%s",
script->fileName, script->taos, rest, ret, tstrerror(ret));
script->fileName, script->taos, rest, ret & 0XFFFF, tstrerror(ret));
script->linePos++;
return true;
}
sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret, tstrerror(ret));
sprintf(script->error, "lineNum:%d. sql:%s expect failed, but success, ret:%d:%s", line->lineNum, rest, ret & 0XFFFF, tstrerror(ret));
return false;
}