diff --git a/.travis.yml b/.travis.yml
index 9fefa61f8c..877e717eaf 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -90,6 +90,7 @@ matrix:
esac
- os: linux
+ dist: bionic
language: c
compiler: gcc
env: COVERITY_SCAN=true
@@ -125,6 +126,7 @@ matrix:
branch_pattern: coverity_scan
- os: linux
+ dist: bionic
language: c
compiler: gcc
env: ENV_COVER=true
@@ -230,6 +232,7 @@ matrix:
- make > /dev/null
- os: linux
+ dist: bionic
language: c
compiler: clang
env: DESC="linux/clang build"
diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c
index 2da786d1d8..79fa1e28cc 100644
--- a/src/client/src/tscLocal.c
+++ b/src/client/src/tscLocal.c
@@ -56,7 +56,7 @@ static int32_t getToStringLength(const char *pData, int32_t length, int32_t type
} break;
case TSDB_DATA_TYPE_TIMESTAMP:
case TSDB_DATA_TYPE_BIGINT:
- len = sprintf(buf, "%" PRId64 "", *(int64_t *)pData);
+ len = sprintf(buf, "%" PRId64, *(int64_t *)pData);
break;
case TSDB_DATA_TYPE_BOOL:
len = MAX_BOOL_TYPE_LENGTH;
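The fix above drops the redundant empty string literal after the `PRId64` macro: adjacent string literals concatenate at compile time, so `"%" PRId64 ""` and `"%" PRId64` produce the same format string. A minimal sketch of the `<inttypes.h>` macro in use:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t ts = 1577836800000LL;
  char buf[32];
  /* "%" PRId64 expands to "%lld" or "%ld" depending on the platform,
   * so the trailing "" in "%" PRId64 "" was a no-op and can be dropped. */
  int len = snprintf(buf, sizeof(buf), "%" PRId64, ts);
  printf("%s (%d chars)\n", buf, len);
  return 0;
}
```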
diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c
index 56f0b5099d..ca9f439e4c 100644
--- a/src/client/src/tscParseInsert.c
+++ b/src/client/src/tscParseInsert.c
@@ -42,35 +42,35 @@ enum {
static int32_t tscAllocateMemIfNeed(STableDataBlocks *pDataBlock, int32_t rowSize, int32_t * numOfRows);
static int32_t tscToInteger(SSQLToken *pToken, int64_t *value, char **endPtr) {
- int32_t numType = isValidNumber(pToken);
- if (TK_ILLEGAL == numType) {
- return numType;
- }
+// int32_t numType = isValidNumber(pToken);
+// if (TK_ILLEGAL == numType) {
+// return numType;
+// }
int32_t radix = 10;
- if (numType == TK_HEX) {
+ if (pToken->type == TK_HEX) {
radix = 16;
- } else if (numType == TK_OCT) {
+ } else if (pToken->type == TK_OCT) {
radix = 8;
- } else if (numType == TK_BIN) {
+ } else if (pToken->type == TK_BIN) {
radix = 2;
}
errno = 0;
*value = strtoll(pToken->z, endPtr, radix);
- return numType;
+ return pToken->type;
}
static int32_t tscToDouble(SSQLToken *pToken, double *value, char **endPtr) {
- int32_t numType = isValidNumber(pToken);
- if (TK_ILLEGAL == numType) {
- return numType;
- }
+// int32_t numType = isValidNumber(pToken);
+// if (TK_ILLEGAL == numType) {
+// return numType;
+// }
errno = 0;
*value = strtod(pToken->z, endPtr);
- return numType;
+ return pToken->type;
}
int tsParseTime(SSQLToken *pToken, int64_t *time, char **next, char *error, int16_t timePrec) {
@@ -779,7 +779,6 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
STagData *pTag = (STagData *)pCmd->payload;
memset(pTag, 0, sizeof(STagData));
- pCmd->payloadLen = sizeof(STagData);
/*
* the source super table is moved to the secondary position of the pTableMetaInfo list
@@ -928,6 +927,14 @@ static int32_t tscCheckIfCreateTable(char **sqlstr, SSqlObj *pSql) {
}
}
+ // 3. calculate the actual data size of STagData
+ pCmd->payloadLen = sizeof(pTag->name) + sizeof(pTag->dataLen);
+ for (int32_t t = 0; t < numOfTags; ++t) {
+ pTag->dataLen += pTagSchema[t].bytes;
+ pCmd->payloadLen += pTagSchema[t].bytes;
+ }
+ pTag->dataLen = htonl(pTag->dataLen);
+
if (tscValidateName(&tableToken) != TSDB_CODE_SUCCESS) {
return tscInvalidSQLErrMsg(pCmd->payload, "invalid table name", *sqlstr);
}
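The new step 3 sizes the insert payload from the tag schema instead of assuming a fixed-size `STagData`, and stores the accumulated tag-data length in network byte order. A minimal sketch of the same calculation, using simplified stand-in types (field names and sizes here are illustrative, not the exact TDengine definitions):

```c
#include <arpa/inet.h> /* htonl */
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for STagData and the tag schema. */
typedef struct { int32_t bytes; } TagSchema;
typedef struct { char name[64]; int32_t dataLen; char data[512]; } TagData;

int main(void) {
  TagSchema schema[] = {{8}, {4}, {32}};
  int numOfTags = 3;
  TagData tag = {0};

  /* Mirror the patch: payload = the fixed name/dataLen fields,
   * then each tag value's bytes. */
  int32_t payloadLen = sizeof(tag.name) + sizeof(tag.dataLen);
  for (int t = 0; t < numOfTags; ++t) {
    tag.dataLen += schema[t].bytes;
    payloadLen += schema[t].bytes;
  }
  tag.dataLen = htonl(tag.dataLen); /* wire format is big-endian */

  printf("payloadLen=%d dataLen(net)=0x%08x\n", payloadLen, (unsigned)tag.dataLen);
  return 0;
}
```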
diff --git a/src/client/src/tscProfile.c b/src/client/src/tscProfile.c
index 61fc1a19c8..74294d38e0 100644
--- a/src/client/src/tscProfile.c
+++ b/src/client/src/tscProfile.c
@@ -23,8 +23,6 @@
void tscSaveSlowQueryFp(void *handle, void *tmrId);
void *tscSlowQueryConn = NULL;
bool tscSlowQueryConnInitialized = false;
-TAOS *taos_connect_a(char *ip, char *user, char *pass, char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int),
- void *param, void **taos);
void tscInitConnCb(void *param, TAOS_RES *result, int code) {
char *sql = param;
diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c
index f9ce16471a..a134410bc4 100644
--- a/src/client/src/tscSQLParser.c
+++ b/src/client/src/tscSQLParser.c
@@ -4416,6 +4416,7 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) {
TSDB_CODE_SUCCESS) {
return invalidSqlErrMsg(pQueryInfo->msg, msg13);
}
+ pAlterSQL->tagData.dataLen = pTagsSchema->bytes;
// validate the length of binary
if ((pTagsSchema->type == TSDB_DATA_TYPE_BINARY || pTagsSchema->type == TSDB_DATA_TYPE_NCHAR) &&
@@ -4539,11 +4540,13 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_INVALID_SQL;
}
- const SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[14] = {
- {"resetLog", 8}, {"resetQueryCache", 15}, {"dDebugFlag", 10}, {"rpcDebugFlag", 12},
- {"tmrDebugFlag", 12}, {"cDebugFlag", 10}, {"uDebugFlag", 10}, {"mDebugFlag", 10},
- {"sdbDebugFlag", 12}, {"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"qDebugflag", 10},
- {"debugFlag", 9}, {"monitor", 7}};
+ const int DNODE_DYNAMIC_CFG_OPTIONS_SIZE = 17;
+ const SDNodeDynConfOption DNODE_DYNAMIC_CFG_OPTIONS[] = {
+ {"resetLog", 8}, {"resetQueryCache", 15}, {"debugFlag", 9}, {"mDebugFlag", 10},
+ {"dDebugFlag", 10}, {"sdbDebugFlag", 12}, {"vDebugFlag", 10}, {"cDebugFlag", 10},
+ {"httpDebugFlag", 13}, {"monitorDebugFlag", 16}, {"rpcDebugFlag", 12}, {"uDebugFlag", 10},
+ {"tmrDebugFlag", 12}, {"qDebugflag", 10}, {"sDebugflag", 10}, {"tsdbDebugFlag", 13},
+ {"monitor", 7}};
SSQLToken* pOptionToken = &pOptions->a[1];
@@ -4555,8 +4558,8 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_SUCCESS;
}
}
- } else if ((strncasecmp(DNODE_DYNAMIC_CFG_OPTIONS[13].name, pOptionToken->z, pOptionToken->n) == 0) &&
- (DNODE_DYNAMIC_CFG_OPTIONS[13].len == pOptionToken->n)) {
+ } else if ((strncasecmp(DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].name, pOptionToken->z, pOptionToken->n) == 0) &&
+ (DNODE_DYNAMIC_CFG_OPTIONS[DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1].len == pOptionToken->n)) {
SSQLToken* pValToken = &pOptions->a[2];
int32_t val = strtol(pValToken->z, NULL, 10);
if (val != 0 && val != 1) {
@@ -4572,7 +4575,7 @@ int32_t validateDNodeConfig(tDCLSQL* pOptions) {
return TSDB_CODE_INVALID_SQL;
}
- for (int32_t i = 2; i < tListLen(DNODE_DYNAMIC_CFG_OPTIONS) - 1; ++i) {
+ for (int32_t i = 2; i < DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1; ++i) {
const SDNodeDynConfOption* pOption = &DNODE_DYNAMIC_CFG_OPTIONS[i];
if ((strncasecmp(pOption->name, pOptionToken->z, pOptionToken->n) == 0) && (pOption->len == pOptionToken->n)) {
@@ -5550,11 +5553,11 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
// too long tag values will return invalid sql, not be truncated automatically
SSchema* pTagSchema = tscGetTableTagSchema(pStableMeterMetaInfo->pTableMeta);
- char* tagVal = pCreateTable->usingInfo.tagdata.data;
+ STagData* pTag = &pCreateTable->usingInfo.tagdata;
+ char* tagVal = pTag->data;
int32_t ret = TSDB_CODE_SUCCESS;
for (int32_t i = 0; i < pList->nExpr; ++i) {
-
if (pTagSchema[i].type == TSDB_DATA_TYPE_BINARY || pTagSchema[i].type == TSDB_DATA_TYPE_NCHAR) {
// validate the length of binary
if (pList->a[i].pVar.nLen + VARSTR_HEADER_SIZE > pTagSchema[i].bytes) {
@@ -5593,6 +5596,7 @@ int32_t doCheckForCreateFromStable(SSqlObj* pSql, SSqlInfo* pInfo) {
return ret;
}
+ pTag->dataLen = tagVal - pTag->data;
return TSDB_CODE_SUCCESS;
}
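The option table now addresses its terminal entry via `DNODE_DYNAMIC_CFG_OPTIONS_SIZE - 1` instead of the hard-coded index 13, so growing the table no longer silently breaks the `monitor` special case; deriving the count from the array itself (the `sizeof a / sizeof a[0]` idiom) would also keep the constant from drifting out of sync. A small sketch of table-driven option matching under the same "boolean option last" convention (names are illustrative):

```c
#include <stdio.h>
#include <strings.h> /* strncasecmp */

typedef struct { const char *name; int len; } ConfOption;

/* Keep the boolean-valued option ("monitor") last so a generic loop can
 * stop one entry short of the end, as the patch does. */
static const ConfOption kOptions[] = {
    {"resetLog", 8}, {"debugFlag", 9}, {"monitor", 7},
};
static const int kNumOptions = (int)(sizeof(kOptions) / sizeof(kOptions[0]));

static int matchOption(const char *tok, int n) {
  for (int i = 0; i < kNumOptions; ++i) {
    if (kOptions[i].len == n && strncasecmp(kOptions[i].name, tok, n) == 0) {
      return i;
    }
  }
  return -1;
}

int main(void) {
  printf("monitor -> %d (last index %d)\n", matchOption("MONITOR", 7), kNumOptions - 1);
  return 0;
}
```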
diff --git a/src/client/src/tscSecondaryMerge.c b/src/client/src/tscSecondaryMerge.c
index 575f7ee8f4..251d4079e3 100644
--- a/src/client/src/tscSecondaryMerge.c
+++ b/src/client/src/tscSecondaryMerge.c
@@ -808,18 +808,19 @@ void adjustLoserTreeFromNewData(SLocalReducer *pLocalReducer, SLocalDataSource *
}
}
-void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo,
- SFillInfo *pFillInfo) {
+void savePrevRecordAndSetupInterpoInfo(SLocalReducer *pLocalReducer, SQueryInfo *pQueryInfo, SFillInfo *pFillInfo) {
// discard following dataset in the same group and reset the interpolation information
STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0);
STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta);
- int16_t prec = tinfo.precision;
- int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
- int64_t revisedSTime =
- taosGetIntervalStartTimestamp(stime, pQueryInfo->intervalTime, pQueryInfo->slidingTimeUnit, prec);
- taosResetFillInfo(pFillInfo, revisedSTime);
+ if (pFillInfo != NULL) {
+ int64_t stime = (pQueryInfo->window.skey < pQueryInfo->window.ekey) ? pQueryInfo->window.skey : pQueryInfo->window.ekey;
+ int64_t revisedSTime =
+ taosGetIntervalStartTimestamp(stime, pQueryInfo->slidingTime, pQueryInfo->slidingTimeUnit, tinfo.precision);
+
+ taosResetFillInfo(pFillInfo, revisedSTime);
+ }
pLocalReducer->discard = true;
pLocalReducer->discardData->num = 0;
@@ -915,13 +916,12 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
if (pQueryInfo->limit.limit >= 0 && pRes->numOfClauseTotal > pQueryInfo->limit.limit) {
/* impose the limitation of output rows on the final result */
int32_t prevSize = pFinalDataPage->num;
- int32_t overFlow = pRes->numOfClauseTotal - pQueryInfo->limit.limit;
-
- assert(overFlow < pRes->numOfRows);
+ int32_t overflow = pRes->numOfClauseTotal - pQueryInfo->limit.limit;
+ assert(overflow < pRes->numOfRows);
pRes->numOfClauseTotal = pQueryInfo->limit.limit;
- pRes->numOfRows -= overFlow;
- pFinalDataPage->num -= overFlow;
+ pRes->numOfRows -= overflow;
+ pFinalDataPage->num -= overflow;
tColModelCompact(pLocalReducer->resColModel, pFinalDataPage, prevSize);
@@ -988,13 +988,13 @@ static void doInterpolateResult(SSqlObj *pSql, SLocalReducer *pLocalReducer, boo
if (pRes->numOfRows > 0) {
if (pQueryInfo->limit.limit >= 0 && pRes->numOfClauseTotal > pQueryInfo->limit.limit) {
- int32_t overFlow = pRes->numOfClauseTotal - pQueryInfo->limit.limit;
- pRes->numOfRows -= overFlow;
+ int32_t overflow = pRes->numOfClauseTotal - pQueryInfo->limit.limit;
+ pRes->numOfRows -= overflow;
assert(pRes->numOfRows >= 0);
pRes->numOfClauseTotal = pQueryInfo->limit.limit;
- pFinalDataPage->num -= overFlow;
+ pFinalDataPage->num -= overflow;
/* set remain data to be discarded, and reset the interpolation information */
savePrevRecordAndSetupInterpoInfo(pLocalReducer, pQueryInfo, pFillInfo);
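Besides guarding `savePrevRecordAndSetupInterpoInfo` against a NULL `pFillInfo`, the hunks above rename `overFlow` to `overflow` in the limit-trimming logic. A minimal sketch of that trimming step, with hypothetical row counts:

```c
#include <assert.h>
#include <stdio.h>

/* Once the accumulated total exceeds the LIMIT, drop the overflow rows
 * from the final result page. */
int main(void) {
  long limit = 100, total = 130;  /* rows emitted so far, including this page */
  int pageRows = 40;              /* rows in the final page */

  if (limit >= 0 && total > limit) {
    int overflow = (int)(total - limit);
    assert(overflow < pageRows);  /* the excess must fit inside the last page */
    pageRows -= overflow;
    total = limit;
  }
  printf("page now holds %d rows, total=%ld\n", pageRows, total);
  return 0;
}
```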
diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c
index e36fb0d86a..d906550591 100644
--- a/src/client/src/tscServer.c
+++ b/src/client/src/tscServer.c
@@ -1213,8 +1213,13 @@ int tscBuildCreateTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
int8_t type = pInfo->pCreateTableInfo->type;
if (type == TSQL_CREATE_TABLE_FROM_STABLE) { // create by using super table, tags value
- memcpy(pMsg, &pInfo->pCreateTableInfo->usingInfo.tagdata, sizeof(STagData));
- pMsg += sizeof(STagData);
+ STagData* pTag = &pInfo->pCreateTableInfo->usingInfo.tagdata;
+ *(int32_t*)pMsg = htonl(pTag->dataLen);
+ pMsg += sizeof(int32_t);
+ memcpy(pMsg, pTag->name, sizeof(pTag->name));
+ pMsg += sizeof(pTag->name);
+ memcpy(pMsg, pTag->data, pTag->dataLen);
+ pMsg += pTag->dataLen;
} else { // create (super) table
pSchema = (SSchema *)pCreateTableMsg->schema;
@@ -1281,9 +1286,7 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
strcpy(pAlterTableMsg->tableId, pTableMetaInfo->name);
pAlterTableMsg->type = htons(pAlterInfo->type);
- pAlterTableMsg->numOfCols = tscNumOfFields(pQueryInfo);
- memcpy(pAlterTableMsg->tagVal, pAlterInfo->tagData.data, TSDB_MAX_TAGS_LEN);
-
+ pAlterTableMsg->numOfCols = htons(tscNumOfFields(pQueryInfo));
SSchema *pSchema = pAlterTableMsg->schema;
for (int i = 0; i < pAlterTableMsg->numOfCols; ++i) {
TAOS_FIELD *pField = tscFieldInfoGetField(&pQueryInfo->fieldsInfo, i);
@@ -1295,6 +1298,9 @@ int tscBuildAlterTableMsg(SSqlObj *pSql, SSqlInfo *pInfo) {
}
pMsg = (char *)pSchema;
+ pAlterTableMsg->tagValLen = htonl(pAlterInfo->tagData.dataLen);
+ memcpy(pMsg, pAlterInfo->tagData.data, pAlterInfo->tagData.dataLen);
+ pMsg += pAlterInfo->tagData.dataLen;
msgLen = pMsg - (char*)pAlterTableMsg;
pCmd->payloadLen = msgLen;
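`tscBuildCreateTableMsg` now serializes the tag data as a length-prefixed variable-size record instead of copying the whole fixed-size `STagData` struct, which shrinks the message whenever the tag values are short. A sketch of the encoding, mirroring the patch's cast-and-advance style (struct sizes here are illustrative):

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Wire layout the patch switches to:
 * [int32 dataLen (network order)][name][dataLen bytes of tag values]. */
typedef struct { int32_t dataLen; char name[16]; char data[64]; } Tag;

static size_t serialize(char *msg, const Tag *t) {
  char *p = msg;
  *(int32_t *)p = htonl(t->dataLen);
  p += sizeof(int32_t);
  memcpy(p, t->name, sizeof(t->name));
  p += sizeof(t->name);
  memcpy(p, t->data, (size_t)t->dataLen); /* only the bytes actually used */
  p += t->dataLen;
  return (size_t)(p - msg);
}

int main(void) {
  Tag t = {.dataLen = 5};
  strcpy(t.name, "t1");
  memcpy(t.data, "hello", 5);
  char buf[128];
  printf("wire size: %zu bytes (vs %zu for the full struct)\n",
         serialize(buf, &t), sizeof(Tag));
  return 0;
}
```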
diff --git a/src/client/src/tscStream.c b/src/client/src/tscStream.c
index f42bf819ca..fd84a2b759 100644
--- a/src/client/src/tscStream.c
+++ b/src/client/src/tscStream.c
@@ -165,7 +165,7 @@ static void tscSetTimestampForRes(SSqlStream *pStream, SSqlObj *pSql) {
if (timestamp != actualTimestamp) {
// reset the timestamp of each agg point by using start time of each interval
*((int64_t *)pRes->data) = actualTimestamp;
- tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64 "", pSql, pStream, timestamp, actualTimestamp);
+ tscWarn("%p stream:%p, timestamp of points is:%" PRId64 ", reset to %" PRId64, pSql, pStream, timestamp, actualTimestamp);
}
}
@@ -287,10 +287,10 @@ static void tscSetRetryTimer(SSqlStream *pStream, SSqlObj *pSql, int64_t timer)
return;
}
- tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
+ tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
now + timer, timer, delay, pStream->stime, etime);
} else {
- tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64 "", pStream->pSql, pStream,
+ tscTrace("%p stream:%p, next start at %" PRId64 ", in %" PRId64 "ms. delay:%" PRId64 "ms qrange %" PRId64 "-%" PRId64, pStream->pSql, pStream,
pStream->stime, timer, delay, pStream->stime - pStream->interval, pStream->stime - 1);
}
@@ -380,7 +380,7 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
SQueryInfo* pQueryInfo = tscGetQueryInfoDetail(&pSql->cmd, 0);
if (pQueryInfo->intervalTime < minIntervalTime) {
- tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64 "", pSql, pStream,
+ tscWarn("%p stream:%p, original sample interval:%ld too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->intervalTime, minIntervalTime);
pQueryInfo->intervalTime = minIntervalTime;
}
@@ -397,14 +397,14 @@ static void tscSetSlidingWindowInfo(SSqlObj *pSql, SSqlStream *pStream) {
if (pQueryInfo->slidingTime == -1) {
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
} else if (pQueryInfo->slidingTime < minSlidingTime) {
- tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64 "", pSql, pStream,
+ tscWarn("%p stream:%p, original sliding value:%" PRId64 " too small, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, minSlidingTime);
pQueryInfo->slidingTime = minSlidingTime;
}
if (pQueryInfo->slidingTime > pQueryInfo->intervalTime) {
- tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64 "", pSql, pStream,
+ tscWarn("%p stream:%p, sliding value:%" PRId64 " can not be larger than interval range, reset to:%" PRId64, pSql, pStream,
pQueryInfo->slidingTime, pQueryInfo->intervalTime);
pQueryInfo->slidingTime = pQueryInfo->intervalTime;
@@ -433,11 +433,11 @@ static int64_t tscGetStreamStartTimestamp(SSqlObj *pSql, SSqlStream *pStream, in
} else { // timewindow based aggregation stream
if (stime == 0) { // no data in meter till now
stime = ((int64_t)taosGetTimestamp(pStream->precision) / pStream->interval) * pStream->interval;
- tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64 "", pSql, pStream, stime);
+ tscWarn("%p stream:%p, last timestamp:0, reset to:%" PRId64, pSql, pStream, stime);
} else {
int64_t newStime = (stime / pStream->interval) * pStream->interval;
if (newStime != stime) {
- tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64 "", pSql, pStream, stime, newStime);
+ tscWarn("%p stream:%p, last timestamp:%" PRId64 ", reset to:%" PRId64, pSql, pStream, stime, newStime);
stime = newStime;
}
}
diff --git a/src/common/inc/tdataformat.h b/src/common/inc/tdataformat.h
index 51a5dad486..956121086c 100644
--- a/src/common/inc/tdataformat.h
+++ b/src/common/inc/tdataformat.h
@@ -106,9 +106,28 @@ typedef void *SDataRow;
SDataRow tdNewDataRowFromSchema(STSchema *pSchema);
void tdFreeDataRow(SDataRow row);
void tdInitDataRow(SDataRow row, STSchema *pSchema);
-int tdAppendColVal(SDataRow row, void *value, int8_t type, int32_t bytes, int32_t offset);
SDataRow tdDataRowDup(SDataRow row);
+static FORCE_INLINE int tdAppendColVal(SDataRow row, void *value, int8_t type, int32_t bytes, int32_t offset) {
+ ASSERT(value != NULL);
+ int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
+ char * ptr = (char *)POINTER_SHIFT(row, dataRowLen(row));
+
+ switch (type) {
+ case TSDB_DATA_TYPE_BINARY:
+ case TSDB_DATA_TYPE_NCHAR:
+ *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
+ memcpy(ptr, value, varDataTLen(value));
+ dataRowLen(row) += varDataTLen(value);
+ break;
+ default:
+ memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]);
+ break;
+ }
+
+ return 0;
+}
+
// NOTE: offset here including the header size
static FORCE_INLINE void *tdGetRowDataOfCol(SDataRow row, int8_t type, int32_t offset) {
switch (type) {
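`tdAppendColVal` moves out of tdataformat.c into the header as `static FORCE_INLINE`, so callers on the hot write path can inline it in every translation unit. The general pattern, reduced to a runnable toy (the append body here is a simplified stand-in, not the real row layout):

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Header-inline pattern: a small, hot append routine declared static
 * inline so every caller can inline it. Names are illustrative. */
static inline void appendFixedVal(char *row, int32_t *rowLen,
                                  int32_t offset, const void *value,
                                  int32_t bytes) {
  memcpy(row + offset, value, (size_t)bytes);
  if (offset + bytes > *rowLen) *rowLen = offset + bytes;
}

int main(void) {
  char row[64] = {0};
  int32_t len = 0;
  int64_t ts = 42;
  appendFixedVal(row, &len, 0, &ts, sizeof(ts));
  printf("row length after append: %d\n", len);
  return 0;
}
```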
diff --git a/src/common/src/tdataformat.c b/src/common/src/tdataformat.c
index 9d81cd07af..cf1b77d12c 100644
--- a/src/common/src/tdataformat.c
+++ b/src/common/src/tdataformat.c
@@ -158,32 +158,6 @@ void tdFreeDataRow(SDataRow row) {
if (row) free(row);
}
-/**
- * Append a column value to the data row
- * @param type: column type
- * @param bytes: column bytes
- * @param offset: offset in the data row tuple, not including the data row header
- */
-int tdAppendColVal(SDataRow row, void *value, int8_t type, int32_t bytes, int32_t offset) {
- ASSERT(value != NULL);
- int32_t toffset = offset + TD_DATA_ROW_HEAD_SIZE;
- char * ptr = POINTER_SHIFT(row, dataRowLen(row));
-
- switch (type) {
- case TSDB_DATA_TYPE_BINARY:
- case TSDB_DATA_TYPE_NCHAR:
- *(VarDataOffsetT *)POINTER_SHIFT(row, toffset) = dataRowLen(row);
- memcpy(ptr, value, varDataTLen(value));
- dataRowLen(row) += varDataTLen(value);
- break;
- default:
- memcpy(POINTER_SHIFT(row, toffset), value, TYPE_BYTES[type]);
- break;
- }
-
- return 0;
-}
-
SDataRow tdDataRowDup(SDataRow row) {
SDataRow trow = malloc(dataRowLen(row));
if (trow == NULL) return NULL;
@@ -453,27 +427,25 @@ void tdMergeTwoDataCols(SDataCols *target, SDataCols *src1, int *iter1, SDataCol
TSKEY key1 = (*iter1 >= src1->numOfPoints) ? INT64_MAX : ((TSKEY *)(src1->cols[0].pData))[*iter1];
TSKEY key2 = (*iter2 >= src2->numOfPoints) ? INT64_MAX : ((TSKEY *)(src2->cols[0].pData))[*iter2];
- if (key1 < key2) {
+ if (key1 <= key2) {
for (int i = 0; i < src1->numOfCols; i++) {
ASSERT(target->cols[i].type == src1->cols[i].type);
- dataColAppendVal(target->cols[i].pData, tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfPoints,
+ dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src1->cols + i, *iter1), target->numOfPoints,
target->maxPoints);
}
target->numOfPoints++;
(*iter1)++;
- } else if (key1 > key2) {
+ if (key1 == key2) (*iter2)++;
+ } else {
for (int i = 0; i < src2->numOfCols; i++) {
ASSERT(target->cols[i].type == src2->cols[i].type);
- dataColAppendVal(target->cols[i].pData, tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfPoints,
+ dataColAppendVal(&(target->cols[i]), tdGetColDataOfRow(src2->cols + i, *iter2), target->numOfPoints,
target->maxPoints);
}
target->numOfPoints++;
(*iter2)++;
- } else {
- // TODO: deal with duplicate keys
- ASSERT(false);
}
}
}
\ No newline at end of file
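`tdMergeTwoDataCols` previously hit `ASSERT(false)` on duplicate timestamps; the rewrite takes the row from `src1` on a tie and consumes the duplicate from `src2` by advancing both iterators. The two-pointer merge with that dedup rule, in isolation:

```c
#include <stdint.h>
#include <stdio.h>

/* Two-way merge by timestamp, preferring the first source on ties and
 * dropping the duplicate key from the second. */
int main(void) {
  int64_t a[] = {1, 3, 5}, b[] = {3, 4};
  int na = 3, nb = 2, i = 0, j = 0;

  while (i < na || j < nb) {
    int64_t k1 = (i < na) ? a[i] : INT64_MAX;
    int64_t k2 = (j < nb) ? b[j] : INT64_MAX;
    if (k1 <= k2) {
      printf("take a[%d]=%lld\n", i, (long long)k1);
      i++;
      if (k1 == k2) j++; /* consume the duplicate key from b */
    } else {
      printf("take b[%d]=%lld\n", j, (long long)k2);
      j++;
    }
  }
  return 0;
}
```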
diff --git a/src/common/src/tglobal.c b/src/common/src/tglobal.c
index ac7bc31c3d..6b348b7fc7 100644
--- a/src/common/src/tglobal.c
+++ b/src/common/src/tglobal.c
@@ -107,7 +107,6 @@ int32_t tsReplications = TSDB_DEFAULT_REPLICA_NUM;
int16_t tsAffectedRowsMod = 0;
int32_t tsNumOfMPeers = 3;
int32_t tsMaxShellConns = 2000;
-int32_t tsMaxTables = 100000;
char tsDefaultDB[TSDB_DB_NAME_LEN] = {0};
char tsDefaultUser[64] = "root";
@@ -141,7 +140,7 @@ int32_t rpcDebugFlag = 135;
int32_t uDebugFlag = 131;
int32_t debugFlag = 131;
int32_t sDebugFlag = 135;
-int32_t tsdbDebugFlag = 135;
+int32_t tsdbDebugFlag = 131;
// the maximum number of results for projection query on super table that are returned from
// one virtual node, to order according to timestamp
diff --git a/src/connector/python/linux/python2/taos/cursor.py b/src/connector/python/linux/python2/taos/cursor.py
index 3d738ab22d..7ea52aa5ad 100644
--- a/src/connector/python/linux/python2/taos/cursor.py
+++ b/src/connector/python/linux/python2/taos/cursor.py
@@ -34,6 +34,7 @@ class TDengineCursor(object):
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
+ self._logfile = ""
if connection is not None:
self._connection = connection
@@ -83,6 +84,9 @@ class TDengineCursor(object):
"""
pass
+ def log(self, logfile):
+ self._logfile = logfile
+
def close(self):
"""Close the cursor.
"""
@@ -113,6 +117,11 @@ class TDengineCursor(object):
pass
res = CTaosInterface.query(self._connection._conn, stmt)
+
+ if (self._logfile):
+ with open(self._logfile, "a") as logfile:
+ logfile.write("%s;\n" % operation)
+
if res == 0:
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
self._affected_rows += CTaosInterface.affectedRows(
diff --git a/src/connector/python/linux/python3/taos/cursor.py b/src/connector/python/linux/python3/taos/cursor.py
index ea7e9e5404..dc038a72ba 100644
--- a/src/connector/python/linux/python3/taos/cursor.py
+++ b/src/connector/python/linux/python3/taos/cursor.py
@@ -36,6 +36,7 @@ class TDengineCursor(object):
self._block_rows = -1
self._block_iter = 0
self._affected_rows = 0
+ self._logfile = ""
if connection is not None:
self._connection = connection
@@ -85,6 +86,9 @@ class TDengineCursor(object):
"""
pass
+ def log(self, logfile):
+ self._logfile = logfile
+
def close(self):
"""Close the cursor.
"""
@@ -121,6 +125,10 @@ class TDengineCursor(object):
res = CTaosInterface.query(self._connection._conn, stmt)
# print(" << Query ({}) Exec Done".format(localSeqNum))
+ if (self._logfile):
+ with open(self._logfile, "a") as logfile:
+ logfile.write("%s;\n" % operation)
+
if res == 0:
if CTaosInterface.fieldsCount(self._connection._conn) == 0:
self._affected_rows += CTaosInterface.affectedRows(
diff --git a/src/cq/src/cqMain.c b/src/cq/src/cqMain.c
index e4f3142b89..7935bb7ff5 100644
--- a/src/cq/src/cqMain.c
+++ b/src/cq/src/cqMain.c
@@ -40,6 +40,7 @@ typedef struct {
int num; // number of continuous streams
struct SCqObj *pHead;
void *dbConn;
+ int master;
pthread_mutex_t mutex;
} SCqContext;
@@ -58,6 +59,7 @@ typedef struct SCqObj {
int cqDebugFlag = 135;
static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row);
+static void cqCreateStream(SCqContext *pContext, SCqObj *pObj);
void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
@@ -69,6 +71,7 @@ void *cqOpen(void *ahandle, const SCqCfg *pCfg) {
pContext->vgId = pCfg->vgId;
pContext->cqWrite = pCfg->cqWrite;
pContext->ahandle = ahandle;
+ tscEmbedded = 1;
pthread_mutex_init(&pContext->mutex, NULL);
@@ -84,6 +87,8 @@ void cqClose(void *handle) {
cqStop(pContext);
// free all resources
+ pthread_mutex_lock(&pContext->mutex);
+
SCqObj *pObj = pContext->pHead;
while (pObj) {
SCqObj *pTemp = pObj;
@@ -91,6 +96,8 @@ void cqClose(void *handle) {
free(pTemp);
}
+ pthread_mutex_unlock(&pContext->mutex);
+
pthread_mutex_destroy(&pContext->mutex);
cTrace("vgId:%d, CQ is closed", pContext->vgId);
@@ -100,28 +107,15 @@ void cqClose(void *handle) {
void cqStart(void *handle) {
SCqContext *pContext = handle;
cTrace("vgId:%d, start all CQs", pContext->vgId);
- if (pContext->dbConn) return;
+ if (pContext->dbConn || pContext->master) return;
pthread_mutex_lock(&pContext->mutex);
- tscEmbedded = 1;
- pContext->dbConn = taos_connect("localhost", pContext->user, pContext->pass, NULL, 0);
- if (pContext->dbConn == NULL) {
- cError("vgId:%d, failed to connect to TDengine(%s)", pContext->vgId, tstrerror(terrno));
- pthread_mutex_unlock(&pContext->mutex);
- return;
- }
+ pContext->master = 1;
SCqObj *pObj = pContext->pHead;
while (pObj) {
- int64_t lastKey = 0;
- pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, lastKey, pObj, NULL);
- if (pObj->pStream) {
- pContext->num++;
- cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
- } else {
- cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr);
- }
+ cqCreateStream(pContext, pObj);
pObj = pObj->next;
}
@@ -131,10 +125,11 @@ void cqStart(void *handle) {
void cqStop(void *handle) {
SCqContext *pContext = handle;
cTrace("vgId:%d, stop all CQs", pContext->vgId);
- if (pContext->dbConn == NULL) return;
+ if (pContext->dbConn == NULL || pContext->master == 0) return;
pthread_mutex_lock(&pContext->mutex);
+ pContext->master = 0;
SCqObj *pObj = pContext->pHead;
while (pObj) {
if (pObj->pStream) {
@@ -176,16 +171,7 @@ void *cqCreate(void *handle, int tid, char *sqlStr, SSchema *pSchema, int column
if (pContext->pHead) pContext->pHead->prev = pObj;
pContext->pHead = pObj;
- if (pContext->dbConn) {
- int64_t lastKey = 0;
- pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, lastKey, pObj, NULL);
- if (pObj->pStream) {
- pContext->num++;
- cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
- } else {
- cError("vgId:%d, id:%d CQ:%s, failed to launch", pContext->vgId, pObj->tid, pObj->sqlStr);
- }
- }
+ cqCreateStream(pContext, pObj);
pthread_mutex_unlock(&pContext->mutex);
@@ -218,6 +204,26 @@ void cqDrop(void *handle) {
pthread_mutex_lock(&pContext->mutex);
}
+static void cqCreateStream(SCqContext *pContext, SCqObj *pObj) {
+
+ if (pContext->dbConn == NULL) {
+ pContext->dbConn = taos_connect("localhost", pContext->user, pContext->pass, NULL, 0);
+ if (pContext->dbConn == NULL) {
+ cError("vgId:%d, failed to connect to TDengine(%s)", pContext->vgId, tstrerror(terrno));
+ }
+ return;
+ }
+
+ int64_t lastKey = 0;
+ pObj->pStream = taos_open_stream(pContext->dbConn, pObj->sqlStr, cqProcessStreamRes, lastKey, pObj, NULL);
+ if (pObj->pStream) {
+ pContext->num++;
+ cTrace("vgId:%d, id:%d CQ:%s is openned", pContext->vgId, pObj->tid, pObj->sqlStr);
+ } else {
+ cError("vgId:%d, id:%d CQ:%s, failed to open", pContext->vgId, pObj->tid, pObj->sqlStr);
+ }
+}
+
static void cqProcessStreamRes(void *param, TAOS_RES *tres, TAOS_ROW row) {
SCqObj *pObj = (SCqObj *)param;
SCqContext *pContext = pObj->pContext;
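The cqMain.c rework replaces the per-call `taos_connect` with a `master` flag and the shared `cqCreateStream` helper. Note the helper returns right after establishing the connection on its first invocation, so the stream itself is opened on a later call. A minimal sketch of that lazy-init shape (all names here are illustrative):

```c
#include <stdio.h>

/* Lazy-init: open the shared connection on first use, then open streams
 * against it on subsequent calls, mirroring the factored cqCreateStream. */
typedef struct { void *conn; int numStreams; } Ctx;

static void *fakeConnect(void) { static int c; return &c; }

static void createStream(Ctx *ctx, const char *sql) {
  if (ctx->conn == NULL) {
    ctx->conn = fakeConnect();
    if (ctx->conn == NULL) fprintf(stderr, "connect failed\n");
    return; /* the stream will be opened on the next call */
  }
  ctx->numStreams++;
  printf("opened stream: %s\n", sql);
}

int main(void) {
  Ctx ctx = {0};
  createStream(&ctx, "select avg(v) from t interval(1s)"); /* connects only */
  createStream(&ctx, "select avg(v) from t interval(1s)"); /* opens the stream */
  printf("streams=%d\n", ctx.numStreams);
  return 0;
}
```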
diff --git a/src/dnode/src/dnodeModule.c b/src/dnode/src/dnodeModule.c
index 86193fcebc..f5c28c9573 100644
--- a/src/dnode/src/dnodeModule.c
+++ b/src/dnode/src/dnodeModule.c
@@ -114,18 +114,20 @@ void dnodeStartModules() {
}
void dnodeProcessModuleStatus(uint32_t moduleStatus) {
- bool enableMgmtModule = moduleStatus & (1 << TSDB_MOD_MGMT);
- if (!tsModule[TSDB_MOD_MGMT].enable && enableMgmtModule) {
- dPrint("module status is received, start mgmt module", tsModuleStatus, moduleStatus);
- tsModule[TSDB_MOD_MGMT].enable = true;
- dnodeSetModuleStatus(TSDB_MOD_MGMT);
- (*tsModule[TSDB_MOD_MGMT].startFp)();
- }
+ for (int32_t module = TSDB_MOD_MGMT; module < TSDB_MOD_HTTP; ++module) {
+ bool enableModule = moduleStatus & (1 << module);
+ if (!tsModule[module].enable && enableModule) {
+ dPrint("module status:%u is received, start %s module", tsModuleStatus, tsModule[module].name);
+ tsModule[module].enable = true;
+ dnodeSetModuleStatus(module);
+ (*tsModule[module].startFp)();
+ }
- if (tsModule[TSDB_MOD_MGMT].enable && !enableMgmtModule) {
- dPrint("module status is received, stop mgmt module", tsModuleStatus, moduleStatus);
- tsModule[TSDB_MOD_MGMT].enable = false;
- dnodeUnSetModuleStatus(TSDB_MOD_MGMT);
- (*tsModule[TSDB_MOD_MGMT].stopFp)();
+ if (tsModule[module].enable && !enableModule) {
+ dPrint("module status:%u is received, stop %s module", tsModuleStatus, tsModule[module].name);
+ tsModule[module].enable = false;
+ dnodeUnSetModuleStatus(module);
+ (*tsModule[module].stopFp)();
+ }
}
}
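`dnodeProcessModuleStatus` generalizes from a hard-coded mgmt-module check to a loop over the status bitmask, starting or stopping each module whose bit changed. The same toggle logic in a self-contained form (the module table and range are stand-ins):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { MOD_MGMT = 0, MOD_HTTP, MOD_MAX };

typedef struct { const char *name; bool enable; } Module;
static Module mods[MOD_MAX] = {{"mgmt", false}, {"http", false}};

/* Start modules whose bit turned on; stop modules whose bit turned off. */
static void processStatus(uint32_t status) {
  for (int m = MOD_MGMT; m < MOD_MAX; ++m) {
    bool want = (status & (1u << m)) != 0;
    if (want && !mods[m].enable) {
      mods[m].enable = true;
      printf("start %s module\n", mods[m].name);
    } else if (!want && mods[m].enable) {
      mods[m].enable = false;
      printf("stop %s module\n", mods[m].name);
    }
  }
}

int main(void) {
  processStatus(1u << MOD_MGMT); /* start mgmt */
  processStatus(0);              /* stop it again */
  return 0;
}
```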
diff --git a/src/inc/taosdef.h b/src/inc/taosdef.h
index aeccd92d9a..b6a37d85a2 100644
--- a/src/inc/taosdef.h
+++ b/src/inc/taosdef.h
@@ -340,13 +340,14 @@ void tsDataSwap(void *pLeft, void *pRight, int32_t type, int32_t size);
#define TSDB_SESSIONS_PER_VNODE (300)
#define TSDB_SESSIONS_PER_DNODE (TSDB_SESSIONS_PER_VNODE * TSDB_MAX_VNODES)
-#define TSDB_MAX_MNODES 5
-#define TSDB_MAX_DNODES 10
-#define TSDB_MAX_ACCOUNTS 10
-#define TSDB_MAX_USERS 20
-#define TSDB_MAX_DBS 100
-#define TSDB_MAX_VGROUPS 1000
-#define TSDB_MAX_SUPER_TABLES 100
+#define TSDB_DEFAULT_MNODES_HASH_SIZE 5
+#define TSDB_DEFAULT_DNODES_HASH_SIZE 10
+#define TSDB_DEFAULT_ACCOUNTS_HASH_SIZE 10
+#define TSDB_DEFAULT_USERS_HASH_SIZE 20
+#define TSDB_DEFAULT_DBS_HASH_SIZE 100
+#define TSDB_DEFAULT_VGROUPS_HASH_SIZE 100
+#define TSDB_DEFAULT_STABLES_HASH_SIZE 100
+#define TSDB_DEFAULT_CTABLES_HASH_SIZE 10000
#define TSDB_PORT_DNODESHELL 0
#define TSDB_PORT_DNODEDNODE 5
diff --git a/src/inc/taoserror.h b/src/inc/taoserror.h
index 1390d66113..ae30efd93e 100644
--- a/src/inc/taoserror.h
+++ b/src/inc/taoserror.h
@@ -156,6 +156,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_SERV_OUT_OF_MEMORY, 0, 405, "server out of m
TAOS_DEFINE_ERROR(TSDB_CODE_NO_DISK_PERMISSIONS, 0, 406, "no disk permissions")
TAOS_DEFINE_ERROR(TSDB_CODE_FILE_CORRUPTED, 0, 407, "file corrupted")
TAOS_DEFINE_ERROR(TSDB_CODE_MEMORY_CORRUPTED, 0, 408, "memory corrupted")
+TAOS_DEFINE_ERROR(TSDB_CODE_NOT_SUCH_FILE_OR_DIR, 0, 409, "no such file or directory")
// client
TAOS_DEFINE_ERROR(TSDB_CODE_INVALID_CLIENT_VERSION, 0, 451, "invalid client version")
diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h
index e09e0cef78..83a35bfa94 100644
--- a/src/inc/taosmsg.h
+++ b/src/inc/taosmsg.h
@@ -269,9 +269,11 @@ typedef struct {
char tableId[TSDB_TABLE_ID_LEN + 1];
char db[TSDB_DB_NAME_LEN + 1];
int16_t type; /* operation type */
- char tagVal[TSDB_MAX_BYTES_PER_ROW];
- int8_t numOfCols; /* number of schema */
+ int16_t numOfCols; /* number of schema */
+ int32_t tagValLen;
SSchema schema[];
+  // tagVal is appended after the schema array
+ // char tagVal[];
} SCMAlterTableMsg;
typedef struct {
@@ -647,6 +649,7 @@ typedef struct SMultiTableMeta {
} SMultiTableMeta;
typedef struct {
+ int32_t dataLen;
char name[TSDB_TABLE_ID_LEN + 1];
char data[TSDB_MAX_TAGS_LEN];
} STagData;
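`SCMAlterTableMsg` now carries a 32-bit `tagValLen` and appends the tag value after the flexible `schema[]` array, as the comment notes. Locating trailing variable-length data via the element count is a common C message layout; a runnable sketch with simplified types:

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* The tag value bytes live immediately after the schema entries, so the
 * blob is found as schema + numOfCols. Types here are illustrative. */
typedef struct { char name[16]; int32_t bytes; } Schema;
typedef struct {
  int16_t numOfCols;
  int32_t tagValLen;
  Schema  schema[]; /* tag value bytes follow the schema entries */
} AlterMsg;

int main(void) {
  int16_t ncols = 1;
  const char *tag = "red";
  size_t len = sizeof(AlterMsg) + ncols * sizeof(Schema) + strlen(tag) + 1;

  AlterMsg *msg = calloc(1, len);
  msg->numOfCols = ncols;
  msg->tagValLen = (int32_t)strlen(tag) + 1;
  strcpy(msg->schema[0].name, "color");
  char *tagVal = (char *)(msg->schema + msg->numOfCols);
  memcpy(tagVal, tag, (size_t)msg->tagValLen);

  printf("tag value lives at offset %zu: %s\n",
         (size_t)(tagVal - (char *)msg), tagVal);
  free(msg);
  return 0;
}
```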
diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c
index adba091136..5ea75a1cde 100644
--- a/src/kit/taosdump/taosdump.c
+++ b/src/kit/taosdump/taosdump.c
@@ -37,6 +37,8 @@
#define COMMAND_SIZE 65536
#define DEFAULT_DUMP_FILE "taosdump.sql"
+#define MAX_DBS 100
+
int converStringToReadable(char *str, int size, char *buf, int bufsize);
int convertNCharToReadable(char *str, int size, char *buf, int bufsize);
void taosDumpCharset(FILE *fp);
@@ -359,7 +361,7 @@ int main(int argc, char *argv[]) {
void taosFreeDbInfos() {
if (dbInfos == NULL) return;
- for (int i = 0; i < TSDB_MAX_DBS; i++) tfree(dbInfos[i]);
+ for (int i = 0; i < MAX_DBS; i++) tfree(dbInfos[i]);
tfree(dbInfos);
}
@@ -437,7 +439,7 @@ int taosDumpOut(SDumpArguments *arguments) {
return -1;
}
- dbInfos = (SDbInfo **)calloc(TSDB_MAX_DBS, sizeof(SDbInfo *));
+ dbInfos = (SDbInfo **)calloc(MAX_DBS, sizeof(SDbInfo *));
if (dbInfos == NULL) {
fprintf(stderr, "failed to allocate memory\n");
goto _exit_failure;
@@ -941,7 +943,7 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) {
pstr += sprintf(pstr, "%d", *((int *)row[col]));
break;
case TSDB_DATA_TYPE_BIGINT:
- pstr += sprintf(pstr, "%" PRId64 "", *((int64_t *)row[col]));
+ pstr += sprintf(pstr, "%" PRId64, *((int64_t *)row[col]));
break;
case TSDB_DATA_TYPE_FLOAT:
pstr += sprintf(pstr, "%f", GET_FLOAT_VAL(row[col]));
@@ -960,7 +962,7 @@ int taosDumpTableData(FILE *fp, char *tbname, SDumpArguments *arguments) {
pstr += sprintf(pstr, "\'%s\'", tbuf);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
- pstr += sprintf(pstr, "%" PRId64 "", *(int64_t *)row[col]);
+ pstr += sprintf(pstr, "%" PRId64, *(int64_t *)row[col]);
break;
default:
break;
diff --git a/src/mnode/inc/mgmtMnode.h b/src/mnode/inc/mgmtMnode.h
index b9a135346d..0973aa6ea6 100644
--- a/src/mnode/inc/mgmtMnode.h
+++ b/src/mnode/inc/mgmtMnode.h
@@ -44,7 +44,7 @@ void mgmtDecMnodeRef(struct SMnodeObj *pMnode);
char * mgmtGetMnodeRoleStr();
void mgmtGetMnodeIpSet(SRpcIpSet *ipSet);
void mgmtGetMnodeInfos(void *mnodes);
-
+void mgmtUpdateMnodeIpSet();
#ifdef __cplusplus
}
diff --git a/src/mnode/inc/mgmtVgroup.h b/src/mnode/inc/mgmtVgroup.h
index ce4cfed532..ab0345cd20 100644
--- a/src/mnode/inc/mgmtVgroup.h
+++ b/src/mnode/inc/mgmtVgroup.h
@@ -29,6 +29,7 @@ void mgmtIncVgroupRef(SVgObj *pVgroup);
void mgmtDecVgroupRef(SVgObj *pVgroup);
void mgmtDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg);
void mgmtDropAllDnodeVgroups(SDnodeObj *pDropDnode);
+void mgmtUpdateAllDbVgroups(SDbObj *pAlterDb);
void * mgmtGetNextVgroup(void *pIter, SVgObj **pVgroup);
void mgmtUpdateVgroup(SVgObj *pVgroup);
diff --git a/src/mnode/src/mgmtAcct.c b/src/mnode/src/mgmtAcct.c
index 27beff8944..a35591382c 100644
--- a/src/mnode/src/mgmtAcct.c
+++ b/src/mnode/src/mgmtAcct.c
@@ -94,7 +94,7 @@ int32_t mgmtInitAccts() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_ACCOUNT,
.tableName = "accounts",
- .hashSessions = TSDB_MAX_ACCOUNTS,
+ .hashSessions = TSDB_DEFAULT_ACCOUNTS_HASH_SIZE,
.maxRowSize = tsAcctUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_STRING,
diff --git a/src/mnode/src/mgmtDb.c b/src/mnode/src/mgmtDb.c
index 3ea2e3aa87..df18fe8786 100644
--- a/src/mnode/src/mgmtDb.c
+++ b/src/mnode/src/mgmtDb.c
@@ -96,6 +96,7 @@ static int32_t mgmtDbActionUpdate(SSdbOper *pOper) {
memcpy(pSaved, pDb, pOper->rowSize);
free(pDb);
}
+ mgmtUpdateAllDbVgroups(pSaved);
mgmtDecDbRef(pSaved);
return TSDB_CODE_SUCCESS;
}
@@ -127,7 +128,7 @@ int32_t mgmtInitDbs() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_DB,
.tableName = "dbs",
- .hashSessions = TSDB_MAX_DBS,
+ .hashSessions = TSDB_DEFAULT_DBS_HASH_SIZE,
.maxRowSize = tsDbUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_STRING,
diff --git a/src/mnode/src/mgmtDnode.c b/src/mnode/src/mgmtDnode.c
index c3ae8b5ab1..1189da92e2 100644
--- a/src/mnode/src/mgmtDnode.c
+++ b/src/mnode/src/mgmtDnode.c
@@ -130,7 +130,7 @@ int32_t mgmtInitDnodes() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_DNODE,
.tableName = "dnodes",
- .hashSessions = TSDB_MAX_DNODES,
+ .hashSessions = TSDB_DEFAULT_DNODES_HASH_SIZE,
.maxRowSize = tsDnodeUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_AUTO,
diff --git a/src/mnode/src/mgmtMnode.c b/src/mnode/src/mgmtMnode.c
index d9ddd465f6..3ba7042c40 100644
--- a/src/mnode/src/mgmtMnode.c
+++ b/src/mnode/src/mgmtMnode.c
@@ -36,6 +36,25 @@ static int32_t tsMnodeUpdateSize = 0;
static int32_t mgmtGetMnodeMeta(STableMetaMsg *pMeta, SShowObj *pShow, void *pConn);
static int32_t mgmtRetrieveMnodes(SShowObj *pShow, char *data, int32_t rows, void *pConn);
+static SRpcIpSet tsMnodeRpcIpSet;
+static SDMMnodeInfos tsMnodeInfos;
+
+#if defined(LINUX)
+ static pthread_rwlock_t tsMnodeLock;
+ #define mgmtMnodeWrLock() pthread_rwlock_wrlock(&tsMnodeLock)
+ #define mgmtMnodeRdLock() pthread_rwlock_rdlock(&tsMnodeLock)
+ #define mgmtMnodeUnLock() pthread_rwlock_unlock(&tsMnodeLock)
+ #define mgmtMnodeInitLock() pthread_rwlock_init(&tsMnodeLock, NULL)
+ #define mgmtMnodeDestroyLock() pthread_rwlock_destroy(&tsMnodeLock)
+#else
+ static pthread_mutex_t tsMnodeLock;
+ #define mgmtMnodeWrLock() pthread_mutex_lock(&tsMnodeLock)
+ #define mgmtMnodeRdLock() pthread_mutex_lock(&tsMnodeLock)
+ #define mgmtMnodeUnLock() pthread_mutex_unlock(&tsMnodeLock)
+ #define mgmtMnodeInitLock() pthread_mutex_init(&tsMnodeLock, NULL)
+ #define mgmtMnodeDestroyLock() pthread_mutex_destroy(&tsMnodeLock)
+#endif
+
static int32_t mgmtMnodeActionDestroy(SSdbOper *pOper) {
tfree(pOper->pObj);
return TSDB_CODE_SUCCESS;
@@ -102,17 +121,22 @@ static int32_t mgmtMnodeActionRestored() {
}
sdbFreeIter(pIter);
}
+
+ mgmtUpdateMnodeIpSet();
+
return TSDB_CODE_SUCCESS;
}
int32_t mgmtInitMnodes() {
+ mgmtMnodeInitLock();
+
SMnodeObj tObj;
tsMnodeUpdateSize = (int8_t *)tObj.updateEnd - (int8_t *)&tObj;
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_MNODE,
.tableName = "mnodes",
- .hashSessions = TSDB_MAX_MNODES,
+ .hashSessions = TSDB_DEFAULT_MNODES_HASH_SIZE,
.maxRowSize = tsMnodeUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_INT,
@@ -140,6 +164,7 @@ int32_t mgmtInitMnodes() {
void mgmtCleanupMnodes() {
sdbCloseTable(tsMnodeSdb);
+ mgmtMnodeDestroyLock();
}
int32_t mgmtGetMnodesNum() {
@@ -177,8 +202,16 @@ char *mgmtGetMnodeRoleStr(int32_t role) {
}
}
-void mgmtGetMnodeIpSet(SRpcIpSet *ipSet) {
- void *pIter = NULL;
+void mgmtUpdateMnodeIpSet() {
+ SRpcIpSet *ipSet = &tsMnodeRpcIpSet;
+ SDMMnodeInfos *mnodes = &tsMnodeInfos;
+
+ mPrint("update mnodes ipset, numOfIps:%d ", mgmtGetMnodesNum());
+
+ mgmtMnodeWrLock();
+
+ int32_t index = 0;
+ void * pIter = NULL;
while (1) {
SMnodeObj *pMnode = NULL;
pIter = mgmtGetNextMnode(pIter, &pMnode);
@@ -187,40 +220,39 @@ void mgmtGetMnodeIpSet(SRpcIpSet *ipSet) {
strcpy(ipSet->fqdn[ipSet->numOfIps], pMnode->pDnode->dnodeFqdn);
ipSet->port[ipSet->numOfIps] = htons(pMnode->pDnode->dnodePort);
- if (pMnode->role == TAOS_SYNC_ROLE_MASTER) {
- ipSet->inUse = ipSet->numOfIps;
- }
-
- ipSet->numOfIps++;
-
- mgmtDecMnodeRef(pMnode);
- }
- sdbFreeIter(pIter);
-}
-
-void mgmtGetMnodeInfos(void *param) {
- SDMMnodeInfos *mnodes = param;
- mnodes->inUse = 0;
-
- int32_t index = 0;
- void *pIter = NULL;
- while (1) {
- SMnodeObj *pMnode = NULL;
- pIter = mgmtGetNextMnode(pIter, &pMnode);
- if (pMnode == NULL) break;
-
mnodes->nodeInfos[index].nodeId = htonl(pMnode->mnodeId);
strcpy(mnodes->nodeInfos[index].nodeEp, pMnode->pDnode->dnodeEp);
+
if (pMnode->role == TAOS_SYNC_ROLE_MASTER) {
+ ipSet->inUse = ipSet->numOfIps;
mnodes->inUse = index;
}
+ mPrint("mnode:%d, ep:%s %s", index, pMnode->pDnode->dnodeEp, pMnode->role == TAOS_SYNC_ROLE_MASTER ? "master" : "");
+
+ ipSet->numOfIps++;
index++;
+
mgmtDecMnodeRef(pMnode);
}
- sdbFreeIter(pIter);
mnodes->nodeNum = index;
+
+ sdbFreeIter(pIter);
+
+ mgmtMnodeUnLock();
+}
+
+void mgmtGetMnodeIpSet(SRpcIpSet *ipSet) {
+ mgmtMnodeRdLock();
+ *ipSet = tsMnodeRpcIpSet;
+ mgmtMnodeUnLock();
+}
+
+void mgmtGetMnodeInfos(void *mnodeInfos) {
+ mgmtMnodeRdLock();
+ *(SDMMnodeInfos *)mnodeInfos = tsMnodeInfos;
+ mgmtMnodeUnLock();
}
int32_t mgmtAddMnode(int32_t dnodeId) {
@@ -240,6 +272,8 @@ int32_t mgmtAddMnode(int32_t dnodeId) {
code = TSDB_CODE_SDB_ERROR;
}
+ mgmtUpdateMnodeIpSet();
+
return code;
}
@@ -250,6 +284,8 @@ void mgmtDropMnodeLocal(int32_t dnodeId) {
sdbDeleteRow(&oper);
mgmtDecMnodeRef(pMnode);
}
+
+ mgmtUpdateMnodeIpSet();
}
int32_t mgmtDropMnode(int32_t dnodeId) {
@@ -270,6 +306,9 @@ int32_t mgmtDropMnode(int32_t dnodeId) {
}
sdbDecRef(tsMnodeSdb, pMnode);
+
+ mgmtUpdateMnodeIpSet();
+
return code;
}
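Rather than walking the mnode table on every `mgmtGetMnodeIpSet` call, the patch caches `tsMnodeRpcIpSet`/`tsMnodeInfos`, rebuilds them under a write lock whenever membership changes, and hands readers a struct copy under a read lock. The cached-snapshot pattern in miniature (POSIX rwlock; the mutex fallback in the patch covers platforms without one):

```c
#include <pthread.h>
#include <stdio.h>
#include <string.h>

typedef struct { int numOfIps; char fqdn[4][32]; } IpSet;

static IpSet gIpSet;
static pthread_rwlock_t gLock = PTHREAD_RWLOCK_INITIALIZER;

/* Rebuild the shared snapshot under the write lock on membership change. */
static void updateIpSet(const char *eps[], int n) {
  pthread_rwlock_wrlock(&gLock);
  gIpSet.numOfIps = n;
  for (int i = 0; i < n; ++i)
    strncpy(gIpSet.fqdn[i], eps[i], sizeof(gIpSet.fqdn[i]) - 1);
  pthread_rwlock_unlock(&gLock);
}

static void getIpSet(IpSet *out) {
  pthread_rwlock_rdlock(&gLock);
  *out = gIpSet; /* struct copy keeps the reader race-free */
  pthread_rwlock_unlock(&gLock);
}

int main(void) {
  const char *eps[] = {"node1:6030", "node2:6030"};
  updateIpSet(eps, 2);
  IpSet snap;
  getIpSet(&snap);
  printf("%d mnodes, first=%s\n", snap.numOfIps, snap.fqdn[0]);
  return 0;
}
```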
diff --git a/src/mnode/src/mgmtSdb.c b/src/mnode/src/mgmtSdb.c
index 47fb71680a..ef7668c88e 100644
--- a/src/mnode/src/mgmtSdb.c
+++ b/src/mnode/src/mgmtSdb.c
@@ -196,6 +196,8 @@ void sdbUpdateMnodeRoles() {
mgmtDecMnodeRef(pMnode);
}
}
+
+ mgmtUpdateMnodeIpSet();
}
static uint32_t sdbGetFileInfo(void *ahandle, char *name, uint32_t *index, int32_t *size, uint64_t *fversion) {
@@ -442,8 +444,8 @@ static int32_t sdbInsertHash(SSdbTable *pTable, SSdbOper *pOper) {
pthread_mutex_unlock(&pTable->mutex);
- sdbTrace("table:%s, insert record:%s to hash, numOfRows:%d version:%" PRIu64, pTable->tableName,
- sdbGetKeyStrFromObj(pTable, pOper->pObj), pTable->numOfRows, sdbGetVersion());
+ sdbTrace("table:%s, insert record:%s to hash, rowSize:%d vnumOfRows:%d version:%" PRIu64, pTable->tableName,
+ sdbGetKeyStrFromObj(pTable, pOper->pObj), pOper->rowSize, pTable->numOfRows, sdbGetVersion());
(*pTable->insertFp)(pOper);
return TSDB_CODE_SUCCESS;
diff --git a/src/mnode/src/mgmtShell.c b/src/mnode/src/mgmtShell.c
index d8bcf67242..1da811de13 100644
--- a/src/mnode/src/mgmtShell.c
+++ b/src/mnode/src/mgmtShell.c
@@ -119,7 +119,7 @@ static void mgmtDoDealyedAddToShellQueue(void *param, void *tmrId) {
void mgmtDealyedAddToShellQueue(SQueuedMsg *queuedMsg) {
void *unUsed = NULL;
- taosTmrReset(mgmtDoDealyedAddToShellQueue, 1000, queuedMsg, tsMgmtTmr, &unUsed);
+ taosTmrReset(mgmtDoDealyedAddToShellQueue, 300, queuedMsg, tsMgmtTmr, &unUsed);
}
void mgmtProcessMsgFromShell(SRpcMsg *rpcMsg) {
diff --git a/src/mnode/src/mgmtTable.c b/src/mnode/src/mgmtTable.c
index 7b010a536a..0d821ee9e8 100644
--- a/src/mnode/src/mgmtTable.c
+++ b/src/mnode/src/mgmtTable.c
@@ -340,7 +340,7 @@ static int32_t mgmtInitChildTables() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_CTABLE,
.tableName = "ctables",
- .hashSessions = tsMaxTables,
+ .hashSessions = TSDB_DEFAULT_CTABLES_HASH_SIZE,
.maxRowSize = sizeof(SChildTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN + TSDB_CQ_SQL_SIZE,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_VAR_STRING,
@@ -507,7 +507,7 @@ static int32_t mgmtInitSuperTables() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_STABLE,
.tableName = "stables",
- .hashSessions = TSDB_MAX_SUPER_TABLES,
+ .hashSessions = TSDB_DEFAULT_STABLES_HASH_SIZE,
.maxRowSize = sizeof(SSuperTableObj) + sizeof(SSchema) * (TSDB_MAX_TAGS + TSDB_MAX_COLUMNS + 16) + TSDB_TABLE_ID_LEN,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_VAR_STRING,
@@ -1334,13 +1334,13 @@ static void mgmtProcessDropSuperTableRsp(SRpcMsg *rpcMsg) {
}
static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableObj *pTable) {
- char * pTagData = NULL;
+ STagData * pTagData = NULL;
int32_t tagDataLen = 0;
int32_t totalCols = 0;
int32_t contLen = 0;
if (pTable->info.type == TSDB_CHILD_TABLE && pMsg != NULL) {
- pTagData = pMsg->schema + TSDB_TABLE_ID_LEN + 1;
- tagDataLen = htonl(pMsg->contLen) - sizeof(SCMCreateTableMsg) - TSDB_TABLE_ID_LEN - 1;
+ pTagData = (STagData*)pMsg->schema;
+ tagDataLen = ntohl(pTagData->dataLen);
totalCols = pTable->superTable->numOfColumns + pTable->superTable->numOfTags;
contLen = sizeof(SMDCreateTableMsg) + totalCols * sizeof(SSchema) + tagDataLen + pTable->sqlLen;
} else {
@@ -1393,7 +1393,7 @@ static void *mgmtBuildCreateChildTableMsg(SCMCreateTableMsg *pMsg, SChildTableOb
}
if (pTable->info.type == TSDB_CHILD_TABLE && pMsg != NULL) {
- memcpy(pCreate->data + totalCols * sizeof(SSchema), pTagData, tagDataLen);
+ memcpy(pCreate->data + totalCols * sizeof(SSchema), pTagData->data, tagDataLen);
memcpy(pCreate->data + totalCols * sizeof(SSchema) + tagDataLen, pTable->sql, pTable->sqlLen);
}
@@ -1420,10 +1420,10 @@ static SChildTableObj* mgmtDoCreateChildTable(SCMCreateTableMsg *pCreate, SVgObj
pTable->vgId = pVgroup->vgId;
if (pTable->info.type == TSDB_CHILD_TABLE) {
- char *pTagData = (char *) pCreate->schema; // it is a tag key
- SSuperTableObj *pSuperTable = mgmtGetSuperTable(pTagData);
+ STagData *pTagData = (STagData *) pCreate->schema; // it is a tag key
+ SSuperTableObj *pSuperTable = mgmtGetSuperTable(pTagData->name);
if (pSuperTable == NULL) {
- mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData);
+ mError("table:%s, corresponding super table:%s does not exist", pCreate->tableId, pTagData->name);
free(pTable);
terrno = TSDB_CODE_INVALID_TABLE;
return NULL;
@@ -1538,7 +1538,7 @@ static void mgmtProcessCreateChildTableMsg(SQueuedMsg *pMsg) {
SRpcIpSet ipSet = mgmtGetIpSetFromVgroup(pVgroup);
SQueuedMsg *newMsg = mgmtCloneQueuedMsg(pMsg);
newMsg->ahandle = pMsg->pTable;
- newMsg->maxRetry = 5;
+ newMsg->maxRetry = 10;
SRpcMsg rpcMsg = {
.handle = newMsg,
.pCont = pMDCreate,
@@ -1742,7 +1742,9 @@ static int32_t mgmtDoGetChildTableMeta(SQueuedMsg *pMsg, STableMetaMsg *pMeta) {
static void mgmtAutoCreateChildTable(SQueuedMsg *pMsg) {
SCMTableInfoMsg *pInfo = pMsg->pCont;
- int32_t contLen = sizeof(SCMCreateTableMsg) + sizeof(STagData);
+ STagData* pTag = (STagData*)pInfo->tags;
+
+ int32_t contLen = sizeof(SCMCreateTableMsg) + offsetof(STagData, data) + ntohl(pTag->dataLen);
SCMCreateTableMsg *pCreateMsg = rpcMallocCont(contLen);
if (pCreateMsg == NULL) {
mError("table:%s, failed to create table while get meta info, no enough memory", pInfo->tableId);
@@ -1756,14 +1758,9 @@ static void mgmtAutoCreateChildTable(SQueuedMsg *pMsg) {
pCreateMsg->getMeta = 1;
pCreateMsg->contLen = htonl(contLen);
- contLen = sizeof(STagData);
- if (contLen > pMsg->contLen - sizeof(SCMTableInfoMsg)) {
- contLen = pMsg->contLen - sizeof(SCMTableInfoMsg);
- }
- memcpy(pCreateMsg->schema, pInfo->tags, contLen);
+ memcpy(pCreateMsg->schema, pInfo->tags, contLen - sizeof(SCMCreateTableMsg));
SQueuedMsg *newMsg = mgmtCloneQueuedMsg(pMsg);
- pMsg->pCont = newMsg->pCont;
newMsg->msgType = TSDB_MSG_TYPE_CM_CREATE_TABLE;
newMsg->pCont = pCreateMsg;
@@ -2201,6 +2198,8 @@ static void mgmtProcessAlterTableMsg(SQueuedMsg *pMsg) {
}
pAlter->type = htons(pAlter->type);
+ pAlter->numOfCols = htons(pAlter->numOfCols);
+ pAlter->tagValLen = htonl(pAlter->tagValLen);
if (pAlter->numOfCols > 2) {
mError("table:%s, error numOfCols:%d in alter table", pAlter->tableId, pAlter->numOfCols);
@@ -2232,7 +2231,8 @@ static void mgmtProcessAlterTableMsg(SQueuedMsg *pMsg) {
mTrace("table:%s, start to alter ctable", pAlter->tableId);
SChildTableObj *pTable = (SChildTableObj *)pMsg->pTable;
if (pAlter->type == TSDB_ALTER_TABLE_UPDATE_TAG_VAL) {
- code = mgmtModifyChildTableTagValue(pTable, pAlter->schema[0].name, pAlter->tagVal);
+ char *tagVal = (char*)(pAlter->schema + pAlter->numOfCols);
+ code = mgmtModifyChildTableTagValue(pTable, pAlter->schema[0].name, tagVal);
} else if (pAlter->type == TSDB_ALTER_TABLE_ADD_COLUMN) {
code = mgmtAddNormalTableColumn(pMsg->pDb, pTable, pAlter->schema, 1);
} else if (pAlter->type == TSDB_ALTER_TABLE_DROP_COLUMN) {
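The alter-table handler now byte-swaps `numOfCols` (16-bit) and `tagValLen` (32-bit) in place on receipt, and reads the tag value from `pAlter->schema + pAlter->numOfCols` rather than a fixed-size field. The fix-up step, isolated; each field must be converted exactly once:

```c
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int16_t type, numOfCols; int32_t tagValLen; } Msg;

int main(void) {
  /* Pretend this arrived off the wire in network byte order. */
  Msg m = {.type = htons(4), .numOfCols = htons(2), .tagValLen = htonl(16)};

  /* In-place conversion on receipt, width-matched per field. */
  m.type = ntohs(m.type);
  m.numOfCols = ntohs(m.numOfCols);
  m.tagValLen = ntohl(m.tagValLen);

  printf("type=%d cols=%d tagValLen=%d\n", m.type, m.numOfCols, m.tagValLen);
  return 0;
}
```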
diff --git a/src/mnode/src/mgmtUser.c b/src/mnode/src/mgmtUser.c
index cc22ed2169..62a98c4170 100644
--- a/src/mnode/src/mgmtUser.c
+++ b/src/mnode/src/mgmtUser.c
@@ -117,7 +117,7 @@ int32_t mgmtInitUsers() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_USER,
.tableName = "users",
- .hashSessions = TSDB_MAX_USERS,
+ .hashSessions = TSDB_DEFAULT_USERS_HASH_SIZE,
.maxRowSize = tsUserUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_STRING,
diff --git a/src/mnode/src/mgmtVgroup.c b/src/mnode/src/mgmtVgroup.c
index ef5582f395..4c85351148 100644
--- a/src/mnode/src/mgmtVgroup.c
+++ b/src/mnode/src/mgmtVgroup.c
@@ -121,6 +121,20 @@ static int32_t mgmtVgroupActionDelete(SSdbOper *pOper) {
return TSDB_CODE_SUCCESS;
}
+static void mgmtVgroupUpdateIdPool(SVgObj *pVgroup) {
+ int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
+ SDbObj *pDb = pVgroup->pDb;
+ if (pDb != NULL) {
+ if (pDb->cfg.maxTables != oldTables) {
+ mPrint("vgId:%d tables change from %d to %d", pVgroup->vgId, oldTables, pDb->cfg.maxTables);
+ taosUpdateIdPool(pVgroup->idPool, pDb->cfg.maxTables);
+ int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
+ pVgroup->tableList = (SChildTableObj **)realloc(pVgroup->tableList, size);
+      memset(pVgroup->tableList + oldTables, 0, (pDb->cfg.maxTables - oldTables) * sizeof(SChildTableObj *));
+ }
+ }
+}
+
static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
SVgObj *pNew = pOper->pObj;
SVgObj *pVgroup = mgmtGetVgroup(pNew->vgId);
@@ -146,20 +160,11 @@ static int32_t mgmtVgroupActionUpdate(SSdbOper *pOper) {
}
}
- int32_t oldTables = taosIdPoolMaxSize(pVgroup->idPool);
- SDbObj *pDb = pVgroup->pDb;
- if (pDb != NULL) {
- if (pDb->cfg.maxTables != oldTables) {
- mPrint("vgId:%d tables change from %d to %d", pVgroup->vgId, oldTables, pDb->cfg.maxTables);
- taosUpdateIdPool(pVgroup->idPool, pDb->cfg.maxTables);
- int32_t size = sizeof(SChildTableObj *) * pDb->cfg.maxTables;
- pVgroup->tableList = (SChildTableObj **)realloc(pVgroup->tableList, size);
- }
- }
+ mgmtVgroupUpdateIdPool(pVgroup);
mgmtDecVgroupRef(pVgroup);
- mTrace("vgId:%d, is updated, numOfVnode:%d tables:%d", pVgroup->vgId, pVgroup->numOfVnodes, pDb == NULL ? 0 : pDb->cfg.maxTables);
+ mTrace("vgId:%d, is updated, numOfVnode:%d", pVgroup->vgId, pVgroup->numOfVnodes);
return TSDB_CODE_SUCCESS;
}
@@ -196,7 +201,7 @@ int32_t mgmtInitVgroups() {
SSdbTableDesc tableDesc = {
.tableId = SDB_TABLE_VGROUP,
.tableName = "vgroups",
- .hashSessions = TSDB_MAX_VGROUPS,
+ .hashSessions = TSDB_DEFAULT_VGROUPS_HASH_SIZE,
.maxRowSize = tsVgUpdateSize,
.refCountPos = (int8_t *)(&tObj.refCount) - (int8_t *)&tObj,
.keyType = SDB_KEY_AUTO,
@@ -762,6 +767,28 @@ void mgmtDropAllDnodeVgroups(SDnodeObj *pDropDnode) {
sdbFreeIter(pIter);
}
+void mgmtUpdateAllDbVgroups(SDbObj *pAlterDb) {
+ void * pIter = NULL;
+ SVgObj *pVgroup = NULL;
+
+ mPrint("db:%s, all vgroups will be update in sdb", pAlterDb->name);
+
+ while (1) {
+ pIter = mgmtGetNextVgroup(pIter, &pVgroup);
+ if (pVgroup == NULL) break;
+
+ if (pVgroup->pDb == pAlterDb) {
+ mgmtVgroupUpdateIdPool(pVgroup);
+ }
+
+ mgmtDecVgroupRef(pVgroup);
+ }
+
+ sdbFreeIter(pIter);
+
+ mPrint("db:%s, all vgroups is updated in sdb", pAlterDb->name);
+}
+
void mgmtDropAllDbVgroups(SDbObj *pDropDb, bool sendMsg) {
void * pIter = NULL;
int32_t numOfVgroups = 0;
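`mgmtVgroupUpdateIdPool` factors out the grow-on-maxTables-change logic and now zeroes the newly added `tableList` slots, which the old inline version did not. The grow-and-zero idiom, with the realloc result checked before the old pointer is overwritten (the patch assigns it directly):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void) {
  int oldN = 4, newN = 8;
  void **slots = calloc((size_t)oldN, sizeof(void *));

  /* Grow the slot array, then clear only the newly added tail. */
  void **grown = realloc(slots, (size_t)newN * sizeof(void *));
  if (grown == NULL) { free(slots); return 1; }
  slots = grown;
  memset(slots + oldN, 0, (size_t)(newN - oldN) * sizeof(void *));

  printf("grew from %d to %d slots\n", oldN, newN);
  free(slots);
  return 0;
}
```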
diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 11b7692e7e..cdb8476546 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -97,7 +97,9 @@ typedef struct {
STSCursor cur;
} SQueryStatusInfo;
+#define CLEAR_QUERY_STATUS(q, st) ((q)->status &= (~(st)))
static void setQueryStatus(SQuery *pQuery, int8_t status);
+
static bool isIntervalQuery(SQuery *pQuery) { return pQuery->intervalTime > 0; }
// todo move to utility
@@ -278,6 +280,26 @@ int64_t getNumOfResult(SQueryRuntimeEnv *pRuntimeEnv) {
return maxOutput;
}
+/*
+ * the number of results needs to be updated because the offset value has been updated.
+ */
+void updateNumOfResult(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOfRes) {
+ SQuery *pQuery = pRuntimeEnv->pQuery;
+
+ for (int32_t j = 0; j < pQuery->numOfOutput; ++j) {
+ SResultInfo *pResInfo = GET_RES_INFO(&pRuntimeEnv->pCtx[j]);
+
+ int16_t functionId = pRuntimeEnv->pCtx[j].functionId;
+ if (functionId == TSDB_FUNC_TS || functionId == TSDB_FUNC_TAG || functionId == TSDB_FUNC_TAGPRJ ||
+ functionId == TSDB_FUNC_TS_DUMMY) {
+ continue;
+ }
+
+ assert(pResInfo->numOfRes > numOfRes);
+ pResInfo->numOfRes = numOfRes;
+ }
+}
+
static int32_t getGroupResultId(int32_t groupIndex) {
int32_t base = 200000;
return base + (groupIndex * 10000);
@@ -354,9 +376,7 @@ bool isSelectivityWithTagsQuery(SQuery *pQuery) {
bool isTSCompQuery(SQuery *pQuery) { return pQuery->pSelectExpr[0].base.functionId == TSDB_FUNC_TS_COMP; }
-static bool limitResults(SQInfo *pQInfo) {
- SQuery *pQuery = pQInfo->runtimeEnv.pQuery;
-
+static bool limitResults(SQuery *pQuery) {
if ((pQuery->limit.limit > 0) && (pQuery->rec.total + pQuery->rec.rows > pQuery->limit.limit)) {
pQuery->rec.rows = pQuery->limit.limit - pQuery->rec.total;
assert(pQuery->rec.rows > 0);
@@ -626,6 +646,7 @@ static void doCheckQueryCompleted(SQueryRuntimeEnv *pRuntimeEnv, TSKEY lastKey,
int32_t i = 0;
int64_t skey = TSKEY_INITIAL_VAL;
+ // TODO opt performance: get the closed time window here
for (i = 0; i < pWindowResInfo->size; ++i) {
SWindowResult *pResult = &pWindowResInfo->pResult[i];
if (pResult->status.closed) {
@@ -1303,6 +1324,10 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
if (numOfRes >= pQuery->rec.threshold) {
setQueryStatus(pQuery, QUERY_RESBUF_FULL);
}
+
+ if ((pQuery->limit.limit >= 0) && numOfRes >= (pQuery->limit.limit + pQuery->limit.offset)) {
+ setQueryStatus(pQuery, QUERY_COMPLETED);
+ }
}
return numOfRes;
@@ -2408,6 +2433,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
char *tmp = realloc(pQuery->sdata[i], bytes * newSize + sizeof(tFilePage));
if (tmp == NULL) { // todo handle the oom
+ assert(0);
} else {
pQuery->sdata[i] = (tFilePage *)tmp;
}
@@ -2421,7 +2447,7 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
}
}
- qTrace("QInfo: %p realloc output buffer, new size: %d rows, old:%d, remain:%d", GET_QINFO_ADDR(pRuntimeEnv),
+ qTrace("QInfo:%p realloc output buffer, new size: %d rows, old:%d, remain:%d", GET_QINFO_ADDR(pRuntimeEnv),
newSize, pRec->capacity, newSize - pRec->rows);
pRec->capacity = newSize;
@@ -2434,11 +2460,11 @@ static int64_t doScanAllDataBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
pQuery->pos = QUERY_IS_ASC_QUERY(pQuery) ? 0 : blockInfo.rows - 1;
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, pStatis, binarySearchForKey, pDataBlock);
- qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, res:%d", GET_QINFO_ADDR(pRuntimeEnv),
+ qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, numOfRes:%d", GET_QINFO_ADDR(pRuntimeEnv),
blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes);
- // save last access position
- if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) {
+ // while the output buffer is full or limit/offset is applied, query may be paused here
+ if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL | QUERY_COMPLETED)) {
break;
}
}
@@ -3004,11 +3030,13 @@ static void updateTableQueryInfoForReverseScan(SQuery *pQuery, STableQueryInfo *
// order has change already!
int32_t step = GET_FORWARD_DIRECTION_FACTOR(pQuery->order.order);
- if (!QUERY_IS_ASC_QUERY(pQuery)) {
- assert(pTableQueryInfo->win.ekey >= pTableQueryInfo->lastKey + step);
- } else {
- assert(pTableQueryInfo->win.ekey <= pTableQueryInfo->lastKey + step);
- }
+
+ // TODO validate the assertion
+// if (!QUERY_IS_ASC_QUERY(pQuery)) {
+// assert(pTableQueryInfo->win.ekey >= pTableQueryInfo->lastKey + step);
+// } else {
+// assert(pTableQueryInfo->win.ekey <= pTableQueryInfo->lastKey + step);
+// }
pTableQueryInfo->win.ekey = pTableQueryInfo->lastKey + step;
@@ -3087,7 +3115,7 @@ void disableFuncInReverseScan(SQInfo *pQInfo) {
void switchCtxOrder(SQueryRuntimeEnv *pRuntimeEnv) {
SQuery *pQuery = pRuntimeEnv->pQuery;
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
- SWITCH_ORDER(pRuntimeEnv->pCtx[i] .order);
+ SWITCH_ORDER(pRuntimeEnv->pCtx[i].order);
}
}
@@ -3173,30 +3201,38 @@ void skipResults(SQueryRuntimeEnv *pRuntimeEnv) {
}
if (pQuery->rec.rows <= pQuery->limit.offset) {
+ qTrace("QInfo:%p skip rows:%d, new offset:%" PRIu64, GET_QINFO_ADDR(pRuntimeEnv), pQuery->rec.rows,
+ pQuery->limit.offset - pQuery->rec.rows);
+
pQuery->limit.offset -= pQuery->rec.rows;
pQuery->rec.rows = 0;
resetCtxOutputBuf(pRuntimeEnv);
- // clear the buffer is full flag if exists
- pQuery->status &= (~QUERY_RESBUF_FULL);
+ // clear the buffer full flag if exists
+ CLEAR_QUERY_STATUS(pQuery, QUERY_RESBUF_FULL);
} else {
- int32_t numOfSkip = (int32_t) pQuery->limit.offset;
+ int64_t numOfSkip = pQuery->limit.offset;
pQuery->rec.rows -= numOfSkip;
-
+ pQuery->limit.offset = 0;
+
+ qTrace("QInfo:%p skip row:%"PRId64", new offset:%d, numOfRows remain:%" PRIu64, GET_QINFO_ADDR(pRuntimeEnv), numOfSkip,
+ 0, pQuery->rec.rows);
+
for (int32_t i = 0; i < pQuery->numOfOutput; ++i) {
int32_t functionId = pQuery->pSelectExpr[i].base.functionId;
int32_t bytes = pRuntimeEnv->pCtx[i].outputBytes;
- memmove(pQuery->sdata[i]->data, pQuery->sdata[i]->data + bytes * numOfSkip, pQuery->rec.rows * bytes);
- pRuntimeEnv->pCtx[i].aOutputBuf += bytes * numOfSkip;
+ memmove(pQuery->sdata[i]->data, (char*) pQuery->sdata[i]->data + bytes * numOfSkip, pQuery->rec.rows * bytes);
+ pRuntimeEnv->pCtx[i].aOutputBuf = ((char*) pQuery->sdata[i]->data) + pQuery->rec.rows * bytes;
if (functionId == TSDB_FUNC_DIFF || functionId == TSDB_FUNC_TOP || functionId == TSDB_FUNC_BOTTOM) {
- pRuntimeEnv->pCtx[i].ptsOutputBuf += TSDB_KEYSIZE * numOfSkip;
+ pRuntimeEnv->pCtx[i].ptsOutputBuf = pRuntimeEnv->pCtx[0].aOutputBuf;
}
}
-
- pQuery->limit.offset = 0;
+
+
+ updateNumOfResult(pRuntimeEnv, pQuery->rec.rows);
}
}
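`skipResults` now repoints `aOutputBuf` just past the retained rows instead of advancing it by the skipped byte count, after shifting the surviving rows to the front of each output buffer with `memmove`. That shift in isolation (memmove, not memcpy, because the regions overlap):

```c
#include <stdio.h>
#include <string.h>

/* OFFSET skipping: drop the first `skip` rows of a column buffer by
 * shifting the remainder to the front. */
int main(void) {
  int rows = 6, skip = 2, bytes = sizeof(int);
  int col[6] = {10, 11, 12, 13, 14, 15};

  memmove(col, (char *)col + (size_t)(bytes * skip),
          (size_t)((rows - skip) * bytes));
  rows -= skip;

  for (int i = 0; i < rows; ++i) printf("%d ", col[i]);
  printf("\n"); /* 12 13 14 15 */
  return 0;
}
```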
@@ -3205,7 +3241,7 @@ void setQueryStatus(SQuery *pQuery, int8_t status) {
pQuery->status = status;
} else {
// QUERY_NOT_COMPLETED is not compatible with any other status, so clear its position first
- pQuery->status &= (~QUERY_NOT_COMPLETED);
+ CLEAR_QUERY_STATUS(pQuery, QUERY_NOT_COMPLETED);
pQuery->status |= status;
}
}
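`CLEAR_QUERY_STATUS` makes the flag clearing explicit: query states are individual bits, and clearing one must leave the others intact, which `setQueryStatus` relies on for `QUERY_NOT_COMPLETED`. The same bit-flag helpers in a standalone form (flag values are illustrative):

```c
#include <stdint.h>
#include <stdio.h>

#define QUERY_NOT_COMPLETED 0x1u
#define QUERY_RESBUF_FULL   0x2u
#define QUERY_COMPLETED     0x4u

#define SET_STATUS(s, st)   ((s) |= (st))
#define CLEAR_STATUS(s, st) ((s) &= ~(st))
#define HAS_STATUS(s, st)   (((s) & (st)) != 0)

int main(void) {
  uint8_t status = 0;
  SET_STATUS(status, QUERY_NOT_COMPLETED | QUERY_RESBUF_FULL);
  CLEAR_STATUS(status, QUERY_RESBUF_FULL); /* buffer drained; keep running */
  printf("not-completed=%d resbuf-full=%d\n",
         HAS_STATUS(status, QUERY_NOT_COMPLETED),
         HAS_STATUS(status, QUERY_RESBUF_FULL));
  return 0;
}
```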
@@ -3957,7 +3993,7 @@ static void updateOffsetVal(SQueryRuntimeEnv *pRuntimeEnv, SDataBlockInfo *pBloc
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, pBlockInfo, NULL, binarySearchForKey, pDataBlock);
- qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, res:%d", GET_QINFO_ADDR(pRuntimeEnv),
+ qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, numOfRes:%d", GET_QINFO_ADDR(pRuntimeEnv),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, numOfRes);
}
@@ -3986,7 +4022,7 @@ void skipBlocks(SQueryRuntimeEnv *pRuntimeEnv) {
pTableQueryInfo->lastKey = (QUERY_IS_ASC_QUERY(pQuery)) ? blockInfo.window.ekey : blockInfo.window.skey;
pTableQueryInfo->lastKey += step;
- qTrace("QInfo:%p skip rows:%d, offset:%" PRId64 "", GET_QINFO_ADDR(pRuntimeEnv), blockInfo.rows,
+ qTrace("QInfo:%p skip rows:%d, offset:%" PRId64, GET_QINFO_ADDR(pRuntimeEnv), blockInfo.rows,
pQuery->limit.offset);
} else { // find the appropriated start position in current block
updateOffsetVal(pRuntimeEnv, &blockInfo);
@@ -4075,7 +4111,7 @@ static bool skipTimeInterval(SQueryRuntimeEnv *pRuntimeEnv, TSKEY* start) {
int32_t numOfRes = tableApplyFunctionsOnBlock(pRuntimeEnv, &blockInfo, NULL, binarySearchForKey, pDataBlock);
pRuntimeEnv->windowResInfo.curIndex = index; // restore the window index
- qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, res:%d",
+ qTrace("QInfo:%p check data block, brange:%" PRId64 "-%" PRId64 ", rows:%d, numOfRes:%d",
GET_QINFO_ADDR(pRuntimeEnv), blockInfo.window.skey, blockInfo.window.ekey, blockInfo.rows, numOfRes);
return true;
} else { // do nothing
@@ -4350,10 +4386,11 @@ static int64_t queryOnDataBlocks(SQInfo *pQInfo) {
SArray *pDataBlock = loadDataBlockOnDemand(pRuntimeEnv, pQueryHandle, &blockInfo, &pStatis);
- TSKEY nextKey = blockInfo.window.skey;
if (!isIntervalQuery(pQuery)) {
- setExecutionContext(pQInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIdx, nextKey);
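+ // for a non-interval query, the next key is one step beyond the end key of the current block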
+ int32_t step = QUERY_IS_ASC_QUERY(pQuery)? 1:-1;
+ setExecutionContext(pQInfo, &pTableQueryInfo->id, pTableQueryInfo->groupIdx, blockInfo.window.ekey + step);
} else { // interval query
+ TSKEY nextKey = blockInfo.window.skey;
setIntervalQueryRange(pQInfo, nextKey);
int32_t ret = setAdditionalInfo(pQInfo, &pTableQueryInfo->id, pTableQueryInfo);
@@ -4532,8 +4569,6 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
continue;
}
- // SPointInterpoSupporter pointInterpSupporter = {0};
-
// TODO handle the limit offset problem
if (pQuery->numOfFilterCols == 0 && pQuery->limit.offset > 0) {
// skipBlocks(pRuntimeEnv);
@@ -4544,12 +4579,10 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
}
scanAllDataBlocks(pRuntimeEnv, pQuery->current->lastKey);
-
- pQuery->rec.rows = getNumOfResult(pRuntimeEnv);
skipResults(pRuntimeEnv);
// the limitation of output result is reached, set the query completed
- if (limitResults(pQInfo)) {
+ if (limitResults(pQuery)) {
pQInfo->tableIndex = pQInfo->groupInfo.numOfTables;
break;
}
@@ -4578,18 +4611,15 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
break;
}
- } else { // forward query range
- pQuery->window.skey = pQuery->current->lastKey;
-
+ } else {
// all data in the result buffer are skipped due to the offset, continue to retrieve data from current meter
if (pQuery->rec.rows == 0) {
assert(!Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL));
continue;
} else {
- // pQInfo->pTableQuerySupporter->pMeterSidExtInfo[k]->key = pQuery->lastKey;
- // // buffer is full, wait for the next round to retrieve data from current meter
- // assert(Q_STATUS_EQUAL(pQuery->over, QUERY_RESBUF_FULL));
- // break;
+ // buffer is full, wait for the next round to retrieve data from current meter
+ assert(Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL));
+ break;
}
}
}
@@ -4633,10 +4663,8 @@ static void sequentialTableProcess(SQInfo *pQInfo) {
copyFromWindowResToSData(pQInfo, pWindowResInfo->pResult);
}
- pQuery->rec.total += pQuery->rec.rows;
-
qTrace(
- "QInfo %p, numOfTables:%d, index:%d, numOfGroups:%d, %d points returned, total:%"PRId64", offset:%" PRId64,
+ "QInfo %p numOfTables:%d, index:%d, numOfGroups:%d, %d points returned, total:%"PRId64", offset:%" PRId64,
pQInfo, pQInfo->groupInfo.numOfTables, pQInfo->tableIndex, numOfGroups, pQuery->rec.rows, pQuery->rec.total,
pQuery->limit.offset);
}
@@ -4809,7 +4837,7 @@ static void tableFixedOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
pQuery->rec.rows = getNumOfResult(pRuntimeEnv);
skipResults(pRuntimeEnv);
- limitResults(pQInfo);
+ limitResults(pQuery);
}
static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
@@ -4857,7 +4885,7 @@ static void tableMultiOutputProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo)
resetCtxOutputBuf(pRuntimeEnv);
}
- limitResults(pQInfo);
+ limitResults(pQuery);
if (Q_STATUS_EQUAL(pQuery->status, QUERY_RESBUF_FULL)) {
qTrace("QInfo:%p query paused due to output limitation, next qrange:%" PRId64 "-%" PRId64, pQInfo,
pQuery->current->lastKey, pQuery->window.ekey);
@@ -4935,7 +4963,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
// the offset is handled at prepare stage if no interpolation involved
if (pQuery->fillType == TSDB_FILL_NONE || pQuery->rec.rows == 0) {
- limitResults(pQInfo);
+ limitResults(pQuery);
break;
} else {
TSKEY ekey = taosGetRevisedEndKey(pQuery->window.ekey, pQuery->order.order, pQuery->slidingTime,
@@ -4947,7 +4975,7 @@ static void tableIntervalProcess(SQInfo *pQInfo, STableQueryInfo* pTableInfo) {
qTrace("QInfo: %p fill results completed, final:%d", pQInfo, pQuery->rec.rows);
if (pQuery->rec.rows > 0 || Q_STATUS_EQUAL(pQuery->status, QUERY_COMPLETED)) {
- limitResults(pQInfo);
+ limitResults(pQuery);
break;
}
@@ -4982,7 +5010,7 @@ static void tableQueryImpl(SQInfo *pQInfo) {
qTrace("QInfo: %p fill results completed, final:%d", pQInfo, pQuery->rec.rows);
if (pQuery->rec.rows > 0) {
- limitResults(pQInfo);
+ limitResults(pQuery);
}
qTrace("QInfo:%p current:%d returned, total:%d", pQInfo, pQuery->rec.rows, pQuery->rec.total);
@@ -5106,7 +5134,7 @@ bool validateExprColumnInfo(SQueryTableMsg *pQueryMsg, SSqlFuncMsg *pExprMsg, SC
static bool validateQueryMsg(SQueryTableMsg *pQueryMsg) {
if (pQueryMsg->intervalTime < 0) {
- qError("qmsg:%p illegal value of interval time %" PRId64 "", pQueryMsg, pQueryMsg->intervalTime);
+ qError("qmsg:%p illegal value of interval time %" PRId64, pQueryMsg, pQueryMsg->intervalTime);
return false;
}
diff --git a/src/query/src/qtokenizer.c b/src/query/src/qtokenizer.c
index fc8e712530..e6340cc678 100644
--- a/src/query/src/qtokenizer.c
+++ b/src/query/src/qtokenizer.c
@@ -282,11 +282,7 @@ int tSQLKeywordCode(const char* z, int n) {
}
SKeyword** pKey = (SKeyword**)taosHashGet(KeywordHashTable, key, n);
- if (pKey != NULL) {
- return (*pKey)->type;
- } else {
- return TK_ID;
- }
+ return (pKey != NULL)? (*pKey)->type:TK_ID;
}
/*
@@ -594,31 +590,28 @@ SSQLToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
while (1) {
*i += t0.n;
- bool hasComma = false;
- while ((str[*i] == ' ' || str[*i] == '\n' || str[*i] == '\r' || str[*i] == '\t' || str[*i] == '\f')
- || str[*i] == ',') {
- if (str[*i] == ',') {
- if (false == hasComma) {
- hasComma = true;
- } else { // comma only allowed once
- t0.n = 0;
- return t0;
- }
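+ // skip whitespace characters and allow at most one comma between two consecutive tokens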
+ int32_t numOfComma = 0;
+ char t = str[*i];
+ while (t == ' ' || t == '\n' || t == '\r' || t == '\t' || t == '\f' || t == ',') {
+ if (t == ',' && (++numOfComma > 1)) { // comma only allowed once
+ t0.n = 0;
+ return t0;
}
- (*i)++;
+
+ t = str[++(*i)];
}
t0.n = tSQLGetToken(&str[*i], &t0.type);
- bool ignoreFlag = false;
+ bool ignore = false;
for (uint32_t k = 0; k < numOfIgnoreToken; k++) {
if (t0.type == ignoreTokenTypes[k]) {
- ignoreFlag = true;
+ ignore = true;
break;
}
}
- if (!ignoreFlag) {
+ if (!ignore) {
break;
}
}
@@ -662,114 +655,4 @@ SSQLToken tStrGetToken(char* str, int32_t* i, bool isPrevOptr, uint32_t numOfIgn
return t0;
}
-FORCE_INLINE bool isKeyWord(const char* z, int32_t len) { return (tSQLKeywordCode((char*)z, len) != TK_ID); }
-
-FORCE_INLINE bool isNumber(const SSQLToken* pToken) {
- return (pToken->type == TK_INTEGER || pToken->type == TK_FLOAT || pToken->type == TK_HEX || pToken->type == TK_BIN);
-}
-
-int32_t isValidNumber(const SSQLToken* pToken) {
- const char* z = pToken->z;
- int32_t type = TK_ILLEGAL;
-
- int32_t i = 0;
- for(; i < pToken->n; ++i) {
- switch (z[i]) {
- case '+':
- case '-': {
- break;
- }
- case '.': {
- /*
- * handle the the float number with out integer part
- * .123
- * .123e4
- */
- if (!isdigit(z[i+1])) {
- return TK_ILLEGAL;
- }
-
- for (i += 2; isdigit(z[i]); i++) {
- }
-
- if ((z[i] == 'e' || z[i] == 'E') &&
- (isdigit(z[i + 1]) || ((z[i + 1] == '+' || z[i + 1] == '-') && isdigit(z[i + 2])))) {
- i += 2;
- while (isdigit(z[i])) {
- i++;
- }
- }
-
- type = TK_FLOAT;
- goto _end;
- }
-
- case '0': {
- char next = z[i + 1];
- if (next == 'b') { // bin number
- type = TK_BIN;
- for (i += 2; (z[i] == '0' || z[i] == '1'); ++i) {
- }
-
- goto _end;
- } else if (next == 'x') { //hex number
- type = TK_HEX;
- for (i += 2; isdigit(z[i]) || (z[i] >= 'a' && z[i] <= 'f') || (z[i] >= 'A' && z[i] <= 'F'); ++i) {
- }
-
- goto _end;
- }
- }
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- case '8':
- case '9': {
- type = TK_INTEGER;
- for (; isdigit(z[i]); i++) {
- }
-
- int32_t seg = 0;
- while (z[i] == '.' && isdigit(z[i + 1])) {
- i += 2;
-
- while (isdigit(z[i])) {
- i++;
- }
-
- seg++;
- type = TK_FLOAT;
- }
-
- if (seg > 1) {
- return TK_ILLEGAL;
- }
-
- if ((z[i] == 'e' || z[i] == 'E') &&
- (isdigit(z[i + 1]) || ((z[i + 1] == '+' || z[i + 1] == '-') && isdigit(z[i + 2])))) {
- i += 2;
- while (isdigit(z[i])) {
- i++;
- }
-
- type = TK_FLOAT;
- }
-
- goto _end;
- }
- default:
- return TK_ILLEGAL;
- }
- }
-
- _end:
- if (i < pToken->n) {
- return TK_ILLEGAL;
- } else {
- return type;
- }
-}
\ No newline at end of file
+bool isKeyWord(const char* z, int32_t len) { return (tSQLKeywordCode((char*)z, len) != TK_ID); }
\ No newline at end of file
diff --git a/src/rpc/src/rpcMain.c b/src/rpc/src/rpcMain.c
index dcbfbcf9ac..0410281f0d 100644
--- a/src/rpc/src/rpcMain.c
+++ b/src/rpc/src/rpcMain.c
@@ -869,7 +869,7 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
if (pRecv->ip==0 && pConn) {
rpcProcessBrokenLink(pConn);
- tfree(pRecv->msg);
+ rpcFreeMsg(pRecv->msg);
return NULL;
}
@@ -889,12 +889,12 @@ static void *rpcProcessMsgFromPeer(SRecvInfo *pRecv) {
rpcSendErrorMsgToPeer(pRecv, code);
tTrace("%s %p %p, %s is sent with error code:%x", pRpc->label, pConn, (void *)pHead->ahandle, taosMsg[pHead->msgType+1], code);
}
- } else { // parsing OK
+ } else { // msg is passed to app only if parsing is ok
rpcProcessIncomingMsg(pConn, pHead);
}
}
- if (code) rpcFreeMsg(pRecv->msg);
+ if (code) rpcFreeMsg(pRecv->msg); // parsing failed, msg shall be freed
return pConn;
}
diff --git a/src/tsdb/inc/tsdbMain.h b/src/tsdb/inc/tsdbMain.h
index 8a0a9e1208..7779dd5479 100644
--- a/src/tsdb/inc/tsdbMain.h
+++ b/src/tsdb/inc/tsdbMain.h
@@ -500,6 +500,7 @@ int tsdbLoadCompInfo(SRWHelper *pHelper, void *target);
int tsdbLoadCompData(SRWHelper *pHelper, SCompBlock *pCompBlock, void *target);
int tsdbLoadBlockDataCols(SRWHelper *pHelper, SDataCols *pDataCols, int blkIdx, int16_t *colIds, int numOfColIds);
int tsdbLoadBlockData(SRWHelper *pHelper, SCompBlock *pCompBlock, SDataCols *target);
+// void tsdbGetDataStatis(SRWHelper *pHelper, SDataStatis *pStatis, int numOfCols);
// --------- For write operations
int tsdbWriteDataBlock(SRWHelper *pHelper, SDataCols *pDataCols);
diff --git a/src/tsdb/src/tsdbCache.c b/src/tsdb/src/tsdbCache.c
index 2761ed5e8e..be339d2816 100644
--- a/src/tsdb/src/tsdbCache.c
+++ b/src/tsdb/src/tsdbCache.c
@@ -162,6 +162,7 @@ int tsdbAlterCacheTotalBlocks(STsdbRepo *pRepo, int totalBlocks) {
pCache->totalCacheBlocks = totalBlocks;
tsdbAdjustCacheBlocks(pCache);
}
+ pRepo->config.totalBlocks = totalBlocks;
tsdbUnLockRepo((TsdbRepoT *)pRepo);
tsdbTrace("vgId:%d, tsdb total cache blocks changed from %d to %d", pRepo->config.tsdbId, oldNumOfBlocks, totalBlocks);
diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c
index cac5279b76..a9f0d9b6ec 100644
--- a/src/tsdb/src/tsdbMain.c
+++ b/src/tsdb/src/tsdbMain.c
@@ -37,7 +37,8 @@ static TSKEY tsdbNextIterKey(SSkipListIterator *pIter);
static int tsdbHasDataToCommit(SSkipListIterator **iters, int nIters, TSKEY minKey, TSKEY maxKey);
static void tsdbAlterCompression(STsdbRepo *pRepo, int8_t compression);
static void tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep);
-static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables);
+static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables);
+static int32_t tsdbSaveConfig(STsdbRepo *pRepo);
#define TSDB_GET_TABLE_BY_ID(pRepo, sid) (((STSDBRepo *)pRepo)->pTableList)[sid]
#define TSDB_GET_TABLE_BY_NAME(pRepo, name)
@@ -319,10 +320,25 @@ int32_t tsdbConfigRepo(TsdbRepoT *repo, STsdbCfg *pCfg) {
ASSERT(pRCfg->maxRowsPerFileBlock == pCfg->maxRowsPerFileBlock);
ASSERT(pRCfg->precision == pCfg->precision);
- if (pRCfg->compression != pCfg->compression) tsdbAlterCompression(pRepo, pCfg->compression);
- if (pRCfg->keep != pCfg->keep) tsdbAlterKeep(pRepo, pCfg->keep);
- if (pRCfg->totalBlocks != pCfg->totalBlocks) tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks);
- if (pRCfg->maxTables != pCfg->maxTables) tsdbAlterMaxTables(pRepo, pCfg->maxTables);
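+ // apply each changed option and persist the configuration only when something actually changed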
+ bool configChanged = false;
+ if (pRCfg->compression != pCfg->compression) {
+ configChanged = true;
+ tsdbAlterCompression(pRepo, pCfg->compression);
+ }
+ if (pRCfg->keep != pCfg->keep) {
+ configChanged = true;
+ tsdbAlterKeep(pRepo, pCfg->keep);
+ }
+ if (pRCfg->totalBlocks != pCfg->totalBlocks) {
+ configChanged = true;
+ tsdbAlterCacheTotalBlocks(pRepo, pCfg->totalBlocks);
+ }
+ if (pRCfg->maxTables != pCfg->maxTables) {
+ configChanged = true;
+ tsdbAlterMaxTables(pRepo, pCfg->maxTables);
+ }
+
+ if (configChanged) tsdbSaveConfig(pRepo);
return TSDB_CODE_SUCCESS;
}
@@ -1134,8 +1150,10 @@ static void tsdbAlterKeep(STsdbRepo *pRepo, int32_t keep) {
int maxFiles = keep / pCfg->maxTables + 3;
if (pRepo->config.keep > keep) {
+ pRepo->config.keep = keep;
pRepo->tsdbFileH->maxFGroups = maxFiles;
} else {
+ pRepo->config.keep = keep;
pRepo->tsdbFileH->fGroup = realloc(pRepo->tsdbFileH->fGroup, sizeof(SFileGroup));
if (pRepo->tsdbFileH->fGroup == NULL) {
// TODO: deal with the error
@@ -1155,6 +1173,8 @@ static void tsdbAlterMaxTables(STsdbRepo *pRepo, int32_t maxTables) {
pMeta->maxTables = maxTables;
pMeta->tables = realloc(pMeta->tables, maxTables * sizeof(STable *));
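+ // zero out the newly allocated table slots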
+ memset(&pMeta->tables[oldMaxTables], 0, sizeof(STable *) * (maxTables-oldMaxTables));
+ pRepo->config.maxTables = maxTables;
tsdbTrace("vgId:%d, tsdb maxTables is changed from %d to %d!", pRepo->config.tsdbId, oldMaxTables, maxTables);
}
diff --git a/src/tsdb/src/tsdbMeta.c b/src/tsdb/src/tsdbMeta.c
index 356e9c77f1..95680f95c4 100644
--- a/src/tsdb/src/tsdbMeta.c
+++ b/src/tsdb/src/tsdbMeta.c
@@ -16,7 +16,7 @@ static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx)
static int tsdbAddTableIntoIndex(STsdbMeta *pMeta, STable *pTable);
static int tsdbRemoveTableFromIndex(STsdbMeta *pMeta, STable *pTable);
static int tsdbEstimateTableEncodeSize(STable *pTable);
-static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable);
+static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable, bool rmFromIdx);
/**
* Encode a TSDB table object as a binary content
@@ -127,7 +127,7 @@ int tsdbRestoreTable(void *pHandle, void *cont, int contLen) {
if (pTable->type == TSDB_SUPER_TABLE) {
STColumn* pColSchema = schemaColAt(pTable->tagSchema, 0);
pTable->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, pColSchema->type, pColSchema->bytes,
- 1, 0, 0, getTagIndexKey);
+ 1, 0, 1, getTagIndexKey);
}
tsdbAddTableToMeta(pMeta, pTable, false);
@@ -289,6 +289,13 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
if (tsdbCheckTableCfg(pCfg) < 0) return -1;
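+ // reject the creation request if a table with the same uid already exists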
+ STable *pTable = tsdbGetTableByUid(pMeta, pCfg->tableId.uid);
+ if (pTable != NULL) {
+ tsdbError("vgId:%d table %s already exists, tid %d uid %" PRId64, pRepo->config.tsdbId, varDataVal(pTable->name),
+ pTable->tableId.tid, pTable->tableId.uid);
+ return TSDB_CODE_TABLE_ALREADY_EXIST;
+ }
+
STable *super = NULL;
int newSuper = 0;
@@ -316,7 +323,7 @@ int tsdbCreateTable(TsdbRepoT *repo, STableCfg *pCfg) {
// index the first tag column
STColumn* pColSchema = schemaColAt(super->tagSchema, 0);
super->pIndex = tSkipListCreate(TSDB_SUPER_TABLE_SL_LEVEL, pColSchema->type, pColSchema->bytes,
- 1, 0, 0, getTagIndexKey); // Allow duplicate key, no lock
+ 1, 0, 1, getTagIndexKey); // Allow duplicate key, no lock
if (super->pIndex == NULL) {
tdFreeSchema(super->schema);
@@ -411,7 +418,7 @@ int tsdbDropTable(TsdbRepoT *repo, STableId tableId) {
tsdbTrace("vgId:%d, table %s is dropped! tid:%d, uid:%" PRId64, pRepo->config.tsdbId, varDataVal(pTable->name),
tableId.tid, tableId.uid);
- if (tsdbRemoveTableFromMeta(pMeta, pTable) < 0) return -1;
+ if (tsdbRemoveTableFromMeta(pMeta, pTable, true) < 0) return -1;
return 0;
@@ -440,6 +447,7 @@ static int tsdbFreeTable(STable *pTable) {
// Free content
if (TSDB_TABLE_IS_SUPER_TABLE(pTable)) {
+ tdFreeSchema(pTable->tagSchema);
tSkipListDestroy(pTable->pIndex);
}
@@ -500,7 +508,7 @@ static int tsdbAddTableToMeta(STsdbMeta *pMeta, STable *pTable, bool addIdx) {
return 0;
}
-static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable) {
+static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable, bool rmFromIdx) {
if (pTable->type == TSDB_SUPER_TABLE) {
SSkipListIterator *pIter = tSkipListCreateIter(pTable->pIndex);
while (tSkipListIterNext(pIter)) {
@@ -509,7 +517,7 @@ static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable) {
ASSERT(tTable != NULL && tTable->type == TSDB_CHILD_TABLE);
- tsdbRemoveTableFromMeta(pMeta, tTable);
+ tsdbRemoveTableFromMeta(pMeta, tTable, false);
}
tSkipListDestroyIter(pIter);
@@ -525,7 +533,7 @@ static int tsdbRemoveTableFromMeta(STsdbMeta *pMeta, STable *pTable) {
}
} else {
pMeta->tables[pTable->tableId.tid] = NULL;
- if (pTable->type == TSDB_CHILD_TABLE) {
+ if (pTable->type == TSDB_CHILD_TABLE && rmFromIdx) {
tsdbRemoveTableFromIndex(pMeta, pTable);
}
diff --git a/src/tsdb/src/tsdbRWHelper.c b/src/tsdb/src/tsdbRWHelper.c
index c5ef00c233..7205a70ecc 100644
--- a/src/tsdb/src/tsdbRWHelper.c
+++ b/src/tsdb/src/tsdbRWHelper.c
@@ -409,7 +409,7 @@ int tsdbWriteCompInfo(SRWHelper *pHelper) {
if (pIdx->offset > 0) {
pIdx->offset = lseek(pHelper->files.nHeadF.fd, 0, SEEK_END);
if (pIdx->offset < 0) return -1;
- ASSERT(pIdx->offset >= tsizeof(pHelper->pCompIdx));
+ ASSERT(pIdx->offset >= TSDB_FILE_HEAD_SIZE);
if (tsendfile(pHelper->files.nHeadF.fd, pHelper->files.headF.fd, NULL, pIdx->len) < pIdx->len) return -1;
}
@@ -489,6 +489,7 @@ int tsdbLoadCompIdx(SRWHelper *pHelper, void *target) {
}
ASSERT(((char *)ptr - (char *)pHelper->pBuffer) == (pFile->info.len - sizeof(TSCKSUM)));
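+ // reset the file offset to the end of the file header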
+ if (lseek(fd, TSDB_FILE_HEAD_SIZE, SEEK_SET) < 0) return -1;
}
}
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index ad3da226f6..d3d890f361 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -395,6 +395,7 @@ static int32_t getFileCompInfo(STsdbQueryHandle* pQueryHandle, int32_t* numOfBlo
SCompIdx* compIndex = &pQueryHandle->rhelper.pCompIdx[pCheckInfo->tableId.tid];
if (compIndex->len == 0 || compIndex->numOfBlocks == 0) { // no data block in this file, try next file
+ pCheckInfo->numOfBlocks = 0;
continue;//no data blocks in the file belongs to pCheckInfo->pTable
} else {
if (pCheckInfo->compSize < compIndex->len) {
@@ -544,9 +545,10 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
/*bool hasData = */ initTableMemIterator(pQueryHandle, pCheckInfo);
TSKEY k1 = TSKEY_INITIAL_VAL, k2 = TSKEY_INITIAL_VAL;
- if (pCheckInfo->iter != NULL) {
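+ // make sure the iterator still points to a node before dereferencing it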
+ if (pCheckInfo->iter != NULL && tSkipListIterGet(pCheckInfo->iter) != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iter);
- SDataRow row = SL_GET_NODE_DATA(node);
+
+ SDataRow row = SL_GET_NODE_DATA(node);
k1 = dataRowKey(row);
if (k1 == binfo.window.skey) {
@@ -560,9 +562,10 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
}
}
- if (pCheckInfo->iiter != NULL) {
+ if (pCheckInfo->iiter != NULL && tSkipListIterGet(pCheckInfo->iiter) != NULL) {
SSkipListNode* node = tSkipListIterGet(pCheckInfo->iiter);
- SDataRow row = SL_GET_NODE_DATA(node);
+
+ SDataRow row = SL_GET_NODE_DATA(node);
k2 = dataRowKey(row);
if (k2 == binfo.window.skey) {
@@ -582,6 +585,12 @@ static bool loadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlock
mergeDataInDataBlock(pQueryHandle, pCheckInfo, pBlock, sa);
} else {
pQueryHandle->realNumOfRows = binfo.rows;
+
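+ // no merge is needed, the block is used as a whole; update the cursor state accordingly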
+ cur->rows = binfo.rows;
+ cur->win = binfo.window;
+ cur->mixBlock = false;
+ cur->blockCompleted = true;
+ cur->lastKey = binfo.window.ekey + (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)? 1:-1);
}
}
} else { //desc order
@@ -858,6 +867,7 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
}
}
+ pos += (end - start + 1) * step;
cur->blockCompleted = (((pos >= endPos || cur->lastKey > pQueryHandle->window.ekey) && ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) ||
((pos <= endPos || cur->lastKey < pQueryHandle->window.ekey) && !ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)));
@@ -912,7 +922,10 @@ static void mergeDataInDataBlock(STsdbQueryHandle* pQueryHandle, STableCheckInfo
int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC) ? TSDB_ORDER_DESC : TSDB_ORDER_ASC;
int32_t end = vnodeBinarySearchKey(pCols->cols[0].pData, pCols->numOfPoints, key, order);
-
+ if (tsArray[end] == key) { // the key value in cache equals the end timestamp of the block, ignore it
+ tSkipListIterNext(pCheckInfo->iter);
+ }
+
int32_t start = -1;
if (ASCENDING_ORDER_TRAVERSE(pQueryHandle->order)) {
int32_t remain = end - pos + 1;
diff --git a/src/util/inc/tstoken.h b/src/util/inc/tstoken.h
index 13190e800d..60a79f69af 100644
--- a/src/util/inc/tstoken.h
+++ b/src/util/inc/tstoken.h
@@ -21,6 +21,8 @@ extern "C" {
#endif
#include "os.h"
+#include "tutil.h"
+#include "ttokendef.h"
#define TK_SPACE 200
#define TK_COMMENT 201
@@ -31,7 +33,7 @@ extern "C" {
#define TK_FILE 206
#define TK_QUESTION 207 // denoting the placeholder of "?",when invoking statement bind query
-#define TSQL_TBNAME "TBNAME"
+#define TSQL_TBNAME "TBNAME"
#define TSQL_TBNAME_L "tbname"
// used to denote the minimum unite in sql parsing
@@ -74,14 +76,117 @@ bool isKeyWord(const char *z, int32_t len);
* @param pToken
* @return
*/
-bool isNumber(const SSQLToken *pToken);
+#define isNumber(tk) \
+((tk)->type == TK_INTEGER || (tk)->type == TK_FLOAT || (tk)->type == TK_HEX || (tk)->type == TK_BIN)
+
/**
* check if it is a token or not
* @param pToken
- * @return token type, if it is not a number, TK_ILLEGAL will return
+ * @return the token type; if it is not a valid number, TK_ILLEGAL is returned
*/
-int32_t isValidNumber(const SSQLToken* pToken);
+static FORCE_INLINE int32_t isValidNumber(const SSQLToken* pToken) {
+ const char* z = pToken->z;
+ int32_t type = TK_ILLEGAL;
+
+ int32_t i = 0;
+ for(; i < pToken->n; ++i) {
+ switch (z[i]) {
+ case '+':
+ case '-': {
+ break;
+ }
+
+ case '.': {
+ /*
+ * handle the float number without an integer part
+ * .123
+ * .123e4
+ */
+ if (!isdigit(z[i+1])) {
+ return TK_ILLEGAL;
+ }
+
+ for (i += 2; isdigit(z[i]); i++) {
+ }
+
+ if ((z[i] == 'e' || z[i] == 'E') &&
+ (isdigit(z[i + 1]) || ((z[i + 1] == '+' || z[i + 1] == '-') && isdigit(z[i + 2])))) {
+ i += 2;
+ while (isdigit(z[i])) {
+ i++;
+ }
+ }
+
+ type = TK_FLOAT;
+ goto _end;
+ }
+
+ case '0': {
+ char next = z[i + 1];
+ if (next == 'b') { // bin number
+ type = TK_BIN;
+ for (i += 2; (z[i] == '0' || z[i] == '1'); ++i) {
+ }
+
+ goto _end;
+ } else if (next == 'x') { //hex number
+ type = TK_HEX;
+ for (i += 2; isdigit(z[i]) || (z[i] >= 'a' && z[i] <= 'f') || (z[i] >= 'A' && z[i] <= 'F'); ++i) {
+ }
+
+ goto _end;
+ }
+ }
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9': {
+ type = TK_INTEGER;
+ for (; isdigit(z[i]); i++) {
+ }
+
+ int32_t seg = 0;
+ while (z[i] == '.' && isdigit(z[i + 1])) {
+ i += 2;
+
+ while (isdigit(z[i])) {
+ i++;
+ }
+
+ seg++;
+ type = TK_FLOAT;
+ }
+
+ if (seg > 1) {
+ return TK_ILLEGAL;
+ }
+
+ if ((z[i] == 'e' || z[i] == 'E') &&
+ (isdigit(z[i + 1]) || ((z[i + 1] == '+' || z[i + 1] == '-') && isdigit(z[i + 2])))) {
+ i += 2;
+ while (isdigit(z[i])) {
+ i++;
+ }
+
+ type = TK_FLOAT;
+ }
+
+ goto _end;
+ }
+ default:
+ return TK_ILLEGAL;
+ }
+ }
+
+ _end:
+ return (i < pToken->n)? TK_ILLEGAL:type;
+}
#ifdef __cplusplus
}
diff --git a/src/util/src/hash.c b/src/util/src/hash.c
index 12f7b43b79..f19438159d 100644
--- a/src/util/src/hash.c
+++ b/src/util/src/hash.c
@@ -102,7 +102,32 @@ static void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode);
* @param hashVal hash value by hash function
* @return
*/
-static SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal);
+FORCE_INLINE SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal) {
+ uint32_t hash = (*pHashObj->hashFp)(key, keyLen);
+
+ int32_t slot = HASH_INDEX(hash, pHashObj->capacity);
+ SHashEntry *pEntry = pHashObj->hashList[slot];
+
+ SHashNode *pNode = pEntry->next;
+ while (pNode) {
+ if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
+ break;
+ }
+
+ pNode = pNode->next;
+ }
+
+ if (pNode) {
+ assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot);
+ }
+
+ // return the calculated hash value, to avoid calculating it again in other functions
+ if (hashVal != NULL) {
+ *hashVal = hash;
+ }
+
+ return pNode;
+}
/**
* Resize the hash list if the threshold is reached
@@ -438,33 +463,6 @@ void doUpdateHashTable(SHashObj *pHashObj, SHashNode *pNode) {
}
}
-SHashNode *doGetNodeFromHashTable(SHashObj *pHashObj, const void *key, uint32_t keyLen, uint32_t *hashVal) {
- uint32_t hash = (*pHashObj->hashFp)(key, keyLen);
-
- int32_t slot = HASH_INDEX(hash, pHashObj->capacity);
- SHashEntry *pEntry = pHashObj->hashList[slot];
-
- SHashNode *pNode = pEntry->next;
- while (pNode) {
- if ((pNode->keyLen == keyLen) && (memcmp(pNode->key, key, keyLen) == 0)) {
- break;
- }
-
- pNode = pNode->next;
- }
-
- if (pNode) {
- assert(HASH_INDEX(pNode->hashVal, pHashObj->capacity) == slot);
- }
-
- // return the calculated hash value, to avoid calculating it again in other functions
- if (hashVal != NULL) {
- *hashVal = hash;
- }
-
- return pNode;
-}
-
void taosHashTableResize(SHashObj *pHashObj) {
if (pHashObj->size < pHashObj->capacity * HASH_DEFAULT_LOAD_FACTOR) {
return;
diff --git a/src/util/src/thashutil.c b/src/util/src/thashutil.c
index f4b4e9faa2..33f0d6d430 100644
--- a/src/util/src/thashutil.c
+++ b/src/util/src/thashutil.c
@@ -10,7 +10,7 @@
#include "hashfunc.h"
#include "tutil.h"
-#define ROTL32(x, r) ((x) << (r) | (x) >> (32 - (r)))
+#define ROTL32(x, r) ((x) << (r) | (x) >> (32u - (r)))
#define FMIX32(h) \
do { \
@@ -20,12 +20,12 @@
(h) *= 0xc2b2ae35; \
(h) ^= (h) >> 16; \
} while (0)
-
-static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out) {
+
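+// 32-bit MurmurHash3 with a fixed seed, folded in from the former MurmurHash3_32_s helper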
+uint32_t MurmurHash3_32(const char *key, uint32_t len) {
const uint8_t *data = (const uint8_t *)key;
- const int nblocks = len / 4;
+ const int nblocks = len >> 2u;
- uint32_t h1 = seed;
+ uint32_t h1 = 0x12345678;
const uint32_t c1 = 0xcc9e2d51;
const uint32_t c2 = 0x1b873593;
@@ -36,11 +36,11 @@ static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out)
uint32_t k1 = blocks[i];
k1 *= c1;
- k1 = ROTL32(k1, 15);
+ k1 = ROTL32(k1, 15u);
k1 *= c2;
h1 ^= k1;
- h1 = ROTL32(h1, 13);
+ h1 = ROTL32(h1, 13u);
h1 = h1 * 5 + 0xe6546b64;
}
@@ -48,7 +48,7 @@ static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out)
uint32_t k1 = 0;
- switch (len & 3) {
+ switch (len & 3u) {
case 3:
k1 ^= tail[2] << 16;
case 2:
@@ -56,7 +56,7 @@ static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out)
case 1:
k1 ^= tail[0];
k1 *= c1;
- k1 = ROTL32(k1, 15);
+ k1 = ROTL32(k1, 15u);
k1 *= c2;
h1 ^= k1;
};
@@ -65,16 +65,7 @@ static void MurmurHash3_32_s(const void *key, int len, uint32_t seed, void *out)
FMIX32(h1);
- *(uint32_t *)out = h1;
-}
-
-uint32_t MurmurHash3_32(const char *key, uint32_t len) {
- const int32_t hashSeed = 0x12345678;
-
- uint32_t val = 0;
- MurmurHash3_32_s(key, len, hashSeed, &val);
-
- return val;
+ return h1;
}
uint32_t taosIntHash_32(const char *key, uint32_t UNUSED_PARAM(len)) { return *(uint32_t *)key; }
diff --git a/src/vnode/src/vnodeMain.c b/src/vnode/src/vnodeMain.c
index 4514d80a54..6f0b19b0c6 100644
--- a/src/vnode/src/vnodeMain.c
+++ b/src/vnode/src/vnodeMain.c
@@ -73,13 +73,18 @@ int32_t vnodeCreate(SMDCreateVnodeMsg *pVnodeCfg) {
return TSDB_CODE_SUCCESS;
}
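+ // create the parent vnode directory first; the return value is not checked since it may already exist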
+ mkdir(tsVnodeDir, 0755);
+
char rootDir[TSDB_FILENAME_LEN] = {0};
sprintf(rootDir, "%s/vnode%d", tsVnodeDir, pVnodeCfg->cfg.vgId);
if (mkdir(rootDir, 0755) != 0) {
+ vPrint("vgId:%d, failed to create vnode, reason:%s dir:%s", pVnodeCfg->cfg.vgId, strerror(errno), rootDir);
if (errno == EACCES) {
return TSDB_CODE_NO_DISK_PERMISSIONS;
} else if (errno == ENOSPC) {
return TSDB_CODE_SERV_NO_DISKSPACE;
+ } else if (errno == ENOENT) {
+ return TSDB_CODE_NOT_SUCH_FILE_OR_DIR;
} else if (errno == EEXIST) {
} else {
return TSDB_CODE_VG_INIT_FAILED;
@@ -239,6 +244,10 @@ int32_t vnodeOpen(int32_t vnode, char *rootDir) {
syncInfo.notifyFileSynced = vnodeNotifyFileSynced;
pVnode->sync = syncStart(&syncInfo);
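+ // when the sync module is not compiled in, the vnode assumes the master role directly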
+#ifndef _SYNC
+ pVnode->role = TAOS_SYNC_ROLE_MASTER;
+#endif
+
// start continuous query
if (pVnode->role == TAOS_SYNC_ROLE_MASTER)
cqStart(pVnode->cq);
@@ -429,7 +438,7 @@ static void vnodeNotifyRole(void *ahandle, int8_t role) {
static void vnodeNotifyFileSynced(void *ahandle, uint64_t fversion) {
SVnodeObj *pVnode = ahandle;
- vTrace("vgId:%d, data file is synced, fversion:%" PRId64 "", pVnode->vgId, fversion);
+ vTrace("vgId:%d, data file is synced, fversion:%" PRId64, pVnode->vgId, fversion);
pVnode->fversion = fversion;
pVnode->version = fversion;
diff --git a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md
index 7c42d47d1b..b285fe8155 100644
--- a/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md
+++ b/tests/How-To-Run-Test-And-How-To-Add-New-Test-Case.md
@@ -11,6 +11,19 @@
4. pip install src/connector/python/linux/python2 ; pip3 install
src/connector/python/linux/python3
+> Note: Both Python2 and Python3 are currently supported by the Python test
+> framework. Since Python2 has not been officially supported by the Python
+> Software Foundation since January 1, 2020, it is recommended that subsequent
+> test case development be guaranteed to run correctly on Python3.
+
+> For Python2, please keep test cases compatible where doing so adds no
+> additional burden.
+>
+> If you use a newer Linux distribution such as Ubuntu 20.04, which no longer
+> includes Python2, please do not install Python2-related packages.
+
### How to run Python test suite
1. cd \/tests/pytest
@@ -211,13 +224,6 @@ def checkAffectedRows(self, expectAffectedRows):
...
-> Note: Both Python2 and Python3 are currently supported by the Python test
-> case. Since Python2 is no longer officially supported by January 1, 2020, it
-> is recommended that subsequent test case development be guaranteed to run
-> correctly on Python3. For Python2, please consider being compatible if
-> appropriate without additional
-> burden. Â
-
### CI submission adoption principle.
- Every commit / PR compilation must pass. Currently, the warning is treated
diff --git a/tests/pytest/account/account_create.py b/tests/pytest/account/account_create.py
index 24bcc8df2f..85adfff199 100644
--- a/tests/pytest/account/account_create.py
+++ b/tests/pytest/account/account_create.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
try:
diff --git a/tests/pytest/account/account_del.py b/tests/pytest/account/account_del.py
index 43690c6e37..7f05a39765 100644
--- a/tests/pytest/account/account_del.py
+++ b/tests/pytest/account/account_del.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
print("==========step1")
diff --git a/tests/pytest/alter/alter_stable.py b/tests/pytest/alter/alter_stable.py
index 5772edcf7f..6852f4ef4a 100644
--- a/tests/pytest/alter/alter_stable.py
+++ b/tests/pytest/alter/alter_stable.py
@@ -9,9 +9,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
self.types = [
"int",
"bigint",
diff --git a/tests/pytest/alter/alter_table.py b/tests/pytest/alter/alter_table.py
index 6e0c591da6..6d4f72556b 100644
--- a/tests/pytest/alter/alter_table.py
+++ b/tests/pytest/alter/alter_table.py
@@ -9,9 +9,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
self.types = [
"int",
"bigint",
diff --git a/tests/pytest/alter/file_corrupt.py b/tests/pytest/alter/file_corrupt.py
index 51ea882270..28d50cdd38 100644
--- a/tests/pytest/alter/file_corrupt.py
+++ b/tests/pytest/alter/file_corrupt.py
@@ -19,9 +19,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/dbmgmt/database-name-boundary.py b/tests/pytest/dbmgmt/database-name-boundary.py
index ff6dce22ae..df3b027ba7 100644
--- a/tests/pytest/dbmgmt/database-name-boundary.py
+++ b/tests/pytest/dbmgmt/database-name-boundary.py
@@ -23,9 +23,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
diff --git a/tests/pytest/dbmgmt/dropDB_memory_test.py b/tests/pytest/dbmgmt/dropDB_memory_test.py
index b029945be2..0f0925c268 100644
--- a/tests/pytest/dbmgmt/dropDB_memory_test.py
+++ b/tests/pytest/dbmgmt/dropDB_memory_test.py
@@ -19,9 +19,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh
index ab5db30051..173062fb1f 100755
--- a/tests/pytest/fulltest.sh
+++ b/tests/pytest/fulltest.sh
@@ -125,3 +125,6 @@ python3 ./test.py $1 -f user/pass_len.py
# table
#python3 ./test.py $1 -f table/del_stable.py
+#query
+python3 ./test.py $1 -f query/filter.py
+
diff --git a/tests/pytest/import_merge/importBlock1H.py b/tests/pytest/import_merge/importBlock1H.py
index aef29444d6..fca37ff1b1 100644
--- a/tests/pytest/import_merge/importBlock1H.py
+++ b/tests/pytest/import_merge/importBlock1H.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock1HO.py b/tests/pytest/import_merge/importBlock1HO.py
index ad4bcf1288..c853d3a6af 100644
--- a/tests/pytest/import_merge/importBlock1HO.py
+++ b/tests/pytest/import_merge/importBlock1HO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock1HPO.py b/tests/pytest/import_merge/importBlock1HPO.py
index 6aabc035ec..968f21f50f 100644
--- a/tests/pytest/import_merge/importBlock1HPO.py
+++ b/tests/pytest/import_merge/importBlock1HPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock1S.py b/tests/pytest/import_merge/importBlock1S.py
index 4b2adfb961..6a0ce18edf 100644
--- a/tests/pytest/import_merge/importBlock1S.py
+++ b/tests/pytest/import_merge/importBlock1S.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock1Sub.py b/tests/pytest/import_merge/importBlock1Sub.py
index 343b87c757..2eaf5fc26e 100644
--- a/tests/pytest/import_merge/importBlock1Sub.py
+++ b/tests/pytest/import_merge/importBlock1Sub.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock1T.py b/tests/pytest/import_merge/importBlock1T.py
index 40f4bbfdec..ffac67a7e1 100644
--- a/tests/pytest/import_merge/importBlock1T.py
+++ b/tests/pytest/import_merge/importBlock1T.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock1TO.py b/tests/pytest/import_merge/importBlock1TO.py
index db8b036d6f..5d6c6624d9 100644
--- a/tests/pytest/import_merge/importBlock1TO.py
+++ b/tests/pytest/import_merge/importBlock1TO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock1TPO.py b/tests/pytest/import_merge/importBlock1TPO.py
index f2361712e7..e7807e892a 100644
--- a/tests/pytest/import_merge/importBlock1TPO.py
+++ b/tests/pytest/import_merge/importBlock1TPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2H.py b/tests/pytest/import_merge/importBlock2H.py
index 62552980bd..eea1e4bcf8 100644
--- a/tests/pytest/import_merge/importBlock2H.py
+++ b/tests/pytest/import_merge/importBlock2H.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2HO.py b/tests/pytest/import_merge/importBlock2HO.py
index 0f53210f4a..99a9662f1c 100644
--- a/tests/pytest/import_merge/importBlock2HO.py
+++ b/tests/pytest/import_merge/importBlock2HO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2HPO.py b/tests/pytest/import_merge/importBlock2HPO.py
index 3b7ffbbe44..54afef6c06 100644
--- a/tests/pytest/import_merge/importBlock2HPO.py
+++ b/tests/pytest/import_merge/importBlock2HPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2S.py b/tests/pytest/import_merge/importBlock2S.py
index 69b0291839..9d4757709a 100644
--- a/tests/pytest/import_merge/importBlock2S.py
+++ b/tests/pytest/import_merge/importBlock2S.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2Sub.py b/tests/pytest/import_merge/importBlock2Sub.py
index 5b93750584..db43a492bc 100644
--- a/tests/pytest/import_merge/importBlock2Sub.py
+++ b/tests/pytest/import_merge/importBlock2Sub.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2T.py b/tests/pytest/import_merge/importBlock2T.py
index 0d9b70299d..c0d85fa3b0 100644
--- a/tests/pytest/import_merge/importBlock2T.py
+++ b/tests/pytest/import_merge/importBlock2T.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2TO.py b/tests/pytest/import_merge/importBlock2TO.py
index fe57308c42..05692c336d 100644
--- a/tests/pytest/import_merge/importBlock2TO.py
+++ b/tests/pytest/import_merge/importBlock2TO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlock2TPO.py b/tests/pytest/import_merge/importBlock2TPO.py
index 4da52bd3f4..817d2db382 100644
--- a/tests/pytest/import_merge/importBlock2TPO.py
+++ b/tests/pytest/import_merge/importBlock2TPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importBlockbetween.py b/tests/pytest/import_merge/importBlockbetween.py
index eea7f7ea7c..2e4ac8a737 100644
--- a/tests/pytest/import_merge/importBlockbetween.py
+++ b/tests/pytest/import_merge/importBlockbetween.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileH.py b/tests/pytest/import_merge/importCacheFileH.py
index cd2b3a73f1..3398f7bdad 100644
--- a/tests/pytest/import_merge/importCacheFileH.py
+++ b/tests/pytest/import_merge/importCacheFileH.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileHO.py b/tests/pytest/import_merge/importCacheFileHO.py
index 2e65c337b7..19520dc3d0 100644
--- a/tests/pytest/import_merge/importCacheFileHO.py
+++ b/tests/pytest/import_merge/importCacheFileHO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileHPO.py b/tests/pytest/import_merge/importCacheFileHPO.py
index f01ebd0d60..9e0a57fb46 100644
--- a/tests/pytest/import_merge/importCacheFileHPO.py
+++ b/tests/pytest/import_merge/importCacheFileHPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileS.py b/tests/pytest/import_merge/importCacheFileS.py
index 0bb9107562..2f0af569e5 100644
--- a/tests/pytest/import_merge/importCacheFileS.py
+++ b/tests/pytest/import_merge/importCacheFileS.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileSub.py b/tests/pytest/import_merge/importCacheFileSub.py
index cd5d250968..300bb6e8d0 100644
--- a/tests/pytest/import_merge/importCacheFileSub.py
+++ b/tests/pytest/import_merge/importCacheFileSub.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileT.py b/tests/pytest/import_merge/importCacheFileT.py
index be79e26bc7..ab33cf6a93 100644
--- a/tests/pytest/import_merge/importCacheFileT.py
+++ b/tests/pytest/import_merge/importCacheFileT.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileTO.py b/tests/pytest/import_merge/importCacheFileTO.py
index dd17de3adf..00e22da976 100644
--- a/tests/pytest/import_merge/importCacheFileTO.py
+++ b/tests/pytest/import_merge/importCacheFileTO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importCacheFileTPO.py b/tests/pytest/import_merge/importCacheFileTPO.py
index 948b99ed21..c6089e1d68 100644
--- a/tests/pytest/import_merge/importCacheFileTPO.py
+++ b/tests/pytest/import_merge/importCacheFileTPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataH2.py b/tests/pytest/import_merge/importDataH2.py
index b5e53d862e..a21f0c47be 100644
--- a/tests/pytest/import_merge/importDataH2.py
+++ b/tests/pytest/import_merge/importDataH2.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataHO.py b/tests/pytest/import_merge/importDataHO.py
index f6d65a5c53..fdcaedd83c 100644
--- a/tests/pytest/import_merge/importDataHO.py
+++ b/tests/pytest/import_merge/importDataHO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataHO2.py b/tests/pytest/import_merge/importDataHO2.py
index 6246b55b32..b094701132 100644
--- a/tests/pytest/import_merge/importDataHO2.py
+++ b/tests/pytest/import_merge/importDataHO2.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataHPO.py b/tests/pytest/import_merge/importDataHPO.py
index c749dbd113..9d74c0c352 100644
--- a/tests/pytest/import_merge/importDataHPO.py
+++ b/tests/pytest/import_merge/importDataHPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastH.py b/tests/pytest/import_merge/importDataLastH.py
index 830711a420..c8e5f62423 100644
--- a/tests/pytest/import_merge/importDataLastH.py
+++ b/tests/pytest/import_merge/importDataLastH.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastHO.py b/tests/pytest/import_merge/importDataLastHO.py
index 037c81f087..33215997a4 100644
--- a/tests/pytest/import_merge/importDataLastHO.py
+++ b/tests/pytest/import_merge/importDataLastHO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastHPO.py b/tests/pytest/import_merge/importDataLastHPO.py
index 46a7e5909d..fa8542f35b 100644
--- a/tests/pytest/import_merge/importDataLastHPO.py
+++ b/tests/pytest/import_merge/importDataLastHPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastS.py b/tests/pytest/import_merge/importDataLastS.py
index 2dd7cdb744..2f595fef54 100644
--- a/tests/pytest/import_merge/importDataLastS.py
+++ b/tests/pytest/import_merge/importDataLastS.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastSub.py b/tests/pytest/import_merge/importDataLastSub.py
index bb9953057e..5c2069c90f 100644
--- a/tests/pytest/import_merge/importDataLastSub.py
+++ b/tests/pytest/import_merge/importDataLastSub.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastT.py b/tests/pytest/import_merge/importDataLastT.py
index 29f0afaf1a..08e944eb68 100644
--- a/tests/pytest/import_merge/importDataLastT.py
+++ b/tests/pytest/import_merge/importDataLastT.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastTO.py b/tests/pytest/import_merge/importDataLastTO.py
index 47639130b5..a82c054141 100644
--- a/tests/pytest/import_merge/importDataLastTO.py
+++ b/tests/pytest/import_merge/importDataLastTO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataLastTPO.py b/tests/pytest/import_merge/importDataLastTPO.py
index 4190836505..ff75a1b2ae 100644
--- a/tests/pytest/import_merge/importDataLastTPO.py
+++ b/tests/pytest/import_merge/importDataLastTPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataS.py b/tests/pytest/import_merge/importDataS.py
index daa4b2e025..37627e8d6b 100644
--- a/tests/pytest/import_merge/importDataS.py
+++ b/tests/pytest/import_merge/importDataS.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataSub.py b/tests/pytest/import_merge/importDataSub.py
index e946a254c2..17e2b141b7 100644
--- a/tests/pytest/import_merge/importDataSub.py
+++ b/tests/pytest/import_merge/importDataSub.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataT.py b/tests/pytest/import_merge/importDataT.py
index abb5e312ef..b0b7b82b79 100644
--- a/tests/pytest/import_merge/importDataT.py
+++ b/tests/pytest/import_merge/importDataT.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataTO.py b/tests/pytest/import_merge/importDataTO.py
index 2a6d9e272b..c0b57136af 100644
--- a/tests/pytest/import_merge/importDataTO.py
+++ b/tests/pytest/import_merge/importDataTO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importDataTPO.py b/tests/pytest/import_merge/importDataTPO.py
index 06d5cf3c1a..8a1c9264b4 100644
--- a/tests/pytest/import_merge/importDataTPO.py
+++ b/tests/pytest/import_merge/importDataTPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importHORestart.py b/tests/pytest/import_merge/importHORestart.py
index cfbfa61c90..f74c4c76d6 100644
--- a/tests/pytest/import_merge/importHORestart.py
+++ b/tests/pytest/import_merge/importHORestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importHPORestart.py b/tests/pytest/import_merge/importHPORestart.py
index 7e96d44a1a..e5f79fbe6c 100644
--- a/tests/pytest/import_merge/importHPORestart.py
+++ b/tests/pytest/import_merge/importHPORestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importHRestart.py b/tests/pytest/import_merge/importHRestart.py
index aa1783977e..be67039789 100644
--- a/tests/pytest/import_merge/importHRestart.py
+++ b/tests/pytest/import_merge/importHRestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importHead.py b/tests/pytest/import_merge/importHead.py
index 6971986ebc..80e6d92c69 100644
--- a/tests/pytest/import_merge/importHead.py
+++ b/tests/pytest/import_merge/importHead.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importHeadOverlap.py b/tests/pytest/import_merge/importHeadOverlap.py
index df5f07b5a2..535c0c2859 100644
--- a/tests/pytest/import_merge/importHeadOverlap.py
+++ b/tests/pytest/import_merge/importHeadOverlap.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importHeadPartOverlap.py b/tests/pytest/import_merge/importHeadPartOverlap.py
index 8c9885e22f..85ddd64f0a 100644
--- a/tests/pytest/import_merge/importHeadPartOverlap.py
+++ b/tests/pytest/import_merge/importHeadPartOverlap.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastH.py b/tests/pytest/import_merge/importLastH.py
index a6f9fa087c..17fa233e37 100644
--- a/tests/pytest/import_merge/importLastH.py
+++ b/tests/pytest/import_merge/importLastH.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastHO.py b/tests/pytest/import_merge/importLastHO.py
index e6468b243e..adb44fc0ea 100644
--- a/tests/pytest/import_merge/importLastHO.py
+++ b/tests/pytest/import_merge/importLastHO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastHPO.py b/tests/pytest/import_merge/importLastHPO.py
index 4a299ed823..d8ed2d9ef1 100644
--- a/tests/pytest/import_merge/importLastHPO.py
+++ b/tests/pytest/import_merge/importLastHPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastS.py b/tests/pytest/import_merge/importLastS.py
index 2a5de46eb2..bf222a0d5f 100644
--- a/tests/pytest/import_merge/importLastS.py
+++ b/tests/pytest/import_merge/importLastS.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastSub.py b/tests/pytest/import_merge/importLastSub.py
index fa1b2387f3..5a6b9f4150 100644
--- a/tests/pytest/import_merge/importLastSub.py
+++ b/tests/pytest/import_merge/importLastSub.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastT.py b/tests/pytest/import_merge/importLastT.py
index b7a1e58bc5..2b1be1fe2b 100644
--- a/tests/pytest/import_merge/importLastT.py
+++ b/tests/pytest/import_merge/importLastT.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastTO.py b/tests/pytest/import_merge/importLastTO.py
index 541cbd29ca..ce189f6371 100644
--- a/tests/pytest/import_merge/importLastTO.py
+++ b/tests/pytest/import_merge/importLastTO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importLastTPO.py b/tests/pytest/import_merge/importLastTPO.py
index 6ec21d0c79..627d090855 100644
--- a/tests/pytest/import_merge/importLastTPO.py
+++ b/tests/pytest/import_merge/importLastTPO.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importSRestart.py b/tests/pytest/import_merge/importSRestart.py
index 0771b8bf9c..29f5a19b54 100644
--- a/tests/pytest/import_merge/importSRestart.py
+++ b/tests/pytest/import_merge/importSRestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importSpan.py b/tests/pytest/import_merge/importSpan.py
index 736c4bad64..4e20e9754f 100644
--- a/tests/pytest/import_merge/importSpan.py
+++ b/tests/pytest/import_merge/importSpan.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importSubRestart.py b/tests/pytest/import_merge/importSubRestart.py
index f7f33d32c1..b1a6f30c43 100644
--- a/tests/pytest/import_merge/importSubRestart.py
+++ b/tests/pytest/import_merge/importSubRestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importTORestart.py b/tests/pytest/import_merge/importTORestart.py
index 194756cd12..07eb6c28cb 100644
--- a/tests/pytest/import_merge/importTORestart.py
+++ b/tests/pytest/import_merge/importTORestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importTPORestart.py b/tests/pytest/import_merge/importTPORestart.py
index 36d4b64390..10bbf3efce 100644
--- a/tests/pytest/import_merge/importTPORestart.py
+++ b/tests/pytest/import_merge/importTPORestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importTRestart.py b/tests/pytest/import_merge/importTRestart.py
index 9308518d8c..63a9368eca 100644
--- a/tests/pytest/import_merge/importTRestart.py
+++ b/tests/pytest/import_merge/importTRestart.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importTail.py b/tests/pytest/import_merge/importTail.py
index a80db730a0..a552edb244 100644
--- a/tests/pytest/import_merge/importTail.py
+++ b/tests/pytest/import_merge/importTail.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importTailOverlap.py b/tests/pytest/import_merge/importTailOverlap.py
index 98596d2f77..78e51f93f9 100644
--- a/tests/pytest/import_merge/importTailOverlap.py
+++ b/tests/pytest/import_merge/importTailOverlap.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importTailPartOverlap.py b/tests/pytest/import_merge/importTailPartOverlap.py
index 0263114a25..e433cb66f3 100644
--- a/tests/pytest/import_merge/importTailPartOverlap.py
+++ b/tests/pytest/import_merge/importTailPartOverlap.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
diff --git a/tests/pytest/import_merge/importToCommit.py b/tests/pytest/import_merge/importToCommit.py
index b1a0065d47..dd2c27918a 100644
--- a/tests/pytest/import_merge/importToCommit.py
+++ b/tests/pytest/import_merge/importToCommit.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 1
@@ -45,7 +45,8 @@ class TDTestCase:
tdLog.info("================= step2")
tdLog.info('insert data until the first commit')
dnodesDir = tdDnodes.getDnodesRootDir()
- dataDir = dnodesDir + '/dnode1/data/data'
+ dataDir = dnodesDir + '/dnode1/data/vnode'
+ tdLog.info('dataDir=%s' % dataDir)
startTime = self.startTime
rid0 = 1
while (True):
diff --git a/tests/pytest/insert/basic.py b/tests/pytest/insert/basic.py
index 35f830d951..dcb5834d55 100644
--- a/tests/pytest/insert/basic.py
+++ b/tests/pytest/insert/basic.py
@@ -18,9 +18,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/bigint.py b/tests/pytest/insert/bigint.py
index 214200900a..7c7d2d0f95 100644
--- a/tests/pytest/insert/bigint.py
+++ b/tests/pytest/insert/bigint.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/binary-boundary.py b/tests/pytest/insert/binary-boundary.py
index 583217a732..bb86ddf61c 100644
--- a/tests/pytest/insert/binary-boundary.py
+++ b/tests/pytest/insert/binary-boundary.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/binary.py b/tests/pytest/insert/binary.py
index 677ae4bf29..0d583aa2cc 100644
--- a/tests/pytest/insert/binary.py
+++ b/tests/pytest/insert/binary.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/bool.py b/tests/pytest/insert/bool.py
index c175afd8b5..1412567cd7 100644
--- a/tests/pytest/insert/bool.py
+++ b/tests/pytest/insert/bool.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/date.py b/tests/pytest/insert/date.py
index fb7f1816c4..6e22e5b72e 100644
--- a/tests/pytest/insert/date.py
+++ b/tests/pytest/insert/date.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/double.py b/tests/pytest/insert/double.py
index 19be9653c0..1b66ed1c44 100644
--- a/tests/pytest/insert/double.py
+++ b/tests/pytest/insert/double.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/float.py b/tests/pytest/insert/float.py
index 414833877e..8b6277d436 100644
--- a/tests/pytest/insert/float.py
+++ b/tests/pytest/insert/float.py
@@ -19,9 +19,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/int.py b/tests/pytest/insert/int.py
index 350426a5bd..4a94a331d6 100644
--- a/tests/pytest/insert/int.py
+++ b/tests/pytest/insert/int.py
@@ -20,9 +20,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/multi.py b/tests/pytest/insert/multi.py
index c14d7dc2e0..5f81f12fdb 100644
--- a/tests/pytest/insert/multi.py
+++ b/tests/pytest/insert/multi.py
@@ -20,9 +20,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/nchar-boundary.py b/tests/pytest/insert/nchar-boundary.py
index 255cc5b79a..05a66f6066 100644
--- a/tests/pytest/insert/nchar-boundary.py
+++ b/tests/pytest/insert/nchar-boundary.py
@@ -18,9 +18,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/nchar-unicode.py b/tests/pytest/insert/nchar-unicode.py
index 12eef379d3..152a09723e 100644
--- a/tests/pytest/insert/nchar-unicode.py
+++ b/tests/pytest/insert/nchar-unicode.py
@@ -18,9 +18,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/nchar.py b/tests/pytest/insert/nchar.py
index d61a44844d..b8e365f143 100644
--- a/tests/pytest/insert/nchar.py
+++ b/tests/pytest/insert/nchar.py
@@ -18,9 +18,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/smallint.py b/tests/pytest/insert/smallint.py
index 0d85e45cfa..16322e9aeb 100644
--- a/tests/pytest/insert/smallint.py
+++ b/tests/pytest/insert/smallint.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/insert/tinyint.py b/tests/pytest/insert/tinyint.py
index 6d7178054e..a10c999e8c 100644
--- a/tests/pytest/insert/tinyint.py
+++ b/tests/pytest/insert/tinyint.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/query/filter.py b/tests/pytest/query/filter.py
new file mode 100644
index 0000000000..e58907aa47
--- /dev/null
+++ b/tests/pytest/query/filter.py
@@ -0,0 +1,114 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import *
+from util.cases import *
+from util.sql import *
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ print("==============step1")
+ tdSql.execute(
+ "create table if not exists st (ts timestamp, tagtype int, name nchar(16)) tags(dev nchar(50))")
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_001 using st tags("dev_01")')
+ tdSql.execute(
+ 'CREATE TABLE if not exists dev_002 using st tags("dev_02")')
+
+ print("==============step2")
+
+ tdSql.execute(
+ """INSERT INTO dev_001(ts, tagtype, name) VALUES('2020-05-13 10:00:00.000', 1, 'first'),('2020-05-13 10:00:00.001', 2, 'second'),
+ ('2020-05-13 10:00:00.002', 3, 'third') dev_002 VALUES('2020-05-13 10:00:00.003', 1, 'first'), ('2020-05-13 10:00:00.004', 2, 'second'),
+ ('2020-05-13 10:00:00.005', 3, 'third')""")
+
+ # > for timestamp type
+ tdSql.query("select * from db.st where ts > '2020-05-13 10:00:00.002'")
+ tdSql.checkRows(3)
+
+ # > for numeric type
+ tdSql.query("select * from db.st where tagtype > 2")
+ tdSql.checkRows(2)
+
+ # < for timestamp type
+ tdSql.query("select * from db.st where ts < '2020-05-13 10:00:00.002'")
+ tdSql.checkRows(2)
+
+ # < for numeric type
+ tdSql.query("select * from db.st where tagtype < 2")
+ tdSql.checkRows(2)
+
+ # >= for timestamp type
+ tdSql.query("select * from db.st where ts >= '2020-05-13 10:00:00.002'")
+ tdSql.checkRows(4)
+
+ # >= for numeric type
+ tdSql.query("select * from db.st where tagtype >= 2")
+ tdSql.checkRows(4)
+
+ # <= for timestamp type
+ tdSql.query("select * from db.st where ts <= '2020-05-13 10:00:00.002'")
+ tdSql.checkRows(3)
+
+ # <= for numeric type
+ tdSql.query("select * from db.st where tagtype <= 2")
+ tdSql.checkRows(4)
+
+ # = for timestamp type
+ tdSql.query("select * from db.st where ts = '2020-05-13 10:00:00.002'")
+ tdSql.checkRows(1)
+
+ # = for numeric type
+ tdSql.query("select * from db.st where tagtype = 2")
+ tdSql.checkRows(2)
+
+ # = for nchar type
+ tdSql.query("select * from db.st where name = 'first'")
+ tdSql.checkRows(2)
+
+ # <> for timestamp type
+ tdSql.query("select * from db.st where ts <> '2020-05-13 10:00:00.002'")
+ #tdSql.checkRows(4)
+
+ # <> for numeric type
+ tdSql.query("select * from db.st where tagtype <> 2")
+ tdSql.checkRows(4)
+
+ # <> for nchar type
+ tdSql.query("select * from db.st where name <> 'first'")
+ tdSql.checkRows(4)
+
+ # % for nchar type
+ tdSql.query("select * from db.st where name like 'fi%'")
+ tdSql.checkRows(2)
+
+ # _ (single-character wildcard) for nchar type
+ tdSql.query("select * from db.st where name like '_econd'")
+ tdSql.checkRows(2)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/query/query.py b/tests/pytest/query/query.py
index 68b8baab64..da0ef96d0e 100644
--- a/tests/pytest/query/query.py
+++ b/tests/pytest/query/query.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/query/tbname.py b/tests/pytest/query/tbname.py
index 5ea89fff82..08416ba3ed 100644
--- a/tests/pytest/query/tbname.py
+++ b/tests/pytest/query/tbname.py
@@ -19,9 +19,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/random-test/random-test.py b/tests/pytest/random-test/random-test.py
index a3f4c00ea5..4e1e6ac2fa 100644
--- a/tests/pytest/random-test/random-test.py
+++ b/tests/pytest/random-test/random-test.py
@@ -108,11 +108,23 @@ class Test:
tdDnodes.start(1)
tdSql.prepare()
+ def delete_datafiles(self):
+ tdLog.info("delete data files")
+ dnodesDir = tdDnodes.getDnodesRootDir()
+ dataDir = dnodesDir + '/dnode1/*'
+ deleteCmd = 'rm -rf %s' % dataDir
+ os.system(deleteCmd)
+
+ self.current_tb = ""
+ self.last_tb = ""
+ self.written = 0
+ tdDnodes.start(1)
+ tdSql.prepare()
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
@@ -129,10 +141,11 @@ class TDTestCase:
7: test.drop_table,
8: test.reset_query_cache,
9: test.reset_database,
+ 10: test.delete_datafiles,
}
for x in range(1, 100):
- r = random.randint(1, 9)
+ r = random.randint(1, 10)
tdLog.notice("iteration %d run func %d" % (x, r))
switch.get(r, lambda: "ERROR")()
diff --git a/tests/pytest/smoketest.sh b/tests/pytest/smoketest.sh
index 5f48789d45..16fa38487d 100755
--- a/tests/pytest/smoketest.sh
+++ b/tests/pytest/smoketest.sh
@@ -50,3 +50,8 @@ python3 ./test.py $1 -s && sleep 1
#tag
python3 ./test.py $1 -f tag_lite/filter.py
python3 ./test.py $1 -s && sleep 1
+
+#query
+python3 ./test.py $1 -f query/filter.py
+python3 ./test.py $1 -s && sleep 1
+
diff --git a/tests/pytest/stable/insert.py b/tests/pytest/stable/insert.py
index 9f9e7c6e06..3d37e6726c 100644
--- a/tests/pytest/stable/insert.py
+++ b/tests/pytest/stable/insert.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/table/alter_column.py b/tests/pytest/table/alter_column.py
index 15bc9f7aa7..73fdbe1ae9 100644
--- a/tests/pytest/table/alter_column.py
+++ b/tests/pytest/table/alter_column.py
@@ -20,9 +20,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/table/boundary.py b/tests/pytest/table/boundary.py
index faa222231b..b68671c61a 100644
--- a/tests/pytest/table/boundary.py
+++ b/tests/pytest/table/boundary.py
@@ -10,140 +10,133 @@ from util.sql import *
class TDTestCase:
- def init( self, conn ):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
-
- def getLimitFromSourceCode( self, name ):
+ def getLimitFromSourceCode(self, name):
cmd = "grep -w '#define %s' ../../src/inc/taosdef.h|awk '{print $3}'" % name
return int(subprocess.check_output(cmd, shell=True))
-
- def generateString( self, length ):
+ def generateString(self, length):
chars = string.ascii_uppercase + string.ascii_lowercase
v = ""
- for i in range( length ):
- v += random.choice( chars )
+ for i in range(length):
+ v += random.choice(chars)
return v
-
- def checkTagBoundaries( self ):
- tdLog.debug( "checking tag boundaries" )
+ def checkTagBoundaries(self):
+ tdLog.debug("checking tag boundaries")
tdSql.prepare()
- maxTags = self.getLimitFromSourceCode( 'TSDB_MAX_TAGS' )
- totalTagsLen = self.getLimitFromSourceCode( 'TSDB_MAX_TAGS_LEN' )
- tdLog.notice( "max tags is %d" % maxTags )
- tdLog.notice( "max total tag length is %d" % totalTagsLen )
+ maxTags = self.getLimitFromSourceCode('TSDB_MAX_TAGS')
+ totalTagsLen = self.getLimitFromSourceCode('TSDB_MAX_TAGS_LEN')
+ tdLog.notice("max tags is %d" % maxTags)
+ tdLog.notice("max total tag length is %d" % totalTagsLen)
# for binary tags, 2 bytes are used for length
tagLen = (totalTagsLen - maxTags * 2) // maxTags
firstTagLen = totalTagsLen - 2 * maxTags - tagLen * (maxTags - 1)
sql = "create table cars(ts timestamp, f int) tags(t0 binary(%d)" % firstTagLen
- for i in range( 1, maxTags ):
+ for i in range(1, maxTags):
sql += ", t%d binary(%d)" % (i, tagLen)
sql += ");"
- tdLog.debug( "creating super table: " + sql )
- tdSql.execute( sql )
- tdSql.query( 'show stables' )
- tdSql.checkRows( 1 )
+ tdLog.debug("creating super table: " + sql)
+ tdSql.execute(sql)
+ tdSql.query('show stables')
+ tdSql.checkRows(1)
- for i in range( 10 ):
+ for i in range(10):
sql = "create table car%d using cars tags('%d'" % (i, i)
sql += ", '0'" * (maxTags - 1) + ");"
- tdLog.debug( "creating table: " + sql )
- tdSql.execute( sql )
+ tdLog.debug("creating table: " + sql)
+ tdSql.execute(sql)
sql = "insert into car%d values(now, 0);" % i
- tdLog.debug( "inserting data: " + sql )
- tdSql.execute( sql )
+ tdLog.debug("inserting data: " + sql)
+ tdSql.execute(sql)
- tdSql.query( 'show tables' )
- tdLog.info( 'tdSql.checkRow(10)' )
- tdSql.checkRows( 10 )
+ tdSql.query('show tables')
+ tdLog.info('tdSql.checkRow(10)')
+ tdSql.checkRows(10)
- tdSql.query( 'select * from cars;' )
- tdSql.checkRows( 10 )
+ tdSql.query('select * from cars;')
+ tdSql.checkRows(10)
-
- def checkColumnBoundaries( self ):
- tdLog.debug( "checking column boundaries" )
+ def checkColumnBoundaries(self):
+ tdLog.debug("checking column boundaries")
tdSql.prepare()
# one column is for timestamp
- maxCols = self.getLimitFromSourceCode( 'TSDB_MAX_COLUMNS' ) - 1
+ maxCols = self.getLimitFromSourceCode('TSDB_MAX_COLUMNS') - 1
sql = "create table cars (ts timestamp"
- for i in range( maxCols ):
+ for i in range(maxCols):
sql += ", c%d int" % i
sql += ");"
- tdSql.execute( sql )
- tdSql.query( 'show tables' )
- tdSql.checkRows( 1 )
+ tdSql.execute(sql)
+ tdSql.query('show tables')
+ tdSql.checkRows(1)
sql = "insert into cars values (now"
- for i in range( maxCols ):
+ for i in range(maxCols):
sql += ", %d" % i
sql += ");"
- tdSql.execute( sql )
- tdSql.query( 'select * from cars' )
- tdSql.checkRows( 1 )
+ tdSql.execute(sql)
+ tdSql.query('select * from cars')
+ tdSql.checkRows(1)
-
- def checkTableNameBoundaries( self ):
- tdLog.debug( "checking table name boundaries" )
+ def checkTableNameBoundaries(self):
+ tdLog.debug("checking table name boundaries")
tdSql.prepare()
- maxTableNameLen = self.getLimitFromSourceCode( 'TSDB_TABLE_NAME_LEN' )
- tdLog.notice( "table name max length is %d" % maxTableNameLen )
+ maxTableNameLen = self.getLimitFromSourceCode('TSDB_TABLE_NAME_LEN')
+ tdLog.notice("table name max length is %d" % maxTableNameLen)
- name = self.generateString( maxTableNameLen - 1)
- tdLog.info( "table name is '%s'" % name )
+ name = self.generateString(maxTableNameLen - 1)
+ tdLog.info("table name is '%s'" % name)
- tdSql.execute( "create table %s (ts timestamp, value int)" % name )
- tdSql.execute( "insert into %s values(now, 0)" % name )
+ tdSql.execute("create table %s (ts timestamp, value int)" % name)
+ tdSql.execute("insert into %s values(now, 0)" % name)
- tdSql.query( 'show tables' )
- tdSql.checkRows( 1 )
+ tdSql.query('show tables')
+ tdSql.checkRows(1)
- tdSql.query( 'select * from %s' % name )
- tdSql.checkRows( 1 )
+ tdSql.query('select * from %s' % name)
+ tdSql.checkRows(1)
-
- def checkRowBoundaries( self ):
- tdLog.debug( "checking row boundaries" )
+ def checkRowBoundaries(self):
+ tdLog.debug("checking row boundaries")
tdSql.prepare()
# 8 bytes for timestamp
maxRowSize = 65536 - 8
- maxCols = self.getLimitFromSourceCode( 'TSDB_MAX_COLUMNS' ) - 1
+ maxCols = self.getLimitFromSourceCode('TSDB_MAX_COLUMNS') - 1
# for binary cols, 2 bytes are used for length
colLen = (maxRowSize - maxCols * 2) // maxCols
firstColLen = maxRowSize - 2 * maxCols - colLen * (maxCols - 1)
sql = "create table cars (ts timestamp, c0 binary(%d)" % firstColLen
- for i in range( 1, maxCols ):
+ for i in range(1, maxCols):
sql += ", c%d binary(%d)" % (i, colLen)
sql += ");"
- tdSql.execute( sql )
- tdSql.query( 'show tables' )
- tdSql.checkRows( 1 )
+ tdSql.execute(sql)
+ tdSql.query('show tables')
+ tdSql.checkRows(1)
- col = self.generateString( firstColLen )
+ col = self.generateString(firstColLen)
sql = "insert into cars values (now, '%s'" % col
- col = self.generateString( colLen )
- for i in range( 1, maxCols ):
- sql += ", '%s'" % col
+ col = self.generateString(colLen)
+ for i in range(1, maxCols):
+ sql += ", '%s'" % col
sql += ");"
- tdLog.info( sql );
- tdSql.execute( sql )
- tdSql.query( "select * from cars" )
- tdSql.checkRows( 1 )
-
+ tdLog.info(sql)
+ tdSql.execute(sql)
+ tdSql.query("select * from cars")
+ tdSql.checkRows(1)
def run(self):
self.checkTagBoundaries()
@@ -151,7 +144,6 @@ class TDTestCase:
self.checkTableNameBoundaries()
self.checkRowBoundaries()
-
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)
diff --git a/tests/pytest/table/column_name.py b/tests/pytest/table/column_name.py
index aa958fd60c..a180d3f752 100644
--- a/tests/pytest/table/column_name.py
+++ b/tests/pytest/table/column_name.py
@@ -10,9 +10,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/table/column_num.py b/tests/pytest/table/column_num.py
index 877f0409dc..3abedb083c 100644
--- a/tests/pytest/table/column_num.py
+++ b/tests/pytest/table/column_num.py
@@ -8,9 +8,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/table/create-a-lot.py b/tests/pytest/table/create-a-lot.py
new file mode 100644
index 0000000000..7db4a8eaca
--- /dev/null
+++ b/tests/pytest/table/create-a-lot.py
@@ -0,0 +1,45 @@
+###################################################################
+# Copyright (c) 2016 by TAOS Technologies, Inc.
+# All rights reserved.
+#
+# This file is proprietary and confidential to TAOS Technologies.
+# No part of this file may be reproduced, stored, transmitted,
+# disclosed or used in any form or by any means other than as
+# expressly provided by the written permission from Jianhui Tao
+#
+###################################################################
+
+# -*- coding: utf-8 -*-
+
+import sys
+import taos
+from util.log import tdLog
+from util.cases import tdCases
+from util.sql import tdSql
+
+
+class TDTestCase:
+ def init(self, conn, logSql):
+ tdLog.debug("start to execute %s" % __file__)
+ tdSql.init(conn.cursor(), logSql)
+
+ def run(self):
+ tdSql.prepare()
+
+ print("==============step1")
+ print("prepare data")
+
+ for x in range(0, 1000000):
+ tb_name = "tb%d" % x
+ tdLog.info("create table %s (ts timestamp, i int)" % tb_name)
+ tdSql.execute("create table %s (ts timestamp, i int)" % tb_name)
+ tdLog.info("insert into %s values(now, 1)" % tb_name)
+ tdSql.execute("insert into %s values(now, 1)" % tb_name)
+
+ def stop(self):
+ tdSql.close()
+ tdLog.success("%s successfully executed" % __file__)
+
+
+tdCases.addWindows(__file__, TDTestCase())
+tdCases.addLinux(__file__, TDTestCase())
diff --git a/tests/pytest/table/create.py b/tests/pytest/table/create.py
index b456b444f4..8fedd4e920 100644
--- a/tests/pytest/table/create.py
+++ b/tests/pytest/table/create.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/table/db_table.py b/tests/pytest/table/db_table.py
index d4a8568375..5ead829e26 100644
--- a/tests/pytest/table/db_table.py
+++ b/tests/pytest/table/db_table.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/table/del_stable.py b/tests/pytest/table/del_stable.py
index 3932f32536..e7fd0f1fb1 100644
--- a/tests/pytest/table/del_stable.py
+++ b/tests/pytest/table/del_stable.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/table/tablename-boundary.py b/tests/pytest/table/tablename-boundary.py
index 335073065c..ce843c3fe7 100644
--- a/tests/pytest/table/tablename-boundary.py
+++ b/tests/pytest/table/tablename-boundary.py
@@ -10,9 +10,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/3.py b/tests/pytest/tag_lite/3.py
index e5b5ba05e0..68d2e6c5ba 100644
--- a/tests/pytest/tag_lite/3.py
+++ b/tests/pytest/tag_lite/3.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/4.py b/tests/pytest/tag_lite/4.py
index 2b5b69a965..66641f9dfe 100644
--- a/tests/pytest/tag_lite/4.py
+++ b/tests/pytest/tag_lite/4.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/5.py b/tests/pytest/tag_lite/5.py
index 66fc4b721c..6e94e692cd 100644
--- a/tests/pytest/tag_lite/5.py
+++ b/tests/pytest/tag_lite/5.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/6.py b/tests/pytest/tag_lite/6.py
index ca1058d51b..52f6fe1b37 100644
--- a/tests/pytest/tag_lite/6.py
+++ b/tests/pytest/tag_lite/6.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/add.py b/tests/pytest/tag_lite/add.py
index e9bc01afd2..e9e3675485 100644
--- a/tests/pytest/tag_lite/add.py
+++ b/tests/pytest/tag_lite/add.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/bigint.py b/tests/pytest/tag_lite/bigint.py
index 875633901c..77a161c5bc 100644
--- a/tests/pytest/tag_lite/bigint.py
+++ b/tests/pytest/tag_lite/bigint.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/binary.py b/tests/pytest/tag_lite/binary.py
index 476d9e4062..543b00b34e 100644
--- a/tests/pytest/tag_lite/binary.py
+++ b/tests/pytest/tag_lite/binary.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/binary_binary.py b/tests/pytest/tag_lite/binary_binary.py
index e05f7e3d0e..c0d1c0bfe7 100644
--- a/tests/pytest/tag_lite/binary_binary.py
+++ b/tests/pytest/tag_lite/binary_binary.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/bool.py b/tests/pytest/tag_lite/bool.py
index e8b00ce4d9..c43d5b2ee5 100644
--- a/tests/pytest/tag_lite/bool.py
+++ b/tests/pytest/tag_lite/bool.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/bool_binary.py b/tests/pytest/tag_lite/bool_binary.py
index 7df7ba8c03..7105cc4e78 100644
--- a/tests/pytest/tag_lite/bool_binary.py
+++ b/tests/pytest/tag_lite/bool_binary.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/bool_int.py b/tests/pytest/tag_lite/bool_int.py
index 9706cca945..b7504b1127 100644
--- a/tests/pytest/tag_lite/bool_int.py
+++ b/tests/pytest/tag_lite/bool_int.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/change.py b/tests/pytest/tag_lite/change.py
index ab2c80485b..e1df901d5b 100644
--- a/tests/pytest/tag_lite/change.py
+++ b/tests/pytest/tag_lite/change.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/column.py b/tests/pytest/tag_lite/column.py
index 6b2285794d..4669f74498 100644
--- a/tests/pytest/tag_lite/column.py
+++ b/tests/pytest/tag_lite/column.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/commit.py b/tests/pytest/tag_lite/commit.py
index 4070ebd368..4ad2ecf8f9 100644
--- a/tests/pytest/tag_lite/commit.py
+++ b/tests/pytest/tag_lite/commit.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/create-tags-boundary.py b/tests/pytest/tag_lite/create-tags-boundary.py
index e80f458f0c..b98ae627c2 100644
--- a/tests/pytest/tag_lite/create-tags-boundary.py
+++ b/tests/pytest/tag_lite/create-tags-boundary.py
@@ -19,9 +19,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/create.py b/tests/pytest/tag_lite/create.py
index 446f8fd38e..39a2d83a31 100644
--- a/tests/pytest/tag_lite/create.py
+++ b/tests/pytest/tag_lite/create.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/datatype-without-alter.py b/tests/pytest/tag_lite/datatype-without-alter.py
index 1a8d05d648..42bc42bdbf 100644
--- a/tests/pytest/tag_lite/datatype-without-alter.py
+++ b/tests/pytest/tag_lite/datatype-without-alter.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 10
diff --git a/tests/pytest/tag_lite/datatype.py b/tests/pytest/tag_lite/datatype.py
index 1fcf7ce19e..bc99cf74b0 100644
--- a/tests/pytest/tag_lite/datatype.py
+++ b/tests/pytest/tag_lite/datatype.py
@@ -20,9 +20,9 @@ from util.dnodes import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
self.ntables = 10
diff --git a/tests/pytest/tag_lite/delete.py b/tests/pytest/tag_lite/delete.py
index 34fe6e6f51..d4a55a4a75 100644
--- a/tests/pytest/tag_lite/delete.py
+++ b/tests/pytest/tag_lite/delete.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/double.py b/tests/pytest/tag_lite/double.py
index 92e7d23677..f70c6871af 100644
--- a/tests/pytest/tag_lite/double.py
+++ b/tests/pytest/tag_lite/double.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/filter.py b/tests/pytest/tag_lite/filter.py
index b726e7646e..cdba8964ef 100644
--- a/tests/pytest/tag_lite/filter.py
+++ b/tests/pytest/tag_lite/filter.py
@@ -18,9 +18,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/float.py b/tests/pytest/tag_lite/float.py
index 19140abe13..1bfb1ac94d 100644
--- a/tests/pytest/tag_lite/float.py
+++ b/tests/pytest/tag_lite/float.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/int.py b/tests/pytest/tag_lite/int.py
index 769e6009c8..b831244790 100644
--- a/tests/pytest/tag_lite/int.py
+++ b/tests/pytest/tag_lite/int.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/int_binary.py b/tests/pytest/tag_lite/int_binary.py
index 2f3f818cd2..62d8f2a604 100644
--- a/tests/pytest/tag_lite/int_binary.py
+++ b/tests/pytest/tag_lite/int_binary.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/int_float.py b/tests/pytest/tag_lite/int_float.py
index 4171085ad7..8a544c6110 100644
--- a/tests/pytest/tag_lite/int_float.py
+++ b/tests/pytest/tag_lite/int_float.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/set.py b/tests/pytest/tag_lite/set.py
index 6e1a5aed9d..44f5a0890d 100644
--- a/tests/pytest/tag_lite/set.py
+++ b/tests/pytest/tag_lite/set.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/smallint.py b/tests/pytest/tag_lite/smallint.py
index 324deb3632..a047b0afee 100644
--- a/tests/pytest/tag_lite/smallint.py
+++ b/tests/pytest/tag_lite/smallint.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/tag_lite/tinyint.py b/tests/pytest/tag_lite/tinyint.py
index 9406f0b6c7..6e1820713c 100644
--- a/tests/pytest/tag_lite/tinyint.py
+++ b/tests/pytest/tag_lite/tinyint.py
@@ -7,9 +7,9 @@ from util.sql import *
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
tdSql.prepare()
diff --git a/tests/pytest/test.py b/tests/pytest/test.py
index 86417ea931..5b35563e1b 100644
--- a/tests/pytest/test.py
+++ b/tests/pytest/test.py
@@ -31,9 +31,10 @@ if __name__ == "__main__":
masterIp = ""
testCluster = False
valgrind = 0
+ logSql = True
stop = 0
- opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:scgh', [
- 'file=', 'path=', 'master', 'stop', 'cluster', 'valgrind', 'help'])
+ opts, args = getopt.gnu_getopt(sys.argv[1:], 'f:p:m:l:scgh', [
+ 'file=', 'path=', 'master', 'logSql=', 'stop', 'cluster', 'valgrind', 'help'])
for key, value in opts:
if key in ['-h', '--help']:
tdLog.printNoPrefix(
@@ -41,8 +42,10 @@ if __name__ == "__main__":
tdLog.printNoPrefix('-f Name of test case file written by Python')
tdLog.printNoPrefix('-p Deploy Path for Simulator')
tdLog.printNoPrefix('-m Master Ip for Simulator')
- tdLog.printNoPrefix('-c Test Cluster Flag')
+ tdLog.printNoPrefix('-l logSql Flag, TRUE or FALSE')
tdLog.printNoPrefix('-s stop All dnodes')
+ tdLog.printNoPrefix('-c Test Cluster Flag')
+ tdLog.printNoPrefix('-g valgrind Test Flag')
sys.exit(0)
if key in ['-f', '--file']:
@@ -54,6 +57,15 @@ if __name__ == "__main__":
if key in ['-m', '--master']:
masterIp = value
+ if key in ['-l', '--logSql']:
+ if (value.upper() == "TRUE"):
+ logSql = True
+ elif (value.upper() == "FALSE"):
+ logSql = False
+ else:
+ tdLog.printNoPrefix("logSql value %s is invalid" % logSql)
+ sys.exit(0)
+
if key in ['-c', '--cluster']:
testCluster = True
@@ -100,6 +112,8 @@ if __name__ == "__main__":
tdLog.info("Procedures for tdengine deployed in %s" % (host))
+ tdCases.logSql(logSql)
+
if testCluster:
tdLog.info("Procedures for testing cluster")
if fileName == "all":
diff --git a/tests/pytest/user/pass_len.py b/tests/pytest/user/pass_len.py
index 40bd1ca5fc..346b8424fe 100644
--- a/tests/pytest/user/pass_len.py
+++ b/tests/pytest/user/pass_len.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
print("==============step1")
diff --git a/tests/pytest/user/user_create.py b/tests/pytest/user/user_create.py
index f8ac5c6799..a00f670a36 100644
--- a/tests/pytest/user/user_create.py
+++ b/tests/pytest/user/user_create.py
@@ -19,9 +19,9 @@ from util.sql import tdSql
class TDTestCase:
- def init(self, conn):
+ def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
- tdSql.init(conn.cursor())
+ tdSql.init(conn.cursor(), logSql)
def run(self):
print("==============step1")
diff --git a/tests/pytest/util/cases.py b/tests/pytest/util/cases.py
index f65b0dfde3..2fc1ac8515 100644
--- a/tests/pytest/util/cases.py
+++ b/tests/pytest/util/cases.py
@@ -24,6 +24,7 @@ class TDCase:
def __init__(self, name, case):
self.name = name
self.case = case
+ self._logSql = True
class TDCases:
@@ -36,6 +37,9 @@ class TDCases:
moduleName = fileName.replace(".py", "").replace("/", ".")
return importlib.import_module(moduleName, package='..')
+ def logSql(self, logSql):
+ self._logSql = logSql
+
def addWindows(self, name, case):
self.windowsCases.append(TDCase(name, case))
@@ -66,7 +70,7 @@ class TDCases:
for tmp in self.linuxCases:
if tmp.name.find(fileName) != -1:
case = testModule.TDTestCase()
- case.init(conn)
+ case.init(conn, self._logSql)
try:
case.run()
except Exception as e:
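
The plumbing this hunk establishes, sketched outside the diff for clarity: test.py sets the flag once via tdCases.logSql(...), and each case constructed by the runner then receives it through the new two-argument init. Names mirror the diff; the runner body is abridged:

```python
# Abridged sketch of the TDCases dispatch after this patch (illustrative).
class TDCasesSketch:
    def __init__(self):
        self._logSql = True      # default before test.py overrides it
        self.linuxCases = []

    def logSql(self, logSql):
        # Called once from test.py with the parsed -l value.
        self._logSql = logSql

    def runOneLinux(self, conn, case):
        # The patched call site: the flag now travels into every case.
        case.init(conn, self._logSql)
        case.run()
        case.stop()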
diff --git a/tests/pytest/util/sql.py b/tests/pytest/util/sql.py
index eb53129722..245e4b0945 100644
--- a/tests/pytest/util/sql.py
+++ b/tests/pytest/util/sql.py
@@ -25,9 +25,15 @@ class TDSql:
self.queryCols = 0
self.affectedRows = 0
- def init(self, cursor):
+ def init(self, cursor, log=True):
self.cursor = cursor
+ if (log):
+ frame = inspect.stack()[1]
+ callerModule = inspect.getmodule(frame[0])
+ callerFilename = callerModule.__file__
+ self.cursor.log(callerFilename + ".sql")
+
def close(self):
self.cursor.close()
@@ -101,7 +107,6 @@ class TDSql:
return self.cursor.istype(col, dataType)
-
def checkData(self, row, col, data):
frame = inspect.stack()[1]
callerModule = inspect.getmodule(frame[0])
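
Taken together with the cases.py change, TDSql.init now optionally opens a per-case SQL log via the connector cursor's log() method. A minimal conforming case under the new contract would look like the sketch below (the table name is illustrative; the util helpers are the ones patched in this diff):

```python
# Minimal TDTestCase sketch under the new init(conn, logSql) contract.
from util.log import tdLog
from util.cases import tdCases
from util.sql import tdSql


class TDTestCase:
    def init(self, conn, logSql):
        tdLog.debug("start to execute %s" % __file__)
        # Forwarding logSql lets TDSql open '<case>.sql' when logging is on.
        tdSql.init(conn.cursor(), logSql)

    def run(self):
        tdSql.prepare()
        tdSql.execute("create table t1 (ts timestamp, v int)")
        tdSql.execute("insert into t1 values(now, 1)")
        tdSql.query("select * from t1")
        tdSql.checkRows(1)

    def stop(self):
        tdSql.close()
        tdLog.success("%s successfully executed" % __file__)


tdCases.addWindows(__file__, TDTestCase())
tdCases.addLinux(__file__, TDTestCase())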
diff --git a/tests/script/general/db/alter_tables_d2.sim b/tests/script/general/db/alter_tables_d2.sim
new file mode 100644
index 0000000000..7e8d4e117a
--- /dev/null
+++ b/tests/script/general/db/alter_tables_d2.sim
@@ -0,0 +1,468 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c wallevel -v 2
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2
+
+system sh/deploy.sh -n dnode2 -i 2
+system sh/cfg.sh -n dnode2 -c wallevel -v 2
+system sh/cfg.sh -n dnode2 -c numOfTotalVnodes -v 2
+
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+
+sleep 3000
+sql connect
+sql create dnode $hostname2
+sleep 1000
+
+print ============================ step1
+
+sql create database db maxTables 5
+sql create table db.st (ts timestamp, i int) tags(t int)
+sql create table db.t000 using db.st tags(0)
+sql create table db.t001 using db.st tags(1)
+sql create table db.t002 using db.st tags(2)
+sql create table db.t003 using db.st tags(3)
+sql create table db.t004 using db.st tags(4)
+sql create table db.t005 using db.st tags(5)
+sql create table db.t006 using db.st tags(6)
+sql create table db.t007 using db.st tags(7)
+sql create table db.t008 using db.st tags(8)
+sql create table db.t009 using db.st tags(9)
+sql create table db.t010 using db.st tags(0)
+sql create table db.t011 using db.st tags(1)
+sql create table db.t012 using db.st tags(2)
+sql create table db.t013 using db.st tags(3)
+sql create table db.t014 using db.st tags(4)
+sql create table db.t015 using db.st tags(5)
+sql create table db.t016 using db.st tags(6)
+sql create table db.t017 using db.st tags(7)
+sql create table db.t018 using db.st tags(8)
+sql create table db.t019 using db.st tags(9)
+
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+sql insert into db.t000 values(now, 1)
+sql insert into db.t001 values(now, 1)
+sql insert into db.t002 values(now, 1)
+sql insert into db.t003 values(now, 1)
+sql insert into db.t004 values(now, 1)
+sql insert into db.t005 values(now, 1)
+sql insert into db.t006 values(now, 1)
+sql insert into db.t007 values(now, 1)
+sql insert into db.t008 values(now, 1)
+sql insert into db.t009 values(now, 1)
+sql insert into db.t010 values(now, 1)
+sql insert into db.t011 values(now, 1)
+sql insert into db.t012 values(now, 1)
+sql insert into db.t013 values(now, 1)
+sql insert into db.t014 values(now, 1)
+sql insert into db.t015 values(now, 1)
+sql insert into db.t016 values(now, 1)
+sql insert into db.t017 values(now, 1)
+sql insert into db.t018 values(now, 1)
+sql insert into db.t019 values(now, 1)
+
+print ============================ step2
+sql_error create table db.t100 using db.st tags(10)
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+print ============================ step3
+
+sql alter database db maxTables 10
+sleep 1000
+
+sql create table db.t100 using db.st tags(0)
+sql create table db.t101 using db.st tags(1)
+sql create table db.t102 using db.st tags(2)
+sql create table db.t103 using db.st tags(3)
+sql create table db.t104 using db.st tags(4)
+sql create table db.t105 using db.st tags(5)
+sql create table db.t106 using db.st tags(6)
+sql create table db.t107 using db.st tags(7)
+sql create table db.t108 using db.st tags(8)
+sql create table db.t109 using db.st tags(9)
+sql create table db.t110 using db.st tags(0)
+sql create table db.t111 using db.st tags(1)
+sql create table db.t112 using db.st tags(2)
+sql create table db.t113 using db.st tags(3)
+sql create table db.t114 using db.st tags(4)
+sql create table db.t115 using db.st tags(5)
+sql create table db.t116 using db.st tags(6)
+sql create table db.t117 using db.st tags(7)
+sql create table db.t118 using db.st tags(8)
+sql create table db.t119 using db.st tags(9)
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+
+sql insert into db.t100 values(now, 1)
+sql insert into db.t101 values(now, 1)
+sql insert into db.t102 values(now, 1)
+sql insert into db.t103 values(now, 1)
+sql insert into db.t104 values(now, 1)
+sql insert into db.t105 values(now, 1)
+sql insert into db.t106 values(now, 1)
+sql insert into db.t107 values(now, 1)
+sql insert into db.t108 values(now, 1)
+sql insert into db.t109 values(now, 1)
+sql insert into db.t110 values(now, 1)
+sql insert into db.t111 values(now, 1)
+sql insert into db.t112 values(now, 1)
+sql insert into db.t113 values(now, 1)
+sql insert into db.t114 values(now, 1)
+sql insert into db.t115 values(now, 1)
+sql insert into db.t116 values(now, 1)
+sql insert into db.t117 values(now, 1)
+sql insert into db.t118 values(now, 1)
+sql insert into db.t119 values(now, 1)
+
+print ============================ step4
+sql_error create table db.t200 using db.st tags(10)
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+print ============================ step5
+
+sql alter database db maxTables 15
+sleep 1000
+
+sql create table db.t200 using db.st tags(0)
+sql create table db.t201 using db.st tags(1)
+sql create table db.t202 using db.st tags(2)
+sql create table db.t203 using db.st tags(3)
+sql create table db.t204 using db.st tags(4)
+sql create table db.t205 using db.st tags(5)
+sql create table db.t206 using db.st tags(6)
+sql create table db.t207 using db.st tags(7)
+sql create table db.t208 using db.st tags(8)
+sql create table db.t209 using db.st tags(9)
+sql create table db.t210 using db.st tags(0)
+sql create table db.t211 using db.st tags(1)
+sql create table db.t212 using db.st tags(2)
+sql create table db.t213 using db.st tags(3)
+sql create table db.t214 using db.st tags(4)
+sql create table db.t215 using db.st tags(5)
+sql create table db.t216 using db.st tags(6)
+sql create table db.t217 using db.st tags(7)
+sql create table db.t218 using db.st tags(8)
+sql create table db.t219 using db.st tags(9)
+sql show db.tables
+if $rows != 60 then
+ return -1
+endi
+
+sql insert into db.t200 values(now, 1)
+sql insert into db.t201 values(now, 1)
+sql insert into db.t202 values(now, 1)
+sql insert into db.t203 values(now, 1)
+sql insert into db.t204 values(now, 1)
+sql insert into db.t205 values(now, 1)
+sql insert into db.t206 values(now, 1)
+sql insert into db.t207 values(now, 1)
+sql insert into db.t208 values(now, 1)
+sql insert into db.t209 values(now, 1)
+sql insert into db.t210 values(now, 1)
+sql insert into db.t211 values(now, 1)
+sql insert into db.t212 values(now, 1)
+sql insert into db.t213 values(now, 1)
+sql insert into db.t214 values(now, 1)
+sql insert into db.t215 values(now, 1)
+sql insert into db.t216 values(now, 1)
+sql insert into db.t217 values(now, 1)
+sql insert into db.t218 values(now, 1)
+sql insert into db.t219 values(now, 1)
+
+print ============================ step6
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 60 then
+ return -1
+endi
+
+print ============================ step7
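+# restart both dnodes and verify tables and rows survive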
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 3000
+
+sql reset query cache
+sleep 1000
+
+sql show db.tables
+if $rows != 60 then
+ return -1
+endi
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 60 then
+ return -1
+endi
+
+print ============================ step8
+sql_error create table db.t300 using db.st tags(10)
+sql show db.tables
+if $rows != 60 then
+ return -1
+endi
+
+print ============================ step9
+
+sql alter database db maxTables 20
+sleep 1000
+
+sql create table db.t300 using db.st tags(0)
+sql create table db.t301 using db.st tags(1)
+sql create table db.t302 using db.st tags(2)
+sql create table db.t303 using db.st tags(3)
+sql create table db.t304 using db.st tags(4)
+sql create table db.t305 using db.st tags(5)
+sql create table db.t306 using db.st tags(6)
+sql create table db.t307 using db.st tags(7)
+sql create table db.t308 using db.st tags(8)
+sql create table db.t309 using db.st tags(9)
+sql create table db.t310 using db.st tags(0)
+sql create table db.t311 using db.st tags(1)
+sql create table db.t312 using db.st tags(2)
+sql create table db.t313 using db.st tags(3)
+sql create table db.t314 using db.st tags(4)
+sql create table db.t315 using db.st tags(5)
+sql create table db.t316 using db.st tags(6)
+sql create table db.t317 using db.st tags(7)
+sql create table db.t318 using db.st tags(8)
+sql create table db.t319 using db.st tags(9)
+
+sql insert into db.t300 values(now, 1)
+sql insert into db.t301 values(now, 1)
+sql insert into db.t302 values(now, 1)
+sql insert into db.t303 values(now, 1)
+sql insert into db.t304 values(now, 1)
+sql insert into db.t305 values(now, 1)
+sql insert into db.t306 values(now, 1)
+sql insert into db.t307 values(now, 1)
+sql insert into db.t308 values(now, 1)
+sql insert into db.t309 values(now, 1)
+sql insert into db.t310 values(now, 1)
+sql insert into db.t311 values(now, 1)
+sql insert into db.t312 values(now, 1)
+sql insert into db.t313 values(now, 1)
+sql insert into db.t314 values(now, 1)
+sql insert into db.t315 values(now, 1)
+sql insert into db.t316 values(now, 1)
+sql insert into db.t317 values(now, 1)
+sql insert into db.t318 values(now, 1)
+sql insert into db.t319 values(now, 1)
+
+sql show db.tables
+if $rows != 80 then
+ return -1
+endi
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t300
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 80 then
+ return -1
+endi
+
+print ============================ step10
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+system sh/exec.sh -n dnode2 -s start
+sleep 3000
+
+sql reset query cache
+sleep 1000
+
+sql show db.tables
+if $rows != 80 then
+ return -1
+endi
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t300
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 80 then
+ return -1
+endi
+
+print ============================ step11
+sql_error create table db.t400 using db.st tags(10)
+sql show db.tables
+if $rows != 80 then
+ return -1
+endi
+
+print ============================ step12
+
+sql alter database db maxTables 25
+sleep 1000
+
+sql create table db.t400 using db.st tags(0)
+sql create table db.t401 using db.st tags(1)
+sql create table db.t402 using db.st tags(2)
+sql create table db.t403 using db.st tags(3)
+sql create table db.t404 using db.st tags(4)
+sql create table db.t405 using db.st tags(5)
+sql create table db.t406 using db.st tags(6)
+sql create table db.t407 using db.st tags(7)
+sql create table db.t408 using db.st tags(8)
+sql create table db.t409 using db.st tags(9)
+sql create table db.t410 using db.st tags(0)
+sql create table db.t411 using db.st tags(1)
+sql create table db.t412 using db.st tags(2)
+sql create table db.t413 using db.st tags(3)
+sql create table db.t414 using db.st tags(4)
+sql create table db.t415 using db.st tags(5)
+sql create table db.t416 using db.st tags(6)
+sql create table db.t417 using db.st tags(7)
+sql create table db.t418 using db.st tags(8)
+sql create table db.t419 using db.st tags(9)
+
+sql insert into db.t400 values(now, 1)
+sql insert into db.t401 values(now, 1)
+sql insert into db.t402 values(now, 1)
+sql insert into db.t403 values(now, 1)
+sql insert into db.t404 values(now, 1)
+sql insert into db.t405 values(now, 1)
+sql insert into db.t406 values(now, 1)
+sql insert into db.t407 values(now, 1)
+sql insert into db.t408 values(now, 1)
+sql insert into db.t409 values(now, 1)
+sql insert into db.t410 values(now, 1)
+sql insert into db.t411 values(now, 1)
+sql insert into db.t412 values(now, 1)
+sql insert into db.t413 values(now, 1)
+sql insert into db.t414 values(now, 1)
+sql insert into db.t415 values(now, 1)
+sql insert into db.t416 values(now, 1)
+sql insert into db.t417 values(now, 1)
+sql insert into db.t418 values(now, 1)
+sql insert into db.t419 values(now, 1)
+
+sql show db.tables
+if $rows != 100 then
+ return -1
+endi
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t300
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t400
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 100 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/exec.sh -n dnode2 -s stop -x SIGINT
\ No newline at end of file
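
The twenty-line create and insert runs above are purely mechanical. The sim interpreter behind these scripts supports variables, "." concatenation, and while loops, so a run like the t0..t9 block of alter_tables_v1.sim below could be collapsed into a sketch such as this (assuming the framework's usual while/endw syntax):

  $i = 0
  while $i < 10
    $tb = db.t . $i
    sql create table $tb using db.st tags( $i )
    sql insert into $tb values(now, 1)
    $i = $i + 1
  endw
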
diff --git a/tests/script/general/db/alter_tables_v1.sim b/tests/script/general/db/alter_tables_v1.sim
new file mode 100644
index 0000000000..17aa746ce3
--- /dev/null
+++ b/tests/script/general/db/alter_tables_v1.sim
@@ -0,0 +1,352 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
+
+system sh/exec.sh -n dnode1 -s start
+
+sleep 3000
+sql connect
+
+print ============================ step1
+
+sql create database db maxTables 10
+sql create table db.st (ts timestamp, i int) tags(t int)
+sql create table db.t0 using db.st tags(0)
+sql create table db.t1 using db.st tags(1)
+sql create table db.t2 using db.st tags(2)
+sql create table db.t3 using db.st tags(3)
+sql create table db.t4 using db.st tags(4)
+sql create table db.t5 using db.st tags(5)
+sql create table db.t6 using db.st tags(6)
+sql create table db.t7 using db.st tags(7)
+sql create table db.t8 using db.st tags(8)
+sql create table db.t9 using db.st tags(9)
+
+sql show db.tables
+if $rows != 10 then
+ return -1
+endi
+
+sql insert into db.t0 values(now, 1)
+sql insert into db.t1 values(now, 1)
+sql insert into db.t2 values(now, 1)
+sql insert into db.t3 values(now, 1)
+sql insert into db.t4 values(now, 1)
+sql insert into db.t5 values(now, 1)
+sql insert into db.t6 values(now, 1)
+sql insert into db.t7 values(now, 1)
+sql insert into db.t8 values(now, 1)
+sql insert into db.t9 values(now, 1)
+
+print ============================ step2
+sql_error create table db.t10 using db.st tags(10)
+sql show db.tables
+if $rows != 10 then
+ return -1
+endi
+
+print ============================ step3
+
+sql alter database db maxTables 20
+sleep 1000
+
+sql create table db.t10 using db.st tags(0)
+sql create table db.t11 using db.st tags(1)
+sql create table db.t12 using db.st tags(2)
+sql create table db.t13 using db.st tags(3)
+sql create table db.t14 using db.st tags(4)
+sql create table db.t15 using db.st tags(5)
+sql create table db.t16 using db.st tags(6)
+sql create table db.t17 using db.st tags(7)
+sql create table db.t18 using db.st tags(8)
+sql create table db.t19 using db.st tags(9)
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+sql insert into db.t10 values(now, 1)
+sql insert into db.t11 values(now, 1)
+sql insert into db.t12 values(now, 1)
+sql insert into db.t13 values(now, 1)
+sql insert into db.t14 values(now, 1)
+sql insert into db.t15 values(now, 1)
+sql insert into db.t16 values(now, 1)
+sql insert into db.t17 values(now, 1)
+sql insert into db.t18 values(now, 1)
+sql insert into db.t19 values(now, 1)
+
+print ============================ step4
+sql_error create table db.t20 using db.st tags(10)
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+print ============================ step5
+
+sql alter database db maxTables 30
+sleep 1000
+
+sql create table db.t20 using db.st tags(0)
+sql create table db.t21 using db.st tags(1)
+sql create table db.t22 using db.st tags(2)
+sql create table db.t23 using db.st tags(3)
+sql create table db.t24 using db.st tags(4)
+sql create table db.t25 using db.st tags(5)
+sql create table db.t26 using db.st tags(6)
+sql create table db.t27 using db.st tags(7)
+sql create table db.t28 using db.st tags(8)
+sql create table db.t29 using db.st tags(9)
+sql show db.tables
+if $rows != 30 then
+ return -1
+endi
+
+sql insert into db.t20 values(now, 1)
+sql insert into db.t21 values(now, 1)
+sql insert into db.t22 values(now, 1)
+sql insert into db.t23 values(now, 1)
+sql insert into db.t24 values(now, 1)
+sql insert into db.t25 values(now, 1)
+sql insert into db.t26 values(now, 1)
+sql insert into db.t27 values(now, 1)
+sql insert into db.t28 values(now, 1)
+sql insert into db.t29 values(now, 1)
+
+print ============================ step6
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t0
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t10
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t20
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 30 then
+ return -1
+endi
+
+print ============================ step7
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+
+sql reset query cache
+sleep 1000
+
+sql show db.tables
+if $rows != 30 then
+ return -1
+endi
+
+sql select * from db.t0
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t10
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t20
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 30 then
+ return -1
+endi
+
+print ============================ step8
+sql_error create table db.t30 using db.st tags(10)
+sql show db.tables
+if $rows != 30 then
+ return -1
+endi
+
+print ============================ step9
+
+sql alter database db maxTables 40
+sleep 1000
+
+sql create table db.t30 using db.st tags(0)
+sql create table db.t31 using db.st tags(1)
+sql create table db.t32 using db.st tags(2)
+sql create table db.t33 using db.st tags(3)
+sql create table db.t34 using db.st tags(4)
+sql create table db.t35 using db.st tags(5)
+sql create table db.t36 using db.st tags(6)
+sql create table db.t37 using db.st tags(7)
+sql create table db.t38 using db.st tags(8)
+sql create table db.t39 using db.st tags(9)
+
+sql insert into db.t30 values(now, 1)
+sql insert into db.t31 values(now, 1)
+sql insert into db.t32 values(now, 1)
+sql insert into db.t33 values(now, 1)
+sql insert into db.t34 values(now, 1)
+sql insert into db.t35 values(now, 1)
+sql insert into db.t36 values(now, 1)
+sql insert into db.t37 values(now, 1)
+sql insert into db.t38 values(now, 1)
+sql insert into db.t39 values(now, 1)
+
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t0
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t10
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t20
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t30
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 40 then
+ return -1
+endi
+
+
+print ============================ step10
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+
+sql reset query cache
+sleep 1000
+
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+sql select * from db.t0
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t10
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t20
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 40 then
+ return -1
+endi
+
+print ============================ step11
+sql_error create table db.t40 using db.st tags(10)
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+print ============================ step12
+
+sql alter database db maxTables 50
+sleep 1000
+
+sql create table db.t40 using db.st tags(0)
+sql create table db.t41 using db.st tags(1)
+sql create table db.t42 using db.st tags(2)
+sql create table db.t43 using db.st tags(3)
+sql create table db.t44 using db.st tags(4)
+sql create table db.t45 using db.st tags(5)
+sql create table db.t46 using db.st tags(6)
+sql create table db.t47 using db.st tags(7)
+sql create table db.t48 using db.st tags(8)
+sql create table db.t49 using db.st tags(9)
+
+sql insert into db.t40 values(now, 1)
+sql insert into db.t41 values(now, 1)
+sql insert into db.t42 values(now, 1)
+sql insert into db.t43 values(now, 1)
+sql insert into db.t44 values(now, 1)
+sql insert into db.t45 values(now, 1)
+sql insert into db.t46 values(now, 1)
+sql insert into db.t47 values(now, 1)
+sql insert into db.t48 values(now, 1)
+sql insert into db.t49 values(now, 1)
+
+sql show db.tables
+if $rows != 50 then
+ return -1
+endi
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t0
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t10
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t20
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t30
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t40
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 50 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
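
Every grow step in these scripts repeats the same cycle: prove the cap is hit, raise maxTables, create again, and re-verify the count. Condensed to a single hypothetical step13 continuing alter_tables_v1.sim (table t50, cap raised to 60; the numbers are illustrative):

  sql_error create table db.t50 using db.st tags(10)
  sql alter database db maxTables 60
  sleep 1000
  sql create table db.t50 using db.st tags(0)
  sql show db.tables
  if $rows != 51 then
    return -1
  endi
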
diff --git a/tests/script/general/db/alter_tables_v4.sim b/tests/script/general/db/alter_tables_v4.sim
new file mode 100644
index 0000000000..db00219ed0
--- /dev/null
+++ b/tests/script/general/db/alter_tables_v4.sim
@@ -0,0 +1,457 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
+
+system sh/exec.sh -n dnode1 -s start
+
+sleep 3000
+sql connect
+
+print ============================ step1
+
+sql create database db maxTables 5
+sql create table db.st (ts timestamp, i int) tags(t int)
+sql create table db.t000 using db.st tags(0)
+sql create table db.t001 using db.st tags(1)
+sql create table db.t002 using db.st tags(2)
+sql create table db.t003 using db.st tags(3)
+sql create table db.t004 using db.st tags(4)
+sql create table db.t005 using db.st tags(5)
+sql create table db.t006 using db.st tags(6)
+sql create table db.t007 using db.st tags(7)
+sql create table db.t008 using db.st tags(8)
+sql create table db.t009 using db.st tags(9)
+sql create table db.t010 using db.st tags(0)
+sql create table db.t011 using db.st tags(1)
+sql create table db.t012 using db.st tags(2)
+sql create table db.t013 using db.st tags(3)
+sql create table db.t014 using db.st tags(4)
+sql create table db.t015 using db.st tags(5)
+sql create table db.t016 using db.st tags(6)
+sql create table db.t017 using db.st tags(7)
+sql create table db.t018 using db.st tags(8)
+sql create table db.t019 using db.st tags(9)
+
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+sql insert into db.t000 values(now, 1)
+sql insert into db.t001 values(now, 1)
+sql insert into db.t002 values(now, 1)
+sql insert into db.t003 values(now, 1)
+sql insert into db.t004 values(now, 1)
+sql insert into db.t005 values(now, 1)
+sql insert into db.t006 values(now, 1)
+sql insert into db.t007 values(now, 1)
+sql insert into db.t008 values(now, 1)
+sql insert into db.t009 values(now, 1)
+sql insert into db.t010 values(now, 1)
+sql insert into db.t011 values(now, 1)
+sql insert into db.t012 values(now, 1)
+sql insert into db.t013 values(now, 1)
+sql insert into db.t014 values(now, 1)
+sql insert into db.t015 values(now, 1)
+sql insert into db.t016 values(now, 1)
+sql insert into db.t017 values(now, 1)
+sql insert into db.t018 values(now, 1)
+sql insert into db.t019 values(now, 1)
+
+print ============================ step2
+sql_error create table db.t100 using db.st tags(10)
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+print ============================ step3
+
+sql alter database db maxTables 10
+sleep 1000
+
+sql create table db.t100 using db.st tags(0)
+sql create table db.t101 using db.st tags(1)
+sql create table db.t102 using db.st tags(2)
+sql create table db.t103 using db.st tags(3)
+sql create table db.t104 using db.st tags(4)
+sql create table db.t105 using db.st tags(5)
+sql create table db.t106 using db.st tags(6)
+sql create table db.t107 using db.st tags(7)
+sql create table db.t108 using db.st tags(8)
+sql create table db.t109 using db.st tags(9)
+sql create table db.t110 using db.st tags(0)
+sql create table db.t111 using db.st tags(1)
+sql create table db.t112 using db.st tags(2)
+sql create table db.t113 using db.st tags(3)
+sql create table db.t114 using db.st tags(4)
+sql create table db.t115 using db.st tags(5)
+sql create table db.t116 using db.st tags(6)
+sql create table db.t117 using db.st tags(7)
+sql create table db.t118 using db.st tags(8)
+sql create table db.t119 using db.st tags(9)
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+
+sql insert into db.t100 values(now, 1)
+sql insert into db.t101 values(now, 1)
+sql insert into db.t102 values(now, 1)
+sql insert into db.t103 values(now, 1)
+sql insert into db.t104 values(now, 1)
+sql insert into db.t105 values(now, 1)
+sql insert into db.t106 values(now, 1)
+sql insert into db.t107 values(now, 1)
+sql insert into db.t108 values(now, 1)
+sql insert into db.t109 values(now, 1)
+sql insert into db.t110 values(now, 1)
+sql insert into db.t111 values(now, 1)
+sql insert into db.t112 values(now, 1)
+sql insert into db.t113 values(now, 1)
+sql insert into db.t114 values(now, 1)
+sql insert into db.t115 values(now, 1)
+sql insert into db.t116 values(now, 1)
+sql insert into db.t117 values(now, 1)
+sql insert into db.t118 values(now, 1)
+sql insert into db.t119 values(now, 1)
+
+print ============================ step4
+sql_error create table db.t200 using db.st tags(10)
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+print ============================ step5
+
+sql alter database db maxTables 15
+sleep 1000
+
+sql create table db.t200 using db.st tags(0)
+sql create table db.t201 using db.st tags(1)
+sql create table db.t202 using db.st tags(2)
+sql create table db.t203 using db.st tags(3)
+sql create table db.t204 using db.st tags(4)
+sql create table db.t205 using db.st tags(5)
+sql create table db.t206 using db.st tags(6)
+sql create table db.t207 using db.st tags(7)
+sql create table db.t208 using db.st tags(8)
+sql create table db.t209 using db.st tags(9)
+sql create table db.t210 using db.st tags(0)
+sql create table db.t211 using db.st tags(1)
+sql create table db.t212 using db.st tags(2)
+sql create table db.t213 using db.st tags(3)
+sql create table db.t214 using db.st tags(4)
+sql create table db.t215 using db.st tags(5)
+sql create table db.t216 using db.st tags(6)
+sql create table db.t217 using db.st tags(7)
+sql create table db.t218 using db.st tags(8)
+sql create table db.t219 using db.st tags(9)
+sql show db.tables
+if $rows != 60 then
+ return -1
+endi
+
+sql insert into db.t200 values(now, 1)
+sql insert into db.t201 values(now, 1)
+sql insert into db.t202 values(now, 1)
+sql insert into db.t203 values(now, 1)
+sql insert into db.t204 values(now, 1)
+sql insert into db.t205 values(now, 1)
+sql insert into db.t206 values(now, 1)
+sql insert into db.t207 values(now, 1)
+sql insert into db.t208 values(now, 1)
+sql insert into db.t209 values(now, 1)
+sql insert into db.t210 values(now, 1)
+sql insert into db.t211 values(now, 1)
+sql insert into db.t212 values(now, 1)
+sql insert into db.t213 values(now, 1)
+sql insert into db.t214 values(now, 1)
+sql insert into db.t215 values(now, 1)
+sql insert into db.t216 values(now, 1)
+sql insert into db.t217 values(now, 1)
+sql insert into db.t218 values(now, 1)
+sql insert into db.t219 values(now, 1)
+
+print ============================ step6
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 60 then
+ return -1
+endi
+
+print ============================ step7
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+
+sql reset query cache
+sleep 1000
+
+sql show db.tables
+if $rows != 60 then
+ return -1
+endi
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 60 then
+ return -1
+endi
+
+print ============================ step8
+sql_error create table db.t300 using db.st tags(10)
+sql show db.tables
+if $rows != 60 then
+ return -1
+endi
+
+print ============================ step9
+
+sql alter database db maxTables 20
+sleep 1000
+
+sql create table db.t300 using db.st tags(0)
+sql create table db.t301 using db.st tags(1)
+sql create table db.t302 using db.st tags(2)
+sql create table db.t303 using db.st tags(3)
+sql create table db.t304 using db.st tags(4)
+sql create table db.t305 using db.st tags(5)
+sql create table db.t306 using db.st tags(6)
+sql create table db.t307 using db.st tags(7)
+sql create table db.t308 using db.st tags(8)
+sql create table db.t309 using db.st tags(9)
+sql create table db.t310 using db.st tags(0)
+sql create table db.t311 using db.st tags(1)
+sql create table db.t312 using db.st tags(2)
+sql create table db.t313 using db.st tags(3)
+sql create table db.t314 using db.st tags(4)
+sql create table db.t315 using db.st tags(5)
+sql create table db.t316 using db.st tags(6)
+sql create table db.t317 using db.st tags(7)
+sql create table db.t318 using db.st tags(8)
+sql create table db.t319 using db.st tags(9)
+
+sql insert into db.t300 values(now, 1)
+sql insert into db.t301 values(now, 1)
+sql insert into db.t302 values(now, 1)
+sql insert into db.t303 values(now, 1)
+sql insert into db.t304 values(now, 1)
+sql insert into db.t305 values(now, 1)
+sql insert into db.t306 values(now, 1)
+sql insert into db.t307 values(now, 1)
+sql insert into db.t308 values(now, 1)
+sql insert into db.t309 values(now, 1)
+sql insert into db.t310 values(now, 1)
+sql insert into db.t311 values(now, 1)
+sql insert into db.t312 values(now, 1)
+sql insert into db.t313 values(now, 1)
+sql insert into db.t314 values(now, 1)
+sql insert into db.t315 values(now, 1)
+sql insert into db.t316 values(now, 1)
+sql insert into db.t317 values(now, 1)
+sql insert into db.t318 values(now, 1)
+sql insert into db.t319 values(now, 1)
+
+sql show db.tables
+if $rows != 80 then
+ return -1
+endi
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t300
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 80 then
+ return -1
+endi
+
+print ============================ step10
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+
+sql reset query cache
+sleep 1000
+
+sql show db.tables
+if $rows != 80 then
+ return -1
+endi
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t300
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 80 then
+ return -1
+endi
+
+print ============================ step11
+sql_error create table db.t400 using db.st tags(10)
+sql show db.tables
+if $rows != 80 then
+ return -1
+endi
+
+print ============================ step12
+
+sql alter database db maxTables 25
+sleep 1000
+
+sql create table db.t400 using db.st tags(0)
+sql create table db.t401 using db.st tags(1)
+sql create table db.t402 using db.st tags(2)
+sql create table db.t403 using db.st tags(3)
+sql create table db.t404 using db.st tags(4)
+sql create table db.t405 using db.st tags(5)
+sql create table db.t406 using db.st tags(6)
+sql create table db.t407 using db.st tags(7)
+sql create table db.t408 using db.st tags(8)
+sql create table db.t409 using db.st tags(9)
+sql create table db.t410 using db.st tags(0)
+sql create table db.t411 using db.st tags(1)
+sql create table db.t412 using db.st tags(2)
+sql create table db.t413 using db.st tags(3)
+sql create table db.t414 using db.st tags(4)
+sql create table db.t415 using db.st tags(5)
+sql create table db.t416 using db.st tags(6)
+sql create table db.t417 using db.st tags(7)
+sql create table db.t418 using db.st tags(8)
+sql create table db.t419 using db.st tags(9)
+
+sql insert into db.t400 values(now, 1)
+sql insert into db.t401 values(now, 1)
+sql insert into db.t402 values(now, 1)
+sql insert into db.t403 values(now, 1)
+sql insert into db.t404 values(now, 1)
+sql insert into db.t405 values(now, 1)
+sql insert into db.t406 values(now, 1)
+sql insert into db.t407 values(now, 1)
+sql insert into db.t408 values(now, 1)
+sql insert into db.t409 values(now, 1)
+sql insert into db.t410 values(now, 1)
+sql insert into db.t411 values(now, 1)
+sql insert into db.t412 values(now, 1)
+sql insert into db.t413 values(now, 1)
+sql insert into db.t414 values(now, 1)
+sql insert into db.t415 values(now, 1)
+sql insert into db.t416 values(now, 1)
+sql insert into db.t417 values(now, 1)
+sql insert into db.t418 values(now, 1)
+sql insert into db.t419 values(now, 1)
+
+sql show db.tables
+if $rows != 100 then
+ return -1
+endi
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t300
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t400
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 100 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
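
In alter_tables_v4.sim the effective capacity at each step is numOfTotalVnodes * maxTables, which is why the expected counts climb in steps of 20 here versus 10 in the single-vnode script. The step5 check, annotated:

  # capacity = numOfTotalVnodes * maxTables = 4 * 15 = 60
  sql show db.tables
  if $rows != 60 then
    return -1
  endi
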
diff --git a/tests/script/general/db/alter_vgroups.sim b/tests/script/general/db/alter_vgroups.sim
new file mode 100644
index 0000000000..1aae7b9383
--- /dev/null
+++ b/tests/script/general/db/alter_vgroups.sim
@@ -0,0 +1,210 @@
+system sh/stop_dnodes.sh
+system sh/deploy.sh -n dnode1 -i 1
+system sh/cfg.sh -n dnode1 -c wallevel -v 0
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 1
+system sh/cfg.sh -n dnode1 -c maxtablesPerVnode -v 1000
+
+system sh/exec.sh -n dnode1 -s start
+
+sleep 3000
+sql connect
+
+print ============================ step1
+
+sql create database db maxTables 20
+sql create table db.st (ts timestamp, i int) tags(t int)
+sql create table db.t000 using db.st tags(0)
+sql create table db.t001 using db.st tags(1)
+sql create table db.t002 using db.st tags(2)
+sql create table db.t003 using db.st tags(3)
+sql create table db.t004 using db.st tags(4)
+sql create table db.t005 using db.st tags(5)
+sql create table db.t006 using db.st tags(6)
+sql create table db.t007 using db.st tags(7)
+sql create table db.t008 using db.st tags(8)
+sql create table db.t009 using db.st tags(9)
+sql create table db.t010 using db.st tags(0)
+sql create table db.t011 using db.st tags(1)
+sql create table db.t012 using db.st tags(2)
+sql create table db.t013 using db.st tags(3)
+sql create table db.t014 using db.st tags(4)
+sql create table db.t015 using db.st tags(5)
+sql create table db.t016 using db.st tags(6)
+sql create table db.t017 using db.st tags(7)
+sql create table db.t018 using db.st tags(8)
+sql create table db.t019 using db.st tags(9)
+
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+sql insert into db.t000 values(now, 1)
+sql insert into db.t001 values(now, 1)
+sql insert into db.t002 values(now, 1)
+sql insert into db.t003 values(now, 1)
+sql insert into db.t004 values(now, 1)
+sql insert into db.t005 values(now, 1)
+sql insert into db.t006 values(now, 1)
+sql insert into db.t007 values(now, 1)
+sql insert into db.t008 values(now, 1)
+sql insert into db.t009 values(now, 1)
+sql insert into db.t010 values(now, 1)
+sql insert into db.t011 values(now, 1)
+sql insert into db.t012 values(now, 1)
+sql insert into db.t013 values(now, 1)
+sql insert into db.t014 values(now, 1)
+sql insert into db.t015 values(now, 1)
+sql insert into db.t016 values(now, 1)
+sql insert into db.t017 values(now, 1)
+sql insert into db.t018 values(now, 1)
+sql insert into db.t019 values(now, 1)
+
+print ============================ step2
+sql_error create table db.t100 using db.st tags(10)
+sql show db.tables
+if $rows != 20 then
+ return -1
+endi
+
+print ============================ step3
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 2
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+
+sql create table db.t100 using db.st tags(0)
+sql create table db.t101 using db.st tags(1)
+sql create table db.t102 using db.st tags(2)
+sql create table db.t103 using db.st tags(3)
+sql create table db.t104 using db.st tags(4)
+sql create table db.t105 using db.st tags(5)
+sql create table db.t106 using db.st tags(6)
+sql create table db.t107 using db.st tags(7)
+sql create table db.t108 using db.st tags(8)
+sql create table db.t109 using db.st tags(9)
+sql create table db.t110 using db.st tags(0)
+sql create table db.t111 using db.st tags(1)
+sql create table db.t112 using db.st tags(2)
+sql create table db.t113 using db.st tags(3)
+sql create table db.t114 using db.st tags(4)
+sql create table db.t115 using db.st tags(5)
+sql create table db.t116 using db.st tags(6)
+sql create table db.t117 using db.st tags(7)
+sql create table db.t118 using db.st tags(8)
+sql create table db.t119 using db.st tags(9)
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+
+sql insert into db.t100 values(now, 1)
+sql insert into db.t101 values(now, 1)
+sql insert into db.t102 values(now, 1)
+sql insert into db.t103 values(now, 1)
+sql insert into db.t104 values(now, 1)
+sql insert into db.t105 values(now, 1)
+sql insert into db.t106 values(now, 1)
+sql insert into db.t107 values(now, 1)
+sql insert into db.t108 values(now, 1)
+sql insert into db.t109 values(now, 1)
+sql insert into db.t110 values(now, 1)
+sql insert into db.t111 values(now, 1)
+sql insert into db.t112 values(now, 1)
+sql insert into db.t113 values(now, 1)
+sql insert into db.t114 values(now, 1)
+sql insert into db.t115 values(now, 1)
+sql insert into db.t116 values(now, 1)
+sql insert into db.t117 values(now, 1)
+sql insert into db.t118 values(now, 1)
+sql insert into db.t119 values(now, 1)
+
+print ============================ step4
+sql_error create table db.t200 using db.st tags(10)
+sql show db.tables
+if $rows != 40 then
+ return -1
+endi
+
+print ============================ step5
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
+system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 3
+sleep 1000
+system sh/exec.sh -n dnode1 -s start
+sleep 3000
+
+sql create table db.t200 using db.st tags(0)
+sql create table db.t201 using db.st tags(1)
+sql create table db.t202 using db.st tags(2)
+sql create table db.t203 using db.st tags(3)
+sql create table db.t204 using db.st tags(4)
+sql create table db.t205 using db.st tags(5)
+sql create table db.t206 using db.st tags(6)
+sql create table db.t207 using db.st tags(7)
+sql create table db.t208 using db.st tags(8)
+sql create table db.t209 using db.st tags(9)
+sql create table db.t210 using db.st tags(0)
+sql create table db.t211 using db.st tags(1)
+sql create table db.t212 using db.st tags(2)
+sql create table db.t213 using db.st tags(3)
+sql create table db.t214 using db.st tags(4)
+sql create table db.t215 using db.st tags(5)
+sql create table db.t216 using db.st tags(6)
+sql create table db.t217 using db.st tags(7)
+sql create table db.t218 using db.st tags(8)
+sql create table db.t219 using db.st tags(9)
+sql show db.tables
+if $rows != 60 then
+ return -1
+endi
+
+sql insert into db.t200 values(now, 1)
+sql insert into db.t201 values(now, 1)
+sql insert into db.t202 values(now, 1)
+sql insert into db.t203 values(now, 1)
+sql insert into db.t204 values(now, 1)
+sql insert into db.t205 values(now, 1)
+sql insert into db.t206 values(now, 1)
+sql insert into db.t207 values(now, 1)
+sql insert into db.t208 values(now, 1)
+sql insert into db.t209 values(now, 1)
+sql insert into db.t210 values(now, 1)
+sql insert into db.t211 values(now, 1)
+sql insert into db.t212 values(now, 1)
+sql insert into db.t213 values(now, 1)
+sql insert into db.t214 values(now, 1)
+sql insert into db.t215 values(now, 1)
+sql insert into db.t216 values(now, 1)
+sql insert into db.t217 values(now, 1)
+sql insert into db.t218 values(now, 1)
+sql insert into db.t219 values(now, 1)
+
+print ============================ step6
+
+sql reset query cache
+sleep 1000
+
+sql select * from db.t000
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t100
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.t200
+if $rows != 1 then
+ return -1
+endi
+
+sql select * from db.st
+if $rows != 60 then
+ return -1
+endi
+
+system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
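
alter_vgroups.sim grows capacity the other way around: maxTables stays at 20 while numOfTotalVnodes is raised in the dnode configuration across a restart, so new tables land in a freshly allocated vgroup. A hypothetical step7 continuing the pattern (a fourth vnode, tables t300..t319) would read:

  system sh/exec.sh -n dnode1 -s stop -x SIGINT
  system sh/cfg.sh -n dnode1 -c numOfTotalVnodes -v 4
  sleep 1000
  system sh/exec.sh -n dnode1 -s start
  sleep 3000
  # the fourth vnode makes room for 20 more tables
  sql create table db.t300 using db.st tags(0)
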
diff --git a/tests/script/general/parser/limit1.sim b/tests/script/general/parser/limit1.sim
index 48fb6aaede..7971cb81c6 100644
--- a/tests/script/general/parser/limit1.sim
+++ b/tests/script/general/parser/limit1.sim
@@ -65,7 +65,7 @@ sleep 2000
system sh/exec.sh -n dnode1 -s start
print ================== server restart completed
-run general/parser/limit1_tb.sim
+#run general/parser/limit1_tb.sim
run general/parser/limit1_stb.sim
system sh/exec.sh -n dnode1 -s stop -x SIGINT
\ No newline at end of file
diff --git a/tests/script/general/parser/limit1_tb.sim b/tests/script/general/parser/limit1_tb.sim
index 8ed21a47e6..a9484d10db 100644
--- a/tests/script/general/parser/limit1_tb.sim
+++ b/tests/script/general/parser/limit1_tb.sim
@@ -111,16 +111,16 @@ endi
if $data09 != nchar0 then
return -1
endi
-if $data11 != NULL then
+if $data11 != null then
return -1
endi
-if $data12 != NULL then
+if $data12 != null then
return -1
endi
-if $data13 != NULL then
+if $data13 != null then
return -1
endi
-if $data14 != NULL then
+if $data14 != null then
return -1
endi
@@ -543,7 +543,7 @@ endi
if $data14 != 8.000000000 then
return -1
endi
-if $data21 != NULL then
+if $data21 != null then
return -1
endi
@@ -613,7 +613,7 @@ endi
if $data21 != 7.000000000 then
return -1
endi
-if $data31 != NULL then
+if $data31 != null then
return -1
endi
sql select avg(c1), avg(c2), avg(c3), avg(c4), avg(c5), avg(c6) from $tb where ts >= $ts0 and ts <= $tsu interval(30m) limit 3 offset 1
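
The NULL-to-null edits above track how the sim framework renders SQL NULL cells into the $dataXY variables, evidently as the lowercase string null now, so checks must compare against that spelling:

  # a NULL result cell reads back as lowercase "null"
  if $data21 != null then
    return -1
  endi
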
diff --git a/tests/script/jenkins/basic.txt b/tests/script/jenkins/basic.txt
index ea2c538ddf..027204af5b 100644
--- a/tests/script/jenkins/basic.txt
+++ b/tests/script/jenkins/basic.txt
@@ -44,6 +44,10 @@ cd ../../../debug; make
./test.sh -f general/compute/top.sim
./test.sh -f general/db/alter_option.sim
+#./test.sh -f general/db/alter_tables_d2.sim
+./test.sh -f general/db/alter_tables_v1.sim
+./test.sh -f general/db/alter_tables_v4.sim
+./test.sh -f general/db/alter_vgroups.sim
./test.sh -f general/db/basic.sim
./test.sh -f general/db/basic1.sim
./test.sh -f general/db/basic2.sim
@@ -103,18 +107,18 @@ cd ../../../debug; make
#unsupport ./test.sh -f general/parser/alter_stable.sim
./test.sh -f general/parser/auto_create_tb.sim
./test.sh -f general/parser/auto_create_tb_drop_tb.sim
-#liao ./test.sh -f general/parser/col_arithmetic_operation.sim
-./test.sh -f general/parser/columnValue.sim
-#liao ./test.sh -f general/parser/commit.sim
+./test.sh -f general/parser/col_arithmetic_operation.sim
+#./test.sh -f general/parser/columnValue.sim
+./test.sh -f general/parser/commit.sim
# ./test.sh -f general/parser/create_db.sim
-# ./test.sh -f general/parser/create_mt.sim
-# ./test.sh -f general/parser/create_tb.sim
-# ./test.sh -f general/parser/dbtbnameValidate.sim
+./test.sh -f general/parser/create_mt.sim
+./test.sh -f general/parser/create_tb.sim
+./test.sh -f general/parser/dbtbnameValidate.sim
./test.sh -f general/parser/import_commit1.sim
./test.sh -f general/parser/import_commit2.sim
./test.sh -f general/parser/import_commit3.sim
-# ./test.sh -f general/parser/insert_tb.sim
-# ./test.sh -f general/parser/first_last.sim
+./test.sh -f general/parser/insert_tb.sim
+./test.sh -f general/parser/first_last.sim
# ./test.sh -f general/parser/import_file.sim
# ./test.sh -f general/parser/lastrow.sim
# ./test.sh -f general/parser/nchar.sim
@@ -271,7 +275,7 @@ cd ../../../debug; make
./test.sh -u -f unique/dnode/offline1.sim
#jeff ./test.sh -u -f unique/dnode/offline2.sim
./test.sh -u -f unique/dnode/remove1.sim
-#hongze ./test.sh -u -f unique/dnode/remove2.sim
+#jeff ./test.sh -u -f unique/dnode/remove2.sim
./test.sh -u -f unique/dnode/vnode_clean.sim
./test.sh -u -f unique/http/admin.sim
diff --git a/tests/script/tjenkins b/tests/script/tjenkins
index b05ab3c900..9b93527de9 100755
Binary files a/tests/script/tjenkins and b/tests/script/tjenkins differ
diff --git a/tests/test-all.sh b/tests/test-all.sh
index e58a6f5132..f54d094649 100755
--- a/tests/test-all.sh
+++ b/tests/test-all.sh
@@ -33,9 +33,9 @@ echo "### run Python script ###"
cd ../pytest
if [ "$1" == "cron" ]; then
- ./fulltest.sh > /dev/null | tee pytest-out.txt
+ ./fulltest.sh 2>&1 | grep 'successfully executed\|failed\|fault' | grep -v 'default' | tee pytest-out.txt
else
- ./smoketest.sh > /dev/null | tee pytest-out.txt
+ ./smoketest.sh 2>&1 | grep 'successfully executed\|failed\|fault' | grep -v 'default' | tee pytest-out.txt
fi
totalPySuccess=`grep 'successfully executed' pytest-out.txt | wc -l`