Merge branch '3.0' of github.com:taosdata/TDengine into szhou/fixbugs

commit f68f3bcad1
Author: slzhou
Date:   2022-10-26 09:19:42 +08:00
63 changed files with 2394 additions and 1630 deletions


@ -2,7 +2,7 @@
# taos-tools
ExternalProject_Add(taos-tools
GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
GIT_TAG cc973e0
GIT_TAG f9c1d32
SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
BINARY_DIR ""
#BUILD_IN_SOURCE TRUE


@ -164,9 +164,9 @@
#define TK_SCORES 146
#define TK_TOPICS 147
#define TK_VARIABLES 148
#define TK_BNODES 149
#define TK_SNODES 150
#define TK_CLUSTER 151
#define TK_CLUSTER 149
#define TK_BNODES 150
#define TK_SNODES 151
#define TK_TRANSACTIONS 152
#define TK_DISTRIBUTED 153
#define TK_CONSUMERS 154


@ -274,6 +274,7 @@ typedef struct SShowTableDistributedStmt {
typedef struct SShowDnodeVariablesStmt {
ENodeType type;
SNode* pDnodeId;
SNode* pLikePattern;
} SShowDnodeVariablesStmt;
typedef struct SShowVnodesStmt {


@ -230,8 +230,8 @@ static const SSysDbTableSchema transSchema[] = {
};
static const SSysDbTableSchema configSchema[] = {
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = true},
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR, .sysInfo = false},
};
static const SSysDbTableSchema variablesSchema[] = {
@ -282,7 +282,7 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_USERS, userUsersSchema, tListLen(userUsersSchema), false},
{TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema), true},
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema), true},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), true},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema), false},
{TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema), true},
{TSDB_INS_TABLE_TOPICS, topicSchema, tListLen(topicSchema), false},
{TSDB_INS_TABLE_SUBSCRIPTIONS, subscriptionSchema, tListLen(subscriptionSchema), false},


@ -307,6 +307,10 @@ int32_t vmGetQueueSize(SVnodeMgmt *pMgmt, int32_t vgId, EQueueType qtype) {
}
vmReleaseVnode(pMgmt, pVnode);
}
if (size < 0) {
dError("vgId:%d, can't get size from queue since %s, qtype:%d", vgId, terrstr(), qtype);
size = 0;
}
return size;
}


@ -918,13 +918,19 @@ static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans) {
sendRsp = true;
}
} else {
if (pTrans->stage == TRN_STAGE_REDO_ACTION && pTrans->failedTimes > 6) {
if (pTrans->stage == TRN_STAGE_REDO_ACTION && ((code == TSDB_CODE_APP_NOT_READY && pTrans->failedTimes > 60) ||
(code != TSDB_CODE_APP_NOT_READY && pTrans->failedTimes > 6))) {
if (code == 0) code = TSDB_CODE_MND_TRANS_UNKNOW_ERROR;
sendRsp = true;
}
}
if (!sendRsp) return;
if (!sendRsp) {
return;
} else {
mInfo("trans:%d, send rsp, stage:%s failedTimes:%d code:0x%x", pTrans->id, mndTransStr(pTrans->stage),
pTrans->failedTimes, code);
}
int32_t size = taosArrayGetSize(pTrans->pRpcArray);
if (size <= 0) return;


@ -260,6 +260,12 @@ void *mndBuildCreateVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVg
return NULL;
}
mInfo("vgId:%d, build create vnode req, replica:%d selfIndex:%d strict:%d", createReq.vgId, createReq.replica,
createReq.selfIndex, createReq.strict);
for (int32_t i = 0; i < createReq.replica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", createReq.vgId, i, createReq.replicas[i].fqdn, createReq.replicas[i].port);
}
int32_t contLen = tSerializeSCreateVnodeReq(NULL, 0, &createReq);
if (contLen < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -293,6 +299,7 @@ static void *mndBuildAlterVnodeConfigReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pV
alterReq.strict = pDb->cfg.strict;
alterReq.cacheLast = pDb->cfg.cacheLast;
mInfo("vgId:%d, build alter vnode config req", pVgroup->vgId);
int32_t contLen = tSerializeSAlterVnodeConfigReq(NULL, 0, &alterReq);
if (contLen < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -342,7 +349,7 @@ static void *mndBuildAlterVnodeReplicaReq(SMnode *pMnode, SDbObj *pDb, SVgObj *p
}
}
alterReq.replica = pVgroup->replica;
mInfo("vgId:%d, start to alter vnode, replica:%d selfIndex:%d strict:%d", alterReq.vgId, alterReq.replica,
mInfo("vgId:%d, build alter vnode req, replica:%d selfIndex:%d strict:%d", alterReq.vgId, alterReq.replica,
alterReq.selfIndex, alterReq.strict);
for (int32_t i = 0; i < alterReq.replica; ++i) {
mInfo("vgId:%d, replica:%d ep:%s:%u", alterReq.vgId, i, alterReq.replicas[i].fqdn, alterReq.replicas[i].port);
@ -377,6 +384,7 @@ void *mndBuildDropVnodeReq(SMnode *pMnode, SDnodeObj *pDnode, SDbObj *pDb, SVgOb
memcpy(dropReq.db, pDb->name, TSDB_DB_FNAME_LEN);
dropReq.dbUid = pDb->uid;
mInfo("vgId:%d, build drop vnode req", dropReq.vgId);
int32_t contLen = tSerializeSDropVnodeReq(NULL, 0, &dropReq);
if (contLen < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
@ -1185,9 +1193,21 @@ static int32_t mndAddIncVgroupReplicaToTrans(SMnode *pMnode, STrans *pTrans, SDb
pGid->dnodeId = newDnodeId;
pGid->syncState = TAOS_SYNC_STATE_ERROR;
if (pVgroup->replica == 2) {
if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pGid) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, -1) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
} else if (pVgroup->replica == 4) {
if (mndAddCreateVnodeAction(pMnode, pTrans, pDb, pVgroup, pGid) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[1].dnodeId) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[2].dnodeId) != 0) return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
} else {
mError("vgId:%d, failed to add 1 vnode since invalid replica:%d", pVgroup->vgId, pVgroup->replica);
terrno = TSDB_CODE_MND_APP_ERROR;
return -1;
}
return 0;
}
@ -1212,9 +1232,21 @@ static int32_t mndAddDecVgroupReplicaFromTrans(SMnode *pMnode, STrans *pTrans, S
memcpy(pGid, &pVgroup->vnodeGid[pVgroup->replica], sizeof(SVnodeGid));
memset(&pVgroup->vnodeGid[pVgroup->replica], 0, sizeof(SVnodeGid));
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, -1) != 0) return -1;
if (pVgroup->replica == 1) {
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
if (mndAddDropVnodeAction(pMnode, pTrans, pDb, pVgroup, &delGid, true) != 0) return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
} else if (pVgroup->replica == 3) {
if (mndAddDropVnodeAction(pMnode, pTrans, pDb, pVgroup, &delGid, true) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[0].dnodeId) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[1].dnodeId) != 0) return -1;
if (mndAddAlterVnodeReplicaAction(pMnode, pTrans, pDb, pVgroup, pVgroup->vnodeGid[2].dnodeId) != 0) return -1;
if (mndAddAlterVnodeConfirmAction(pMnode, pTrans, pDb, pVgroup) != 0) return -1;
} else {
mError("vgId:%d, failed to remove 1 vnode since invalid replica:%d", pVgroup->vgId, pVgroup->replica);
terrno = TSDB_CODE_MND_APP_ERROR;
return -1;
}
return 0;
}
@ -1334,9 +1366,6 @@ _OVER:
}
static int32_t mndProcessRedistributeVgroupMsg(SRpcMsg *pReq) {
#if 1
return TSDB_CODE_OPS_NOT_SUPPORT;
#else
SMnode *pMnode = pReq->info.node;
SDnodeObj *pNew1 = NULL;
SDnodeObj *pNew2 = NULL;
@ -1530,7 +1559,6 @@ _OVER:
mndReleaseDb(pMnode, pDb);
return code;
#endif
}
static int32_t mndCheckDnodeMemory(SMnode *pMnode, SDbObj *pOldDb, SDbObj *pNewDb, SVgObj *pOldVgroup,


@ -1062,7 +1062,7 @@ int32_t metaFilterCreateTime(SMeta *pMeta, SMetaFltParam *param, SArray *pUids)
if (tdbTbcMoveTo(pCursor->pCur, &ctimeKey, sizeof(ctimeKey), &cmp) < 0) {
goto END;
}
bool first = true;
int32_t valid = 0;
while (1) {
void *entryKey = NULL;
@ -1074,7 +1074,13 @@ int32_t metaFilterCreateTime(SMeta *pMeta, SMetaFltParam *param, SArray *pUids)
int32_t cmp = (*param->filterFunc)((void *)&p->ctime, (void *)&pCtimeKey->ctime, param->type);
if (cmp == 0) taosArrayPush(pUids, &p->uid);
if (param->reverse == false) {
if (cmp == -1) break;
} else if (param->reverse) {
if (cmp == 1) break;
}
valid = param->reverse ? tdbTbcMoveToPrev(pCursor->pCur) : tdbTbcMoveToNext(pCursor->pCur);
if (valid < 0) break;
}


@ -572,8 +572,12 @@ static int metaBuildCtimeIdxKey(SCtimeIdxKey *ctimeKey, const SMetaEntry *pME) {
}
static int metaBuildNColIdxKey(SNcolIdxKey *ncolKey, const SMetaEntry *pME) {
if (pME->type == TSDB_NORMAL_TABLE) {
ncolKey->ncol = pME->ntbEntry.schemaRow.nCols;
ncolKey->uid = pME->uid;
} else {
return -1;
}
return 0;
}
@ -777,9 +781,13 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
terrno = TSDB_CODE_VND_INVALID_TABLE_ACTION;
goto _err;
}
// search the column to add/drop/update
pSchema = &entry.ntbEntry.schemaRow;
// save old entry
SMetaEntry oldEntry = {.type = TSDB_NORMAL_TABLE, .uid = entry.uid};
oldEntry.ntbEntry.schemaRow.nCols = pSchema->nCols;
int32_t iCol = 0;
for (;;) {
pColumn = NULL;
@ -872,6 +880,9 @@ static int metaAlterTableColumn(SMeta *pMeta, int64_t version, SVAlterTbReq *pAl
entry.version = version;
metaDeleteNcolIdx(pMeta, &oldEntry);
metaUpdateNcolIdx(pMeta, &entry);
// do actual write
metaWLock(pMeta);


@ -525,7 +525,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash) {
taosHashClear(pReader->tbIdHash);
} else {
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
}
if (pReader->tbIdHash == NULL) {
@ -543,7 +543,7 @@ int tqReaderSetTbUidList(STqReader* pReader, const SArray* tbUidList) {
int tqReaderAddTbUidList(STqReader* pReader, const SArray* tbUidList) {
if (pReader->tbIdHash == NULL) {
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_NO_LOCK);
pReader->tbIdHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT), true, HASH_ENTRY_LOCK);
if (pReader->tbIdHash == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;


@ -1209,19 +1209,19 @@ static int32_t dataBlockPartiallyRequired(STimeWindow* pWindow, SVersionRange* p
(pVerRange->maxVer < pBlock->maxVer && pVerRange->maxVer >= pBlock->minVer);
}
static SDataBlk* getNeighborBlockOfSameTable(SFileDataBlockInfo* pFBlockInfo, STableBlockScanInfo* pTableBlockScanInfo,
static SDataBlk* getNeighborBlockOfSameTable(SFileDataBlockInfo* pBlockInfo, STableBlockScanInfo* pTableBlockScanInfo,
int32_t* nextIndex, int32_t order) {
bool asc = ASCENDING_TRAVERSE(order);
if (asc && pFBlockInfo->tbBlockIdx >= taosArrayGetSize(pTableBlockScanInfo->pBlockList) - 1) {
if (asc && pBlockInfo->tbBlockIdx >= taosArrayGetSize(pTableBlockScanInfo->pBlockList) - 1) {
return NULL;
}
if (!asc && pFBlockInfo->tbBlockIdx == 0) {
if (!asc && pBlockInfo->tbBlockIdx == 0) {
return NULL;
}
int32_t step = asc ? 1 : -1;
*nextIndex = pFBlockInfo->tbBlockIdx + step;
*nextIndex = pBlockInfo->tbBlockIdx + step;
SDataBlk* pBlock = taosMemoryCalloc(1, sizeof(SDataBlk));
int32_t* indexInMapdata = taosArrayGet(pTableBlockScanInfo->pBlockList, *nextIndex);
@ -1631,7 +1631,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
}
code = doMergeRowsInBuf(pIter, pBlockScanInfo->uid, k.ts, pBlockScanInfo->delSkyline, &merge, pReader);
if (code != TSDB_CODE_SUCCESS) {
if (code != TSDB_CODE_SUCCESS || merge.pTSchema == NULL) {
return code;
}
}
@ -3768,6 +3768,15 @@ bool tsdbNextDataBlock(STsdbReader* pReader) {
return false;
}
bool tsdbTableNextDataBlock(STsdbReader* pReader, uint64_t uid) {
STableBlockScanInfo* pBlockScanInfo = taosHashGet(pReader->status.pTableMap, &uid, sizeof(uid));
if (pBlockScanInfo == NULL) { // no data block for the table of given uid
return false;
}
return true;
}
static void setBlockInfo(STsdbReader* pReader, SDataBlockInfo* pDataBlockInfo) {
ASSERT(pDataBlockInfo != NULL && pReader != NULL);
pDataBlockInfo->rows = pReader->pResBlock->info.rows;


@ -684,8 +684,13 @@ int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, i
}
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_ON_CONDITIONS_FORMAT);
QRY_ERR_RET(
nodesNodeToSQL(pJoinNode->pMergeCondition, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
if (pJoinNode->pOnConditions) {
EXPLAIN_ROW_APPEND(" AND ");
QRY_ERR_RET(
nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}


@ -279,7 +279,7 @@ int32_t removeRedundantTsCol(SLastRowScanPhysiNode* pScanNode, SColMatchInfo* pC
}
size_t size = taosArrayGetSize(pColMatchInfo->pList);
SArray* pMatchInfo = taosArrayInit(size, sizeof(SColMatchInfo));
SArray* pMatchInfo = taosArrayInit(size, sizeof(SColMatchItem));
for (int32_t i = 0; i < size; ++i) {
SColMatchItem* pColInfo = taosArrayGet(pColMatchInfo->pList, i);


@ -1073,7 +1073,7 @@ int32_t extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNod
pMatchInfo->matchType = type;
SArray* pList = taosArrayInit(numOfCols, sizeof(SColMatchInfo));
SArray* pList = taosArrayInit(numOfCols, sizeof(SColMatchItem));
if (pList == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
return code;


@ -233,7 +233,7 @@ int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bo
}
if (pListInfo->map == NULL) {
pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
}
// traverse to the stream scanner node to add this table id


@ -2301,8 +2301,8 @@ SOperatorInfo* createExchangeOperatorInfo(void* pTransporter, SExchangePhysiNode
pOperator->exprSupp.numOfExprs = taosArrayGetSize(pInfo->pDummyBlock->pDataBlock);
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL,
destroyExchangeOperatorInfo, NULL);
pOperator->fpSet =
createOperatorFpSet(prepareLoadRemoteData, doLoadRemoteData, NULL, NULL, destroyExchangeOperatorInfo, NULL);
return pOperator;
_error:
@ -3026,8 +3026,8 @@ SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SAggPhysiN
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, NULL, destroyAggOperatorInfo,
NULL);
pOperator->fpSet =
createOperatorFpSet(doOpenAggregateOptr, getAggregateResult, NULL, NULL, destroyAggOperatorInfo, NULL);
if (downstream->operatorType == QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN) {
STableScanInfo* pTableScanInfo = downstream->info;
@ -3253,8 +3253,7 @@ SOperatorInfo* createFillOperatorInfo(SOperatorInfo* downstream, SFillPhysiNode*
pOperator->info = pInfo;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet =
createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroyFillOperatorInfo, NULL);
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doFill, NULL, NULL, destroyFillOperatorInfo, NULL);
code = appendDownstream(pOperator, &downstream, 1);
return pOperator;
@ -3443,7 +3442,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
return TDB_CODE_SUCCESS;
}
pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_NO_LOCK);
pTableListInfo->map = taosHashInit(32, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), false, HASH_ENTRY_LOCK);
if (pTableListInfo->map == NULL) {
return TSDB_CODE_OUT_OF_MEMORY;
}


@ -2881,7 +2881,7 @@ int optSysDoCompare(__compar_fn_t func, int8_t comparType, void* a, void* b) {
default:
return -1;
}
return 1;
return cmp;
}
static int optSysFilterFuncImpl__LowerThan(void* a, void* b, int16_t dtype) {
@ -2987,10 +2987,6 @@ static int32_t sysFilte__TableName(void* arg, SNode* pNode, SArray* result) {
.val = pVal->datum.p,
.reverse = reverse,
.filterFunc = func};
int32_t ret = metaFilterCreateTime(pMeta, &param, result);
if (ret == 0) return 0;
return -1;
}
@ -3002,15 +2998,17 @@ static int32_t sysFilte__CreateTime(void* arg, SNode* pNode, SArray* result) {
bool reverse = false;
__optSysFilter func = optSysGetFilterFunc(pOper->opType, &reverse);
if (func == NULL) return -1;
SMetaFltParam param = {.suid = 0,
.cid = 0,
.type = TSDB_DATA_TYPE_BIGINT,
.val = &pVal->datum.i,
.reverse = reverse,
.filterFunc = func};
int32_t ret = metaFilterCreateTime(pMeta, &param, result);
if (func == NULL) return -1;
return 0;
return ret;
}
static int32_t sysFilte__Ncolumn(void* arg, SNode* pNode, SArray* result) {
void* pMeta = ((SSTabFltArg*)arg)->pMeta;
@ -3073,7 +3071,7 @@ static int32_t sysChkFilter__Comm(SNode* pNode) {
SOperatorNode* pOper = (SOperatorNode*)pNode;
EOperatorType opType = pOper->opType;
if (opType != OP_TYPE_EQUAL && opType != OP_TYPE_LOWER_EQUAL && opType != OP_TYPE_LOWER_THAN &&
OP_TYPE_GREATER_EQUAL && opType != OP_TYPE_GREATER_THAN) {
opType != OP_TYPE_GREATER_EQUAL && opType != OP_TYPE_GREATER_THAN) {
return -1;
}
return 0;


@ -1541,7 +1541,8 @@ static void deleteIntervalDiscBuf(SStreamState* pState, SHashObj* pPullDataMap,
}
}
if (qDebugFlag & DEBUG_DEBUG) {
// for debug
if (qDebugFlag & DEBUG_DEBUG && mark > 0) {
SStreamStateCur* pCur = streamStateGetCur(pState, key);
int32_t code = streamStateCurPrev(pState, pCur);
if (code == TSDB_CODE_SUCCESS) {


@ -23,11 +23,6 @@ extern "C" {
#include "function.h"
#include "functionMgt.h"
bool dummyGetEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* UNUSED_PARAM(pEnv));
bool dummyInit(SqlFunctionCtx* UNUSED_PARAM(pCtx), SResultRowEntryInfo* UNUSED_PARAM(pResultInfo));
int32_t dummyProcess(SqlFunctionCtx* UNUSED_PARAM(pCtx));
int32_t dummyFinalize(SqlFunctionCtx* UNUSED_PARAM(pCtx), SSDataBlock* UNUSED_PARAM(pBlock));
bool functionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo);
int32_t functionFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
int32_t functionFinalizeWithResultBuf(SqlFunctionCtx* pCtx, SSDataBlock* pBlock, char* finalResult);


@ -1513,7 +1513,8 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
uint8_t dbPrec = pFunc->node.resType.precision;
if (1 != numOfParams && 3 != numOfParams && 4 != numOfParams) {
//if (1 != numOfParams && 3 != numOfParams && 4 != numOfParams) {
if (1 != numOfParams) {
return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
}
@ -1523,6 +1524,7 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
}
#if 0
if (3 <= numOfParams) {
int64_t timeVal[2] = {0};
for (int32_t i = 1; i < 3; ++i) {
@ -1561,6 +1563,7 @@ static int32_t translateInterp(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
"INTERP function time interval parameter should be one of the following: [1b, 1u, 1a, 1s, 1m, 1h, 1d, 1w]");
}
}
#endif
pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType;
return TSDB_CODE_SUCCESS;
@ -2001,11 +2004,6 @@ static int32_t translateBlockDistInfoFunc(SFunctionNode* pFunc, char* pErrBuf, i
return TSDB_CODE_SUCCESS;
}
static int32_t translateGroupKeyFunc(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType;
return TSDB_CODE_SUCCESS;
}
static bool getBlockDistFuncEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* pEnv) {
pEnv->calcMemSize = sizeof(STableBlockDistInfo);
return true;
@ -2350,6 +2348,8 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.finalizeFunc = elapsedFinalize,
.invertFunc = NULL,
.combineFunc = elapsedCombine,
.pPartialFunc = "_elapsed_partial",
.pMergeFunc = "_elapsed_merge"
},
{
.name = "_elapsed_partial",


@ -429,14 +429,6 @@ typedef struct SGroupKeyInfo {
(_p).val = (_v); \
} while (0)
bool dummyGetEnv(SFunctionNode* UNUSED_PARAM(pFunc), SFuncExecEnv* UNUSED_PARAM(pEnv)) { return true; }
bool dummyInit(SqlFunctionCtx* UNUSED_PARAM(pCtx), SResultRowEntryInfo* UNUSED_PARAM(pResultInfo)) { return true; }
int32_t dummyProcess(SqlFunctionCtx* UNUSED_PARAM(pCtx)) { return 0; }
int32_t dummyFinalize(SqlFunctionCtx* UNUSED_PARAM(pCtx), SSDataBlock* UNUSED_PARAM(pBlock)) { return 0; }
bool functionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultInfo) {
if (pResultInfo->initialized) {
return false;


@ -203,7 +203,6 @@ int32_t nodesReleaseAllocator(int64_t allocatorId) {
return taosReleaseRef(g_allocatorReqRefPool, allocatorId);
}
int64_t nodesMakeAllocatorWeakRef(int64_t allocatorId) {
if (allocatorId <= 0) {
return 0;
@ -936,6 +935,7 @@ void nodesDestroyNode(SNode* pNode) {
}
case QUERY_NODE_SHOW_DNODE_VARIABLES_STMT:
nodesDestroyNode(((SShowDnodeVariablesStmt*)pNode)->pDnodeId);
nodesDestroyNode(((SShowDnodeVariablesStmt*)pNode)->pLikePattern);
break;
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
taosMemoryFreeClear(((SShowCreateDatabaseStmt*)pNode)->pCfg);


@ -176,7 +176,7 @@ SNode* createShowStmtWithCond(SAstCreateContext* pCxt, ENodeType type, SNode* pD
SNode* createShowCreateDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName);
SNode* createShowCreateTableStmt(SAstCreateContext* pCxt, ENodeType type, SNode* pRealTable);
SNode* createShowTableDistributedStmt(SAstCreateContext* pCxt, SNode* pRealTable);
SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId);
SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pLikePattern);
SNode* createShowVnodesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pDnodeEndpoint);
SNode* createCreateUserStmt(SAstCreateContext* pCxt, SToken* pUserName, const SToken* pPassword, int8_t sysinfo);
SNode* createAlterUserStmt(SAstCreateContext* pCxt, SToken* pUserName, int8_t alterType, const SToken* pVal);


@ -410,8 +410,9 @@ cmd ::= SHOW QUERIES.
cmd ::= SHOW SCORES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SCORES_STMT); }
cmd ::= SHOW TOPICS. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_TOPICS_STMT); }
cmd ::= SHOW VARIABLES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLES_STMT); }
cmd ::= SHOW CLUSTER VARIABLES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_VARIABLES_STMT); }
cmd ::= SHOW LOCAL VARIABLES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT); }
cmd ::= SHOW DNODE NK_INTEGER(A) VARIABLES. { pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &A)); }
cmd ::= SHOW DNODE NK_INTEGER(A) VARIABLES like_pattern_opt(B). { pCxt->pRootNode = createShowDnodeVariablesStmt(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &A), B); }
cmd ::= SHOW BNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_BNODES_STMT); }
cmd ::= SHOW SNODES. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_SNODES_STMT); }
cmd ::= SHOW CLUSTER. { pCxt->pRootNode = createShowStmt(pCxt, QUERY_NODE_SHOW_CLUSTER_STMT); }


@ -1372,11 +1372,12 @@ SNode* createShowTableDistributedStmt(SAstCreateContext* pCxt, SNode* pRealTable
return (SNode*)pStmt;
}
SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId) {
SNode* createShowDnodeVariablesStmt(SAstCreateContext* pCxt, SNode* pDnodeId, SNode* pLikePattern) {
CHECK_PARSER_STATUS(pCxt);
SShowDnodeVariablesStmt* pStmt = (SShowDnodeVariablesStmt*)nodesMakeNode(QUERY_NODE_SHOW_DNODE_VARIABLES_STMT);
CHECK_OUT_OF_MEM(pStmt);
pStmt->pDnodeId = pDnodeId;
pStmt->pLikePattern = pLikePattern;
return (SNode*)pStmt;
}


@ -484,11 +484,6 @@ static int32_t collectMetaKeyFromShowQueries(SCollectMetaKeyCxt* pCxt, SShowStmt
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowConfigs(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS,
pCxt->pMetaCache);
}
static int32_t collectMetaKeyFromShowVariables(SCollectMetaKeyCxt* pCxt, SShowStmt* pStmt) {
return reserveTableMetaInCache(pCxt->pParseCxt->acctId, TSDB_INFORMATION_SCHEMA_DB, TSDB_INS_TABLE_CONFIGS,
pCxt->pMetaCache);


@ -125,7 +125,6 @@ static int32_t authQuery(SAuthCxt* pCxt, SNode* pStmt) {
case QUERY_NODE_SHOW_CLUSTER_STMT:
case QUERY_NODE_SHOW_LICENCES_STMT:
case QUERY_NODE_SHOW_VGROUPS_STMT:
case QUERY_NODE_SHOW_VARIABLES_STMT:
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
case QUERY_NODE_SHOW_TABLE_DISTRIBUTED_STMT:
case QUERY_NODE_SHOW_VNODES_STMT:


@ -4847,6 +4847,11 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_ONLY_ONE_JSON_TAG);
}
int32_t tagsLen = 0;
for (int32_t i = 0; i < pTableMeta->tableInfo.numOfTags; ++i) {
tagsLen += pTagsSchema[i].bytes;
}
if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType ||
TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType) {
if (TSDB_SUPER_TABLE != pTableMeta->tableType) {
@ -4860,7 +4865,38 @@ static int32_t checkAlterSuperTableBySchema(STranslateContext* pCxt, SAlterTable
pSchema->bytes >= calcTypeBytes(pStmt->dataType)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
if (TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES == pStmt->alterType &&
pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) - pSchema->bytes > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
if (TSDB_ALTER_TABLE_UPDATE_TAG_BYTES == pStmt->alterType &&
tagsLen + calcTypeBytes(pStmt->dataType) - pSchema->bytes > TSDB_MAX_TAGS_LEN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_LENGTH, TSDB_MAX_TAGS_LEN);
}
}
if (TSDB_ALTER_TABLE_ADD_COLUMN == pStmt->alterType) {
if (TSDB_MAX_COLUMNS == pTableMeta->tableInfo.numOfColumns) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
}
if (pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
}
if (TSDB_ALTER_TABLE_ADD_TAG == pStmt->alterType) {
if (TSDB_MAX_TAGS == pTableMeta->tableInfo.numOfTags) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_NUM);
}
if (tagsLen + calcTypeBytes(pStmt->dataType) > TSDB_MAX_TAGS_LEN) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_TAGS_LENGTH, TSDB_MAX_TAGS_LEN);
}
}
return TSDB_CODE_SUCCESS;
}
@ -6259,16 +6295,28 @@ static int32_t rewriteShowStableTags(STranslateContext* pCxt, SQuery* pQuery) {
}
static int32_t rewriteShowDnodeVariables(STranslateContext* pCxt, SQuery* pQuery) {
SSelectStmt* pStmt = NULL;
int32_t code = createSelectStmtForShow(nodeType(pQuery->pRoot), &pStmt);
SShowDnodeVariablesStmt* pStmt = (SShowDnodeVariablesStmt*)pQuery->pRoot;
SNode* pDnodeCond = NULL;
SNode* pLikeCond = NULL;
SSelectStmt* pSelect = NULL;
int32_t code = createSelectStmtForShow(nodeType(pQuery->pRoot), &pSelect);
if (TSDB_CODE_SUCCESS == code) {
code = createOperatorNode(OP_TYPE_EQUAL, "dnode_id", ((SShowDnodeVariablesStmt*)pQuery->pRoot)->pDnodeId,
&pStmt->pWhere);
code = createOperatorNode(OP_TYPE_EQUAL, "dnode_id", pStmt->pDnodeId, &pDnodeCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = createOperatorNode(OP_TYPE_LIKE, "name", pStmt->pLikePattern, &pLikeCond);
}
if (TSDB_CODE_SUCCESS == code) {
if (NULL != pLikeCond) {
code = createLogicCondNode(pDnodeCond, pLikeCond, &pSelect->pWhere);
} else {
pSelect->pWhere = pDnodeCond;
}
}
if (TSDB_CODE_SUCCESS == code) {
pQuery->showRewrite = true;
nodesDestroyNode(pQuery->pRoot);
pQuery->pRoot = (SNode*)pStmt;
pQuery->pRoot = (SNode*)pSelect;
}
return code;
}
@ -7066,6 +7114,14 @@ static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, S
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DUPLICATED_COLUMN);
}
if (TSDB_MAX_COLUMNS == pTableMeta->tableInfo.numOfColumns) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_TOO_MANY_COLUMNS);
}
if (pTableMeta->tableInfo.rowSize + calcTypeBytes(pStmt->dataType) > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
pReq->colName = strdup(pStmt->colName);
if (NULL == pReq->colName) {
return TSDB_CODE_OUT_OF_MEMORY;
@ -7073,7 +7129,6 @@ static int32_t buildAddColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, S
pReq->type = pStmt->dataType.type;
pReq->flags = COL_SMA_ON;
// pReq->bytes = pStmt->dataType.bytes;
pReq->bytes = calcTypeBytes(pStmt->dataType);
return TSDB_CODE_SUCCESS;
}
@ -7111,6 +7166,10 @@ static int32_t buildUpdateColReq(STranslateContext* pCxt, SAlterTableStmt* pStmt
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_MODIFY_COL);
}
if (pTableMeta->tableInfo.rowSize + pReq->colModBytes - pSchema->bytes > TSDB_MAX_BYTES_PER_ROW) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ROW_LENGTH, TSDB_MAX_BYTES_PER_ROW);
}
pReq->colName = strdup(pStmt->colName);
if (NULL == pReq->colName) {
return TSDB_CODE_OUT_OF_MEMORY;

File diff suppressed because it is too large.


@ -100,6 +100,8 @@ TEST_F(ParserShowToUseTest, showDnodeVariables) {
useDb("root", "test");
run("SHOW DNODE 1 VARIABLES");
run("SHOW DNODE 1 VARIABLES LIKE '%debug%'");
}
TEST_F(ParserShowToUseTest, showFunctions) {


@ -624,8 +624,6 @@ static int32_t createIndefRowsFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt
return code;
}
static bool isInterpFunc(int32_t funcId) { return fmIsInterpFunc(funcId) || fmIsInterpPseudoColumnFunc(funcId); }
static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect, SLogicNode** pLogicNode) {
if (!pSelect->hasInterpFunc) {
return TSDB_CODE_SUCCESS;
@ -640,7 +638,8 @@ static int32_t createInterpFuncLogicNode(SLogicPlanContext* pCxt, SSelectStmt* p
pInterpFunc->node.requireDataOrder = getRequireDataOrder(true, pSelect);
pInterpFunc->node.resultDataOrder = pInterpFunc->node.requireDataOrder;
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, isInterpFunc, &pInterpFunc->pFuncs);
// interp functions and _group_key functions
int32_t code = nodesCollectFuncs(pSelect, SQL_CLAUSE_SELECT, fmIsVectorFunc, &pInterpFunc->pFuncs);
if (TSDB_CODE_SUCCESS == code) {
code = rewriteExprsForSelect(pInterpFunc->pFuncs, pSelect, SQL_CLAUSE_SELECT);
}


@ -2242,6 +2242,26 @@ static EDealRes lastRowScanOptSetColDataType(SNode* pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
static void lastRowScanOptSetLastTargets(SNodeList* pTargets, SNodeList* pLastCols) {
SNode* pTarget = NULL;
WHERE_EACH(pTarget, pTargets) {
bool found = false;
SNode* pCol = NULL;
FOREACH(pCol, pLastCols) {
if (nodesEqualNode(pCol, pTarget)) {
getLastCacheDataType(&(((SColumnNode*)pTarget)->node.resType));
found = true;
break;
}
}
if (!found) {
ERASE_NODE(pTargets);
continue;
}
WHERE_NEXT;
}
}
static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
SAggLogicNode* pAgg = (SAggLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, lastRowScanOptMayBeOptimized);
@ -2265,6 +2285,7 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic
}
if (FUNCTION_TYPE_LAST == funcType) {
nodesWalkExpr(nodesListGetNode(pFunc->pParameterList, 0), lastRowScanOptSetColDataType, &cxt);
nodesListErase(pFunc->pParameterList, nodesListGetCell(pFunc->pParameterList, 1));
}
}
}
@ -2274,9 +2295,9 @@ static int32_t lastRowScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogic
pScan->igLastNull = pAgg->hasLast ? true : false;
if (NULL != cxt.pLastCols) {
cxt.doAgg = false;
nodesWalkExprs(pScan->pScanCols, lastRowScanOptSetColDataType, &cxt);
lastRowScanOptSetLastTargets(pScan->pScanCols, cxt.pLastCols);
nodesWalkExprs(pScan->pScanPseudoCols, lastRowScanOptSetColDataType, &cxt);
nodesWalkExprs(pScan->node.pTargets, lastRowScanOptSetColDataType, &cxt);
lastRowScanOptSetLastTargets(pScan->node.pTargets, cxt.pLastCols);
nodesClearList(cxt.pLastCols);
}
pAgg->hasLastRow = false;
@ -2365,7 +2386,7 @@ static int32_t mergeProjectsOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog
return mergeProjectsOptimizeImpl(pCxt, pLogicSubplan, pProjectNode);
}
static bool tagScanMayBeOptimized(SLogicNode* pNode) {
static bool tagScanOptShouldBeOptimized(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pNode) || (SCAN_TYPE_TAG == ((SScanLogicNode*)pNode)->scanType)) {
return false;
}
@ -2397,7 +2418,7 @@ static bool tagScanMayBeOptimized(SLogicNode* pNode) {
}
static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
SScanLogicNode* pScanNode = (SScanLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, tagScanMayBeOptimized);
SScanLogicNode* pScanNode = (SScanLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, tagScanOptShouldBeOptimized);
if (NULL == pScanNode) {
return TSDB_CODE_SUCCESS;
}
@ -2440,6 +2461,29 @@ static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubp
return TSDB_CODE_SUCCESS;
}
static bool pushDownLimitOptShouldBeOptimized(SLogicNode* pNode) {
if (NULL == pNode->pLimit || 1 != LIST_LENGTH(pNode->pChildren) ||
QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(nodesListGetNode(pNode->pChildren, 0))) {
return false;
}
return true;
}
static int32_t pushDownLimitOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
SLogicNode* pNode = optFindPossibleNode(pLogicSubplan->pNode, pushDownLimitOptShouldBeOptimized);
if (NULL == pNode) {
return TSDB_CODE_SUCCESS;
}
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pNode->pChildren, 0);
nodesDestroyNode(pChild->pLimit);
pChild->pLimit = pNode->pLimit;
pNode->pLimit = NULL;
pCxt->optimized = true;
return TSDB_CODE_SUCCESS;
}
// clang-format off
static const SOptimizeRule optimizeRuleSet[] = {
{.pName = "ScanPath", .optimizeFunc = scanPathOptimize},
@ -2453,7 +2497,8 @@ static const SOptimizeRule optimizeRuleSet[] = {
{.pName = "RewriteTail", .optimizeFunc = rewriteTailOptimize},
{.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize},
{.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize},
{.pName = "TagScan", .optimizeFunc = tagScanOptimize}
{.pName = "TagScan", .optimizeFunc = tagScanOptimize},
// {.pName = "PushDownLimit", .optimizeFunc = pushDownLimitOptimize}
};
// clang-format on


@ -84,6 +84,7 @@ static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pChild, SE
if (NULL == pExchange) {
return TSDB_CODE_OUT_OF_MEMORY;
}
pExchange->srcStartGroupId = pCxt->groupId;
pExchange->srcEndGroupId = pCxt->groupId;
pExchange->node.precision = pChild->precision;
@ -91,6 +92,13 @@ static int32_t splCreateExchangeNode(SSplitContext* pCxt, SLogicNode* pChild, SE
if (NULL == pExchange->node.pTargets) {
return TSDB_CODE_OUT_OF_MEMORY;
}
if (NULL != pChild->pLimit) {
pExchange->node.pLimit = nodesCloneNode(pChild->pLimit);
if (NULL == pExchange->node.pLimit) {
return TSDB_CODE_OUT_OF_MEMORY;
}
((SLimitNode*)pChild->pLimit)->offset = 0;
}
*pOutput = pExchange;
return TSDB_CODE_SUCCESS;
@ -921,6 +929,13 @@ static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSp
if (NULL != pInfo->pSplitNode->pParent && QUERY_NODE_LOGIC_PLAN_PROJECT == nodeType(pInfo->pSplitNode->pParent) &&
NULL == pInfo->pSplitNode->pParent->pLimit && NULL == pInfo->pSplitNode->pParent->pSlimit) {
pSplitNode = pInfo->pSplitNode->pParent;
if (NULL != pInfo->pSplitNode->pLimit) {
pSplitNode->pLimit = nodesCloneNode(pInfo->pSplitNode->pLimit);
if (NULL == pSplitNode->pLimit) {
return TSDB_CODE_OUT_OF_MEMORY;
}
((SLimitNode*)pInfo->pSplitNode->pLimit)->offset = 0;
}
}
int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pSplitNode, SUBPLAN_TYPE_MERGE);
if (TSDB_CODE_SUCCESS == code) {


@ -103,6 +103,9 @@ TEST_F(PlanBasicTest, interpFunc) {
run("SELECT INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
run("SELECT _IROWTS, INTERP(c1) FROM t1 RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
run("SELECT TBNAME, _IROWTS, INTERP(c1) FROM t1 PARTITION BY TBNAME "
"RANGE('2017-7-14 18:00:00', '2017-7-14 19:00:00') EVERY(5s) FILL(LINEAR)");
}
TEST_F(PlanBasicTest, lastRowFuncWithoutCache) {


@ -95,9 +95,13 @@ TEST_F(PlanOptimizeTest, eliminateProjection) {
useDb("root", "test");
run("SELECT c1, sum(c3) FROM t1 GROUP BY c1");
run("SELECT c1 FROM t1");
run("SELECT * FROM st1");
run("SELECT c1 FROM st1s3");
// run("select 1-abs(c1) from (select unique(c1) c1 from st1s3) order by 1 nulls first");
}
@ -137,3 +141,13 @@ TEST_F(PlanOptimizeTest, tagScan) {
run("select distinct tag1 from st1");
run("select tag1*tag1 from st1 group by tag1*tag1");
}
TEST_F(PlanOptimizeTest, pushDownLimit) {
useDb("root", "test");
run("SELECT c1 FROM t1 LIMIT 1");
run("SELECT c1 FROM st1 LIMIT 1");
run("SELECT c1 FROM st1 LIMIT 20 OFFSET 10");
}


@ -85,6 +85,8 @@ TEST_F(PlanOtherTest, show) {
run("SHOW DNODE 1 VARIABLES");
run("SHOW DNODE 1 VARIABLES LIKE '%debug%'");
run("SHOW TAGS FROM st1s1");
run("SHOW TABLE TAGS FROM st1");


@ -45,6 +45,11 @@
// /\ UNCHANGED <<messages, serverVars, candidateVars, leaderVars, log>>
//
void syncMaybeAdvanceCommitIndex(SSyncNode* pSyncNode) {
if (pSyncNode == NULL) {
sError("pSyncNode is NULL");
return;
}
if (pSyncNode->state != TAOS_SYNC_STATE_LEADER) {
syncNodeErrorLog(pSyncNode, "not leader, can not advance commit index");
return;
@ -172,6 +177,7 @@ static inline int64_t syncNodeAbs64(int64_t a, int64_t b) {
int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) {
return pSyncNode->quorum;
#if 0
int32_t quorum = 1; // self
int64_t timeNow = taosGetTimestampMs();
@ -228,6 +234,7 @@ int32_t syncNodeDynamicQuorum(const SSyncNode* pSyncNode) {
}
return quorum;
#endif
}
/*


@ -835,7 +835,9 @@ void syncGetRetryEpSet(int64_t rid, SEpSet* pEpSet) {
sInfo("vgId:%d, sync get retry epset: index:%d %s:%d", pSyncNode->vgId, i, pEpSet->eps[i].fqdn,
pEpSet->eps[i].port);
}
if (pEpSet->numOfEps > 0) {
pEpSet->inUse = (pSyncNode->pRaftCfg->cfg.myIndex + 1) % pEpSet->numOfEps;
}
sInfo("vgId:%d, sync get retry epset in-use:%d", pSyncNode->vgId, pEpSet->inUse);
taosReleaseRef(tsNodeRefId, pSyncNode->rid);
@ -1438,12 +1440,13 @@ void syncNodeStartStandBy(SSyncNode* pSyncNode) {
}
void syncNodeClose(SSyncNode* pSyncNode) {
syncNodeEventLog(pSyncNode, "sync close");
if (pSyncNode == NULL) {
return;
}
int32_t ret;
syncNodeEventLog(pSyncNode, "sync close");
ret = raftStoreClose(pSyncNode->pRaftStore);
ASSERT(ret == 0);
@ -1879,6 +1882,10 @@ char* syncNode2Str(const SSyncNode* pSyncNode) {
}
inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
if (pSyncNode == NULL) {
return;
}
SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0};
if (pSyncNode->pFsm != NULL && pSyncNode->pFsm->FpGetSnapshotInfo != NULL) {
pSyncNode->pFsm->FpGetSnapshotInfo(pSyncNode->pFsm, &snapshot);
@ -1954,6 +1961,10 @@ inline void syncNodeEventLog(const SSyncNode* pSyncNode, char* str) {
}
inline void syncNodeErrorLog(const SSyncNode* pSyncNode, char* str) {
if (pSyncNode == NULL) {
return;
}
int32_t userStrLen = strlen(str);
SSnapshot snapshot = {.data = NULL, .lastApplyIndex = -1, .lastApplyTerm = 0};
@ -2937,6 +2948,7 @@ static int32_t syncNodeEqNoop(SSyncNode* ths) {
sTrace("syncNodeEqNoop pSyncNode->FpEqMsg is NULL");
}
syncEntryDestory(pEntry);
taosMemoryFree(serialized);
syncClientRequestDestroy(pSyncMsg);
@ -3010,6 +3022,7 @@ int32_t syncNodeOnPingCb(SSyncNode* ths, SyncPing* pMsg) {
*/
syncNodeSendMsgById(&pMsgReply->destId, ths, &rpcMsg);
syncPingReplyDestroy(pMsgReply);
return ret;
}
@ -3058,6 +3071,7 @@ int32_t syncNodeOnHeartbeat(SSyncNode* ths, SyncHeartbeat* pMsg) {
// reply
syncNodeSendMsgById(&pMsgReply->destId, ths, &rpcMsg);
syncHeartbeatReplyDestroy(pMsgReply);
return 0;
}
@ -3329,6 +3343,11 @@ int32_t syncNodeDoCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endInde
return 0;
}
if (ths == NULL) {
return -1;
}
if (ths->pFsm != NULL && ths->pFsm->FpGetSnapshotInfo != NULL) {
// advance commit index to snapshot first
SSnapshot snapshot = {0};
ths->pFsm->FpGetSnapshotInfo(ths->pFsm, &snapshot);
@ -3341,6 +3360,7 @@ int32_t syncNodeDoCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endInde
// update begin index
beginIndex = snapshot.lastApplyIndex + 1;
}
}
int32_t code = 0;
ESyncState state = flag;
@ -3413,9 +3433,11 @@ int32_t syncNodeDoCommit(SSyncNode* ths, SyncIndex beginIndex, SyncIndex endInde
// config change finish
if (pEntry->originalRpcType == TDMT_SYNC_CONFIG_CHANGE_FINISH) {
if (rpcMsg.pCont != NULL) {
code = syncNodeConfigChangeFinish(ths, &rpcMsg, pEntry);
ASSERT(code == 0);
}
}
#if 0
// execute in pre-commit
@ -3528,7 +3550,7 @@ bool syncNodeCanChange(SSyncNode* pSyncNode) {
for (int i = 0; i < pSyncNode->peersNum; ++i) {
SSyncSnapshotSender* pSender = syncNodeGetSnapshotSender(pSyncNode, &(pSyncNode->peersId)[i]);
if (pSender->start) {
if (pSender != NULL && pSender->start) {
sError("sync cannot change3");
return false;
}


@ -411,32 +411,40 @@ SyncPing* syncPingDeserialize3(void* buf, int32_t bufLen) {
pMsg->bytes = bytes;
if (tDecodeI32(&decoder, &pMsg->vgId) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU32(&decoder, &pMsg->msgType) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU64(&decoder, &pMsg->srcId.addr) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeI32(&decoder, &pMsg->srcId.vgId) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU64(&decoder, &pMsg->destId.addr) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeI32(&decoder, &pMsg->destId.vgId) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU32(&decoder, &pMsg->dataLen) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
uint32_t len;
char* data = NULL;
if (tDecodeBinary(&decoder, (uint8_t**)(&data), &len) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
ASSERT(len = pMsg->dataLen);
ASSERT(len == pMsg->dataLen);
memcpy(pMsg->data, data, len);
tEndDecode(&decoder);
@ -673,32 +681,40 @@ SyncPingReply* syncPingReplyDeserialize3(void* buf, int32_t bufLen) {
pMsg->bytes = bytes;
if (tDecodeI32(&decoder, &pMsg->vgId) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU32(&decoder, &pMsg->msgType) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU64(&decoder, &pMsg->srcId.addr) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeI32(&decoder, &pMsg->srcId.vgId) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU64(&decoder, &pMsg->destId.addr) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeI32(&decoder, &pMsg->destId.vgId) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
if (tDecodeU32(&decoder, &pMsg->dataLen) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
uint32_t len;
char* data = NULL;
if (tDecodeBinary(&decoder, (uint8_t**)(&data), &len) < 0) {
taosMemoryFree(pMsg);
return NULL;
}
ASSERT(len = pMsg->dataLen);
ASSERT(len == pMsg->dataLen);
memcpy(pMsg->data, data, len);
tEndDecode(&decoder);


@ -532,7 +532,7 @@ int32_t raftEntryCacheGetEntry(struct SRaftEntryCache* pCache, SyncIndex index,
SSyncRaftEntry* pEntry = NULL;
int32_t code = raftEntryCacheGetEntryP(pCache, index, &pEntry);
if (code == 1) {
*ppEntry = taosMemoryMalloc(pEntry->bytes);
*ppEntry = taosMemoryMalloc((int64_t)(pEntry->bytes));
memcpy(*ppEntry, pEntry, pEntry->bytes);
(*ppEntry)->rid = -1;
} else {


@ -209,7 +209,8 @@ bool syncUtilCanPrint(char c) {
}
char* syncUtilprintBin(char* ptr, uint32_t len) {
char* s = taosMemoryMalloc(len + 1);
int64_t memLen = (int64_t)(len + 1);
char* s = taosMemoryMalloc(memLen);
ASSERT(s != NULL);
memset(s, 0, len + 1);
memcpy(s, ptr, len);


@ -741,9 +741,13 @@ static int tdbBtreeBalanceNonRoot(SBTree *pBt, SPage *pParent, int idx, TXN *pTx
tdbBtreeInitPage(pOldsCopy[i], &iarg, 0);
tdbPageCopy(pOlds[i], pOldsCopy[i], 0);
}
for (iNew = 0; iNew < nNews; ++iNew) {
tdbBtreeInitPage(pNews[iNew], &iarg, 0);
}
iNew = 0;
nNewCells = 0;
tdbBtreeInitPage(pNews[iNew], &iarg, 0);
for (int iOld = 0; iOld < nOlds; iOld++) {
SPage *pPage;


@ -374,7 +374,11 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
} else {
tError("fail to dispatch conn to work thread");
}
if (!uv_is_closing((uv_handle_t*)req->data)) {
uv_close((uv_handle_t*)req->data, uvFreeCb);
} else {
taosMemoryFree(req->data);
}
taosMemoryFree(req);
}
@ -651,12 +655,14 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) {
uv_tcp_init(pObj->loop, cli);
if (uv_accept(stream, (uv_stream_t*)cli) == 0) {
#ifdef WINDOWS
if (pObj->numOfWorkerReady < pObj->numOfThreads) {
tError("worker-threads are not ready for all, need %d instead of %d.", pObj->numOfThreads,
pObj->numOfWorkerReady);
uv_close((uv_handle_t*)cli, NULL);
return;
}
#endif
uv_write_t* wr = (uv_write_t*)taosMemoryMalloc(sizeof(uv_write_t));
wr->data = cli;
@ -668,7 +674,11 @@ void uvOnAcceptCb(uv_stream_t* stream, int status) {
uv_write2(wr, (uv_stream_t*)&(pObj->pipe[pObj->workerIdx][0]), &buf, 1, (uv_stream_t*)cli, uvOnPipeWriteCb);
} else {
if (!uv_is_closing((uv_handle_t*)cli)) {
uv_close((uv_handle_t*)cli, NULL);
} else {
taosMemoryFree(cli);
}
}
}
void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
@ -681,7 +691,6 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
tWarn("failed to create connect:%p", q);
taosMemoryFree(buf->base);
uv_close((uv_handle_t*)q, NULL);
// taosMemoryFree(q);
return;
}
// free memory allocated by
@ -770,7 +779,12 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
return false;
}
#ifdef WINDOWS
uv_pipe_init(pThrd->loop, pThrd->pipe, 1);
#else
uv_pipe_init(pThrd->loop, pThrd->pipe, 1);
uv_pipe_open(pThrd->pipe, pThrd->fd);
#endif
pThrd->pipe->data = pThrd;
@ -785,8 +799,11 @@ static bool addHandleToWorkloop(SWorkThrd* pThrd, char* pipeName) {
QUEUE_INIT(&pThrd->conn);
pThrd->asyncPool = transAsyncPoolCreate(pThrd->loop, 5, pThrd, uvWorkerAsyncCb);
#ifdef WINDOWS
uv_pipe_connect(&pThrd->connect_req, pThrd->pipe, pipeName, uvOnPipeConnectionCb);
// uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
#else
uv_read_start((uv_stream_t*)pThrd->pipe, uvAllocConnBufferCb, uvOnConnectionCb);
#endif
return true;
}
@ -958,20 +975,19 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
srv->port = port;
uv_loop_init(srv->loop);
char pipeName[PATH_MAX];
#ifdef WINDOWS
int ret = uv_pipe_init(srv->loop, &srv->pipeListen, 0);
if (ret != 0) {
tError("failed to init pipe, errmsg: %s", uv_err_name(ret));
goto End;
}
#ifdef WINDOWS
char pipeName[64];
snprintf(pipeName, sizeof(pipeName), "\\\\?\\pipe\\trans.rpc.%d-%" PRIu64, taosSafeRand(), GetCurrentProcessId());
#else
char pipeName[PATH_MAX] = {0};
snprintf(pipeName, sizeof(pipeName), "%s%spipe.trans.rpc.%08d-%" PRIu64, tsTempDir, TD_DIRSEP, taosSafeRand(),
taosGetSelfPthreadId());
#endif
// char pipeName[PATH_MAX] = {0};
// snprintf(pipeName, sizeof(pipeName), "%s%spipe.trans.rpc.%08d-%" PRIu64, tsTempDir, TD_DIRSEP, taosSafeRand(),
// taosGetSelfPthreadId());
ret = uv_pipe_bind(&srv->pipeListen, pipeName);
if (ret != 0) {
tError("failed to bind pipe, errmsg: %s", uv_err_name(ret));
@ -997,6 +1013,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
if (false == addHandleToWorkloop(thrd, pipeName)) {
goto End;
}
int err = taosThreadCreate(&(thrd->thread), NULL, transWorkerThread, (void*)(thrd));
if (err == 0) {
tDebug("success to create worker-thread:%d", i);
@ -1006,14 +1023,54 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
goto End;
}
}
#else
for (int i = 0; i < srv->numOfThreads; i++) {
SWorkThrd* thrd = (SWorkThrd*)taosMemoryCalloc(1, sizeof(SWorkThrd));
thrd->pTransInst = shandle;
thrd->quit = false;
thrd->pTransInst = shandle;
srv->pipe[i] = (uv_pipe_t*)taosMemoryCalloc(2, sizeof(uv_pipe_t));
srv->pThreadObj[i] = thrd;
uv_os_sock_t fds[2];
if (uv_socketpair(AF_UNIX, SOCK_STREAM, fds, UV_NONBLOCK_PIPE, UV_NONBLOCK_PIPE) != 0) {
goto End;
}
uv_pipe_init(srv->loop, &(srv->pipe[i][0]), 1);
uv_pipe_open(&(srv->pipe[i][0]), fds[1]);
thrd->pipe = &(srv->pipe[i][1]); // init read
thrd->fd = fds[0];
if (false == addHandleToWorkloop(thrd, pipeName)) {
goto End;
}
int err = taosThreadCreate(&(thrd->thread), NULL, transWorkerThread, (void*)(thrd));
if (err == 0) {
tDebug("success to create worker-thread:%d", i);
} else {
// TODO: clear all other resource later
tError("failed to create worker-thread:%d", i);
goto End;
}
}
#endif
if (false == taosValidIpAndPort(srv->ip, srv->port)) {
terrno = TAOS_SYSTEM_ERROR(errno);
tError("invalid ip/port, %d:%d, reason:%s", srv->ip, srv->port, terrstr());
goto End;
}
if (false == addHandleToAcceptloop(srv)) {
goto End;
}
int err = taosThreadCreate(&srv->thread, NULL, transAcceptThread, (void*)srv);
if (err == 0) {
tDebug("success to create accept-thread");
@ -1022,6 +1079,7 @@ void* transInitServer(uint32_t ip, uint32_t port, char* label, int numOfThreads,
goto End;
// clear all resource later
}
srv->inited = true;
return srv;
End:


@ -371,6 +371,7 @@ int32_t tsDecompressBoolImp(const char *const input, const int32_t nelements, ch
return nelements;
}
#if 0
/* Run Length Encoding(RLE) Method */
int32_t tsCompressBoolRLEImp(const char *const input, const int32_t nelements, char *const output) {
int32_t _pos = 0;
@ -419,6 +420,7 @@ int32_t tsDecompressBoolRLEImp(const char *const input, const int32_t nelements,
}
}
}
#endif
/* ----------------------------------------------String Compression
* ---------------------------------------------- */


@ -183,6 +183,7 @@ void tdigestAdd(TDigest *t, double x, int64_t w) {
if (t->num_buffered_pts >= t->threshold) tdigestCompress(t);
}
#if 0
double tdigestCDF(TDigest *t, double x) {
if (t == NULL) return 0;
@ -233,6 +234,7 @@ double tdigestCDF(TDigest *t, double x) {
return 1;
}
#endif
double tdigestQuantile(TDigest *t, double q) {
if (t == NULL) return 0;


@ -16,6 +16,9 @@
#define _DEFAULT_SOURCE
#include "tfunctional.h"
FORCE_INLINE void* genericInvoke(tGenericSavedFunc* const pSavedFunc) { return pSavedFunc->func(pSavedFunc->args); }
#if 0
tGenericSavedFunc* genericSavedFuncInit(GenericVaFunc func, int32_t numOfArgs) {
tGenericSavedFunc* pSavedFunc = taosMemoryMalloc(sizeof(tGenericSavedFunc) + numOfArgs * (sizeof(void*)));
if (pSavedFunc == NULL) return NULL;
@ -37,10 +40,9 @@ tVoidSavedFunc* voidSavedFuncInit(VoidVaFunc func, int32_t numOfArgs) {
return pSavedFunc;
}
FORCE_INLINE void* genericInvoke(tGenericSavedFunc* const pSavedFunc) { return pSavedFunc->func(pSavedFunc->args); }
FORCE_INLINE int32_t i32Invoke(tI32SavedFunc* const pSavedFunc) { return pSavedFunc->func(pSavedFunc->args); }
FORCE_INLINE void voidInvoke(tVoidSavedFunc* const pSavedFunc) {
if (pSavedFunc) pSavedFunc->func(pSavedFunc->args);
}
#endif


@ -137,6 +137,8 @@ int32_t taosQueueItemSize(STaosQueue *queue) {
taosThreadMutexLock(&queue->mutex);
int32_t numOfItems = queue->numOfItems;
taosThreadMutexUnlock(&queue->mutex);
uTrace("queue:%p, numOfItems:%d memOfItems:%" PRId64, queue, queue->numOfItems, queue->memOfItems);
return numOfItems;
}


@ -50,11 +50,11 @@
# unsupport ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
# unsupport ./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
./test.sh -f tsim/dnode/offline_reason.sim
# unsupport ./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
# unsupport ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
# unsupport ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
# unsupport ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
# unsupport ./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica1.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v2.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v3.sim
# unsupport ./test.sh -f tsim/dnode/vnode_clean.sim
./test.sh -f tsim/dnode/use_dropped_dnode.sim


@ -85,3 +85,4 @@ echo statusInterval 1 >> %TAOS_CFG%
echo asyncLog 0 >> %TAOS_CFG%
echo locale en_US.UTF-8 >> %TAOS_CFG%
echo telemetryReporting 0 >> %TAOS_CFG%
echo querySmaOptimize 1 >> %TAOS_CFG%


@ -144,4 +144,5 @@ echo "numOfLogLines 20000000" >> $TAOS_CFG
echo "asyncLog 0" >> $TAOS_CFG
echo "locale en_US.UTF-8" >> $TAOS_CFG
echo "telemetryReporting 0" >> $TAOS_CFG
echo "querySmaOptimize 1" >> $TAOS_CFG
echo " " >> $TAOS_CFG


@ -55,7 +55,7 @@ sql_error alter table tb modify column c3 nchar(10);
sql_error alter table tb modify column c3 nchar(0);
sql_error alter table tb modify column c3 nchar(-1);
sql_error alter table tb modify column c3 binary(80);
sql alter table tb modify column c3 nchar(17000);
sql_error alter table tb modify column c3 nchar(17000);
sql_error alter table tb modify column c3 nchar(100), c2 binary(30);
sql_error alter table tb modify column c1 nchar(100), c2 binary(30);
sql_error alter stable tb modify column c2 binary(30);


@ -34,7 +34,7 @@ sql alter table tb1 set tag len = 379
# case TD-5594
sql create stable st5520(ts timestamp, f int) tags(t0 bool, t1 nchar(4093), t2 nchar(1))
sql alter stable st5520 modify tag t2 nchar(2);
sql_error alter stable st5520 modify tag t2 nchar(2);
# test end
sql drop database $db


@ -47,6 +47,7 @@ sql explain verbose true select ts from tb1 where f1 > 0;
sql explain verbose true select * from st1 where f1 > 0 and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00';
sql explain verbose true select count(*) from st1 partition by tbname slimit 1 soffset 2 limit 2 offset 1;
sql explain verbose true select * from information_schema.ins_stables where db_name='db2';
sql explain verbose true select st1.f1 from st1 join st2 on st1.ts=st2.ts and st1.f1 > 0;
print ======== step4
sql explain analyze select ts from st1 where -2;
@ -75,6 +76,7 @@ sql explain analyze verbose true select f1 from st1 where f1 > 0 and ts > '2020-
sql explain analyze verbose true select * from information_schema.ins_stables where db_name='db2';
sql explain analyze verbose true select * from (select min(f1),count(*) a from st1 where f1 > 0) where a < 0;
sql explain analyze verbose true select count(f1) from st1 group by tbname;
sql explain analyze verbose true select st1.f1 from st1 join st2 on st1.ts=st2.ts and st1.f1 > 0;
#not pass case
#sql explain verbose true select count(*),sum(f1) as aa from tb1 where (f1 > 0 or f1 < -1) and ts > '2020-10-31 00:00:00' and ts < '2021-10-31 00:00:00' order by aa;
@ -95,4 +97,4 @@ sql explain analyze verbose true select count(f1) from st1 group by tbname;
#sql explain analyze verbose true select min(f1) from st1 interval(3m, 2a) sliding(1m);
system sh/exec.sh -n dnode1 -s stop -x SIGINT
#system sh/exec.sh -n dnode1 -s stop -x SIGINT


@ -30,12 +30,13 @@ sql insert into ct1 values('2022-10-19 09:55:46.682', 11, 2.1, 3.1)('2022-10-19
print =============== create sma index from super table
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(5m,10s) sliding(5m)
sql create sma index sma_index_name1 on stb function(max(c1),max(c2),min(c1)) interval(5m,10s) sliding(5m) watermark 1s max_delay 1s
print $data00 $data01 $data02 $data03
print =============== trigger stream to execute sma aggr task and insert sma data into sma store
sql insert into ct1 values('2022-10-19 09:55:50.682', 20, 20.0, 30.0)
#===================================================================
#==================== sleep 2s to wait for tsma result
sleep 2000
print =============== show streams ================================
sql show streams;


@ -137,10 +137,9 @@ sql_error show create database d2
sql show create table d2.stb2;
sql show create table d2.ctb2;
sql show create table d2.ntb2;
sql_error show variables;
sql show local variables;
sql_error show dnode 1 variables;
sql_error show variables;
sql show variables;
print =============== check information_schema
@ -167,7 +166,7 @@ sql select * from information_schema.ins_subscriptions
sql select * from information_schema.ins_streams
sql_error select * from information_schema.ins_grants
sql_error select * from information_schema.ins_vgroups
sql_error select * from information_schema.ins_configs
sql select * from information_schema.ins_configs
sql_error select * from information_schema.ins_dnode_variables
print =============== check performance_schema
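With this change a query against information_schema.ins_configs is expected to succeed for the user this case runs as, while ins_grants, ins_vgroups and ins_dnode_variables still return errors. A small sketch of the now-permitted query, assuming a default local connection:

import taos

conn = taos.connect()
cursor = conn.cursor()
cursor.execute("select * from information_schema.ins_configs")
for row in cursor.fetchall():
    print(row)   # name/value pairs of server configuration options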

View File

@ -14,6 +14,7 @@ from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.cluster import *
class TDTestCase:
#updatecfgDict = {'clientCfg': {'serverPort': 7080, 'firstEp': 'trd02:7080', 'secondEp':'trd02:7080'},\
@ -106,12 +107,39 @@ class TDTestCase:
tdLog.printNoPrefix("%s"%taosdCmd)
os.system(f"{taosdCmd}")
def run(self):
tdSql.prepare()
# time.sleep(2)
tdSql.query("create user testpy pass 'testpy'")
def preData(self):
# database\stb\tb\child-tb\rows\topics
tdSql.execute("create user testpy pass 'testpy'")
tdSql.execute("drop database if exists db0;")
tdSql.execute("create database db0;")
tdSql.execute("use db0;")
tdSql.execute("create table if not exists db0.stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned);")
tdSql.execute("create table db0.ct1 using db0.stb tags(1000);")
tdSql.execute("create table db0.ct2 using db0.stb tags(2000);")
tdSql.execute("create table if not exists db0.ntb (ts timestamp, c1 int, c2 float, c3 double) ;")
tdSql.query("show db0.stables;")
tdSql.execute("insert into db0.ct1 values(now+0s, 10, 2.0, 3.0);")
tdSql.execute("insert into db0.ct1 values(now+1s, 11, 2.1, 3.1)(now+2s, 12, 2.2, 3.2)(now+3s, 13, 2.3, 3.3);")
tdSql.execute("insert into db0.ntb values(now+2s, 10, 2.0, 3.0);")
tdSql.execute("create sma index sma_index_name1 on db0.stb function(max(c1),max(c2),min(c1)) interval(6m,10s) sliding(6m);")
tdSql.execute("create topic tpc1 as select * from db0.ct2; ")
#hostname = socket.gethostname()
#stream
tdSql.execute("drop database if exists source_db;")
tdSql.query("create database source_db vgroups 3;")
tdSql.query("use source_db")
tdSql.query("create table if not exists source_db.stb (ts timestamp, k int) tags (a int);")
tdSql.query("create table source_db.ct1 using source_db.stb tags(1000);create table source_db.ct2 using source_db.stb tags(2000);create table source_db.ct3 using source_db.stb tags(3000);")
tdSql.query("create stream s1 into source_db.output_stb as select _wstart AS start, min(k), max(k), sum(k) from source_db.stb interval(10m);")
def run(self):
# tdSql.prepare()
# time.sleep(2)
self.preData()
#tdLog.info ("hostname: %s" % hostname)
buildPath = self.getBuildPath()
@ -128,7 +156,15 @@ class TDTestCase:
# keyDict['h'] = self.hostname
# keyDict['c'] = cfgPath
# keyDict['P'] = self.serverPort
tdDnodes.stop(1)
tdDnodes=cluster.dnodes
for i in range(5):
    tdDnodes[i].stoptaosd()
startAction = " -s -c " + taosdCfgPath
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
self.taosdCommandExe(startAction,taosdCmdRun)
startAction = " --help"
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
@ -153,9 +189,7 @@ class TDTestCase:
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
self.taosdCommandStop(startAction,taosdCmdRun)
startAction = " -s"
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
self.taosdCommandExe(startAction,taosdCmdRun)
startAction = " -e TAOS_QUERY_POLICY=2 "
tdLog.printNoPrefix("================================ parameter: %s"%startAction)
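run() now stops every dnode in the cluster fixture (the case is launched with -N 5) before replaying the taosd start actions. A hedged sketch of one of the non-blocking actions with placeholder paths, since taosdCmdRun and taosdCfgPath are resolved elsewhere in the case:

import os

taosd_bin = "/path/to/debug/build/bin/taosd"   # hypothetical; the case derives this from getBuildPath()
start_action = " --help"                       # one of the actions exercised above
print("================================ parameter:" + start_action)
os.system(taosd_bin + start_action)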

View File

@ -1,4 +1,6 @@
import datetime
import random
import os
from util.log import *
from util.sql import *
@ -30,7 +32,11 @@ class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to execute {__file__}")
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(), logSql)
self.testcasePath = os.path.split(__file__)[0]
self.testcaseFilename = os.path.split(__file__)[-1]
os.system("rm -rf %s/%s.sql" % (self.testcasePath,self.testcaseFilename))
def __query_condition(self,tbname):
query_condition = [f"{tbname}.{col}" for col in ALL_COL]
@ -132,6 +138,67 @@ class TDTestCase:
return
return f"explain select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_ratio(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain ratio {ratio} select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_ratio_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain ratio {ratio} verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_ratio_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain ratio {ratio} verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_ratio(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze ratio {ratio} select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_ratio_verbose_true(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze ratio {ratio} verbose true select {select_clause} from {from_clause} {where_condition} {group_condition}"
def __single_sql_analyze_ratio_verbose_false(self, select_clause, from_clause, where_condition="", group_condition=""):
ratio = random.uniform(0.001,1)
if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0].split("(")[-1] != from_clause.split(".")[0]:
return
return f"explain analyze ratio {ratio} verbose false select {select_clause} from {from_clause} {where_condition} {group_condition}"
@property
def __tb_list(self, dbname=DBNAME):
return [
@ -158,6 +225,53 @@ class TDTestCase:
self.__single_sql(select_claus, tb,),
self.__single_sql(select_claus, tb, where_condition=where_claus),
self.__single_sql(select_claus, tb, group_condition=group_claus),
self.__single_sql_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_verbose_true(select_claus, tb,),
self.__single_sql_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_verbose_false(select_claus, tb,),
self.__single_sql_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_verbose_false(select_claus, tb, group_condition=group_claus),
self.__single_sql_ratio(select_claus, tb, where_claus, having_claus),
self.__single_sql_ratio(select_claus, tb,),
self.__single_sql_ratio(select_claus, tb, where_condition=where_claus),
self.__single_sql_ratio(select_claus, tb, group_condition=group_claus),
self.__single_sql_ratio_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_ratio_verbose_true(select_claus, tb,),
self.__single_sql_ratio_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_ratio_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_ratio_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_ratio_verbose_false(select_claus, tb,),
self.__single_sql_ratio_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_ratio_verbose_false(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze(select_claus, tb,),
self.__single_sql_analyze(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_verbose_true(select_claus, tb,),
self.__single_sql_analyze_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_verbose_false(select_claus, tb,),
self.__single_sql_analyze_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_verbose_false(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_ratio(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_ratio(select_claus, tb,),
self.__single_sql_analyze_ratio(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_ratio(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb,),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_ratio_verbose_true(select_claus, tb, group_condition=group_claus),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, where_claus, having_claus),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb,),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, where_condition=where_claus),
self.__single_sql_analyze_ratio_verbose_false(select_claus, tb, group_condition=group_claus),
)
)
@ -172,6 +286,9 @@ class TDTestCase:
tdSql.query(sqls[i])
def __test_current(self, dbname=DBNAME):
ratio = random.uniform(0.001,1)
tdSql.query(f"explain select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain select 1 from {dbname}.ct2")
tdSql.query(f"explain select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
@ -181,8 +298,111 @@ class TDTestCase:
self.explain_check()
# tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.ct1")
# tdSql.query(f"explain verbose true select 1 from {dbname}.ct2")
# tdSql.query(f"explain verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
# tdSql.query(f"explain verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
# tdSql.query(f"explain verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
# tdSql.query(f"explain verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
# self.explain_check()
tdSql.query(f"explain verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain ratio {ratio} select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain ratio {ratio} select 1 from {dbname}.ct2")
tdSql.query(f"explain ratio {ratio} select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain ratio {ratio} select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain ratio {ratio} select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain ratio {ratio} select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
# tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1")
# tdSql.query(f"explain ratio {ratio} verbose true select 1 from {dbname}.ct2")
# tdSql.query(f"explain ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
# tdSql.query(f"explain ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
# tdSql.query(f"explain ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
# tdSql.query(f"explain ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
# self.explain_check()
tdSql.query(f"explain ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain ratio {ratio} verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain ratio {ratio} verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain ratio {ratio} verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain ratio {ratio} verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain ratio {ratio} verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
# tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.ct1")
# tdSql.query(f"explain analyze verbose true select 1 from {dbname}.ct2")
# tdSql.query(f"explain analyze verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
# tdSql.query(f"explain analyze verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
# tdSql.query(f"explain analyze verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
# tdSql.query(f"explain analyze verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
# self.explain_check()
tdSql.query(f"explain analyze verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
tdSql.query(f"explain analyze ratio {ratio} select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze ratio {ratio} select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze ratio {ratio} select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze ratio {ratio} select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze ratio {ratio} select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze ratio {ratio} select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
# tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.ct1")
# tdSql.query(f"explain analyze ratio {ratio} verbose true select 1 from {dbname}.ct2")
# tdSql.query(f"explain analyze ratio {ratio} verbose true select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
# tdSql.query(f"explain analyze ratio {ratio} verbose true select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
# tdSql.query(f"explain analyze ratio {ratio} verbose true select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
# tdSql.query(f"explain analyze ratio {ratio} verbose true select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
# self.explain_check()
tdSql.query(f"explain analyze ratio {ratio} verbose false select {INT_COL} from {dbname}.ct1")
tdSql.query(f"explain analyze ratio {ratio} verbose false select 1 from {dbname}.ct2")
tdSql.query(f"explain analyze ratio {ratio} verbose false select cast(ceil({DOUBLE_COL}) as bigint) from {dbname}.ct4 group by {DOUBLE_COL}")
tdSql.query(f"explain analyze ratio {ratio} verbose false select count({SINT_COL}) from {dbname}.ct4 group by {BOOL_COL} having count({SINT_COL}) > 0")
tdSql.query(f"explain analyze ratio {ratio} verbose false select ct2.{SINT_COL} from {dbname}.ct4 ct4 join {dbname}.ct2 ct2 on ct4.ts=ct2.ts")
tdSql.query(f"explain analyze ratio {ratio} verbose false select {INT_COL} from {dbname}.stb1 where {INT_COL} is not null and {INT_COL} in (0, 1, 2) or {INT_COL} between 2 and 100 ")
self.explain_check()
def __test_error(self, dbname=DBNAME):
ratio = random.uniform(0.001,1)
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain show databases " )
@ -196,6 +416,143 @@ class TDTestCase:
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain verbose true show databases " )
tdSql.error( f"explain verbose true show {dbname}.stables " )
tdSql.error( f"explain verbose true show {dbname}.tables " )
tdSql.error( f"explain verbose true show {dbname}.vgroups " )
tdSql.error( f"explain verbose true show dnodes " )
tdSql.error( f'''explain verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain verbose false show databases " )
tdSql.error( f"explain verbose false show {dbname}.stables " )
tdSql.error( f"explain verbose false show {dbname}.tables " )
tdSql.error( f"explain verbose false show {dbname}.vgroups " )
tdSql.error( f"explain verbose false show dnodes " )
tdSql.error( f'''explain verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain ratio {ratio} select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain ratio {ratio} show databases " )
tdSql.error( f"explain ratio {ratio} show {dbname}.stables " )
tdSql.error( f"explain ratio {ratio} show {dbname}.tables " )
tdSql.error( f"explain ratio {ratio} show {dbname}.vgroups " )
tdSql.error( f"explain ratio {ratio} show dnodes " )
tdSql.error( f'''explain ratio {ratio} select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain ratio {ratio} verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain ratio {ratio} verbose true show databases " )
tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.stables " )
tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.tables " )
tdSql.error( f"explain ratio {ratio} verbose true show {dbname}.vgroups " )
tdSql.error( f"explain ratio {ratio} verbose true show dnodes " )
tdSql.error( f'''explain ratio {ratio} verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain ratio {ratio} verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain ratio {ratio} verbose false show databases " )
tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.stables " )
tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.tables " )
tdSql.error( f"explain ratio {ratio} verbose false show {dbname}.vgroups " )
tdSql.error( f"explain ratio {ratio} verbose false show dnodes " )
tdSql.error( f'''explain ratio {ratio} verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain analyze select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze show databases " )
tdSql.error( f"explain analyze show {dbname}.stables " )
tdSql.error( f"explain analyze show {dbname}.tables " )
tdSql.error( f"explain analyze show {dbname}.vgroups " )
tdSql.error( f"explain analyze show dnodes " )
tdSql.error( f'''explain analyze select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze verbose true show databases " )
tdSql.error( f"explain analyze verbose true show {dbname}.stables " )
tdSql.error( f"explain analyze verbose true show {dbname}.tables " )
tdSql.error( f"explain analyze verbose true show {dbname}.vgroups " )
tdSql.error( f"explain analyze verbose true show dnodes " )
tdSql.error( f'''explain analyze verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze verbose false show databases " )
tdSql.error( f"explain analyze verbose false show {dbname}.stables " )
tdSql.error( f"explain analyze verbose false show {dbname}.tables " )
tdSql.error( f"explain analyze verbose false show {dbname}.vgroups " )
tdSql.error( f"explain analyze verbose false show dnodes " )
tdSql.error( f'''explain analyze verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdLog.printNoPrefix("===step 0: err case, must return err")
tdSql.error( f"explain analyze ratio {ratio} select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze ratio {ratio} show databases " )
tdSql.error( f"explain analyze ratio {ratio} show {dbname}.stables " )
tdSql.error( f"explain analyze ratio {ratio} show {dbname}.tables " )
tdSql.error( f"explain analyze ratio {ratio} show {dbname}.vgroups " )
tdSql.error( f"explain analyze ratio {ratio} show dnodes " )
tdSql.error( f'''explain analyze ratio {ratio} select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze ratio {ratio} verbose true select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze ratio {ratio} verbose true show databases " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.stables " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.tables " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show {dbname}.vgroups " )
tdSql.error( f"explain analyze ratio {ratio} verbose true show dnodes " )
tdSql.error( f'''explain analyze ratio {ratio} verbose true select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
tdSql.error( f"explain analyze ratio {ratio} verbose false select hyperloglog({INT_COL}) from {dbname}.ct8" )
tdSql.error( f"explain analyze ratio {ratio} verbose false show databases " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.stables " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.tables " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show {dbname}.vgroups " )
tdSql.error( f"explain analyze ratio {ratio} verbose false show dnodes " )
tdSql.error( f'''explain analyze ratio {ratio} verbose false select hyperloglog(['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'])
from {dbname}.ct1
where ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null
group by ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}']
having ['{INT_COL} + {INT_COL}', '{INT_COL} + {BINT_COL}', '{INT_COL} + {SINT_COL}', '{INT_COL} + {TINT_COL}', '{INT_COL} + {FLOAT_COL}', '{INT_COL} + {DOUBLE_COL}', '{INT_COL} + {BOOL_COL}', '{INT_COL} + {BINARY_COL}', '{INT_COL} + {NCHAR_COL}', '{INT_COL} + {TS_COL}'] is not null ''' )
def all_test(self):
self.__test_error()
self.__test_current()
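all_test() now also covers the ratio, verbose and analyze spellings generated by the new __single_sql_* helpers. A minimal standalone sketch of the statement shapes involved, assuming a database db0 with a table ct1 holding an int column c1 (hypothetical names; the case builds these strings against its own schema and runs them through tdSql):

import random
import taos

conn = taos.connect(database="db0")
cursor = conn.cursor()
ratio = random.uniform(0.001, 1)

for stmt in (
    "explain select c1 from ct1",
    "explain verbose false select c1 from ct1",
    f"explain ratio {ratio} select c1 from ct1",
    "explain analyze select c1 from ct1",
    f"explain analyze ratio {ratio} verbose false select c1 from ct1",
):
    cursor.execute(stmt)
    print(stmt, "->", len(cursor.fetchall()), "plan rows")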

View File

@ -240,12 +240,14 @@ class TDTestCase:
time.sleep(3)
tdLog.info("================= stop dnode, and remove data file, then start dnode ===========================")
tdDnodes.stop(1)
# time.sleep(5)
dataPath = buildPath + "/../sim/dnode1/data/*"
shellCmd = 'rm -rf ' + dataPath
tdLog.info(shellCmd)
os.system(shellCmd)
tdDnodes.start(1)
#tdDnodes.start(1)
tdDnodes.starttaosd(1)
time.sleep(2)
######### redo to consume

View File

@ -104,7 +104,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
@ -149,7 +149,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
@ -251,7 +251,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}

View File

@ -104,7 +104,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
@ -149,7 +149,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
@ -251,7 +251,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}

View File

@ -104,7 +104,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
@ -149,7 +149,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
@ -252,7 +252,7 @@ class TDTestCase:
'rowsPerTbl': 1000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'pollDelay': 10,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
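All three tmq cases above raise pollDelay from 3 to 10 so the consumer keeps polling long enough to drain the topic before the result check. The value lives in the paraDict the cases already pass to their helpers; trimmed to the keys shown in these hunks:

paraDict = {
    'rowsPerTbl': 1000,
    'batchNum': 100,
    'startTs': 1640966400000,  # 2022-01-01 00:00:00.000
    'pollDelay': 10,           # was 3; more time for the consumer to receive all rows
    'showMsg': 1,
    'showRow': 1,
    'snapshot': 0,
}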

View File

@ -13,7 +13,7 @@ python3 ./test.py -f 0-others/udf_restart_taosd.py
python3 ./test.py -f 0-others/cachemodel.py
python3 ./test.py -f 0-others/udf_cfg1.py
python3 ./test.py -f 0-others/udf_cfg2.py
python3 ./test.py -f 0-others/taosdShell.py -N 5 -M 3 -Q 3
python3 ./test.py -f 0-others/sysinfo.py
python3 ./test.py -f 0-others/user_control.py
python3 ./test.py -f 0-others/fsync.py
@ -363,7 +363,7 @@ python3 ./test.py -f 2-query/concat.py -Q 2
python3 ./test.py -f 2-query/concat2.py -Q 2
python3 ./test.py -f 2-query/concat_ws.py -Q 2
python3 ./test.py -f 2-query/concat_ws2.py -Q 2
#python3 ./test.py -f 2-query/check_tsdb.py -Q 2
python3 ./test.py -f 2-query/check_tsdb.py -Q 2
python3 ./test.py -f 2-query/spread.py -Q 2
python3 ./test.py -f 2-query/hyperloglog.py -Q 2
python3 ./test.py -f 2-query/explain.py -Q 2
@ -402,13 +402,17 @@ python3 ./test.py -f 2-query/arctan.py -Q 2
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 2
python3 ./test.py -f 2-query/interp.py -Q 2
# python3 ./test.py -f 2-query/nestedQuery.py -Q 2
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
python3 ./test.py -f 2-query/nestedQuery.py -Q 2
python3 ./test.py -f 2-query/nestedQuery_str.py -Q 2
python3 ./test.py -f 2-query/nestedQuery_math.py -Q 2
python3 ./test.py -f 2-query/nestedQuery_time.py -Q 2
python3 ./test.py -f 2-query/stablity.py -Q 2
python3 ./test.py -f 2-query/stablity_1.py -Q 2
python3 ./test.py -f 2-query/avg.py -Q 2
# python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/elapsed.py -Q 2
python3 ./test.py -f 2-query/csum.py -Q 2
#python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/mavg.py -Q 2
python3 ./test.py -f 2-query/sample.py -Q 2
python3 ./test.py -f 2-query/function_diff.py -Q 2
python3 ./test.py -f 2-query/unique.py -Q 2
@ -432,8 +436,9 @@ python3 ./test.py -f 2-query/count_partition.py -Q 2
python3 ./test.py -f 2-query/max_partition.py -Q 2
python3 ./test.py -f 2-query/last_row.py -Q 2
python3 ./test.py -f 2-query/tsbsQuery.py -Q 2
#------------queryPolicy 3-----------
python3 ./test.py -f 2-query/sml.py -Q 2
#------------queryPolicy 3-----------
python3 ./test.py -f 2-query/between.py -Q 3
python3 ./test.py -f 2-query/distinct.py -Q 3
python3 ./test.py -f 2-query/varchar.py -Q 3
@ -453,7 +458,7 @@ python3 ./test.py -f 2-query/concat.py -Q 3
python3 ./test.py -f 2-query/concat2.py -Q 3
python3 ./test.py -f 2-query/concat_ws.py -Q 3
python3 ./test.py -f 2-query/concat_ws2.py -Q 3
#python3 ./test.py -f 2-query/check_tsdb.py -Q 3
python3 ./test.py -f 2-query/check_tsdb.py -Q 3
python3 ./test.py -f 2-query/spread.py -Q 3
python3 ./test.py -f 2-query/hyperloglog.py -Q 3
python3 ./test.py -f 2-query/explain.py -Q 3
@ -464,7 +469,7 @@ python3 ./test.py -f 2-query/Today.py -Q 3
python3 ./test.py -f 2-query/max.py -Q 3
python3 ./test.py -f 2-query/min.py -Q 3
python3 ./test.py -f 2-query/count.py -Q 3
#python3 ./test.py -f 2-query/last.py -Q 3
python3 ./test.py -f 2-query/last.py -Q 3
python3 ./test.py -f 2-query/first.py -Q 3
python3 ./test.py -f 2-query/To_iso8601.py -Q 3
python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 3
@ -490,12 +495,18 @@ python3 ./test.py -f 2-query/arcsin.py -Q 3
python3 ./test.py -f 2-query/arccos.py -Q 3
python3 ./test.py -f 2-query/arctan.py -Q 3
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 3
# python3 ./test.py -f 2-query/nestedQuery.py -Q 3
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3
# python3 ./test.py -f 2-query/avg.py -Q 3
# python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/nestedQuery.py -Q 3
python3 ./test.py -f 2-query/nestedQuery_str.py -Q 3
python3 ./test.py -f 2-query/nestedQuery_math.py -Q 3
python3 ./test.py -f 2-query/nestedQuery_time.py -Q 3
python3 ./test.py -f 2-query/stablity.py -Q 3
python3 ./test.py -f 2-query/stablity_1.py -Q 3
python3 ./test.py -f 2-query/avg.py -Q 3
python3 ./test.py -f 2-query/elapsed.py -Q 3
python3 ./test.py -f 2-query/csum.py -Q 3
#python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/mavg.py -Q 3
python3 ./test.py -f 2-query/sample.py -Q 3
python3 ./test.py -f 2-query/function_diff.py -Q 3
python3 ./test.py -f 2-query/unique.py -Q 3
@ -544,7 +555,7 @@ python3 ./test.py -f 2-query/concat.py -Q 4
python3 ./test.py -f 2-query/concat2.py -Q 4
python3 ./test.py -f 2-query/concat_ws.py -Q 4
python3 ./test.py -f 2-query/concat_ws2.py -Q 4
#python3 ./test.py -f 2-query/check_tsdb.py -Q 4
python3 ./test.py -f 2-query/check_tsdb.py -Q 4
python3 ./test.py -f 2-query/spread.py -Q 4
python3 ./test.py -f 2-query/hyperloglog.py -Q 4
python3 ./test.py -f 2-query/explain.py -Q 4
@ -555,7 +566,7 @@ python3 ./test.py -f 2-query/Today.py -Q 4
python3 ./test.py -f 2-query/max.py -Q 4
python3 ./test.py -f 2-query/min.py -Q 4
python3 ./test.py -f 2-query/count.py -Q 4
#python3 ./test.py -f 2-query/last.py -Q 4
python3 ./test.py -f 2-query/last.py -Q 4
python3 ./test.py -f 2-query/first.py -Q 4
python3 ./test.py -f 2-query/To_iso8601.py -Q 4
python3 ./test.py -f 2-query/To_unixtimestamp.py -Q 4
@ -581,12 +592,19 @@ python3 ./test.py -f 2-query/arcsin.py -Q 4
python3 ./test.py -f 2-query/arccos.py -Q 4
python3 ./test.py -f 2-query/arctan.py -Q 4
python3 ./test.py -f 2-query/query_cols_tags_and_or.py -Q 4
#python3 ./test.py -f 2-query/nestedQuery.py -Q 4
# python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4
# python3 ./test.py -f 2-query/avg.py -Q 4
# python3 ./test.py -f 2-query/elapsed.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_str.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_math.py -Q 4
python3 ./test.py -f 2-query/nestedQuery_time.py -Q 4
python3 ./test.py -f 2-query/stablity.py -Q 4
python3 ./test.py -f 2-query/stablity_1.py -Q 4
python3 ./test.py -f 2-query/avg.py -Q 4
python3 ./test.py -f 2-query/elapsed.py -Q 4
python3 ./test.py -f 2-query/csum.py -Q 4
#python3 ./test.py -f 2-query/mavg.py -Q 4
python3 ./test.py -f 2-query/mavg.py -Q 4
python3 ./test.py -f 2-query/sample.py -Q 4
python3 ./test.py -f 2-query/function_diff.py -Q 4
python3 ./test.py -f 2-query/unique.py -Q 4
@ -610,5 +628,5 @@ python3 ./test.py -f 2-query/count_partition.py -Q 4
python3 ./test.py -f 2-query/max_partition.py -Q 4
python3 ./test.py -f 2-query/last_row.py -Q 4
python3 ./test.py -f 2-query/tsbsQuery.py -Q 4
#python3 ./test.py -f 2-query/sml.py -Q 4
python3 ./test.py -f 2-query/sml.py -Q 4
python3 ./test.py -f 2-query/interp.py -Q 4

View File

@ -341,6 +341,26 @@ if __name__ == "__main__":
print("check dnode ready")
except Exception as r:
print(r)
if queryPolicy != 1:
    queryPolicy = int(queryPolicy)
    if restful:
        conn = taosrest.connect(url=f"http://{host}:6041")
    else:
        conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
    cursor = conn.cursor()
    cursor.execute("create qnode on dnode 1")
    cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
    cursor.execute("show local variables")
    res = cursor.fetchall()
    for i in range(cursor.rowcount):
        if res[i][0] == "queryPolicy":
            if int(res[i][1]) == int(queryPolicy):
                tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
            else:
                tdLog.debug(res)
                tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
if ucase is not None and hasattr(ucase, 'noConn') and ucase.noConn == True:
    conn = None
else:
@ -455,6 +475,26 @@ if __name__ == "__main__":
except Exception as r:
print(r)
if queryPolicy != 1:
    queryPolicy = int(queryPolicy)
    if restful:
        conn = taosrest.connect(url=f"http://{host}:6041")
    else:
        conn = taos.connect(host, config=tdDnodes.getSimCfgPath())
    cursor = conn.cursor()
    cursor.execute("create qnode on dnode 1")
    cursor.execute(f'alter local "queryPolicy" "{queryPolicy}"')
    cursor.execute("show local variables")
    res = cursor.fetchall()
    for i in range(cursor.rowcount):
        if res[i][0] == "queryPolicy":
            if int(res[i][1]) == int(queryPolicy):
                tdLog.success(f'alter queryPolicy to {queryPolicy} successfully')
            else:
                tdLog.debug(res)
                tdLog.exit(f"alter queryPolicy to {queryPolicy} failed")
if testCluster:
    tdLog.info("Procedures for testing cluster")