Merge branch '3.0' of github.com:taosdata/TDengine into 3.0

wenzhouwww@live.cn 2022-07-12 10:38:26 +08:00
commit 3f47bdffb2
61 changed files with 1733 additions and 480 deletions

View File

@ -822,6 +822,20 @@ typedef struct {
int32_t tSerializeSTrimDbReq(void* buf, int32_t bufLen, STrimDbReq* pReq);
int32_t tDeserializeSTrimDbReq(void* buf, int32_t bufLen, STrimDbReq* pReq);
typedef struct {
int32_t timestamp;
} SVTrimDbReq;
int32_t tSerializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
int32_t tDeserializeSVTrimDbReq(void* buf, int32_t bufLen, SVTrimDbReq* pReq);
typedef struct {
int32_t timestamp;
} SVDropTtlTableReq;
int32_t tSerializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
int32_t tDeserializeSVDropTtlTableReq(void* buf, int32_t bufLen, SVDropTtlTableReq* pReq);
typedef struct {
int32_t numOfVgroups;
int32_t numOfStables;

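Usage note (an illustrative sketch, not part of the commit): like the other request types in tmsg.h, the new serializers follow a two-pass pattern — call with a NULL buffer to learn the encoded length, allocate, then encode; the receiver decodes into a zeroed struct. The mndTrimDb and vnodeProcessTrimReq hunks further down use exactly this shape.

    SVTrimDbReq req = {.timestamp = taosGetTimestampSec()};
    int32_t len = tSerializeSVTrimDbReq(NULL, 0, &req);   // pass 1: size only
    void   *buf = taosMemoryMalloc(len);                  // caller-owned buffer (hypothetical here)
    tSerializeSVTrimDbReq(buf, len, &req);                // pass 2: encode

    SVTrimDbReq out = {0};
    if (tDeserializeSVTrimDbReq(buf, len, &out) != 0) {   // returns -1 on a malformed buffer
      // handle TSDB_CODE_INVALID_MSG
    }
    taosMemoryFree(buf);
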
View File

@ -199,9 +199,10 @@ enum {
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_REPLICA, "alter-replica", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_CONFIRM, "alter-confirm", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_ALTER_HASHRANGE, "alter-hashrange", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "compact", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TTL_TABLE, "drop-ttl-stb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_COMMIT, "commit vnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_COMPACT, "vnode-compact", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_DROP_TTL_TABLE, "vnode-drop-ttl-stb", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_TRIM, "vnode-trim", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_COMMIT, "vnode-commit", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_VND_MAX_MSG, "vnd-max", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_SCH_MSG)

View File

@ -25,10 +25,11 @@ extern "C" {
static FORCE_INLINE int32_t tRealloc(uint8_t **ppBuf, int64_t size) {
int32_t code = 0;
int64_t bsize = 0;
uint8_t *pBuf;
uint8_t *pBuf = NULL;
if (*ppBuf) {
bsize = *(int64_t *)((*ppBuf) - sizeof(int64_t));
pBuf = (*ppBuf) - sizeof(int64_t);
bsize = *(int64_t *)pBuf;
}
if (bsize >= size) goto _exit;
@ -38,7 +39,7 @@ static FORCE_INLINE int32_t tRealloc(uint8_t **ppBuf, int64_t size) {
bsize *= 2;
}
pBuf = (uint8_t *)taosMemoryRealloc(*ppBuf ? (*ppBuf) - sizeof(int64_t) : *ppBuf, bsize + sizeof(int64_t));
pBuf = (uint8_t *)taosMemoryRealloc(pBuf, bsize + sizeof(int64_t));
if (pBuf == NULL) {
code = TSDB_CODE_OUT_OF_MEMORY;
goto _exit;

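For context on the tRealloc change above (a sketch based only on the code shown): the helper keeps the allocated capacity in an int64_t header stored immediately before the pointer it hands back, which is why the fix reads the old size through pBuf and passes that same pBuf (or NULL on first use) to taosMemoryRealloc.

    uint8_t *buf = NULL;
    if (tRealloc(&buf, 100) == 0) {
      // assumed layout after the call: [int64_t capacity][user bytes ...]
      //                                                   ^ buf points here
      int64_t cap = *(int64_t *)(buf - sizeof(int64_t));  // capacity actually reserved (>= 100)
      // ... use buf[0..99]; grow later with another tRealloc(&buf, n) ...
    }
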
View File

@ -328,7 +328,7 @@ function install_header() {
${csudo}ln -s ${install_main_dir}/include/taoserror.h ${inc_link_dir}/taoserror.h
${csudo}ln -s ${install_main_dir}/include/taosudf.h ${inc_link_dir}/taosudf.h
${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h || :
[ -f ${install_main_dir}/include/taosws.h ] && ${csudo}ln -s ${install_main_dir}/include/taosws.h ${inc_link_dir}/taosws.h || :
}
function add_newHostname_to_hosts() {

View File

@ -161,13 +161,11 @@ if [[ $productName == "TDengine" ]]; then
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
if [ "$osType" != "Darwin" ]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
[ -f ${build_dir}/lib/*.jar ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
fi
git clone --depth 1 https://github.com/taosdata/driver-go ${install_dir}/connector/go
rm -rf ${install_dir}/connector/go/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector

View File

@ -290,19 +290,17 @@ fi
# Copy driver
mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt
cp ${wslib_files} ${install_dir}/driver || :
[ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || :
# Copy connector
if [ "$verMode" == "cluster" ]; then
connector_dir="${code_dir}/connector"
mkdir -p ${install_dir}/connector
if [[ "$pagMode" != "lite" ]] && [[ "$cpuType" != "aarch32" ]]; then
cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
if find ${connector_dir}/go -mindepth 1 -maxdepth 1 | read; then
cp -r ${connector_dir}/go ${install_dir}/connector
else
echo "WARNING: go connector not found, please check if want to use it!"
fi
[ -f ${build_dir}/lib/*.jar ] && cp ${build_dir}/lib/*.jar ${install_dir}/connector || :
git clone --depth 1 https://github.com/taosdata/driver-go ${install_dir}/connector/go
rm -rf ${install_dir}/connector/go/.git ||:
git clone --depth 1 https://github.com/taosdata/taos-connector-python ${install_dir}/connector/python
rm -rf ${install_dir}/connector/python/.git ||:
@ -314,6 +312,7 @@ if [ "$verMode" == "cluster" ]; then
git clone --depth 1 https://github.com/taosdata/libtaos-rs ${install_dir}/connector/rust
rm -rf ${install_dir}/connector/rust/.git ||:
# cp -r ${connector_dir}/python ${install_dir}/connector
# cp -r ${connector_dir}/nodejs ${install_dir}/connector
fi

View File

@ -114,6 +114,7 @@ function clean_header() {
${csudo}rm -f ${inc_link_dir}/taos.h || :
${csudo}rm -f ${inc_link_dir}/taosdef.h || :
${csudo}rm -f ${inc_link_dir}/taoserror.h || :
${csudo}rm -f ${inc_link_dir}/taosudf.h || :
${csudo}rm -f ${inc_link_dir}/taosws.h || :
}

View File

@ -357,6 +357,7 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
pStmt->bInfo.inExecCache = true;
if (pStmt->sql.autoCreateTbl) {
tscDebug("reuse stmt block for tb %s in execBlock", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
}
}
@ -365,9 +366,11 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
if (pStmt->bInfo.inExecCache) {
ASSERT(taosHashGetSize(pStmt->exec.pBlockHash) == 1);
pStmt->bInfo.needParse = false;
tscDebug("reuse stmt block for tb %s in execBlock", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
}
tscDebug("no stmt block cache for tb %s", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
}
@ -391,6 +394,8 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
STMT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
tscDebug("reuse stmt block for tb %s in sqlBlock, suid:0x%" PRIx64 , pStmt->bInfo.tbFName, pStmt->bInfo.tbSuid);
return TSDB_CODE_SUCCESS;
}
@ -406,6 +411,8 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
if (TSDB_CODE_PAR_TABLE_NOT_EXIST == code) {
STMT_ERR_RET(stmtCleanBindInfo(pStmt));
tscDebug("tb %s not exist", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
}
@ -420,6 +427,8 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
if (uid == pStmt->bInfo.tbUid) {
pStmt->bInfo.needParse = false;
tscDebug("tb %s is current table", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
}
@ -440,6 +449,8 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
pStmt->bInfo.boundTags = pCache->boundTags;
pStmt->bInfo.tagsCached = true;
tscDebug("tb %s in execBlock list, set to current", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
}
@ -461,6 +472,8 @@ int32_t stmtGetFromCache(STscStmt* pStmt) {
STMT_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
tscDebug("tb %s in sqlBlock list, set to current", pStmt->bInfo.tbFName);
return TSDB_CODE_SUCCESS;
}
@ -510,6 +523,8 @@ TAOS_STMT* stmtInit(STscObj* taos) {
int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to prepare");
if (pStmt->sql.status >= STMT_PREPARE) {
STMT_ERR_RET(stmtResetStmt(pStmt));
}
@ -529,6 +544,8 @@ int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to set tbName: %s", tbName);
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTBNAME));
int32_t insert = 0;
@ -559,6 +576,8 @@ int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to set tbTags");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTAGS));
if (pStmt->bInfo.inExecCache) {
@ -572,6 +591,7 @@ int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
STMT_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
tscDebug("start to bind stmt tag values");
STMT_ERR_RET(qBindStmtTagsValue(*pDataBlock, pStmt->bInfo.boundTags, pStmt->bInfo.tbSuid, pStmt->bInfo.sname.tname,
tags, pStmt->exec.pRequest->msgBuf, pStmt->exec.pRequest->msgBufLen));
@ -617,6 +637,8 @@ int stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields
int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("start to bind stmt data, colIdx: %d", colIdx);
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_BIND));
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
@ -707,6 +729,8 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
int stmtAddBatch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to add batch");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_ADD_BATCH));
STMT_ERR_RET(stmtCacheBlock(pStmt));
@ -715,6 +739,8 @@ int stmtAddBatch(TAOS_STMT* stmt) {
}
int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
tscDebug("stmt start to update tbUid, blockNum: %d", pRsp->nBlocks);
if (pRsp->nBlocks <= 0) {
tscError("invalid submit resp block number %d", pRsp->nBlocks);
STMT_ERR_RET(TSDB_CODE_TSC_APP_ERROR);
@ -727,11 +753,6 @@ int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
char* key = taosHashGetKey(pIter, &keyLen);
STableMeta* pMeta = qGetTableMetaInDataBlock(pBlock);
if (pMeta->uid != pStmt->bInfo.tbUid) {
tscError("table uid %" PRIx64 " mis-match with current table uid %" PRIx64, pMeta->uid, pStmt->bInfo.tbUid);
STMT_ERR_RET(TSDB_CODE_TSC_APP_ERROR);
}
if (pMeta->uid) {
pIter = taosHashIterate(pStmt->exec.pBlockHash, pIter);
continue;
@ -775,6 +796,8 @@ int stmtExec(TAOS_STMT* stmt) {
SSubmitRsp* pRsp = NULL;
bool autoCreateTbl = pStmt->exec.autoCreateTbl;
tscDebug("stmt start to exec");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE));
if (STMT_TYPE_QUERY == pStmt->sql.type) {

View File

@ -2672,6 +2672,56 @@ int32_t tDeserializeSTrimDbReq(void *buf, int32_t bufLen, STrimDbReq *pReq) {
return 0;
}
int32_t tSerializeSVTrimDbReq(void *buf, int32_t bufLen, SVTrimDbReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->timestamp) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSVTrimDbReq(void *buf, int32_t bufLen, SVTrimDbReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->timestamp) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->timestamp) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSVDropTtlTableReq(void *buf, int32_t bufLen, SVDropTtlTableReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->timestamp) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSDbCfgRsp(void *buf, int32_t bufLen, const SDbCfgRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);

View File

@ -171,6 +171,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_USE_DB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_ALTER_DB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_COMPACT_DB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_TRIM_DB, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_GET_DB_CFG, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_VGROUP_LIST, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_REDISTRIBUTE_VGROUP, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -138,6 +138,8 @@ static void vmGenerateVnodeCfg(SCreateVnodeReq *pCreate, SVnodeCfg *pCfg) {
pCfg->dbId = pCreate->dbUid;
pCfg->szPage = pCreate->pageSize * 1024;
pCfg->szCache = pCreate->pages;
pCfg->cacheLast = pCreate->cacheLast;
pCfg->cacheLastSize = pCreate->cacheLastSize;
pCfg->szBuf = (uint64_t)pCreate->buffer * 1024 * 1024;
pCfg->isWeak = true;
pCfg->isTsma = pCreate->isTsma;
@ -371,6 +373,7 @@ SArray *vmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_CONFIRM, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_ALTER_HASHRANGE, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_COMPACT, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_VND_TRIM, vmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_CREATE_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_VNODE, vmPutMsgToMgmtQueue, 0) == NULL) goto _OVER;

View File

@ -54,6 +54,7 @@ typedef enum {
MND_OPER_ALTER_DB,
MND_OPER_DROP_DB,
MND_OPER_COMPACT_DB,
MND_OPER_TRIM_DB,
MND_OPER_USE_DB,
MND_OPER_WRITE_DB,
MND_OPER_READ_DB,

View File

@ -42,6 +42,7 @@ static int32_t mndProcessAlterDbReq(SRpcMsg *pReq);
static int32_t mndProcessDropDbReq(SRpcMsg *pReq);
static int32_t mndProcessUseDbReq(SRpcMsg *pReq);
static int32_t mndProcessCompactDbReq(SRpcMsg *pReq);
static int32_t mndProcessTrimDbReq(SRpcMsg *pReq);
static int32_t mndRetrieveDbs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rowsCapacity);
static void mndCancelGetNextDb(SMnode *pMnode, void *pIter);
static int32_t mndProcessGetDbCfgReq(SRpcMsg *pReq);
@ -62,6 +63,7 @@ int32_t mndInitDb(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_DROP_DB, mndProcessDropDbReq);
mndSetMsgHandle(pMnode, TDMT_MND_USE_DB, mndProcessUseDbReq);
mndSetMsgHandle(pMnode, TDMT_MND_COMPACT_DB, mndProcessCompactDbReq);
mndSetMsgHandle(pMnode, TDMT_MND_TRIM_DB, mndProcessTrimDbReq);
mndSetMsgHandle(pMnode, TDMT_MND_GET_DB_CFG, mndProcessGetDbCfgReq);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_DB, mndRetrieveDbs);
@ -1268,6 +1270,8 @@ int32_t mndValidateDbInfo(SMnode *pMnode, SDbVgVersion *pDbs, int32_t numOfDbs,
return 0;
}
static int32_t mndCompactDb(SMnode *pMnode, SDbObj *pDb) { return 0; }
static int32_t mndProcessCompactDbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
@ -1279,7 +1283,7 @@ static int32_t mndProcessCompactDbReq(SRpcMsg *pReq) {
goto _OVER;
}
mDebug("db:%s, start to sync", compactReq.db);
mDebug("db:%s, start to compact", compactReq.db);
pDb = mndAcquireDb(pMnode, compactReq.db);
if (pDb == NULL) {
@ -1290,7 +1294,7 @@ static int32_t mndProcessCompactDbReq(SRpcMsg *pReq) {
goto _OVER;
}
// code = mndCompactDb();
code = mndCompactDb(pMnode, pDb);
_OVER:
if (code != 0) {
@ -1301,6 +1305,75 @@ _OVER:
return code;
}
static int32_t mndTrimDb(SMnode *pMnode, SDbObj *pDb) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
SVTrimDbReq trimReq = {.timestamp = taosGetTimestampSec()};
int32_t reqLen = tSerializeSVTrimDbReq(NULL, 0, &trimReq);
int32_t contLen = reqLen + sizeof(SMsgHead);
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
SMsgHead *pHead = rpcMallocCont(contLen);
if (pHead == NULL) {
sdbCancelFetch(pSdb, pVgroup);
sdbRelease(pSdb, pVgroup);
continue;
}
pHead->contLen = htonl(contLen);
pHead->vgId = htonl(pVgroup->vgId);
tSerializeSVTrimDbReq((char *)pHead + sizeof(SMsgHead), contLen, &trimReq);
SRpcMsg rpcMsg = {.msgType = TDMT_VND_TRIM, .pCont = pHead, .contLen = contLen};
SEpSet epSet = mndGetVgroupEpset(pMnode, pVgroup);
int32_t code = tmsgSendReq(&epSet, &rpcMsg);
if (code != 0) {
mError("vgId:%d, failed to send vnode-trim request to vnode since 0x%x", pVgroup->vgId, code);
} else {
mDebug("vgId:%d, send vnode-trim request to vnode, time:%d", pVgroup->vgId, trimReq.timestamp);
}
sdbRelease(pSdb, pVgroup);
}
return 0;
}
static int32_t mndProcessTrimDbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
SDbObj *pDb = NULL;
STrimDbReq trimReq = {0};
if (tDeserializeSTrimDbReq(pReq->pCont, pReq->contLen, &trimReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
mDebug("db:%s, start to trim", trimReq.db);
pDb = mndAcquireDb(pMnode, trimReq.db);
if (pDb == NULL) {
goto _OVER;
}
if (mndCheckDbPrivilege(pMnode, pReq->info.conn.user, MND_OPER_TRIM_DB, pDb) != 0) {
goto _OVER;
}
code = mndTrimDb(pMnode, pDb);
_OVER:
if (code != 0) {
mError("db:%s, failed to process trim db req since %s", trimReq.db, terrstr());
}
mndReleaseDb(pMnode, pDb);
return code;
}
const char *mndGetDbStr(const char *src) {
char *pos = strstr(src, TS_PATH_DELIMITER);
if (pos != NULL) ++pos;

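Pieced together from the mnode and vnode hunks in this commit, the new trim path looks roughly like this (informal summary, not code from the diff):

    TDMT_MND_TRIM_DB   -> mndProcessTrimDbReq   (privilege check: MND_OPER_TRIM_DB)
                       -> mndTrimDb             (one TDMT_VND_TRIM message per vgroup fetched from the sdb)
    TDMT_VND_TRIM      -> vnodeProcessTrimReq
                       -> tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp)
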
View File

@ -531,8 +531,7 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
if (!IsReq(pMsg)) return 0;
if (pMsg->msgType == TDMT_SCH_QUERY || pMsg->msgType == TDMT_SCH_MERGE_QUERY ||
pMsg->msgType == TDMT_SCH_QUERY_CONTINUE || pMsg->msgType == TDMT_SCH_QUERY_HEARTBEAT ||
pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_SCH_MERGE_FETCH ||
pMsg->msgType == TDMT_SCH_DROP_TASK) {
pMsg->msgType == TDMT_SCH_FETCH || pMsg->msgType == TDMT_SCH_MERGE_FETCH || pMsg->msgType == TDMT_SCH_DROP_TASK) {
return 0;
}
if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0;

View File

@ -155,7 +155,8 @@ int32_t mndCheckDbPrivilege(SMnode *pMnode, const char *user, EOperType operType
if (pUser->sysInfo) goto _OVER;
}
if (operType == MND_OPER_ALTER_DB || operType == MND_OPER_DROP_DB || operType == MND_OPER_COMPACT_DB) {
if (operType == MND_OPER_ALTER_DB || operType == MND_OPER_DROP_DB || operType == MND_OPER_COMPACT_DB ||
operType == MND_OPER_TRIM_DB) {
if (strcmp(pUser->user, pDb->createUser) == 0 && pUser->sysInfo) goto _OVER;
}

View File

@ -204,6 +204,8 @@ _OVER:
mError("sma:%s, failed to decode from raw:%p since %s", pSma->name, pRaw, terrstr());
taosMemoryFreeClear(pSma->expr);
taosMemoryFreeClear(pSma->tagsFilter);
taosMemoryFreeClear(pSma->sql);
taosMemoryFreeClear(pSma->ast);
taosMemoryFreeClear(pRow);
return NULL;
}
@ -221,6 +223,8 @@ static int32_t mndSmaActionDelete(SSdb *pSdb, SSmaObj *pSma) {
mTrace("sma:%s, perform delete action, row:%p", pSma->name, pSma);
taosMemoryFreeClear(pSma->tagsFilter);
taosMemoryFreeClear(pSma->expr);
taosMemoryFreeClear(pSma->sql);
taosMemoryFreeClear(pSma->ast);
return 0;
}

View File

@ -813,16 +813,18 @@ int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
}
static int32_t mndProcessTtlTimer(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
SVDropTtlTableReq ttlReq = {.timestamp = taosGetTimestampSec()};
int32_t reqLen = tSerializeSVDropTtlTableReq(NULL, 0, &ttlReq);
int32_t contLen = reqLen + sizeof(SMsgHead);
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
int32_t contLen = sizeof(SMsgHead) + sizeof(int32_t);
SMsgHead *pHead = rpcMallocCont(contLen);
if (pHead == NULL) {
sdbCancelFetch(pSdb, pVgroup);
@ -831,17 +833,15 @@ static int32_t mndProcessTtlTimer(SRpcMsg *pReq) {
}
pHead->contLen = htonl(contLen);
pHead->vgId = htonl(pVgroup->vgId);
int32_t t = taosGetTimestampSec();
*(int32_t *)((char *)pHead + sizeof(SMsgHead)) = htonl(t);
tSerializeSVDropTtlTableReq((char *)pHead + sizeof(SMsgHead), contLen, &ttlReq);
SRpcMsg rpcMsg = {.msgType = TDMT_VND_DROP_TTL_TABLE, .pCont = pHead, .contLen = contLen};
SEpSet epSet = mndGetVgroupEpset(pMnode, pVgroup);
int32_t code = tmsgSendReq(&epSet, &rpcMsg);
if (code != 0) {
mError("failed to send ttl time seed, code:0x%x", code);
mError("vgId:%d, failed to send drop ttl table request to vnode since 0x%x", pVgroup->vgId, code);
} else {
mDebug("send ttl time seed success, time:%d", t);
mDebug("vgId:%d, send drop ttl table request to vnode, time:%d", pVgroup->vgId, ttlReq.timestamp);
}
sdbRelease(pSdb, pVgroup);
}

View File

@ -307,7 +307,7 @@ void *mndBuildAlterVnodeReq(SMnode *pMnode, SDbObj *pDb, SVgObj *pVgroup, int32_
terrno = TSDB_CODE_OUT_OF_MEMORY;
return NULL;
}
contLen += +sizeof(SMsgHead);
contLen += sizeof(SMsgHead);
void *pReq = taosMemoryMalloc(contLen);
if (pReq == NULL) {

View File

@ -140,7 +140,10 @@ int32_t tsdbLastRowReaderOpen(void *pVnode, int32_t type, SArray *pTableIdList,
void **pReader);
int32_t tsdbRetrieveLastRow(void *pReader, SSDataBlock *pResBlock, const int32_t *slotIds);
int32_t tsdbLastrowReaderClose(void *pReader);
int32_t tsdbGetTableSchema(SVnode* pVnode, int64_t uid, STSchema** pSchema, int64_t* suid);
int32_t tsdbGetTableSchema(SVnode *pVnode, int64_t uid, STSchema **pSchema, int64_t *suid);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
// tq
@ -210,11 +213,13 @@ struct SVnodeCfg {
int32_t vgId;
char dbname[TSDB_DB_FNAME_LEN];
uint64_t dbId;
int32_t cacheLastSize;
int32_t szPage;
int32_t szCache;
uint64_t szBuf;
bool isHeap;
bool isWeak;
int8_t cacheLast;
int8_t isTsma;
int8_t isRsma;
int8_t hashMethod;

View File

@ -32,39 +32,38 @@ extern "C" {
#define tsdbTrace(...) do { if (tsdbDebugFlag & DEBUG_TRACE) { taosPrintLog("TSDB ", DEBUG_TRACE, tsdbDebugFlag, __VA_ARGS__); }} while(0)
// clang-format on
typedef struct TSDBROW TSDBROW;
typedef struct TABLEID TABLEID;
typedef struct TSDBKEY TSDBKEY;
typedef struct SDelData SDelData;
typedef struct SDelIdx SDelIdx;
typedef struct STbData STbData;
typedef struct SMemTable SMemTable;
typedef struct STbDataIter STbDataIter;
typedef struct STable STable;
typedef struct SMapData SMapData;
typedef struct SBlockIdx SBlockIdx;
typedef struct SBlock SBlock;
typedef struct SBlockStatis SBlockStatis;
typedef struct SAggrBlkCol SAggrBlkCol;
typedef struct SColData SColData;
typedef struct SBlockDataHdr SBlockDataHdr;
typedef struct SBlockData SBlockData;
typedef struct SDelFile SDelFile;
typedef struct STsdbCacheFile STsdbCacheFile;
typedef struct SHeadFile SHeadFile;
typedef struct SDataFile SDataFile;
typedef struct SLastFile SLastFile;
typedef struct SSmaFile SSmaFile;
typedef struct SDFileSet SDFileSet;
typedef struct SDataFWriter SDataFWriter;
typedef struct SDataFReader SDataFReader;
typedef struct SDelFWriter SDelFWriter;
typedef struct SDelFReader SDelFReader;
typedef struct SRowIter SRowIter;
typedef struct STsdbFS STsdbFS;
typedef struct SRowMerger SRowMerger;
typedef struct STsdbFSState STsdbFSState;
typedef struct STsdbSnapHdr STsdbSnapHdr;
typedef struct TSDBROW TSDBROW;
typedef struct TABLEID TABLEID;
typedef struct TSDBKEY TSDBKEY;
typedef struct SDelData SDelData;
typedef struct SDelIdx SDelIdx;
typedef struct STbData STbData;
typedef struct SMemTable SMemTable;
typedef struct STbDataIter STbDataIter;
typedef struct STable STable;
typedef struct SMapData SMapData;
typedef struct SBlockIdx SBlockIdx;
typedef struct SBlock SBlock;
typedef struct SBlockStatis SBlockStatis;
typedef struct SAggrBlkCol SAggrBlkCol;
typedef struct SColData SColData;
typedef struct SBlockDataHdr SBlockDataHdr;
typedef struct SBlockData SBlockData;
typedef struct SDelFile SDelFile;
typedef struct SHeadFile SHeadFile;
typedef struct SDataFile SDataFile;
typedef struct SLastFile SLastFile;
typedef struct SSmaFile SSmaFile;
typedef struct SDFileSet SDFileSet;
typedef struct SDataFWriter SDataFWriter;
typedef struct SDataFReader SDataFReader;
typedef struct SDelFWriter SDelFWriter;
typedef struct SDelFReader SDelFReader;
typedef struct SRowIter SRowIter;
typedef struct STsdbFS STsdbFS;
typedef struct SRowMerger SRowMerger;
typedef struct STsdbFSState STsdbFSState;
typedef struct STsdbSnapHdr STsdbSnapHdr;
#define TSDB_MAX_SUBBLOCKS 8
#define TSDB_FHDR_SIZE 512
@ -163,6 +162,7 @@ int32_t tGetMapData(uint8_t *p, SMapData *pMapData);
// other
int32_t tsdbKeyFid(TSKEY key, int32_t minutes, int8_t precision);
void tsdbFidKeyRange(int32_t fid, int32_t minutes, int8_t precision, TSKEY *minKey, TSKEY *maxKey);
int32_t tsdbFidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int64_t now);
int32_t tsdbBuildDeleteSkyline(SArray *aDelData, int32_t sidx, int32_t eidx, SArray *aSkyline);
void tsdbCalcColDataSMA(SColData *pColData, SColumnDataAgg *pColAgg);
// tsdbMemTable ==============================================================================================
@ -200,6 +200,7 @@ int32_t tsdbFSRollback(STsdbFS *pFS);
int32_t tsdbFSStateUpsertDelFile(STsdbFSState *pState, SDelFile *pDelFile);
int32_t tsdbFSStateUpsertDFileSet(STsdbFSState *pState, SDFileSet *pSet);
void tsdbFSStateDeleteDFileSet(STsdbFSState *pState, int32_t fid);
SDelFile *tsdbFSStateGetDelFile(STsdbFSState *pState);
SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid);
// tsdbReaderWriter.c ==============================================================================================
@ -213,6 +214,7 @@ int32_t tsdbWriteBlockData(SDataFWriter *pWriter, SBlockData *pBlockData, uint8_
SBlockIdx *pBlockIdx, SBlock *pBlock, int8_t cmprAlg);
SDFileSet *tsdbDataFWriterGetWSet(SDataFWriter *pWriter);
int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo);
// SDataFReader
int32_t tsdbDataFReaderOpen(SDataFReader **ppReader, STsdb *pTsdb, SDFileSet *pSet);
int32_t tsdbDataFReaderClose(SDataFReader **ppReader);
@ -235,6 +237,10 @@ int32_t tsdbDelFReaderClose(SDelFReader **ppReader);
int32_t tsdbReadDelData(SDelFReader *pReader, SDelIdx *pDelIdx, SArray *aDelData, uint8_t **ppBuf);
int32_t tsdbReadDelIdx(SDelFReader *pReader, SArray *aDelIdx, uint8_t **ppBuf);
#define TSDB_CACHE_NO(c) ((c).cacheLast == 0)
#define TSDB_CACHE_LAST_ROW(c) (((c).cacheLast & 1) > 0)
#define TSDB_CACHE_LAST(c) (((c).cacheLast & 2) > 0)
// tsdbCache
int32_t tsdbOpenCache(STsdb *pTsdb);
void tsdbCloseCache(SLRUCache *pCache);
@ -244,8 +250,13 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
int32_t tsdbCacheGetLastrowH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHandle **h);
int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h);
int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey);
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity);
size_t tsdbCacheGetCapacity(SVnode *pVnode);
int32_t tsdbCacheLastArray2Row(SArray *pLastArray, STSRow **ppRow, STSchema *pSchema);
// structs =======================

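A small illustration of the new cacheLast flags (sketch only; the TSDB_CACHE_* macros are defined in the hunk above): cacheLast is a bit field in which bit 0 enables last-row caching and bit 1 enables last-value caching, so the default of 3 set in vnodeCfgDefault further down enables both.

    SVnodeCfg cfg = {.cacheLast = 3};               // 3 = 0b11, the new vnodeCfgDefault value
    bool lastRowCached = TSDB_CACHE_LAST_ROW(cfg);  // (3 & 1) > 0 -> true
    bool lastValCached = TSDB_CACHE_LAST(cfg);      // (3 & 2) > 0 -> true
    bool cacheDisabled = TSDB_CACHE_NO(cfg);        // cacheLast == 0 -> false
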
View File

@ -122,6 +122,7 @@ int tsdbOpen(SVnode* pVnode, STsdb** ppTsdb, const char* dir, STsdbKeepC
int tsdbClose(STsdb** pTsdb);
int32_t tsdbBegin(STsdb* pTsdb);
int32_t tsdbCommit(STsdb* pTsdb);
int32_t tsdbDoRetention(STsdb* pTsdb, int64_t now);
int tsdbScanAndConvertSubmitMsg(STsdb* pTsdb, SSubmitReq* pMsg);
int tsdbInsertData(STsdb* pTsdb, int64_t version, SSubmitReq* pMsg, SSubmitRsp* pRsp);
int32_t tsdbInsertTableData(STsdb* pTsdb, int64_t version, SSubmitMsgIter* pMsgIter, SSubmitBlk* pBlock,

View File

@ -15,11 +15,15 @@
#include "tsdb.h"
typedef struct {
TSKEY ts;
SColVal colVal;
} SLastCol;
int32_t tsdbOpenCache(STsdb *pTsdb) {
int32_t code = 0;
SLRUCache *pCache = NULL;
// TODO: get cfg from vnode config: pTsdb->pVnode->config.lruCapacity
size_t cfgCapacity = 1024 * 1024;
size_t cfgCapacity = pTsdb->pVnode->config.cacheLastSize * 1024 * 1024;
pCache = taosLRUCacheInit(cfgCapacity, -1, .5);
if (pCache == NULL) {
@ -61,10 +65,11 @@ static void deleteTableCacheLastrow(const void *key, size_t keyLen, void *value)
static void deleteTableCacheLast(const void *key, size_t keyLen, void *value) { taosArrayDestroy(value); }
static int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
int32_t code = 0;
char key[32] = {0};
int keyLen = 0;
char key[32] = {0};
int keyLen = 0;
// getTableCacheKey(uid, "lr", key, &keyLen);
getTableCacheKey(uid, 0, key, &keyLen);
@ -83,18 +88,79 @@ static int32_t tsdbCacheDeleteLastrow(SLRUCache *pCache, tb_uid_t uid, TSKEY eKe
return code;
}
static int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
int32_t tsdbCacheDeleteLast(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
int32_t code = 0;
char key[32] = {0};
int keyLen = 0;
char key[32] = {0};
int keyLen = 0;
// getTableCacheKey(uid, "l", key, &keyLen);
getTableCacheKey(uid, 1, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
// clear last cache anyway, no matter where eKey ends.
taosLRUCacheRelease(pCache, h, true);
SArray *pLast = (SArray *)taosLRUCacheValue(pCache, h);
bool invalidate = false;
int16_t nCol = taosArrayGetSize(pLast);
for (int16_t iCol = 0; iCol < nCol; ++iCol) {
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (eKey >= tTsVal->ts) {
invalidate = true;
break;
}
}
if (invalidate) {
taosLRUCacheRelease(pCache, h, true);
} else {
taosLRUCacheRelease(pCache, h, false);
}
// void taosLRUCacheErase(SLRUCache * cache, const void *key, size_t keyLen);
}
return code;
}
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
int32_t code = 0;
char key[32] = {0};
int keyLen = 0;
// getTableCacheKey(uid, "lr", key, &keyLen);
getTableCacheKey(uid, 0, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
STSRow *pRow = (STSRow *)taosLRUCacheValue(pCache, h);
if (pRow->ts <= eKey) {
taosLRUCacheRelease(pCache, h, true);
} else {
taosLRUCacheRelease(pCache, h, false);
}
// void taosLRUCacheErase(SLRUCache * cache, const void *key, size_t keyLen);
}
// getTableCacheKey(uid, "l", key, &keyLen);
getTableCacheKey(uid, 1, key, &keyLen);
h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
SArray *pLast = (SArray *)taosLRUCacheValue(pCache, h);
bool invalidate = false;
int16_t nCol = taosArrayGetSize(pLast);
for (int16_t iCol = 0; iCol < nCol; ++iCol) {
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (eKey >= tTsVal->ts) {
invalidate = true;
break;
}
}
if (invalidate) {
taosLRUCacheRelease(pCache, h, true);
} else {
taosLRUCacheRelease(pCache, h, false);
}
// void taosLRUCacheErase(SLRUCache * cache, const void *key, size_t keyLen);
}
@ -173,11 +239,6 @@ int32_t tsdbCacheInsertLastrow(SLRUCache *pCache, STsdb *pTsdb, tb_uid_t uid, ST
return code;
}
typedef struct {
TSKEY ts;
SColVal colVal;
} SLastCol;
int32_t tsdbCacheInsertLast(SLRUCache *pCache, tb_uid_t uid, STSRow *row, STsdb *pTsdb) {
int32_t code = 0;
STSRow *cacheRow = NULL;
@ -405,7 +466,7 @@ static int32_t getNextRowFromFS(void *iter, TSDBROW **ppRow) {
case SFSNEXTROW_FS:
state->aDFileSet = state->pTsdb->fs->cState->aDFileSet;
state->nFileSet = taosArrayGetSize(state->aDFileSet);
state->iFileSet = state->nFileSet - 1;
state->iFileSet = state->nFileSet;
state->pBlockData = NULL;
@ -1679,52 +1740,6 @@ int32_t tsdbCacheGetLastH(SLRUCache *pCache, tb_uid_t uid, STsdb *pTsdb, LRUHand
return code;
}
int32_t tsdbCacheDelete(SLRUCache *pCache, tb_uid_t uid, TSKEY eKey) {
int32_t code = 0;
char key[32] = {0};
int keyLen = 0;
// getTableCacheKey(uid, "lr", key, &keyLen);
getTableCacheKey(uid, 0, key, &keyLen);
LRUHandle *h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
STSRow *pRow = (STSRow *)taosLRUCacheValue(pCache, h);
if (pRow->ts <= eKey) {
taosLRUCacheRelease(pCache, h, true);
} else {
taosLRUCacheRelease(pCache, h, false);
}
// void taosLRUCacheErase(SLRUCache * cache, const void *key, size_t keyLen);
}
// getTableCacheKey(uid, "l", key, &keyLen);
getTableCacheKey(uid, 1, key, &keyLen);
h = taosLRUCacheLookup(pCache, key, keyLen);
if (h) {
SArray *pLast = (SArray *)taosLRUCacheValue(pCache, h);
bool invalidate = false;
int16_t nCol = taosArrayGetSize(pLast);
for (int16_t iCol = 0; iCol < nCol; ++iCol) {
SLastCol *tTsVal = (SLastCol *)taosArrayGet(pLast, iCol);
if (eKey >= tTsVal->ts) {
invalidate = true;
break;
}
}
if (invalidate) {
taosLRUCacheRelease(pCache, h, true);
} else {
taosLRUCacheRelease(pCache, h, false);
}
// void taosLRUCacheErase(SLRUCache * cache, const void *key, size_t keyLen);
}
return code;
}
int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h) {
int32_t code = 0;
@ -1732,3 +1747,9 @@ int32_t tsdbCacheRelease(SLRUCache *pCache, LRUHandle *h) {
return code;
}
void tsdbCacheSetCapacity(SVnode *pVnode, size_t capacity) {
taosLRUCacheSetCapacity(pVnode->pTsdb->lruCache, capacity);
}
size_t tsdbCacheGetCapacity(SVnode *pVnode) { return taosLRUCacheGetCapacity(pVnode->pTsdb->lruCache); }

View File

@ -688,6 +688,14 @@ _exit:
return code;
}
void tsdbFSStateDeleteDFileSet(STsdbFSState *pState, int32_t fid) {
int32_t idx;
idx = taosArraySearchIdx(pState->aDFileSet, &(SDFileSet){.fid = fid}, tDFileSetCmprFn, TD_EQ);
ASSERT(idx >= 0);
taosArrayRemove(pState->aDFileSet, idx);
}
SDelFile *tsdbFSStateGetDelFile(STsdbFSState *pState) { return pState->pDelFile; }
SDFileSet *tsdbFSStateGetDFileSet(STsdbFSState *pState, int32_t fid) {

View File

@ -181,8 +181,12 @@ int32_t tsdbDeleteTableData(STsdb *pTsdb, int64_t version, tb_uid_t suid, tb_uid
pMemTable->maxVersion = TMAX(pMemTable->maxVersion, version);
pMemTable->nDel++;
if (tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) {
tsdbCacheDelete(pTsdb->lruCache, pTbData->uid, eKey);
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && tsdbKeyCmprFn(&lastKey, &pTbData->maxKey) >= 0) {
tsdbCacheDeleteLastrow(pTsdb->lruCache, pTbData->uid, eKey);
}
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheDeleteLast(pTsdb->lruCache, pTbData->uid, eKey);
}
tsdbError("vgId:%d, delete data from table suid:%" PRId64 " uid:%" PRId64 " skey:%" PRId64 " eKey:%" PRId64
@ -556,12 +560,14 @@ static int32_t tsdbInsertTableDataImpl(SMemTable *pMemTable, STbData *pTbData, i
pTbData->maxKey = key.ts;
}
if (pLastRow != NULL) {
if (TSDB_CACHE_LAST_ROW(pMemTable->pTsdb->pVnode->config) && pLastRow != NULL) {
tsdbCacheInsertLastrow(pMemTable->pTsdb->lruCache, pMemTable->pTsdb, pTbData->uid, pLastRow, true);
}
}
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, pLastRow, pMemTable->pTsdb);
if (TSDB_CACHE_LAST(pMemTable->pTsdb->pVnode->config)) {
tsdbCacheInsertLast(pMemTable->pTsdb->lruCache, pTbData->uid, pLastRow, pMemTable->pTsdb);
}
pTbData->minVersion = TMIN(pTbData->minVersion, version);
pTbData->maxVersion = TMAX(pTbData->maxVersion, version);

View File

@ -1913,3 +1913,114 @@ _err:
taosArrayDestroy(aBlockCol);
return code;
}
int32_t tsdbDFileSetCopy(STsdb *pTsdb, SDFileSet *pSetFrom, SDFileSet *pSetTo) {
int32_t code = 0;
int64_t n;
int64_t size;
TdFilePtr pOutFD = NULL; // TODO
TdFilePtr PInFD = NULL; // TODO
char fNameFrom[TSDB_FILENAME_LEN];
char fNameTo[TSDB_FILENAME_LEN];
// head
tsdbDataFileName(pTsdb, pSetFrom, TSDB_HEAD_FILE, fNameFrom);
tsdbDataFileName(pTsdb, pSetTo, TSDB_HEAD_FILE, fNameTo);
pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
if (pOutFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
if (PInFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->fHead.size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
taosCloseFile(&pOutFD);
taosCloseFile(&PInFD);
// data
tsdbDataFileName(pTsdb, pSetFrom, TSDB_DATA_FILE, fNameFrom);
tsdbDataFileName(pTsdb, pSetTo, TSDB_DATA_FILE, fNameTo);
pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
if (pOutFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
if (PInFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->fData.size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
taosCloseFile(&pOutFD);
taosCloseFile(&PInFD);
// last
tsdbDataFileName(pTsdb, pSetFrom, TSDB_LAST_FILE, fNameFrom);
tsdbDataFileName(pTsdb, pSetTo, TSDB_LAST_FILE, fNameTo);
pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
if (pOutFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
if (PInFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->fLast.size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
taosCloseFile(&pOutFD);
taosCloseFile(&PInFD);
// sma
tsdbDataFileName(pTsdb, pSetFrom, TSDB_SMA_FILE, fNameFrom);
tsdbDataFileName(pTsdb, pSetTo, TSDB_SMA_FILE, fNameTo);
pOutFD = taosOpenFile(fNameTo, TD_FILE_WRITE | TD_FILE_CREATE | TD_FILE_TRUNC);
if (pOutFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
PInFD = taosOpenFile(fNameFrom, TD_FILE_READ);
if (PInFD == NULL) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
n = taosFSendFile(pOutFD, PInFD, 0, pSetFrom->fSma.size);
if (n < 0) {
code = TAOS_SYSTEM_ERROR(errno);
goto _err;
}
taosCloseFile(&pOutFD);
taosCloseFile(&PInFD);
return code;
_err:
tsdbError("vgId:%d tsdb DFileSet copy failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
return code;
}

View File

@ -15,19 +15,80 @@
#include "tsdb.h"
static int32_t tsdbDoRetentionImpl(STsdb *pTsdb, int64_t now, int8_t try, int8_t *canDo) {
int32_t code = 0;
STsdbFSState *pState;
if (try) {
pState = pTsdb->fs->cState;
*canDo = 0;
} else {
pState = pTsdb->fs->nState;
}
for (int32_t iSet = 0; iSet < taosArrayGetSize(pState->aDFileSet); iSet++) {
SDFileSet *pDFileSet = (SDFileSet *)taosArrayGet(pState->aDFileSet, iSet);
int32_t expLevel = tsdbFidLevel(pDFileSet->fid, &pTsdb->keepCfg, now);
SDiskID did;
// check
if (expLevel == pDFileSet->diskId.id) continue;
// delete or move
if (expLevel < 0) {
if (try) {
*canDo = 1;
} else {
tsdbFSStateDeleteDFileSet(pState, pDFileSet->fid);
iSet--;
}
} else {
// alloc
if (tfsAllocDisk(pTsdb->pVnode->pTfs, expLevel, &did) < 0) {
code = terrno;
goto _exit;
}
if (did.level == pDFileSet->diskId.level) continue;
if (try) {
*canDo = 1;
} else {
// copy the file to new disk
SDFileSet nDFileSet = *pDFileSet;
nDFileSet.diskId = did;
tfsMkdirRecurAt(pTsdb->pVnode->pTfs, pTsdb->path, did);
code = tsdbDFileSetCopy(pTsdb, pDFileSet, &nDFileSet);
if (code) goto _exit;
code = tsdbFSStateUpsertDFileSet(pState, &nDFileSet);
if (code) goto _exit;
}
}
}
_exit:
return code;
}
int32_t tsdbDoRetention(STsdb *pTsdb, int64_t now) {
int32_t code = 0;
int8_t canDo;
// try
tsdbDoRetentionImpl(pTsdb, now, 1, &canDo);
if (!canDo) goto _exit;
// begin
code = tsdbFSBegin(pTsdb->fs);
if (code) goto _err;
// do retention
for (int32_t iSet = 0; iSet < taosArrayGetSize(pTsdb->fs->nState->aDFileSet); iSet++) {
SDFileSet *pDFileSet = (SDFileSet *)taosArrayGet(pTsdb->fs->nState->aDFileSet, iSet);
// TODO
}
code = tsdbDoRetentionImpl(pTsdb, now, 0, NULL);
if (code) goto _err;
// commit
code = tsdbFSCommit(pTsdb->fs);
@ -38,5 +99,6 @@ _exit:
_err:
tsdbError("vgId:%d tsdb do retention failed since %s", TD_VID(pTsdb->pVnode), tstrerror(code));
tsdbFSRollback(pTsdb->fs);
return code;
}

View File

@ -312,6 +312,9 @@ struct STsdbSnapWriter {
// config
int32_t minutes;
int8_t precision;
int32_t minRow;
int32_t maxRow;
int8_t cmprAlg;
// for data file
int32_t fid;
@ -321,14 +324,18 @@ struct STsdbSnapWriter {
SBlockIdx* pBlockIdx;
SMapData mBlock;
int32_t iBlock;
SBlock* pBlock;
SBlock block;
SBlockData blockData;
int32_t iRow;
SDataFWriter* pDataFWriter;
SArray* aBlockIdxN;
SBlockIdx* pBlockIdxN;
SBlockIdx blockIdx;
SMapData mBlockN;
SBlock block;
SBlock* pBlockN;
SBlock blockN;
SBlockData nBlockData;
// for del file
@ -394,13 +401,114 @@ _err:
return code;
}
static int32_t tsdbSnapWriteTableDataEnd(STsdbSnapWriter* pWrite) {
int32_t code = 0;
// TODO
return code;
}
static int32_t tsdbSnapWriteTableData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
TABLEID id = {0}; // TODO
// skip
while (pWriter->pBlockIdx && tTABLEIDCmprFn(&id, pWriter->pBlockIdx) < 0) {
code = tsdbSnapWriteTableDataEnd(pWriter);
if (code) goto _err;
pWriter->iBlockIdx++;
if (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
pWriter->pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx);
} else {
pWriter->pBlockIdx = NULL;
}
}
// new or merge
if (pWriter->pBlockIdx == NULL || tTABLEIDCmprFn(&id, pWriter->pBlockIdx) < 0) {
int32_t c;
if (pWriter->pBlockIdxN && ((c = tTABLEIDCmprFn(&id, pWriter->pBlockIdxN)) != 0)) {
ASSERT(c > 0);
code = tsdbSnapWriteTableDataEnd(pWriter);
if (code) goto _err;
}
if (pWriter->pBlockIdxN == NULL) {
pWriter->pBlockIdx = &pWriter->blockIdx;
pWriter->pBlockIdx->suid = id.suid;
pWriter->pBlockIdx->uid = id.uid;
}
// loop to write the data
TSDBROW* pRow = NULL; // todo
int32_t nRow = 0; // todo
SBlockData* pBlockData = NULL; // todo
for (int32_t iRow = 0; iRow < nRow; iRow++) {
code = tBlockDataAppendRow(&pWriter->nBlockData, &tsdbRowFromBlockData(pBlockData, iRow), NULL);
if (code) goto _err;
if (pWriter->nBlockData.nRow > pWriter->maxRow * 4 / 5) {
code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->nBlockData, NULL, NULL, pWriter->pBlockIdxN,
pWriter->pBlockN, pWriter->cmprAlg);
if (code) goto _err;
}
}
} else {
// skip
while (true) {
if (pWriter->pBlock == NULL) break;
if (pWriter->pBlock->last) break;
if (tBlockCmprFn(&(SBlock){.minKey = {0}, .maxKey = {0}}, pWriter->pBlock) >= 0) break;
code = tMapDataPutItem(&pWriter->mBlockN, pWriter->pBlock, tPutBlock);
if (code) goto _err;
}
if (pWriter->pBlock) {
if (pWriter->pBlock->last) {
// load the last block and merge with the data (todo)
} else {
int32_t c = tBlockCmprFn(&(SBlock){0 /*TODO*/}, pWriter->pBlock);
if (c > 0) {
// commit until pWriter->pBlock (todo)
} else {
// load the block and merge with the data (todo)
}
}
} else {
int32_t nRow = 0;
SBlockData* pBlockData = NULL;
for (int32_t iRow = 0; iRow < nRow; iRow++) {
code = tBlockDataAppendRow(&pWriter->nBlockData, &tsdbRowFromBlockData(pBlockData, iRow), NULL);
if (code) goto _err;
if (pWriter->nBlockData.nRow >= pWriter->maxRow * 4 / 5) {
code = tsdbWriteBlockData(pWriter->pDataFWriter, &pWriter->nBlockData, NULL, NULL, pWriter->pBlockIdxN,
pWriter->pBlockN, pWriter->cmprAlg);
if (code) goto _err;
tBlockDataClearData(&pWriter->nBlockData);
}
}
}
}
return code;
_err:
tsdbError("vgId:%d tsdb snapshot write table data failed since %s", TD_VID(pWriter->pTsdb->pVnode), tstrerror(code));
return code;
}
static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint32_t nData) {
int32_t code = 0;
STsdb* pTsdb = pWriter->pTsdb;
int64_t suid = 0; // todo
int64_t uid = 0; // todo
int64_t skey; // todo
int64_t ekey; // todo
int64_t skey; // todo
int64_t ekey; // todo
int32_t fid = tsdbKeyFid(skey, pWriter->minutes, pWriter->precision);
ASSERT(fid == tsdbKeyFid(ekey, pWriter->minutes, pWriter->precision));
@ -440,47 +548,8 @@ static int32_t tsdbSnapWriteData(STsdbSnapWriter* pWriter, uint8_t* pData, uint3
taosArrayClear(pWriter->aBlockIdxN);
}
// process
TABLEID id = {0}; // TODO
TSKEY minKey = 0; // TODO
TSKEY maxKey = 0; // TODO
while (true) {
if (pWriter->pBlockIdx) {
int32_t c = tTABLEIDCmprFn(&id, pWriter->pBlockIdx);
if (c == 0) {
} else if (c < 0) {
// keep merge
} else {
// code = tsdbSnapWriteTableDataEnd(pWriter);
if (code) goto _err;
pWriter->iBlockIdx++;
if (pWriter->iBlockIdx < taosArrayGetSize(pWriter->aBlockIdx)) {
pWriter->pBlockIdx = (SBlockIdx*)taosArrayGet(pWriter->aBlockIdx, pWriter->iBlockIdx);
} else {
pWriter->pBlockIdx = NULL;
}
if (pWriter->pBlockIdx) {
code = tsdbReadBlock(pWriter->pDataFReader, pWriter->pBlockIdx, &pWriter->mBlock, NULL);
if (code) goto _err;
}
}
} else {
int32_t c = tTABLEIDCmprFn(&id, &pWriter->blockIdx);
if (c == 0) {
// merge commit the block data
} else if (c > 0) {
// code = tsdbSnapWriteTableDataEnd(pWriter);
if (code) goto _err;
} else {
ASSERT(0);
}
}
}
code = tsdbSnapWriteTableData(pWriter, pData, nData);
if (code) goto _err;
return code;

View File

@ -465,17 +465,37 @@ void tsdbFidKeyRange(int32_t fid, int32_t minutes, int8_t precision, TSKEY *minK
*maxKey = *minKey + minutes * tsTickPerMin[precision] - 1;
}
// int tsdFidLevel(int fid, TSKEY now, minute) {
// if (fid >= pRtn->maxFid) {
// return 0;
// } else if (fid >= pRtn->midFid) {
// return 1;
// } else if (fid >= pRtn->minFid) {
// return 2;
// } else {
// return -1;
// }
// }
int32_t tsdbFidLevel(int32_t fid, STsdbKeepCfg *pKeepCfg, int64_t now) {
int32_t aFid[3];
TSKEY key;
if (pKeepCfg->precision == TSDB_TIME_PRECISION_MILLI) {
now = now * 1000;
} else if (pKeepCfg->precision == TSDB_TIME_PRECISION_MICRO) {
now = now * 1000000l;
} else if (pKeepCfg->precision == TSDB_TIME_PRECISION_NANO) {
now = now * 1000000000l;
} else {
ASSERT(0);
}
key = now - pKeepCfg->keep0 * tsTickPerMin[pKeepCfg->precision];
aFid[0] = tsdbKeyFid(key, pKeepCfg->days, pKeepCfg->precision);
key = now - pKeepCfg->keep1 * tsTickPerMin[pKeepCfg->precision];
aFid[1] = tsdbKeyFid(key, pKeepCfg->days, pKeepCfg->precision);
key = now - pKeepCfg->keep2 * tsTickPerMin[pKeepCfg->precision];
aFid[2] = tsdbKeyFid(key, pKeepCfg->days, pKeepCfg->precision);
if (fid >= aFid[0]) {
return 0;
} else if (fid >= aFid[1]) {
return 1;
} else if (fid >= aFid[2]) {
return 2;
} else {
return -1;
}
}
// TSDBROW ======================================================
void tsdbRowGetColVal(TSDBROW *pRow, STSchema *pTSchema, int32_t iCol, SColVal *pColVal) {

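To summarize the new tsdbFidLevel (illustrative comments, not part of the diff): keep0/keep1/keep2 define three age cut-offs, and the return value is the target disk level consumed by tsdbDoRetentionImpl above.

    // level returned for a file set's fid, with now in seconds converted to the tsdb precision:
    //    0 -> fid newer than now - keep0            (primary storage level)
    //    1 -> fid between now - keep1 and now - keep0
    //    2 -> fid between now - keep2 and now - keep1
    //   -1 -> fid older than now - keep2            (expired; tsdbDoRetentionImpl deletes the file set)
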
View File

@ -20,6 +20,8 @@ const SVnodeCfg vnodeCfgDefault = {.vgId = -1,
.dbId = 0,
.szPage = 4096,
.szCache = 256,
.cacheLast = 3,
.cacheLastSize = 8,
.szBuf = 96 * 1024 * 1024,
.isHeap = false,
.isWeak = 0,
@ -60,6 +62,8 @@ int vnodeEncodeConfig(const void *pObj, SJson *pJson) {
if (tjsonAddIntegerToObject(pJson, "dbId", pCfg->dbId) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "szPage", pCfg->szPage) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "szCache", pCfg->szCache) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "cacheLast", pCfg->cacheLast) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "cacheLastSize", pCfg->cacheLastSize) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "szBuf", pCfg->szBuf) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "isHeap", pCfg->isHeap) < 0) return -1;
if (tjsonAddIntegerToObject(pJson, "isWeak", pCfg->isWeak) < 0) return -1;
@ -133,6 +137,10 @@ int vnodeDecodeConfig(const SJson *pJson, void *pObj) {
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "szCache", pCfg->szCache, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "cacheLast", pCfg->cacheLast, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "cacheLastSize", pCfg->cacheLastSize, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "szBuf", pCfg->szBuf, code);
if (code < 0) return -1;
tjsonGetNumberValue(pJson, "isHeap", pCfg->isHeap, code);

View File

@ -28,7 +28,7 @@ int vnodeCreate(const char *path, SVnodeCfg *pCfg, STfs *pTfs) {
}
// create vnode env
if (tfsMkdir(pTfs, path) < 0) {
if (tfsMkdirAt(pTfs, path, (SDiskID){0}) < 0) {
vError("vgId:%d, failed to create vnode since: %s", pCfg->vgId, tstrerror(terrno));
return -1;
}

View File

@ -27,6 +27,7 @@ static int32_t vnodeProcessAlterConfirmReq(SVnode *pVnode, int64_t version, void
static int32_t vnodeProcessAlterHashRangeReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
static int32_t vnodeProcessDeleteReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp);
int32_t vnodePreProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg) {
@ -173,9 +174,12 @@ int32_t vnodeProcessWriteMsg(SVnode *pVnode, SRpcMsg *pMsg, int64_t version, SRp
case TDMT_VND_DROP_TTL_TABLE:
if (vnodeProcessDropTtlTbReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_CREATE_SMA: {
case TDMT_VND_TRIM:
if (vnodeProcessTrimReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
break;
case TDMT_VND_CREATE_SMA:
if (vnodeProcessCreateTSmaReq(pVnode, version, pReq, len, pRsp) < 0) goto _err;
} break;
break;
/* TSDB */
case TDMT_VND_SUBMIT:
if (vnodeProcessSubmitReq(pVnode, version, pMsg->pCont, pMsg->contLen, pRsp) < 0) goto _err;
@ -347,13 +351,38 @@ void vnodeUpdateMetaRsp(SVnode *pVnode, STableMetaRsp *pMetaRsp) {
pMetaRsp->precision = pVnode->config.tsdbCfg.precision;
}
static int32_t vnodeProcessTrimReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
int32_t code = 0;
SVTrimDbReq trimReq = {0};
vInfo("vgId:%d, trim vnode request will be processed, time:%d", pVnode->config.vgId, trimReq.timestamp);
// decode
if (tDeserializeSVTrimDbReq(pReq, len, &trimReq) != 0) {
code = TSDB_CODE_INVALID_MSG;
goto _exit;
}
// process
code = tsdbDoRetention(pVnode->pTsdb, trimReq.timestamp);
if (code) goto _exit;
_exit:
return code;
}
static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *pReq, int32_t len, SRpcMsg *pRsp) {
SArray *tbUids = taosArrayInit(8, sizeof(int64_t));
if (tbUids == NULL) return TSDB_CODE_OUT_OF_MEMORY;
int32_t t = ntohl(*(int32_t *)pReq);
vDebug("rec ttl time:%d", t);
int32_t ret = metaTtlDropTable(pVnode->pMeta, t, tbUids);
SVDropTtlTableReq ttlReq = {0};
if (tDeserializeSVDropTtlTableReq(pReq, len, &ttlReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto end;
}
vInfo("vgId:%d, drop ttl table req will be processed, time:%d", pVnode->config.vgId, ttlReq.timestamp);
int32_t ret = metaTtlDropTable(pVnode->pMeta, ttlReq.timestamp, tbUids);
if (ret != 0) {
goto end;
}
@ -907,6 +936,11 @@ static int32_t vnodeProcessAlterConfigReq(SVnode *pVnode, int64_t version, void
vInfo("vgId:%d, start to alter vnode config, cacheLast:%d cacheLastSize:%d", TD_VID(pVnode), alterReq.cacheLast,
alterReq.cacheLastSize);
if (pVnode->config.cacheLastSize != alterReq.cacheLastSize) {
pVnode->config.cacheLastSize = alterReq.cacheLastSize;
// TODO: save config
tsdbCacheSetCapacity(pVnode, (size_t)pVnode->config.cacheLastSize * 1024 * 1024);
}
return 0;
}

View File

@ -3504,11 +3504,7 @@ static void destroyOperatorInfo(SOperatorInfo* pOperator) {
pOperator->numOfDownstream = 0;
}
if (pOperator->exprSupp.pExprInfo != NULL) {
destroyExprInfo(pOperator->exprSupp.pExprInfo, pOperator->exprSupp.numOfExprs);
}
taosMemoryFreeClear(pOperator->exprSupp.pExprInfo);
cleanupExprSupp(&pOperator->exprSupp);
taosMemoryFreeClear(pOperator);
}
@ -3587,6 +3583,25 @@ void initBasicInfo(SOptrBasicInfo* pInfo, SSDataBlock* pBlock) {
initResultRowInfo(&pInfo->resultRowInfo);
}
static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
if (pCtx == NULL) {
return NULL;
}
for (int32_t i = 0; i < numOfOutput; ++i) {
for (int32_t j = 0; j < pCtx[i].numOfParams; ++j) {
taosVariantDestroy(&pCtx[i].param[j].param);
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
taosMemoryFreeClear(pCtx);
return NULL;
}
int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr) {
pSup->pExprInfo = pExprInfo;
pSup->numOfExprs = numOfExpr;
@ -3600,6 +3615,16 @@ int32_t initExprSupp(SExprSupp* pSup, SExprInfo* pExprInfo, int32_t numOfExpr) {
return TSDB_CODE_SUCCESS;
}
void cleanupExprSupp(SExprSupp* pSupp) {
destroySqlFunctionCtx(pSupp->pCtx, pSupp->numOfExprs);
if (pSupp->pExprInfo != NULL) {
destroyExprInfo(pSupp->pExprInfo, pSupp->numOfExprs);
}
taosMemoryFreeClear(pSupp->pExprInfo);
taosMemoryFree(pSupp->rowEntryInfoOffset);
}
SOperatorInfo* createAggregateOperatorInfo(SOperatorInfo* downstream, SExprInfo* pExprInfo, int32_t numOfCols,
SSDataBlock* pResultBlock, SNode* pCondition, SExprInfo* pScalarExprInfo,
int32_t numOfScalarExpr, SExecTaskInfo* pTaskInfo) {
@ -3650,25 +3675,6 @@ _error:
return NULL;
}
static void* destroySqlFunctionCtx(SqlFunctionCtx* pCtx, int32_t numOfOutput) {
if (pCtx == NULL) {
return NULL;
}
for (int32_t i = 0; i < numOfOutput; ++i) {
for (int32_t j = 0; j < pCtx[i].numOfParams; ++j) {
taosVariantDestroy(&pCtx[i].param[j].param);
}
taosMemoryFreeClear(pCtx[i].subsidiaries.pCtx);
taosMemoryFree(pCtx[i].input.pData);
taosMemoryFree(pCtx[i].input.pColumnDataAgg);
}
taosMemoryFreeClear(pCtx);
return NULL;
}
void cleanupBasicInfo(SOptrBasicInfo* pInfo) {
assert(pInfo != NULL);
cleanupResultRowInfo(&pInfo->resultRowInfo);
@ -3710,13 +3716,6 @@ static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) {
taosMemoryFreeClear(param);
}
void cleanupExprSupp(SExprSupp* pSupp) {
destroySqlFunctionCtx(pSupp->pCtx, pSupp->numOfExprs);
destroyExprInfo(pSupp->pExprInfo, pSupp->numOfExprs);
taosMemoryFree(pSupp->rowEntryInfoOffset);
}
static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) {
SIndefOperatorInfo* pInfo = (SIndefOperatorInfo*)param;
cleanupBasicInfo(&pInfo->binfo);

View File

@ -578,6 +578,7 @@ static void destroyTableScanOperatorInfo(void* param, int32_t numOfOutput) {
taosArrayDestroy(pTableScanInfo->pColMatchInfo);
}
cleanupExprSupp(&pTableScanInfo->pseudoSup);
taosMemoryFreeClear(param);
}

View File

@ -157,6 +157,7 @@ int32_t elapsedCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx);
bool getHistogramFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool histogramFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t histogramFunction(SqlFunctionCtx* pCtx);
int32_t histogramFunctionPartial(SqlFunctionCtx* pCtx);
int32_t histogramFunctionMerge(SqlFunctionCtx* pCtx);
int32_t histogramFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
int32_t histogramPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);

View File

@ -1427,9 +1427,12 @@ static int32_t translateIrate(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
}
static int32_t translateFirstLast(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
// first(col_list) will be rewritten as first(col)
if (1 != LIST_LENGTH(pFunc->pParameterList)) {
return TSDB_CODE_SUCCESS;
int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
for (int32_t i = 0; i < numOfParams; ++i) {
SNode* pParamNode = nodesListGetNode(pFunc->pParameterList, i);
if (QUERY_NODE_VALUE == nodeType(pParamNode)) {
return invaildFuncParaValueErrMsg(pErrBuf, len, pFunc->functionName);
}
}
pFunc->node.resType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, 0))->resType;
@ -2323,7 +2326,7 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
.translateFunc = translateHistogramPartial,
.getEnvFunc = getHistogramFuncEnv,
.initFunc = histogramFunctionSetup,
.processFunc = histogramFunction,
.processFunc = histogramFunctionPartial,
.finalizeFunc = histogramPartialFinalize,
.invertFunc = NULL,
.combineFunc = histogramCombine,

View File

@ -4098,7 +4098,7 @@ bool histogramFunctionSetup(SqlFunctionCtx* pCtx, SResultRowEntryInfo* pResultIn
return true;
}
int32_t histogramFunction(SqlFunctionCtx* pCtx) {
static int32_t histogramFunctionImpl(SqlFunctionCtx* pCtx, bool isPartial) {
SHistoFuncInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
SInputColumnInfoData* pInput = &pCtx->input;
@ -4130,10 +4130,22 @@ int32_t histogramFunction(SqlFunctionCtx* pCtx) {
}
}
SET_VAL(GET_RES_INFO(pCtx), numOfElems, pInfo->numOfBins);
if (!isPartial) {
SET_VAL(GET_RES_INFO(pCtx), numOfElems, pInfo->numOfBins);
} else {
SET_VAL(GET_RES_INFO(pCtx), numOfElems, 1);
}
return TSDB_CODE_SUCCESS;
}
int32_t histogramFunction(SqlFunctionCtx* pCtx) {
return histogramFunctionImpl(pCtx, false);
}
int32_t histogramFunctionPartial(SqlFunctionCtx* pCtx) {
return histogramFunctionImpl(pCtx, true);
}
static void histogramTransferInfo(SHistoFuncInfo* pInput, SHistoFuncInfo* pOutput) {
pOutput->normalized = pInput->normalized;
pOutput->numOfBins = pInput->numOfBins;
@ -4153,10 +4165,12 @@ int32_t histogramFunctionMerge(SqlFunctionCtx* pCtx) {
SHistoFuncInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t start = pInput->startRowIndex;
char* data = colDataGetData(pCol, start);
SHistoFuncInfo* pInputInfo = (SHistoFuncInfo*)varDataVal(data);
histogramTransferInfo(pInputInfo, pInfo);
for(int32_t i = start; i < start + pInput->numOfRows; ++i) {
char* data = colDataGetData(pCol, i);
SHistoFuncInfo* pInputInfo = (SHistoFuncInfo*)varDataVal(data);
histogramTransferInfo(pInputInfo, pInfo);
}
SET_VAL(GET_RES_INFO(pCtx), pInfo->numOfBins, pInfo->numOfBins);
return TSDB_CODE_SUCCESS;
@ -4199,6 +4213,7 @@ int32_t histogramFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
}
int32_t histogramPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
SHistoFuncInfo* pInfo = GET_ROWCELL_INTERBUF(GET_RES_INFO(pCtx));
int32_t resultBytes = getHistogramInfoSize();
char* res = taosMemoryCalloc(resultBytes + VARSTR_HEADER_SIZE, sizeof(char));
@ -4212,7 +4227,7 @@ int32_t histogramPartialFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
colDataAppend(pCol, pBlock->info.rows, res, false);
taosMemoryFree(res);
return 1;
return pResInfo->numOfRes;
}
int32_t histogramCombine(SqlFunctionCtx* pDestCtx, SqlFunctionCtx* pSourceCtx) {
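The hunks above split the histogram aggregate into one shared implementation driven by an isPartial flag: the partial step reports a single intermediate row (the serialized bins), the merge step now folds every intermediate row in its input block instead of only the first one, and the partial finalizer returns the real result count. A minimal standalone sketch of that partial/merge pattern follows; Bins, accumulate and mergeAll are invented names for illustration, not TDengine APIs.

/* Sketch of a partial/merge histogram aggregate with fixed toy bucketing. */
#include <stdio.h>

#define NUM_BINS 4

typedef struct {
  long count[NUM_BINS];
} Bins;

/* Shared accumulation step; isPartial only changes how many result rows are reported. */
static int accumulate(Bins *b, const double *vals, int n, int isPartial) {
  for (int i = 0; i < n; ++i) {
    int idx = (int)vals[i];              /* toy bucketing: value 0..3 maps to bin 0..3 */
    if (idx >= 0 && idx < NUM_BINS) b->count[idx]++;
  }
  return isPartial ? 1 : NUM_BINS;       /* partial step emits one intermediate row */
}

/* Merge step folds every intermediate produced by the partial steps. */
static int mergeAll(Bins *dst, const Bins *parts, int nParts) {
  for (int p = 0; p < nParts; ++p)
    for (int i = 0; i < NUM_BINS; ++i) dst->count[i] += parts[p].count[i];
  return NUM_BINS;                       /* final step reports one row per bin */
}

int main(void) {
  double blockA[] = {0, 1, 1, 3}, blockB[] = {2, 2, 3, 3};
  Bins partA = {{0}}, partB = {{0}}, merged = {{0}};
  accumulate(&partA, blockA, 4, 1);
  accumulate(&partB, blockB, 4, 1);
  Bins parts[] = {partA, partB};
  mergeAll(&merged, parts, 2);
  for (int i = 0; i < NUM_BINS; ++i) printf("bin %d: %ld\n", i, merged.count[i]);
  return 0;
}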

View File

@ -20,6 +20,7 @@
#include "os.h"
#include "tname.h"
#include "ttypes.h"
#include "query.h"
#define IS_DATA_COL_ORDERED(spd) ((spd->orderStatus) == (int8_t)ORDER_STATUS_ORDERED)

View File

@ -630,6 +630,17 @@ int32_t qCloneStmtDataBlock(void** pDst, void* pSrc) {
memcpy(*pDst, pSrc, sizeof(STableDataBlocks));
((STableDataBlocks*)(*pDst))->cloned = true;
STableDataBlocks* pBlock = (STableDataBlocks*)(*pDst);
if (pBlock->pTableMeta) {
void *pNewMeta = taosMemoryMalloc(TABLE_META_SIZE(pBlock->pTableMeta));
if (NULL == pNewMeta) {
taosMemoryFreeClear(*pDst);
return TSDB_CODE_OUT_OF_MEMORY;
}
memcpy(pNewMeta, pBlock->pTableMeta, TABLE_META_SIZE(pBlock->pTableMeta));
pBlock->pTableMeta = pNewMeta;
}
return qResetStmtDataBlock(*pDst, false);
}
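The change above makes the cloned statement block own a private copy of the table meta, so freeing either copy can no longer double-free or leave the other dangling. A minimal hedged sketch of that deep-copy-on-clone pattern, using invented Block and Meta types rather than the parser's structures, looks like this:

/* Sketch only: clone a block and deep-copy the pointed-to meta, rolling back on OOM. */
#include <stdlib.h>
#include <string.h>

typedef struct { size_t size; /* total bytes of this meta blob */ } Meta;
typedef struct { Meta *meta; int cloned; } Block;

int cloneBlock(Block **dst, const Block *src) {
  *dst = malloc(sizeof(Block));
  if (*dst == NULL) return -1;
  memcpy(*dst, src, sizeof(Block));
  (*dst)->cloned = 1;
  if (src->meta != NULL) {
    Meta *copy = malloc(src->meta->size);
    if (copy == NULL) {          /* roll back the shallow clone on failure */
      free(*dst);
      *dst = NULL;
      return -1;
    }
    memcpy(copy, src->meta, src->meta->size);
    (*dst)->meta = copy;         /* the clone now owns its own meta buffer */
  }
  return 0;
}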

View File

@ -476,12 +476,12 @@ static int32_t createAggLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect,
int32_t code = TSDB_CODE_SUCCESS;
// set group keys, agg funcs and having conditions
if (TSDB_CODE_SUCCESS == code && pSelect->hasAggFuncs) {
if (TSDB_CODE_SUCCESS == code) {
code = nodesCollectFuncs(pSelect, SQL_CLAUSE_GROUP_BY, fmIsAggFunc, &pAgg->pAggFuncs);
}
// rewrite the expression in subsequent clauses
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_CODE_SUCCESS == code && NULL != pAgg->pAggFuncs) {
code = rewriteExprsForSelect(pAgg->pAggFuncs, pSelect, SQL_CLAUSE_GROUP_BY);
}

View File

@ -1364,9 +1364,9 @@ static EDealRes partTagsOptHasColImpl(SNode* pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
static bool partTagsOptHasCol(SNodeList* pPartKeys) {
static bool planOptNodeListHasCol(SNodeList* pKeys) {
bool hasCol = false;
nodesWalkExprs(pPartKeys, partTagsOptHasColImpl, &hasCol);
nodesWalkExprs(pKeys, partTagsOptHasColImpl, &hasCol);
return hasCol;
}
@ -1409,7 +1409,7 @@ static bool partTagsOptMayBeOptimized(SLogicNode* pNode) {
return false;
}
return !partTagsOptHasCol(partTagsGetPartKeys(pNode)) && partTagsOptAreSupportedFuncs(partTagsGetFuncs(pNode));
return !planOptNodeListHasCol(partTagsGetPartKeys(pNode)) && partTagsOptAreSupportedFuncs(partTagsGetFuncs(pNode));
}
static int32_t partTagsOptRebuildTbanme(SNodeList* pPartKeys) {
@ -1986,7 +1986,8 @@ static bool lastRowScanOptMayBeOptimized(SLogicNode* pNode) {
SNode* pFunc = NULL;
FOREACH(pFunc, ((SAggLogicNode*)pNode)->pAggFuncs) {
if (FUNCTION_TYPE_LAST_ROW != ((SFunctionNode*)pFunc)->funcType) {
if (FUNCTION_TYPE_LAST_ROW != ((SFunctionNode*)pFunc)->funcType &&
FUNCTION_TYPE_SELECT_VALUE != ((SFunctionNode*)pFunc)->funcType) {
return false;
}
}
@ -2095,6 +2096,37 @@ static int32_t mergeProjectsOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLog
return mergeProjectsOptimizeImpl(pCxt, pLogicSubplan, pProjectNode);
}
static bool tagScanMayBeOptimized(SLogicNode* pNode) {
if (QUERY_NODE_LOGIC_PLAN_SCAN != nodeType(pNode) || (SCAN_TYPE_TAG == ((SScanLogicNode*)pNode)->scanType)) {
return false;
}
SScanLogicNode *pScan = (SScanLogicNode*)pNode;
if (NULL != pScan->pScanCols) {
return false;
}
if (NULL == pNode->pParent || QUERY_NODE_LOGIC_PLAN_AGG != nodeType(pNode->pParent) || 1 != LIST_LENGTH(pNode->pParent->pChildren)) {
return false;
}
SAggLogicNode* pAgg = (SAggLogicNode*)(pNode->pParent);
if (NULL == pAgg->pGroupKeys || NULL != pAgg->pAggFuncs || planOptNodeListHasCol(pAgg->pGroupKeys)) {
return false;
}
return true;
}
static int32_t tagScanOptimize(SOptimizeContext* pCxt, SLogicSubplan* pLogicSubplan) {
SScanLogicNode* pScanNode = (SScanLogicNode*)optFindPossibleNode(pLogicSubplan->pNode, tagScanMayBeOptimized);
if (NULL == pScanNode) {
return TSDB_CODE_SUCCESS;
}
pScanNode->scanType = SCAN_TYPE_TAG;
pCxt->optimized = true;
return TSDB_CODE_SUCCESS;
}
// clang-format off
static const SOptimizeRule optimizeRuleSet[] = {
{.pName = "ScanPath", .optimizeFunc = scanPathOptimize},
@ -2107,7 +2139,8 @@ static const SOptimizeRule optimizeRuleSet[] = {
{.pName = "EliminateSetOperator", .optimizeFunc = eliminateSetOpOptimize},
{.pName = "RewriteTail", .optimizeFunc = rewriteTailOptimize},
{.pName = "RewriteUnique", .optimizeFunc = rewriteUniqueOptimize},
{.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize}
{.pName = "LastRowScan", .optimizeFunc = lastRowScanOptimize},
{.pName = "TagScan", .optimizeFunc = tagScanOptimize}
};
// clang-format on
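The new TagScan rule reuses the same two-part shape as the rules already in optimizeRuleSet: a predicate that recognizes the plan fragment (a scan with no data columns whose only parent is a tag-only group-by aggregate) and an apply step that rewrites it, here just retagging the scan, and sets pCxt->optimized so the rule driver loops again. A generic sketch of that rule skeleton, with invented node types instead of the planner's, might look like:

/* Hypothetical optimizer-rule skeleton; node types and fields are invented for illustration. */
#include <stdbool.h>
#include <stddef.h>

typedef enum { NODE_SCAN, NODE_AGG } NodeType;

typedef struct Node {
  NodeType     type;
  struct Node *parent;
  struct Node *child;        /* single child for simplicity */
  bool         tagOnlyKeys;  /* agg: group keys reference tags only */
  bool         tagScan;      /* scan: rewritten flag */
} Node;

typedef struct { bool optimized; } Ctx;

/* Predicate: a scan whose parent is a tag-only group-by aggregate. */
static bool tagScanMayBeRewritten(const Node *n) {
  return n->type == NODE_SCAN && !n->tagScan &&
         n->parent != NULL && n->parent->type == NODE_AGG && n->parent->tagOnlyKeys;
}

/* Apply: rewrite in place and mark the context so the rule driver iterates again. */
int tagScanRewrite(Ctx *cxt, Node *root) {
  for (Node *n = root; n != NULL; n = n->child) {
    if (tagScanMayBeRewritten(n)) {
      n->tagScan = true;
      cxt->optimized = true;
      return 0;
    }
  }
  return 0;
}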

View File

@ -1519,6 +1519,7 @@ static SSubplan* makeSubplan(SPhysiPlanContext* pCxt, SLogicSubplan* pLogicSubpl
static int32_t buildInsertValuesSubplan(SPhysiPlanContext* pCxt, SVnodeModifyLogicNode* pModify, SSubplan* pSubplan) {
pSubplan->msgType = pModify->msgType;
pSubplan->execNode.nodeId = pModify->pVgDataBlocks->vg.vgId;
pSubplan->execNode.epSet = pModify->pVgDataBlocks->vg.epSet;
return createDataInserter(pCxt, pModify->pVgDataBlocks, &pSubplan->pDataSink);
}

View File

@ -877,7 +877,7 @@ static int32_t stbSplSplitSortNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
return code;
}
static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
static int32_t stbSplSplitScanNodeWithoutPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
int32_t code = splCreateExchangeNodeForSubplan(pCxt, pInfo->pSubplan, pInfo->pSplitNode, SUBPLAN_TYPE_MERGE);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
@ -887,6 +887,25 @@ static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo)
return code;
}
static int32_t stbSplSplitScanNodeWithPartTags(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
int32_t code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pInfo->pSplitNode, NULL, pInfo->pSplitNode, true);
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pInfo->pSplitNode, SPLIT_FLAG_STABLE_SPLIT));
}
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
++(pCxt->groupId);
return code;
}
static int32_t stbSplSplitScanNode(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
SScanLogicNode* pScan = (SScanLogicNode*)pInfo->pSplitNode;
if (NULL != pScan->pGroupTags) {
return stbSplSplitScanNodeWithPartTags(pCxt, pInfo);
}
return stbSplSplitScanNodeWithoutPartTags(pCxt, pInfo);
}
static SNode* stbSplFindPrimaryKeyFromScan(SScanLogicNode* pScan) {
SNode* pCol = NULL;
FOREACH(pCol, pScan->pScanCols) {

View File

@ -87,4 +87,11 @@ TEST_F(PlanOptimizeTest, eliminateProjection) {
TEST_F(PlanOptimizeTest, pushDownProjectCond) {
useDb("root", "test");
run("select 1-abs(c1) from (select unique(c1) c1 from st1s3) where 1-c1>5 order by 1 nulls first");
}
TEST_F(PlanOptimizeTest, tagScan) {
useDb("root", "test");
run("select tag1 from st1 group by tag1");
run("select distinct tag1 from st1");
run("select tag1*tag1 from st1 group by tag1*tag1");
}

View File

@ -59,4 +59,6 @@ TEST_F(PlanPartitionByTest, withGroupBy) {
useDb("root", "test");
run("select count(*) from t1 partition by c1 group by c2");
run("SELECT TBNAME, c1 FROM st1 PARTITION BY TBNAME GROUP BY c1");
}

View File

@ -201,6 +201,43 @@ static SyncTerm raftLogLastTerm(struct SSyncLogStore* pLogStore) {
return SYNC_TERM_INVALID;
}
static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
SyncIndex index = 0;
SWalSyncInfo syncMeta;
syncMeta.isWeek = pEntry->isWeak;
syncMeta.seqNum = pEntry->seqNum;
syncMeta.term = pEntry->term;
index = walAppendLog(pWal, pEntry->originalRpcType, syncMeta, pEntry->data, pEntry->dataLen);
if (index < 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);
int32_t sysErr = errno;
const char* sysErrStr = strerror(errno);
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "wal write error, index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s",
pEntry->index, err, err, errStr, sysErr, sysErrStr);
syncNodeErrorLog(pData->pSyncNode, logBuf);
ASSERT(0);
return -1;
}
pEntry->index = index;
do {
char eventLog[128];
snprintf(eventLog, sizeof(eventLog), "write index:%" PRId64 ", type:%s,%d, type2:%s,%d", pEntry->index,
TMSG_INFO(pEntry->msgType), pEntry->msgType, TMSG_INFO(pEntry->originalRpcType), pEntry->originalRpcType);
syncNodeEventLog(pData->pSyncNode, eventLog);
} while (0);
return 0;
}
#if 0
static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
@ -243,6 +280,7 @@ static int32_t raftLogAppendEntry(struct SSyncLogStore* pLogStore, SSyncRaftEntr
return code;
}
#endif
// entry found, return 0
// entry not found, return -1, terrno = TSDB_CODE_WAL_LOG_NOT_EXIST
@ -361,6 +399,8 @@ static int32_t raftLogGetLastEntry(SSyncLogStore* pLogStore, SSyncRaftEntry** pp
//-------------------------------
// log[0 .. n]
#if 0
int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
@ -397,6 +437,44 @@ int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
return code;
}
#endif
int32_t logStoreAppendEntry(SSyncLogStore* pLogStore, SSyncRaftEntry* pEntry) {
SSyncLogStoreData* pData = pLogStore->data;
SWal* pWal = pData->pWal;
SyncIndex index = 0;
SWalSyncInfo syncMeta;
syncMeta.isWeek = pEntry->isWeak;
syncMeta.seqNum = pEntry->seqNum;
syncMeta.term = pEntry->term;
index = walAppendLog(pWal, pEntry->originalRpcType, syncMeta, pEntry->data, pEntry->dataLen);
if (index < 0) {
int32_t err = terrno;
const char* errStr = tstrerror(err);
int32_t sysErr = errno;
const char* sysErrStr = strerror(errno);
char logBuf[128];
snprintf(logBuf, sizeof(logBuf), "wal write error, index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s",
pEntry->index, err, err, errStr, sysErr, sysErrStr);
syncNodeErrorLog(pData->pSyncNode, logBuf);
ASSERT(0);
return -1;
}
pEntry->index = index;
do {
char eventLog[128];
snprintf(eventLog, sizeof(eventLog), "write2 index:%" PRId64 ", type:%s,%d, type2:%s,%d", pEntry->index,
TMSG_INFO(pEntry->msgType), pEntry->msgType, TMSG_INFO(pEntry->originalRpcType), pEntry->originalRpcType);
syncNodeEventLog(pData->pSyncNode, eventLog);
} while (0);
return 0;
}
SSyncRaftEntry* logStoreGetEntry(SSyncLogStore* pLogStore, SyncIndex index) {
SSyncLogStoreData* pData = pLogStore->data;
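Both append paths added above follow the same failure-reporting pattern: snapshot the library error and the OS errno immediately at the failure site, format them once (numeric, hex and string forms) into a bounded buffer, and hand that single line to the logger before asserting. A small standalone sketch of that pattern, with a made-up reporting function standing in for syncNodeErrorLog, is:

/* Sketch of the "capture both error channels at the failure site" pattern. */
#include <errno.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

void reportAppendFailure(int64_t index, int libErr, const char *libErrStr) {
  int         sysErr    = errno;               /* snapshot errno before any other call */
  const char *sysErrStr = strerror(sysErr);
  char        logBuf[128];
  snprintf(logBuf, sizeof(logBuf),
           "wal write error, index:%" PRId64 ", err:%d %X, msg:%s, syserr:%d, sysmsg:%s",
           index, libErr, (unsigned)libErr, libErrStr, sysErr, sysErrStr);
  fprintf(stderr, "%s\n", logBuf);             /* stand-in for the node-level error logger */
}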

View File

@ -311,6 +311,17 @@ void transCtxMerge(STransCtx* dst, STransCtx* src);
void* transCtxDumpVal(STransCtx* ctx, int32_t key);
void* transCtxDumpBrokenlinkVal(STransCtx* ctx, int32_t* msgType);
// request list
typedef struct STransReq {
queue q;
void* data;
} STransReq;
void transReqQueueInit(queue* q);
void* transReqQueuePushReq(queue* q);
void* transReqQueueRemove(void* arg);
void transReqQueueClear(queue* q);
// queue sending msgs
typedef struct {
SArray* q;

View File

@ -19,7 +19,7 @@ typedef struct SCliConn {
T_REF_DECLARE()
uv_connect_t connReq;
uv_stream_t* stream;
uv_write_t writeReq;
queue wreqQueue;
void* hostThrd;
@ -586,9 +586,10 @@ static SCliConn* cliCreateConn(SCliThrd* pThrd) {
uv_tcp_init(pThrd->loop, (uv_tcp_t*)(conn->stream));
conn->stream->data = conn;
conn->writeReq.data = conn;
conn->connReq.data = conn;
transReqQueueInit(&conn->wreqQueue);
transQueueInit(&conn->cliMsgs, NULL);
QUEUE_INIT(&conn->conn);
conn->hostThrd = pThrd;
@ -627,6 +628,8 @@ static void cliDestroy(uv_handle_t* handle) {
transCtxCleanup(&conn->ctx);
transQueueDestroy(&conn->cliMsgs);
tTrace("%s conn %p destroy successfully", CONN_GET_INST_LABEL(conn), conn);
transReqQueueClear(&conn->wreqQueue);
transDestroyBuffer(&conn->readBuf);
taosMemoryFree(conn);
}
@ -649,11 +652,8 @@ static bool cliHandleNoResp(SCliConn* conn) {
return res;
}
static void cliSendCb(uv_write_t* req, int status) {
SCliConn* pConn = req && req->handle ? req->handle->data : NULL;
taosMemoryFree(req);
if (pConn == NULL) {
return;
}
SCliConn* pConn = transReqQueueRemove(req);
if (pConn == NULL) return;
if (status == 0) {
tTrace("%s conn %p data already was written out", CONN_GET_INST_LABEL(pConn), pConn);
@ -711,7 +711,7 @@ void cliSend(SCliConn* pConn) {
CONN_SET_PERSIST_BY_APP(pConn);
}
uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t));
uv_write_t* req = transReqQueuePushReq(&pConn->wreqQueue);
uv_write(req, (uv_stream_t*)pConn->stream, &wb, 1, cliSendCb);
return;
_RETURN:

View File

@ -293,6 +293,48 @@ void* transCtxDumpBrokenlinkVal(STransCtx* ctx, int32_t* msgType) {
return ret;
}
void transReqQueueInit(queue* q) {
// init req queue
QUEUE_INIT(q);
}
void* transReqQueuePushReq(queue* q) {
uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t));
STransReq* wreq = taosMemoryCalloc(1, sizeof(STransReq));
wreq->data = req;
req->data = wreq;
QUEUE_PUSH(q, &wreq->q);
return req;
}
void* transReqQueueRemove(void* arg) {
void* ret = NULL;
uv_write_t* req = arg;
STransReq* wreq = req && req->data ? req->data : NULL;
if (wreq == NULL || wreq->data == NULL) {
taosMemoryFree(wreq);
return NULL;
}
assert(wreq->data == req);
QUEUE_REMOVE(&wreq->q);
ret = req && req->handle ? req->handle->data : NULL;
taosMemoryFree(wreq->data);
taosMemoryFree(wreq);
return ret;
}
void transReqQueueClear(queue* q) {
while (!QUEUE_IS_EMPTY(q)) {
queue* h = QUEUE_HEAD(q);
QUEUE_REMOVE(h);
STransReq* req = QUEUE_DATA(h, STransReq, q);
taosMemoryFree(req->data);
taosMemoryFree(req);
}
}
void transQueueInit(STransQueue* queue, void (*freeFunc)(const void* arg)) {
queue->q = taosArrayInit(2, sizeof(void*));
queue->freeFunc = (void (*)(const void*))freeFunc;
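The request queue introduced here gives every in-flight write its own heap-allocated tracking node: push allocates the request plus a wrapper linked into the connection's queue, the completion callback removes exactly that wrapper and recovers the connection, and connection teardown clears whatever is still queued so nothing leaks when writes never complete. A minimal generic sketch of that lifecycle (plain doubly linked list, invented names, no libuv) is:

/* Sketch of the per-connection write-request queue; names are invented, not the transport API. */
#include <stdio.h>
#include <stdlib.h>

typedef struct Req {
  struct Req *prev, *next;
  void       *conn;          /* back-pointer recovered by the completion callback */
} Req;

typedef struct { Req head; } ReqQueue;   /* sentinel-based list owned by the connection */

static void reqQueueInit(ReqQueue *q) { q->head.prev = q->head.next = &q->head; }

static Req *reqQueuePush(ReqQueue *q, void *conn) {
  Req *r = calloc(1, sizeof(Req));
  if (r == NULL) return NULL;
  r->conn = conn;
  r->prev = q->head.prev; r->next = &q->head;
  q->head.prev->next = r; q->head.prev = r;
  return r;                  /* handed to the async write; freed in reqQueueRemove */
}

static void *reqQueueRemove(Req *r) {    /* called from the write-completion callback */
  void *conn = r->conn;
  r->prev->next = r->next; r->next->prev = r->prev;
  free(r);
  return conn;
}

static void reqQueueClear(ReqQueue *q) { /* connection teardown: drop outstanding requests */
  while (q->head.next != &q->head) {
    Req *r = q->head.next;
    q->head.next = r->next; r->next->prev = &q->head;
    free(r);
  }
}

int main(void) {
  ReqQueue q; reqQueueInit(&q);
  int conn = 42;
  Req *a = reqQueuePush(&q, &conn);
  (void)reqQueuePush(&q, &conn);         /* second write still pending at teardown */
  printf("completed on conn %d\n", *(int *)reqQueueRemove(a));
  reqQueueClear(&q);
  return 0;
}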

View File

@ -29,7 +29,7 @@ typedef struct {
typedef struct SSvrConn {
T_REF_DECLARE()
uv_tcp_t* pTcp;
uv_write_t pWriter;
queue wreqQueue;
uv_timer_t pTimer;
queue queue;
@ -331,8 +331,7 @@ void uvOnTimeoutCb(uv_timer_t* handle) {
}
void uvOnSendCb(uv_write_t* req, int status) {
SSvrConn* conn = req && req->handle ? req->handle->data : NULL;
taosMemoryFree(req);
SSvrConn* conn = transReqQueueRemove(req);
if (conn == NULL) return;
if (status == 0) {
@ -436,13 +435,12 @@ static void uvStartSendRespInternal(SSvrMsg* smsg) {
transRefSrvHandle(pConn);
uv_write_t* req = taosMemoryCalloc(1, sizeof(uv_write_t));
uv_write_t* req = transReqQueuePushReq(&pConn->wreqQueue);
uv_write(req, (uv_stream_t*)pConn->pTcp, &wb, 1, uvOnSendCb);
}
static void uvStartSendResp(SSvrMsg* smsg) {
// impl
SSvrConn* pConn = smsg->pConn;
if (pConn->broken == true) {
// persist by
transFreeMsg(smsg->msg.pCont);
@ -639,8 +637,6 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
uv_tcp_init(pThrd->loop, pConn->pTcp);
pConn->pTcp->data = pConn;
pConn->pWriter.data = pConn;
transSetConnOption((uv_tcp_t*)pConn->pTcp);
if (uv_accept(q, (uv_stream_t*)(pConn->pTcp)) == 0) {
@ -748,6 +744,8 @@ static SSvrConn* createConn(void* hThrd) {
SWorkThrd* pThrd = hThrd;
SSvrConn* pConn = (SSvrConn*)taosMemoryCalloc(1, sizeof(SSvrConn));
transReqQueueInit(&pConn->wreqQueue);
QUEUE_INIT(&pConn->queue);
QUEUE_PUSH(&pThrd->conn, &pConn->queue);
@ -823,6 +821,8 @@ static void uvDestroyConn(uv_handle_t* handle) {
SSvrMsg* msg = transQueueGet(&conn->srvMsgs, i);
destroySmsg(msg);
}
transReqQueueClear(&conn->wreqQueue);
transQueueDestroy(&conn->srvMsgs);
QUEUE_REMOVE(&conn->queue);

View File

@ -4,7 +4,8 @@ import logging
import os
import sys
from typing import Optional
import time , datetime
from datetime import datetime
import taos
@ -43,6 +44,10 @@ class MyLoggingAdapter(logging.LoggerAdapter):
class Logging:
logger = None # type: Optional[MyLoggingAdapter]
@classmethod
def _get_datetime(cls):
return datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-1]
@classmethod
def getLogger(cls):
return cls.logger
@ -64,22 +69,22 @@ class Logging:
# global logger
cls.logger = MyLoggingAdapter(_logger, {})
cls.logger.setLevel(logging.DEBUG if debugMode else logging.INFO) # default seems to be INFO
@classmethod
def info(cls, msg):
cls.logger.info(msg)
cls.logger.info("[time]: " + cls._get_datetime() +" [msg]: "+ msg)
@classmethod
def debug(cls, msg):
cls.logger.debug(msg)
cls.logger.debug("[time]: " + cls._get_datetime() +" [msg]: "+ msg)
@classmethod
def warning(cls, msg):
cls.logger.warning(msg)
cls.logger.warning("[time]: " + cls._get_datetime() +" [msg]: "+ msg)
@classmethod
def error(cls, msg):
cls.logger.error(msg)
cls.logger.error("[time]: " + cls._get_datetime() +" [msg]: "+ msg)
class Status:
STATUS_EMPTY = 99

View File

@ -41,6 +41,30 @@ class TDSetSql:
create_stb_sql = f'create table {stbname} ({column_sql[:-1]}) tags({tag_sql[:-1]})'
return create_stb_sql
def set_insertsql(self,column_dict,tbname,binary_str,nchar_str):
sql = ''
for k, v in column_dict.items():
if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \
v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 'bool':
sql += '%d,'
elif v.lower() == 'float' or v.lower() == 'double':
sql += '%f,'
elif 'binary' in v.lower():
sql += f'"{binary_str}%d",'
elif 'nchar' in v.lower():
sql += f'"{nchar_str}%d",'
return (f'insert into {tbname} values({sql[:-1]})')
def insert_values(self,column_dict,i,insert_sql,insert_list,ts):
for k, v in column_dict.items():
if v.lower() in[ 'tinyint' , 'smallint' , 'int', 'bigint' , 'tinyint unsigned' , 'smallint unsigned' , 'int unsigned' , 'bigint unsigned'] or\
'binary' in v.lower() or 'nchar' in v.lower():
insert_list.append(0 + i)
elif v.lower() == 'float' or v.lower() == 'double':
insert_list.append(0.1 + i)
elif v.lower() == 'bool':
insert_list.append(i % 2)
elif v.lower() == 'timestamp':
insert_list.append(ts + i)
tdSql.execute(insert_sql%(tuple(insert_list)))

View File

@ -58,7 +58,7 @@ print ----> start to check if there are ERRORS in valgrind log file for each dnode
system_content sh/checkValgrind.sh -n dnode1
print cmd return result ----> [ $system_content ]
if $system_content <= 2 then
if $system_content <= 0 then
return 0
endi

View File

@ -184,20 +184,36 @@ class TDTestCase:
for k,v in self.tag_dict.items():
if v.lower() == 'tinyint':
self.tag_check(i,k,tag_tinyint)
for error in [constant.TINYINT_MIN-1,constant.TINYINT_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'smallint':
self.tag_check(i,k,tag_smallint)
for error in [constant.SMALLINT_MIN-1,constant.SMALLINT_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'int':
self.tag_check(i,k,tag_int)
for error in [constant.INT_MIN-1,constant.INT_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'bigint':
self.tag_check(i,k,tag_bigint)
for error in [constant.BIGINT_MIN-1,constant.BIGINT_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'tinyint unsigned':
self.tag_check(i,k,tag_untinyint)
for error in [constant.TINYINT_UN_MIN-1,constant.TINYINT_UN_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'smallint unsigned':
self.tag_check(i,k,tag_unsmallint)
for error in [constant.SMALLINT_UN_MIN-1,constant.SMALLINT_UN_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'int unsigned':
self.tag_check(i,k,tag_unint)
self.tag_check(i,k,tag_unint)
for error in [constant.INT_UN_MIN-1,constant.INT_UN_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'bigint unsigned':
self.tag_check(i,k,tag_unbigint)
for error in [constant.BIGINT_UN_MIN-1,constant.BIGINT_UN_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'bool':
self.tag_check(i,k,tag_bool)
elif v.lower() == 'float':
@ -207,6 +223,8 @@ class TDTestCase:
tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
else:
tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure')
# for error in [constant.FLOAT_MIN*10,constant.FLOAT_MAX*10]:
# tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif v.lower() == 'double':
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = {tag_double}')
tdSql.query(f'select {k} from {self.stbname}_{i}')
@ -214,12 +232,17 @@ class TDTestCase:
tdSql.checkEqual(tdSql.queryResult[0][0],tdSql.queryResult[0][0])
else:
tdLog.exit(f'select {k} from {self.stbname}_{i},data check failure')
for error in [constant.DOUBLE_MIN-1,constant.DOUBLE_MAX+1]:
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = {error}')
elif 'binary' in v.lower():
tag_binary_error = tdCom.getLongName(self.binary_length+1)
tdSql.error(f'alter table {self.stbname}_{i} set tag {k} = "{tag_binary_error}"')
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = "{tag_binary}"')
tdSql.query(f'select {k} from {self.stbname}_{i}')
tdSql.checkData(0,0,tag_binary)
elif 'nchar' in v.lower():
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar}"')
tag_nchar_error = tdCom.getLongName(self.nchar_length+1)
tdSql.execute(f'alter table {self.stbname}_{i} set tag {k} = "{tag_nchar_error}"')
tdSql.query(f'select {k} from {self.stbname}_{i}')
tdSql.checkData(0,0,tag_nchar)

View File

@ -15,90 +15,118 @@ from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
tdSql.init(conn.cursor(),logSql)
self.rowNum = 10
self.ts = 1537146000000
self.setsql = TDSetSql()
self.ntbname = 'ntb'
self.stbname = 'stb'
self.binary_length = 20 # the length of binary for column_dict
self.nchar_length = 20 # the length of nchar for column_dict
self.column_dict = {
'ts' : 'timestamp',
'col1': 'tinyint',
'col2': 'smallint',
'col3': 'int',
'col4': 'bigint',
'col5': 'tinyint unsigned',
'col6': 'smallint unsigned',
'col7': 'int unsigned',
'col8': 'bigint unsigned',
'col9': 'float',
'col10': 'double',
'col11': 'bool',
'col12': f'binary({self.binary_length})',
'col13': f'nchar({self.nchar_length})'
}
def check_apercentile(self,data,expect_data,param,percent,column):
if param == "default":
if abs((expect_data-data) <= expect_data * 0.2):
tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}")
else:
tdLog.notice(f"apercentile function value has not as expected with col{column}, param = {param},percent = {percent}")
sys.exit(1)
elif param == "t-digest":
if abs((expect_data-data) <= expect_data * 0.2):
tdLog.info(f"apercentile function values check success with col{column}, param = {param},percent = {percent}")
else:
tdLog.notice(f"apercentile function value has not as expected with col{column}, param = {param},percent = {percent}")
sys.exit(1)
self.tag_dict = {
'ts_tag' : 'timestamp',
't1': 'tinyint',
't2': 'smallint',
't3': 'int',
't4': 'bigint',
't5': 'tinyint unsigned',
't6': 'smallint unsigned',
't7': 'int unsigned',
't8': 'bigint unsigned',
't9': 'float',
't10': 'double',
't11': 'bool',
't12': f'binary({self.binary_length})',
't13': f'nchar({self.nchar_length})'
}
self.binary_str = 'taosdata'
self.nchar_str = '涛思数据'
self.tbnum = 2
self.tag_ts = self.ts
self.tag_tinyint = 1
self.tag_smallint = 2
self.tag_int = 3
self.tag_bigint = 4
self.tag_utint = 5
self.tag_usint = 6
self.tag_uint = 7
self.tag_ubint = 8
self.tag_float = 9.1
self.tag_double = 10.1
self.tag_bool = True
self.tag_values = [
f'{self.tag_ts},{self.tag_tinyint},{self.tag_smallint},{self.tag_int},{self.tag_bigint},\
{self.tag_utint},{self.tag_usint},{self.tag_uint},{self.tag_ubint},{self.tag_float},{self.tag_double},{self.tag_bool},"{self.binary_str}","{self.nchar_str}"'
]
self.percent = [1,50,100]
self.param_list = ['default','t-digest']
def insert_data(self,column_dict,tbname,row_num):
insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str)
for i in range(row_num):
insert_list = []
self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts)
def run(self):
def function_check_ntb(self):
tdSql.prepare()
intData = []
floatData = []
percent_list = [0,50,100]
param_list = ['default','t-digest']
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
# percentile verification
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
self.insert_data(self.column_dict,self.ntbname,self.rowNum)
for k,v in self.column_dict.items():
for percent in self.percent:
for param in self.param_list:
if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
tdSql.error(f'select apercentile({k},{percent},"{param}") from {self.ntbname}')
else:
tdSql.query(f"select apercentile({k},{percent},'{param}') from {self.ntbname}")
def function_check_stb(self):
tdSql.prepare()
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
for i in range(self.tbnum):
tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[0]})")
self.insert_data(self.column_dict,f'{self.stbname}_{i}',self.rowNum)
for i in range(self.tbnum):
for k,v in self.column_dict.items():
for percent in self.percent:
for param in self.param_list:
if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
tdSql.error(f'select apercentile({k},{percent},"{param}") from {self.stbname}_{i}')
else:
tdSql.query(f"select apercentile({k},{percent},'{param}') from {self.stbname}_{i}")
for k,v in self.column_dict.items():
for percent in self.percent:
for param in self.param_list:
if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
tdSql.error(f'select apercentile({k},{percent},"{param}") from {self.stbname}')
else:
tdSql.query(f"select apercentile({k},{percent},'{param}') from {self.stbname}")
def run(self):
self.function_check_ntb()
self.function_check_stb()
tdSql.error("select apercentile(ts ,20) from test")
tdSql.error("select apercentile(col7 ,20) from test")
tdSql.error("select apercentile(col8 ,20) from test")
tdSql.error("select apercentile(col9 ,20) from test")
column_list = [1,2,3,4,5,6,11,12,13,14]
for i in column_list:
for j in percent_list:
for k in param_list:
tdSql.query(f"select apercentile(col{i},{j},'{k}') from test")
data = tdSql.getData(0, 0)
tdSql.query(f"select percentile(col{i},{j}) from test")
expect_data = tdSql.getData(0, 0)
self.check_apercentile(data,expect_data,k,j,i)
error_param_list = [-1,101,'"a"']
for i in error_param_list:
tdSql.error(f'select apercentile(col1,{i}) from test')
tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))")
tdSql.execute("create table t0 using meters tags('beijing')")
tdSql.execute("create table t1 using meters tags('shanghai')")
for i in range(self.rowNum):
tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1))
tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1))
column_list = ['voltage']
for i in column_list:
for j in percent_list:
for k in param_list:
tdSql.query(f"select apercentile({i}, {j},'{k}') from t0")
data = tdSql.getData(0, 0)
tdSql.query(f"select percentile({i},{j}) from t0")
expect_data = tdSql.getData(0,0)
self.check_apercentile(data,expect_data,k,j,i)
tdSql.query(f"select apercentile({i}, {j},'{k}') from meters")
tdSql.checkRows(1)
table_list = ["meters","t0"]
for i in error_param_list:
for j in table_list:
for k in column_list:
tdSql.error(f'select apercentile({k},{i}) from {j}')
def stop(self):
tdSql.close()
tdLog.success("%s successfully executed" % __file__)

View File

@ -51,31 +51,10 @@ class TDTestCase:
self.param_list = [1,100]
def insert_data(self,column_dict,tbname,row_num):
sql = ''
for k, v in column_dict.items():
if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \
v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 'bool':
sql += '%d,'
elif v.lower() == 'float' or v.lower() == 'double':
sql += '%f,'
elif 'binary' in v.lower():
sql += f'"{self.binary_str}%d",'
elif 'nchar' in v.lower():
sql += f'"{self.nchar_str}%d",'
insert_sql = f'insert into {tbname} values({sql[:-1]})'
insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str)
for i in range(row_num):
insert_list = []
for k, v in column_dict.items():
if v.lower() in[ 'tinyint' , 'smallint' , 'int', 'bigint' , 'tinyint unsigned' , 'smallint unsigned' , 'int unsigned' , 'bigint unsigned'] or\
'binary' in v.lower() or 'nchar' in v.lower():
insert_list.append(0 + i)
elif v.lower() == 'float' or v.lower() == 'double':
insert_list.append(0.1 + i)
elif v.lower() == 'bool':
insert_list.append(i % 2)
elif v.lower() == 'timestamp':
insert_list.append(self.ts + i)
tdSql.execute(insert_sql%(tuple(insert_list)))
self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts)
def bottom_check_data(self,tbname,tb_type):
new_column_dict = {}
for param in self.param_list:
@ -129,7 +108,7 @@ class TDTestCase:
tdSql.execute(self.setsql.set_create_stable_sql(stbname,self.column_dict,tag_dict))
for i in range(self.tbnum):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
tdSql.execute(self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum))
self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum)
tdSql.query('show tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):

View File

@ -1315,24 +1315,26 @@ class TDTestCase:
tdSql.error("select elapsed(tsv ,1s) from (select elapsed(ts,1s) tsv from regular_table_1);")
tdSql.error("select elapsed(ts ,1s) from (select elapsed(ts,1s) ts from regular_table_1);")
# # bug fix
# tdSql.error("select elapsed(tsc ,1s) from (select tscol tsc from regular_table_1) ;")
tdSql.error("select elapsed(tsc ,1s) from (select tscol tsc from regular_table_1) ;")
# case TD-12276
# tdSql.error("select elapsed(ts,1s) from (select ts,tbname from regular_table_1 order by ts asc );")
tdSql.query("select elapsed(ts,1s) from (select ts,tbname from regular_table_1 order by ts asc );")
tdSql.checkData(0,0,90.000000000)
# tdSql.error("select elapsed(ts,1s) from (select ts,tbname from regular_table_1 order by ts desc );")
tdSql.query("select elapsed(ts,1s) from (select ts,tbname from regular_table_1 order by ts desc );")
tdSql.checkData(0,0,90.000000000)
# tdSql.error("select elapsed(ts,1s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) interval(1s);")
tdSql.query("select elapsed(ts,1s) from (select ts ,max(q_int),tbname from regular_table_1 order by ts ) interval(1s);")
# tdSql.error("select elapsed(ts,1s) from (select ts ,q_int,tbname from regular_table_1 order by ts ) interval(1s);")
tdSql.query("select elapsed(ts,1s) from (select ts ,q_int,tbname from regular_table_1 order by ts ) interval(10s);")
# sub table
tdSql.query("select elapsed(ts,1s) from (select ts from sub_table1_1 );")
# tdSql.error("select elapsed(ts,1s) from (select ts ,max(q_int),tbname from sub_table1_1 order by ts ) interval(1s);")
tdSql.query("select elapsed(ts,1s) from (select ts ,max(q_int),tbname from sub_table1_1 order by ts ) interval(1s);")
# tdSql.error("select elapsed(ts,1s) from (select ts ,q_int,tbname from sub_table1_1 order by ts ) interval(1s);")
tdSql.query("select elapsed(ts,1s) from (select ts ,q_int,tbname from sub_table1_1 order by ts ) interval(10s);")
tdSql.query("select elapsed(ts,1s) from (select ts ,tbname,top(q_int,3) from sub_table1_1 ) interval(10s);")
@ -1342,7 +1344,7 @@ class TDTestCase:
tdSql.query("select elapsed(ts,1s) from (select ts ,tbname from sub_table1_1 ) interval(10s);")
# tdSql.error("select elapsed(ts,1s) from (select ts ,count(*),tbname from sub_table1_1 order by ts ) interval(1s);")
tdSql.error("select elapsed(ts,1s) from (select ts ,count(*),tbname from sub_table1_1 order by ts ) interval(1s);")
querys = ["count(*)","avg(q_int)", "sum(q_double)","stddev(q_float)","LEASTSQUARES(q_int,0,1)","elapsed(ts,1s)"]
@ -1488,8 +1490,8 @@ class TDTestCase:
tdSql.query('select elapsed(ts,1s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ')
# tdSql.error('select elapsed(ts,1s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ')
# tdSql.error('select elapsed(ts,1s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) session(ts,1w) ; ')
tdSql.query('select elapsed(ts,1s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") session(ts,1w) ; ')
# tdSql.query('select elapsed(ts,1s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) session(ts,1w) ; ')
tdSql.query('select elapsed(ts,1s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" session(ts,1w) ; ')
tdSql.checkRows(0)
@ -1506,14 +1508,14 @@ class TDTestCase:
tdSql.checkRows(10)
tdSql.checkData(0,0,0)
# tdSql.error('select elapsed(ts,1s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ')
tdSql.query('select elapsed(ts,1s) from ( select * from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ')
# tdSql.error('select elapsed(ts,1s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ')
tdSql.query('select elapsed(ts,1s) from ( select ts ,q_int from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000") state_window(q_int) ; ')
# tdSql.error('select elapsed(ts,1s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) state_window(q_int) ; ')
tdSql.error('select elapsed(ts,1s) from sub_table1_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" interval(20s) fill (next) state_window(q_int) ; ')
# tdSql.query('select elapsed(ts,1s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int); ')
# tdSql.checkRows(0)
tdSql.query('select elapsed(ts,1s) from sub_empty_1 where ts>="2015-01-01 00:00:00.000" and ts < "2015-01-01 00:10:00.000" state_window(q_int); ')
tdSql.checkRows(0)
def continuous_query(self):

View File

@ -11,12 +11,14 @@
# -*- coding: utf-8 -*-
from platform import java_ver
from util.log import *
from util.cases import *
from util.sql import *
import numpy as np
from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql):
@ -25,55 +27,141 @@ class TDTestCase:
self.rowNum = 10
self.ts = 1537146000000
self.setsql = TDSetSql()
self.ntbname = 'ntb'
self.stbname = 'stb'
self.binary_length = 20 # the length of binary for column_dict
self.nchar_length = 20 # the length of nchar for column_dict
self.column_dict = {
'ts' : 'timestamp',
'col1': 'tinyint',
'col2': 'smallint',
'col3': 'int',
'col4': 'bigint',
'col5': 'tinyint unsigned',
'col6': 'smallint unsigned',
'col7': 'int unsigned',
'col8': 'bigint unsigned',
'col9': 'float',
'col10': 'double',
'col11': 'bool',
'col12': f'binary({self.binary_length})',
'col13': f'nchar({self.nchar_length})'
}
def run(self):
tdSql.prepare()
self.tag_dict = {
'ts_tag' : 'timestamp',
't1': 'tinyint',
't2': 'smallint',
't3': 'int',
't4': 'bigint',
't5': 'tinyint unsigned',
't6': 'smallint unsigned',
't7': 'int unsigned',
't8': 'bigint unsigned',
't9': 'float',
't10': 'double',
't11': 'bool',
't12': f'binary({self.binary_length})',
't13': f'nchar({self.nchar_length})'
}
self.binary_str = 'taosdata'
self.nchar_str = '涛思数据'
self.tbnum = 2
self.tag_ts = self.ts
self.tag_tinyint = 1
self.tag_smallint = 2
self.tag_int = 3
self.tag_bigint = 4
self.tag_utint = 5
self.tag_usint = 6
self.tag_uint = 7
self.tag_ubint = 8
self.tag_float = 9.1
self.tag_double = 10.1
self.tag_bool = True
self.tag_values = [
f'{self.tag_ts},{self.tag_tinyint},{self.tag_smallint},{self.tag_int},{self.tag_bigint},\
{self.tag_utint},{self.tag_usint},{self.tag_uint},{self.tag_ubint},{self.tag_float},{self.tag_double},{self.tag_bool},"{self.binary_str}","{self.nchar_str}"'
]
self.param = [1,50,100]
def insert_data(self,column_dict,tbname,row_num):
intData = []
floatData = []
tdSql.execute('''create table test(ts timestamp, col1 tinyint, col2 smallint, col3 int, col4 bigint, col5 float, col6 double,
col7 bool, col8 binary(20), col9 nchar(20), col11 tinyint unsigned, col12 smallint unsigned, col13 int unsigned, col14 bigint unsigned)''')
for i in range(self.rowNum):
tdSql.execute("insert into test values(%d, %d, %d, %d, %d, %f, %f, %d, 'taosdata%d', '涛思数据%d', %d, %d, %d, %d)"
% (self.ts + i, i + 1, i + 1, i + 1, i + 1, i + 0.1, i + 0.1, i % 2, i + 1, i + 1, i + 1, i + 1, i + 1, i + 1))
intData.append(i + 1)
floatData.append(i + 0.1)
# percentile verification
tdSql.error("select percentile(ts ,20) from test")
tdSql.error("select percentile(col7 ,20) from test")
tdSql.error("select percentile(col8 ,20) from test")
tdSql.error("select percentile(col9 ,20) from test")
column_list = [1,2,3,4,11,12,13,14]
percent_list = [0,50,100]
for i in column_list:
for j in percent_list:
tdSql.query(f"select percentile(col{i}, {j}) from test")
tdSql.checkData(0, 0, np.percentile(intData, j))
for i in [5,6]:
for j in percent_list:
tdSql.query(f"select percentile(col{i}, {j}) from test")
tdSql.checkData(0, 0, np.percentile(floatData, j))
tdSql.execute("create table meters (ts timestamp, voltage int) tags(loc nchar(20))")
tdSql.execute("create table t0 using meters tags('beijing')")
tdSql.execute("create table t1 using meters tags('shanghai')")
for i in range(self.rowNum):
tdSql.execute("insert into t0 values(%d, %d)" % (self.ts + i, i + 1))
tdSql.execute("insert into t1 values(%d, %d)" % (self.ts + i, i + 1))
# tdSql.error("select percentile(voltage, 20) from meters")
tdSql.execute("create table st(ts timestamp, k int)")
tdSql.execute("insert into st values(now, -100)(now+1a,-99)")
tdSql.query("select apercentile(k, 20) from st")
tdSql.checkData(0, 0, -100.00)
insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str)
for i in range(row_num):
insert_list = []
self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts)
intData.append(i)
floatData.append(i + 0.1)
return intData,floatData
def check_tags(self,tags,param,num,value):
tdSql.query(f'select percentile({tags}, {param}) from {self.stbname}_{num}')
print(tdSql.queryResult)
tdSql.checkEqual(tdSql.queryResult[0][0], value)
def function_check_ntb(self):
tdSql.prepare()
tdSql.execute(self.setsql.set_create_normaltable_sql(self.ntbname,self.column_dict))
intData,floatData = self.insert_data(self.column_dict,self.ntbname,self.rowNum)
for k,v in self.column_dict.items():
for param in self.param:
if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
tdSql.error(f'select percentile({k},{param}) from {self.ntbname}')
elif v.lower() in ['tinyint','smallint','int','bigint','tinyint unsigned','smallint unsigned','int unsigned','bigint unsigned']:
tdSql.query(f'select percentile({k}, {param}) from {self.ntbname}')
tdSql.checkData(0, 0, np.percentile(intData, param))
else:
tdSql.query(f'select percentile({k}, {param}) from {self.ntbname}')
tdSql.checkData(0, 0, np.percentile(floatData, param))
def function_check_ctb(self):
tdSql.prepare()
tdSql.execute(self.setsql.set_create_stable_sql(self.stbname,self.column_dict,self.tag_dict))
for i in range(self.tbnum):
tdSql.execute(f"create table {self.stbname}_{i} using {self.stbname} tags({self.tag_values[0]})")
intData,floatData = self.insert_data(self.column_dict,f'{self.stbname}_{i}',self.rowNum)
for i in range(self.tbnum):
for k,v in self.column_dict.items():
for param in self.param:
if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
tdSql.error(f'select percentile({k},{param}) from {self.stbname}_{i}')
elif v.lower() in ['tinyint','smallint','int','bigint','tinyint unsigned','smallint unsigned','int unsigned','bigint unsigned']:
tdSql.query(f'select percentile({k}, {param}) from {self.stbname}_{i}')
tdSql.checkData(0, 0, np.percentile(intData, param))
else:
tdSql.query(f'select percentile({k}, {param}) from {self.stbname}_{i}')
tdSql.checkData(0, 0, np.percentile(floatData, param))
#!bug TD-17119
# for k,v in self.tag_dict.items():
# for param in self.param:
# if v.lower() in ['timestamp','bool'] or 'binary' in v.lower() or 'nchar' in v.lower():
# tdSql.error(f'select percentile({k},{param}) from {self.stbname}_{i}')
# elif v.lower() == 'tinyint':
# self.check_tags(k,param,i,self.tag_tinyint)
# elif v.lower() == 'smallint':
# self.check_tags(k,param,i,self.tag_smallint)
# elif v.lower() == 'int':
# self.check_tags(k,param,i,self.tag_int)
# elif v.lower() == 'bigint':
# self.check_tags(k,param,i,self.tag_bigint)
# elif v.lower() == 'tinyint unsigned':
# self.check_tags(k,param,i,self.tag_utint)
# elif v.lower() == 'smallint unsigned':
# self.check_tags(k,param,i,self.tag_usint)
# elif v.lower() == 'int unsigned':
# self.check_tags(k,param,i,self.tag_uint)
# elif v.lower() == 'bigint unsigned':
# self.check_tags(k,param,i,self.tag_ubint)
# elif v.lower() == 'float':
# self.check_tags(k,param,i,self.tag_float)
# elif v.lower() == 'double':
# self.check_tags(k,param,i,self.tag_double)
def run(self):
self.function_check_ntb()
self.function_check_ctb()
def stop(self):
tdSql.close()

View File

@ -49,32 +49,10 @@ class TDTestCase:
self.param_list = [1,100]
def insert_data(self,column_dict,tbname,row_num):
sql = ''
for k, v in column_dict.items():
if v.lower() == 'timestamp' or v.lower() == 'tinyint' or v.lower() == 'smallint' or v.lower() == 'int' or v.lower() == 'bigint' or \
v.lower() == 'tinyint unsigned' or v.lower() == 'smallint unsigned' or v.lower() == 'int unsigned' or v.lower() == 'bigint unsigned' or v.lower() == 'bool':
sql += '%d,'
elif v.lower() == 'float' or v.lower() == 'double':
sql += '%f,'
elif 'binary' in v.lower():
sql += f'"{self.binary_str}%d",'
elif 'nchar' in v.lower():
sql += f'"{self.nchar_str}%d",'
insert_sql = f'insert into {tbname} values({sql[:-1]})'
insert_sql = self.setsql.set_insertsql(column_dict,tbname,self.binary_str,self.nchar_str)
for i in range(row_num):
insert_list = []
for k, v in column_dict.items():
if v.lower() in[ 'tinyint' , 'smallint' , 'int', 'bigint' , 'tinyint unsigned' , 'smallint unsigned' , 'int unsigned' , 'bigint unsigned'] or\
'binary' in v.lower() or 'nchar' in v.lower():
insert_list.append(1 + i)
elif v.lower() == 'float' or v.lower() == 'double':
insert_list.append(0.1 + i)
elif v.lower() == 'bool':
insert_list.append(i % 2)
elif v.lower() == 'timestamp':
insert_list.append(self.ts + i)
tdSql.execute(insert_sql%(tuple(insert_list)))
pass
self.setsql.insert_values(column_dict,i,insert_sql,insert_list,self.ts)
def top_check_data(self,tbname,tb_type):
new_column_dict = {}
for param in self.param_list:
@ -86,7 +64,7 @@ class TDTestCase:
tdSql.checkRows(self.rowNum)
values_list = []
for i in range(self.rowNum):
tp = (self.rowNum-i,)
tp = (self.rowNum-i-1,)
values_list.insert(0,tp)
tdSql.checkEqual(tdSql.queryResult,values_list)
elif tb_type == 'stable':
@ -96,7 +74,7 @@ class TDTestCase:
tdSql.checkRows(param)
values_list = []
for i in range(param):
tp = (self.rowNum-i,)
tp = (self.rowNum-i-1,)
values_list.insert(0,tp)
tdSql.checkEqual(tdSql.queryResult,values_list)
elif tb_type == 'stable':
@ -132,7 +110,7 @@ class TDTestCase:
for i in range(self.tbnum):
tdSql.execute(f"create table {stbname}_{i} using {stbname} tags({tag_values[0]})")
tdSql.execute(self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum))
self.insert_data(self.column_dict,f'{stbname}_{i}',self.rowNum)
tdSql.query('show tables')
vgroup_list = []
for i in range(len(tdSql.queryResult)):

View File

@ -0,0 +1,316 @@
import taos
import sys
import time
import socket
import os
import threading
from enum import Enum
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def __init__(self):
self.vgroups = 1
self.ctbNum = 100
self.rowsPerTbl = 10000
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
def prepareTestEnv(self):
tdLog.printNoPrefix("======== prepare test env include database, stable, ctables, and insert data: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 100000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("restart taosd to ensure that the data falls into the disk")
# tdDnodes.stop(1)
# tdDnodes.start(1)
tdSql.query("flush database %s"%(paraDict['dbName']))
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 5,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, paraDict['dbName'], paraDict['stbName']))
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
# time.sleep(3)
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("================= restart dnode ===========================")
tdDnodes.stop(1)
tdDnodes.start(1)
time.sleep(5)
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 1 end ...... ")
def tmqCase2(self):
tdLog.printNoPrefix("======== test case 2: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 1}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("create ctb")
tmqCom.create_ctable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"],ctbPrefix=paraDict['ctbPrefix'],
ctbNum=paraDict["ctbNum"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("insert data")
tmqCom.insert_data_interlaceByMultiTbl(tsql=tdSql,dbName=paraDict["dbName"],ctbPrefix=paraDict["ctbPrefix"],
ctbNum=paraDict["ctbNum"],rowsPerTbl=paraDict["rowsPerTbl"],batchNum=paraDict["batchNum"],
startTs=paraDict["startTs"],ctbStartIdx=paraDict['ctbStartIdx'])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, paraDict['dbName'], paraDict['stbName']))
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"] * 2
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
tdLog.info("create some new child table and insert data ")
paraDict['batchNum'] = 100
tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
tmqCom.getStartCommitNotifyFromTmqsim()
tdLog.info("================= restart dnode ===========================")
tdDnodes.stop(1)
tdDnodes.start(1)
time.sleep(5)
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 2 end ...... ")
# insert data via auto-created child tables, then start the consumer
def tmqCase3(self):
tdLog.printNoPrefix("======== test case 3: ")
paraDict = {'dbName': 'dbt',
'dropFlag': 1,
'event': '',
'vgroups': 1,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1},{'type': 'TIMESTAMP', 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1},{'type': 'BIGINT', 'count':1},{'type': 'DOUBLE', 'count':1},{'type': 'BINARY', 'len':32, 'count':1},{'type': 'NCHAR', 'len':32, 'count':1}],
'ctbPrefix': 'ctb',
'ctbStartIdx': 0,
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 100,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 3,
'showMsg': 1,
'showRow': 1,
'snapshot': 0}
paraDict['vgroups'] = self.vgroups
paraDict['ctbNum'] = self.ctbNum
paraDict['rowsPerTbl'] = self.rowsPerTbl
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=paraDict["vgroups"],replica=1)
tdLog.info("create stb")
tmqCom.create_stable(tdSql, dbName=paraDict["dbName"],stbName=paraDict["stbName"])
tdLog.info("insert data by auto create ctb")
tmqCom.insert_data_with_autoCreateTbl(tdSql,paraDict["dbName"],paraDict["stbName"],"ctb",paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"])
tdLog.info("create topics from stb1")
topicFromStb1 = 'topic_stb1'
tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb1, paraDict['dbName'], paraDict['stbName']))
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicFromStb1
ifcheckdata = 0
ifManualCommit = 0
keyList = 'group.id:cgrp1,\
enable.auto.commit:true,\
auto.commit.interval.ms:1000,\
auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(pollDelay=paraDict['pollDelay'],dbName=paraDict["dbName"],showMsg=paraDict['showMsg'], showRow=paraDict['showRow'],snapshot=paraDict['snapshot'])
# tdLog.info("================= restart dnode ===========================")
# tdDnodes.stop(1)
# tdDnodes.start(1)
# time.sleep(2)
tdLog.info("insert process end, and start to check consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
totalConsumeRows = 0
for i in range(expectRows):
totalConsumeRows += resultList[i]
if totalConsumeRows != expectrowcnt:
tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
tdLog.exit("tmq consume rows error!")
tdSql.query("drop topic %s"%topicFromStb1)
tdLog.printNoPrefix("======== test case 3 end ...... ")
def run(self):
tdSql.prepare()
# self.tmqCase1()
# self.tmqCase2()
self.tmqCase3()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -361,19 +361,25 @@ class TMQCom:
if startTs == 0:
t = time.time()
startTs = int(round(t * 1000))
#tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
rowsBatched = 0
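        # build one multi-row INSERT per child table, auto-creating it with "using <stb> tags (...)",
        # and flush whenever batchNum rows have been accumulated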
for i in range(ctbNum):
sql += " %s.%s_%d using %s.%s tags (%d) values "%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i)
tagBinaryValue = 'beijing'
if (i % 2 == 0):
tagBinaryValue = 'shanghai'
elif (i % 3 == 0):
tagBinaryValue = 'changsha'
sql += " %s.%s_%d using %s.%s tags (%d, %d, %d, '%s', '%s') values "%(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,tagBinaryValue,tagBinaryValue)
for j in range(rowsPerTbl):
sql += "(%d, %d, 'tmqrow_%d') "%(startTs + j, j, j)
sql += "(%d, %d, %d, %d, 'binary_%d', 'nchar_%d', now) "%(startTs+j, j,j, j,i+ctbStartIdx,rowsBatched)
rowsBatched += 1
if ((rowsBatched == batchNum) or (j == rowsPerTbl - 1)):
tsql.execute(sql)
rowsBatched = 0
if j < rowsPerTbl - 1:
sql = "insert into %s.%s_%d using %s.%s tags (%d) values " %(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i)
sql = "insert into %s.%s_%d using %s.%s tags (%d, %d, %d, '%s', '%s') values " %(dbName,ctbPrefix,i+ctbStartIdx,dbName,stbName,i+ctbStartIdx,i+ctbStartIdx,i+ctbStartIdx,tagBinaryValue,tagBinaryValue)
else:
sql = "insert into "
#end sql

View File

@ -52,6 +52,7 @@ typedef struct {
// char autoOffsetRest[16]; // none, earliest, latest
TdFilePtr pConsumeRowsFile;
TdFilePtr pConsumeMetaFile;
int32_t ifCheckData;
int64_t expectMsgCnt;
@ -445,7 +446,7 @@ static void dumpToFileForCheck(TdFilePtr pFile, TAOS_ROW row, TAOS_FIELD* fields
taosFprintfFile(pFile, "\n");
}
static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) {
static int32_t data_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) {
char buf[1024];
int32_t totalRows = 0;
@ -496,6 +497,52 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex)
return totalRows;
}
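// meta_msg_process(): handles TMQ_RES_TABLE_META messages. It replays the raw meta payload
// into the local "metadb" database via taos_write_raw_meta() and dumps the JSON form returned
// by tmq_get_json_meta() into the per-consumer meta file.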
static int32_t meta_msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) {
char buf[1024];
int32_t totalRows = 0;
// printf("topic: %s\n", tmq_get_topic_name(msg));
int32_t vgroupId = tmq_get_vgroup_id(msg);
const char* dbName = tmq_get_db_name(msg);
taosFprintfFile(g_fp, "consumerId: %d, msg index:%" PRId64 "\n", pInfo->consumerId, msgIndex);
taosFprintfFile(g_fp, "dbName: %s, topic: %s, vgroupId: %d\n", dbName != NULL ? dbName : "invalid table",
tmq_get_topic_name(msg), vgroupId);
{
tmq_raw_data *raw = tmq_get_raw_meta(msg);
if(raw){
TAOS_RES* pRes = taos_query(pInfo->taos, "use metadb");
if (taos_errno(pRes) != 0) {
pError("error when use metadb, reason:%s\n", taos_errstr(pRes));
taosFprintfFile(g_fp, "error when use metadb, reason:%s\n", taos_errstr(pRes));
taosCloseFile(&g_fp);
taos_free_result(pRes);
exit(-1);
}
taos_free_result(pRes);
taosFprintfFile(g_fp, "raw:%p\n", raw);
int32_t ret = taos_write_raw_meta(pInfo->taos, raw);
taosMemoryFree(raw);
}
char* result = tmq_get_json_meta(msg);
if(result){
//printf("meta result: %s\n", result);
taosFprintfFile(pInfo->pConsumeMetaFile, "%s\n", result);
taosMemoryFree(result);
}
}
totalRows++;
return totalRows;
}
int queryDB(TAOS* taos, char* command) {
TAOS_RES* pRes = taos_query(taos, command);
int code = taos_errno(pRes);
@ -526,7 +573,7 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) {
static int32_t g_once_commit_flag = 0;
static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
pError("tmq_commit_cb_print() commit %d\n", code);
taosFprintfFile(g_fp, "tmq_commit_cb_print() commit %d\n", code);
if (0 == g_once_commit_flag) {
g_once_commit_flag = 1;
@ -630,8 +677,12 @@ void loop_consume(SThreadInfo* pInfo) {
// getCurrentTimeString(tmpString));
sprintf(filename, "%s/../log/consumerid_%d.txt", configDir, pInfo->consumerId);
pInfo->pConsumeRowsFile = taosOpenFile(filename, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
if (pInfo->pConsumeRowsFile == NULL) {
taosFprintfFile(g_fp, "%s create file fail for save rows content\n", getCurrentTimeString(tmpString));
sprintf(filename, "%s/../log/meta_consumerid_%d.txt", configDir, pInfo->consumerId);
pInfo->pConsumeMetaFile = taosOpenFile(filename, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
if (pInfo->pConsumeRowsFile == NULL || pInfo->pConsumeMetaFile == NULL) {
taosFprintfFile(g_fp, "%s create file fail for save rows or save meta\n", getCurrentTimeString(tmpString));
return;
}
}
@ -645,7 +696,11 @@ void loop_consume(SThreadInfo* pInfo) {
TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, consumeDelay);
if (tmqMsg) {
if (0 != g_stConfInfo.showMsgFlag) {
totalRows += msg_process(tmqMsg, pInfo, totalMsgs);
tmq_res_t msgType = tmq_get_res_type(tmqMsg);
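        // dispatch on the payload type: meta messages (create/alter/drop replay) vs. ordinary data messages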
if (msgType == TMQ_RES_TABLE_META) {
totalRows += meta_msg_process(tmqMsg, pInfo, totalMsgs);
} else if (msgType == TMQ_RES_DATA)
totalRows += data_msg_process(tmqMsg, pInfo, totalMsgs);
}
taos_free_result(tmqMsg);