Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/data_format
This commit is contained in:
commit 4150eb32df
@@ -14,25 +14,6 @@ MESSAGE(STATUS "Project binary files output path: " ${PROJECT_BINARY_DIR})
MESSAGE(STATUS "Project executable files output path: " ${EXECUTABLE_OUTPUT_PATH})
MESSAGE(STATUS "Project library files output path: " ${LIBRARY_OUTPUT_PATH})

-find_package(Git QUIET)
-if(GIT_FOUND AND EXISTS "${TD_SOURCE_DIR}/.git")
-# Update submodules as needed
-    option(GIT_SUBMODULE "Check submodules during build" ON)
-    if(GIT_SUBMODULE)
-        message(STATUS "Submodule update")
-        execute_process(COMMAND cd ${TD_SOURCE_DIR} && ${GIT_EXECUTABLE} submodule update --init --recursive
-                        WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
-                        RESULT_VARIABLE GIT_SUBMOD_RESULT)
-        if(NOT GIT_SUBMOD_RESULT EQUAL "0")
-            message(WARNING "git submodule update --init --recursive failed with ${GIT_SUBMOD_RESULT}, please checkout submodules")
-        endif()
-    endif()
-endif()
-
-if(NOT EXISTS "${TD_SOURCE_DIR}/tools/taos-tools/CMakeLists.txt")
-    message(WARNING "The submodules were not downloaded! GIT_SUBMODULE was turned off or failed. Please update submodules manually if you need build them.")
-endif()
-
if (NOT DEFINED TD_GRANT)
  SET(TD_GRANT FALSE)
endif()
@@ -70,7 +70,7 @@ typedef uint16_t tmsg_t;
#define TSDB_IE_TYPE_DNODE_EXT 6
#define TSDB_IE_TYPE_DNODE_STATE 7

-enum { CONN_TYPE__QUERY = 1, CONN_TYPE__TMQ, CONN_TYPE__MAX };
+enum { CONN_TYPE__QUERY = 1, CONN_TYPE__TMQ, CONN_TYPE__UDFD, CONN_TYPE__MAX };

enum {
  HEARTBEAT_KEY_USER_AUTHINFO = 1,
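The new CONN_TYPE__UDFD member lets the UDF daemon identify itself when it opens a connection to the mnode; the udfdConnectToMNode() function added later in this diff is its only user here. A minimal sketch of that usage, with field names taken from that later hunk:

    SConnectReq connReq = {0};
    connReq.connType = CONN_TYPE__UDFD;  // connect as the UDF daemon, not as a query client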
@@ -642,6 +642,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_INTERNAL_PK   TAOS_DEF_ERROR_CODE(0, 0x2646)
#define TSDB_CODE_PAR_INVALID_TIMELINE_FUNC TAOS_DEF_ERROR_CODE(0, 0x2647)
#define TSDB_CODE_PAR_INVALID_PASSWD        TAOS_DEF_ERROR_CODE(0, 0x2648)
+#define TSDB_CODE_PAR_INVALID_ALTER_TABLE   TAOS_DEF_ERROR_CODE(0, 0x2649)

//planner
#define TSDB_CODE_PLAN_INTERNAL_ERROR       TAOS_DEF_ERROR_CODE(0, 0x2700)
@@ -1007,8 +1007,6 @@ static int32_t smlParseTelnetTags(const char* data, SArray *cols, SHashObj *dump
  while(*sql != '\0') {
    // parse value
    if (*sql == SPACE) {
      valueLen = sql - value;
      sql++;
      break;
    }
    if (*sql == EQUAL) {
@@ -1017,9 +1015,7 @@ static int32_t smlParseTelnetTags(const char* data, SArray *cols, SHashObj *dump
    }
    sql++;
  }
  if(valueLen == 0){
    valueLen = sql - value;
  }

  if(valueLen == 0){
    smlBuildInvalidDataMsg(msg, "invalid value", value);
@@ -1365,7 +1361,7 @@ static void smlDestroySTableMeta(SSmlSTableMeta *meta){
static void smlDestroyCols(SArray *cols) {
  if (!cols) return;
  for (int i = 0; i < taosArrayGetSize(cols); ++i) {
-    void *kv = taosArrayGet(cols, i);
+    void *kv = taosArrayGetP(cols, i);
    taosMemoryFree(kv);
  }
}
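The one-character fix matters because cols stores pointers: taosArrayGet returns the address of the i-th slot inside the array's own buffer, while taosArrayGetP dereferences that slot and returns the pointer stored in it. Freeing the slot address corrupts the array. A sketch of the distinction, assuming (as the loop implies) that each element is a heap pointer:

    void **slot = taosArrayGet(cols, i);   // address of the i-th slot in the array buffer
    void  *kv   = taosArrayGetP(cols, i);  // the pointer value stored in that slot, i.e. *slot
    taosMemoryFree(kv);                    // frees the heap object the slot points to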
@@ -2077,12 +2073,16 @@ static int32_t smlParseTelnetLine(SSmlHandle* info, void *data) {
  if(ret != TSDB_CODE_SUCCESS){
    uError("SML:0x%"PRIx64" smlParseTelnetLine failed", info->id);
    smlDestroyTableInfo(info, tinfo);
+   smlDestroyCols(cols);
    taosArrayDestroy(cols);
    return ret;
  }

  if(taosArrayGetSize(tinfo->tags) <= 0 || taosArrayGetSize(tinfo->tags) > TSDB_MAX_TAGS){
    smlBuildInvalidDataMsg(&info->msgBuf, "invalidate tags length:[1,128]", NULL);
    smlDestroyTableInfo(info, tinfo);
+   smlDestroyCols(cols);
    taosArrayDestroy(cols);
    return TSDB_CODE_SML_INVALID_DATA;
  }
  taosHashClear(info->dumplicateKey);
@@ -516,8 +516,8 @@ TEST(testCase, smlProcess_influx_Test) {
  int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
  ASSERT_EQ(ret, 0);

-  TAOS_RES *res = taos_query(taos, "select * from t_6885c584b98481584ee13dac399e173d");
-  ASSERT_NE(res, nullptr);
+  // TAOS_RES *res = taos_query(taos, "select * from t_6885c584b98481584ee13dac399e173d");
+  // ASSERT_NE(res, nullptr);
  // int fieldNum = taos_field_count(res);
  // ASSERT_EQ(fieldNum, 5);
  // int rowNum = taos_affected_rows(res);
@@ -525,7 +525,7 @@ TEST(testCase, smlProcess_influx_Test) {
  // for (int i = 0; i < rowNum; ++i) {
  //   TAOS_ROW rows = taos_fetch_row(res);
  // }
-  taos_free_result(res);
+  // taos_free_result(res);
  destroyRequest(request);
  smlDestroyInfo(info);
}
@@ -605,8 +605,8 @@ TEST(testCase, smlProcess_telnet_Test) {
  int ret = smlProcess(info, (char**)sql, sizeof(sql)/sizeof(sql[0]));
  ASSERT_EQ(ret, 0);

-  TAOS_RES *res = taos_query(taos, "select * from t_8c30283b3c4131a071d1e16cf6d7094a");
-  ASSERT_NE(res, nullptr);
+  // TAOS_RES *res = taos_query(taos, "select * from t_8c30283b3c4131a071d1e16cf6d7094a");
+  // ASSERT_NE(res, nullptr);
  // int fieldNum = taos_field_count(res);
  // ASSERT_EQ(fieldNum, 2);
  // int rowNum = taos_affected_rows(res);
@@ -614,7 +614,7 @@ TEST(testCase, smlProcess_telnet_Test) {
  // for (int i = 0; i < rowNum; ++i) {
  //   TAOS_ROW rows = taos_fetch_row(res);
  // }
-  taos_free_result(res);
+  // taos_free_result(res);

  // res = taos_query(taos, "select * from t_6931529054e5637ca92c78a1ad441961");
  // ASSERT_NE(res, nullptr);
@@ -670,16 +670,16 @@ TEST(testCase, smlProcess_json1_Test) {
  int ret = smlProcess(info, (char **)(&sql), -1);
  ASSERT_EQ(ret, 0);

-  TAOS_RES *res = taos_query(taos, "select * from t_cb27a7198d637b4f1c6464bd73f756a7");
-  ASSERT_NE(res, nullptr);
-  int fieldNum = taos_field_count(res);
-  ASSERT_EQ(fieldNum, 2);
+  // TAOS_RES *res = taos_query(taos, "select * from t_cb27a7198d637b4f1c6464bd73f756a7");
+  // ASSERT_NE(res, nullptr);
+  // int fieldNum = taos_field_count(res);
+  // ASSERT_EQ(fieldNum, 2);
  // int rowNum = taos_affected_rows(res);
  // ASSERT_EQ(rowNum, 1);
  // for (int i = 0; i < rowNum; ++i) {
  //   TAOS_ROW rows = taos_fetch_row(res);
  // }
-  taos_free_result(res);
+  // taos_free_result(res);
  destroyRequest(request);
  smlDestroyInfo(info);
}
@@ -4217,7 +4217,6 @@ int32_t tDecodeSVAlterTbReq(SDecoder *pDecoder, SVAlterTbReq *pReq) {

  if (tDecodeCStr(pDecoder, &pReq->tbName) < 0) return -1;
  if (tDecodeI8(pDecoder, &pReq->action) < 0) return -1;
-  if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
  switch (pReq->action) {
    case TSDB_ALTER_TABLE_ADD_COLUMN:
      if (tDecodeCStr(pDecoder, &pReq->colName) < 0) return -1;
@@ -214,7 +214,23 @@ static void vmProcessSyncQueue(SQueueInfo *pInfo, STaosQall *qall, int32_t numOf

    // todo
    SRpcMsg *pRsp = NULL;
-    (void)vnodeProcessSyncReq(pVnode->pImpl, &pMsg->rpcMsg, &pRsp);
+    int32_t ret = vnodeProcessSyncReq(pVnode->pImpl, &pMsg->rpcMsg, &pRsp);
+    if (ret != 0) {
+      // if leader, send response
+      if (pMsg->rpcMsg.handle != NULL) {
+        SRpcMsg rsp;
+        rsp.pCont = NULL;
+        rsp.contLen = 0;
+
+        rsp.code = terrno;
+        dTrace("vmProcessSyncQueue error, code:%d", terrno);
+
+        rsp.ahandle = pMsg->rpcMsg.ahandle;
+        rsp.handle = pMsg->rpcMsg.handle;
+        rsp.refId = pMsg->rpcMsg.refId;
+        tmsgSendRsp(&rsp);
+      }
+    }

    rpcFreeCont(pMsg->rpcMsg.pCont);
    taosFreeQitem(pMsg);
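The status of vnodeProcessSyncReq is no longer thrown away: when it reports failure and the message still carries an RPC handle (meaning this node owns the client connection, which is only the case on the leader), the queue worker answers with the current terrno instead of leaving the client hanging. A condensed equivalent of the hunk above, not a literal excerpt:

    int32_t ret = vnodeProcessSyncReq(pVnode->pImpl, &pMsg->rpcMsg, &pRsp);
    if (ret != 0 && pMsg->rpcMsg.handle != NULL) {   // only the connection owner can respond
      SRpcMsg rsp = {.code = terrno,                 // surface the vnode-side error to the client
                     .handle = pMsg->rpcMsg.handle,  // identifies the pending client call
                     .ahandle = pMsg->rpcMsg.ahandle,
                     .refId = pMsg->rpcMsg.refId};
      tmsgSendRsp(&rsp);
    }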
@@ -198,6 +198,8 @@ void smaHandleRes(void *pVnode, int64_t smaId, const SArray *data) {
}

int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
+  int32_t ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
+
  if (syncEnvIsStart()) {
    SSyncNode *pSyncNode = syncNodeAcquire(pVnode->sync);
    assert(pSyncNode != NULL);
@@ -219,67 +221,70 @@ int vnodeProcessSyncReq(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
      SyncTimeout *pSyncMsg = syncTimeoutFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnTimeoutCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnTimeoutCb(pSyncNode, pSyncMsg);
      syncTimeoutDestroy(pSyncMsg);

    } else if (pRpcMsg->msgType == TDMT_VND_SYNC_PING) {
      SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnPingCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnPingCb(pSyncNode, pSyncMsg);
      syncPingDestroy(pSyncMsg);

    } else if (pRpcMsg->msgType == TDMT_VND_SYNC_PING_REPLY) {
      SyncPingReply *pSyncMsg = syncPingReplyFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnPingReplyCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnPingReplyCb(pSyncNode, pSyncMsg);
      syncPingReplyDestroy(pSyncMsg);

    } else if (pRpcMsg->msgType == TDMT_VND_SYNC_CLIENT_REQUEST) {
      SyncClientRequest *pSyncMsg = syncClientRequestFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnClientRequestCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnClientRequestCb(pSyncNode, pSyncMsg);
      syncClientRequestDestroy(pSyncMsg);

    } else if (pRpcMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE) {
      SyncRequestVote *pSyncMsg = syncRequestVoteFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnRequestVoteCb(pSyncNode, pSyncMsg);
      syncRequestVoteDestroy(pSyncMsg);

    } else if (pRpcMsg->msgType == TDMT_VND_SYNC_REQUEST_VOTE_REPLY) {
      SyncRequestVoteReply *pSyncMsg = syncRequestVoteReplyFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnRequestVoteReplyCb(pSyncNode, pSyncMsg);
      syncRequestVoteReplyDestroy(pSyncMsg);

    } else if (pRpcMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES) {
      SyncAppendEntries *pSyncMsg = syncAppendEntriesFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnAppendEntriesCb(pSyncNode, pSyncMsg);
      syncAppendEntriesDestroy(pSyncMsg);

    } else if (pRpcMsg->msgType == TDMT_VND_SYNC_APPEND_ENTRIES_REPLY) {
      SyncAppendEntriesReply *pSyncMsg = syncAppendEntriesReplyFromRpcMsg2(pRpcMsg);
      assert(pSyncMsg != NULL);

-      syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg);
+      ret = syncNodeOnAppendEntriesReplyCb(pSyncNode, pSyncMsg);
      syncAppendEntriesReplyDestroy(pSyncMsg);

    } else {
      vError("==vnodeProcessSyncReq== error msg type:%d", pRpcMsg->msgType);
+      ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
    }

    syncNodeRelease(pSyncNode);
  } else {
    vError("==vnodeProcessSyncReq== error syncEnv stop");
+    ret = TAOS_SYNC_PROPOSE_OTHER_ERROR;
  }
-  return 0;
+
+  return ret;
}

static int vnodeProcessCreateStbReq(SVnode *pVnode, int64_t version, void *pReq, int len, SRpcMsg *pRsp) {
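Every branch of vnodeProcessSyncReq follows the same lifecycle: deserialize the generic SRpcMsg into its typed sync message, invoke the matching Raft callback, capture the callback's status in ret, and destroy the typed copy. Shown once, with the ping branch standing in for all eight (names come from the hunk above):

    SyncPing *pSyncMsg = syncPingFromRpcMsg2(pRpcMsg);  // typed view of the raw RPC payload
    assert(pSyncMsg != NULL);
    ret = syncNodeOnPingCb(pSyncNode, pSyncMsg);        // status now reaches the caller via 'return ret'
    syncPingDestroy(pSyncMsg);                          // each FromRpcMsg2 allocation has a matching destroy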
@@ -2576,12 +2576,6 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
      CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
    }

-    SHashObj *metaCache = taosHashInit(gCtgMgmt.cfg.maxTblCacheNum, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_ENTRY_LOCK);
-    if (NULL == metaCache) {
-      qError("taosHashInit failed, num:%d", gCtgMgmt.cfg.maxTblCacheNum);
-      CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
-    }
-
    code = taosHashPut(gCtgMgmt.pCluster, &clusterId, sizeof(clusterId), &clusterCtg, POINTER_BYTES);
    if (code) {
      if (HASH_NODE_EXIST(code)) {
@@ -658,6 +658,7 @@ void initExecTimeWindowInfo(SColumnInfoData* pColData, STimeWindow* pQueryWin
void cleanupAggSup(SAggSupporter* pAggSup);
void destroyBasicOperatorInfo(void* param, int32_t numOfOutput);
void appendOneRowToDataBlock(SSDataBlock* pBlock, STupleHandle* pTupleHandle);
+void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId);
SInterval extractIntervalInfo(const STableScanPhysiNode* pTableScanNode);
SColumn extractColumnFromColumnNode(SColumnNode* pColNode);
@@ -2055,6 +2055,11 @@ void extractQualifiedTupleByFilterResult(SSDataBlock* pBlock, const int8_t* rowR
    SColumnInfoData* pDst = taosArrayGet(px->pDataBlock, i);
    SColumnInfoData* pSrc = taosArrayGet(pBlock->pDataBlock, i);

+    // it is a reserved column for scalar function, and no data in this column yet.
+    if (pSrc->pData == NULL) {
+      continue;
+    }
+
    int32_t numOfRows = 0;
    for (int32_t j = 0; j < totalRows; ++j) {
      if (rowRes[j] == 0) {
@@ -291,20 +291,7 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock)

    // this is to handle the tbname
    if (fmIsScanPseudoColumnFunc(functionId)) {
-      struct SScalarFuncExecFuncs fpSet = {0};
-      fmGetScalarFuncExecFuncs(functionId, &fpSet);
-
-      SColumnInfoData infoData = {0};
-      infoData.info.type = TSDB_DATA_TYPE_BIGINT;
-      infoData.info.bytes = sizeof(uint64_t);
-      colInfoDataEnsureCapacity(&infoData, 0, 1);
-
-      colDataAppendInt64(&infoData, 0, &pBlock->info.uid);
-      SScalarParam srcParam = {
-          .numOfRows = pBlock->info.rows, .param = pTableScanInfo->readHandle.meta, .columnData = &infoData};
-
-      SScalarParam param = {.columnData = pColInfoData};
-      fpSet.process(&srcParam, 1, &param);
+      setTbNameColData(pTableScanInfo->readHandle.meta, pBlock, pColInfoData, functionId);
    } else { // these are tags
      const char* p = metaGetTableTagVal(&mr.me, pExpr->base.pParam[0].pCol->colId);
      for (int32_t i = 0; i < pBlock->info.rows; ++i) {
@@ -316,6 +303,23 @@ void addTagPseudoColumnData(STableScanInfo* pTableScanInfo, SSDataBlock* pBlock)
  metaReaderClear(&mr);
}

+void setTbNameColData(void* pMeta, const SSDataBlock* pBlock, SColumnInfoData* pColInfoData, int32_t functionId) {
+  struct SScalarFuncExecFuncs fpSet = {0};
+  fmGetScalarFuncExecFuncs(functionId, &fpSet);
+
+  SColumnInfoData infoData = {0};
+  infoData.info.type = TSDB_DATA_TYPE_BIGINT;
+  infoData.info.bytes = sizeof(uint64_t);
+  colInfoDataEnsureCapacity(&infoData, 0, 1);
+
+  colDataAppendInt64(&infoData, 0, (int64_t*) &pBlock->info.uid);
+  SScalarParam srcParam = {.numOfRows = pBlock->info.rows, .param = pMeta, .columnData = &infoData};
+
+  SScalarParam param = {.columnData = pColInfoData};
+  fpSet.process(&srcParam, 1, &param);
+}
+
static SSDataBlock* doTableScanImpl(SOperatorInfo* pOperator) {
  STableScanInfo* pTableScanInfo = pOperator->info;
  SSDataBlock* pBlock = pTableScanInfo->pResBlock;
@@ -105,7 +105,12 @@ int32_t mavgFunction(SqlFunctionCtx* pCtx);
bool getSampleFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
bool sampleFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
int32_t sampleFunction(SqlFunctionCtx* pCtx);
-int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+//int32_t sampleFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);
+
+bool getTailFuncEnv(struct SFunctionNode* pFunc, SFuncExecEnv* pEnv);
+bool tailFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo* pResultInfo);
+int32_t tailFunction(SqlFunctionCtx* pCtx);
+int32_t tailFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock);

bool getSelectivityFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv);
@@ -386,6 +386,35 @@ static int32_t translateSample(SFunctionNode* pFunc, char* pErrBuf, int32_t len)
  return TSDB_CODE_SUCCESS;
}

+static int32_t translateTail(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
+  int32_t numOfParams = LIST_LENGTH(pFunc->pParameterList);
+  if (2 != numOfParams && 3 != numOfParams) {
+    return invaildFuncParaNumErrMsg(pErrBuf, len, pFunc->functionName);
+  }
+
+  SNode* pPara = nodesListGetNode(pFunc->pParameterList, 0);
+  if (QUERY_NODE_COLUMN != nodeType(pPara)) {
+    return buildFuncErrMsg(pErrBuf, len, TSDB_CODE_FUNC_FUNTION_ERROR,
+                           "The input parameter of TAIL function can only be column");
+  }
+
+  for (int32_t i = 1; i < numOfParams; ++i) {
+    uint8_t paraType = ((SExprNode*)nodesListGetNode(pFunc->pParameterList, i))->resType.type;
+    if (!IS_INTEGER_TYPE(paraType)) {
+      return invaildFuncParaTypeErrMsg(pErrBuf, len, pFunc->functionName);
+    }
+  }
+
+  SExprNode* pCol = (SExprNode*)nodesListGetNode(pFunc->pParameterList, 0);
+  uint8_t colType = pCol->resType.type;
+  if (IS_VAR_DATA_TYPE(colType)) {
+    pFunc->node.resType = (SDataType){.bytes = pCol->resType.bytes, .type = colType};
+  } else {
+    pFunc->node.resType = (SDataType){.bytes = tDataTypes[colType].bytes, .type = colType};
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
static int32_t translateLastRow(SFunctionNode* pFunc, char* pErrBuf, int32_t len) {
  // todo
  return TSDB_CODE_SUCCESS;
@@ -850,6 +879,16 @@ const SBuiltinFuncDefinition funcMgtBuiltins[] = {
    .processFunc = sampleFunction,
    .finalizeFunc = NULL
  },
+  {
+    .name = "tail",
+    .type = FUNCTION_TYPE_TAIL,
+    .classification = FUNC_MGT_NONSTANDARD_SQL_FUNC | FUNC_MGT_TIMELINE_FUNC,
+    .translateFunc = translateTail,
+    .getEnvFunc = getTailFuncEnv,
+    .initFunc = tailFunctionSetup,
+    .processFunc = tailFunction,
+    .finalizeFunc = tailFinalize
+  },
  {
    .name = "abs",
    .type = FUNCTION_TYPE_ABS,
@@ -18,12 +18,15 @@
#include "function.h"
#include "querynodes.h"
#include "taggfunction.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "tpercentile.h"

#define HISTOGRAM_MAX_BINS_NUM 1000
#define MAVG_MAX_POINTS_NUM    1000
#define SAMPLE_MAX_POINTS_NUM  1000
+#define TAIL_MAX_POINTS_NUM    100
+#define TAIL_MAX_OFFSET        100

typedef struct SSumRes {
  union {
@@ -161,6 +164,21 @@ typedef struct SSampleInfo {
  int64_t *timestamp;
} SSampleInfo;

+typedef struct STailItem {
+  int64_t timestamp;
+  bool    isNull;
+  char    data[];
+} STailItem;
+
+typedef struct STailInfo {
+  int32_t     numOfPoints;
+  int32_t     numAdded;
+  int32_t     offset;
+  uint8_t     colType;
+  int16_t     colBytes;
+  STailItem **pItems;
+} STailInfo;
+
#define SET_VAL(_info, numOfElem, res) \
  do {                                 \
    if ((numOfElem) <= 0) {            \
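STailInfo lives entirely inside the single buffer whose size getTailFuncEnv (further below) computes: the struct itself, then an array of numOfPoints pointers, then numOfPoints fixed-size items, each STailItem ending in a flexible array member wide enough for one column value. A sketch of how that layout is carved up, mirroring tailFunctionSetup later in this diff, with k standing in for numOfPoints and colBytes for the column width:

    // one allocation: [STailInfo][STailItem* x k][item 0][item 1]...[item k-1]
    pInfo->pItems = (STailItem **)((char *)pInfo + sizeof(STailInfo));
    char  *pItem    = (char *)pInfo->pItems + k * POINTER_BYTES;  // first item follows the pointer table
    size_t unitSize = sizeof(STailItem) + colBytes;               // item header plus inline value
    for (int32_t i = 0; i < k; ++i) {
      pInfo->pItems[i] = (STailItem *)(pItem + i * unitSize);
    }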
@@ -3107,7 +3125,7 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {

  int32_t startOffset = pCtx->offset;
  for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex; i += 1) {
-    if (colDataIsNull_f(pInputCol->nullbitmap, i)) {
+    if (colDataIsNull_s(pInputCol, i)) {
      //colDataAppendNULL(pOutput, i);
      continue;
    }
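The switch from colDataIsNull_f to colDataIsNull_s widens the NULL test: the _f form only reads the fixed-length null bitmap, while the _s form also recognizes NULLs in variable-length columns, whose null markers live in the column's offset metadata rather than the bitmap (a summary of the two helpers' behavior, not text from this diff):

    if (colDataIsNull_s(pInputCol, i)) {  // safe for both fixed- and var-length columns
      continue;                           // sampling ignores NULL input rows
    }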
@@ -3141,3 +3159,132 @@ int32_t sampleFunction(SqlFunctionCtx* pCtx) {
//
//  return pResInfo->numOfRes;
//}

bool getTailFuncEnv(SFunctionNode* pFunc, SFuncExecEnv* pEnv) {
  SColumnNode* pCol = (SColumnNode*)nodesListGetNode(pFunc->pParameterList, 0);
  SValueNode* pVal = (SValueNode*)nodesListGetNode(pFunc->pParameterList, 1);
  int32_t numOfPoints = pVal->datum.i;
  pEnv->calcMemSize = sizeof(STailInfo) + numOfPoints * (POINTER_BYTES + sizeof(STailItem) + pCol->node.resType.bytes);
  return true;
}

bool tailFunctionSetup(SqlFunctionCtx *pCtx, SResultRowEntryInfo *pResultInfo) {
  if (!functionSetup(pCtx, pResultInfo)) {
    return false;
  }

  STailInfo *pInfo = GET_ROWCELL_INTERBUF(pResultInfo);
  pInfo->numAdded = 0;
  pInfo->numOfPoints = pCtx->param[1].param.i;
  if (pCtx->numOfParams == 4) {
    pInfo->offset = pCtx->param[2].param.i;
  } else {
    pInfo->offset = 0;
  }
  pInfo->colType = pCtx->resDataInfo.type;
  pInfo->colBytes = pCtx->resDataInfo.bytes;
  if ((pInfo->numOfPoints < 1 || pInfo->numOfPoints > TAIL_MAX_POINTS_NUM) ||
      (pInfo->numOfPoints < 0 || pInfo->numOfPoints > TAIL_MAX_OFFSET)) {
    return false;
  }

  pInfo->pItems = (STailItem **)((char *)pInfo + sizeof(STailInfo));
  char *pItem = (char *)pInfo->pItems + pInfo->numOfPoints * POINTER_BYTES;

  size_t unitSize = sizeof(STailItem) + pInfo->colBytes;
  for (int32_t i = 0; i < pInfo->numOfPoints; ++i) {
    pInfo->pItems[i] = (STailItem *)(pItem + i * unitSize);
    pInfo->pItems[i]->isNull = false;
  }

  return true;
}

static void tailAssignResult(STailItem* pItem, char *data, int32_t colBytes, TSKEY ts, bool isNull) {
  pItem->timestamp = ts;
  if (isNull) {
    pItem->isNull = true;
  } else {
    memcpy(pItem->data, data, colBytes);
  }
}

static int32_t tailCompFn(const void *p1, const void *p2, const void *param) {
  STailItem *d1 = *(STailItem **)p1;
  STailItem *d2 = *(STailItem **)p2;
  return compareInt64Val(&d1->timestamp, &d2->timestamp);
}

static void doTailAdd(STailInfo* pInfo, char *data, TSKEY ts, bool isNull) {
  STailItem **pList = pInfo->pItems;
  if (pInfo->numAdded < pInfo->numOfPoints) {
    tailAssignResult(pList[pInfo->numAdded], data, pInfo->colBytes, ts, isNull);
    taosheapsort((void *)pList, sizeof(STailItem **), pInfo->numAdded + 1, NULL, tailCompFn, 0);
    pInfo->numAdded++;
  } else if (pList[0]->timestamp < ts) {
    tailAssignResult(pList[0], data, pInfo->colBytes, ts, isNull);
    taosheapadjust((void *)pList, sizeof(STailItem **), 0, pInfo->numOfPoints - 1, NULL, tailCompFn, NULL, 0);
  }
}

int32_t tailFunction(SqlFunctionCtx* pCtx) {
  SResultRowEntryInfo* pResInfo = GET_RES_INFO(pCtx);
  STailInfo* pInfo = GET_ROWCELL_INTERBUF(pResInfo);

  SInputColumnInfoData* pInput = &pCtx->input;
  TSKEY* tsList = (int64_t*)pInput->pPTS->pData;

  SColumnInfoData* pInputCol = pInput->pData[0];
  SColumnInfoData* pTsOutput = pCtx->pTsOutput;
  SColumnInfoData* pOutput = (SColumnInfoData*)pCtx->pOutput;

  int32_t startOffset = pCtx->offset;
  if (pInfo->offset >= pInput->numOfRows) {
    return 0;
  } else {
    pInfo->numOfPoints = MIN(pInfo->numOfPoints, pInput->numOfRows - pInfo->offset);
  }
  for (int32_t i = pInput->startRowIndex; i < pInput->numOfRows + pInput->startRowIndex - pInfo->offset; i += 1) {

    char* data = colDataGetData(pInputCol, i);
    doTailAdd(pInfo, data, tsList[i], colDataIsNull_s(pInputCol, i));
  }

  taosqsort(pInfo->pItems, pInfo->numOfPoints, POINTER_BYTES, NULL, tailCompFn);

  for (int32_t i = 0; i < pInfo->numOfPoints; ++i) {
    int32_t pos = startOffset + i;
    STailItem *pItem = pInfo->pItems[i];
    if (pItem->isNull) {
      colDataAppendNULL(pOutput, pos);
    } else {
      colDataAppend(pOutput, pos, pItem->data, false);
    }
  }

  return pInfo->numOfPoints;
}

int32_t tailFinalize(SqlFunctionCtx* pCtx, SSDataBlock* pBlock) {
  SResultRowEntryInfo* pEntryInfo = GET_RES_INFO(pCtx);
  STailInfo* pInfo = GET_ROWCELL_INTERBUF(pEntryInfo);
  pEntryInfo->complete = true;

  int32_t type = pCtx->input.pData[0]->info.type;
  int32_t slotId = pCtx->pExpr->base.resSchema.slotId;

  SColumnInfoData* pCol = taosArrayGet(pBlock->pDataBlock, slotId);

  // todo assign the tag value and the corresponding row data
  int32_t currentRow = pBlock->info.rows;
  for (int32_t i = 0; i < pEntryInfo->numOfRes; ++i) {
    STailItem *pItem = pInfo->pItems[i];
    colDataAppend(pCol, currentRow, pItem->data, false);

    //setSelectivityValue(pCtx, pBlock, &pInfo->pItems[i].tuplePos, currentRow);
    currentRow += 1;
  }

  return pEntryInfo->numOfRes;
}
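doTailAdd keeps the k most recent rows with a size-k min-heap ordered by timestamp: while fewer than k items are stored, each new row is appended and the heap re-established; once full, a new row only displaces the heap root (the oldest retained timestamp) when it is newer, followed by a sift-down, and tailFunction finally sorts the survivors by timestamp before emitting them. A freestanding sketch of the same keep-newest-k idea in plain C, using a hypothetical keepNewestK helper rather than the TDengine heap utilities:

    #include <stdint.h>

    /* keep the k largest timestamps seen so far; heap[0] is the smallest one kept */
    static void keepNewestK(int64_t *heap, int *n, int k, int64_t ts) {
      if (*n < k) {                       // still filling: append, then sift up
        int i = (*n)++;
        heap[i] = ts;
        while (i > 0 && heap[(i - 1) / 2] > heap[i]) {
          int64_t t = heap[i]; heap[i] = heap[(i - 1) / 2]; heap[(i - 1) / 2] = t;
          i = (i - 1) / 2;
        }
      } else if (heap[0] < ts) {          // full: replace the oldest kept row, then sift down
        heap[0] = ts;
        for (int i = 0;;) {
          int c = 2 * i + 1;
          if (c >= k) break;
          if (c + 1 < k && heap[c + 1] < heap[c]) c++;   // pick the smaller child
          if (heap[c] >= heap[i]) break;
          int64_t t = heap[i]; heap[i] = heap[c]; heap[c] = t;
          i = c;
        }
      }
    }

This gives O(n log k) behavior per block instead of sorting every row, which is why the final taosqsort only has to order k items.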
@@ -312,6 +312,7 @@ typedef struct SUdfcFuncStub {
  char udfName[TSDB_FUNC_NAME_LEN];
  UdfcFuncHandle handle;
  int32_t refCount;
+  int64_t lastRefTime;
} SUdfcFuncStub;

typedef struct SUdfcProxy {
@@ -1446,6 +1447,7 @@ int32_t accquireUdfFuncHandle(char* udfName, UdfcFuncHandle* pHandle) {
    uv_mutex_unlock(&gUdfdProxy.udfStubsMutex);
    *pHandle = foundStub->handle;
    ++foundStub->refCount;
+    foundStub->lastRefTime = taosGetTimestampUs();
    return 0;
  }
  *pHandle = NULL;
@@ -1455,6 +1457,7 @@ int32_t accquireUdfFuncHandle(char* udfName, UdfcFuncHandle* pHandle) {
  strcpy(stub.udfName, udfName);
  stub.handle = *pHandle;
  ++stub.refCount;
+  stub.lastRefTime = taosGetTimestampUs();
  taosArrayPush(gUdfdProxy.udfStubs, &stub);
  taosArraySort(gUdfdProxy.udfStubs, compareUdfcFuncSub);
} else {
@@ -1662,7 +1665,8 @@ int32_t cleanUpUdfs() {
      fnInfo("tear down udf. udf name: %s, handle: %p", stub->udfName, stub->handle);
      doTeardownUdf(stub->handle);
    } else {
-      fnInfo("udf still in use. udf name: %s, ref count: %d, handle: %p", stub->udfName, stub->refCount, stub->handle);
+      fnInfo("udf still in use. udf name: %s, ref count: %d, last ref time: %"PRId64", handle: %p",
+             stub->udfName, stub->refCount, stub->lastRefTime, stub->handle);
      taosArrayPush(udfStubs, stub);
    }
    ++i;
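Each cached UDF handle now records when it was last acquired. In this diff the timestamp is only logged by cleanUpUdfs(), but it is exactly the bookkeeping an idle-timeout eviction of stale handles would need; treating that as the motivation is an inference, not something the hunk states. The pattern, condensed from the acquire path above:

    ++stub.refCount;                          // one more caller holding the handle
    stub.lastRefTime = taosGetTimestampUs();  // remember the most recent acquisition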
@@ -485,7 +485,146 @@ void udfdIntrSignalHandler(uv_signal_t *handle, int signum) {
  uv_stop(global.loop);
}

-void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) { return; }
+typedef enum EUdfdRpcReqRspType {
+  UDFD_RPC_MNODE_CONNECT = 0,
+  UDFD_RPC_RETRIVE_FUNC,
+} EUdfdRpcReqRspType;

typedef struct SUdfdRpcSendRecvInfo {
  EUdfdRpcReqRspType rpcType;
  int32_t code;
  void* param;
  uv_sem_t resultSem;
} SUdfdRpcSendRecvInfo;

void udfdProcessRpcRsp(void *parent, SRpcMsg *pMsg, SEpSet *pEpSet) {
  SUdfdRpcSendRecvInfo *msgInfo = (SUdfdRpcSendRecvInfo *)pMsg->ahandle;
  ASSERT(pMsg->ahandle != NULL);

  if (pEpSet) {
    if (!isEpsetEqual(&global.mgmtEp.epSet, pEpSet)) {
      updateEpSet_s(&global.mgmtEp, pEpSet);
    }
  }

  if (pMsg->code != TSDB_CODE_SUCCESS) {
    fnError("udfd rpc error. code: %s", tstrerror(pMsg->code));
    msgInfo->code = pMsg->code;
    goto _return;
  }

  if (msgInfo->rpcType == UDFD_RPC_MNODE_CONNECT) {
    SConnectRsp connectRsp = {0};
    tDeserializeSConnectRsp(pMsg->pCont, pMsg->contLen, &connectRsp);
    if (connectRsp.epSet.numOfEps == 0) {
      msgInfo->code = TSDB_CODE_MND_APP_ERROR;
      goto _return;
    }

    if (connectRsp.dnodeNum > 1 && !isEpsetEqual(&global.mgmtEp.epSet, &connectRsp.epSet)) {
      updateEpSet_s(&global.mgmtEp, &connectRsp.epSet);
    }
    msgInfo->code = 0;
  } else if (msgInfo->rpcType == UDFD_RPC_RETRIVE_FUNC) {
    SRetrieveFuncRsp retrieveRsp = {0};
    tDeserializeSRetrieveFuncRsp(pMsg->pCont, pMsg->contLen, &retrieveRsp);

    SFuncInfo *pFuncInfo = (SFuncInfo *)taosArrayGet(retrieveRsp.pFuncInfos, 0);
    SUdf* udf = msgInfo->param;
    udf->funcType = pFuncInfo->funcType;
    udf->scriptType = pFuncInfo->scriptType;
    udf->outputType = pFuncInfo->funcType;
    udf->outputLen = pFuncInfo->outputLen;
    udf->bufSize = pFuncInfo->bufSize;

    char path[PATH_MAX] = {0};
    snprintf(path, sizeof(path), "%s/lib%s.so", "/tmp", pFuncInfo->name);
    TdFilePtr file = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL);
    // TODO check for failure of flush to disk
    taosWriteFile(file, pFuncInfo->pCode, pFuncInfo->codeSize);
    taosCloseFile(&file);
    strncpy(udf->path, path, strlen(path));
    taosArrayDestroy(retrieveRsp.pFuncInfos);
    msgInfo->code = 0;
  }

_return:
  rpcFreeCont(pMsg->pCont);
  uv_sem_post(&msgInfo->resultSem);
  return;
}

int32_t udfdConnectToMNode() {
  SConnectReq connReq = {0};
  connReq.connType = CONN_TYPE__UDFD;
  tstrncpy(connReq.app, "udfd",sizeof(connReq.app));
  tstrncpy(connReq.user, TSDB_DEFAULT_USER, sizeof(connReq.user));
  char pass[TSDB_PASSWORD_LEN + 1] = {0};
  taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass);
  tstrncpy(connReq.passwd, pass, sizeof(connReq.passwd));
  connReq.pid = htonl(taosGetPId());
  connReq.startTime = htobe64(taosGetTimestampMs());

  int32_t contLen = tSerializeSConnectReq(NULL, 0, &connReq);
  void* pReq = rpcMallocCont(contLen);
  tSerializeSConnectReq(pReq, contLen, &connReq);

  SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo));
  msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT;
  uv_sem_init(&msgInfo->resultSem, 0);

  SRpcMsg rpcMsg = {0};
  rpcMsg.msgType = TDMT_MND_CONNECT;
  rpcMsg.pCont = pReq;
  rpcMsg.contLen = contLen;
  rpcMsg.ahandle = msgInfo;
  rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL);

  uv_sem_wait(&msgInfo->resultSem);
  int32_t code = msgInfo->code;
  uv_sem_destroy(&msgInfo->resultSem);
  taosMemoryFree(msgInfo);
  return code;
}

int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) {
  SRetrieveFuncReq retrieveReq = {0};
  retrieveReq.numOfFuncs = 1;
  retrieveReq.pFuncNames = taosArrayInit(1, TSDB_FUNC_NAME_LEN);
  taosArrayPush(retrieveReq.pFuncNames, udfName);

  int32_t contLen = tSerializeSRetrieveFuncReq(NULL, 0, &retrieveReq);
  void *pReq = rpcMallocCont(contLen);
  tSerializeSRetrieveFuncReq(pReq, contLen, &retrieveReq);
  taosArrayDestroy(retrieveReq.pFuncNames);

  SUdfdRpcSendRecvInfo* msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo));
  msgInfo->rpcType = UDFD_RPC_RETRIVE_FUNC;
  msgInfo->param = udf;
  uv_sem_init(&msgInfo->resultSem, 0);

  SRpcMsg rpcMsg = {0};
  rpcMsg.pCont = pReq;
  rpcMsg.contLen = contLen;
  rpcMsg.msgType = TDMT_MND_RETRIEVE_FUNC;
  rpcMsg.ahandle = msgInfo;
  rpcSendRequest(clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL);

  uv_sem_wait(&msgInfo->resultSem);
  uv_sem_destroy(&msgInfo->resultSem);
  int32_t code = msgInfo->code;
  taosMemoryFree(msgInfo);
  return code;
}

static bool udfdRpcRfp(int32_t code) {
  if (code == TSDB_CODE_RPC_REDIRECT) {
    return true;
  } else {
    return false;
  }
}

int initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSet* pEpSet) {
  pEpSet->version = 0;
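udfd now routes all mnode traffic through one callback, so blocking helpers like udfdConnectToMNode() turn the asynchronous rpcSendRequest into a synchronous call with a semaphore: the request carries a SUdfdRpcSendRecvInfo as its ahandle, the sender parks on resultSem, and udfdProcessRpcRsp fills in code (plus any out-parameters via param) before posting. The skeleton of that sync-over-async pattern, reduced to its moving parts from the hunk above:

    SUdfdRpcSendRecvInfo *msgInfo = taosMemoryCalloc(1, sizeof(SUdfdRpcSendRecvInfo));
    msgInfo->rpcType = UDFD_RPC_MNODE_CONNECT;   // tells the callback how to decode the reply
    uv_sem_init(&msgInfo->resultSem, 0);

    rpcMsg.ahandle = msgInfo;                    // travels with the request, returns with the reply
    rpcSendRequest(global.clientRpc, &global.mgmtEp.epSet, &rpcMsg, NULL);

    uv_sem_wait(&msgInfo->resultSem);            // released by udfdProcessRpcRsp
    int32_t code = msgInfo->code;                // status written by the callback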
@@ -528,69 +667,30 @@ int initEpSetFromCfg(const char* firstEp, const char* secondEp, SCorEpSe
  return 0;
}

-int32_t udfdFillUdfInfoFromMNode(void *clientRpc, char *udfName, SUdf *udf) {
-  SRetrieveFuncReq retrieveReq = {0};
-  retrieveReq.numOfFuncs = 1;
-  retrieveReq.pFuncNames = taosArrayInit(1, TSDB_FUNC_NAME_LEN);
-  taosArrayPush(retrieveReq.pFuncNames, udfName);
-
-  int32_t contLen = tSerializeSRetrieveFuncReq(NULL, 0, &retrieveReq);
-  void *pReq = rpcMallocCont(contLen);
-  tSerializeSRetrieveFuncReq(pReq, contLen, &retrieveReq);
-  taosArrayDestroy(retrieveReq.pFuncNames);
-
-  SRpcMsg rpcMsg = {0};
-  rpcMsg.pCont = pReq;
-  rpcMsg.contLen = contLen;
-  rpcMsg.msgType = TDMT_MND_RETRIEVE_FUNC;
-
-  SRpcMsg rpcRsp = {0};
-  rpcSendRecv(clientRpc, &global.mgmtEp.epSet, &rpcMsg, &rpcRsp);
-  SRetrieveFuncRsp retrieveRsp = {0};
-  tDeserializeSRetrieveFuncRsp(rpcRsp.pCont, rpcRsp.contLen, &retrieveRsp);
-
-  SFuncInfo *pFuncInfo = (SFuncInfo *)taosArrayGet(retrieveRsp.pFuncInfos, 0);
-
-  udf->funcType = pFuncInfo->funcType;
-  udf->scriptType = pFuncInfo->scriptType;
-  udf->outputType = pFuncInfo->funcType;
-  udf->outputLen = pFuncInfo->outputLen;
-  udf->bufSize = pFuncInfo->bufSize;
-
-  char path[PATH_MAX] = {0};
-  snprintf(path, sizeof(path), "%s/lib%s.so", "/tmp", udfName);
-  TdFilePtr file = taosOpenFile(path, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_READ | TD_FILE_TRUNC | TD_FILE_AUTO_DEL);
-  // TODO check for failure of flush to disk
-  taosWriteFile(file, pFuncInfo->pCode, pFuncInfo->codeSize);
-  taosCloseFile(&file);
-  strncpy(udf->path, path, strlen(path));
-  taosArrayDestroy(retrieveRsp.pFuncInfos);
-
-  rpcFreeCont(rpcRsp.pCont);
-  return 0;
-}
-
int32_t udfdOpenClientRpc() {
-  char *pass = "taosdata";
-  char *user = "root";
-  char secretEncrypt[TSDB_PASSWORD_LEN + 1] = {0};
-  taosEncryptPass_c((uint8_t *)pass, strlen(pass), secretEncrypt);
  SRpcInit rpcInit = {0};
-  rpcInit.label = (char *)"UDFD";
+  rpcInit.label = "UDFD";
  rpcInit.numOfThreads = 1;
-  rpcInit.cfp = udfdProcessRpcRsp;
+  rpcInit.cfp = (RpcCfp)udfdProcessRpcRsp;
  rpcInit.sessions = 1024;
  rpcInit.connType = TAOS_CONN_CLIENT;
-  rpcInit.idleTime = 30 * 1000;
-  rpcInit.parent = &global;
-
-  rpcInit.user = (char *)user;
-  rpcInit.ckey = (char *)"key";
-  rpcInit.secret = (char *)secretEncrypt;
+  rpcInit.idleTime = tsShellActivityTimer * 1000;
+  rpcInit.user = TSDB_DEFAULT_USER;
+  rpcInit.ckey = "key";
+  rpcInit.spi = 1;
+  rpcInit.parent = &global;
+  rpcInit.rfp = udfdRpcRfp;
+
+  char pass[TSDB_PASSWORD_LEN + 1] = {0};
+  taosEncryptPass_c((uint8_t *)(TSDB_DEFAULT_PASS), strlen(TSDB_DEFAULT_PASS), pass);
+  rpcInit.secret = pass;

  global.clientRpc = rpcOpen(&rpcInit);

  if (global.clientRpc == NULL) {
    fnError("failed to init dnode rpc client");
    return -1;
  }
  return 0;
}
@@ -700,12 +800,6 @@ static int32_t udfdRun() {
  global.udfsHash = taosHashInit(64, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BINARY), true, HASH_NO_LOCK);
  uv_mutex_init(&global.udfsMutex);

-  // TOOD: client rpc to fetch udf function info from mnode
-  if (udfdOpenClientRpc() != 0) {
-    fnError("open rpc connection to mnode failure");
-    return -1;
-  }
-
  if (udfdUvInit() != 0) {
    fnError("uv init failure");
    return -2;
@@ -717,7 +811,6 @@ static int32_t udfdRun() {
  int codeClose = uv_loop_close(global.loop);
  fnDebug("uv loop close. result: %s", uv_err_name(codeClose));
  removeListeningPipe();
-  udfdCloseClientRpc();
  uv_mutex_destroy(&global.udfsMutex);
  taosHashCleanup(global.udfsHash);
  return 0;
@@ -746,9 +839,22 @@ int main(int argc, char *argv[]) {

  if (taosInitCfg(configDir, NULL, NULL, NULL, NULL, 0) != 0) {
    fnError("failed to start since read config error");
-    return -1;
+    return -2;
  }

  initEpSetFromCfg(tsFirst, tsSecond, &global.mgmtEp);
-  return udfdRun();
+  if (udfdOpenClientRpc() != 0) {
+    fnError("open rpc connection to mnode failure");
+    return -3;
+  }
+
+  if (udfdConnectToMNode() != 0) {
+    fnError("failed to start since can not connect to mnode");
+    return -4;
+  }
+
+  udfdRun();
+
+  udfdCloseClientRpc();
}
@@ -241,7 +241,7 @@ alter_table_clause(A) ::=
alter_table_clause(A) ::=
  full_table_name(B) RENAME TAG column_name(C) column_name(D). { A = createAlterTableRenameCol(pCxt, B, TSDB_ALTER_TABLE_UPDATE_TAG_NAME, &C, &D); }
alter_table_clause(A) ::=
-  full_table_name(B) SET TAG column_name(C) NK_EQ literal(D). { A = createAlterTableSetTag(pCxt, B, &C, D); }
+  full_table_name(B) SET TAG column_name(C) NK_EQ literal(D). { A = createAlterTableSetTag(pCxt, B, &C, releaseRawExprNode(pCxt, D)); }

%type multi_create_clause { SNodeList* }
%destructor multi_create_clause { nodesDestroyList($$); }
@@ -1041,18 +1041,6 @@ static void destroyInsertParseContextForTable(SInsertParseContext* pCxt) {
  destroyCreateSubTbReq(&pCxt->createTblReq);
}

-static void destroyDataBlock(STableDataBlocks* pDataBlock) {
-  if (pDataBlock == NULL) {
-    return;
-  }
-
-  taosMemoryFreeClear(pDataBlock->pData);
-  if (!pDataBlock->cloned) {
-    destroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
-  }
-  taosMemoryFreeClear(pDataBlock);
-}
-
static void destroyInsertParseContext(SInsertParseContext* pCxt) {
  destroyInsertParseContextForTable(pCxt);
  taosHashCleanup(pCxt->pVgroupsHashObj);
@@ -1301,6 +1289,7 @@ int32_t qBuildStmtOutput(SQuery* pQuery, SHashObj* pVgHash, SHashObj* pBlockHash

  CHECK_CODE(buildOutput(&insertCtx));

+  destroyBlockArrayList(insertCtx.pVgDataBlocks);
  return TSDB_CODE_SUCCESS;
}
@@ -1580,16 +1569,25 @@ int32_t qBuildStmtColFields(void* pBlock, int32_t* fieldNum, TAOS_FIELD** fields

// schemaless logic start

-typedef struct SmlExecHandle {
-  SHashObj* pBlockHash;
+typedef struct SmlExecTableHandle {
+  SParsedDataColInfo tags;     // each table
+  SKVRowBuilder tagsBuilder;   // each table
+  SVCreateTbReq createTblReq;  // each table
+} SmlExecTableHandle;
+
+typedef struct SmlExecHandle {
+  SHashObj* pBlockHash;
+  SmlExecTableHandle tableExecHandle;
  SQuery *pQuery;
} SSmlExecHandle;

+static void smlDestroyTableHandle(void* pHandle) {
+  SmlExecTableHandle* handle = (SmlExecTableHandle*)pHandle;
+  tdDestroyKVRowBuilder(&handle->tagsBuilder);
+  destroyBoundColumnInfo(&handle->tags);
+  destroyCreateSubTbReq(&handle->createTblReq);
+}
+
static int32_t smlBoundColumnData(SArray* cols, SParsedDataColInfo* pColList, SSchema* pSchema) {
  col_id_t nCols = pColList->numOfCols;
@@ -1692,25 +1690,26 @@ int32_t smlBindData(void *handle, SArray *tags, SArray *colsSchema, SArray *cols
  SMsgBuf pBuf = {.buf = msgBuf, .len = msgBufLen};

  SSmlExecHandle* smlHandle = (SSmlExecHandle*)handle;
+  smlDestroyTableHandle(&smlHandle->tableExecHandle);  // free for each table
  SSchema* pTagsSchema = getTableTagSchema(pTableMeta);
-  setBoundColumnInfo(&smlHandle->tags, pTagsSchema, getNumOfTags(pTableMeta));
-  int ret = smlBoundColumnData(tags, &smlHandle->tags, pTagsSchema);
+  setBoundColumnInfo(&smlHandle->tableExecHandle.tags, pTagsSchema, getNumOfTags(pTableMeta));
+  int ret = smlBoundColumnData(tags, &smlHandle->tableExecHandle.tags, pTagsSchema);
  if (ret != TSDB_CODE_SUCCESS) {
    buildInvalidOperationMsg(&pBuf, "bound tags error");
    return ret;
  }
  SKVRow row = NULL;
-  ret = smlBuildTagRow(tags, &smlHandle->tagsBuilder, &smlHandle->tags, pTagsSchema, &row, &pBuf);
+  ret = smlBuildTagRow(tags, &smlHandle->tableExecHandle.tagsBuilder, &smlHandle->tableExecHandle.tags, pTagsSchema, &row, &pBuf);
  if (ret != TSDB_CODE_SUCCESS) {
    return ret;
  }

-  buildCreateTbReq(&smlHandle->createTblReq, tableName, row, pTableMeta->suid);
+  buildCreateTbReq(&smlHandle->tableExecHandle.createTblReq, tableName, row, pTableMeta->suid);

  STableDataBlocks* pDataBlock = NULL;
  ret = getDataBlockFromList(smlHandle->pBlockHash, &pTableMeta->uid, sizeof(pTableMeta->uid),
                             TSDB_DEFAULT_PAYLOAD_SIZE, sizeof(SSubmitBlk), getTableInfo(pTableMeta).rowSize,
-                             pTableMeta, &pDataBlock, NULL, &smlHandle->createTblReq);
+                             pTableMeta, &pDataBlock, NULL, &smlHandle->tableExecHandle.createTblReq);
  if (ret != TSDB_CODE_SUCCESS) {
    buildInvalidOperationMsg(&pBuf, "create data block error");
    return ret;
@@ -1826,9 +1825,7 @@ void smlDestroyHandle(void* pHandle) {
  if (!pHandle) return;
  SSmlExecHandle* handle = (SSmlExecHandle*)pHandle;
  destroyBlockHashmap(handle->pBlockHash);
-  tdDestroyKVRowBuilder(&handle->tagsBuilder);
-  destroyBoundColumnInfo(&handle->tags);
-  destroyCreateSubTbReq(&handle->createTblReq);
+  smlDestroyTableHandle(&handle->tableExecHandle);
  taosMemoryFree(handle);
}
@@ -237,9 +237,7 @@ static void destroyDataBlock(STableDataBlocks* pDataBlock) {
  taosMemoryFreeClear(pDataBlock->pData);
  if (!pDataBlock->cloned) {
    // free the refcount for metermeta
    if (pDataBlock->pTableMeta != NULL) {
      taosMemoryFreeClear(pDataBlock->pTableMeta);
    }

    destroyBoundColumnInfo(&pDataBlock->boundColumnInfo);
  }
@@ -3800,7 +3800,7 @@ static void destroyCreateTbReqBatch(SVgroupCreateTableBatch* pTbBatch) {
  taosArrayDestroy(pTbBatch->req.pArray);
}

-static int32_t rewriteToVnodeModifOpStmt(SQuery* pQuery, SArray* pBufArray) {
+static int32_t rewriteToVnodeModifyOpStmt(SQuery* pQuery, SArray* pBufArray) {
  SVnodeModifOpStmt* pNewStmt = nodesMakeNode(QUERY_NODE_VNODE_MODIF_STMT);
  if (pNewStmt == NULL) {
    return TSDB_CODE_OUT_OF_MEMORY;
@@ -3855,7 +3855,7 @@ static int32_t rewriteCreateTable(STranslateContext* pCxt, SQuery* pQuery) {
    code = buildCreateTableDataBlock(pCxt->pParseCxt->acctId, pStmt, &info, &pBufArray);
  }
  if (TSDB_CODE_SUCCESS == code) {
-    code = rewriteToVnodeModifOpStmt(pQuery, pBufArray);
+    code = rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
    if (TSDB_CODE_SUCCESS != code) {
      destroyCreateTbReqArray(pBufArray);
    }
@@ -4111,7 +4111,7 @@ static int32_t rewriteCreateMultiTable(STranslateContext* pCxt, SQuery* pQuery)
    return TSDB_CODE_OUT_OF_MEMORY;
  }

-  return rewriteToVnodeModifOpStmt(pQuery, pBufArray);
+  return rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
}

typedef struct SVgroupDropTableBatch {
@@ -4251,12 +4251,160 @@ static int32_t rewriteDropTable(STranslateContext* pCxt, SQuery* pQuery) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

-  return rewriteToVnodeModifOpStmt(pQuery, pBufArray);
+  return rewriteToVnodeModifyOpStmt(pQuery, pBufArray);
}

+static int32_t buildAlterTbReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, SVAlterTbReq* pReq) {
+  pReq->tbName = strdup(pStmt->tableName);
+  if (NULL == pReq->tbName) {
+    return TSDB_CODE_OUT_OF_MEMORY;
+  }
+  pReq->action = pStmt->alterType;

  switch (pStmt->alterType) {
    case TSDB_ALTER_TABLE_ADD_TAG:
    case TSDB_ALTER_TABLE_DROP_TAG:
    case TSDB_ALTER_TABLE_UPDATE_TAG_NAME:
    case TSDB_ALTER_TABLE_UPDATE_TAG_BYTES:
      return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
    case TSDB_ALTER_TABLE_UPDATE_TAG_VAL:
      pReq->tagName = strdup(pStmt->colName);
      if (NULL == pReq->tagName) {
        return TSDB_CODE_OUT_OF_MEMORY;
      }
      if (DEAL_RES_ERROR == translateValue(pCxt, pStmt->pVal)) {
        return pCxt->errCode;
      }
      pReq->isNull = (TSDB_DATA_TYPE_NULL == pStmt->pVal->node.resType.type);
      pReq->nTagVal = pStmt->pVal->node.resType.bytes;
      char* pVal = nodesGetValueFromNode(pStmt->pVal);
      pReq->pTagVal = IS_VAR_DATA_TYPE(pStmt->pVal->node.resType.type) ? pVal + VARSTR_HEADER_SIZE : pVal;
      break;
    case TSDB_ALTER_TABLE_ADD_COLUMN:
    case TSDB_ALTER_TABLE_DROP_COLUMN:
      pReq->colName = strdup(pStmt->colName);
      if (NULL == pReq->colName) {
        return TSDB_CODE_OUT_OF_MEMORY;
      }
      pReq->type = pStmt->dataType.type;
      pReq->flags = COL_SMA_ON;
      pReq->bytes = pStmt->dataType.bytes;
      break;
    case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
      pReq->colName = strdup(pStmt->colName);
      if (NULL == pReq->colName) {
        return TSDB_CODE_OUT_OF_MEMORY;
      }
      pReq->colModBytes = calcTypeBytes(pStmt->dataType);
      break;
    case TSDB_ALTER_TABLE_UPDATE_OPTIONS:
      if (-1 != pStmt->pOptions->ttl) {
        pReq->updateTTL = true;
        pReq->newTTL = pStmt->pOptions->ttl;
      }
      if ('\0' != pStmt->pOptions->comment[0]) {
        pReq->updateComment = true;
        pReq->newComment = strdup(pStmt->pOptions->comment);
        if (NULL == pReq->newComment) {
          return TSDB_CODE_OUT_OF_MEMORY;
        }
      }
      break;
    case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
      pReq->colName = strdup(pStmt->colName);
      pReq->colNewName = strdup(pStmt->newColName);
      if (NULL == pReq->colName || NULL == pReq->colNewName) {
        return TSDB_CODE_OUT_OF_MEMORY;
      }
      break;
    default:
      break;
  }

  return TSDB_CODE_SUCCESS;
}

static int32_t serializeAlterTbReq(STranslateContext* pCxt, SAlterTableStmt* pStmt, SVAlterTbReq* pReq,
                                   SArray* pArray) {
  SVgroupInfo vg = {0};
  int32_t code = getTableHashVgroup(pCxt, pStmt->dbName, pStmt->tableName, &vg);
  int tlen = 0;
  if (TSDB_CODE_SUCCESS == code) {
    tEncodeSize(tEncodeSVAlterTbReq, pReq, tlen, code);
  }
  if (TSDB_CODE_SUCCESS == code) {
    tlen += sizeof(SMsgHead);
    void* pMsg = taosMemoryMalloc(tlen);
    if (NULL == pMsg) {
      return TSDB_CODE_OUT_OF_MEMORY;
    }
    ((SMsgHead*)pMsg)->vgId = htonl(vg.vgId);
    ((SMsgHead*)pMsg)->contLen = htonl(tlen);
    void* pBuf = POINTER_SHIFT(pMsg, sizeof(SMsgHead));
    SEncoder coder = {0};
    tEncoderInit(&coder, pBuf, tlen - sizeof(SMsgHead));
    tEncodeSVAlterTbReq(&coder, pReq);
    tEncoderClear(&coder);

    SVgDataBlocks* pVgData = taosMemoryCalloc(1, sizeof(SVgDataBlocks));
    if (NULL == pVgData) {
      taosMemoryFree(pMsg);
      return TSDB_CODE_OUT_OF_MEMORY;
    }
    pVgData->vg = vg;
    pVgData->pData = pMsg;
    pVgData->size = tlen;
    pVgData->numOfTables = 1;
    taosArrayPush(pArray, &pVgData);
  }

  return code;
}

static int32_t buildModifyVnodeArray(STranslateContext* pCxt, SAlterTableStmt* pStmt, SVAlterTbReq* pReq,
                                     SArray** pArray) {
  SArray* pTmpArray = taosArrayInit(1, sizeof(void*));
  if (NULL == pTmpArray) {
    return TSDB_CODE_OUT_OF_MEMORY;
  }

  int32_t code = serializeAlterTbReq(pCxt, pStmt, pReq, pTmpArray);
  if (TSDB_CODE_SUCCESS == code) {
    *pArray = pTmpArray;
  } else {
    taosArrayDestroy(pTmpArray);
  }

  return code;
}

static int32_t rewriteAlterTable(STranslateContext* pCxt, SQuery* pQuery) {
  // todo
  SAlterTableStmt* pStmt = (SAlterTableStmt*)pQuery->pRoot;

  STableMeta* pTableMeta = NULL;
  int32_t code = getTableMeta(pCxt, pStmt->dbName, pStmt->tableName, &pTableMeta);
  if (TSDB_CODE_SUCCESS != code) {
    return code;
  }

  if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
    return TSDB_CODE_SUCCESS;
  } else if (TSDB_CHILD_TABLE != pTableMeta->tableType && TSDB_NORMAL_TABLE != pTableMeta->tableType) {
    return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_ALTER_TABLE);
  }

  SVAlterTbReq req = {0};
  code = buildAlterTbReq(pCxt, pStmt, &req);

  SArray* pArray = NULL;
  if (TSDB_CODE_SUCCESS == code) {
    code = buildModifyVnodeArray(pCxt, pStmt, &req, &pArray);
  }
  if (TSDB_CODE_SUCCESS == code) {
    code = rewriteToVnodeModifyOpStmt(pQuery, pArray);
  }

  return code;
}

static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
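serializeAlterTbReq uses the codebase's two-pass encoding idiom: tEncodeSize first runs the encoder in measuring mode to learn the payload length, the buffer is then allocated with extra room for the SMsgHead routing header, and the same encode function runs again against the real buffer. Condensed from the hunk above, with the head/body split made explicit:

    int tlen = 0;
    tEncodeSize(tEncodeSVAlterTbReq, pReq, tlen, code);   // pass 1: measure only
    tlen += sizeof(SMsgHead);                             // reserve the vnode routing header
    void* pMsg = taosMemoryMalloc(tlen);
    ((SMsgHead*)pMsg)->vgId = htonl(vg.vgId);             // header fields travel big-endian
    ((SMsgHead*)pMsg)->contLen = htonl(tlen);
    SEncoder coder = {0};
    tEncoderInit(&coder, POINTER_SHIFT(pMsg, sizeof(SMsgHead)), tlen - sizeof(SMsgHead));
    tEncodeSVAlterTbReq(&coder, pReq);                    // pass 2: write the body after the header
    tEncoderClear(&coder);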
@@ -4296,9 +4444,7 @@ static int32_t rewriteQuery(STranslateContext* pCxt, SQuery* pQuery) {
      code = rewriteDropTable(pCxt, pQuery);
      break;
    case QUERY_NODE_ALTER_TABLE_STMT:
-      if (TSDB_ALTER_TABLE_UPDATE_TAG_VAL == ((SAlterTableStmt*)pQuery->pRoot)->alterType) {
      code = rewriteAlterTable(pCxt, pQuery);
-      }
      break;
    default:
      break;
@@ -152,6 +152,8 @@ static char* getSyntaxErrFormat(int32_t errCode) {
      return "Invalid timeline function";
    case TSDB_CODE_PAR_INVALID_PASSWD:
      return "Invalid password";
+    case TSDB_CODE_PAR_INVALID_ALTER_TABLE:
+      return "Invalid alter table statement";
    case TSDB_CODE_OUT_OF_MEMORY:
      return "Out of memory";
    default:
@@ -3500,7 +3500,7 @@ static YYACTIONTYPE yy_reduce(
        yymsp[-4].minor.yy172 = yylhsminor.yy172;
        break;
      case 121: /* alter_table_clause ::= full_table_name SET TAG column_name NK_EQ literal */
-{ yylhsminor.yy172 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy172, &yymsp[-2].minor.yy105, yymsp[0].minor.yy172); }
+{ yylhsminor.yy172 = createAlterTableSetTag(pCxt, yymsp[-5].minor.yy172, &yymsp[-2].minor.yy105, releaseRawExprNode(pCxt, yymsp[0].minor.yy172)); }
        yymsp[-5].minor.yy172 = yylhsminor.yy172;
        break;
      case 123: /* multi_create_clause ::= multi_create_clause create_subtable_clause */
@@ -69,7 +69,7 @@ TEST_F(ParserInitialATest, alterDatabase) {
 * | COMMENT 'string_value'
 * }
 */
-TEST_F(ParserInitialATest, alterTable) {
+TEST_F(ParserInitialATest, alterSTable) {
  useDb("root", "test");

  SMAlterStbReq expect = {0};
@@ -119,7 +119,7 @@ TEST_F(ParserInitialATest, alterTable) {
  setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
    ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_ALTER_TABLE_STMT);
    SMAlterStbReq req = {0};
-    ASSERT_TRUE(TSDB_CODE_SUCCESS == tDeserializeSMAlterStbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req));
+    ASSERT_EQ(tDeserializeSMAlterStbReq(pQuery->pCmdMsg->pMsg, pQuery->pCmdMsg->msgLen, &req), TSDB_CODE_SUCCESS);
    ASSERT_EQ(std::string(req.name), std::string(expect.name));
    ASSERT_EQ(req.alterType, expect.alterType);
    ASSERT_EQ(req.numOfFields, expect.numOfFields);
@@ -139,24 +139,24 @@ TEST_F(ParserInitialATest, alterTable) {
    }
  });

-  setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10);
-  run("ALTER TABLE t1 TTL 10");
+  setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, nullptr, 10);
+  run("ALTER TABLE st1 TTL 10");

-  setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test");
-  run("ALTER TABLE t1 COMMENT 'test'");
+  setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_OPTIONS, 0, nullptr, 0, 0, nullptr, "test");
+  run("ALTER TABLE st1 COMMENT 'test'");

-  setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT);
-  run("ALTER TABLE t1 ADD COLUMN cc1 BIGINT");
+  setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_COLUMN, 1, "cc1", TSDB_DATA_TYPE_BIGINT);
+  run("ALTER TABLE st1 ADD COLUMN cc1 BIGINT");

-  setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1");
-  run("ALTER TABLE t1 DROP COLUMN c1");
+  setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_DROP_COLUMN, 1, "c1");
+  run("ALTER TABLE st1 DROP COLUMN c1");

-  setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c1", TSDB_DATA_TYPE_VARCHAR,
+  setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, 1, "c1", TSDB_DATA_TYPE_VARCHAR,
                     20 + VARSTR_HEADER_SIZE);
-  run("ALTER TABLE t1 MODIFY COLUMN c1 VARCHAR(20)");
+  run("ALTER TABLE st1 MODIFY COLUMN c1 VARCHAR(20)");

-  setAlterStbReqFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1");
-  run("ALTER TABLE t1 RENAME COLUMN c1 cc1");
+  setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, 2, "c1", 0, 0, "cc1");
+  run("ALTER TABLE st1 RENAME COLUMN c1 cc1");

  setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_ADD_TAG, 1, "tag11", TSDB_DATA_TYPE_BIGINT);
  run("ALTER TABLE st1 ADD TAG tag11 BIGINT");
@ -171,7 +171,127 @@ TEST_F(ParserInitialATest, alterTable) {
|
|||
setAlterStbReqFunc("st1", TSDB_ALTER_TABLE_UPDATE_TAG_NAME, 2, "tag1", 0, 0, "tag11");
|
||||
run("ALTER TABLE st1 RENAME TAG tag1 tag11");
|
||||
|
||||
// run("ALTER TABLE st1s1 SET TAG tag1=10");
|
||||
// todo
|
||||
// ADD {FULLTEXT | SMA} INDEX index_name (col_name [, col_name] ...) [index_option]
|
||||
}
|
||||
|
||||
TEST_F(ParserInitialATest, alterTable) {
  useDb("root", "test");

  SVAlterTbReq expect = {0};

  auto setAlterColFunc = [&](const char* pTbname, int8_t alterType, const char* pColName, int8_t dataType = 0,
                             int32_t dataBytes = 0, const char* pNewColName = nullptr) {
    memset(&expect, 0, sizeof(SVAlterTbReq));
    expect.tbName = strdup(pTbname);
    expect.action = alterType;
    expect.colName = strdup(pColName);

    switch (alterType) {
      case TSDB_ALTER_TABLE_ADD_COLUMN:
        expect.type = dataType;
        expect.flags = COL_SMA_ON;
        expect.bytes = dataBytes > 0 ? dataBytes : (dataType > 0 ? tDataTypes[dataType].bytes : 0);
        break;
      case TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES:
        expect.colModBytes = dataBytes;
        break;
      case TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME:
        expect.colNewName = strdup(pNewColName);
        break;
      default:
        break;
    }
  };

  auto setAlterTagFunc = [&](const char* pTbname, const char* pTagName, const uint8_t* pNewVal, uint32_t bytes) {
    memset(&expect, 0, sizeof(SVAlterTbReq));
    expect.tbName = strdup(pTbname);
    expect.action = TSDB_ALTER_TABLE_UPDATE_TAG_VAL;
    expect.tagName = strdup(pTagName);

    expect.isNull = (nullptr == pNewVal);
    expect.nTagVal = bytes;
    expect.pTagVal = pNewVal;
  };

  auto setAlterOptionsFunc = [&](const char* pTbname, int32_t ttl, const char* pComment = nullptr) {
    memset(&expect, 0, sizeof(SVAlterTbReq));
    expect.tbName = strdup(pTbname);
    expect.action = TSDB_ALTER_TABLE_UPDATE_OPTIONS;
    if (-1 != ttl) {
      expect.updateTTL = true;
      expect.newTTL = ttl;
    }
    if (nullptr != pComment) {
      expect.updateComment = true;
      expect.newComment = pComment;
    }
  };

  setCheckDdlFunc([&](const SQuery* pQuery, ParserStage stage) {
    ASSERT_EQ(nodeType(pQuery->pRoot), QUERY_NODE_VNODE_MODIF_STMT);
    SVnodeModifOpStmt* pStmt = (SVnodeModifOpStmt*)pQuery->pRoot;

    ASSERT_EQ(pStmt->sqlNodeType, QUERY_NODE_ALTER_TABLE_STMT);
    ASSERT_NE(pStmt->pDataBlocks, nullptr);
    ASSERT_EQ(taosArrayGetSize(pStmt->pDataBlocks), 1);
    SVgDataBlocks* pVgData = (SVgDataBlocks*)taosArrayGetP(pStmt->pDataBlocks, 0);
    void* pBuf = POINTER_SHIFT(pVgData->pData, sizeof(SMsgHead));
    SVAlterTbReq req = {0};
    SDecoder coder = {0};
    tDecoderInit(&coder, (const uint8_t*)pBuf, pVgData->size);
    ASSERT_EQ(tDecodeSVAlterTbReq(&coder, &req), TSDB_CODE_SUCCESS);

    ASSERT_EQ(std::string(req.tbName), std::string(expect.tbName));
    ASSERT_EQ(req.action, expect.action);
    if (nullptr != expect.colName) {
      ASSERT_EQ(std::string(req.colName), std::string(expect.colName));
    }
    ASSERT_EQ(req.type, expect.type);
    ASSERT_EQ(req.flags, expect.flags);
    ASSERT_EQ(req.bytes, expect.bytes);
    ASSERT_EQ(req.colModBytes, expect.colModBytes);
    if (nullptr != expect.colNewName) {
      ASSERT_EQ(std::string(req.colNewName), std::string(expect.colNewName));
    }
    if (nullptr != expect.tagName) {
      ASSERT_EQ(std::string(req.tagName), std::string(expect.tagName));
    }
    ASSERT_EQ(req.isNull, expect.isNull);
    ASSERT_EQ(req.nTagVal, expect.nTagVal);
    ASSERT_EQ(memcmp(req.pTagVal, expect.pTagVal, expect.nTagVal), 0);
    ASSERT_EQ(req.updateTTL, expect.updateTTL);
    ASSERT_EQ(req.newTTL, expect.newTTL);
    ASSERT_EQ(req.updateComment, expect.updateComment);
    if (nullptr != expect.newComment) {
      ASSERT_EQ(std::string(req.newComment), std::string(expect.newComment));
    }

    tDecoderClear(&coder);
  });

  setAlterOptionsFunc("t1", 10, nullptr);
  run("ALTER TABLE t1 TTL 10");

  setAlterOptionsFunc("t1", -1, "test");
  run("ALTER TABLE t1 COMMENT 'test'");

  setAlterColFunc("t1", TSDB_ALTER_TABLE_ADD_COLUMN, "cc1", TSDB_DATA_TYPE_BIGINT);
  run("ALTER TABLE t1 ADD COLUMN cc1 BIGINT");

  setAlterColFunc("t1", TSDB_ALTER_TABLE_DROP_COLUMN, "c1");
  run("ALTER TABLE t1 DROP COLUMN c1");

  setAlterColFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_BYTES, "c1", TSDB_DATA_TYPE_VARCHAR, 20 + VARSTR_HEADER_SIZE);
  run("ALTER TABLE t1 MODIFY COLUMN c1 VARCHAR(20)");

  setAlterColFunc("t1", TSDB_ALTER_TABLE_UPDATE_COLUMN_NAME, "c1", 0, 0, "cc1");
  run("ALTER TABLE t1 RENAME COLUMN c1 cc1");

  int64_t val = 10;
  setAlterTagFunc("st1s1", "tag1", (const uint8_t*)&val, sizeof(val));
  run("ALTER TABLE st1s1 SET TAG tag1=10");

  // todo
  // ADD {FULLTEXT | SMA} INDEX index_name (col_name [, col_name] ...) [index_option]

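The test above is record-then-check: each setAlter*Func call fills the expected SVAlterTbReq, run() parses the statement, and the callback registered via setCheckDdlFunc decodes the serialized request and compares it field by field. A minimal sketch of how a further case could be driven with the same helpers (the column name cc2 and its type are hypothetical, not part of this commit):

  // Hypothetical extra case reusing setAlterColFunc/run as defined above:
  // record the expected request first, then parse; the registered callback
  // decodes the SVAlterTbReq and compares it against `expect`.
  setAlterColFunc("t1", TSDB_ALTER_TABLE_ADD_COLUMN, "cc2", TSDB_DATA_TYPE_DOUBLE);
  run("ALTER TABLE t1 ADD COLUMN cc2 DOUBLE");
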
@@ -985,6 +985,8 @@ static int32_t getMsgType(ENodeType sqlType) {
      return TDMT_VND_CREATE_TABLE;
    case QUERY_NODE_DROP_TABLE_STMT:
      return TDMT_VND_DROP_TABLE;
    case QUERY_NODE_ALTER_TABLE_STMT:
      return TDMT_VND_ALTER_TABLE;
    default:
      break;
  }

@@ -256,6 +256,7 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
      return TSDB_CODE_SUCCESS;
    case TDMT_VND_CREATE_TABLE_RSP:
    case TDMT_VND_DROP_TABLE_RSP:
    case TDMT_VND_ALTER_TABLE_RSP:
    case TDMT_VND_SUBMIT_RSP:
      break;
    default:

@@ -1131,6 +1132,24 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
      SCH_ERR_RET(schProcessOnTaskSuccess(pJob, pTask));
      break;
    }
    case TDMT_VND_ALTER_TABLE_RSP: {
      SVAlterTbRsp rsp = {0};
      if (msg) {
        SDecoder coder = {0};
        tDecoderInit(&coder, msg, msgSize);
        code = tDecodeSVAlterTbRsp(&coder, &rsp);
        tDecoderClear(&coder);
        SCH_ERR_JRET(code);
        SCH_ERR_JRET(rsp.code);
      }

      SCH_ERR_JRET(rspCode);

      if (NULL == msg) {
        SCH_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
      }
      break;
    }
    case TDMT_VND_SUBMIT_RSP: {
      SCH_ERR_JRET(rspCode);

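The new TDMT_VND_ALTER_TABLE_RSP branch surfaces both the transport status (rspCode) and the server-side status carried inside the reply (rsp.code). A minimal sketch of that decode-then-propagate pattern, assuming only the SDecoder/tDecodeSVAlterTbRsp API already used in the hunk above (the helper name is hypothetical):

  // Sketch only: decode an ALTER TABLE response and fold the server-side
  // status into the return code; the decoder is cleared on every path.
  static int32_t decodeAlterTbRsp(const char* msg, int32_t msgSize, SVAlterTbRsp* pRsp) {
    SDecoder coder = {0};
    tDecoderInit(&coder, (const uint8_t*)msg, msgSize);
    int32_t code = tDecodeSVAlterTbRsp(&coder, pRsp);
    tDecoderClear(&coder);
    return (TSDB_CODE_SUCCESS == code) ? pRsp->code : code;
  }
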
@@ -1391,6 +1410,10 @@ int32_t schHandleDropTableCallback(void *param, const SDataBuf *pMsg, int32_t co
  return schHandleCallback(param, pMsg, TDMT_VND_DROP_TABLE_RSP, code);
}

int32_t schHandleAlterTableCallback(void *param, const SDataBuf *pMsg, int32_t code) {
  return schHandleCallback(param, pMsg, TDMT_VND_ALTER_TABLE_RSP, code);
}

int32_t schHandleQueryCallback(void *param, const SDataBuf *pMsg, int32_t code) {
  return schHandleCallback(param, pMsg, TDMT_VND_QUERY_RSP, code);
}

@@ -1490,6 +1513,9 @@ int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) {
    case TDMT_VND_DROP_TABLE:
      *fp = schHandleDropTableCallback;
      break;
    case TDMT_VND_ALTER_TABLE:
      *fp = schHandleAlterTableCallback;
      break;
    case TDMT_VND_SUBMIT:
      *fp = schHandleSubmitCallback;
      break;

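Each response type is wired through a thin callback plus one case in schGetCallbackFp, exactly as the ALTER TABLE additions show. A sketch of how a further message type would follow the same pattern (TDMT_VND_FOO and its handler are made-up names, not part of this commit):

  // Hypothetical wiring for a new vnode message type:
  int32_t schHandleFooCallback(void *param, const SDataBuf *pMsg, int32_t code) {
    return schHandleCallback(param, pMsg, TDMT_VND_FOO_RSP, code);
  }
  // ...and the matching dispatch entry in schGetCallbackFp:
  //   case TDMT_VND_FOO:
  //     *fp = schHandleFooCallback;
  //     break;
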
@@ -2010,6 +2036,7 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
  switch (msgType) {
    case TDMT_VND_CREATE_TABLE:
    case TDMT_VND_DROP_TABLE:
    case TDMT_VND_ALTER_TABLE:
    case TDMT_VND_SUBMIT: {
      msgSize = pTask->msgLen;
      msg = taosMemoryCalloc(1, msgSize);

@@ -7,7 +7,7 @@ system sh/cfg.sh -n dnode1 -c udf -v 1

print ========= start dnode1 as LEADER
system sh/exec.sh -n dnode1 -s start
sleep 2000
sleep 1000
sql connect

print ======== step1 udf

@@ -220,7 +220,6 @@ if $data[0][4] == LEADER then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][3]
elif $data[0][6] == LEADER then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][5]
endi
elif $data[0][8] == LEADER then
print ---- vgroup $data[0][0] leader switch to dnode $data[0][7]
else

@@ -342,7 +341,6 @@ elif $data[0][6] == LEADER then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][7]
endi
elif $data[0][8] == LEADER then
if $data[0][4] == LEADER then
goto check_vg_ready_3

@@ -420,7 +420,6 @@ elif $data[0][6] == LEADER then
goto check_vg_ready_3
endi
print ---- vgroup $data[0][0] leader locating dnode $data[0][7]
endi
elif $data[0][8] == LEADER then
if $data[0][4] == LEADER then
goto check_vg_ready_3

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 3
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,17 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$cdb_index = 0
#=============================== start consume =============================#

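With the commented-out options left disabled, the assembled value is the two-entry list 'group.id:cgrp1,enable.auto.commit:false'. Auto-commit is off, so the consumer is expected to commit offsets itself; the new $ifmanualcommit flag introduced above records that expectation for the consume script.
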
@@ -74,7 +84,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -102,7 +112,7 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start

@@ -145,7 +155,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -173,7 +183,7 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -216,7 +226,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -244,7 +254,7 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 5
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,10 +54,19 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$cdb_index = 0

#=============================== start consume =============================#

print ================ test consume from stb

@@ -74,7 +84,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table for stb
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -102,10 +112,10 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start

@@ -177,7 +187,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table for ctb
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -205,9 +215,9 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -279,7 +289,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table for ntb
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -307,9 +317,9 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 3
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,17 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$topicNum = 3

@@ -74,7 +84,7 @@ $consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start

@@ -109,7 +119,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -131,7 +141,7 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -166,7 +176,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -188,7 +198,7 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 5
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,16 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$topicNum = 3

@@ -74,9 +83,9 @@ $consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start

@@ -139,7 +148,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table for ctb
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -161,9 +170,9 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -226,7 +235,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table for ntb
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -248,9 +257,9 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 5
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,16 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$topicNum = 2

@@ -72,7 +81,7 @@ $consumerId = 0
$totalMsgOfOneTopic = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

$topicList = ' . topic_stb_all

@@ -80,7 +89,7 @@ $topicList = $topicList . ,
$topicList = $topicList . topic_stb_function
$topicList = $topicList . '
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start

@@ -158,7 +167,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table for ctb
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -179,14 +188,14 @@ $consumerId = 0
$totalMsgOfOneTopic = $rowsPerCtb
$totalMsgOfCtb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

$topicList = ' . topic_ctb_function
$topicList = $topicList . ,
$topicList = $topicList . topic_ctb_all
$topicList = $topicList . '
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start

@@ -249,7 +258,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table for ntb
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -270,7 +279,7 @@ $consumerId = 0
$totalMsgOfOneTopic = $rowsPerCtb
$totalMsgOfNtb = $totalMsgOfOneTopic * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

$topicList = ' . topic_ntb_function

@@ -278,7 +287,7 @@ $topicList = $topicList . ,
$topicList = $topicList . topic_ntb_all
$topicList = $topicList . '
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 3
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,17 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$cdb_index = 0
#=============================== start consume =============================#

@@ -74,7 +84,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -102,7 +112,7 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start

@@ -145,7 +155,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -173,7 +183,7 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -216,7 +226,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -244,7 +254,7 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 5
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,16 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$cdb_index = 0
#=============================== start consume =============================#

@@ -74,7 +83,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -102,9 +111,9 @@ endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start

@@ -189,7 +198,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -217,9 +226,9 @@ endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -291,7 +300,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -319,9 +328,9 @@ endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 3
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,17 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$topicNum = 3

@@ -71,7 +81,7 @@ $consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start

@@ -106,7 +116,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -128,7 +138,7 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -163,7 +173,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -185,7 +195,7 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -27,6 +27,7 @@ $tstart = 1640966400000 # 2022-01-01 00:00:00.000

$pullDelay = 5
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0

@@ -53,8 +54,16 @@ sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
# return -1
#endi

#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
print ========== key list: $keyList

$topicNum = 3

@@ -71,9 +80,9 @@ $consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$totalMsgOfStb = $totalMsgOfStb * $topicNum
$expectmsgcnt = $totalMsgOfStb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $dbName -s start

@@ -148,7 +157,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -170,9 +179,9 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfCtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -236,7 +245,7 @@ sleep 500
sql use $cdbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -258,9 +267,9 @@ $topicList = $topicList . '
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb * $topicNum
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
$consumerId = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata )
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )

print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start

@@ -48,7 +48,7 @@ endi
sql use $dbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

@@ -48,7 +48,7 @@ endi
sql use $dbName

print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)

sql show tables

(File diff suppressed because it is too large.)

@@ -22,18 +22,6 @@ class TDTestCase:
        tdSql.init(conn.cursor(), logSql)

    def insertData(self, tb_name):
        # insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1)',
        #                    f'insert into {tb_name} values ("2021-01-05 12:00:00", 2, 2, 1, 3, 1.1, 1.1, "binary", "nchar", true, 2)',
        #                    f'insert into {tb_name} values ("2021-01-07 12:00:00", 1, 3, 1, 2, 1.1, 1.1, "binary", "nchar", true, 3)',
        #                    f'insert into {tb_name} values ("2021-01-09 12:00:00", 1, 2, 4, 3, 1.1, 1.1, "binary", "nchar", true, 4)',
        #                    f'insert into {tb_name} values ("2021-01-11 12:00:00", 1, 2, 5, 5, 1.1, 1.1, "binary", "nchar", true, 5)',
        #                    f'insert into {tb_name} values ("2021-01-13 12:00:00", 1, 2, 1, 3, 6.6, 1.1, "binary", "nchar", true, 6)',
        #                    f'insert into {tb_name} values ("2021-01-15 12:00:00", 1, 2, 1, 3, 1.1, 7.7, "binary", "nchar", true, 7)',
        #                    f'insert into {tb_name} values ("2021-01-17 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary8", "nchar", true, 8)',
        #                    f'insert into {tb_name} values ("2021-01-19 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar9", true, 9)',
        #                    f'insert into {tb_name} values ("2021-01-21 12:00:00", 1, 2, 1, 3, 1.1, 1.1, "binary", "nchar", false, 10)',
        #                    f'insert into {tb_name} values ("2021-01-23 12:00:00", 1, 3, 1, 3, 1.1, 1.1, Null, Null, false, 11)'
        #                    ]
        insert_sql_list = [f'insert into {tb_name} values ("2021-01-01 12:00:00", 1, 1, 1, 3, 1.1, 1.1, "binary", "nchar", true, 1, 2, 3, 4)',
                           f'insert into {tb_name} values ("2021-01-05 12:00:00", 2, 2, 1, 3, 1.1, 1.1, "binary", "nchar", true, 2, 3, 4, 5)',
                           f'insert into {tb_name} values ("2021-01-07 12:00:00", 1, 3, 1, 2, 1.1, 1.1, "binary", "nchar", true, 3, 4, 5, 6)',

@@ -54,7 +42,6 @@ class TDTestCase:
        tb_name = tdCom.getLongName(8, "letters")
        tdSql.execute(
            f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 tinyint unsigned, c11 smallint unsigned, c12 int unsigned, c13 bigint unsigned)")
            # f"CREATE TABLE {tb_name} (ts timestamp, c1 tinyint, c2 smallint, c3 int, c4 bigint, c5 float, c6 double, c7 binary(100), c8 nchar(200), c9 bool, c10 int)")
        self.insertData(tb_name)
        return tb_name

@@ -95,6 +82,31 @@ class TDTestCase:

    def queryTsCol(self, tb_name, check_elm=None):
        select_elm = "*" if check_elm is None else check_elm
        # ts in
        query_sql = f'select {select_elm} from {tb_name} where ts in ("2021-01-11 12:00:00", "2021-01-13 12:00:00")'
        tdSql.query(query_sql)
        tdSql.checkRows(2)
        tdSql.checkEqual(self.queryLastC10(query_sql), 6) if select_elm == "*" else False
        # ts not in
        query_sql = f'select {select_elm} from {tb_name} where ts not in ("2021-01-11 12:00:00", "2021-01-13 12:00:00")'
        tdSql.query(query_sql)
        tdSql.checkRows(9)
        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
        # ts not null
        query_sql = f'select {select_elm} from {tb_name} where ts is not Null'
        tdSql.query(query_sql)
        tdSql.checkRows(11)
        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
        # ts null
        query_sql = f'select {select_elm} from {tb_name} where ts is Null'
        tdSql.query(query_sql)
        tdSql.checkRows(0)
        # not support like not like match nmatch
        tdSql.error(f'select {select_elm} from {tb_name} where ts like ("2021-01-11 12:00:00%")')
        tdSql.error(f'select {select_elm} from {tb_name} where ts not like ("2021-01-11 12:00:0_")')
        tdSql.error(f'select {select_elm} from {tb_name} where ts match "2021-01-11 12:00:00%"')
        tdSql.error(f'select {select_elm} from {tb_name} where ts nmatch "2021-01-11 12:00:00%"')

        # ts and ts
        query_sql = f'select {select_elm} from {tb_name} where ts > "2021-01-11 12:00:00" or ts < "2021-01-13 12:00:00"'
        tdSql.query(query_sql)

@@ -1422,9 +1434,9 @@ class TDTestCase:
        tdSql.query(query_sql)
        tdSql.checkRows(11)
        tdSql.checkEqual(self.queryLastC10(query_sql), 11) if select_elm == "*" else False
        query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c9 > "binary" and c9 >= "binary8" or c9 < "binary9" and c9 <= "binary" and c9 != 2 and c9 <> 2 and c9 = 4 or c9 is not null and c9 between 2 and 4 and c9 not between 1 and 2 and c9 in (2,4) and c9 not in (1,2) or c9 match "binary[28]" or c9 nmatch "binary"'
        query_sql = f'select c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13 from {tb_name} where c9 > "binary" and c9 >= "binary8" or c9 < "binary9" and c9 <= "binary" and c9 != 2 and c9 <> 2 and c9 = 4 or c9 is not null and c9 between 2 and 4 and c9 not between 1 and 2 and c9 in (2,4) and c9 not in (1,2)'
        tdSql.query(query_sql)
        tdSql.checkRows(11)
        tdSql.checkRows(9)

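Dropping the trailing 'or c9 match "binary[28]" or c9 nmatch "binary"' alternatives narrows the filter, so the expected result for this query falls from 11 rows to 9.
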
    def queryFullColType(self, tb_name, check_elm=None):
        select_elm = "*" if check_elm is None else check_elm

@ -0,0 +1,412 @@
import datetime

from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *

PRIMARY_COL = "ts"

INT_COL = "c1"
BINT_COL = "c2"
SINT_COL = "c3"
TINT_COL = "c4"
FLOAT_COL = "c5"
DOUBLE_COL = "c6"
BOOL_COL = "c7"

BINARY_COL = "c8"
NCHAR_COL = "c9"
TS_COL = "c10"

NUM_COL = [ INT_COL, BINT_COL, SINT_COL, TINT_COL, FLOAT_COL, DOUBLE_COL, ]
CHAR_COL = [ BINARY_COL, NCHAR_COL, ]
BOOLEAN_COL = [ BOOL_COL, ]
TS_TYPE_COL = [ TS_COL, ]

class TDTestCase:

    def init(self, conn, logSql):
        tdLog.debug(f"start to execute {__file__}")
        tdSql.init(conn.cursor())

    def __query_condition(self, tbname):
        query_condition = []
        for char_col in CHAR_COL:
            query_condition.extend(
                (
                    f"{tbname}.{char_col}",
                    f"upper( {tbname}.{char_col} )",
                    f"char_length( {tbname}.{char_col} )",
                    f"concat( {tbname}.{char_col}, {tbname}.{char_col} )",
                    f"concat_ws( '_', {tbname}.{char_col}, {tbname}.{char_col} )",
                    f"length( {tbname}.{char_col} )",
                    f"lower( {tbname}.{char_col} )",
                    f"ltrim( {tbname}.{char_col} )",
                    f"rtrim( {tbname}.{char_col} )",
                    f"substr( {tbname}.{char_col}, 1 )",
                    f"count( {tbname}.{char_col} )",
                    f"cast( {tbname}.{char_col} as nchar(3) )",
                    f"cast( {tbname}.{char_col} as nchar(8) )",
                )
            )
            query_condition.extend( f"cast( {tbname}.{un_char_col} as binary(16) ) " for un_char_col in NUM_COL)
            query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{char_col_2} as binary(32) ) " for char_col_2 in CHAR_COL )
            query_condition.extend( f"cast( {tbname}.{char_col} + {tbname}.{un_char_col} as binary(32) ) " for un_char_col in NUM_COL )

        for num_col in NUM_COL:
            query_condition.extend(
                (
                    f"{tbname}.{num_col}",
                    f"ceil( {tbname}.{num_col} )",
                    f"abs( {tbname}.{num_col} )",
                    f"acos( {tbname}.{num_col} )",
                    f"asin( {tbname}.{num_col} )",
                    f"atan( {tbname}.{num_col} )",
                    f"cos( {tbname}.{num_col} )",
                    f"floor( {tbname}.{num_col} )",
                    f"log( {tbname}.{num_col}, {tbname}.{num_col})",
                    f"sin( {tbname}.{num_col} )",
                    f"sqrt( {tbname}.{num_col} )",
                    f"tan( {tbname}.{num_col} )",
                    f"round( {tbname}.{num_col} )",
                    f"max( {tbname}.{num_col} )",
                    f"sum( {tbname}.{num_col} )",
                    f"count( {tbname}.{num_col} )",
                    f"min( {tbname}.{num_col} )",
                )
            )
            query_condition.extend( f"{tbname}.{num_col} + {tbname}.{num_col_2}" for num_col_2 in NUM_COL )
            query_condition.extend( f"{tbname}.{num_col} + {tbname}.{char_col} " for char_col in CHAR_COL )

        query_condition.extend(
            (
                ''' "test1234!@#$%^&*():'><?/.,][}{" ''',
                ''' "test12" ''',
                # 1010,
            )
        )

        return query_condition

    def __join_condition(self, tb_list, filter=PRIMARY_COL, INNER=False):
        table_reference = tb_list[0]
        join_condition = table_reference
        join = "inner join" if INNER else "join"
        for i in range(len(tb_list[1:])):
            join_condition += f" {join} {tb_list[i+1]} on {table_reference}.{filter}={tb_list[i+1]}.{filter}"

        return join_condition

    def __where_condition(self, col=None, tbname=None, query_condition=None):
        if query_condition and isinstance(query_condition, str):
            if query_condition.startswith("count"):
                query_condition = query_condition[6:-1]
            elif query_condition.startswith("max"):
                query_condition = query_condition[4:-1]
            elif query_condition.startswith("sum"):
                query_condition = query_condition[4:-1]
            elif query_condition.startswith("min"):
                query_condition = query_condition[4:-1]

        if query_condition:
            return f" where {query_condition} is not null"
        if col in NUM_COL:
            return f" where abs( {tbname}.{col} ) >= 0"
        if col in CHAR_COL:
            return f" where lower( {tbname}.{col} ) like 'bina%' or lower( {tbname}.{col} ) like '_cha%' "
        if col in BOOLEAN_COL:
            return f" where {tbname}.{col} in (false, true) "
        if col in TS_TYPE_COL or col == PRIMARY_COL:
            return f" where cast( {tbname}.{col} as binary(16) ) is not null "

        return ""

    def __group_condition(self, col, having=None):
        if isinstance(col, str):
            if col.startswith("count"):
                col = col[6:-1]
            elif col.startswith("max"):
                col = col[4:-1]
            elif col.startswith("sum"):
                col = col[4:-1]
            elif col.startswith("min"):
                col = col[4:-1]
        return f" group by {col} having {having}" if having else f" group by {col} "

    def __single_sql(self, select_clause, from_clause, where_condition="", group_condition=""):
        if isinstance(select_clause, str) and "on" not in from_clause and select_clause.split(".")[0] != from_clause.split(".")[0]:
            return
        return f"select {select_clause} from {from_clause} {where_condition} {group_condition}"

    @property
    def __join_tblist(self):
        return [
            ["ct1", "ct2"],
            ["ct1", "ct4"],
            ["ct1", "t1"],
            ["ct2", "ct4"],
            ["ct2", "t1"],
            ["ct4", "t1"],
            # ["ct1", "ct2", "ct4"],
            # ["ct1", "ct2", "t1"],
            # ["ct1", "ct4", "t1"],
            # ["ct2", "ct4", "t1"],
            # ["ct1", "ct2", "ct4", "t1"],
        ]

    @property
    def __tb_list(self):
        return [
            "ct1",
            "ct2",
            "ct4",
            "t1",
        ]

    def sql_list(self):
        sqls = []
        __join_tblist = self.__join_tblist
        for join_tblist in __join_tblist:
            for join_tb in join_tblist:
                select_claus_list = self.__query_condition(join_tb)
                for select_claus in select_claus_list:
                    group_claus = self.__group_condition(col=select_claus)
                    where_claus = self.__where_condition(query_condition=select_claus)
                    having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
                    sqls.extend(
                        (
                            self.__single_sql(select_claus, join_tb, where_claus, group_claus),
                            self.__single_sql(select_claus, join_tb, where_claus, having_claus),
                            self.__single_sql(select_claus, self.__join_condition(join_tblist), where_claus, having_claus),
                            self.__single_sql(select_claus, self.__join_condition(join_tblist, INNER=True), where_claus, having_claus),
                            self.__single_sql(select_claus, join_tb, where_claus),
                            self.__single_sql(select_claus, join_tb, having_claus),
                            self.__single_sql(select_claus, join_tb, group_claus),
                            self.__single_sql(select_claus, join_tb),
                        )
                    )
        __no_join_tblist = self.__tb_list
        for tb in __no_join_tblist:
            select_claus_list = self.__query_condition(tb)
            for select_claus in select_claus_list:
                group_claus = self.__group_condition(col=select_claus)
                where_claus = self.__where_condition(query_condition=select_claus)
                having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
                sqls.extend(
                    (
                        self.__single_sql(select_claus, tb, where_claus, group_claus),
                        self.__single_sql(select_claus, tb, where_claus, having_claus),
                        self.__single_sql(select_claus, tb, where_claus),
                        self.__single_sql(select_claus, tb, group_claus),
                        self.__single_sql(select_claus, tb, having_claus),
                        self.__single_sql(select_claus, tb),
                    )
                )

        return filter(None, sqls)
        # return list(filter(None, sqls))
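
For orientation, here is one combination traced through the helpers above, assumed to run inside a TDTestCase method; the literal expression is illustrative, and the commented results follow from the code as written:

    # One generated statement, step by step (illustrative expression):
    select_claus = "ct1.c1"
    where_claus = self.__where_condition(query_condition=select_claus)
    # -> " where ct1.c1 is not null"
    having_claus = self.__group_condition(col=select_claus, having=f"{select_claus} is not null")
    # -> " group by ct1.c1 having ct1.c1 is not null"
    sql = self.__single_sql(select_claus, "ct1", where_claus, having_claus)
    # -> "select ct1.c1 from ct1  where ct1.c1 is not null  group by ct1.c1 having ct1.c1 is not null"
    # Note: expressions such as "abs( ct1.c1 )" fail __single_sql's table-prefix
    # check (split(".")[0] != "ct1"), return None, and are dropped by filter(None, sqls).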
    def __get_type(self, col):
        if tdSql.cursor.istype(col, "BOOL"):
            return "BOOL"
        if tdSql.cursor.istype(col, "INT"):
            return "INT"
        if tdSql.cursor.istype(col, "BIGINT"):
            return "BIGINT"
        if tdSql.cursor.istype(col, "TINYINT"):
            return "TINYINT"
        if tdSql.cursor.istype(col, "SMALLINT"):
            return "SMALLINT"
        if tdSql.cursor.istype(col, "FLOAT"):
            return "FLOAT"
        if tdSql.cursor.istype(col, "DOUBLE"):
            return "DOUBLE"
        if tdSql.cursor.istype(col, "BINARY"):
            return "BINARY"
        if tdSql.cursor.istype(col, "NCHAR"):
            return "NCHAR"
        if tdSql.cursor.istype(col, "TIMESTAMP"):
            return "TIMESTAMP"
        if tdSql.cursor.istype(col, "JSON"):
            return "JSON"
        if tdSql.cursor.istype(col, "TINYINT UNSIGNED"):
            return "TINYINT UNSIGNED"
        if tdSql.cursor.istype(col, "SMALLINT UNSIGNED"):
            return "SMALLINT UNSIGNED"
        if tdSql.cursor.istype(col, "INT UNSIGNED"):
            return "INT UNSIGNED"
        if tdSql.cursor.istype(col, "BIGINT UNSIGNED"):
            return "BIGINT UNSIGNED"

    def union_check(self):
        sqls = list(self.sql_list())  # materialize: the inner loop re-iterates the sequence
        for sql1 in sqls:
            tdSql.query(sql1)
            res1_type = self.__get_type(0)
            for sql2 in sqls:
                tdSql.query(sql2)
                union_type = False
                res2_type = self.__get_type(0)

                if res1_type in ( "BIGINT" , "NCHAR" ):
                    union_type = True
                elif res2_type == res1_type:
                    union_type = True
                elif res1_type == "TIMESTAMP" and res2_type not in ("BINARY", "NCHAR"):
                    union_type = True
                elif res1_type == "BINARY" and res2_type != "NCHAR":
                    union_type = True

                if union_type:
                    tdSql.query(f"{sql1} union {sql2}")
                    tdSql.checkCols(1)
                    tdSql.query(f"{sql1} union all {sql2}")
                    tdSql.checkCols(1)
                else:
                    tdSql.error(f"{sql1} union {sql2}")
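
Read as a predicate, the branch ladder above amounts to the following pairwise rule; this standalone restatement is my distillation for readability, not an authoritative TDengine type-compatibility spec:

    def union_compatible(res1_type: str, res2_type: str) -> bool:
        """Mirror of the union_type decision in union_check() above."""
        if res1_type in ("BIGINT", "NCHAR"):   # these union with anything
            return True
        if res2_type == res1_type:             # identical first-column types
            return True
        if res1_type == "TIMESTAMP" and res2_type not in ("BINARY", "NCHAR"):
            return True
        if res1_type == "BINARY" and res2_type != "NCHAR":
            return True
        return False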
    def __test_error(self):

        tdSql.error( "show tables union show tables" )
        tdSql.error( "create table errtb1 union all create table errtb2" )
        tdSql.error( "drop table ct1 union all drop table ct3" )
        tdSql.error( "select c1 from ct1 union all drop table ct3" )
        tdSql.error( "select c1 from ct1 union all '' " )
        tdSql.error( " '' union all select c1 from ct1 " )
        tdSql.error( "select c1 from ct1 union select c1 from ct2 union select c1 from ct4 ")

    def all_test(self):
        self.__test_error()
        self.union_check()

    def __create_tb(self):

        tdLog.printNoPrefix("==========step1:create table")
        create_stb_sql = f'''create table stb1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            ) tags (t1 int)
            '''
        create_ntb_sql = f'''create table t1(
                ts timestamp, {INT_COL} int, {BINT_COL} bigint, {SINT_COL} smallint, {TINT_COL} tinyint,
                {FLOAT_COL} float, {DOUBLE_COL} double, {BOOL_COL} bool,
                {BINARY_COL} binary(16), {NCHAR_COL} nchar(32), {TS_COL} timestamp
            )
            '''
        tdSql.execute(create_stb_sql)
        tdSql.execute(create_ntb_sql)

        for i in range(4):
            tdSql.execute(f'create table ct{i+1} using stb1 tags ( {i+1} )')

    def __insert_data(self, rows):
        now_time = int(datetime.datetime.timestamp(datetime.datetime.now()) * 1000)
        for i in range(rows):
            tdSql.execute(
                f"insert into ct1 values ( { now_time - i * 1000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct4 values ( { now_time - i * 7776000000 }, {i}, {11111 * i}, {111 * i % 32767 }, {11 * i % 127}, {1.11*i}, {1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
            tdSql.execute(
                f"insert into ct2 values ( { now_time - i * 7776000000 }, {-i}, {-11111 * i}, {-111 * i % 32767 }, {-11 * i % 127}, {-1.11*i}, {-1100.0011*i}, {i%2}, 'binary{i}', 'nchar_测试_{i}', { now_time + 1 * i } )"
            )
        tdSql.execute(
            f'''insert into ct1 values
            ( { now_time - rows * 5 }, 0, 0, 0, 0, 0, 0, 0, 'binary0', 'nchar_测试_0', { now_time + 8 } )
            ( { now_time + 10000 }, { rows }, -99999, -999, -99, -9.99, -99.99, 1, 'binary9', 'nchar_测试_9', { now_time + 9 } )
            '''
        )

        tdSql.execute(
            f'''insert into ct4 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000}, {pow(2,31)-pow(2,15)}, {pow(2,63)-pow(2,30)}, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000}
            )
            (
                { now_time + 2592000000 }, {pow(2,31)-pow(2,16)}, {pow(2,63)-pow(2,31)}, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000}
            )
            '''
        )

        tdSql.execute(
            f'''insert into ct2 values
            ( { now_time - rows * 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3888000000 + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7776000000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            (
                { now_time + 5184000000 }, { -1 * pow(2,31) + pow(2,15) }, { -1 * pow(2,63) + pow(2,30) }, -32766, -126,
                { -1 * 3.2 * pow(10,38) }, { -1.2 * pow(10,308) }, { rows % 2 }, "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 2592000000 }, { -1 * pow(2,31) + pow(2,16) }, { -1 * pow(2,63) + pow(2,31) }, -32767, -127,
                { - 3.3 * pow(10,38) }, { -1.3 * pow(10,308) }, { (rows-1) % 2 }, "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

        for i in range(rows):
            insert_data = f'''insert into t1 values
                ( { now_time - i * 3600000 }, {i}, {i * 11111}, { i % 32767 }, { i % 127}, { i * 1.11111 }, { i * 1000.1111 }, { i % 2},
                "binary_{i}", "nchar_测试_{i}", { now_time - 1000 * i } )
                '''
            tdSql.execute(insert_data)
        tdSql.execute(
            f'''insert into t1 values
            ( { now_time + 10800000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - (( rows // 2 ) * 60 + 30) * 60000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time - rows * 3600000 }, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL )
            ( { now_time + 7200000 }, { pow(2,31) - pow(2,15) }, { pow(2,63) - pow(2,30) }, 32767, 127,
                { 3.3 * pow(10,38) }, { 1.3 * pow(10,308) }, { rows % 2 },
                "binary_limit-1", "nchar_测试_limit-1", { now_time - 86400000 }
            )
            (
                { now_time + 3600000 } , { pow(2,31) - pow(2,16) }, { pow(2,63) - pow(2,31) }, 32766, 126,
                { 3.2 * pow(10,38) }, { 1.2 * pow(10,308) }, { (rows-1) % 2 },
                "binary_limit-2", "nchar_测试_limit-2", { now_time - 172800000 }
            )
            '''
        )

    def run(self):
        tdSql.prepare()

        tdLog.printNoPrefix("==========step1:create table")
        self.__create_tb()

        tdLog.printNoPrefix("==========step2:insert data")
        self.rows = 10
        self.__insert_data(self.rows)

        tdLog.printNoPrefix("==========step3:all check")
        self.all_test()

        tdDnodes.stop(1)
        tdDnodes.start(1)

        tdSql.execute("use db")

        tdLog.printNoPrefix("==========step4:after wal, all check again ")
        self.all_test()

    def stop(self):
        tdSql.close()
        tdLog.success(f"{__file__} successfully executed")

tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

@ -52,7 +52,7 @@ class TDTestCase:
    def create_tables(self,tsql, dbName,vgroups,stbName,ctbNum,rowsPerTbl):
        tsql.execute("create database if not exists %s vgroups %d"%(dbName, vgroups))
        tsql.execute("use %s" %dbName)
        tsql.execute("create table %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
        tsql.execute("create table if not exists %s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%stbName)
        pre_create = "create table"
        sql = pre_create
        #tdLog.debug("doing create one stable %s and %d child table in %s ..." %(stbname, count ,dbname))

@ -345,11 +345,11 @@ class TDTestCase:
        after starting consumer, create ctables ")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db2', \
                         'dbName': 'db3', \
                         'vgroups': 1, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'rowsPerTbl': 30000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

@ -375,21 +375,32 @@ class TDTestCase:
        else:
            time.sleep(1)

        tdLog.info("create stable2 for the second topic")
        parameterDict2 = {'cfg': '', \
                          'dbName': 'db3', \
                          'vgroups': 1, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 30000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath
        tdSql.execute("create stable if not exists %s.%s (ts timestamp, c1 bigint, c2 binary(16)) tags(t1 int)"%(parameterDict2['dbName'], parameterDict2['stbName']))

        tdLog.info("create topics from super table")
        topicFromStb = 'topic_stb_column2'
        topicFromCtb = 'topic_ctb_column2'
        topicFromStb = 'topic_stb_column3'
        topicFromStb2 = 'topic_stb_column32'

        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb, parameterDict['dbName'], parameterDict['stbName']))
        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s_0" %(topicFromCtb, parameterDict['dbName'], parameterDict['stbName']))
        tdSql.execute("create topic %s as select ts, c1, c2 from %s.%s" %(topicFromStb2, parameterDict2['dbName'], parameterDict2['stbName']))

        time.sleep(1)
        tdSql.query("show topics")
        topic1 = tdSql.getData(0 , 0)
        topic2 = tdSql.getData(1 , 0)
        tdLog.info("show topics: %s, %s"%(topic1, topic2))
        if topic1 != topicFromStb and topic1 != topicFromCtb:
        if topic1 != topicFromStb and topic1 != topicFromStb2:
            tdLog.exit("topic error1")
        if topic2 != topicFromStb and topic2 != topicFromCtb:
        if topic2 != topicFromStb and topic2 != topicFromStb2:
            tdLog.exit("topic error2")

        tdLog.info("create consume info table and consume result table")

@ -397,10 +408,9 @@ class TDTestCase:
        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

        rowsOfNewCtb = 1000
        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + rowsOfNewCtb
        topicList = topicFromStb
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicFromStb + ',' + topicFromStb2
        ifcheckdata = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\

@ -432,17 +442,13 @@ class TDTestCase:
        tdLog.info(shellCmd)
        os.system(shellCmd)

        # create new child table and insert data
        newCtbName = 'newctb'
        tdSql.query("create table %s.%s using %s.%s tags(9999)"%(parameterDict["dbName"], newCtbName, parameterDict["dbName"], parameterDict["stbName"]))
        startTs = parameterDict["startTs"]
        for j in range(rowsOfNewCtb):
            sql = "insert into %s.%s values (%d, %d, 'tmqrow_%d') "%(parameterDict["dbName"], newCtbName, startTs + j, j, j)
            tdSql.execute(sql)
        tdLog.debug("insert data into new child table ............ [OK]")
        # start the second thread to create new child table and insert data
        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        while 1:

@ -457,7 +463,7 @@ class TDTestCase:
        tdSql.checkData(0 , 3, expectrowcnt)

        tdSql.query("drop topic %s"%topicFromStb)
        tdSql.query("drop topic %s"%topicFromCtb)
        tdSql.query("drop topic %s"%topicFromStb2)

        tdLog.printNoPrefix("======== test case 3 end ...... ")

@ -474,7 +480,7 @@ class TDTestCase:
        self.tmqCase1(cfgPath, buildPath)
        self.tmqCase2(cfgPath, buildPath)
        #self.tmqCase3(cfgPath, buildPath)
        self.tmqCase3(cfgPath, buildPath)

    def stop(self):
        tdSql.close()

@ -49,7 +49,38 @@ class TDTestCase:
        print(cur)
        return cur

    def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg,showRow,cdbName,valgrind=0):
    def initConsumerTable(self,cdbName='cdb'):
        tdLog.info("create consume database, and consume info table, and consume result table")
        tdSql.query("create database if not exists %s vgroups 1"%(cdbName))
        tdSql.query("drop table if exists %s.consumeinfo "%(cdbName))
        tdSql.query("drop table if exists %s.consumeresult "%(cdbName))

        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

    def insertConsumerInfo(self,consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifmanualcommit,cdbName='cdb'):
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata, ifmanualcommit)
        tdLog.info("consume info sql: %s"%sql)
        tdSql.query(sql)

    def selectConsumeResult(self,expectRows,cdbName='cdb'):
        resultList=[]
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == expectRows:
                break
            else:
                time.sleep(5)

        for i in range(expectRows):
            tdLog.info("consume id: %d, consume msgs: %d, consume rows: %d"%(tdSql.getData(i , 1), tdSql.getData(i , 2), tdSql.getData(i , 3)))
            resultList.append(tdSql.getData(i , 3))

        return resultList

    def startTmqSimProcess(self,buildPath,cfgPath,pollDelay,dbName,showMsg=1,showRow=1,cdbName='cdb',valgrind=0):
        shellCmd = 'nohup '
        if valgrind == 1:
            logFile = cfgPath + '/../log/valgrind-tmq.log'

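
Taken together, the three new helpers above replace the per-case boilerplate that the later hunks delete. A minimal usage sketch, assuming it runs inside a test method where buildPath and cfgPath are already resolved, and with illustrative parameter values:

    self.initConsumerTable()                    # recreate cdb.consumeinfo / cdb.consumeresult
    keyList = 'group.id:cgrp1,enable.auto.commit:false,' \
              'auto.commit.interval.ms:6000,auto.offset.reset:earliest'
    self.insertConsumerInfo(0, 100000, 'topic_db1', keyList, 0, 0)  # consumerId=0, no manual commit
    self.startTmqSimProcess(buildPath, cfgPath, 5, 'db1')           # pollDelay=5, defaults for the rest
    resultList = self.selectConsumeResult(1)    # blocks until one result row is written back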
@ -87,6 +118,8 @@ class TDTestCase:
        pre_insert = "insert into "
        sql = pre_insert

        t = time.time()
        startTs = int(round(t * 1000))
        #tdLog.debug("doing insert data into stable:%s rows:%d ..."%(stbName, allRows))
        for i in range(ctbNum):
            sql += " %s_%d values "%(stbName,i)

@ -127,7 +160,7 @@ class TDTestCase:
        return

    def tmqCase1(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 1: Produce while one consume to subscribe one db")
        tdLog.printNoPrefix("======== test case 1: Produce while one consume to subscribe one db, include 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \

@ -135,11 +168,13 @@ class TDTestCase:
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 100000, \
                         'batchNum': 200, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)

@ -149,23 +184,16 @@ class TDTestCase:
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName']))

        tdLog.info("create consume info table and consume result table")
        cdbName = parameterDict["dbName"]
        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
        tdSql.query(sql)
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

@ -173,32 +201,28 @@ class TDTestCase:
        pollDelay = 5
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName)
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("insert process end, and start to check consume result")
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == 1:
                break
            else:
                time.sleep(5)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        tdLog.info("consumer result: %d, %d"%(tdSql.getData(0 , 2), tdSql.getData(0 , 3)))
        tdSql.checkData(0 , 1, consumerId)
        # multi rows and multi tables in one sql, so the number of msgs is not fixed
        #tdSql.checkData(0 , 2, expectmsgcnt)
        tdSql.checkData(0 , 3, expectrowcnt+1)
        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 1 end ...... ")

    def tmqCase2(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 2: Produce while two consumers to subscribe one db")
        tdLog.printNoPrefix("======== test case 2: Produce while two consumers to subscribe one db, include 1 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \

@ -206,11 +230,13 @@ class TDTestCase:
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 100000, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)

@ -221,27 +247,19 @@ class TDTestCase:

        tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName']))

        tdLog.info("create consume info table and consume result table")
        cdbName = parameterDict["dbName"]
        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
        tdSql.query(sql)
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        consumerId = 1
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
        tdSql.query(sql)
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

@ -249,30 +267,20 @@ class TDTestCase:
        pollDelay = 5
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName)
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()

        tdLog.info("insert process end, and start to check consume result")
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == 2:
                break
            else:
                time.sleep(5)
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        consumerId0 = tdSql.getData(0 , 1)
        consumerId1 = tdSql.getData(1 , 1)
        actConsumeRows0 = tdSql.getData(0 , 3)
        actConsumeRows1 = tdSql.getData(1 , 3)

        tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0))
        tdLog.info("consumer %d rows: %d"%(consumerId1, actConsumeRows1))

        totalConsumeRows = actConsumeRows0 + actConsumeRows1
        if totalConsumeRows != expectrowcnt + 2:
        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

@ -288,11 +296,13 @@ class TDTestCase:
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 100000, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)

@ -303,7 +313,7 @@ class TDTestCase:
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 100000, \
                          'rowsPerTbl': 10000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

@ -316,27 +326,19 @@ class TDTestCase:

        tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName']))

        tdLog.info("create consume info table and consume result table")
        cdbName = parameterDict["dbName"]
        tdSql.query("create table %s.consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int)"%cdbName)
        tdSql.query("create table %s.consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)"%cdbName)

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        sql = "insert into %s.consumeinfo values "%cdbName
        sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
        tdSql.query(sql)
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        # consumerId = 1
        # sql = "insert into %s.consumeinfo values "%cdbName
        # sql += "(now, %d, '%s', '%s', %d, %d)"%(consumerId, topicList, keyList, expectrowcnt, ifcheckdata)
        # tdSql.query(sql)
        # self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

@ -344,37 +346,357 @@ class TDTestCase:
        pollDelay = 5
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow, cdbName)
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        while 1:
            tdSql.query("select * from %s.consumeresult"%cdbName)
            #tdLog.info("row: %d, %l64d, %l64d"%(tdSql.getData(0, 1),tdSql.getData(0, 2),tdSql.getData(0, 3))
            if tdSql.getRows() == 1:
                break
            else:
                time.sleep(5)
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        consumerId0 = tdSql.getData(0 , 1)
        #consumerId1 = tdSql.getData(1 , 1)
        actConsumeRows0 = tdSql.getData(0 , 3)
        #actConsumeRows1 = tdSql.getData(1 , 3)

        tdLog.info("consumer %d rows: %d"%(consumerId0, actConsumeRows0))
        #tdLog.info("consumer %d rows: %d"%(consumerId1, actConsumeRows1))

        #totalConsumeRows = actConsumeRows0 + actConsumeRows1
        if actConsumeRows0 != expectrowcnt + 1:
        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 3 end ...... ")

    def tmqCase4(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 4: Produce while two consumers to subscribe one db, include 2 stb")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db4', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db4', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 10000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        consumerId = 1
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 5
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 4 end ...... ")

    def tmqCase5(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 5: Produce while two consumers to subscribe one db, first create one stb, then create the other stb after consume starts")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db5', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db5', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 10000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        tdLog.info("create topics from db")
        topicName1 = 'topic_db1'

        tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        consumerId = 1
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 5
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)

        tdLog.printNoPrefix("======== test case 5 end ...... ")

    def tmqCase6(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 6: Produce while one consumer subscribes to two topics, each from one db")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db60', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db61', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 10000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db60'
        topicName2 = 'topic_db61'

        tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName']))
        tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1 + ',' + topicName2
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        #consumerId = 1
        #self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 5
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 1
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)
        tdSql.query("drop topic %s"%topicName2)

        tdLog.printNoPrefix("======== test case 6 end ...... ")

    def tmqCase7(self, cfgPath, buildPath):
        tdLog.printNoPrefix("======== test case 7: Produce while two consumers subscribe to two topics, each from one db")
        tdLog.info("step 1: create database, stb, ctb and insert data")
        # create and start thread
        parameterDict = {'cfg': '', \
                         'dbName': 'db70', \
                         'vgroups': 4, \
                         'stbName': 'stb', \
                         'ctbNum': 10, \
                         'rowsPerTbl': 10000, \
                         'batchNum': 100, \
                         'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict['cfg'] = cfgPath

        self.initConsumerTable()

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict['dbName'], parameterDict['vgroups']))

        prepareEnvThread = threading.Thread(target=self.prepareEnv, kwargs=parameterDict)
        prepareEnvThread.start()

        parameterDict2 = {'cfg': '', \
                          'dbName': 'db71', \
                          'vgroups': 4, \
                          'stbName': 'stb2', \
                          'ctbNum': 10, \
                          'rowsPerTbl': 10000, \
                          'batchNum': 100, \
                          'startTs': 1640966400000} # 2022-01-01 00:00:00.000
        parameterDict2['cfg'] = cfgPath

        tdSql.execute("create database if not exists %s vgroups %d" %(parameterDict2['dbName'], parameterDict2['vgroups']))

        prepareEnvThread2 = threading.Thread(target=self.prepareEnv, kwargs=parameterDict2)
        prepareEnvThread2.start()

        tdLog.info("create topics from db")
        topicName1 = 'topic_db60'
        topicName2 = 'topic_db61'

        tdSql.execute("create topic %s as %s" %(topicName1, parameterDict['dbName']))
        tdSql.execute("create topic %s as %s" %(topicName2, parameterDict2['dbName']))

        consumerId = 0
        expectrowcnt = parameterDict["rowsPerTbl"] * parameterDict["ctbNum"] + parameterDict2["rowsPerTbl"] * parameterDict2["ctbNum"]
        topicList = topicName1 + ',' + topicName2
        ifcheckdata = 0
        ifManualCommit = 0
        keyList = 'group.id:cgrp1,\
                   enable.auto.commit:false,\
                   auto.commit.interval.ms:6000,\
                   auto.offset.reset:earliest'
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        consumerId = 1
        self.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)

        event.wait()

        tdLog.info("start consume processor")
        pollDelay = 5
        showMsg = 1
        showRow = 1
        self.startTmqSimProcess(buildPath,cfgPath,pollDelay,parameterDict["dbName"],showMsg, showRow)

        # wait for data ready
        prepareEnvThread.join()
        prepareEnvThread2.join()

        tdLog.info("insert process end, and start to check consume result")
        expectRows = 2
        resultList = self.selectConsumeResult(expectRows)
        totalConsumeRows = 0
        for i in range(expectRows):
            totalConsumeRows += resultList[i]

        if totalConsumeRows != expectrowcnt:
            tdLog.info("act consume rows: %d, expect consume rows: %d"%(totalConsumeRows, expectrowcnt))
            tdLog.exit("tmq consume rows error!")

        tdSql.query("drop topic %s"%topicName1)
        tdSql.query("drop topic %s"%topicName2)

        tdLog.printNoPrefix("======== test case 7 end ...... ")

    def run(self):
        tdSql.prepare()

@ -387,8 +709,13 @@ class TDTestCase:
        tdLog.info("cfgPath: %s" % cfgPath)

        #self.tmqCase1(cfgPath, buildPath)
        self.tmqCase2(cfgPath, buildPath)
        #self.tmqCase2(cfgPath, buildPath)
        #self.tmqCase3(cfgPath, buildPath)
        self.tmqCase4(cfgPath, buildPath)
        self.tmqCase5(cfgPath, buildPath)
        self.tmqCase6(cfgPath, buildPath)
        #self.tmqCase7(cfgPath, buildPath)

    def stop(self):
        tdSql.close()

@ -53,6 +53,9 @@ python3 ./test.py -f 2-query/tan.py
python3 ./test.py -f 2-query/arcsin.py
python3 ./test.py -f 2-query/arccos.py
python3 ./test.py -f 2-query/arctan.py
# python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/query_cols_tags_and_or.py
python3 ./test.py -f 2-query/nestedQuery.py

python3 ./test.py -f 7-tmq/basic5.py
python3 ./test.py -f 7-tmq/subscribeDb.py

@ -37,9 +37,10 @@ typedef struct {
  TdThread thread;
  int32_t  consumerId;

  int32_t  autoCommitIntervalMs;  // 1000 ms
  char     autoCommit[8];         // true, false
  char     autoOffsetRest[16];    // none, earliest, latest
  int32_t  ifManualCommit;
  //int32_t  autoCommitIntervalMs;  // 1000 ms
  //char     autoCommit[8];         // true, false
  //char     autoOffsetRest[16];    // none, earliest, latest

  int32_t  ifCheckData;
  int64_t  expectMsgCnt;

@ -136,9 +137,9 @@ void saveConfigToLogFile() {
  for (int32_t i = 0; i < g_stConfInfo.numOfThread; i++) {
    taosFprintfFile(g_fp, "# consumer %d info:\n", g_stConfInfo.stThreads[i].consumerId);
    taosFprintfFile(g_fp, "  auto commit: %s\n", g_stConfInfo.stThreads[i].autoCommit);
    taosFprintfFile(g_fp, "  auto commit interval ms: %d\n", g_stConfInfo.stThreads[i].autoCommitIntervalMs);
    taosFprintfFile(g_fp, "  auto offset rest: %s\n", g_stConfInfo.stThreads[i].autoOffsetRest);
    //taosFprintfFile(g_fp, "  auto commit: %s\n", g_stConfInfo.stThreads[i].autoCommit);
    //taosFprintfFile(g_fp, "  auto commit interval ms: %d\n", g_stConfInfo.stThreads[i].autoCommitIntervalMs);
    //taosFprintfFile(g_fp, "  auto offset rest: %s\n", g_stConfInfo.stThreads[i].autoOffsetRest);
    taosFprintfFile(g_fp, "  Topics: ");
    for (int j = 0; j < g_stConfInfo.stThreads[i].numOfTopic; j++) {
      taosFprintfFile(g_fp, "%s, ", g_stConfInfo.stThreads[i].topics[j]);

@ -232,13 +233,18 @@ static int32_t msg_process(TAOS_RES* msg, int64_t msgIndex, int32_t threadLable)
  while (1) {
    TAOS_ROW row = taos_fetch_row(msg);

    if (row == NULL) break;
    if (0 != g_stConfInfo.showRowFlag) {

    TAOS_FIELD* fields = taos_fetch_fields(msg);
    int32_t     numOfFields = taos_field_count(msg);

    taos_print_row(buf, row, fields, numOfFields);

    if (0 != g_stConfInfo.showRowFlag) {
      taosFprintfFile(g_fp, "rows[%d]: %s\n", totalRows, buf);
    }

    totalRows++;
  }

@ -316,6 +322,8 @@ int32_t saveConsumeResult(SThreadInfo* pInfo) {
  sprintf(sqlStr, "insert into %s.consumeresult values (now, %d, %" PRId64 ", %" PRId64 ", %d)", g_stConfInfo.cdbName,
          pInfo->consumerId, pInfo->consumeMsgCnt, pInfo->consumeRowCnt, pInfo->checkresult);

  taosFprintfFile(g_fp, "== save result sql: %s \n", sqlStr);

  TAOS_RES* pRes = taos_query(pConn, sqlStr);
  if (taos_errno(pRes) != 0) {
    pError("error in save consumeinfo, reason:%s\n", taos_errstr(pRes));

@ -384,7 +392,11 @@ void* consumeThreadFunc(void* param) {
  loop_consume(pInfo);

  if (pInfo->ifManualCommit) {
    taosFprintfFile(g_fp, "tmq_commit() manual commit when consume end.\n");
    pPrint("tmq_commit() manual commit when consume end.\n");
    tmq_commit(pInfo->tmq, NULL, 0);
  }

  err = tmq_unsubscribe(pInfo->tmq);
  if (err) {

@ -470,9 +482,9 @@ int32_t getConsumeInfo() {
  int32_t* lengths = taos_fetch_lengths(pRes);

  // set default value
  g_stConfInfo.stThreads[numOfThread].autoCommitIntervalMs = 5000;
  memcpy(g_stConfInfo.stThreads[numOfThread].autoCommit, "true", strlen("true"));
  memcpy(g_stConfInfo.stThreads[numOfThread].autoOffsetRest, "earlieast", strlen("earlieast"));
  //g_stConfInfo.stThreads[numOfThread].autoCommitIntervalMs = 5000;
  //memcpy(g_stConfInfo.stThreads[numOfThread].autoCommit, "true", strlen("true"));
  //memcpy(g_stConfInfo.stThreads[numOfThread].autoOffsetRest, "earlieast", strlen("earlieast"));

  for (int i = 0; i < num_fields; ++i) {
    if (row[i] == NULL || 0 == i) {

@ -489,12 +501,8 @@ int32_t getConsumeInfo() {
      g_stConfInfo.stThreads[numOfThread].expectMsgCnt = *((int64_t*)row[i]);
    } else if ((5 == i) && (fields[i].type == TSDB_DATA_TYPE_INT)) {
      g_stConfInfo.stThreads[numOfThread].ifCheckData = *((int32_t*)row[i]);
    } else if ((6 == i) && (fields[i].type == TSDB_DATA_TYPE_BINARY)) {
      memcpy(g_stConfInfo.stThreads[numOfThread].autoCommit, row[i], lengths[i]);
    } else if ((7 == i) && (fields[i].type == TSDB_DATA_TYPE_INT)) {
      g_stConfInfo.stThreads[numOfThread].autoCommitIntervalMs = *((int32_t*)row[i]);
    } else if ((8 == i) && (fields[i].type == TSDB_DATA_TYPE_BINARY)) {
      memcpy(g_stConfInfo.stThreads[numOfThread].autoOffsetRest, row[i], lengths[i]);
    } else if ((6 == i) && (fields[i].type == TSDB_DATA_TYPE_INT)) {
      g_stConfInfo.stThreads[numOfThread].ifManualCommit = *((int32_t*)row[i]);
    }
  }
  numOfThread++;
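
Cross-referencing the parser above with the consumeinfo table created by initConsumerTable() earlier in this diff, the column order the tool now assumes is fixed; a small illustrative mapping with hypothetical values:

    # consumeinfo columns after this change:
    #   0 ts | 1 consumerid | 2 topiclist | 3 keylist | 4 expectmsgcnt | 5 ifcheckdata | 6 ifmanualcommit
    row = (1640966400000, 0, "topic_db1", "group.id:cgrp1,enable.auto.commit:false", 100000, 0, 1)
    thread_conf = {
        "consumerId": row[1],      # parsed at i == 1
        "expectMsgCnt": row[4],    # i == 4, BIGINT
        "ifCheckData": row[5],     # i == 5, INT
        "ifManualCommit": row[6],  # i == 6, INT (replaces the old autoCommit columns 6-8)
    }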