Merge branch '3.0' into test/jcy

jiacy-jcy 2022-11-03 11:00:39 +08:00
commit d13c20d9b3
296 changed files with 743 additions and 341 deletions

View File

@ -297,7 +297,6 @@ typedef struct {
typedef struct {
int32_t code;
int8_t hashMeta;
int64_t uid;
char* tblFName;
int32_t numOfRows;

View File

@ -203,13 +203,11 @@ int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
int32_t catalogUpdateTableMeta(SCatalog* pCatalog, STableMetaRsp* rspMsg);
int32_t catalogGetCachedTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableMeta** pTableMeta);
int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
STableMeta** pTableMeta);
int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta);
int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
SVgroupInfo* pVgroup, bool* exists);
int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);
/**
* Force refresh DB's local cached vgroup info.
@ -309,7 +307,7 @@ int32_t catalogGetUdfInfo(SCatalog* pCtg, SRequestConnInfo* pConn, const char* f
int32_t catalogChkAuth(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type,
bool* pass);
int32_t catalogChkAuthFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type,
int32_t catalogChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type,
bool* pass, bool* exists);
int32_t catalogUpdateUserAuthInfo(SCatalog* pCtg, SGetUserAuthRsp* pAuth);
@ -326,6 +324,10 @@ SMetaData* catalogCloneMetaData(SMetaData* pData);
void catalogFreeMetaData(SMetaData* pData);
int32_t ctgdEnableDebug(char *option, bool enable);
int32_t ctgdHandleDbgCommand(char *command);
/**
 * Destroy catalog and release all resources
*/
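
The cached getters above now take only the catalog handle and the table name, so callers can probe the local catalog cache without a live connection. A minimal usage sketch under that assumption (catalog.h in scope, name already initialized; error handling trimmed; this is not code from the commit):

#include "catalog.h"

// Sketch only: look up table meta and the owning vgroup from the local cache.
static void probeCatalogCache(SCatalog *pCtg, const SName *name) {
  STableMeta *pMeta = NULL;
  if (catalogGetCachedTableMeta(pCtg, name, &pMeta) == TSDB_CODE_SUCCESS && pMeta != NULL) {
    // cache hit: pMeta->vgId, pMeta->tableType, ... are usable here
    taosMemoryFree(pMeta);
  }

  SVgroupInfo vgInfo = {0};
  bool        exists = false;
  if (catalogGetCachedTableHashVgroup(pCtg, name, &vgInfo, &exists) == TSDB_CODE_SUCCESS && exists) {
    // cache hit: vgInfo.vgId identifies the target vgroup
  }
}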

View File

@ -248,7 +248,7 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
(_code) == TSDB_CODE_PAR_INVALID_DROP_COL || ((_code) == TSDB_CODE_TDB_INVALID_TABLE_ID))
#define NEED_CLIENT_REFRESH_VG_ERROR(_code) \
((_code) == TSDB_CODE_VND_HASH_MISMATCH || (_code) == TSDB_CODE_VND_INVALID_VGROUP_ID)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_TABLE_RECREATED)
#define NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code) ((_code) == TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER)
#define NEED_CLIENT_HANDLE_ERROR(_code) \
(NEED_CLIENT_RM_TBLMETA_ERROR(_code) || NEED_CLIENT_REFRESH_VG_ERROR(_code) || \
NEED_CLIENT_REFRESH_TBLMETA_ERROR(_code))
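
Since NEED_CLIENT_REFRESH_TBLMETA_ERROR is now keyed to the schema-version code, client-side error handling can separate a stale cached schema from other failures. A small hedged sketch (classifyWriteError is a hypothetical name, not a function in this codebase; the macros above are assumed to be in scope):

// Hypothetical helper: map a write error code onto the client-side actions
// implied by the macros above.
static void classifyWriteError(int32_t code, bool *removeMeta, bool *refreshVg, bool *refreshMeta) {
  *removeMeta  = NEED_CLIENT_RM_TBLMETA_ERROR(code);      // drop the cached table meta
  *refreshVg   = NEED_CLIENT_REFRESH_VG_ERROR(code);      // re-fetch the vgroup mapping
  *refreshMeta = NEED_CLIENT_REFRESH_TBLMETA_ERROR(code); // TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER: refresh meta, then retry
}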

View File

@ -340,7 +340,7 @@ int32_t* taosGetErrno();
#define TSDB_CODE_TDB_TABLE_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x0618)
#define TSDB_CODE_TDB_STB_ALREADY_EXIST TAOS_DEF_ERROR_CODE(0, 0x0619)
#define TSDB_CODE_TDB_STB_NOT_EXIST TAOS_DEF_ERROR_CODE(0, 0x061A)
#define TSDB_CODE_TDB_TABLE_RECREATED TAOS_DEF_ERROR_CODE(0, 0x061B)
#define TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER TAOS_DEF_ERROR_CODE(0, 0x061B)
#define TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR TAOS_DEF_ERROR_CODE(0, 0x061C)
// query

View File

@ -211,7 +211,8 @@ elif [[ ${packgeName} =~ "tar" ]];then
if [ ${diffNumbers} != 0 ];then
echoColor R "The number and names of files are different from the previous installation package"
echoColor Y `cat ${installPath}/diffFile.log`
diffLog=`cat ${installPath}/diffFile.log`
echoColor Y "${diffLog}"
exit -1
else
echoColor G "The number and names of files are the same as previous installation packages"

View File

@ -318,6 +318,7 @@ void* createTscObj(const char* user, const char* auth, const char* db, int32_
void destroyTscObj(void* pObj);
STscObj* acquireTscObj(int64_t rid);
int32_t releaseTscObj(int64_t rid);
void destroyAppInst(SAppInstInfo *pAppInfo);
uint64_t generateRequestId();

View File

@ -39,6 +39,7 @@ typedef enum {
STMT_BIND_COL,
STMT_ADD_BATCH,
STMT_EXECUTE,
STMT_MAX,
} STMT_STATUS;
typedef struct SStmtTableCache {
@ -94,12 +95,18 @@ typedef struct STscStmt {
STscObj *taos;
SCatalog *pCatalog;
int32_t affectedRows;
uint32_t seqId;
uint32_t seqIds[STMT_MAX];
SStmtSQLInfo sql;
SStmtExecInfo exec;
SStmtBindInfo bInfo;
} STscStmt;
extern char *gStmtStatusStr[];
#define STMT_LOG_SEQ(n) do { (pStmt)->seqId++; (pStmt)->seqIds[n]++; STMT_DLOG("the %dth:%d %s", (pStmt)->seqIds[n], (pStmt)->seqId, gStmtStatusStr[n]); } while (0)
#define STMT_STATUS_NE(S) (pStmt->sql.status != STMT_##S)
#define STMT_STATUS_EQ(S) (pStmt->sql.status == STMT_##S)
@ -128,6 +135,12 @@ typedef struct STscStmt {
} \
} while (0)
#define STMT_ELOG(param, ...) qError("stmt:%p " param, pStmt, __VA_ARGS__)
#define STMT_DLOG(param, ...) qDebug("stmt:%p " param, pStmt, __VA_ARGS__)
#define STMT_ELOG_E(param) qError("stmt:%p " param, pStmt)
#define STMT_DLOG_E(param) qDebug("stmt:%p " param, pStmt)
TAOS_STMT *stmtInit(STscObj *taos);
int stmtClose(TAOS_STMT *stmt);
int stmtExec(TAOS_STMT *stmt);

View File

@ -133,6 +133,12 @@ STscObj* taos_connect_internal(const char* ip, const char* user, const char* pas
taosThreadMutexInit(&p->qnodeMutex, NULL);
p->pTransporter = openTransporter(user, secretEncrypt, tsNumOfCores);
p->pAppHbMgr = appHbMgrInit(p, key);
if (NULL == p->pAppHbMgr) {
destroyAppInst(p);
taosThreadMutexUnlock(&appInfo.mutex);
taosMemoryFreeClear(key);
return NULL;
}
taosHashPut(appInfo.pInstMap, key, strlen(key), &p, POINTER_BYTES);
p->instKey = key;
key = NULL;
@ -1266,7 +1272,9 @@ static SMsgSendInfo* buildConnectMsg(SRequestObj* pRequest) {
pMsgSendInfo->requestObjRefId = pRequest->self;
pMsgSendInfo->requestId = pRequest->requestId;
pMsgSendInfo->fp = getMsgRspHandle(pMsgSendInfo->msgType);
pMsgSendInfo->param = pRequest;
pMsgSendInfo->param = taosMemoryCalloc(1, sizeof(pRequest->self));
*(int64_t*)pMsgSendInfo->param = pRequest->self;
SConnectReq connectReq = {0};
STscObj* pObj = pRequest->pTscObj;

View File

@ -45,8 +45,13 @@ int32_t genericRspCallback(void* param, SDataBuf* pMsg, int32_t code) {
}
int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
SRequestObj* pRequest = param;
SRequestObj *pRequest = acquireRequest(*(int64_t*)param);
if (NULL == pRequest) {
setErrno(pRequest, TSDB_CODE_TSC_DISCONNECTED);
tsem_post(&pRequest->body.rspSem);
goto End;
}
if (code != TSDB_CODE_SUCCESS) {
setErrno(pRequest, code);
tsem_post(&pRequest->body.rspSem);
@ -55,6 +60,12 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
STscObj* pTscObj = pRequest->pTscObj;
if (NULL == pTscObj->pAppInfo || NULL == pTscObj->pAppInfo->pAppHbMgr) {
setErrno(pRequest, TSDB_CODE_TSC_DISCONNECTED);
tsem_post(&pRequest->body.rspSem);
goto End;
}
SConnectRsp connectRsp = {0};
if (tDeserializeSConnectRsp(pMsg->pData, pMsg->len, &connectRsp) != 0) {
code = TSDB_CODE_TSC_INVALID_VERSION;
@ -115,10 +126,15 @@ int32_t processConnectRsp(void* param, SDataBuf* pMsg, int32_t code) {
tscDebug("0x%" PRIx64 " clusterId:%" PRId64 ", totalConn:%" PRId64, pRequest->requestId, connectRsp.clusterId,
pTscObj->pAppInfo->numOfConns);
tsem_post(&pRequest->body.rspSem);
End:
if (pRequest) {
releaseRequest(pRequest->self);
}
taosMemoryFree(param);
taosMemoryFree(pMsg->pEpSet);
taosMemoryFree(pMsg->pData);
return code;

View File

@ -5,6 +5,8 @@
#include "clientStmt.h"
char *gStmtStatusStr[] = {"unknown", "init", "prepare", "settbname", "settags", "fetchFields", "bind", "bindCol", "addBatch", "exec"};
static int32_t stmtCreateRequest(STscStmt* pStmt) {
int32_t code = 0;
@ -21,6 +23,10 @@ static int32_t stmtCreateRequest(STscStmt* pStmt) {
int32_t stmtSwitchStatus(STscStmt* pStmt, STMT_STATUS newStatus) {
int32_t code = 0;
if (newStatus >= STMT_INIT && newStatus < STMT_MAX) {
STMT_LOG_SEQ(newStatus);
}
switch (newStatus) {
case STMT_PREPARE:
break;
@ -528,13 +534,17 @@ TAOS_STMT* stmtInit(STscObj* taos) {
pStmt->bInfo.needParse = true;
pStmt->sql.status = STMT_INIT;
STMT_LOG_SEQ(STMT_INIT);
tscDebug("stmt:%p initialized", pStmt);
return pStmt;
}
int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to prepare");
STMT_DLOG_E("start to prepare");
if (pStmt->sql.status >= STMT_PREPARE) {
STMT_ERR_RET(stmtResetStmt(pStmt));
@ -555,7 +565,7 @@ int stmtPrepare(TAOS_STMT* stmt, const char* sql, unsigned long length) {
int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to set tbName: %s", tbName);
STMT_DLOG("start to set tbName: %s", tbName);
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTBNAME));
@ -587,7 +597,7 @@ int stmtSetTbName(TAOS_STMT* stmt, const char* tbName) {
int stmtSetTbTags(TAOS_STMT* stmt, TAOS_MULTI_BIND* tags) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to set tbTags");
STMT_DLOG_E("start to set tbTags");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_SETTAGS));
@ -649,7 +659,7 @@ int stmtFetchColFields(STscStmt* pStmt, int32_t* fieldNum, TAOS_FIELD_E** fields
int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("start to bind stmt data, colIdx: %d", colIdx);
STMT_DLOG("start to bind stmt data, colIdx: %d", colIdx);
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_BIND));
@ -743,7 +753,7 @@ int stmtBindBatch(TAOS_STMT* stmt, TAOS_MULTI_BIND* bind, int32_t colIdx) {
int stmtAddBatch(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
tscDebug("stmt start to add batch");
STMT_DLOG_E("start to add batch");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_ADD_BATCH));
@ -756,8 +766,7 @@ int stmtUpdateTableUid(STscStmt* pStmt, SSubmitRsp* pRsp) {
tscDebug("stmt start to update tbUid, blockNum: %d", pRsp->nBlocks);
if (pRsp->nBlocks <= 0) {
tscError("invalid submit resp block number %d", pRsp->nBlocks);
STMT_ERR_RET(TSDB_CODE_TSC_APP_ERROR);
return TSDB_CODE_SUCCESS;
}
size_t keyLen = 0;
@ -810,7 +819,7 @@ int stmtExec(TAOS_STMT* stmt) {
SSubmitRsp* pRsp = NULL;
bool autoCreateTbl = pStmt->exec.autoCreateTbl;
tscDebug("stmt start to exec");
STMT_DLOG_E("start to exec");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_EXECUTE));
@ -885,6 +894,8 @@ int stmtAffectedRowsOnce(TAOS_STMT* stmt) { return ((STscStmt*)stmt)->exec.affec
int stmtIsInsert(TAOS_STMT* stmt, int* insert) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_DLOG_E("start is insert");
if (pStmt->sql.type) {
*insert = (STMT_TYPE_INSERT == pStmt->sql.type || STMT_TYPE_MULTI_INSERT == pStmt->sql.type);
} else {
@ -897,6 +908,8 @@ int stmtIsInsert(TAOS_STMT* stmt, int* insert) {
int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_DLOG_E("start to get tag fields");
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
@ -927,6 +940,8 @@ int stmtGetTagFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_DLOG_E("start to get col fields");
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
@ -957,6 +972,8 @@ int stmtGetColFields(TAOS_STMT* stmt, int* nums, TAOS_FIELD_E** fields) {
int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_DLOG_E("start to get param num");
STMT_ERR_RET(stmtSwitchStatus(pStmt, STMT_FETCH_FIELDS));
if (pStmt->bInfo.needParse && pStmt->sql.runTimes && pStmt->sql.type > 0 &&
@ -986,6 +1003,8 @@ int stmtGetParamNum(TAOS_STMT* stmt, int* nums) {
int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_DLOG_E("start to get param");
if (STMT_TYPE_QUERY == pStmt->sql.type) {
STMT_RET(TSDB_CODE_TSC_STMT_API_ERROR);
}
@ -1028,6 +1047,8 @@ int stmtGetParam(TAOS_STMT* stmt, int idx, int* type, int* bytes) {
TAOS_RES* stmtUseResult(TAOS_STMT* stmt) {
STscStmt* pStmt = (STscStmt*)stmt;
STMT_DLOG_E("start to use result");
if (STMT_TYPE_QUERY != pStmt->sql.type) {
tscError("useResult only for query statement");
return NULL;

View File

@ -5432,9 +5432,12 @@ static int32_t tEncodeSSubmitBlkRsp(SEncoder *pEncoder, const SSubmitBlkRsp *pBl
if (tStartEncode(pEncoder) < 0) return -1;
if (tEncodeI32(pEncoder, pBlock->code) < 0) return -1;
if (tEncodeI8(pEncoder, pBlock->hashMeta) < 0) return -1;
if (tEncodeI64(pEncoder, pBlock->uid) < 0) return -1;
if (tEncodeCStr(pEncoder, pBlock->tblFName) < 0) return -1;
if (pBlock->tblFName) {
if (tEncodeCStr(pEncoder, pBlock->tblFName) < 0) return -1;
} else {
if (tEncodeCStr(pEncoder, "") < 0) return -1;
}
if (tEncodeI32v(pEncoder, pBlock->numOfRows) < 0) return -1;
if (tEncodeI32v(pEncoder, pBlock->affectedRows) < 0) return -1;
if (tEncodeI64v(pEncoder, pBlock->sver) < 0) return -1;
@ -5451,7 +5454,6 @@ static int32_t tDecodeSSubmitBlkRsp(SDecoder *pDecoder, SSubmitBlkRsp *pBlock) {
if (tStartDecode(pDecoder) < 0) return -1;
if (tDecodeI32(pDecoder, &pBlock->code) < 0) return -1;
if (tDecodeI8(pDecoder, &pBlock->hashMeta) < 0) return -1;
if (tDecodeI64(pDecoder, &pBlock->uid) < 0) return -1;
pBlock->tblFName = taosMemoryCalloc(TSDB_TABLE_FNAME_LEN, 1);
if (NULL == pBlock->tblFName) return -1;

View File

@ -116,6 +116,13 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgI
if (info.suid) {
metaGetInfo(pTsdb->pVnode->pMeta, info.suid, &info);
}
if (pMsgIter->sversion != info.skmVer) {
tsdbError("vgId:%d, req sver:%d, skmVer:%d suid:%" PRId64 " uid:%" PRId64,
TD_VID(pTsdb->pVnode), pMsgIter->sversion, info.skmVer, suid, uid);
code = TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER;
goto _err;
}
pRsp->sver = info.skmVer;
// create/get STbData to op
@ -133,6 +140,7 @@ int32_t tsdbInsertTableData(STsdb *pTsdb, int64_t version, SSubmitMsgIter *pMsgI
return code;
_err:
terrno = code;
return code;
}

View File

@ -861,6 +861,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
SEncoder encoder = {0};
SArray *newTbUids = NULL;
SVStatis statis = {0};
bool tbCreated = false;
terrno = TSDB_CODE_SUCCESS;
pRsp->code = 0;
@ -894,11 +895,10 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
if (pBlock == NULL) break;
SSubmitBlkRsp submitBlkRsp = {0};
tbCreated = false;
// create table for auto create table mode
if (msgIter.schemaLen > 0) {
submitBlkRsp.hashMeta = 1;
tDecoderInit(&decoder, pBlock->data, msgIter.schemaLen);
if (tDecodeSVCreateTbReq(&decoder, &createTbReq) < 0) {
pRsp->code = TSDB_CODE_INVALID_MSG;
@ -935,12 +935,13 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
}
taosArrayPush(newTbUids, &createTbReq.uid);
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name);
tbCreated = true;
}
submitBlkRsp.uid = createTbReq.uid;
submitBlkRsp.tblFName = taosMemoryMalloc(strlen(pVnode->config.dbname) + strlen(createTbReq.name) + 2);
sprintf(submitBlkRsp.tblFName, "%s.%s", pVnode->config.dbname, createTbReq.name);
msgIter.uid = createTbReq.uid;
if (createTbReq.type == TSDB_CHILD_TABLE) {
msgIter.suid = createTbReq.ctb.suid;
@ -953,10 +954,7 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
#endif
tDecoderClear(&decoder);
taosArrayDestroy(createTbReq.ctb.tagName);
} else {
submitBlkRsp.tblFName = taosMemoryMalloc(TSDB_TABLE_FNAME_LEN);
sprintf(submitBlkRsp.tblFName, "%s.", pVnode->config.dbname);
}
}
if (tsdbInsertTableData(pVnode->pTsdb, version, &msgIter, pBlock, &submitBlkRsp) < 0) {
submitBlkRsp.code = terrno;
@ -964,7 +962,9 @@ static int32_t vnodeProcessSubmitReq(SVnode *pVnode, int64_t version, void *pReq
submitRsp.numOfRows += submitBlkRsp.numOfRows;
submitRsp.affectedRows += submitBlkRsp.affectedRows;
taosArrayPush(submitRsp.pArray, &submitBlkRsp);
if (tbCreated || submitBlkRsp.code) {
taosArrayPush(submitRsp.pArray, &submitBlkRsp);
}
}
if (taosArrayGetSize(newTbUids) > 0) {

View File

@ -33,6 +33,7 @@ extern "C" {
#define CTG_DEFAULT_MAX_RETRY_TIMES 3
#define CTG_DEFAULT_BATCH_NUM 64
#define CTG_DEFAULT_FETCH_NUM 8
#define CTG_MAX_COMMAND_LEN 512
#define CTG_RENT_SLOT_SECOND 1.5
@ -223,6 +224,7 @@ typedef struct SCtgUserAuth {
typedef struct SCatalog {
uint64_t clusterId;
bool stopUpdate;
SHashObj* userCache; // key:user, value:SCtgUserAuth
SHashObj* dbCache; // key:dbname, value:SCtgDBCache
SCtgRentMgmt dbRent;
@ -671,7 +673,7 @@ void ctgdShowClusterCache(SCatalog* pCtg);
int32_t ctgdShowCacheInfo(void);
int32_t ctgRemoveTbMetaFromCache(SCatalog* pCtg, SName* pTableName, bool syncReq);
int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
int32_t ctgGetTbMetaFromCache(SCatalog* pCtg, SCtgTbMetaCtx* ctx, STableMeta** pTableMeta);
int32_t ctgGetTbMetasFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetasCtx* ctx, int32_t dbIdx,
int32_t* fetchIdx, int32_t baseResIdx, SArray* pList);
@ -786,6 +788,7 @@ void ctgFreeTbCacheImpl(SCtgTbCache* pCache);
int32_t ctgRemoveTbMeta(SCatalog* pCtg, SName* pTableName);
int32_t ctgGetTbHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists);
SName* ctgGetFetchName(SArray* pNames, SCtgFetch* pFetch);
int32_t ctgdGetOneHandle(SCatalog **pHandle);
extern SCatalogMgmt gCtgMgmt;
extern SCtgDebug gCTGDebug;

View File

@ -202,7 +202,7 @@ int32_t ctgGetTbMeta(SCatalog* pCtg, SRequestConnInfo* pConn, SCtgTbMetaCtx* ctx
int32_t code = 0;
STableMetaOutput* output = NULL;
CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, pConn, ctx, pTableMeta));
CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, ctx, pTableMeta));
if (*pTableMeta || (ctx->flag & CTG_FLAG_ONLY_CACHE)) {
goto _return;
}
@ -959,14 +959,14 @@ int32_t catalogGetTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName
CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
}
int32_t catalogGetCachedTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName, STableMeta** pTableMeta) {
int32_t catalogGetCachedTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta) {
CTG_API_ENTER();
SCtgTbMetaCtx ctx = {0};
ctx.pName = (SName*)pTableName;
ctx.flag = CTG_FLAG_UNKNOWN_STB | CTG_FLAG_ONLY_CACHE;
CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
CTG_API_LEAVE(ctgGetTbMeta(pCtg, NULL, &ctx, pTableMeta));
}
@ -981,15 +981,14 @@ int32_t catalogGetSTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SNam
CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
}
int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
STableMeta** pTableMeta) {
int32_t catalogGetCachedSTableMeta(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta) {
CTG_API_ENTER();
SCtgTbMetaCtx ctx = {0};
ctx.pName = (SName*)pTableName;
ctx.flag = CTG_FLAG_STB | CTG_FLAG_ONLY_CACHE;
CTG_API_LEAVE(ctgGetTbMeta(pCtg, pConn, &ctx, pTableMeta));
CTG_API_LEAVE(ctgGetTbMeta(pCtg, NULL, &ctx, pTableMeta));
}
@ -1114,11 +1113,10 @@ int32_t catalogGetTableHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const
CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup, NULL));
}
int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, SRequestConnInfo* pConn, const SName* pTableName,
SVgroupInfo* pVgroup, bool* exists) {
int32_t catalogGetCachedTableHashVgroup(SCatalog* pCtg, const SName* pTableName, SVgroupInfo* pVgroup, bool* exists) {
CTG_API_ENTER();
CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, pConn, pTableName, pVgroup, exists));
CTG_API_LEAVE(ctgGetTbHashVgroup(pCtg, NULL, pTableName, pVgroup, exists));
}
#if 0
@ -1387,16 +1385,16 @@ _return:
CTG_API_LEAVE(code);
}
int32_t catalogChkAuthFromCache(SCatalog* pCtg, SRequestConnInfo* pConn, const char* user, const char* dbFName, AUTH_TYPE type,
int32_t catalogChkAuthFromCache(SCatalog* pCtg, const char* user, const char* dbFName, AUTH_TYPE type,
bool* pass, bool* exists) {
CTG_API_ENTER();
if (NULL == pCtg || NULL == pConn || NULL == user || NULL == dbFName || NULL == pass || NULL == exists) {
if (NULL == pCtg || NULL == user || NULL == dbFName || NULL == pass || NULL == exists) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
int32_t code = 0;
CTG_ERR_JRET(ctgChkAuth(pCtg, pConn, user, dbFName, type, pass, exists));
CTG_ERR_JRET(ctgChkAuth(pCtg, NULL, user, dbFName, type, pass, exists));
_return:

View File

@ -1204,11 +1204,15 @@ int32_t ctgHandleGetTbMetasRsp(SCtgTaskReq* tReq, int32_t reqType, const SDataBu
stbCtx.flag = flag;
stbCtx.pName = &stbName;
taosMemoryFreeClear(pOut->tbMeta);
CTG_ERR_JRET(ctgReadTbMetaFromCache(pCtg, &stbCtx, &pOut->tbMeta));
if (pOut->tbMeta) {
STableMeta *stbMeta = NULL;
ctgReadTbMetaFromCache(pCtg, &stbCtx, &stbMeta);
if (stbMeta && stbMeta->sversion >= pOut->tbMeta->sversion) {
ctgDebug("use cached stb meta, tbName:%s", tNameGetTableName(pName));
exist = 1;
} else {
ctgDebug("need to get/update stb meta, tbName:%s", tNameGetTableName(pName));
taosMemoryFreeClear(pOut->tbMeta);
taosMemoryFreeClear(stbMeta);
}
}
@ -1641,7 +1645,7 @@ int32_t ctgLaunchGetTbMetaTask(SCtgTask* pTask) {
pMsgCtx->pBatchs = pJob->pBatchs;
}
CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, pConn, (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res));
CTG_ERR_RET(ctgGetTbMetaFromCache(pCtg, (SCtgTbMetaCtx*)pTask->taskCtx, (STableMeta**)&pTask->res));
if (pTask->res) {
CTG_ERR_RET(ctgHandleTaskEnd(pTask, 0));
return TSDB_CODE_SUCCESS;

View File

@ -248,6 +248,8 @@ int32_t ctgAcquireStbMetaFromCache(SCatalog *pCtg, char *dbFName, uint64_t suid,
goto _return;
}
taosHashRelease(dbCache->stbCache, stName);
CTG_LOCK(CTG_READ, &pCache->metaLock);
if (NULL == pCache->pMeta) {
ctgDebug("stb 0x%" PRIx64 " meta not in cache, dbFName:%s", suid, dbFName);
@ -1550,7 +1552,7 @@ int32_t ctgOpUpdateVgroup(SCtgCacheOperation *operation) {
char *dbFName = msg->dbFName;
SCatalog *pCtg = msg->pCtg;
if (NULL == dbInfo->vgHash) {
if (pCtg->stopUpdate || NULL == dbInfo->vgHash) {
goto _return;
}
@ -1620,6 +1622,10 @@ int32_t ctgOpDropDbCache(SCtgCacheOperation *operation) {
SCtgDropDBMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
if (pCtg->stopUpdate) {
goto _return;
}
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@ -1646,6 +1652,10 @@ int32_t ctgOpDropDbVgroup(SCtgCacheOperation *operation) {
SCtgDropDbVgroupMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
if (pCtg->stopUpdate) {
goto _return;
}
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(msg->pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@ -1675,6 +1685,10 @@ int32_t ctgOpUpdateTbMeta(SCtgCacheOperation *operation) {
STableMetaOutput *pMeta = msg->pMeta;
SCtgDBCache *dbCache = NULL;
if (pCtg->stopUpdate) {
goto _return;
}
if ((!CTG_IS_META_CTABLE(pMeta->metaType)) && NULL == pMeta->tbMeta) {
ctgError("no valid tbmeta got from meta rsp, dbFName:%s, tbName:%s", pMeta->dbFName, pMeta->tbName);
CTG_ERR_JRET(TSDB_CODE_CTG_INTERNAL_ERROR);
@ -1723,6 +1737,10 @@ int32_t ctgOpDropStbMeta(SCtgCacheOperation *operation) {
SCtgDropStbMetaMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
if (pCtg->stopUpdate) {
goto _return;
}
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@ -1776,6 +1794,10 @@ int32_t ctgOpDropTbMeta(SCtgCacheOperation *operation) {
SCtgDropTblMetaMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
if (pCtg->stopUpdate) {
goto _return;
}
SCtgDBCache *dbCache = NULL;
ctgGetDBCache(pCtg, msg->dbFName, &dbCache);
if (NULL == dbCache) {
@ -1819,6 +1841,10 @@ int32_t ctgOpUpdateUser(SCtgCacheOperation *operation) {
SCtgUpdateUserMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
if (pCtg->stopUpdate) {
goto _return;
}
SCtgUserAuth *pUser = (SCtgUserAuth *)taosHashGet(pCtg->userCache, msg->userAuth.user, strlen(msg->userAuth.user));
if (NULL == pUser) {
SCtgUserAuth userAuth = {0};
@ -1872,8 +1898,12 @@ int32_t ctgOpUpdateEpset(SCtgCacheOperation *operation) {
int32_t code = 0;
SCtgUpdateEpsetMsg *msg = operation->data;
SCatalog *pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
if (pCtg->stopUpdate) {
goto _return;
}
CTG_ERR_JRET(ctgGetDBCache(pCtg, msg->dbFName, &dbCache));
if (NULL == dbCache) {
ctgDebug("db %s not exist, ignore epset update", msg->dbFName);
@ -1920,6 +1950,10 @@ int32_t ctgOpUpdateTbIndex(SCtgCacheOperation *operation) {
STableIndex *pIndex = msg->pIndex;
SCtgDBCache *dbCache = NULL;
if (pCtg->stopUpdate) {
goto _return;
}
CTG_ERR_JRET(ctgGetAddDBCache(pCtg, pIndex->dbFName, 0, &dbCache));
CTG_ERR_JRET(ctgWriteTbIndexToCache(pCtg, dbCache, pIndex->dbFName, pIndex->tbName, &pIndex));
@ -1942,6 +1976,10 @@ int32_t ctgOpDropTbIndex(SCtgCacheOperation *operation) {
SCatalog *pCtg = msg->pCtg;
SCtgDBCache *dbCache = NULL;
if (pCtg->stopUpdate) {
goto _return;
}
CTG_ERR_JRET(ctgGetDBCache(pCtg, msg->dbFName, &dbCache));
if (NULL == dbCache) {
return TSDB_CODE_SUCCESS;
@ -2154,7 +2192,7 @@ int32_t ctgStartUpdateThread() {
return TSDB_CODE_SUCCESS;
}
int32_t ctgGetTbMetaFromCache(SCatalog *pCtg, SRequestConnInfo *pConn, SCtgTbMetaCtx *ctx, STableMeta **pTableMeta) {
int32_t ctgGetTbMetaFromCache(SCatalog *pCtg, SCtgTbMetaCtx *ctx, STableMeta **pTableMeta) {
if (IS_SYS_DBNAME(ctx->pName->dbname)) {
CTG_FLAG_SET_SYS_DB(ctx->flag);
}

View File

@ -226,28 +226,45 @@ _return:
CTG_RET(code);
}
int32_t ctgdEnableDebug(char *option) {
int32_t ctgdEnableDebug(char *option, bool enable) {
if (0 == strcasecmp(option, "lock")) {
gCTGDebug.lockEnable = true;
qDebug("lock debug enabled");
gCTGDebug.lockEnable = enable;
qDebug("catalog lock debug set to %d", enable);
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "cache")) {
gCTGDebug.cacheEnable = true;
qDebug("cache debug enabled");
gCTGDebug.cacheEnable = enable;
qDebug("catalog cache debug set to %d", enable);
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "api")) {
gCTGDebug.apiEnable = true;
qDebug("api debug enabled");
gCTGDebug.apiEnable = enable;
qDebug("catalog api debug set to %d", enable);
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "meta")) {
gCTGDebug.metaEnable = true;
qDebug("api debug enabled");
gCTGDebug.metaEnable = enable;
qDebug("catalog meta debug set to %d", enable);
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "stopUpdate")) {
SCatalog *pCtg = NULL;
void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
while (pIter) {
pCtg = *(SCatalog **)pIter;
pCtg->stopUpdate = enable;
pIter = taosHashIterate(gCtgMgmt.pCluster, pIter);
}
qDebug("catalog stopUpdate set to %d", enable);
return TSDB_CODE_SUCCESS;
}
@ -256,6 +273,77 @@ int32_t ctgdEnableDebug(char *option) {
return TSDB_CODE_CTG_INTERNAL_ERROR;
}
int32_t ctgdHandleDbgCommand(char *command) {
if (NULL == command) {
CTG_RET(TSDB_CODE_INVALID_PARA);
}
if (strlen(command) > CTG_MAX_COMMAND_LEN) {
CTG_RET(TSDB_CODE_INVALID_PARA);
}
char *dup = strdup(command);
char *option = NULL;
char *param = NULL;
int32_t i = 0;
bool newItem = true;
while (*(dup + i)) {
if (isspace(*(dup + i))) {
*(dup + i) = 0;
++i;
newItem = true;
continue;
}
if (!newItem) {
++i;
continue;
}
newItem = false;
if (NULL == option) {
option = dup + i;
++i;
continue;
}
if (NULL == param) {
param = dup + i;
++i;
continue;
}
taosMemoryFree(dup);
CTG_RET(TSDB_CODE_INVALID_PARA);
}
bool enable = atoi(param);
int32_t code = ctgdEnableDebug(option, enable);
taosMemoryFree(dup);
CTG_RET(code);
}
int32_t ctgdGetOneHandle(SCatalog **pHandle) {
SCatalog *pCtg = NULL;
void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
while (pIter) {
pCtg = *(SCatalog **)pIter;
taosHashCancelIterate(gCtgMgmt.pCluster, pIter);
break;
}
*pHandle = pCtg;
return TSDB_CODE_SUCCESS;
}
int32_t ctgdGetStatNum(char *option, void *res) {
if (0 == strcasecmp(option, "runtime.numOfOpDequeue")) {
*(uint64_t *)res = atomic_load_64(&gCtgMgmt.stat.runtime.numOfOpDequeue);

View File

@ -41,7 +41,6 @@
namespace {
extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog *pCatalog, int32_t type);
extern "C" int32_t ctgdEnableDebug(char *option);
extern "C" int32_t ctgdGetStatNum(char *option, void *res);
void ctgTestSetRspTableMeta();
@ -49,6 +48,8 @@ void ctgTestSetRspCTableMeta();
void ctgTestSetRspSTableMeta();
void ctgTestSetRspMultiSTableMeta();
extern int32_t clientConnRefPool;
enum {
CTGT_RSP_VGINFO = 1,
CTGT_RSP_TBMETA,
@ -151,10 +152,10 @@ void ctgTestInitLogFile() {
qDebugFlag = 159;
strcpy(tsLogDir, TD_LOG_DIR_PATH);
ctgdEnableDebug("api");
ctgdEnableDebug("meta");
ctgdEnableDebug("cache");
ctgdEnableDebug("lock");
ctgdEnableDebug("api", true);
ctgdEnableDebug("meta", true);
ctgdEnableDebug("cache", true);
ctgdEnableDebug("lock", true);
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
@ -1204,6 +1205,34 @@ void *ctgTestSetCtableMetaThread(void *param) {
}
void ctgTestFetchRows(TAOS_RES *result, int32_t *rows) {
TAOS_ROW row;
int num_fields = taos_num_fields(result);
TAOS_FIELD *fields = taos_fetch_fields(result);
char temp[256];
// fetch the records row by row
while ((row = taos_fetch_row(result))) {
(*rows)++;
memset(temp, 0, sizeof(temp));
taos_print_row(temp, row, fields, num_fields);
printf("\t[%s]\n", temp);
}
}
void ctgTestExecQuery(TAOS * taos, char* sql, bool fetch, int32_t *rows) {
TAOS_RES *result = taos_query(taos, sql);
int code = taos_errno(result);
ASSERT_EQ(code, 0);
if (fetch) {
ctgTestFetchRows(result, rows);
}
taos_free_result(result);
}
TEST(tableMeta, normalTable) {
struct SCatalog *pCtg = NULL;
SVgroupInfo vgInfo = {0};
@ -1245,7 +1274,7 @@ TEST(tableMeta, normalTable) {
memset(&vgInfo, 0, sizeof(vgInfo));
bool exists = false;
code = catalogGetCachedTableHashVgroup(pCtg, mockPointer, &n, &vgInfo, &exists);
code = catalogGetCachedTableHashVgroup(pCtg, &n, &vgInfo, &exists);
ASSERT_EQ(code, 0);
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
@ -1292,7 +1321,7 @@ TEST(tableMeta, normalTable) {
taosMemoryFree(tableMeta);
tableMeta = NULL;
catalogGetCachedTableMeta(pCtg, mockPointer, &n, &tableMeta);
catalogGetCachedTableMeta(pCtg, &n, &tableMeta);
ASSERT_EQ(code, 0);
ASSERT_EQ(tableMeta->vgId, 8);
ASSERT_EQ(tableMeta->tableType, TSDB_NORMAL_TABLE);
@ -1500,7 +1529,7 @@ TEST(tableMeta, superTableCase) {
}
tableMeta = NULL;
code = catalogGetCachedSTableMeta(pCtg, mockPointer, &n, &tableMeta);
code = catalogGetCachedSTableMeta(pCtg, &n, &tableMeta);
ASSERT_EQ(code, 0);
ASSERT_EQ(tableMeta->vgId, 0);
ASSERT_EQ(tableMeta->tableType, TSDB_SUPER_TABLE);
@ -2772,7 +2801,7 @@ TEST(apiTest, catalogChkAuth_test) {
bool pass = false;
bool exists = false;
code = catalogChkAuthFromCache(pCtg, mockPointer, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
code = catalogChkAuthFromCache(pCtg, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
ASSERT_EQ(code, 0);
ASSERT_EQ(exists, false);
@ -2790,7 +2819,7 @@ TEST(apiTest, catalogChkAuth_test) {
}
}
code = catalogChkAuthFromCache(pCtg, mockPointer, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
code = catalogChkAuthFromCache(pCtg, ctgTestUsername, ctgTestDbname, AUTH_TYPE_READ, &pass, &exists);
ASSERT_EQ(code, 0);
ASSERT_EQ(pass, true);
ASSERT_EQ(exists, true);
@ -3063,6 +3092,58 @@ TEST(apiTest, catalogGetDnodeList_test) {
catalogDestroy();
}
#ifdef INTEGRATION_TEST
TEST(intTest, autoCreateTableTest) {
struct SCatalog *pCtg = NULL;
TAOS *taos = taos_connect("localhost", "root", "taosdata", NULL, 0);
ASSERT_TRUE(NULL != taos);
ctgdEnableDebug("api", true);
ctgdEnableDebug("meta", true);
ctgdEnableDebug("cache", true);
ctgdEnableDebug("lock", true);
ctgTestExecQuery(taos, "drop database if exists db1", false, NULL);
ctgTestExecQuery(taos, "create database db1", false, NULL);
ctgTestExecQuery(taos, "create stable db1.st1 (ts timestamp, f1 int) tags(tg1 int)", false, NULL);
ctgTestExecQuery(taos, "insert into db1.tb1 using db1.st1 tags(1) values(now, 1)", false, NULL);
ctgdGetOneHandle(&pCtg);
while (true) {
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (2 != n) {
taosMsleep(50);
} else {
break;
}
}
uint64_t n = 0, m = 0;
ctgdGetStatNum("runtime.numOfOpDequeue", (void *)&n);
ctgTestExecQuery(taos, "insert into db1.tb1 using db1.st1 tags(1) values(now, 2)", false, NULL);
ctgTestExecQuery(taos, "insert into db1.tb1 values(now, 3)", false, NULL);
taosMsleep(1000);
ctgdGetStatNum("runtime.numOfOpDequeue", (void *)&m);
ASSERT_EQ(n, m);
ctgdEnableDebug("stopUpdate", true);
ctgTestExecQuery(taos, "alter table db1.st1 add column f2 double", false, NULL);
ctgdEnableDebug("stopUpdate", false);
ctgTestExecQuery(taos, "insert into db1.tb1 (ts, f1) values(now, 4)", false, NULL);
taos_close(taos);
}
#endif
int main(int argc, char **argv) {
testing::InitGoogleTest(&argc, argv);

View File

@ -96,6 +96,7 @@ extern "C" {
#define COMMAND_RESET_LOG "resetLog"
#define COMMAND_SCHEDULE_POLICY "schedulePolicy"
#define COMMAND_ENABLE_RESCHEDULE "enableReSchedule"
#define COMMAND_CATALOG_DEBUG "catalogDebug"
typedef struct SExplainGroup {
int32_t nodeNum;

View File

@ -571,6 +571,8 @@ static int32_t execAlterCmd(char* cmd, char* value, bool* processed) {
code = schedulerUpdatePolicy(atoi(value));
} else if (0 == strcasecmp(cmd, COMMAND_ENABLE_RESCHEDULE)) {
code = schedulerEnableReSchedule(atoi(value));
} else if (0 == strcasecmp(cmd, COMMAND_CATALOG_DEBUG)) {
code = ctgdHandleDbgCommand(value);
} else {
goto _return;
}
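
For reference, the value carried by the new catalogDebug command is handed verbatim to ctgdHandleDbgCommand, which expects an option name followed by a numeric flag (see the parser earlier in this commit). A hedged usage sketch at the C level — how the value string is produced on the SQL side is not shown here:

// Option/flag pairs accepted by ctgdHandleDbgCommand; buffers are used
// because the function takes a mutable char*.
char cmd1[] = "stopUpdate 1";  // pause catalog cache updates on every handle
ctgdHandleDbgCommand(cmd1);
char cmd2[] = "cache 1";       // enable catalog cache debug logging
ctgdHandleDbgCommand(cmd2);
char cmd3[] = "stopUpdate 0";  // resume cache updates
ctgdHandleDbgCommand(cmd3);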

View File

@ -30,10 +30,10 @@
// The numOfOutputGroups is specified by the physical plan and will not be affected by numOfGroups
struct STableListInfo {
bool oneTableForEachGroup;
int32_t numOfOuputGroups; // the data block will be generated one by one
int32_t* groupOffset; // keep the offset value for each group in the tableList
int32_t numOfOuputGroups; // the data block will be generated one by one
int32_t* groupOffset; // keep the offset value for each group in the tableList
SArray* pTableList;
SHashObj* map; // speedup acquire the tableQueryInfo by table uid
SHashObj* map; // speedup acquire the tableQueryInfo by table uid
uint64_t suid;
};
@ -1678,9 +1678,7 @@ uint64_t tableListGetSize(const STableListInfo* pTableList) {
return taosArrayGetSize(pTableList->pTableList);
}
uint64_t tableListGetSuid(const STableListInfo* pTableList) {
return pTableList->suid;
}
uint64_t tableListGetSuid(const STableListInfo* pTableList) { return pTableList->suid; }
STableKeyInfo* tableListGetInfo(const STableListInfo* pTableList, int32_t index) {
if (taosArrayGetSize(pTableList->pTableList) == 0) {
@ -1718,7 +1716,7 @@ int32_t tableListAddTableInfo(STableListInfo* pTableList, uint64_t uid, uint64_t
}
int32_t tableListGetGroupList(const STableListInfo* pTableList, int32_t ordinalGroupIndex, STableKeyInfo** pKeyInfo,
int32_t* size) {
int32_t* size) {
int32_t total = tableListGetOutputGroups(pTableList);
if (ordinalGroupIndex < 0 || ordinalGroupIndex >= total) {
return TSDB_CODE_INVALID_PARA;
@ -1728,7 +1726,7 @@ int32_t tableListGetGroupList(const STableListInfo* pTableList, int32_t ordinalG
// 1. only one group exists, and 2. one table exists for each group.
if (total == 1) {
*size = tableListGetSize(pTableList);
*pKeyInfo = (*size == 0)? NULL:taosArrayGet(pTableList->pTableList, 0);
*pKeyInfo = (*size == 0) ? NULL : taosArrayGet(pTableList->pTableList, 0);
return TSDB_CODE_SUCCESS;
} else if (total == tableListGetSize(pTableList)) {
*size = 1;
@ -1806,13 +1804,13 @@ void tableListClear(STableListInfo* pTableListInfo) {
}
static int32_t orderbyGroupIdComparFn(const void* p1, const void* p2) {
STableKeyInfo* pInfo1 = (STableKeyInfo*) p1;
STableKeyInfo* pInfo2 = (STableKeyInfo*) p2;
STableKeyInfo* pInfo1 = (STableKeyInfo*)p1;
STableKeyInfo* pInfo2 = (STableKeyInfo*)p2;
if (pInfo1->groupId == pInfo2->groupId) {
return 0;
} else {
return pInfo1->groupId < pInfo2->groupId? -1:1;
return pInfo1->groupId < pInfo2->groupId ? -1 : 1;
}
}
@ -1825,12 +1823,12 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
SArray* pList = taosArrayInit(4, sizeof(int32_t));
STableKeyInfo* pInfo = taosArrayGet(pTableListInfo->pTableList, 0);
uint64_t gid = pInfo->groupId;
uint64_t gid = pInfo->groupId;
int32_t start = 0;
taosArrayPush(pList, &start);
for(int32_t i = 1; i < size; ++i) {
for (int32_t i = 1; i < size; ++i) {
pInfo = taosArrayGet(pTableListInfo->pTableList, i);
if (pInfo->groupId != gid) {
taosArrayPush(pList, &i);
@ -1845,16 +1843,17 @@ static int32_t sortTableGroup(STableListInfo* pTableListInfo) {
return TDB_CODE_SUCCESS;
}
int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* group, bool groupSort) {
int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle* pHandle, SNodeList* group,
bool groupSort) {
int32_t code = TSDB_CODE_SUCCESS;
ASSERT(pTableListInfo->map != NULL);
bool groupByTbname = groupbyTbname(group);
bool groupByTbname = groupbyTbname(group);
size_t numOfTables = taosArrayGetSize(pTableListInfo->pTableList);
if (group == NULL || groupByTbname) {
for (int32_t i = 0; i < numOfTables; i++) {
STableKeyInfo* info = taosArrayGet(pTableListInfo->pTableList, i);
info->groupId = groupByTbname? info->uid:0;
info->groupId = groupByTbname ? info->uid : 0;
}
pTableListInfo->oneTableForEachGroup = groupByTbname;
@ -1878,7 +1877,7 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
// add all table entry in the hash map
size_t size = taosArrayGetSize(pTableListInfo->pTableList);
for(int32_t i = 0; i < size; ++i) {
for (int32_t i = 0; i < size; ++i) {
STableKeyInfo* p = taosArrayGet(pTableListInfo->pTableList, i);
taosHashPut(pTableListInfo->map, &p->uid, sizeof(uint64_t), &i, sizeof(int32_t));
}
@ -1889,7 +1888,7 @@ int32_t buildGroupIdMapForAllTables(STableListInfo* pTableListInfo, SReadHandle*
int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags, bool groupSort, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SNode* pTagCond, SNode* pTagIndexCond,
struct SExecTaskInfo* pTaskInfo) {
int64_t st = taosGetTimestampUs();
int64_t st = taosGetTimestampUs();
const char* idStr = GET_TASKID(pTaskInfo);
if (pHandle == NULL) {
@ -1919,7 +1918,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
return code;
}
pTaskInfo->cost.groupIdMapTime = (taosGetTimestampUs() - st1)/1000.0;
pTaskInfo->cost.groupIdMapTime = (taosGetTimestampUs() - st1) / 1000.0;
qDebug("generate group id map completed, elapsed time:%.2f ms %s", pTaskInfo->cost.groupIdMapTime, idStr);
return TSDB_CODE_SUCCESS;

View File

@ -321,7 +321,7 @@ static SArray* filterUnqualifiedTables(const SStreamScanInfo* pScanInfo, const S
}
int32_t qUpdateQualifiedTableId(qTaskInfo_t tinfo, const SArray* tableIdList, bool isAdd) {
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
if (isAdd) {
qDebug("add %d tables id into query list, %s", (int32_t)taosArrayGetSize(tableIdList), pTaskInfo->id.str);
@ -473,7 +473,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
qDebug("subplan task create completed, TID:0x%" PRIx64 " QID:0x%" PRIx64, taskId, pSubplan->id.queryId);
_error:
_error:
// if failed to add ref for all tables in this query, abort current query
return code;
}
@ -1027,10 +1027,11 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
if (pTableScanInfo->dataReader == NULL) {
STableKeyInfo* pList = tableListGetInfo(pTaskInfo->pTableInfoList, 0);
int32_t num = tableListGetSize(pTaskInfo->pTableInfoList);
int32_t num = tableListGetSize(pTaskInfo->pTableInfoList);
if (tsdbReaderOpen(pTableScanInfo->readHandle.vnode, &pTableScanInfo->cond, pList, num,
&pTableScanInfo->dataReader, NULL) < 0 || pTableScanInfo->dataReader == NULL) {
&pTableScanInfo->dataReader, NULL) < 0 ||
pTableScanInfo->dataReader == NULL) {
ASSERT(0);
}
}
@ -1071,14 +1072,14 @@ int32_t qStreamPrepareScan(qTaskInfo_t tinfo, STqOffsetVal* pOffset, int8_t subT
initQueryTableDataCondForTmq(&pTaskInfo->streamInfo.tableCond, sContext, &mtInfo);
pTaskInfo->streamInfo.tableCond.twindows.skey = pOffset->ts;
if (pTaskInfo->pTableInfoList == NULL) {
if (pTaskInfo->pTableInfoList == NULL) {
pTaskInfo->pTableInfoList = tableListCreate();
}
tableListAddTableInfo(pTaskInfo->pTableInfoList, mtInfo.uid, 0);
STableKeyInfo* pList = tableListGetInfo(pTaskInfo->pTableInfoList, 0);
int32_t size = tableListGetSize(pTaskInfo->pTableInfoList);
int32_t size = tableListGetSize(pTaskInfo->pTableInfoList);
ASSERT(size == 1);
tsdbReaderOpen(pInfo->vnode, &pTaskInfo->streamInfo.tableCond, pList, size, &pInfo->dataReader, NULL);

View File

@ -1062,15 +1062,18 @@ SOperatorInfo* createDataBlockInfoScanOperator(SReadHandle* readHandle, SBlockDi
int32_t code = initTableblockDistQueryCond(pBlockScanNode->suid, &cond);
if (code != TSDB_CODE_SUCCESS) {
return NULL;
goto _error;
}
STableListInfo* pTableListInfo = pTaskInfo->pTableInfoList;
size_t num = tableListGetSize(pTableListInfo);
void* pList = tableListGetInfo(pTableListInfo, 0);
tsdbReaderOpen(readHandle->vnode, &cond, pList, num, &pInfo->pHandle, pTaskInfo->id.str);
code = tsdbReaderOpen(readHandle->vnode, &cond, pList, num, &pInfo->pHandle, pTaskInfo->id.str);
cleanupQueryTableDataCond(&cond);
if (code != 0) {
goto _error;
}
}
pInfo->readHandle = *readHandle;
@ -1164,6 +1167,7 @@ static SSDataBlock* readPreVersionData(SOperatorInfo* pTableScanOp, uint64_t tbU
GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
terrno = code;
T_LONG_JMP(pTaskInfo->env, code);
return NULL;
}
@ -2418,8 +2422,9 @@ SOperatorInfo* createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhys
if (pHandle->initTableReader) {
pTSInfo->scanMode = TABLE_SCAN__TABLE_ORDER;
pTSInfo->dataReader = NULL;
if (tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, pList, num, &pTSInfo->dataReader, NULL) < 0) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
int32_t code = tsdbReaderOpen(pHandle->vnode, &pTSInfo->cond, pList, num, &pTSInfo->dataReader, NULL);
if (code != 0) {
terrno = code;
destroyTableScanOperatorInfo(pTableScanOp);
goto _error;
}
@ -4284,131 +4289,6 @@ int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle*
return TSDB_CODE_SUCCESS;
}
int32_t createMultipleDataReaders2(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle,
STableListInfo* pTableListInfo, int32_t tableStartIdx, int32_t tableEndIdx,
STsdbReader** ppReader, const char* idstr) {
STsdbReader* pReader = NULL;
void* pStart = tableListGetInfo(pTableListInfo, tableStartIdx);
int32_t num = tableEndIdx - tableStartIdx + 1;
int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, pStart, num, &pReader, idstr);
if (code != 0) {
return code;
}
*ppReader = pReader;
return TSDB_CODE_SUCCESS;
}
static int32_t loadDataBlockFromOneTable2(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
SSDataBlock* pBlock, uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
STableMergeScanInfo* pInfo = pOperator->info;
uint64_t uid = pBlock->info.uid;
SFileBlockLoadRecorder* pCost = &pTableScanInfo->readRecorder;
pCost->totalBlocks += 1;
pCost->totalRows += pBlock->info.rows;
*status = pInfo->dataBlockLoadFlag;
if (pTableScanInfo->pFilterNode != NULL ||
overlapWithTimeWindow(&pTableScanInfo->interval, &pBlock->info, pTableScanInfo->cond.order)) {
(*status) = FUNC_DATA_REQUIRED_DATA_LOAD;
}
SDataBlockInfo* pBlockInfo = &pBlock->info;
taosMemoryFreeClear(pBlock->pBlockAgg);
if (*status == FUNC_DATA_REQUIRED_FILTEROUT) {
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->filterOutBlocks += 1;
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_NOT_LOAD) {
qDebug("%s data block skipped, brange:%" PRId64 "-%" PRId64 ", rows:%d", GET_TASKID(pTaskInfo),
pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows);
pCost->skipBlocks += 1;
// clear all data in pBlock that are set when handing the previous block
for (int32_t i = 0; i < taosArrayGetSize(pBlock->pDataBlock); ++i) {
SColumnInfoData* pcol = taosArrayGet(pBlock->pDataBlock, i);
pcol->pData = NULL;
}
return TSDB_CODE_SUCCESS;
} else if (*status == FUNC_DATA_REQUIRED_STATIS_LOAD) {
pCost->loadBlockStatis += 1;
bool allColumnsHaveAgg = true;
SColumnDataAgg** pColAgg = NULL;
STsdbReader* reader = pTableScanInfo->pReader;
tsdbRetrieveDatablockSMA(reader, &pColAgg, &allColumnsHaveAgg);
if (allColumnsHaveAgg == true) {
int32_t numOfCols = taosArrayGetSize(pBlock->pDataBlock);
// todo create this buffer during creating operator
if (pBlock->pBlockAgg == NULL) {
pBlock->pBlockAgg = taosMemoryCalloc(numOfCols, POINTER_BYTES);
}
for (int32_t i = 0; i < numOfCols; ++i) {
SColMatchItem* pColMatchInfo = taosArrayGet(pTableScanInfo->matchInfo.pList, i);
if (!pColMatchInfo->needOutput) {
continue;
}
pBlock->pBlockAgg[pColMatchInfo->dstSlotId] = pColAgg[i];
}
return TSDB_CODE_SUCCESS;
} else { // failed to load the block sma data, data block statistics does not exist, load data block instead
*status = FUNC_DATA_REQUIRED_DATA_LOAD;
}
}
ASSERT(*status == FUNC_DATA_REQUIRED_DATA_LOAD);
pCost->totalCheckedRows += pBlock->info.rows;
pCost->loadBlocks += 1;
STsdbReader* reader = pTableScanInfo->pReader;
SArray* pCols = tsdbRetrieveDataBlock(reader, NULL);
if (pCols == NULL) {
return terrno;
}
relocateColumnData(pBlock, pTableScanInfo->matchInfo.pList, pCols, true);
// currently only the tbname pseudo column
if (pTableScanInfo->pseudoSup.numOfExprs > 0) {
int32_t code =
addTagPseudoColumnData(&pTableScanInfo->readHandle, pTableScanInfo->pseudoSup.pExprInfo,
pTableScanInfo->pseudoSup.numOfExprs, pBlock, pBlock->info.rows, GET_TASKID(pTaskInfo));
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pTaskInfo->env, code);
}
}
if (pTableScanInfo->pFilterNode != NULL) {
int64_t st = taosGetTimestampMs();
doFilter(pTableScanInfo->pFilterNode, pBlock, &pTableScanInfo->matchInfo, NULL);
double el = (taosGetTimestampUs() - st) / 1000.0;
pTableScanInfo->readRecorder.filterTime += el;
if (pBlock->info.rows == 0) {
pCost->filterOutBlocks += 1;
qDebug("%s data block filter out, brange:%" PRId64 "-%" PRId64 ", rows:%d, elapsed time:%.2f ms",
GET_TASKID(pTaskInfo), pBlockInfo->window.skey, pBlockInfo->window.ekey, pBlockInfo->rows, el);
} else {
qDebug("%s data block filter applied, elapsed time:%.2f ms", GET_TASKID(pTaskInfo), el);
}
}
return TSDB_CODE_SUCCESS;
}
// todo refactor
static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
int32_t readerIdx, SSDataBlock* pBlock, uint32_t* status) {
@ -4535,7 +4415,7 @@ typedef struct STableMergeScanSortSourceParam {
SSDataBlock* inputBlock;
} STableMergeScanSortSourceParam;
static SSDataBlock* getTableDataBlockTemp(void* param) {
static SSDataBlock* getTableDataBlockImpl(void* param) {
STableMergeScanSortSourceParam* source = param;
SOperatorInfo* pOperator = source->pOperator;
STableMergeScanInfo* pInfo = pOperator->info;
@ -4552,7 +4432,11 @@ static SSDataBlock* getTableDataBlockTemp(void* param) {
void* p = tableListGetInfo(pInfo->tableListInfo, readIdx + pInfo->tableStartIndex);
SReadHandle* pHandle = &pInfo->readHandle;
tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->pReader, GET_TASKID(pTaskInfo));
int32_t code = tsdbReaderOpen(pHandle->vnode, pQueryCond, p, 1, &pInfo->pReader, GET_TASKID(pTaskInfo));
if (code != 0) {
T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
STsdbReader* reader = pInfo->pReader;
while (tsdbNextDataBlock(reader)) {
@ -4603,55 +4487,6 @@ static SSDataBlock* getTableDataBlockTemp(void* param) {
pInfo->pReader = NULL;
return NULL;
}
static SSDataBlock* getTableDataBlock2(void* param) {
STableMergeScanSortSourceParam* source = param;
SOperatorInfo* pOperator = source->pOperator;
int64_t uid = source->uid;
SSDataBlock* pBlock = source->inputBlock;
STableMergeScanInfo* pTableScanInfo = pOperator->info;
int64_t st = taosGetTimestampUs();
blockDataCleanup(pBlock);
STsdbReader* reader = pTableScanInfo->pReader;
while (tsdbTableNextDataBlock(reader, uid)) {
if (isTaskKilled(pOperator->pTaskInfo)) {
T_LONG_JMP(pOperator->pTaskInfo->env, TSDB_CODE_TSC_QUERY_CANCELLED);
}
// process this data block based on the probabilities
bool processThisBlock = processBlockWithProbability(&pTableScanInfo->sample);
if (!processThisBlock) {
continue;
}
blockDataCleanup(pBlock);
int32_t rows = 0;
tsdbRetrieveDataBlockInfo(reader, &rows, &pBlock->info.uid, &pBlock->info.window);
blockDataEnsureCapacity(pBlock, rows);
pBlock->info.rows = rows;
uint32_t status = 0;
int32_t code = loadDataBlockFromOneTable2(pOperator, pTableScanInfo, pBlock, &status);
if (code != TSDB_CODE_SUCCESS) {
T_LONG_JMP(pOperator->pTaskInfo->env, code);
}
// current block is filter out according to filter condition, continue load the next block
if (status == FUNC_DATA_REQUIRED_FILTEROUT || pBlock->info.rows == 0) {
continue;
}
pBlock->info.groupId = getTableGroupId(pOperator->pTaskInfo->pTableInfoList, pBlock->info.uid);
pOperator->resultInfo.totalRows = pTableScanInfo->readRecorder.totalRows;
pTableScanInfo->readRecorder.elapsedTime += (taosGetTimestampUs() - st) / 1000.0;
return pBlock;
}
return NULL;
}
static SSDataBlock* getTableDataBlock(void* param) {
STableMergeScanSortSourceParam* source = param;
@ -4761,7 +4596,7 @@ int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
pInfo->pSortInputBlock, pTaskInfo->id.str);
tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlockTemp, NULL, NULL);
tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlockImpl, NULL, NULL);
// one table has one data block
int32_t numOfTable = tableEndIdx - tableStartIdx + 1;

View File

@ -122,14 +122,14 @@ static int32_t udfSpawnUdfd(SUdfdData *pData) {
taosGetCpuCores(&numCpuCores);
snprintf(thrdPoolSizeEnvItem, 32, "%s=%d", "UV_THREADPOOL_SIZE", (int)numCpuCores * 2);
char pathTaosdLdLib[512] = {0};
char pathTaosdLdLib[512] = {0};
size_t taosdLdLibPathLen = sizeof(pathTaosdLdLib);
int ret = uv_os_getenv("LD_LIBRARY_PATH", pathTaosdLdLib, &taosdLdLibPathLen);
if (ret != UV_ENOBUFS) {
taosdLdLibPathLen = strlen(pathTaosdLdLib);
}
char udfdPathLdLib[1024] = {0};
char udfdPathLdLib[1024] = {0};
size_t udfdLdLibPathLen = strlen(tsUdfdLdLibPath);
strncpy(udfdPathLdLib, tsUdfdLdLibPath, udfdLdLibPathLen);
udfdPathLdLib[udfdLdLibPathLen] = ':';
@ -362,7 +362,7 @@ typedef struct SUdfcProxy {
SArray *udfStubs; // SUdfcFuncStub
uv_mutex_t udfcUvMutex;
int8_t initialized;
int8_t initialized;
} SUdfcProxy;
SUdfcProxy gUdfcProxy = {0};

View File

@ -280,7 +280,7 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t execId, SDa
}
atomic_add_fetch_32(&pJob->resNumOfRows, rsp->affectedRows);
SCH_TASK_DLOG("submit succeed, affectedRows:%d", rsp->affectedRows);
SCH_TASK_DLOG("submit succeed, affectedRows:%d, blocks:%d", rsp->affectedRows, rsp->nBlocks);
SCH_LOCK(SCH_WRITE, &pJob->resLock);
if (pJob->execRes.res) {

View File

@ -127,7 +127,7 @@ static void uvFreeCb(uv_handle_t* handle);
static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg);
static void uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static int uvPrepareSendData(SSvrMsg* msg, uv_buf_t* wb);
static void uvStartSendResp(SSvrMsg* msg);
static void uvNotifyLinkBrokenToApp(SSvrConn* conn);
@ -384,7 +384,7 @@ static void uvOnPipeWriteCb(uv_write_t* req, int status) {
taosMemoryFree(req);
}
static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
static int uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
SSvrConn* pConn = smsg->pConn;
STransMsg* pMsg = &smsg->msg;
if (pMsg->pCont == 0) {
@ -397,6 +397,13 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
pHead->hasEpSet = pMsg->info.hasEpSet;
pHead->magicNum = htonl(TRANS_MAGIC_NUM);
// handle invalid drop_task resp, TD-20098
if (pMsg->msgType == TDMT_SCH_DROP_TASK && pMsg->code == TSDB_CODE_VND_INVALID_VGROUP_ID) {
transQueuePop(&pConn->srvMsgs);
destroySmsg(smsg);
return -1;
}
if (pConn->status == ConnNormal) {
pHead->msgType = (0 == pMsg->msgType ? pConn->inType + 1 : pMsg->msgType);
if (smsg->type == Release) pHead->msgType = 0;
@ -431,6 +438,7 @@ static void uvPrepareSendData(SSvrMsg* smsg, uv_buf_t* wb) {
wb->base = (char*)pHead;
wb->len = len;
return 0;
}
static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg) {
@ -440,7 +448,9 @@ static FORCE_INLINE void uvStartSendRespImpl(SSvrMsg* smsg) {
}
uv_buf_t wb;
uvPrepareSendData(smsg, &wb);
if (uvPrepareSendData(smsg, &wb) < 0) {
return;
}
transRefSrvHandle(pConn);
uv_write_t* req = transReqQueuePush(&pConn->wreqQueue);
@ -451,8 +461,9 @@ static void uvStartSendResp(SSvrMsg* smsg) {
SSvrConn* pConn = smsg->pConn;
if (pConn->broken == true) {
// persist by
transFreeMsg(smsg->msg.pCont);
taosMemoryFree(smsg);
destroySmsg(smsg);
// transFreeMsg(smsg->msg.pCont);
// taosMemoryFree(smsg);
transUnrefSrvHandle(pConn);
return;
}
@ -748,10 +759,11 @@ void uvOnConnectionCb(uv_stream_t* q, ssize_t nread, const uv_buf_t* buf) {
return;
}
transSockInfo2Str(&sockname, pConn->src);
struct sockaddr_in addr = *(struct sockaddr_in*)&sockname;
struct sockaddr_in addr = *(struct sockaddr_in*)&peername;
pConn->clientIp = addr.sin_addr.s_addr;
pConn->port = ntohs(addr.sin_port);
uv_read_start((uv_stream_t*)(pConn->pTcp), uvAllocRecvBufferCb, uvOnRecvCb);
} else {

View File

@ -340,7 +340,7 @@ TAOS_DEFINE_ERROR(TSDB_CODE_TDB_NO_CACHE_LAST_ROW, "TSDB no cache last ro
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_NOT_EXIST, "Table not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_ALREADY_EXIST, "Stable already exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_STB_NOT_EXIST, "Stable not exists")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TABLE_RECREATED, "Table re-created")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_INVALID_TABLE_SCHEMA_VER, "Table schema is old")
TAOS_DEFINE_ERROR(TSDB_CODE_TDB_TDB_ENV_OPEN_ERROR, "TDB env open error")
// query

View File

@ -25,7 +25,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -23,7 +23,8 @@ class TDTestCase:
[TD-11510] taosBenchmark test cases
"""
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -27,7 +27,8 @@ class TDTestCase:
"""
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -29,7 +29,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -24,7 +24,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -25,7 +25,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -25,7 +25,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -52,7 +53,7 @@ class TDTestCase:
return paths[0]
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -28,7 +28,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -51,7 +52,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -28,7 +28,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -51,7 +52,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -49,7 +50,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -49,7 +50,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -27,7 +27,8 @@ class TDTestCase:
'''
return
def init(self, conn, logSql, replicaVarl=1):
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.tmpdir = "tmp"
@ -50,7 +51,7 @@ class TDTestCase:
return buildPath
def run(self):
tdSql.prepare()
tdSql.prepare(replica=f"{self.replicaVar}")
tdSql.execute("drop database if exists db")
tdSql.execute("create database db days 11 keep 3649 blocks 8 ")

View File

@ -15,6 +15,7 @@ class TDTestCase:
"jniDebugFlag":143 ,"simDebugFlag":143,"dDebugFlag":143, "dDebugFlag":143,"vDebugFlag":143,"mDebugFlag":143,"qDebugFlag":143,
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"udfDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)

View File

@ -23,6 +23,7 @@ class TDTestCase:
return
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -27,6 +27,7 @@ TS_TYPE_COL = [ TS_COL, ]
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -21,6 +21,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()

View File

@ -21,6 +21,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.dbname = 'db'

View File

@ -109,6 +109,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -111,6 +111,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -111,6 +111,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -285,6 +285,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -45,6 +45,7 @@ class TDTestCase:
# print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -11,6 +11,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -174,6 +174,7 @@ class TDTestCase:
print ("===================: ", updatecfgDict)
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -14,6 +14,7 @@ import subprocess
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)

View File

@ -16,6 +16,7 @@ class TDTestCase:
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":0}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)

View File

@ -16,6 +16,7 @@ class TDTestCase:
"jniDebugFlag": 143, "simDebugFlag": 143, "dDebugFlag": 143, "dDebugFlag": 143, "vDebugFlag": 143, "mDebugFlag": 143, "qDebugFlag": 143,
"wDebugFlag": 143, "sDebugFlag": 143, "tsdbDebugFlag": 143, "tqDebugFlag": 143, "fsDebugFlag": 143, "fnDebugFlag": 143 ,"udf":1}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)

View File

@ -20,6 +20,7 @@ class MyDnodes(TDDnodes):
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
self.TDDnodes = None
self.depoly_cluster(3)

View File

@ -16,6 +16,7 @@ import threading
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)

View File

@ -13,6 +13,7 @@ import subprocess
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), logSql)

View File

@ -155,6 +155,7 @@ class User:
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())

View File

@ -12,6 +12,7 @@ from util.dnodes import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),logSql)
self.buffer_boundary = [3,4097,8193,12289,16384]

View File

@ -21,6 +21,7 @@ from util import constant
from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()

View File

@ -22,6 +22,7 @@ from util.sqlset import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.setsql = TDSetSql()

View File

@ -143,6 +143,7 @@ class BSMAschema:
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.precision = "ms"

View File

@ -45,6 +45,7 @@ NTBNAME = "nt1"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), True)

View File

@ -29,6 +29,7 @@ class TDTestCase:
"wDebugFlag":143,"sDebugFlag":143,"tsdbDebugFlag":143,"tqDebugFlag":143 ,"fsDebugFlag":143 ,"fnDebugFlag":143}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -32,6 +32,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.special_name = ['!','@','#','$','%','^','&','*','(',')','[',']','{','}',\

View File

@ -24,6 +24,7 @@ from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), True)
self.dbname = 'db_test'

View File

@ -30,6 +30,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), False)
self._conn = conn

View File

@ -63,6 +63,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# tdSql.prepare()

View File

@ -9,6 +9,7 @@ import threading
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)

View File

@ -8,6 +8,7 @@ import time
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self.dbname = "test"

View File

@ -58,6 +58,7 @@ DATA_PRE2 = f"data2"
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.taos_cfg_path = tdDnodes.dnodes[0].cfgPath

View File

@ -61,6 +61,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
# tdSql.init(conn.cursor())
# tdSql.prepare()

View File

@ -25,6 +25,7 @@ import json
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), logSql)
self._conn = conn

View File

@ -29,6 +29,7 @@ if platform.system().lower() == 'windows':
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(), False)
self._conn = conn

View File

@ -21,6 +21,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# prepare data

View File

@ -19,6 +19,7 @@ from util.common import *
class TDTestCase:
updatecfgDict = {'ttlUnit':5,'ttlPushInterval':3}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.ntbname = 'ntb'

View File

@ -6,6 +6,7 @@ from util.common import *
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
self.ts = 1537146000000

View File

@ -58,6 +58,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# tdSql.prepare()

View File

@ -58,6 +58,7 @@ class TDTestCase:
# init
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor())
# tdSql.prepare()

View File

@ -135,6 +135,7 @@ class TDTestCase:
updatecfgDict = {"querySmaOptimize": 1}
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor(), False)
self.precision = "ms"

View File

@ -21,6 +21,7 @@ from util.common import *
from util.sqlset import TDSetSql
class TDTestCase:
def init(self, conn, logSql, replicaVar=1):
self.replicaVar = int(replicaVar)
tdLog.debug("start to execute %s" % __file__)
tdSql.init(conn.cursor(),logSql)
self.setsql = TDSetSql()

Some files were not shown because too many files have changed in this diff.