Merge remote-tracking branch 'origin/3.0' into feature/3.0_liaohj

Haojun Liao 2022-04-01 14:53:18 +08:00
commit 13e7310315
91 changed files with 5120 additions and 3186 deletions

View File

@ -5,6 +5,7 @@ AccessModifierOffset: -1
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: true
AlignConsecutiveMacros: true
AlignEscapedNewlinesLeft: true
AlignOperands: true
AlignTrailingComments: true

View File

@ -28,7 +28,7 @@ int32_t init_env() {
return -1;
}
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 1");
TAOS_RES* pRes = taos_query(pConn, "create database if not exists abc1 vgroups 2");
if (taos_errno(pRes) != 0) {
printf("error in create db, reason:%s\n", taos_errstr(pRes));
return -1;
@ -42,25 +42,33 @@ int32_t init_env() {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create stable if not exists st1 (ts timestamp, k int) tags(a int)");
pRes =
taos_query(pConn, "create stable if not exists st1 (ts timestamp, c1 int, c2 float, c3 binary(10)) tags(t1 int)");
if (taos_errno(pRes) != 0) {
printf("failed to create super table st1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists tu1 using st1 tags(1)");
pRes = taos_query(pConn, "create table if not exists ct0 using st1 tags(1000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists tu2 using st1 tags(2)");
pRes = taos_query(pConn, "create table if not exists ct1 using st1 tags(2000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu2, reason:%s\n", taos_errstr(pRes));
return -1;
}
pRes = taos_query(pConn, "create table if not exists ct3 using st1 tags(3000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
return 0;
}
@ -82,12 +90,40 @@ int32_t create_topic() {
/*const char* sql = "select * from tu1";*/
/*pRes = tmq_create_topic(pConn, "test_stb_topic_1", sql, strlen(sql));*/
pRes = taos_query(pConn, "create topic test_stb_topic_1 as select * from tu1");
pRes = taos_query(pConn, "create topic topic_ctb_column as select ts, c1 from ct1");
if (taos_errno(pRes) != 0) {
printf("failed to create topic test_stb_topic_1, reason:%s\n", taos_errstr(pRes));
printf("failed to create topic topic_ctb_column, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
#if 0
pRes = taos_query(pConn, "insert into tu1 values(now, 1, 1.0, 'bi1')");
if (taos_errno(pRes) != 0) {
printf("failed to insert, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tu1 values(now+1d, 1, 1.0, 'bi1')");
if (taos_errno(pRes) != 0) {
printf("failed to insert, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tu2 values(now, 2, 2.0, 'bi2')");
if (taos_errno(pRes) != 0) {
printf("failed to insert, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into tu2 values(now+1d, 2, 2.0, 'bi2')");
if (taos_errno(pRes) != 0) {
printf("failed to insert, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
#endif
taos_close(pConn);
return 0;
}
@ -115,7 +151,7 @@ tmq_t* build_consumer() {
tmq_list_t* build_topic_list() {
tmq_list_t* topic_list = tmq_list_new();
tmq_list_append(topic_list, "test_stb_topic_1");
tmq_list_append(topic_list, "topic_ctb_column");
return topic_list;
}
@ -215,8 +251,8 @@ int main(int argc, char* argv[]) {
if (argc > 1) {
printf("env init\n");
code = init_env();
}
create_topic();
}
tmq_t* tmq = build_consumer();
tmq_list_t* topic_list = build_topic_list();
/*perf_loop(tmq, topic_list);*/
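As an aside, a minimal sketch (not part of this commit; the connection parameters and the abc1 database name are assumptions carried over from init_env() above) of pushing one row into the renamed child table so that topic_ctb_column has data for the consumer to deliver. It reuses only API calls already shown in this file:

// Hypothetical helper, not in the commit: insert one row into ct1 so that
// topic_ctb_column (select ts, c1 from ct1) has something for the consumer to poll.
static int32_t insert_demo_row() {
  TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
  if (pConn == NULL) return -1;
  // matches the new schema: st1 (ts timestamp, c1 int, c2 float, c3 binary(10))
  TAOS_RES* pRes = taos_query(pConn, "insert into abc1.ct1 values(now, 1, 1.0, 'bin1')");
  int32_t   code = taos_errno(pRes);
  if (code != 0) {
    printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
  }
  taos_free_result(pRes);
  taos_close(pConn);
  return code == 0 ? 0 : -1;
}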

View File

@ -20,7 +20,7 @@
#include "taos.h"
int32_t init_env() {
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 7010);
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}
@ -65,7 +65,7 @@ int32_t init_env() {
int32_t create_stream() {
printf("create stream\n");
TAOS_RES* pRes;
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 7010);
TAOS* pConn = taos_connect("localhost", "root", "taosdata", NULL, 0);
if (pConn == NULL) {
return -1;
}

View File

@ -37,6 +37,14 @@ enum {
TMQ_MSG_TYPE__EP_RSP,
};
enum {
STREAM_TRIGGER__AT_ONCE = 1,
STREAM_TRIGGER__WINDOW_CLOSE,
STREAM_TRIGGER__BY_COUNT,
STREAM_TRIGGER__BY_BATCH_COUNT,
STREAM_TRIGGER__BY_EVENT_TIME,
};
typedef struct {
uint32_t numOfTables;
SArray* pGroupList;
@ -59,7 +67,10 @@ typedef struct SDataBlockInfo {
int32_t rowSize;
int16_t numOfCols;
int16_t hasVarCol;
union {int64_t uid; int64_t blockId;};
union {
int64_t uid;
int64_t blockId;
};
int64_t groupId; // no need to serialize
} SDataBlockInfo;

View File

@ -524,6 +524,7 @@ typedef struct {
int8_t walLevel;
int8_t quorum;
int8_t cacheLastRow;
int8_t replications;
} SAlterDbReq;
int32_t tSerializeSAlterDbReq(void* buf, int32_t bufLen, SAlterDbReq* pReq);

View File

@ -60,8 +60,10 @@ int32_t parseNatualDuration(const char* token, int32_t tokenLen, int64_t* durati
int32_t taosParseTime(const char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t dayligth);
void deltaToUtcInitOnce();
char getPrecisionUnit(int32_t precision);
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision);
int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit);
void taosFormatUtcTime(char *buf, int32_t bufLen, int64_t time, int32_t precision);

View File

@ -82,21 +82,21 @@
#define TK_SINGLE_STABLE 64
#define TK_STREAM_MODE 65
#define TK_RETENTIONS 66
#define TK_TABLE 67
#define TK_NK_LP 68
#define TK_NK_RP 69
#define TK_STABLE 70
#define TK_ADD 71
#define TK_COLUMN 72
#define TK_MODIFY 73
#define TK_RENAME 74
#define TK_TAG 75
#define TK_SET 76
#define TK_NK_EQ 77
#define TK_USING 78
#define TK_TAGS 79
#define TK_NK_DOT 80
#define TK_NK_COMMA 81
#define TK_NK_COMMA 67
#define TK_TABLE 68
#define TK_NK_LP 69
#define TK_NK_RP 70
#define TK_STABLE 71
#define TK_ADD 72
#define TK_COLUMN 73
#define TK_MODIFY 74
#define TK_RENAME 75
#define TK_TAG 76
#define TK_SET 77
#define TK_NK_EQ 78
#define TK_USING 79
#define TK_TAGS 80
#define TK_NK_DOT 81
#define TK_COMMENT 82
#define TK_BOOL 83
#define TK_TINYINT 84

View File

@ -15,5 +15,10 @@
#include "cmdnodes.h"
#include "tmsg.h"
#include "plannodes.h"
int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp);
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp);

View File

@ -38,7 +38,9 @@ typedef struct SDatabaseOptions {
int32_t fsyncPeriod;
int32_t maxRowsPerBlock;
int32_t minRowsPerBlock;
int32_t keep;
int32_t keep0;
int32_t keep1;
int32_t keep2;
int32_t precision;
int32_t quorum;
int32_t replica;
@ -76,7 +78,9 @@ typedef struct SAlterDatabaseStmt {
typedef struct STableOptions {
ENodeType type;
int32_t keep;
int32_t keep0;
int32_t keep1;
int32_t keep2;
int32_t ttl;
char comments[TSDB_STB_COMMENT_LEN];
SNodeList* pSma;

View File

@ -215,6 +215,8 @@ int32_t nodesStringToNode(const char* pStr, SNode** pNode);
int32_t nodesListToString(const SNodeList* pList, bool format, char** pStr, int32_t* pLen);
int32_t nodesStringToList(const char* pStr, SNodeList** pList);
int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len);
#ifdef __cplusplus
}
#endif

View File

@ -314,6 +314,7 @@ bool nodesIsTimeorderQuery(const SNode* pQuery);
bool nodesIsTimelineQuery(const SNode* pQuery);
void* nodesGetValueFromNode(SValueNode *pNode);
char* nodesGetStrValueFromNode(SValueNode *pNode);
#ifdef __cplusplus
}

View File

@ -53,6 +53,7 @@ typedef struct SIndexMeta {
} SIndexMeta;
/*
* ASSERT(sizeof(SCTableMeta) == 24)
* ASSERT(tableType == TSDB_CHILD_TABLE)
@ -235,6 +236,11 @@ extern int32_t (*queryProcessMsgRsp[TDMT_MAX])(void* output, char* msg, int32_t
} \
} while (0)
#define QRY_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
#define QRY_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
#define QRY_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
#ifdef __cplusplus
}
#endif

View File

@ -157,6 +157,7 @@ typedef struct SWalReadHandle {
int64_t curVersion;
int64_t capacity;
int64_t status; // if cursor valid
TdThreadMutex mutex;
SWalHead *pHead;
} SWalReadHandle;
#pragma pack(pop)
@ -191,6 +192,7 @@ int32_t walEndSnapshot(SWal *);
SWalReadHandle *walOpenReadHandle(SWal *);
void walCloseReadHandle(SWalReadHandle *);
int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver);
int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalReadHead **ppHead);
// deprecated
#if 0

View File

@ -21,9 +21,11 @@ extern "C" {
#endif
int32_t taosNewProc(char **args);
void taosWaitProc(int32_t pid);
void taosKillProc(int32_t pid);
bool taosProcExist(int32_t pid);
void taosSetProcName(int32_t argc, char **argv, const char *name);
void taosSetProcPath(int32_t argc, char **argv);
bool taosProcExists(int32_t pid);
#ifdef __cplusplus
}

View File

@ -46,6 +46,14 @@ extern "C" {
#define MILLISECOND_PER_DAY (MILLISECOND_PER_HOUR * 24)
#define MILLISECOND_PER_WEEK (MILLISECOND_PER_DAY * 7)
#define NANOSECOND_PER_USEC (1000L)
#define NANOSECOND_PER_MSEC (1000000L)
#define NANOSECOND_PER_SEC (1000000000L)
#define NANOSECOND_PER_MINUTE (NANOSECOND_PER_SEC * 60)
#define NANOSECOND_PER_HOUR (NANOSECOND_PER_MINUTE * 60)
#define NANOSECOND_PER_DAY (NANOSECOND_PER_HOUR * 24)
#define NANOSECOND_PER_WEEK (NANOSECOND_PER_DAY * 7)
int32_t taosGetTimeOfDay(struct timeval *tv);
// @return timestamp in seconds

View File

@ -479,6 +479,9 @@ int32_t* taosGetErrno();
#define TSDB_CODE_PAR_INVALID_ENDPOINT TAOS_DEF_ERROR_CODE(0, 0x2613)
#define TSDB_CODE_PAR_EXPRIE_STATEMENT TAOS_DEF_ERROR_CODE(0, 0x2614)
#define TSDB_CODE_PAR_INTERVAL_VALUE_TOO_SMALL TAOS_DEF_ERROR_CODE(0, 0x2615)
#define TSDB_CODE_PAR_DB_NOT_SPECIFIED TAOS_DEF_ERROR_CODE(0, 0x2616)
#define TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME TAOS_DEF_ERROR_CODE(0, 0x2617)
#define TSDB_CODE_PAR_CORRESPONDING_STABLE_ERR TAOS_DEF_ERROR_CODE(0, 0x2618)
#ifdef __cplusplus
}

View File

@ -224,6 +224,7 @@ typedef enum ELogicConditionType {
#define TSDB_APP_NAME_LEN TSDB_UNI_LEN
#define TSDB_STB_COMMENT_LEN 1024
/**
* In some scenarios uint16_t (0~65535) is used to store the row length.
* - Firstly, we use 65531 (65535 - 4), as the SDataRow/SKVRow contains a 4-byte header.
@ -303,13 +304,13 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_TOTAL_BLOCKS 10000
#define TSDB_DEFAULT_TOTAL_BLOCKS 6
#define TSDB_MIN_DAYS_PER_FILE 1
#define TSDB_MAX_DAYS_PER_FILE 3650
#define TSDB_DEFAULT_DAYS_PER_FILE 10
#define TSDB_MIN_DAYS_PER_FILE (1 * 1440) // unit minute
#define TSDB_MAX_DAYS_PER_FILE (3650 * 1440)
#define TSDB_DEFAULT_DAYS_PER_FILE (10 * 1440)
#define TSDB_MIN_KEEP 1 // data in db to be reserved.
#define TSDB_MAX_KEEP 365000 // data in db to be reserved.
#define TSDB_DEFAULT_KEEP 3650 // ten years
#define TSDB_MIN_KEEP (1 * 1440) // data in db to be reserved. unit minute
#define TSDB_MAX_KEEP (365000 * 1440) // data in db to be reserved.
#define TSDB_DEFAULT_KEEP (3650 * 1440) // ten years
#define TSDB_MIN_MIN_ROW_FBLOCK 10
#define TSDB_MAX_MIN_ROW_FBLOCK 1000
@ -327,7 +328,7 @@ typedef enum ELogicConditionType {
#define TSDB_MAX_FSYNC_PERIOD 180000 // millisecond
#define TSDB_DEFAULT_FSYNC_PERIOD 3000 // three second
#define TSDB_MIN_WAL_LEVEL 0
#define TSDB_MIN_WAL_LEVEL 1
#define TSDB_MAX_WAL_LEVEL 2
#define TSDB_DEFAULT_WAL_LEVEL 1
@ -388,6 +389,7 @@ typedef enum ELogicConditionType {
#define TSDB_DEFAULT_EXPLAIN_RATIO 0.001
#define TSDB_EXPLAIN_RESULT_ROW_SIZE 1024
#define TSDB_EXPLAIN_RESULT_COLUMN_NAME "QUERY PLAN"
#define TSDB_MAX_JOIN_TABLE_NUM 10
#define TSDB_MAX_UNION_CLAUSE 5
@ -479,6 +481,9 @@ enum {
#define QND_VGID 1
#define VND_VGID 0
#define MAX_NUM_STR_SIZE 40
#ifdef __cplusplus
}
#endif
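For reference, the days/keep limits above are now expressed in minutes rather than days (each old value multiplied by 1440). A small, self-contained sanity check of the new defaults (macro values copied from the hunk above, expected numbers computed here as an illustration):

// Illustrative check only: the new defaults equal the old day counts times 1440 minutes/day.
#include <assert.h>
#define TSDB_DEFAULT_DAYS_PER_FILE (10 * 1440)
#define TSDB_DEFAULT_KEEP          (3650 * 1440)
int main(void) {
  assert(TSDB_DEFAULT_DAYS_PER_FILE == 14400);   // 10 days in minutes
  assert(TSDB_DEFAULT_KEEP == 5256000);          // 3650 days (ten years) in minutes
  return 0;
}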

View File

@ -181,6 +181,11 @@ int32_t execLocalCmd(SRequestObj* pRequest, SQuery* pQuery) {
}
int32_t execDdlQuery(SRequestObj* pRequest, SQuery* pQuery) {
// drop table if exists not_exists_table
if (NULL == pQuery->pCmdMsg) {
return TSDB_CODE_SUCCESS;
}
SCmdMsgInfo* pMsgInfo = pQuery->pCmdMsg;
pRequest->type = pMsgInfo->msgType;
pRequest->body.requestMsg = (SDataBuf){.pData = pMsgInfo->pMsg, .len = pMsgInfo->msgLen, .handle = NULL};

View File

@ -255,6 +255,7 @@ void tmqClearUnhandleMsg(tmq_t* tmq) {
break;
}
msg = NULL;
taosReadAllQitems(tmq->mqueue, tmq->qall);
while (1) {
taosGetQitem(tmq->qall, (void**)&msg);
@ -787,7 +788,7 @@ void tmqShowMsg(tmq_message_t* tmq_message) {
static bool noPrintSchema;
char pBuf[128];
SMqPollRsp* pRsp = &tmq_message->msg;
int32_t colNum = pRsp->schema->nCols;
int32_t colNum = 2;
if (!noPrintSchema) {
printf("|");
for (int32_t i = 0; i < colNum; i++) {
@ -838,6 +839,7 @@ int32_t tmqPollCb(void* param, const SDataBuf* pMsg, int32_t code) {
int32_t msgEpoch = ((SMqRspHead*)pMsg->pData)->epoch;
int32_t tmqEpoch = atomic_load_32(&tmq->epoch);
if (msgEpoch < tmqEpoch) {
/*printf("discard rsp epoch %d, current epoch %d\n", msgEpoch, tmqEpoch);*/
tsem_post(&tmq->rspSem);
tscWarn("discard rsp epoch %d, current epoch %d", msgEpoch, tmqEpoch);
return 0;
@ -886,6 +888,9 @@ int32_t tmqPollCb(void* param, const SDataBuf* pMsg, int32_t code) {
goto WRITE_QUEUE_FAIL;
}
tscError("tmq recv poll: vg %d, req offset %ld, rsp offset %ld", pParam->pVg->vgId, pRsp->msg.reqOffset,
pRsp->msg.rspOffset);
pRsp->vg = pParam->pVg;
taosWriteQitem(tmq->mqueue, pRsp);
atomic_add_fetch_32(&tmq->readyRequest, 1);
@ -902,6 +907,7 @@ WRITE_QUEUE_FAIL:
bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqCMGetSubEpRsp* pRsp) {
/*printf("call update ep %d\n", epoch);*/
/*printf("tmq update ep epoch %d to epoch %d\n", tmq->epoch, epoch);*/
bool set = false;
int32_t topicNumGet = taosArrayGetSize(pRsp->topics);
char vgKey[TSDB_TOPIC_FNAME_LEN + 22];
@ -932,6 +938,7 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqCMGetSubEpRsp* pRsp) {
for (int32_t k = 0; k < vgNumCur; k++) {
SMqClientVg* pVgCur = taosArrayGet(pTopicCur->vgs, k);
sprintf(vgKey, "%s:%d", topic.topicName, pVgCur->vgId);
/*printf("epoch %d vg %d build %s\n", epoch, pVgCur->vgId, vgKey);*/
taosHashPut(pHash, vgKey, strlen(vgKey), &pVgCur->currentOffset, sizeof(int64_t));
}
break;
@ -945,9 +952,12 @@ bool tmqUpdateEp(tmq_t* tmq, int32_t epoch, SMqCMGetSubEpRsp* pRsp) {
sprintf(vgKey, "%s:%d", topic.topicName, pVgEp->vgId);
int64_t* pOffset = taosHashGet(pHash, vgKey, strlen(vgKey));
int64_t offset = pVgEp->offset;
/*printf("epoch %d vg %d offset og to %ld\n", epoch, pVgEp->vgId, offset);*/
if (pOffset != NULL) {
offset = *pOffset;
/*printf("epoch %d vg %d found %s\n", epoch, pVgEp->vgId, vgKey);*/
}
/*printf("epoch %d vg %d offset set to %ld\n", epoch, pVgEp->vgId, offset);*/
SMqClientVg clientVg = {
.pollCnt = 0,
.currentOffset = offset,
@ -1195,6 +1205,7 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t blockingTime) {
SMqClientVg* pVg = taosArrayGet(pTopic->vgs, j);
int32_t vgStatus = atomic_val_compare_exchange_32(&pVg->vgStatus, TMQ_VG_STATUS__IDLE, TMQ_VG_STATUS__WAIT);
if (vgStatus != TMQ_VG_STATUS__IDLE) {
/*printf("skip vg %d\n", pVg->vgId);*/
continue;
}
SMqPollReq* pReq = tmqBuildConsumeReqImpl(tmq, blockingTime, pTopic, pVg);
@ -1238,6 +1249,8 @@ int32_t tmqPollImpl(tmq_t* tmq, int64_t blockingTime) {
int64_t transporterId = 0;
/*printf("send poll\n");*/
atomic_add_fetch_32(&tmq->waitingRequest, 1);
/*tscDebug("tmq send poll: vg %d, req offset %ld", pVg->vgId, pVg->currentOffset);*/
/*printf("send vg %d %ld\n", pVg->vgId, pVg->currentOffset);*/
asyncSendMsgToServer(tmq->pTscObj->pAppInfo->pTransporter, &pVg->epSet, &transporterId, sendInfo);
pVg->pollCnt++;
tmq->pollCnt++;

View File

@ -1629,6 +1629,7 @@ int32_t tSerializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (tEncodeI8(&encoder, pReq->walLevel) < 0) return -1;
if (tEncodeI8(&encoder, pReq->quorum) < 0) return -1;
if (tEncodeI8(&encoder, pReq->cacheLastRow) < 0) return -1;
if (tEncodeI8(&encoder, pReq->replications) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
@ -1650,6 +1651,7 @@ int32_t tDeserializeSAlterDbReq(void *buf, int32_t bufLen, SAlterDbReq *pReq) {
if (tDecodeI8(&decoder, &pReq->walLevel) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->quorum) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->cacheLastRow) < 0) return -1;
if (tDecodeI8(&decoder, &pReq->replications) < 0) return -1;
tEndDecode(&decoder);
tCoderClear(&decoder);

View File

@ -361,6 +361,18 @@ int32_t parseLocaltimeDst(char* timestr, int64_t* time, int32_t timePrec) {
return 0;
}
char getPrecisionUnit(int32_t precision) {
static char units[3] = {TIME_UNIT_MILLISECOND, TIME_UNIT_MICROSECOND, TIME_UNIT_NANOSECOND};
switch (precision) {
case TSDB_TIME_PRECISION_MILLI:
case TSDB_TIME_PRECISION_MICRO:
case TSDB_TIME_PRECISION_NANO:
return units[precision];
default:
return 0;
}
}
int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision) {
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO ||
fromPrecision == TSDB_TIME_PRECISION_NANO);
@ -370,6 +382,33 @@ int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrec
return (int64_t)((double)time * factors[fromPrecision][toPrecision]);
}
int64_t convertTimeFromPrecisionToUnit(int64_t time, int32_t fromPrecision, char toUnit) {
assert(fromPrecision == TSDB_TIME_PRECISION_MILLI || fromPrecision == TSDB_TIME_PRECISION_MICRO ||
fromPrecision == TSDB_TIME_PRECISION_NANO);
static double factors[3] = {1000000., 1000., 1.};
switch (toUnit) {
case 's':
return time * factors[fromPrecision] / NANOSECOND_PER_SEC;
case 'm':
return time * factors[fromPrecision] / NANOSECOND_PER_MINUTE;
case 'h':
return time * factors[fromPrecision] / NANOSECOND_PER_HOUR;
case 'd':
return time * factors[fromPrecision] / NANOSECOND_PER_DAY;
case 'w':
return time * factors[fromPrecision] / NANOSECOND_PER_WEEK;
case 'a':
return time * factors[fromPrecision] / NANOSECOND_PER_MSEC;
case 'u':
return time * factors[fromPrecision] / NANOSECOND_PER_USEC;
case 'b':
return time * factors[fromPrecision];
default: {
return -1;
}
}
}
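A short, self-contained sketch of what the 'm' branch above computes for millisecond-precision input (assuming the millisecond entry of the factors[] table is 1000000, as listed); note the result truncates toward zero because the return type is int64_t:

// Illustrative only: mirrors convertTimeFromPrecisionToUnit(90000, TSDB_TIME_PRECISION_MILLI, 'm').
#include <stdint.h>
#include <stdio.h>
#define NANOSECOND_PER_MINUTE (1000000000LL * 60)
int main(void) {
  int64_t ms = 90000;                                        // 90 seconds in milliseconds
  int64_t minutes = (int64_t)(ms * 1000000. / NANOSECOND_PER_MINUTE);
  printf("%lld minute(s)\n", (long long)minutes);            // prints 1 (1.5 truncated)
  return 0;
}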
static int32_t getDuration(int64_t val, char unit, int64_t* result, int32_t timePrecision) {
switch (unit) {
case 's':

View File

@ -200,7 +200,7 @@ int32_t dmWriteFile(SDnodeMgmt *pMgmt) {
taosMemoryFree(content);
char realfile[PATH_MAX];
snprintf(realfile, sizeof(realfile), "%s%smnode.json", pMgmt->path, TD_DIRSEP);
snprintf(realfile, sizeof(realfile), "%s%sdnode.json", pMgmt->path, TD_DIRSEP);
if (taosRenameFile(file, realfile) != 0) {
terrno = TAOS_SYSTEM_ERROR(errno);

View File

@ -37,13 +37,6 @@ static void dndStopDnode(int signum, void *info, void *ctx) {
}
}
static void dndHandleChild(int signum, void *info, void *ctx) {
dInfo("sigchild received");
if (global.pDnode != NULL) {
dndHandleEvent(global.pDnode, DND_EVENT_CHILD);
}
}
static void dndSetSignalHandle() {
taosSetSignal(SIGTERM, dndStopDnode);
taosSetSignal(SIGHUP, dndStopDnode);
@ -53,7 +46,7 @@ static void dndSetSignalHandle() {
if (!tsMultiProcess) {
} else if (global.ntype == DNODE || global.ntype == NODE_MAX) {
taosSetSignal(SIGCHLD, dndHandleChild);
taosIgnSignal(SIGCHLD);
} else {
taosKillChildOnParentStopped();
}

View File

@ -0,0 +1,96 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "dndInt.h"
#include "wal.h"
static int8_t once = DND_ENV_INIT;
int32_t dndInit() {
dDebug("start to init dnode env");
if (atomic_val_compare_exchange_8(&once, DND_ENV_INIT, DND_ENV_READY) != DND_ENV_INIT) {
terrno = TSDB_CODE_REPEAT_INIT;
dError("failed to init dnode env since %s", terrstr());
return -1;
}
taosIgnSIGPIPE();
taosBlockSIGPIPE();
taosResolveCRC();
SMonCfg monCfg = {0};
monCfg.maxLogs = tsMonitorMaxLogs;
monCfg.port = tsMonitorPort;
monCfg.server = tsMonitorFqdn;
monCfg.comp = tsMonitorComp;
if (monInit(&monCfg) != 0) {
dError("failed to init monitor since %s", terrstr());
return -1;
}
dInfo("dnode env is initialized");
return 0;
}
void dndCleanup() {
dDebug("start to cleanup dnode env");
if (atomic_val_compare_exchange_8(&once, DND_ENV_READY, DND_ENV_CLEANUP) != DND_ENV_READY) {
dError("dnode env is already cleaned up");
return;
}
monCleanup();
walCleanUp();
taosStopCacheRefreshWorker();
dInfo("dnode env is cleaned up");
}
void dndSetMsgHandle(SMgmtWrapper *pWrapper, tmsg_t msgType, NodeMsgFp nodeMsgFp, int8_t vgId) {
pWrapper->msgFps[TMSG_INDEX(msgType)] = nodeMsgFp;
pWrapper->msgVgIds[TMSG_INDEX(msgType)] = vgId;
}
EDndStatus dndGetStatus(SDnode *pDnode) { return pDnode->status; }
void dndSetStatus(SDnode *pDnode, EDndStatus status) {
if (pDnode->status != status) {
dDebug("dnode status set from %s to %s", dndStatStr(pDnode->status), dndStatStr(status));
pDnode->status = status;
}
}
void dndReportStartup(SDnode *pDnode, const char *pName, const char *pDesc) {
SStartupReq *pStartup = &pDnode->startup;
tstrncpy(pStartup->name, pName, TSDB_STEP_NAME_LEN);
tstrncpy(pStartup->desc, pDesc, TSDB_STEP_DESC_LEN);
pStartup->finished = 0;
}
void dndGetStartup(SDnode *pDnode, SStartupReq *pStartup) {
memcpy(pStartup, &pDnode->startup, sizeof(SStartupReq));
pStartup->finished = (dndGetStatus(pDnode) == DND_STAT_RUNNING);
}
void dndProcessStartupReq(SDnode *pDnode, SRpcMsg *pReq) {
dDebug("startup req is received");
SStartupReq *pStartup = rpcMallocCont(sizeof(SStartupReq));
dndGetStartup(pDnode, pStartup);
dDebug("startup req is sent, step:%s desc:%s finished:%d", pStartup->name, pStartup->desc, pStartup->finished);
SRpcMsg rpcRsp = {
.handle = pReq->handle, .pCont = pStartup, .contLen = sizeof(SStartupReq), .ahandle = pReq->ahandle};
rpcSendResponse(&rpcRsp);
}

View File

@ -20,9 +20,9 @@ static bool dndRequireNode(SMgmtWrapper *pWrapper) {
bool required = false;
int32_t code = (*pWrapper->fp.requiredFp)(pWrapper, &required);
if (!required) {
dDebug("node:%s, no need to start", pWrapper->name);
dDebug("node:%s, does not require startup", pWrapper->name);
} else {
dDebug("node:%s, need to start", pWrapper->name);
dDebug("node:%s, needs to be started", pWrapper->name);
}
return required;
}
@ -128,7 +128,7 @@ static int32_t dndNewProc(SMgmtWrapper *pWrapper, ENodeType n) {
}
pWrapper->procId = pid;
dInfo("node:%s, run in new process, pid:%d", pWrapper->name, pid);
dInfo("node:%s, continue running in new process:%d", pWrapper->name, pid);
return 0;
}
@ -255,7 +255,15 @@ static int32_t dndRunInParentProcess(SDnode *pDnode) {
while (1) {
if (pDnode->event == DND_EVENT_STOP) {
dInfo("dnode is about to stop");
break;
for (ENodeType n = DNODE + 1; n < NODE_MAX; ++n) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[n];
if (!pWrapper->required) continue;
if (pDnode->ntype == NODE_MAX) continue;
if (pWrapper->procId > 0 && taosProcExist(pWrapper->procId)) {
dInfo("node:%s, send kill signal to the child process:%d", pWrapper->name, pWrapper->procId);
taosKillProc(pWrapper->procId);
}
}
for (ENodeType n = DNODE + 1; n < NODE_MAX; ++n) {
@ -263,21 +271,41 @@ static int32_t dndRunInParentProcess(SDnode *pDnode) {
if (!pWrapper->required) continue;
if (pDnode->ntype == NODE_MAX) continue;
if (pWrapper->procId != 0 && !taosProcExists(pWrapper->procId)) {
dInfo("node:%s, process not exist, pid:%d", pWrapper->name, pWrapper->procId);
if (pWrapper->procId > 0 && taosProcExist(pWrapper->procId)) {
dInfo("node:%s, wait for child process:%d to stop", pWrapper->name, pWrapper->procId);
taosWaitProc(pWrapper->procId);
dInfo("node:%s, child process:%d is stopped", pWrapper->name, pWrapper->procId);
}
}
break;
} else {
for (ENodeType n = DNODE + 1; n < NODE_MAX; ++n) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[n];
if (!pWrapper->required) continue;
if (pDnode->ntype == NODE_MAX) continue;
if (pWrapper->procId <= 0 || !taosProcExist(pWrapper->procId)) {
dInfo("node:%s, process:%d is killed and needs to be restarted", pWrapper->name, pWrapper->procId);
dndNewProc(pWrapper, n);
}
}
}
taosMsleep(100);
}
}
return 0;
}
static int32_t dndRunInChildProcess(SDnode *pDnode) {
dInfo("dnode run in child process");
SMgmtWrapper *pWrapper = &pDnode->wrappers[pDnode->ntype];
dInfo("%s run in child process", pWrapper->name);
pWrapper->required = dndRequireNode(pWrapper);
if (!pWrapper->required) {
dError("%s does not require startup", pWrapper->name);
return -1;
}
SMsgCb msgCb = dndCreateMsgcb(pWrapper);
tmsgSetDefaultMsgCb(&msgCb);
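The parent-process loop above polls its child node processes, restarting any that die and killing/reaping them when a stop event arrives. A stripped-down, hypothetical POSIX sketch of the same supervise-and-restart pattern (the function names and the sleep child are illustrative stand-ins, not TDengine APIs):

#include <signal.h>
#include <sys/wait.h>
#include <unistd.h>

static volatile sig_atomic_t stopRequested = 0;    // set from a SIGTERM handler, like DND_EVENT_STOP

static pid_t spawnChild(void) {
  pid_t pid = fork();
  if (pid == 0) {
    execlp("sleep", "sleep", "3600", (char *)NULL);  // stand-in for the real node process
    _exit(127);
  }
  return pid;
}

int superviseLoop(void) {
  pid_t child = spawnChild();
  while (!stopRequested) {
    if (waitpid(child, NULL, WNOHANG) == child) {    // child exited: restart it
      child = spawnChild();
    }
    usleep(100 * 1000);                              // poll every 100 ms, like taosMsleep(100)
  }
  kill(child, SIGTERM);                              // stop requested: terminate and reap the child
  waitpid(child, NULL, 0);
  return 0;
}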

View File

@ -15,82 +15,186 @@
#define _DEFAULT_SOURCE
#include "dndInt.h"
#include "wal.h"
static int8_t once = DND_ENV_INIT;
static int32_t dndInitVars(SDnode *pDnode, const SDnodeOpt *pOption) {
pDnode->numOfSupportVnodes = pOption->numOfSupportVnodes;
pDnode->serverPort = pOption->serverPort;
pDnode->dataDir = strdup(pOption->dataDir);
pDnode->localEp = strdup(pOption->localEp);
pDnode->localFqdn = strdup(pOption->localFqdn);
pDnode->firstEp = strdup(pOption->firstEp);
pDnode->secondEp = strdup(pOption->secondEp);
pDnode->disks = pOption->disks;
pDnode->numOfDisks = pOption->numOfDisks;
pDnode->ntype = pOption->ntype;
pDnode->rebootTime = taosGetTimestampMs();
int32_t dndInit() {
dDebug("start to init dnode env");
if (atomic_val_compare_exchange_8(&once, DND_ENV_INIT, DND_ENV_READY) != DND_ENV_INIT) {
terrno = TSDB_CODE_REPEAT_INIT;
dError("failed to init dnode env since %s", terrstr());
if (pDnode->dataDir == NULL || pDnode->localEp == NULL || pDnode->localFqdn == NULL || pDnode->firstEp == NULL ||
pDnode->secondEp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
taosIgnSIGPIPE();
taosBlockSIGPIPE();
taosResolveCRC();
SMonCfg monCfg = {0};
monCfg.maxLogs = tsMonitorMaxLogs;
monCfg.port = tsMonitorPort;
monCfg.server = tsMonitorFqdn;
monCfg.comp = tsMonitorComp;
if (monInit(&monCfg) != 0) {
dError("failed to init monitor since %s", terrstr());
if (!tsMultiProcess || pDnode->ntype == DNODE || pDnode->ntype == NODE_MAX) {
pDnode->lockfile = dndCheckRunning(pDnode->dataDir);
if (pDnode->lockfile == NULL) {
return -1;
}
}
dInfo("dnode env is initialized");
return 0;
}
void dndCleanup() {
dDebug("start to cleanup dnode env");
if (atomic_val_compare_exchange_8(&once, DND_ENV_READY, DND_ENV_CLEANUP) != DND_ENV_READY) {
dError("dnode env is already cleaned up");
static void dndClearVars(SDnode *pDnode) {
for (ENodeType n = 0; n < NODE_MAX; ++n) {
SMgmtWrapper *pMgmt = &pDnode->wrappers[n];
taosMemoryFreeClear(pMgmt->path);
}
if (pDnode->lockfile != NULL) {
taosUnLockFile(pDnode->lockfile);
taosCloseFile(&pDnode->lockfile);
pDnode->lockfile = NULL;
}
taosMemoryFreeClear(pDnode->localEp);
taosMemoryFreeClear(pDnode->localFqdn);
taosMemoryFreeClear(pDnode->firstEp);
taosMemoryFreeClear(pDnode->secondEp);
taosMemoryFreeClear(pDnode->dataDir);
taosMemoryFree(pDnode);
dDebug("dnode memory is cleared, data:%p", pDnode);
}
SDnode *dndCreate(const SDnodeOpt *pOption) {
dDebug("start to create dnode object");
int32_t code = -1;
char path[PATH_MAX] = {0};
SDnode *pDnode = NULL;
pDnode = taosMemoryCalloc(1, sizeof(SDnode));
if (pDnode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
if (dndInitVars(pDnode, pOption) != 0) {
dError("failed to init variables since %s", terrstr());
goto _OVER;
}
dndSetStatus(pDnode, DND_STAT_INIT);
dmGetMgmtFp(&pDnode->wrappers[DNODE]);
mmGetMgmtFp(&pDnode->wrappers[MNODE]);
vmGetMgmtFp(&pDnode->wrappers[VNODES]);
qmGetMgmtFp(&pDnode->wrappers[QNODE]);
smGetMgmtFp(&pDnode->wrappers[SNODE]);
bmGetMgmtFp(&pDnode->wrappers[BNODE]);
for (ENodeType n = 0; n < NODE_MAX; ++n) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[n];
snprintf(path, sizeof(path), "%s%s%s", pDnode->dataDir, TD_DIRSEP, pWrapper->name);
pWrapper->path = strdup(path);
pWrapper->shm.id = -1;
pWrapper->pDnode = pDnode;
if (pWrapper->path == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
pWrapper->procType = PROC_SINGLE;
taosInitRWLatch(&pWrapper->latch);
}
if (dndInitMsgHandle(pDnode) != 0) {
dError("failed to msg handles since %s", terrstr());
goto _OVER;
}
if (dndReadShmFile(pDnode) != 0) {
dError("failed to read shm file since %s", terrstr());
goto _OVER;
}
SMsgCb msgCb = dndCreateMsgcb(&pDnode->wrappers[0]);
tmsgSetDefaultMsgCb(&msgCb);
dInfo("dnode is created, data:%p", pDnode);
code = 0;
_OVER:
if (code != 0 && pDnode) {
dndClearVars(pDnode);
pDnode = NULL;
dError("failed to create dnode since %s", terrstr());
}
return pDnode;
}
void dndClose(SDnode *pDnode) {
if (pDnode == NULL) return;
if (dndGetStatus(pDnode) == DND_STAT_STOPPED) {
dError("dnode is shutting down, data:%p", pDnode);
return;
}
monCleanup();
walCleanUp();
taosStopCacheRefreshWorker();
dInfo("dnode env is cleaned up");
dInfo("start to close dnode, data:%p", pDnode);
dndSetStatus(pDnode, DND_STAT_STOPPED);
for (ENodeType n = 0; n < NODE_MAX; ++n) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[n];
dndCloseNode(pWrapper);
}
void dndSetMsgHandle(SMgmtWrapper *pWrapper, tmsg_t msgType, NodeMsgFp nodeMsgFp, int8_t vgId) {
pWrapper->msgFps[TMSG_INDEX(msgType)] = nodeMsgFp;
pWrapper->msgVgIds[TMSG_INDEX(msgType)] = vgId;
dndClearVars(pDnode);
dInfo("dnode is closed, data:%p", pDnode);
}
EDndStatus dndGetStatus(SDnode *pDnode) { return pDnode->status; }
void dndSetStatus(SDnode *pDnode, EDndStatus status) {
if (pDnode->status != status) {
dDebug("dnode status set from %s to %s", dndStatStr(pDnode->status), dndStatStr(status));
pDnode->status = status;
void dndHandleEvent(SDnode *pDnode, EDndEvent event) {
dInfo("dnode receive %s event, data:%p", dndEventStr(event), pDnode);
if (event == DND_EVENT_STOP) {
pDnode->event = event;
}
}
void dndReportStartup(SDnode *pDnode, const char *pName, const char *pDesc) {
SStartupReq *pStartup = &pDnode->startup;
tstrncpy(pStartup->name, pName, TSDB_STEP_NAME_LEN);
tstrncpy(pStartup->desc, pDesc, TSDB_STEP_DESC_LEN);
pStartup->finished = 0;
SMgmtWrapper *dndAcquireWrapper(SDnode *pDnode, ENodeType ntype) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[ntype];
SMgmtWrapper *pRetWrapper = pWrapper;
taosRLockLatch(&pWrapper->latch);
if (pWrapper->deployed) {
int32_t refCount = atomic_add_fetch_32(&pWrapper->refCount, 1);
dTrace("node:%s, is acquired, refCount:%d", pWrapper->name, refCount);
} else {
terrno = TSDB_CODE_NODE_NOT_DEPLOYED;
pRetWrapper = NULL;
}
taosRUnLockLatch(&pWrapper->latch);
return pRetWrapper;
}
void dndGetStartup(SDnode *pDnode, SStartupReq *pStartup) {
memcpy(pStartup, &pDnode->startup, sizeof(SStartupReq));
pStartup->finished = (dndGetStatus(pDnode) == DND_STAT_RUNNING);
int32_t dndMarkWrapper(SMgmtWrapper *pWrapper) {
int32_t code = 0;
taosRLockLatch(&pWrapper->latch);
if (pWrapper->deployed || (pWrapper->procType == PROC_PARENT && pWrapper->required)) {
int32_t refCount = atomic_add_fetch_32(&pWrapper->refCount, 1);
dTrace("node:%s, is marked, refCount:%d", pWrapper->name, refCount);
} else {
terrno = TSDB_CODE_NODE_NOT_DEPLOYED;
code = -1;
}
taosRUnLockLatch(&pWrapper->latch);
return code;
}
void dndProcessStartupReq(SDnode *pDnode, SRpcMsg *pReq) {
dDebug("startup req is received");
SStartupReq *pStartup = rpcMallocCont(sizeof(SStartupReq));
dndGetStartup(pDnode, pStartup);
void dndReleaseWrapper(SMgmtWrapper *pWrapper) {
if (pWrapper == NULL) return;
dDebug("startup req is sent, step:%s desc:%s finished:%d", pStartup->name, pStartup->desc, pStartup->finished);
SRpcMsg rpcRsp = {
.handle = pReq->handle, .pCont = pStartup, .contLen = sizeof(SStartupReq), .ahandle = pReq->ahandle};
rpcSendResponse(&rpcRsp);
taosRLockLatch(&pWrapper->latch);
int32_t refCount = atomic_sub_fetch_32(&pWrapper->refCount, 1);
taosRUnLockLatch(&pWrapper->latch);
dTrace("node:%s, is released, refCount:%d", pWrapper->name, refCount);
}

View File

@ -1,200 +0,0 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#define _DEFAULT_SOURCE
#include "dndInt.h"
static int32_t dndInitVars(SDnode *pDnode, const SDnodeOpt *pOption) {
pDnode->numOfSupportVnodes = pOption->numOfSupportVnodes;
pDnode->serverPort = pOption->serverPort;
pDnode->dataDir = strdup(pOption->dataDir);
pDnode->localEp = strdup(pOption->localEp);
pDnode->localFqdn = strdup(pOption->localFqdn);
pDnode->firstEp = strdup(pOption->firstEp);
pDnode->secondEp = strdup(pOption->secondEp);
pDnode->disks = pOption->disks;
pDnode->numOfDisks = pOption->numOfDisks;
pDnode->ntype = pOption->ntype;
pDnode->rebootTime = taosGetTimestampMs();
if (pDnode->dataDir == NULL || pDnode->localEp == NULL || pDnode->localFqdn == NULL || pDnode->firstEp == NULL ||
pDnode->secondEp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
return -1;
}
if (!tsMultiProcess || pDnode->ntype == DNODE || pDnode->ntype == NODE_MAX) {
pDnode->lockfile = dndCheckRunning(pDnode->dataDir);
if (pDnode->lockfile == NULL) {
return -1;
}
}
return 0;
}
static void dndClearVars(SDnode *pDnode) {
for (ENodeType n = 0; n < NODE_MAX; ++n) {
SMgmtWrapper *pMgmt = &pDnode->wrappers[n];
taosMemoryFreeClear(pMgmt->path);
}
if (pDnode->lockfile != NULL) {
taosUnLockFile(pDnode->lockfile);
taosCloseFile(&pDnode->lockfile);
pDnode->lockfile = NULL;
}
taosMemoryFreeClear(pDnode->localEp);
taosMemoryFreeClear(pDnode->localFqdn);
taosMemoryFreeClear(pDnode->firstEp);
taosMemoryFreeClear(pDnode->secondEp);
taosMemoryFreeClear(pDnode->dataDir);
taosMemoryFree(pDnode);
dDebug("dnode memory is cleared, data:%p", pDnode);
}
SDnode *dndCreate(const SDnodeOpt *pOption) {
dDebug("start to create dnode object");
int32_t code = -1;
char path[PATH_MAX] = {0};
SDnode *pDnode = NULL;
pDnode = taosMemoryCalloc(1, sizeof(SDnode));
if (pDnode == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
if (dndInitVars(pDnode, pOption) != 0) {
dError("failed to init variables since %s", terrstr());
goto _OVER;
}
dndSetStatus(pDnode, DND_STAT_INIT);
dmGetMgmtFp(&pDnode->wrappers[DNODE]);
mmGetMgmtFp(&pDnode->wrappers[MNODE]);
vmGetMgmtFp(&pDnode->wrappers[VNODES]);
qmGetMgmtFp(&pDnode->wrappers[QNODE]);
smGetMgmtFp(&pDnode->wrappers[SNODE]);
bmGetMgmtFp(&pDnode->wrappers[BNODE]);
for (ENodeType n = 0; n < NODE_MAX; ++n) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[n];
snprintf(path, sizeof(path), "%s%s%s", pDnode->dataDir, TD_DIRSEP, pWrapper->name);
pWrapper->path = strdup(path);
pWrapper->shm.id = -1;
pWrapper->pDnode = pDnode;
if (pWrapper->path == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
pWrapper->procType = PROC_SINGLE;
taosInitRWLatch(&pWrapper->latch);
}
if (dndInitMsgHandle(pDnode) != 0) {
dError("failed to msg handles since %s", terrstr());
goto _OVER;
}
if (dndReadShmFile(pDnode) != 0) {
dError("failed to read shm file since %s", terrstr());
goto _OVER;
}
SMsgCb msgCb = dndCreateMsgcb(&pDnode->wrappers[0]);
tmsgSetDefaultMsgCb(&msgCb);
dInfo("dnode is created, data:%p", pDnode);
code = 0;
_OVER:
if (code != 0 && pDnode) {
dndClearVars(pDnode);
pDnode = NULL;
dError("failed to create dnode since %s", terrstr());
}
return pDnode;
}
void dndClose(SDnode *pDnode) {
if (pDnode == NULL) return;
if (dndGetStatus(pDnode) == DND_STAT_STOPPED) {
dError("dnode is shutting down, data:%p", pDnode);
return;
}
dInfo("start to close dnode, data:%p", pDnode);
dndSetStatus(pDnode, DND_STAT_STOPPED);
for (ENodeType n = 0; n < NODE_MAX; ++n) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[n];
dndCloseNode(pWrapper);
}
dndClearVars(pDnode);
dInfo("dnode is closed, data:%p", pDnode);
}
void dndHandleEvent(SDnode *pDnode, EDndEvent event) {
dInfo("dnode receive %s event, data:%p", dndEventStr(event), pDnode);
if (event == DND_EVENT_STOP) {
pDnode->event = event;
}
}
SMgmtWrapper *dndAcquireWrapper(SDnode *pDnode, ENodeType ntype) {
SMgmtWrapper *pWrapper = &pDnode->wrappers[ntype];
SMgmtWrapper *pRetWrapper = pWrapper;
taosRLockLatch(&pWrapper->latch);
if (pWrapper->deployed) {
int32_t refCount = atomic_add_fetch_32(&pWrapper->refCount, 1);
dTrace("node:%s, is acquired, refCount:%d", pWrapper->name, refCount);
} else {
terrno = TSDB_CODE_NODE_NOT_DEPLOYED;
pRetWrapper = NULL;
}
taosRUnLockLatch(&pWrapper->latch);
return pRetWrapper;
}
int32_t dndMarkWrapper(SMgmtWrapper *pWrapper) {
int32_t code = 0;
taosRLockLatch(&pWrapper->latch);
if (pWrapper->deployed || (pWrapper->procType == PROC_PARENT && pWrapper->required)) {
int32_t refCount = atomic_add_fetch_32(&pWrapper->refCount, 1);
dTrace("node:%s, is marked, refCount:%d", pWrapper->name, refCount);
} else {
terrno = TSDB_CODE_NODE_NOT_DEPLOYED;
code = -1;
}
taosRUnLockLatch(&pWrapper->latch);
return code;
}
void dndReleaseWrapper(SMgmtWrapper *pWrapper) {
if (pWrapper == NULL) return;
taosRLockLatch(&pWrapper->latch);
int32_t refCount = atomic_sub_fetch_32(&pWrapper->refCount, 1);
taosRUnLockLatch(&pWrapper->latch);
dTrace("node:%s, is released, refCount:%d", pWrapper->name, refCount);
}

View File

@ -735,6 +735,9 @@ typedef struct {
int8_t createdBy; // STREAM_CREATED_BY__USER or SMA
int32_t fixedSinkVgId; // 0 for shuffle
int64_t smaId; // 0 for unused
int8_t trigger;
int32_t triggerParam;
int64_t waterMark;
char* sql;
char* logicalPlan;
char* physicalPlan;

View File

@ -1309,7 +1309,7 @@ static int32_t mndGetDbMeta(SNodeMsg *pReq, SShowObj *pShow, STableMetaRsp *pMet
cols++;
pShow->bytes[cols] = 4;
pSchema[cols].type = TSDB_DATA_TYPE_SMALLINT;
pSchema[cols].type = TSDB_DATA_TYPE_INT;
strcpy(pSchema[cols].name, "days");
pSchema[cols].bytes = pShow->bytes[cols];
cols++;
@ -1444,7 +1444,7 @@ static void dumpDbInfoToPayload(char *data, SDbObj *pDb, SShowObj *pShow, int32_
cols++;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);
*(int16_t *)pWrite = pDb->cfg.daysPerFile;
*(int32_t *)pWrite = pDb->cfg.daysPerFile;
cols++;
pWrite = getDataPosition(data, pShow, cols, rows, rowCapacity);

View File

@ -53,7 +53,7 @@ static const SInfosTableSchema userDBSchema[] = {
{.name = "ntables", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
{.name = "replica", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "quorum", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "days", .bytes = 2, .type = TSDB_DATA_TYPE_SMALLINT},
{.name = "days", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "keep", .bytes = 24 + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_BINARY},
{.name = "cache", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "blocks", .bytes = 4, .type = TSDB_DATA_TYPE_INT},

View File

@ -42,10 +42,10 @@ void* MndTestSma::BuildCreateDbReq(const char* dbname, int32_t* pContLen) {
createReq.numOfVgroups = 2;
createReq.cacheBlockSize = 16;
createReq.totalBlocks = 10;
createReq.daysPerFile = 10;
createReq.daysToKeep0 = 3650;
createReq.daysToKeep1 = 3650;
createReq.daysToKeep2 = 3650;
createReq.daysPerFile = 10 * 1440;
createReq.daysToKeep0 = 3650 * 1440;
createReq.daysToKeep1 = 3650 * 1440;
createReq.daysToKeep2 = 3650 * 1440;
createReq.minRows = 100;
createReq.maxRows = 4096;
createReq.commitTime = 3600;

View File

@ -35,10 +35,10 @@ void* MndTestTopic::BuildCreateDbReq(const char* dbname, int32_t* pContLen) {
createReq.numOfVgroups = 2;
createReq.cacheBlockSize = 16;
createReq.totalBlocks = 10;
createReq.daysPerFile = 10;
createReq.daysToKeep0 = 3650;
createReq.daysToKeep1 = 3650;
createReq.daysToKeep2 = 3650;
createReq.daysPerFile = 10 * 1440;
createReq.daysToKeep0 = 3650 * 1440;
createReq.daysToKeep1 = 3650 * 1440;
createReq.daysToKeep2 = 3650 * 1440;
createReq.minRows = 100;
createReq.maxRows = 4096;
createReq.commitTime = 3600;

View File

@ -324,10 +324,10 @@ TEST_F(MndTestUser, 03_Alter_User) {
createReq.numOfVgroups = 2;
createReq.cacheBlockSize = 16;
createReq.totalBlocks = 10;
createReq.daysPerFile = 10;
createReq.daysToKeep0 = 3650;
createReq.daysToKeep1 = 3650;
createReq.daysToKeep2 = 3650;
createReq.daysPerFile = 10 * 1440;
createReq.daysToKeep0 = 3650 * 1440;
createReq.daysToKeep1 = 3650 * 1440;
createReq.daysToKeep2 = 3650 * 1440;
createReq.minRows = 100;
createReq.maxRows = 4096;
createReq.commitTime = 3600;

View File

@ -194,7 +194,7 @@ void tqClose(STQ*);
int tqPushMsg(STQ*, void* msg, int32_t msgLen, tmsg_t msgType, int64_t version);
int tqCommit(STQ*);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg);
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId);
int32_t tqProcessSetConnReq(STQ* pTq, char* msg);
int32_t tqProcessRebReq(STQ* pTq, char* msg);
int32_t tqProcessTaskExec(STQ* pTq, char* msg, int32_t msgLen, int32_t workerId);

View File

@ -27,5 +27,5 @@ void metaCloseUidGnrt(SMeta *pMeta) { /* TODO */
tb_uid_t metaGenerateUid(SMeta *pMeta) {
// Generate a new table UID
return ++(pMeta->uidGnrt.nextUid);
return tGenIdPI32();
}

View File

@ -250,7 +250,7 @@ int32_t tqDeserializeConsumer(STQ* pTq, const STqSerializedHead* pHead, STqConsu
return 0;
}
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
SMqPollReq* pReq = pMsg->pCont;
int64_t consumerId = pReq->consumerId;
int64_t fetchOffset;
@ -264,6 +264,8 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
fetchOffset = pReq->currentOffset + 1;
}
/*printf("tmq poll vg %d req %ld %ld\n", pTq->pVnode->vgId, pReq->currentOffset, fetchOffset);*/
SMqPollRsp rsp = {
/*.consumerId = consumerId,*/
.numOfTopics = 0,
@ -288,37 +290,47 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
rsp.reqOffset = pReq->currentOffset;
rsp.skipLogNum = 0;
SWalHead* pHead;
while (1) {
/*if (fetchOffset > walGetLastVer(pTq->pWal) || walReadWithHandle(pTopic->pReadhandle, fetchOffset) < 0) {*/
if (walReadWithHandle(pTopic->pReadhandle, fetchOffset) < 0) {
SWalReadHead* pHead;
if (walReadWithHandle_s(pTopic->pReadhandle, fetchOffset, &pHead) < 0) {
// TODO: no more log, set timer to wait blocking time
// if data inserted during waiting, launch query and
// response to user
break;
}
int8_t pos = fetchOffset % TQ_BUFFER_SIZE;
pHead = pTopic->pReadhandle->pHead;
if (pHead->head.msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->head.body;
qTaskInfo_t task = pTopic->buffer.output[pos].task;
/*printf("vg %d offset %ld msgType %d from epoch %d\n", pTq->pVnode->vgId, fetchOffset, pHead->msgType,
* pReq->epoch);*/
/*int8_t pos = fetchOffset % TQ_BUFFER_SIZE;*/
/*pHead = pTopic->pReadhandle->pHead;*/
if (pHead->msgType == TDMT_VND_SUBMIT) {
SSubmitReq* pCont = (SSubmitReq*)&pHead->body;
/*printf("from topic %s from consumer\n", pTopic->topicName, consumerId);*/
qTaskInfo_t task = pTopic->buffer.output[workerId].task;
ASSERT(task);
qSetStreamInput(task, pCont, STREAM_DATA_TYPE_SUBMIT_BLOCK);
SArray* pRes = taosArrayInit(0, sizeof(SSDataBlock));
while (1) {
SSDataBlock* pDataBlock;
SSDataBlock* pDataBlock = NULL;
uint64_t ts;
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(false);
}
if (pDataBlock == NULL) {
fetchOffset++;
pos = fetchOffset % TQ_BUFFER_SIZE;
rsp.skipLogNum++;
/*pos = fetchOffset % TQ_BUFFER_SIZE;*/
break;
}
taosArrayPush(pRes, pDataBlock);
rsp.schema = pTopic->buffer.output[pos].pReadHandle->pSchemaWrapper;
}
if (taosArrayGetSize(pRes) == 0) {
fetchOffset++;
rsp.skipLogNum++;
taosArrayDestroy(pRes);
continue;
}
rsp.schema = pTopic->buffer.output[workerId].pReadHandle->pSchemaWrapper;
rsp.rspOffset = fetchOffset;
rsp.numOfTopics = 1;
@ -328,6 +340,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
void* buf = rpcMallocCont(tlen);
if (buf == NULL) {
pMsg->code = -1;
taosMemoryFree(pHead);
return -1;
}
((SMqRspHead*)buf)->mqMsgType = TMQ_MSG_TYPE__POLL_RSP;
@ -340,10 +353,14 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
pMsg->pCont = buf;
pMsg->contLen = tlen;
pMsg->code = 0;
/*printf("vg %d offset %ld msgType %d from epoch %d actual rsp\n", pTq->pVnode->vgId, fetchOffset,
* pHead->msgType,*/
/*pReq->epoch);*/
tmsgSendRsp(pMsg);
taosMemoryFree(pHead);
return 0;
}
} else {
taosMemoryFree(pHead);
fetchOffset++;
rsp.skipLogNum++;
}
@ -368,6 +385,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg) {
pMsg->contLen = tlen;
pMsg->code = 0;
tmsgSendRsp(pMsg);
/*printf("vg %d offset %ld from epoch %d not rsp\n", pTq->pVnode->vgId, fetchOffset, pReq->epoch);*/
/*}*/
return 0;
@ -432,7 +450,9 @@ int32_t tqProcessSetConnReq(STQ* pTq, char* msg) {
};
pTopic->buffer.output[i].pReadHandle = pReadHandle;
pTopic->buffer.output[i].task = qCreateStreamExecTaskInfo(req.qmsg, &handle);
ASSERT(pTopic->buffer.output[i].task);
}
printf("set topic %s to consumer %ld\n", pTopic->topicName, req.consumerId);
taosArrayPush(pConsumer->topics, pTopic);
tqHandleMovePut(pTq->tqMeta, req.consumerId, pConsumer);
tqHandleCommit(pTq->tqMeta, req.consumerId);

View File

@ -168,6 +168,7 @@ SArray* tqRetrieveDataBlock(STqReadHandle* pHandle) {
break;
}
if (colDataAppend(pColData, curRow, sVal.val, false) < 0) {
/*if (colDataAppend(pColData, curRow, sVal.val, sVal.valType == TD_VTYPE_NULL) < 0) {*/
taosArrayDestroyEx(pArray, (void (*)(void*))tDeleteSSDataBlock);
return NULL;
}

View File

@ -1394,7 +1394,7 @@ int tsdbWriteBlockImpl(STsdb *pRepo, STable *pTable, SDFile *pDFile, SDFile *pDF
tsdbDebug("vgId:%d uid:%" PRId64 " a block of data is written to file %s, offset %" PRId64
" numOfRows %d len %d numOfCols %" PRId16 " keyFirst %" PRId64 " keyLast %" PRId64,
REPO_ID(pRepo), TABLE_TID(pTable), TSDB_FILE_FULL_NAME(pDFile), offset, rowsToWrite, pBlock->len,
REPO_ID(pRepo), TABLE_UID(pTable), TSDB_FILE_FULL_NAME(pDFile), offset, rowsToWrite, pBlock->len,
pBlock->numOfCols, pBlock->keyFirst, pBlock->keyLast);
return 0;

View File

@ -66,7 +66,7 @@ int vnodeProcessFetchMsg(SVnode *pVnode, SRpcMsg *pMsg, SQueueInfo *pInfo) {
case TDMT_VND_TABLE_META:
return vnodeGetTableMeta(pVnode, pMsg);
case TDMT_VND_CONSUME:
return tqProcessPollReq(pVnode->pTq, pMsg);
return tqProcessPollReq(pVnode->pTq, pMsg, pInfo->workerId);
case TDMT_VND_TASK_PIPE_EXEC:
case TDMT_VND_TASK_MERGE_EXEC:
return tqProcessTaskExec(pVnode->pTq, msgstr, msgLen, 0);

View File

@ -165,6 +165,7 @@ int vnodeApplyWMsg(SVnode *pVnode, SRpcMsg *pMsg, SRpcMsg **pRsp) {
// }
break;
case TDMT_VND_SUBMIT:
/*printf("vnode %d write data %ld\n", pVnode->vgId, ver);*/
if (pVnode->config.streamMode == 0) {
if (tsdbInsertData(pVnode->pTsdb, (SSubmitReq *)ptr, NULL) < 0) {
// TODO: handle error

View File

@ -112,7 +112,14 @@ typedef struct SCtgRuntimeStat {
} SCtgRuntimeStat;
typedef struct SCtgCacheStat {
uint64_t clusterNum;
uint64_t dbNum;
uint64_t tblNum;
uint64_t stblNum;
uint64_t vgHitNum;
uint64_t vgMissNum;
uint64_t tblHitNum;
uint64_t tblMissNum;
} SCtgCacheStat;
typedef struct SCatalogStat {
@ -204,8 +211,13 @@ typedef struct SCtgAction {
#define CTG_QUEUE_ADD() atomic_add_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
#define CTG_QUEUE_SUB() atomic_sub_fetch_64(&gCtgMgmt.queue.qRemainNum, 1)
#define CTG_STAT_ADD(n) atomic_add_fetch_64(&(n), 1)
#define CTG_STAT_SUB(n) atomic_sub_fetch_64(&(n), 1)
#define CTG_STAT_ADD(_item, _n) atomic_add_fetch_64(&(_item), _n)
#define CTG_STAT_SUB(_item, _n) atomic_sub_fetch_64(&(_item), _n)
#define CTG_STAT_GET(_item) atomic_load_64(&(_item))
#define CTG_RUNTIME_STAT_ADD(item, n) (CTG_STAT_ADD(gCtgMgmt.stat.runtime.item, n))
#define CTG_CACHE_STAT_ADD(item, n) (CTG_STAT_ADD(gCtgMgmt.stat.cache.item, n))
#define CTG_CACHE_STAT_SUB(item, n) (CTG_STAT_SUB(gCtgMgmt.stat.cache.item, n))
#define CTG_IS_META_NULL(type) ((type) == META_TYPE_NULL_TABLE)
#define CTG_IS_META_CTABLE(type) ((type) == META_TYPE_CTABLE)
@ -291,6 +303,9 @@ typedef struct SCtgAction {
#define CTG_API_ENTER() do { CTG_API_DEBUG("CTG API enter %s", __FUNCTION__); CTG_LOCK(CTG_READ, &gCtgMgmt.lock); if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) { CTG_API_LEAVE(TSDB_CODE_CTG_OUT_OF_SERVICE); } } while (0)
extern void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p);
extern void ctgdShowClusterCache(SCatalog* pCtg);
extern int32_t ctgdShowCacheInfo(void);
#ifdef __cplusplus
}

View File

@ -24,8 +24,8 @@ int32_t ctgActRemoveDB(SCtgMetaAction *action);
int32_t ctgActRemoveStb(SCtgMetaAction *action);
int32_t ctgActRemoveTbl(SCtgMetaAction *action);
extern SCtgDebug gCTGDebug;
SCatalogMgmt gCtgMgmt = {0};
SCtgDebug gCTGDebug = {0};
SCtgAction gCtgAction[CTG_ACT_MAX] = {{
CTG_ACT_UPDATE_VG,
"update vgInfo",
@ -53,182 +53,6 @@ SCtgAction gCtgAction[CTG_ACT_MAX] = {{
}
};
int32_t ctgDbgEnableDebug(char *option) {
if (0 == strcasecmp(option, "lock")) {
gCTGDebug.lockEnable = true;
qDebug("lock debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "cache")) {
gCTGDebug.cacheEnable = true;
qDebug("cache debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "api")) {
gCTGDebug.apiEnable = true;
qDebug("api debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "meta")) {
gCTGDebug.metaEnable = true;
qDebug("api debug enabled");
return TSDB_CODE_SUCCESS;
}
qError("invalid debug option:%s", option);
return TSDB_CODE_CTG_INTERNAL_ERROR;
}
int32_t ctgDbgGetStatNum(char *option, void *res) {
if (0 == strcasecmp(option, "runtime.qDoneNum")) {
*(uint64_t *)res = atomic_load_64(&gCtgMgmt.stat.runtime.qDoneNum);
return TSDB_CODE_SUCCESS;
}
qError("invalid stat option:%s", option);
return TSDB_CODE_CTG_INTERNAL_ERROR;
}
int32_t ctgDbgGetTbMetaNum(SCtgDBCache *dbCache) {
return dbCache->tbCache.metaCache ? (int32_t)taosHashGetSize(dbCache->tbCache.metaCache) : 0;
}
int32_t ctgDbgGetStbNum(SCtgDBCache *dbCache) {
return dbCache->tbCache.stbCache ? (int32_t)taosHashGetSize(dbCache->tbCache.stbCache) : 0;
}
int32_t ctgDbgGetRentNum(SCtgRentMgmt *rent) {
int32_t num = 0;
for (uint16_t i = 0; i < rent->slotNum; ++i) {
SCtgRentSlot *slot = &rent->slots[i];
if (NULL == slot->meta) {
continue;
}
num += taosArrayGetSize(slot->meta);
}
return num;
}
int32_t ctgDbgGetClusterCacheNum(SCatalog* pCtg, int32_t type) {
if (NULL == pCtg || NULL == pCtg->dbCache) {
return 0;
}
switch (type) {
case CTG_DBG_DB_NUM:
return (int32_t)taosHashGetSize(pCtg->dbCache);
case CTG_DBG_DB_RENT_NUM:
return ctgDbgGetRentNum(&pCtg->dbRent);
case CTG_DBG_STB_RENT_NUM:
return ctgDbgGetRentNum(&pCtg->stbRent);
default:
break;
}
SCtgDBCache *dbCache = NULL;
int32_t num = 0;
void *pIter = taosHashIterate(pCtg->dbCache, NULL);
while (pIter) {
dbCache = (SCtgDBCache *)pIter;
switch (type) {
case CTG_DBG_META_NUM:
num += ctgDbgGetTbMetaNum(dbCache);
break;
case CTG_DBG_STB_NUM:
num += ctgDbgGetStbNum(dbCache);
break;
default:
ctgError("invalid type:%d", type);
break;
}
pIter = taosHashIterate(pCtg->dbCache, pIter);
}
return num;
}
void ctgDbgShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p) {
if (!gCTGDebug.metaEnable) {
return;
}
STableComInfo *c = &p->tableInfo;
if (TSDB_CHILD_TABLE == p->tableType) {
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid);
return;
} else {
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
tbName, p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize);
}
int32_t colNum = c->numOfColumns + c->numOfTags;
for (int32_t i = 0; i < colNum; ++i) {
SSchema *s = &p->schema[i];
ctgDebug("[%d] name:%s, type:%d, colId:%" PRIi16 ", bytes:%d", i, s->name, s->type, s->colId, s->bytes);
}
}
void ctgDbgShowDBCache(SCatalog* pCtg, SHashObj *dbHash) {
if (NULL == dbHash || !gCTGDebug.cacheEnable) {
return;
}
int32_t i = 0;
SCtgDBCache *dbCache = NULL;
void *pIter = taosHashIterate(dbHash, NULL);
while (pIter) {
char *dbFName = NULL;
size_t len = 0;
dbCache = (SCtgDBCache *)pIter;
dbFName = taosHashGetKey(pIter, &len);
int32_t metaNum = dbCache->tbCache.metaCache ? taosHashGetSize(dbCache->tbCache.metaCache) : 0;
int32_t stbNum = dbCache->tbCache.stbCache ? taosHashGetSize(dbCache->tbCache.stbCache) : 0;
int32_t vgVersion = CTG_DEFAULT_INVALID_VERSION;
int32_t hashMethod = -1;
int32_t vgNum = 0;
if (dbCache->vgInfo) {
vgVersion = dbCache->vgInfo->vgVersion;
hashMethod = dbCache->vgInfo->hashMethod;
if (dbCache->vgInfo->vgHash) {
vgNum = taosHashGetSize(dbCache->vgInfo->vgHash);
}
}
ctgDebug("[%d] db [%.*s][%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum);
pIter = taosHashIterate(dbHash, pIter);
}
}
void ctgDbgShowClusterCache(SCatalog* pCtg) {
if (!gCTGDebug.cacheEnable || NULL == pCtg) {
return;
}
ctgDebug("## cluster %"PRIx64" %p cache Info ##", pCtg->clusterId, pCtg);
ctgDebug("db:%d meta:%d stb:%d dbRent:%d stbRent:%d", ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM),
ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM));
ctgDbgShowDBCache(pCtg, pCtg->dbCache);
}
void ctgFreeMetaRent(SCtgRentMgmt *mgmt) {
if (NULL == mgmt->slots) {
return;
@ -249,15 +73,19 @@ void ctgFreeMetaRent(SCtgRentMgmt *mgmt) {
void ctgFreeTableMetaCache(SCtgTbMetaCache *cache) {
CTG_LOCK(CTG_WRITE, &cache->stbLock);
if (cache->stbCache) {
int32_t stblNum = taosHashGetSize(cache->stbCache);
taosHashCleanup(cache->stbCache);
cache->stbCache = NULL;
CTG_CACHE_STAT_SUB(stblNum, stblNum);
}
CTG_UNLOCK(CTG_WRITE, &cache->stbLock);
CTG_LOCK(CTG_WRITE, &cache->metaLock);
if (cache->metaCache) {
int32_t tblNum = taosHashGetSize(cache->metaCache);
taosHashCleanup(cache->metaCache);
cache->metaCache = NULL;
CTG_CACHE_STAT_SUB(tblNum, tblNum);
}
CTG_UNLOCK(CTG_WRITE, &cache->metaLock);
}
@ -293,6 +121,8 @@ void ctgFreeHandle(SCatalog* pCtg) {
ctgFreeMetaRent(&pCtg->stbRent);
if (pCtg->dbCache) {
int32_t dbNum = taosHashGetSize(pCtg->dbCache);
void *pIter = taosHashIterate(pCtg->dbCache, NULL);
while (pIter) {
SCtgDBCache *dbCache = pIter;
@ -305,6 +135,8 @@ void ctgFreeHandle(SCatalog* pCtg) {
}
taosHashCleanup(pCtg->dbCache);
CTG_CACHE_STAT_SUB(dbNum, dbNum);
}
taosMemoryFree(pCtg);
@ -361,7 +193,7 @@ int32_t ctgPushAction(SCatalog* pCtg, SCtgMetaAction *action) {
CTG_UNLOCK(CTG_WRITE, &gCtgMgmt.queue.qlock);
CTG_QUEUE_ADD();
CTG_STAT_ADD(gCtgMgmt.stat.runtime.qNum);
CTG_RUNTIME_STAT_ADD(qNum, 1);
tsem_post(&gCtgMgmt.queue.reqSem);
@ -620,34 +452,45 @@ int32_t ctgGetDBCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache)
int32_t ctgAcquireVgInfoFromCache(SCatalog* pCtg, const char *dbFName, SCtgDBCache **pCache, bool *inCache) {
SCtgDBCache *dbCache = NULL;
if (NULL == pCtg->dbCache) {
*pCache = NULL;
*inCache = false;
ctgWarn("empty db cache, dbFName:%s", dbFName);
return TSDB_CODE_SUCCESS;
ctgDebug("empty db cache, dbFName:%s", dbFName);
goto _return;
}
SCtgDBCache *dbCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
*pCache = NULL;
*inCache = false;
return TSDB_CODE_SUCCESS;
ctgDebug("db %s not in cache", dbFName);
goto _return;
}
ctgAcquireVgInfo(pCtg, dbCache, inCache);
if (!(*inCache)) {
ctgReleaseDBCache(pCtg, dbCache);
*pCache = NULL;
return TSDB_CODE_SUCCESS;
ctgDebug("vgInfo of db %s not in cache", dbFName);
goto _return;
}
*pCache = dbCache;
*inCache = true;
CTG_CACHE_STAT_ADD(vgHitNum, 1);
ctgDebug("Got db vgInfo from cache, dbFName:%s", dbFName);
return TSDB_CODE_SUCCESS;
_return:
if (dbCache) {
ctgReleaseDBCache(pCtg, dbCache);
}
*pCache = NULL;
*inCache = false;
CTG_CACHE_STAT_ADD(vgMissNum, 1);
return TSDB_CODE_SUCCESS;
}
@ -763,11 +606,10 @@ int32_t ctgIsTableMetaExistInCache(SCatalog* pCtg, char *dbFName, char* tbName,
}
int32_t ctgGetTableMetaFromCache(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta, int32_t *exist, int32_t flag, uint64_t *dbId) {
int32_t ctgGetTableMetaFromCache(SCatalog* pCtg, const SName* pTableName, STableMeta** pTableMeta, bool *inCache, int32_t flag, uint64_t *dbId) {
if (NULL == pCtg->dbCache) {
*exist = 0;
ctgWarn("empty tbmeta cache, tbName:%s", pTableName->tname);
return TSDB_CODE_SUCCESS;
ctgDebug("empty tbmeta cache, tbName:%s", pTableName->tname);
goto _return;
}
char dbFName[TSDB_DB_FNAME_LEN] = {0};
@ -782,8 +624,8 @@ int32_t ctgGetTableMetaFromCache(SCatalog* pCtg, const SName* pTableName, STable
SCtgDBCache *dbCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
*exist = 0;
return TSDB_CODE_SUCCESS;
ctgDebug("db %s not in cache", pTableName->tname);
goto _return;
}
int32_t sz = 0;
@ -792,13 +634,11 @@ int32_t ctgGetTableMetaFromCache(SCatalog* pCtg, const SName* pTableName, STable
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
if (NULL == *pTableMeta) {
*exist = 0;
ctgReleaseDBCache(pCtg, dbCache);
ctgDebug("tbl not in cache, dbFName:%s, tbName:%s", dbFName, pTableName->tname);
return TSDB_CODE_SUCCESS;
goto _return;
}
*exist = 1;
if (dbId) {
*dbId = dbCache->dbId;
}
@ -808,6 +648,10 @@ int32_t ctgGetTableMetaFromCache(SCatalog* pCtg, const SName* pTableName, STable
if (tbMeta->tableType != TSDB_CHILD_TABLE) {
ctgReleaseDBCache(pCtg, dbCache);
ctgDebug("Got meta from cache, type:%d, dbFName:%s, tbName:%s", tbMeta->tableType, dbFName, pTableName->tname);
*inCache = true;
CTG_CACHE_STAT_ADD(tblHitNum, 1);
return TSDB_CODE_SUCCESS;
}
@ -819,8 +663,7 @@ int32_t ctgGetTableMetaFromCache(SCatalog* pCtg, const SName* pTableName, STable
ctgReleaseDBCache(pCtg, dbCache);
ctgError("stb not in stbCache, suid:%"PRIx64, tbMeta->suid);
taosMemoryFreeClear(*pTableMeta);
*exist = 0;
return TSDB_CODE_SUCCESS;
goto _return;
}
if ((*stbMeta)->suid != tbMeta->suid) {
@ -846,8 +689,18 @@ int32_t ctgGetTableMetaFromCache(SCatalog* pCtg, const SName* pTableName, STable
ctgReleaseDBCache(pCtg, dbCache);
*inCache = true;
CTG_CACHE_STAT_ADD(tblHitNum, 1);
ctgDebug("Got tbmeta from cache, dbFName:%s, tbName:%s", dbFName, pTableName->tname);
return TSDB_CODE_SUCCESS;
_return:
*inCache = false;
CTG_CACHE_STAT_ADD(tblMissNum, 1);
return TSDB_CODE_SUCCESS;
}
@ -1378,6 +1231,8 @@ int32_t ctgAddNewDBCache(SCatalog *pCtg, const char *dbFName, uint64_t dbId) {
CTG_ERR_JRET(TSDB_CODE_CTG_MEM_ERROR);
}
CTG_CACHE_STAT_ADD(dbNum, 1);
SDbVgVersion vgVersion = {.dbId = newDBCache.dbId, .vgVersion = -1};
strncpy(vgVersion.dbFName, dbFName, sizeof(vgVersion.dbFName));
@ -1436,6 +1291,8 @@ int32_t ctgRemoveDB(SCatalog* pCtg, SCtgDBCache *dbCache, const char* dbFName) {
CTG_ERR_RET(TSDB_CODE_CTG_DB_DROPPED);
}
CTG_CACHE_STAT_SUB(dbNum, 1);
ctgInfo("db removed from cache, dbFName:%s, dbId:%"PRIx64, dbFName, dbId);
return TSDB_CODE_SUCCESS;
@ -1568,6 +1425,8 @@ int32_t ctgUpdateTblMeta(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, ui
CTG_LOCK(CTG_WRITE, &tbCache->stbLock);
if (taosHashRemove(tbCache->stbCache, &orig->suid, sizeof(orig->suid))) {
ctgError("stb not exist in stbCache, dbFName:%s, stb:%s, suid:%"PRIx64, dbFName, tbName, orig->suid);
} else {
CTG_CACHE_STAT_SUB(stblNum, 1);
}
CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock);
@ -1594,8 +1453,12 @@ int32_t ctgUpdateTblMeta(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, ui
CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
}
if (NULL == orig) {
CTG_CACHE_STAT_ADD(tblNum, 1);
}
ctgDebug("tbmeta updated to cache, dbFName:%s, tbName:%s, tbType:%d", dbFName, tbName, meta->tableType);
ctgDbgShowTableMeta(pCtg, tbName, meta);
ctgdShowTableMeta(pCtg, tbName, meta);
if (!isStb) {
CTG_UNLOCK(CTG_READ, &tbCache->metaLock);
@ -1616,6 +1479,8 @@ int32_t ctgUpdateTblMeta(SCatalog *pCtg, SCtgDBCache *dbCache, char *dbFName, ui
CTG_ERR_RET(TSDB_CODE_CTG_MEM_ERROR);
}
CTG_CACHE_STAT_ADD(stblNum, 1);
CTG_UNLOCK(CTG_WRITE, &tbCache->stbLock);
CTG_UNLOCK(CTG_READ, &tbCache->metaLock);
@ -1874,7 +1739,7 @@ int32_t ctgGetTableMeta(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, cons
CTG_ERR_RET(TSDB_CODE_CTG_INVALID_INPUT);
}
int32_t exist = 0;
bool inCache = false;
int32_t code = 0;
uint64_t dbId = 0;
uint64_t suid = 0;
@ -1884,11 +1749,11 @@ int32_t ctgGetTableMeta(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, cons
CTG_FLAG_SET_INF_DB(flag);
}
CTG_ERR_RET(ctgGetTableMetaFromCache(pCtg, pTableName, pTableMeta, &exist, flag, &dbId));
CTG_ERR_RET(ctgGetTableMetaFromCache(pCtg, pTableName, pTableMeta, &inCache, flag, &dbId));
int32_t tbType = 0;
if (exist) {
if (inCache) {
if (CTG_FLAG_MATCH_STB(flag, (*pTableMeta)->tableType) && ((!CTG_FLAG_IS_FORCE_UPDATE(flag)) || (CTG_FLAG_IS_INF_DB(flag)))) {
goto _return;
}
@ -1930,8 +1795,8 @@ int32_t ctgGetTableMeta(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, cons
SName stbName = *pTableName;
strcpy(stbName.tname, output->tbName);
CTG_ERR_JRET(ctgGetTableMetaFromCache(pCtg, &stbName, pTableMeta, &exist, flag, NULL));
if (0 == exist) {
CTG_ERR_JRET(ctgGetTableMetaFromCache(pCtg, &stbName, pTableMeta, &inCache, flag, NULL));
if (!inCache) {
ctgDebug("stb no longer exist, dbFName:%s, tbName:%s", output->dbFName, pTableName->tname);
continue;
}
@ -1943,7 +1808,7 @@ int32_t ctgGetTableMeta(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, cons
_return:
if (CTG_TABLE_NOT_EXIST(code) && exist) {
if (CTG_TABLE_NOT_EXIST(code) && inCache) {
char dbFName[TSDB_DB_FNAME_LEN] = {0};
if (CTG_FLAG_IS_INF_DB(flag)) {
strcpy(dbFName, pTableName->dbname);
@ -1962,7 +1827,7 @@ _return:
if (*pTableMeta) {
ctgDebug("tbmeta returned, tbName:%s, tbType:%d", pTableName->tname, (*pTableMeta)->tableType);
ctgDbgShowTableMeta(pCtg, pTableName->tname, *pTableMeta);
ctgdShowTableMeta(pCtg, pTableName->tname, *pTableMeta);
}
CTG_RET(code);
@ -2075,11 +1940,15 @@ int32_t ctgActRemoveStb(SCtgMetaAction *action) {
CTG_LOCK(CTG_WRITE, &dbCache->tbCache.stbLock);
if (taosHashRemove(dbCache->tbCache.stbCache, &msg->suid, sizeof(msg->suid))) {
ctgDebug("stb not exist in stbCache, may be removed, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {
CTG_CACHE_STAT_SUB(stblNum, 1);
}
CTG_LOCK(CTG_READ, &dbCache->tbCache.metaLock);
if (taosHashRemove(dbCache->tbCache.metaCache, msg->stbName, strlen(msg->stbName))) {
ctgError("stb not exist in cache, dbFName:%s, stb:%s, suid:%"PRIx64, msg->dbFName, msg->stbName, msg->suid);
} else {
CTG_CACHE_STAT_SUB(tblNum, 1);
}
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
@ -2119,6 +1988,8 @@ int32_t ctgActRemoveTbl(SCtgMetaAction *action) {
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
ctgError("stb not exist in cache, dbFName:%s, tbName:%s", msg->dbFName, msg->tbName);
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
} else {
CTG_CACHE_STAT_SUB(tblNum, 1);
}
CTG_UNLOCK(CTG_READ, &dbCache->tbCache.metaLock);
@ -2140,7 +2011,9 @@ void* ctgUpdateThreadFunc(void* param) {
CTG_LOCK(CTG_READ, &gCtgMgmt.lock);
while (true) {
tsem_wait(&gCtgMgmt.queue.reqSem);
if (tsem_wait(&gCtgMgmt.queue.reqSem)) {
qError("ctg tsem_wait failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
}
if (atomic_load_8((int8_t*)&gCtgMgmt.exit)) {
tsem_post(&gCtgMgmt.queue.rspSem);
@ -2161,9 +2034,9 @@ void* ctgUpdateThreadFunc(void* param) {
tsem_post(&gCtgMgmt.queue.rspSem);
}
CTG_STAT_ADD(gCtgMgmt.stat.runtime.qDoneNum);
CTG_RUNTIME_STAT_ADD(qDoneNum, 1);
ctgDbgShowClusterCache(pCtg);
ctgdShowClusterCache(pCtg);
}
CTG_UNLOCK(CTG_READ, &gCtgMgmt.lock);
@ -2304,10 +2177,15 @@ int32_t catalogInit(SCatalogCfg *cfg) {
CTG_ERR_RET(TSDB_CODE_CTG_INTERNAL_ERROR);
}
CTG_ERR_RET(ctgStartUpdateThread());
if (tsem_init(&gCtgMgmt.queue.reqSem, 0, 0)) {
qError("tsem_init failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
CTG_ERR_RET(TSDB_CODE_CTG_SYS_ERROR);
}
tsem_init(&gCtgMgmt.queue.reqSem, 0, 0);
tsem_init(&gCtgMgmt.queue.rspSem, 0, 0);
if (tsem_init(&gCtgMgmt.queue.rspSem, 0, 0)) {
qError("tsem_init failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
CTG_ERR_RET(TSDB_CODE_CTG_SYS_ERROR);
}
gCtgMgmt.queue.head = taosMemoryCalloc(1, sizeof(SCtgQNode));
if (NULL == gCtgMgmt.queue.head) {
@ -2316,6 +2194,8 @@ int32_t catalogInit(SCatalogCfg *cfg) {
}
gCtgMgmt.queue.tail = gCtgMgmt.queue.head;
CTG_ERR_RET(ctgStartUpdateThread());
qDebug("catalog initialized, maxDb:%u, maxTbl:%u, dbRentSec:%u, stbRentSec:%u", gCtgMgmt.cfg.maxDBCacheNum, gCtgMgmt.cfg.maxTblCacheNum, gCtgMgmt.cfg.dbRentSec, gCtgMgmt.cfg.stbRentSec);
return TSDB_CODE_SUCCESS;
@ -2384,6 +2264,8 @@ int32_t catalogGetHandle(uint64_t clusterId, SCatalog** catalogHandle) {
*catalogHandle = clusterCtg;
CTG_CACHE_STAT_ADD(clusterNum, 1);
return TSDB_CODE_SUCCESS;
_return:
@ -2403,6 +2285,8 @@ void catalogFreeHandle(SCatalog* pCtg) {
return;
}
CTG_CACHE_STAT_SUB(clusterNum, 1);
uint64_t clusterId = pCtg->clusterId;
ctgFreeHandle(pCtg);
@ -2417,24 +2301,12 @@ int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* vers
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
if (NULL == pCtg->dbCache) {
*version = CTG_DEFAULT_INVALID_VERSION;
ctgInfo("empty db cache, dbFName:%s", dbFName);
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
SCtgDBCache *dbCache = NULL;
ctgAcquireDBCache(pCtg, dbFName, &dbCache);
if (NULL == dbCache) {
*version = CTG_DEFAULT_INVALID_VERSION;
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
bool inCache = false;
ctgAcquireVgInfo(pCtg, dbCache, &inCache);
if (!inCache) {
ctgReleaseDBCache(pCtg, dbCache);
int32_t code = 0;
CTG_ERR_JRET(ctgAcquireVgInfoFromCache(pCtg, dbFName, &dbCache, &inCache));
if (!inCache) {
*version = CTG_DEFAULT_INVALID_VERSION;
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
@ -2449,6 +2321,10 @@ int32_t catalogGetDBVgVersion(SCatalog* pCtg, const char* dbFName, int32_t* vers
ctgDebug("Got db vgVersion from cache, dbFName:%s, vgVersion:%d", dbFName, *version);
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
_return:
CTG_API_LEAVE(code);
}
int32_t catalogGetDBVgInfo(SCatalog* pCtg, void *pRpc, const SEpSet* pMgmtEps, const char* dbFName, SArray** vgroupList) {
@ -2549,11 +2425,11 @@ int32_t catalogRemoveTableMeta(SCatalog* pCtg, const SName* pTableName) {
}
STableMeta *tblMeta = NULL;
int32_t exist = 0;
bool inCache = false;
uint64_t dbId = 0;
CTG_ERR_JRET(ctgGetTableMetaFromCache(pCtg, pTableName, &tblMeta, &exist, 0, &dbId));
CTG_ERR_JRET(ctgGetTableMetaFromCache(pCtg, pTableName, &tblMeta, &inCache, 0, &dbId));
if (0 == exist) {
if (!inCache) {
ctgDebug("table already not in cache, db:%s, tblName:%s", pTableName->dbname, pTableName->tname);
goto _return;
}
@ -2851,8 +2727,13 @@ void catalogDestroy(void) {
atomic_store_8((int8_t*)&gCtgMgmt.exit, true);
tsem_post(&gCtgMgmt.queue.reqSem);
tsem_post(&gCtgMgmt.queue.rspSem);
if (tsem_post(&gCtgMgmt.queue.reqSem)) {
qError("tsem_post failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
}
if (tsem_post(&gCtgMgmt.queue.rspSem)) {
qError("tsem_post failed, error:%s", tstrerror(TAOS_SYSTEM_ERROR(errno)));
}
while (CTG_IS_LOCKED(&gCtgMgmt.lock)) {
taosUsleep(1);

View File

@ -0,0 +1,222 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "trpc.h"
#include "query.h"
#include "tname.h"
#include "catalogInt.h"
extern SCatalogMgmt gCtgMgmt;
SCtgDebug gCTGDebug = {0};
int32_t ctgdEnableDebug(char *option) {
if (0 == strcasecmp(option, "lock")) {
gCTGDebug.lockEnable = true;
qDebug("lock debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "cache")) {
gCTGDebug.cacheEnable = true;
qDebug("cache debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "api")) {
gCTGDebug.apiEnable = true;
qDebug("api debug enabled");
return TSDB_CODE_SUCCESS;
}
if (0 == strcasecmp(option, "meta")) {
gCTGDebug.metaEnable = true;
qDebug("api debug enabled");
return TSDB_CODE_SUCCESS;
}
qError("invalid debug option:%s", option);
return TSDB_CODE_CTG_INTERNAL_ERROR;
}
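// A minimal usage sketch (hypothetical call site; pCtg stands for an existing catalog handle):
// the switches above are opt-in, so the cache dump below prints nothing until the "cache"
// option has been enabled first.
//
//   ctgdEnableDebug("cache");      // turn on cache tracing
//   ctgdShowClusterCache(pCtg);    // dump db/meta/stb/rent counters for this cluster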
int32_t ctgdGetStatNum(char *option, void *res) {
if (0 == strcasecmp(option, "runtime.qDoneNum")) {
*(uint64_t *)res = atomic_load_64(&gCtgMgmt.stat.runtime.qDoneNum);
return TSDB_CODE_SUCCESS;
}
qError("invalid stat option:%s", option);
return TSDB_CODE_CTG_INTERNAL_ERROR;
}
int32_t ctgdGetTbMetaNum(SCtgDBCache *dbCache) {
return dbCache->tbCache.metaCache ? (int32_t)taosHashGetSize(dbCache->tbCache.metaCache) : 0;
}
int32_t ctgdGetStbNum(SCtgDBCache *dbCache) {
return dbCache->tbCache.stbCache ? (int32_t)taosHashGetSize(dbCache->tbCache.stbCache) : 0;
}
int32_t ctgdGetRentNum(SCtgRentMgmt *rent) {
int32_t num = 0;
for (uint16_t i = 0; i < rent->slotNum; ++i) {
SCtgRentSlot *slot = &rent->slots[i];
if (NULL == slot->meta) {
continue;
}
num += taosArrayGetSize(slot->meta);
}
return num;
}
int32_t ctgdGetClusterCacheNum(SCatalog* pCtg, int32_t type) {
if (NULL == pCtg || NULL == pCtg->dbCache) {
return 0;
}
switch (type) {
case CTG_DBG_DB_NUM:
return (int32_t)taosHashGetSize(pCtg->dbCache);
case CTG_DBG_DB_RENT_NUM:
return ctgdGetRentNum(&pCtg->dbRent);
case CTG_DBG_STB_RENT_NUM:
return ctgdGetRentNum(&pCtg->stbRent);
default:
break;
}
SCtgDBCache *dbCache = NULL;
int32_t num = 0;
void *pIter = taosHashIterate(pCtg->dbCache, NULL);
while (pIter) {
dbCache = (SCtgDBCache *)pIter;
switch (type) {
case CTG_DBG_META_NUM:
num += ctgdGetTbMetaNum(dbCache);
break;
case CTG_DBG_STB_NUM:
num += ctgdGetStbNum(dbCache);
break;
default:
ctgError("invalid type:%d", type);
break;
}
pIter = taosHashIterate(pCtg->dbCache, pIter);
}
return num;
}
void ctgdShowTableMeta(SCatalog* pCtg, const char *tbName, STableMeta* p) {
if (!gCTGDebug.metaEnable) {
return;
}
STableComInfo *c = &p->tableInfo;
if (TSDB_CHILD_TABLE == p->tableType) {
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64, tbName, p->tableType, p->vgId, p->uid, p->suid);
return;
} else {
ctgDebug("table [%s] meta: type:%d, vgId:%d, uid:%" PRIx64 ",suid:%" PRIx64 ",sv:%d, tv:%d, tagNum:%d, precision:%d, colNum:%d, rowSize:%d",
tbName, p->tableType, p->vgId, p->uid, p->suid, p->sversion, p->tversion, c->numOfTags, c->precision, c->numOfColumns, c->rowSize);
}
int32_t colNum = c->numOfColumns + c->numOfTags;
for (int32_t i = 0; i < colNum; ++i) {
SSchema *s = &p->schema[i];
ctgDebug("[%d] name:%s, type:%d, colId:%d, bytes:%d", i, s->name, s->type, s->colId, s->bytes);
}
}
void ctgdShowDBCache(SCatalog* pCtg, SHashObj *dbHash) {
if (NULL == dbHash || !gCTGDebug.cacheEnable) {
return;
}
int32_t i = 0;
SCtgDBCache *dbCache = NULL;
void *pIter = taosHashIterate(dbHash, NULL);
while (pIter) {
char *dbFName = NULL;
size_t len = 0;
dbCache = (SCtgDBCache *)pIter;
dbFName = taosHashGetKey(pIter, &len);
int32_t metaNum = dbCache->tbCache.metaCache ? taosHashGetSize(dbCache->tbCache.metaCache) : 0;
int32_t stbNum = dbCache->tbCache.stbCache ? taosHashGetSize(dbCache->tbCache.stbCache) : 0;
int32_t vgVersion = CTG_DEFAULT_INVALID_VERSION;
int32_t hashMethod = -1;
int32_t vgNum = 0;
if (dbCache->vgInfo) {
vgVersion = dbCache->vgInfo->vgVersion;
hashMethod = dbCache->vgInfo->hashMethod;
if (dbCache->vgInfo->vgHash) {
vgNum = taosHashGetSize(dbCache->vgInfo->vgHash);
}
}
ctgDebug("[%d] db [%.*s][%"PRIx64"] %s: metaNum:%d, stbNum:%d, vgVersion:%d, hashMethod:%d, vgNum:%d",
i, (int32_t)len, dbFName, dbCache->dbId, dbCache->deleted?"deleted":"", metaNum, stbNum, vgVersion, hashMethod, vgNum);
pIter = taosHashIterate(dbHash, pIter);
}
}
void ctgdShowClusterCache(SCatalog* pCtg) {
if (!gCTGDebug.cacheEnable || NULL == pCtg) {
return;
}
ctgDebug("## cluster %"PRIx64" %p cache Info BEGIN ##", pCtg->clusterId, pCtg);
ctgDebug("db:%d meta:%d stb:%d dbRent:%d stbRent:%d", ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM),
ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM));
ctgdShowDBCache(pCtg, pCtg->dbCache);
ctgDebug("## cluster %"PRIx64" %p cache Info END ##", pCtg->clusterId, pCtg);
}
int32_t ctgdShowCacheInfo(void) {
if (!gCTGDebug.cacheEnable) {
return TSDB_CODE_CTG_OUT_OF_SERVICE;
}
CTG_API_ENTER();
SCatalog *pCtg = NULL;
void *pIter = taosHashIterate(gCtgMgmt.pCluster, NULL);
while (pIter) {
pCtg = *(SCatalog **)pIter;
if (pCtg) {
ctgdShowClusterCache(pCtg);
}
pIter = taosHashIterate(gCtgMgmt.pCluster, pIter);
}
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}

View File

@ -38,11 +38,11 @@
namespace {
extern "C" int32_t ctgGetTableMetaFromCache(struct SCatalog *pCatalog, const SName *pTableName, STableMeta **pTableMeta,
int32_t *exist, int32_t flag, uint64_t *dbId);
extern "C" int32_t ctgDbgGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type);
bool *inCache, int32_t flag, uint64_t *dbId);
extern "C" int32_t ctgdGetClusterCacheNum(struct SCatalog* pCatalog, int32_t type);
extern "C" int32_t ctgActUpdateTbl(SCtgMetaAction *action);
extern "C" int32_t ctgDbgEnableDebug(char *option);
extern "C" int32_t ctgDbgGetStatNum(char *option, void *res);
extern "C" int32_t ctgdEnableDebug(char *option);
extern "C" int32_t ctgdGetStatNum(char *option, void *res);
void ctgTestSetRspTableMeta();
void ctgTestSetRspCTableMeta();
@ -140,9 +140,9 @@ void ctgTestInitLogFile() {
qDebugFlag = 159;
strcpy(tsLogDir, "/var/log/taos");
ctgDbgEnableDebug("api");
ctgDbgEnableDebug("meta");
ctgDbgEnableDebug("cache");
ctgdEnableDebug("api");
ctgdEnableDebug("meta");
ctgdEnableDebug("cache");
if (taosInitLog(defaultLogFileNamePrefix, maxLogFileNum) < 0) {
printf("failed to open log file in directory:%s\n", tsLogDir);
@ -786,15 +786,15 @@ void *ctgTestGetCtableMetaThread(void *param) {
int32_t code = 0;
int32_t n = 0;
STableMeta *tbMeta = NULL;
int32_t exist = 0;
bool inCache = false;
SName cn = {.type = TSDB_TABLE_NAME_T, .acctId = 1};
strcpy(cn.dbname, "db1");
strcpy(cn.tname, ctgTestCTablename);
while (!ctgTestStop) {
code = ctgGetTableMetaFromCache(pCtg, &cn, &tbMeta, &exist, 0, NULL);
if (code || 0 == exist) {
code = ctgGetTableMetaFromCache(pCtg, &cn, &tbMeta, &inCache, 0, NULL);
if (code || !inCache) {
assert(0);
}
@ -879,7 +879,7 @@ TEST(tableMeta, normalTable) {
ASSERT_EQ(vgInfo.vgId, 8);
ASSERT_EQ(vgInfo.epSet.numOfEps, 3);
while (0 == ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM)) {
while (0 == ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM)) {
taosMsleep(50);
}
@ -899,7 +899,7 @@ TEST(tableMeta, normalTable) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
while (true) {
uint32_t n = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) {
taosMsleep(50);
} else {
@ -994,7 +994,7 @@ TEST(tableMeta, childTableCase) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
while (true) {
uint32_t n = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) {
taosMsleep(50);
} else {
@ -1103,7 +1103,7 @@ TEST(tableMeta, superTableCase) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
while (true) {
uint32_t n = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) {
taosMsleep(50);
} else {
@ -1130,7 +1130,7 @@ TEST(tableMeta, superTableCase) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
while (true) {
uint32_t n = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (2 != n) {
taosMsleep(50);
} else {
@ -1228,7 +1228,7 @@ TEST(tableMeta, rmStbMeta) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
while (true) {
uint32_t n = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) {
taosMsleep(50);
} else {
@ -1241,8 +1241,8 @@ TEST(tableMeta, rmStbMeta) {
ASSERT_EQ(code, 0);
while (true) {
int32_t n = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
int32_t m = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM);
int32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
int32_t m = ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM);
if (n || m) {
taosMsleep(50);
} else {
@ -1251,11 +1251,11 @@ TEST(tableMeta, rmStbMeta) {
}
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), 1);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM), 0);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), 0);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), 1);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM), 0);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), 1);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM), 0);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), 0);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), 1);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM), 0);
catalogDestroy();
memset(&gCtgMgmt, 0, sizeof(gCtgMgmt));
@ -1298,7 +1298,7 @@ TEST(tableMeta, updateStbMeta) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
while (true) {
uint32_t n = ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
uint32_t n = ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM);
if (0 == n) {
taosMsleep(50);
} else {
@ -1318,7 +1318,7 @@ TEST(tableMeta, updateStbMeta) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n != 3) {
taosMsleep(50);
} else {
@ -1326,11 +1326,11 @@ TEST(tableMeta, updateStbMeta) {
}
}
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), 1);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM), 1);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), 1);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), 1);
ASSERT_EQ(ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM), 1);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_NUM), 1);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM), 1);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_NUM), 1);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_DB_RENT_NUM), 1);
ASSERT_EQ(ctgdGetClusterCacheNum(pCtg, CTG_DBG_STB_RENT_NUM), 1);
code = catalogGetTableMeta(pCtg, mockPointer, (const SEpSet *)mockPointer, &n, &tableMeta);
ASSERT_EQ(code, 0);
@ -1388,7 +1388,7 @@ TEST(refreshGetMeta, normal2normal) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n > 0) {
break;
}
@ -1409,7 +1409,7 @@ TEST(refreshGetMeta, normal2normal) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFreeClear(tableMeta);
while (0 == ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
while (0 == ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
taosMsleep(50);
}
@ -1467,7 +1467,7 @@ TEST(refreshGetMeta, normal2notexist) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n > 0) {
break;
}
@ -1488,7 +1488,7 @@ TEST(refreshGetMeta, normal2notexist) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFreeClear(tableMeta);
while (0 == ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
while (0 == ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
taosMsleep(50);
}
@ -1541,7 +1541,7 @@ TEST(refreshGetMeta, normal2child) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n > 0) {
break;
}
@ -1562,7 +1562,7 @@ TEST(refreshGetMeta, normal2child) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFreeClear(tableMeta);
while (0 == ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
while (0 == ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
taosMsleep(50);
}
@ -1625,7 +1625,7 @@ TEST(refreshGetMeta, stable2child) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n > 0) {
break;
}
@ -1647,7 +1647,7 @@ TEST(refreshGetMeta, stable2child) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFreeClear(tableMeta);
while (0 == ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
while (0 == ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
taosMsleep(50);
}
@ -1710,7 +1710,7 @@ TEST(refreshGetMeta, stable2stable) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n > 0) {
break;
}
@ -1732,7 +1732,7 @@ TEST(refreshGetMeta, stable2stable) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFreeClear(tableMeta);
while (0 == ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
while (0 == ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
taosMsleep(50);
}
@ -1798,7 +1798,7 @@ TEST(refreshGetMeta, child2stable) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n > 0) {
break;
}
@ -1818,7 +1818,7 @@ TEST(refreshGetMeta, child2stable) {
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
taosMemoryFreeClear(tableMeta);
while (2 != ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
while (2 != ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM)) {
taosMsleep(50);
}
@ -2015,7 +2015,7 @@ TEST(dbVgroup, getSetDbVgroupCase) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n > 0) {
break;
}
@ -2041,7 +2041,7 @@ TEST(dbVgroup, getSetDbVgroupCase) {
while (true) {
uint64_t n = 0;
ctgDbgGetStatNum("runtime.qDoneNum", (void *)&n);
ctgdGetStatNum("runtime.qDoneNum", (void *)&n);
if (n != 3) {
taosMsleep(50);
} else {
@ -2266,7 +2266,7 @@ TEST(rentTest, allRent) {
ASSERT_EQ(tableMeta->tableInfo.precision, 1);
ASSERT_EQ(tableMeta->tableInfo.rowSize, 12);
while (ctgDbgGetClusterCacheNum(pCtg, CTG_DBG_META_NUM) < i) {
while (ctgdGetClusterCacheNum(pCtg, CTG_DBG_META_NUM) < i) {
taosMsleep(50);
}

View File

@ -0,0 +1,99 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _TD_QUERY_INT_H_
#define _TD_QUERY_INT_H_
#ifdef __cplusplus
extern "C" {
#endif
#include "nodes.h"
#include "plannodes.h"
#include "ttime.h"
#define EXPLAIN_MAX_GROUP_NUM 100
// formats that start a new explain row
#define EXPLAIN_TAG_SCAN_FORMAT "Tag Scan on %s columns=%d width=%d"
#define EXPLAIN_TBL_SCAN_FORMAT "Table Scan on %s columns=%d width=%d"
#define EXPLAIN_SYSTBL_SCAN_FORMAT "System Table Scan on %s columns=%d width=%d"
#define EXPLAIN_PROJECTION_FORMAT "Projection columns=%d width=%d"
#define EXPLAIN_JOIN_FORMAT "%s between %d tables width=%d"
#define EXPLAIN_AGG_FORMAT "Aggregate functions=%d"
#define EXPLAIN_EXCHANGE_FORMAT "Data Exchange %d:1 width=%d"
#define EXPLAIN_SORT_FORMAT "Sort on %d Column(s) width=%d"
#define EXPLAIN_INTERVAL_FORMAT "Interval on Column %s functions=%d interval=%" PRId64 "%c offset=%" PRId64 "%c sliding=%" PRId64 "%c width=%d"
#define EXPLAIN_SESSION_FORMAT "Session gap=%" PRId64 " functions=%d width=%d"
#define EXPLAIN_ORDER_FORMAT "Order: %s"
#define EXPLAIN_FILTER_FORMAT "Filter: "
#define EXPLAIN_FILL_FORMAT "Fill: %s"
#define EXPLAIN_ON_CONDITIONS_FORMAT "Join Cond: "
#define EXPLAIN_TIMERANGE_FORMAT "Time Range: [%" PRId64 ", %" PRId64 "]"
// formats appended to the current explain row
#define EXPLAIN_GROUPS_FORMAT " groups=%d"
#define EXPLAIN_WIDTH_FORMAT " width=%d"
#define EXPLAIN_LOOPS_FORMAT " loops=%d"
#define EXPLAIN_REVERSE_FORMAT " reverse=%d"
typedef struct SExplainGroup {
int32_t nodeNum;
SSubplan *plan;
void *execInfo; //TODO
} SExplainGroup;
typedef struct SExplainResNode {
SNodeList* pChildren;
SPhysiNode* pNode;
void* pExecInfo;
} SExplainResNode;
typedef struct SQueryExplainRowInfo {
int32_t level;
int32_t len;
char *buf;
} SQueryExplainRowInfo;
typedef struct SExplainCtx {
int32_t totalSize;
bool verbose;
char *tbuf;
SArray *rows;
SHashObj *groupHash;
} SExplainCtx;
#define EXPLAIN_ORDER_STRING(_order) ((TSDB_ORDER_ASC == _order) ? "Ascending" : "Descending")
#define EXPLAIN_JOIN_STRING(_type) ((JOIN_TYPE_INNER == _type) ? "Inner join" : "Join")
#define INVERAL_TIME_FROM_PRECISION_TO_UNIT(_t, _u, _p) (((_u) == 'n' || (_u) == 'y') ? (_t) : (convertTimeFromPrecisionToUnit(_t, _p, _u)))
#define EXPLAIN_ROW_NEW(level, ...) \
do { \
if (isVerboseLine) { \
tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, "%*s", (level) * 2 + 3, ""); \
} else { \
tlen = snprintf(tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, "%*s%s", (level) * 2, "", "-> "); \
} \
tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - tlen, __VA_ARGS__); \
} while (0)
#define EXPLAIN_ROW_APPEND(...) tlen += snprintf(tbuf + VARSTR_HEADER_SIZE + tlen, TSDB_EXPLAIN_RESULT_ROW_SIZE - tlen, __VA_ARGS__)
#define EXPLAIN_ROW_END() do { varDataSetLen(tbuf, tlen); tlen += VARSTR_HEADER_SIZE; isVerboseLine = true; } while (0)
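// A minimal sketch of how the three row macros are meant to cooperate (the counts and the
// ctx/level values below are placeholders, not taken from a real plan): EXPLAIN_ROW_NEW opens
// a row at the given indent level, EXPLAIN_ROW_APPEND extends it, and EXPLAIN_ROW_END fixes
// the varstr length. The caller must declare the locals the macros rely on, as the real call
// sites do.
//
//   int32_t tlen = 0;
//   bool    isVerboseLine = false;
//   char   *tbuf = ctx->tbuf;   // buffer of TSDB_EXPLAIN_RESULT_ROW_SIZE bytes
//   EXPLAIN_ROW_NEW(level, EXPLAIN_PROJECTION_FORMAT, nCols, rowSize);
//   EXPLAIN_ROW_APPEND(EXPLAIN_GROUPS_FORMAT, nGroups);
//   EXPLAIN_ROW_END();          // varDataSetLen(tbuf, tlen), then tlen includes the header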
#ifdef __cplusplus
}
#endif
#endif /*_TD_QUERY_INT_H_*/

View File

@ -0,0 +1,687 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "query.h"
#include "plannodes.h"
#include "commandInt.h"
int32_t qGenerateExplainResNode(SPhysiNode *pNode, void *pExecInfo, SExplainResNode **pRes);
int32_t qAppendTaskExplainResRows(void *pCtx, int32_t groupId, int32_t level);
void qFreeExplainResTree(SExplainResNode *res) {
if (NULL == res) {
return;
}
taosMemoryFreeClear(res->pExecInfo);
SNode* node = NULL;
FOREACH(node, res->pChildren) {
qFreeExplainResTree((SExplainResNode *)node);
}
nodesClearList(res->pChildren);
taosMemoryFreeClear(res);
}
void qFreeExplainCtx(void *ctx) {
if (NULL == ctx) {
return;
}
SExplainCtx *pCtx = (SExplainCtx *)ctx;
int32_t rowSize = taosArrayGetSize(pCtx->rows);
for (int32_t i = 0; i < rowSize; ++i) {
SQueryExplainRowInfo *row = taosArrayGet(pCtx->rows, i);
taosMemoryFreeClear(row->buf);
}
taosHashCleanup(pCtx->groupHash);
taosArrayDestroy(pCtx->rows);
taosMemoryFree(pCtx);
}
int32_t qInitExplainCtx(void **pCtx, SHashObj *groupHash, bool verbose) {
int32_t code = 0;
SExplainCtx *ctx = taosMemoryCalloc(1, sizeof(SExplainCtx));
if (NULL == ctx) {
qError("calloc SExplainCtx failed");
QRY_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
SArray *rows = taosArrayInit(10, sizeof(SQueryExplainRowInfo));
if (NULL == rows) {
qError("taosArrayInit SQueryExplainRowInfo failed");
QRY_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
char *tbuf = taosMemoryMalloc(TSDB_EXPLAIN_RESULT_ROW_SIZE);
if (NULL == tbuf) {
qError("malloc size %d failed", TSDB_EXPLAIN_RESULT_ROW_SIZE);
QRY_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
ctx->verbose = verbose;
ctx->tbuf = tbuf;
ctx->rows = rows;
ctx->groupHash = groupHash;
*pCtx = ctx;
return TSDB_CODE_SUCCESS;
_return:
taosArrayDestroy(rows);
taosHashCleanup(groupHash);
taosMemoryFree(ctx);
QRY_RET(code);
}
char *qFillModeString(EFillMode mode) {
switch (mode) {
case FILL_MODE_NONE:
return "none";
case FILL_MODE_VALUE:
return "value";
case FILL_MODE_PREV:
return "prev";
case FILL_MODE_NULL:
return "null";
case FILL_MODE_LINEAR:
return "linear";
case FILL_MODE_NEXT:
return "next";
default:
return "unknown";
}
}
char *qGetNameFromColumnNode(SNode *pNode) {
if (NULL == pNode || QUERY_NODE_COLUMN != pNode->type) {
return "NULL";
}
return ((SColumnNode *)pNode)->colName;
}
int32_t qGenerateExplainResChildren(SPhysiNode *pNode, void *pExecInfo, SNodeList **pChildren) {
int32_t tlen = 0;
SNodeList *pPhysiChildren = NULL;
switch (pNode->type) {
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: {
STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode;
pPhysiChildren = pTagScanNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:{
STableScanPhysiNode *pTblScanNode = (STableScanPhysiNode *)pNode;
pPhysiChildren = pTblScanNode->scan.node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:{
SSystemTableScanPhysiNode *pSTblScanNode = (SSystemTableScanPhysiNode *)pNode;
pPhysiChildren = pSTblScanNode->scan.node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:{
SProjectPhysiNode *pPrjNode = (SProjectPhysiNode *)pNode;
pPhysiChildren = pPrjNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_JOIN:{
SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode;
pPhysiChildren = pJoinNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_AGG:{
SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode;
pPhysiChildren = pAggNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:{
SExchangePhysiNode *pExchNode = (SExchangePhysiNode *)pNode;
pPhysiChildren = pExchNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SORT:{
SSortPhysiNode *pSortNode = (SSortPhysiNode *)pNode;
pPhysiChildren = pSortNode->node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_INTERVAL:{
SIntervalPhysiNode *pIntNode = (SIntervalPhysiNode *)pNode;
pPhysiChildren = pIntNode->window.node.pChildren;
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:{
SSessionWinodwPhysiNode *pSessNode = (SSessionWinodwPhysiNode *)pNode;
pPhysiChildren = pSessNode->window.node.pChildren;
break;
}
default:
qError("not supported physical node type %d", pNode->type);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
if (pPhysiChildren) {
*pChildren = nodesMakeList();
if (NULL == *pChildren) {
qError("nodesMakeList failed");
QRY_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
}
SNode* node = NULL;
SExplainResNode *pResNode = NULL;
FOREACH(node, pPhysiChildren) {
QRY_ERR_RET(qGenerateExplainResNode((SPhysiNode *)node, pExecInfo, &pResNode));
QRY_ERR_RET(nodesListAppend(*pChildren, pResNode));
}
return TSDB_CODE_SUCCESS;
}
int32_t qGenerateExplainResNode(SPhysiNode *pNode, void *pExecInfo, SExplainResNode **pRes) {
if (NULL == pNode) {
*pRes = NULL;
qError("physical node is NULL");
return TSDB_CODE_QRY_APP_ERROR;
}
SExplainResNode *res = taosMemoryCalloc(1, sizeof(SExplainResNode));
if (NULL == res) {
qError("calloc SPhysiNodeExplainRes failed");
return TSDB_CODE_QRY_OUT_OF_MEMORY;
}
int32_t code = 0;
res->pNode = pNode;
res->pExecInfo = pExecInfo;
QRY_ERR_JRET(qGenerateExplainResChildren(pNode, pExecInfo, &res->pChildren));
*pRes = res;
return TSDB_CODE_SUCCESS;
_return:
qFreeExplainResTree(res);
QRY_RET(code);
}
int32_t qExplainBufAppendExecInfo(void *pExecInfo, char *tbuf, int32_t *len) {
int32_t tlen = *len;
EXPLAIN_ROW_APPEND("(exec info here)");
*len = tlen;
return TSDB_CODE_SUCCESS;
}
int32_t qExplainResAppendRow(SExplainCtx *ctx, char *tbuf, int32_t len, int32_t level) {
SQueryExplainRowInfo row = {0};
row.buf = taosMemoryMalloc(len);
if (NULL == row.buf) {
qError("taosMemoryMalloc %d failed", len);
QRY_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
memcpy(row.buf, tbuf, len);
row.level = level;
row.len = len;
ctx->totalSize += len;
if (NULL == taosArrayPush(ctx->rows, &row)) {
qError("taosArrayPush row to explain res rows failed");
taosMemoryFree(row.buf);
QRY_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
return TSDB_CODE_SUCCESS;
}
int32_t qExplainResNodeToRowsImpl(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
int32_t tlen = 0;
bool isVerboseLine = false;
char *tbuf = ctx->tbuf;
bool verbose = ctx->verbose;
SPhysiNode* pNode = pResNode->pNode;
if (NULL == pNode) {
qError("pyhsical node in explain res node is NULL");
return TSDB_CODE_QRY_APP_ERROR;
}
switch (pNode->type) {
case QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN: {
STagScanPhysiNode *pTagScanNode = (STagScanPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_TAG_SCAN_FORMAT, pTagScanNode->tableName.tname, pTagScanNode->pScanCols->length, pTagScanNode->node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_APPEND(EXPLAIN_LOOPS_FORMAT, pTagScanNode->count);
if (pTagScanNode->reverse) {
EXPLAIN_ROW_APPEND(EXPLAIN_REVERSE_FORMAT, pTagScanNode->reverse);
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pTagScanNode->order));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SEQ_SCAN:
case QUERY_NODE_PHYSICAL_PLAN_TABLE_SCAN:{
STableScanPhysiNode *pTblScanNode = (STableScanPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_TBL_SCAN_FORMAT, pTblScanNode->scan.tableName.tname, pTblScanNode->scan.pScanCols->length, pTblScanNode->scan.node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_APPEND(EXPLAIN_LOOPS_FORMAT, pTblScanNode->scan.count);
if (pTblScanNode->scan.reverse) {
EXPLAIN_ROW_APPEND(EXPLAIN_REVERSE_FORMAT, pTblScanNode->scan.reverse);
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pTblScanNode->scan.order));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_TIMERANGE_FORMAT, pTblScanNode->scanRange.skey, pTblScanNode->scanRange.ekey);
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
if (pTblScanNode->scan.node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pTblScanNode->scan.node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SYSTABLE_SCAN:{
SSystemTableScanPhysiNode *pSTblScanNode = (SSystemTableScanPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_SYSTBL_SCAN_FORMAT, pSTblScanNode->scan.tableName.tname, pSTblScanNode->scan.pScanCols->length, pSTblScanNode->scan.node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_APPEND(EXPLAIN_LOOPS_FORMAT, pSTblScanNode->scan.count);
if (pSTblScanNode->scan.reverse) {
EXPLAIN_ROW_APPEND(EXPLAIN_REVERSE_FORMAT, pSTblScanNode->scan.reverse);
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_ORDER_FORMAT, EXPLAIN_ORDER_STRING(pSTblScanNode->scan.order));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
if (pSTblScanNode->scan.node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pSTblScanNode->scan.node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_PROJECT:{
SProjectPhysiNode *pPrjNode = (SProjectPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_PROJECTION_FORMAT, pPrjNode->pProjections->length, pPrjNode->node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
if (pPrjNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pPrjNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_JOIN:{
SJoinPhysiNode *pJoinNode = (SJoinPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_JOIN_FORMAT, EXPLAIN_JOIN_STRING(pJoinNode->joinType), pJoinNode->pTargets->length, pJoinNode->node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
if (pJoinNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pJoinNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_ON_CONDITIONS_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pJoinNode->pOnConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_AGG:{
SAggPhysiNode *pAggNode = (SAggPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_AGG_FORMAT, pAggNode->pAggFuncs->length);
if (pAggNode->pGroupKeys) {
EXPLAIN_ROW_APPEND(EXPLAIN_GROUPS_FORMAT, pAggNode->pGroupKeys->length);
}
EXPLAIN_ROW_APPEND(EXPLAIN_WIDTH_FORMAT, pAggNode->node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
if (pAggNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pAggNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_EXCHANGE:{
SExchangePhysiNode *pExchNode = (SExchangePhysiNode *)pNode;
SExplainGroup *group = taosHashGet(ctx->groupHash, &pExchNode->srcGroupId, sizeof(pExchNode->srcGroupId));
if (NULL == group) {
qError("exchange src group %d not in groupHash", pExchNode->srcGroupId);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
EXPLAIN_ROW_NEW(level, EXPLAIN_EXCHANGE_FORMAT, group->nodeNum, pExchNode->node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
if (pExchNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pExchNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
QRY_ERR_RET(qAppendTaskExplainResRows(ctx, pExchNode->srcGroupId, level + 1));
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SORT:{
SSortPhysiNode *pSortNode = (SSortPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_SORT_FORMAT, pSortNode->pSortKeys->length, pSortNode->node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
if (pSortNode->node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pSortNode->node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_INTERVAL:{
SIntervalPhysiNode *pIntNode = (SIntervalPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_INTERVAL_FORMAT, qGetNameFromColumnNode(pIntNode->pTspk), pIntNode->window.pFuncs->length,
INVERAL_TIME_FROM_PRECISION_TO_UNIT(pIntNode->interval, pIntNode->intervalUnit, pIntNode->precision), pIntNode->intervalUnit,
pIntNode->offset, getPrecisionUnit(pIntNode->precision),
INVERAL_TIME_FROM_PRECISION_TO_UNIT(pIntNode->sliding, pIntNode->slidingUnit, pIntNode->precision), pIntNode->slidingUnit,
pIntNode->window.node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
if (pIntNode->pFill) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILL_FORMAT, qFillModeString(pIntNode->pFill->mode));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
if (pIntNode->window.node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pIntNode->window.node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
case QUERY_NODE_PHYSICAL_PLAN_SESSION_WINDOW:{
SSessionWinodwPhysiNode *pIntNode = (SSessionWinodwPhysiNode *)pNode;
EXPLAIN_ROW_NEW(level, EXPLAIN_SESSION_FORMAT, pIntNode->gap, pIntNode->window.pFuncs->length, pIntNode->window.node.pOutputDataBlockDesc->outputRowSize);
if (pResNode->pExecInfo) {
QRY_ERR_RET(qExplainBufAppendExecInfo(pResNode->pExecInfo, tbuf, &tlen));
}
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level));
if (verbose) {
if (pIntNode->window.node.pConditions) {
EXPLAIN_ROW_NEW(level + 1, EXPLAIN_FILTER_FORMAT);
QRY_ERR_RET(nodesNodeToSQL(pIntNode->window.node.pConditions, tbuf + VARSTR_HEADER_SIZE, TSDB_EXPLAIN_RESULT_ROW_SIZE, &tlen));
EXPLAIN_ROW_END();
QRY_ERR_RET(qExplainResAppendRow(ctx, tbuf, tlen, level + 1));
}
}
break;
}
default:
qError("not supported physical node type %d", pNode->type);
return TSDB_CODE_QRY_APP_ERROR;
}
return TSDB_CODE_SUCCESS;
}
int32_t qExplainResNodeToRows(SExplainResNode *pResNode, SExplainCtx *ctx, int32_t level) {
if (NULL == pResNode) {
qError("explain res node is NULL");
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
int32_t code = 0;
QRY_ERR_RET(qExplainResNodeToRowsImpl(pResNode, ctx, level));
SNode* pNode = NULL;
FOREACH(pNode, pResNode->pChildren) {
QRY_ERR_RET(qExplainResNodeToRows((SExplainResNode *)pNode, ctx, level + 1));
}
return TSDB_CODE_SUCCESS;
}
int32_t qAppendTaskExplainResRows(void *pCtx, int32_t groupId, int32_t level) {
SExplainResNode *node = NULL;
int32_t code = 0;
SExplainCtx *ctx = (SExplainCtx *)pCtx;
SExplainGroup *group = taosHashGet(ctx->groupHash, &groupId, sizeof(groupId));
if (NULL == group) {
qError("group %d not in groupHash", groupId);
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
QRY_ERR_RET(qGenerateExplainResNode(group->plan->pNode, group->execInfo, &node));
QRY_ERR_JRET(qExplainResNodeToRows(node, ctx, level));
_return:
qFreeExplainResTree(node);
QRY_RET(code);
}
int32_t qGetExplainRspFromCtx(void *ctx, SRetrieveTableRsp **pRsp) {
SExplainCtx *pCtx = (SExplainCtx *)ctx;
int32_t rowNum = taosArrayGetSize(pCtx->rows);
if (rowNum <= 0) {
qError("empty explain res rows");
QRY_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
int32_t colNum = 1;
int32_t rspSize = sizeof(SRetrieveTableRsp) + sizeof(int32_t) * colNum + sizeof(int32_t) * rowNum + pCtx->totalSize;
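// Assumed layout of rsp->data behind the size above, inferred from the writes below:
// one int32 column length (pCtx->totalSize), then rowNum int32 row offsets, then the
// concatenated row varstrs (pCtx->totalSize bytes in total).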
SRetrieveTableRsp *rsp = (SRetrieveTableRsp *)taosMemoryCalloc(1, rspSize);
if (NULL == rsp) {
qError("malloc SRetrieveTableRsp failed, size:%d", rspSize);
QRY_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
rsp->completed = 1;
rsp->numOfRows = htonl(rowNum);
*(int32_t *)rsp->data = htonl(pCtx->totalSize);
int32_t *offset = (int32_t *)((char *)rsp->data + sizeof(int32_t));
char *data = (char *)(offset + rowNum);
int32_t tOffset = 0;
for (int32_t i = 0; i < rowNum; ++i) {
SQueryExplainRowInfo *row = taosArrayGet(pCtx->rows, i);
*offset = tOffset;
tOffset += row->len;
memcpy(data, row->buf, row->len);
++offset;
data += row->len;
}
*pRsp = rsp;
return TSDB_CODE_SUCCESS;
}
int32_t qExecStaticExplain(SQueryPlan *pDag, SRetrieveTableRsp **pRsp) {
int32_t code = 0;
SNodeListNode *plans = NULL;
int32_t taskNum = 0;
SExplainGroup *pGroup = NULL;
void *pCtx = NULL;
int32_t rootGroupId = 0;
if (pDag->numOfSubplans <= 0) {
qError("invalid subplan num:%d", pDag->numOfSubplans);
QRY_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
int32_t levelNum = (int32_t)LIST_LENGTH(pDag->pSubplans);
if (levelNum <= 0) {
qError("invalid level num:%d", levelNum);
QRY_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
SHashObj *groupHash = taosHashInit(EXPLAIN_MAX_GROUP_NUM, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), false, HASH_NO_LOCK);
if (NULL == groupHash) {
qError("groupHash %d failed", EXPLAIN_MAX_GROUP_NUM);
QRY_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
QRY_ERR_JRET(qInitExplainCtx(&pCtx, groupHash, pDag->explainInfo.verbose));
for (int32_t i = 0; i < levelNum; ++i) {
plans = (SNodeListNode *)nodesListGetNode(pDag->pSubplans, i);
if (NULL == plans) {
qError("empty level plan, level:%d", i);
QRY_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
taskNum = (int32_t)LIST_LENGTH(plans->pNodeList);
if (taskNum <= 0) {
qError("invalid level plan number:%d, level:%d", taskNum, i);
QRY_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
SSubplan *plan = NULL;
for (int32_t n = 0; n < taskNum; ++n) {
plan = (SSubplan *)nodesListGetNode(plans->pNodeList, n);
pGroup = taosHashGet(groupHash, &plan->id.groupId, sizeof(plan->id.groupId));
if (pGroup) {
++pGroup->nodeNum;
continue;
}
SExplainGroup group = {.nodeNum = 1, .plan = plan, .execInfo = NULL};
if (0 != taosHashPut(groupHash, &plan->id.groupId, sizeof(plan->id.groupId), &group, sizeof(group))) {
qError("taosHashPut to explainGroupHash failed, taskIdx:%d", n);
QRY_ERR_JRET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
}
if (0 == i) {
if (taskNum > 1) {
qError("invalid taskNum %d for level 0", taskNum);
QRY_ERR_JRET(TSDB_CODE_QRY_INVALID_INPUT);
}
rootGroupId = plan->id.groupId;
}
qDebug("level %d group handled, taskNum:%d", i, taskNum);
}
QRY_ERR_JRET(qAppendTaskExplainResRows(pCtx, rootGroupId, 0));
QRY_ERR_JRET(qGetExplainRspFromCtx(pCtx, pRsp));
_return:
qFreeExplainCtx(pCtx);
QRY_RET(code);
}

View File

@ -63,9 +63,10 @@ typedef struct {
FstRegex *regexCreate(const char *str);
void regexSetup(FstRegex *regex, uint32_t size, const char *str);
// uint32_t regexStart()
uint32_t regexAutomStart(FstRegex *regex);
bool regexAutomIsMatch(FstRegex *regex, uint32_t state);
bool regexAutomCanMatch(FstRegex *regex, uint32_t state, bool null);
bool regexAutomAccept(FstRegex *regex, uint32_t state, uint8_t byte, uint32_t *result);
#ifdef __cplusplus
}

View File

@ -23,8 +23,8 @@ extern "C" {
#endif
typedef struct FstSparseSet {
SArray *dense;
SArray *sparse;
uint32_t *dense;
uint32_t *sparse;
int32_t size;
} FstSparseSet;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3

View File

@ -14,6 +14,7 @@
*/
#include "indexFstRegex.h"
#include "indexFstDfa.h"
#include "indexFstSparse.h"
FstRegex *regexCreate(const char *str) {
@ -26,9 +27,35 @@ FstRegex *regexCreate(const char *str) {
memcpy(orig, str, sz);
regex->orig = orig;
// construct insts based on str
SArray *insts = NULL;
FstDfaBuilder *builder = dfaBuilderCreate(insts);
regex->dfa = dfaBuilderBuild(builder);
return regex;
}
void regexSetup(FstRegex *regex, uint32_t size, const char *str) {
// return
// return;
}
uint32_t regexAutomStart(FstRegex *regex) {
// do nothing
return 0;
}
bool regexAutomIsMatch(FstRegex *regex, uint32_t state) {
if (regex->dfa != NULL && dfaIsMatch(regex->dfa, state)) {
return true;
} else {
return false;
}
}
bool regexAutomCanMatch(FstRegex *regex, uint32_t state, bool null) {
// just keep the automaton framework happy
return null;
}
bool regexAutomAccept(FstRegex *regex, uint32_t state, uint8_t byte, uint32_t *result) {
if (regex->dfa == NULL) {
return false;
}
return dfaAccept(regex->dfa, state, byte, result);
}

View File

@ -21,47 +21,44 @@ FstSparseSet *sparSetCreate(int32_t sz) {
return NULL;
}
ss->dense = taosArrayInit(sz, sizeof(uint32_t));
ss->sparse = taosArrayInit(sz, sizeof(uint32_t));
ss->size = sz;
ss->dense = (uint32_t *)taosMemoryCalloc(sz, sizeof(uint32_t));
ss->sparse = (uint32_t *)taosMemoryCalloc(sz, sizeof(uint32_t));
ss->size = 0;
return ss;
}
void sparSetDestroy(FstSparseSet *ss) {
if (ss == NULL) {
return;
}
taosArrayDestroy(ss->dense);
taosArrayDestroy(ss->sparse);
taosMemoryFree(ss->dense);
taosMemoryFree(ss->sparse);
taosMemoryFree(ss);
}
uint32_t sparSetLen(FstSparseSet *ss) { return ss == NULL ? 0 : ss->size; }
uint32_t sparSetLen(FstSparseSet *ss) {
// Get occupied size
return ss == NULL ? 0 : ss->size;
}
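// The two raw arrays implement the classic sparse-set trick (assuming callers only add
// values below the allocated capacity): dense[0..size) stores the inserted values in
// insertion order and sparse[v] remembers where value v sits in dense. Membership is the
// O(1) check `i = sparse[v]; i < size && dense[i] == v`, and clearing only resets size,
// without touching either array.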
uint32_t sparSetAdd(FstSparseSet *ss, uint32_t ip) {
if (ss == NULL) {
return 0;
}
uint32_t i = ss->size;
taosArraySet(ss->dense, i, &ip);
taosArraySet(ss->sparse, ip, &i);
ss->dense[i] = ip;
ss->sparse[ip] = i;
ss->size += 1;
return i;
}
uint32_t sparSetGet(FstSparseSet *ss, uint32_t i) {
if (i >= taosArrayGetSize(ss->dense)) {
return 0;
}
uint32_t *v = taosArrayGet(ss->dense, i);
return *v;
// TODO: bounds-check i before indexing dense
return ss->dense[i];
}
bool sparSetContains(FstSparseSet *ss, uint32_t ip) {
if (ip >= taosArrayGetSize(ss->sparse)) {
uint32_t i = ss->sparse[ip];
if (i < ss->size && ss->dense[i] == ip) {
return true;
} else {
return false;
}
uint32_t i = *(uint32_t *)taosArrayGet(ss->sparse, ip);
if (i >= taosArrayGetSize(ss->dense)) {
return false;
}
uint32_t v = *(uint32_t *)taosArrayGet(ss->dense, i);
return v == ip;
}
void sparSetClear(FstSparseSet *ss) {
if (ss == NULL) {

View File

@ -20,12 +20,16 @@
extern "C" {
#endif
#define nodesFatal(param, ...) qFatal("NODES: " param, __VA_ARGS__)
#define nodesError(param, ...) qError("NODES: " param, __VA_ARGS__)
#define nodesWarn(param, ...) qWarn("NODES: " param, __VA_ARGS__)
#define nodesInfo(param, ...) qInfo("NODES: " param, __VA_ARGS__)
#define nodesDebug(param, ...) qDebug("NODES: " param, __VA_ARGS__)
#define nodesTrace(param, ...) qTrace("NODES: " param, __VA_ARGS__)
#define nodesFatal(...) qFatal("NODES: " __VA_ARGS__)
#define nodesError(...) qError("NODES: " __VA_ARGS__)
#define nodesWarn(...) qWarn("NODES: " __VA_ARGS__)
#define nodesInfo(...) qInfo("NODES: " __VA_ARGS__)
#define nodesDebug(...) qDebug("NODES: " __VA_ARGS__)
#define nodesTrace(...) qTrace("NODES: " __VA_ARGS__)
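// The "NODES: " prefix is glued to the caller's format by C string-literal concatenation,
// so e.g. nodesError("unknown node = %s", name) expands to
// qError("NODES: " "unknown node = %s", name); name here is just an illustrative argument.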
#define NODES_ERR_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; return _code; } } while (0)
#define NODES_RET(c) do { int32_t _code = c; if (_code != TSDB_CODE_SUCCESS) { terrno = _code; } return _code; } while (0)
#define NODES_ERR_JRET(c) do { code = c; if (code != TSDB_CODE_SUCCESS) { terrno = code; goto _return; } } while (0)
#ifdef __cplusplus
}

View File

@ -0,0 +1,145 @@
/*
* Copyright (c) 2019 TAOS Data, Inc. <jhtao@taosdata.com>
*
* This program is free software: you can use, redistribute, and/or modify
* it under the terms of the GNU Affero General Public License, version 3
* or later ("AGPL"), as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "cmdnodes.h"
#include "nodesUtil.h"
#include "plannodes.h"
#include "querynodes.h"
#include "taos.h"
#include "taoserror.h"
#include "thash.h"
char *gOperatorStr[] = {NULL, "+", "-", "*", "/", "%", "&", "|", ">", ">=", "<", "<=", "=", "<>",
"IN", "NOT IN", "LIKE", "NOT LIKE", "MATCH", "NMATCH", "IS NULL", "IS NOT NULL",
"IS TRUE", "IS FALSE", "IS UNKNOWN", "IS NOT TRUE", "IS NOT FALSE", "IS NOT UNKNOWN"};
char *gLogicConditionStr[] = {"AND", "OR", "NOT"};
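// Both tables are assumed to be indexed directly by the corresponding enum values
// (operator type and logic condition type); nodesNodeToSQL below only range-checks the
// operator table, so the string order has to stay in sync with those enums.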
int32_t nodesNodeToSQL(SNode *pNode, char *buf, int32_t bufSize, int32_t *len) {
switch (pNode->type) {
case QUERY_NODE_COLUMN: {
SColumnNode *colNode = (SColumnNode *)pNode;
if (colNode->dbName[0]) {
*len += snprintf(buf + *len, bufSize - *len, "`%s`.", colNode->dbName);
}
if (colNode->tableAlias[0]) {
*len += snprintf(buf + *len, bufSize - *len, "`%s`.", colNode->tableAlias);
} else if (colNode->tableName[0]) {
*len += snprintf(buf + *len, bufSize - *len, "`%s`.", colNode->tableName);
}
*len += snprintf(buf + *len, bufSize - *len, "`%s`", colNode->colName);
return TSDB_CODE_SUCCESS;
}
case QUERY_NODE_VALUE:{
SValueNode *colNode = (SValueNode *)pNode;
char *t = nodesGetStrValueFromNode(colNode);
if (NULL == t) {
nodesError("fail to get str value from valueNode");
NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
*len += snprintf(buf + *len, bufSize - *len, "%s", t);
taosMemoryFree(t);
return TSDB_CODE_SUCCESS;
}
case QUERY_NODE_OPERATOR: {
SOperatorNode* pOpNode = (SOperatorNode*)pNode;
*len += snprintf(buf + *len, bufSize - *len, "(");
if (pOpNode->pLeft) {
NODES_ERR_RET(nodesNodeToSQL(pOpNode->pLeft, buf, bufSize, len));
}
if (pOpNode->opType >= (sizeof(gOperatorStr) / sizeof(gOperatorStr[0]))) {
nodesError("unknown operation type:%d", pOpNode->opType);
NODES_ERR_RET(TSDB_CODE_QRY_APP_ERROR);
}
*len += snprintf(buf + *len, bufSize - *len, " %s ", gOperatorStr[pOpNode->opType]);
if (pOpNode->pRight) {
NODES_ERR_RET(nodesNodeToSQL(pOpNode->pRight, buf, bufSize, len));
}
*len += snprintf(buf + *len, bufSize - *len, ")");
return TSDB_CODE_SUCCESS;
}
case QUERY_NODE_LOGIC_CONDITION:{
SLogicConditionNode* pLogicNode = (SLogicConditionNode*)pNode;
SNode* node = NULL;
bool first = true;
*len += snprintf(buf + *len, bufSize - *len, "(");
FOREACH(node, pLogicNode->pParameterList) {
if (!first) {
*len += snprintf(buf + *len, bufSize - *len, " %s ", gLogicConditionStr[pLogicNode->condType]);
}
NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len));
first = false;
}
*len += snprintf(buf + *len, bufSize - *len, ")");
return TSDB_CODE_SUCCESS;
}
case QUERY_NODE_FUNCTION:{
SFunctionNode* pFuncNode = (SFunctionNode*)pNode;
SNode* node = NULL;
bool first = true;
*len += snprintf(buf + *len, bufSize - *len, "%s(", pFuncNode->functionName);
FOREACH(node, pFuncNode->pParameterList) {
if (!first) {
*len += snprintf(buf + *len, bufSize - *len, ", ");
}
NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len));
first = false;
}
*len += snprintf(buf + *len, bufSize - *len, ")");
return TSDB_CODE_SUCCESS;
}
case QUERY_NODE_NODE_LIST:{
SNodeListNode* pListNode = (SNodeListNode *)pNode;
SNode* node = NULL;
bool first = true;
*len += snprintf(buf + *len, bufSize - *len, "(");
FOREACH(node, pListNode->pNodeList) {
if (!first) {
*len += snprintf(buf + *len, bufSize - *len, ", ");
}
NODES_ERR_RET(nodesNodeToSQL(node, buf, bufSize, len));
first = false;
}
*len += snprintf(buf + *len, bufSize - *len, ")");
return TSDB_CODE_SUCCESS;
}
default:
break;
}
nodesError("nodesNodeToSQL unknown node = %s", nodesNodeName(pNode->type));
NODES_RET(TSDB_CODE_QRY_APP_ERROR);
}
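/* For reference, a condition tree equivalent to `c1 > 10 AND c2 < 5` comes out of the cases
 * above roughly as ((`c1` > 10) AND (`c2` < 5)): identifiers are back-quoted, every operator
 * is parenthesized, and literal formatting is whatever nodesGetStrValueFromNode produces
 * (e.g. doubles are printed with %e, strings are single-quoted). */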

View File

@ -17,7 +17,8 @@
typedef enum ETraversalOrder {
TRAVERSAL_PREORDER = 1,
TRAVERSAL_POSTORDER
TRAVERSAL_INORDER,
TRAVERSAL_POSTORDER,
} ETraversalOrder;
static EDealRes walkList(SNodeList* pNodeList, ETraversalOrder order, FNodeWalker walker, void* pContext);

View File

@ -790,6 +790,71 @@ void* nodesGetValueFromNode(SValueNode *pNode) {
return NULL;
}
char* nodesGetStrValueFromNode(SValueNode *pNode) {
switch (pNode->node.resType.type) {
case TSDB_DATA_TYPE_BOOL: {
void *buf = taosMemoryMalloc(MAX_NUM_STR_SIZE);
if (NULL == buf) {
return NULL;
}
sprintf(buf, "%s", pNode->datum.b ? "true" : "false");
return buf;
}
case TSDB_DATA_TYPE_TINYINT:
case TSDB_DATA_TYPE_SMALLINT:
case TSDB_DATA_TYPE_INT:
case TSDB_DATA_TYPE_BIGINT:
case TSDB_DATA_TYPE_TIMESTAMP: {
void *buf = taosMemoryMalloc(MAX_NUM_STR_SIZE);
if (NULL == buf) {
return NULL;
}
sprintf(buf, "%" PRId64, pNode->datum.i);
return buf;
}
case TSDB_DATA_TYPE_UTINYINT:
case TSDB_DATA_TYPE_USMALLINT:
case TSDB_DATA_TYPE_UINT:
case TSDB_DATA_TYPE_UBIGINT: {
void *buf = taosMemoryMalloc(MAX_NUM_STR_SIZE);
if (NULL == buf) {
return NULL;
}
sprintf(buf, "%" PRIu64, pNode->datum.u);
return buf;
}
case TSDB_DATA_TYPE_FLOAT:
case TSDB_DATA_TYPE_DOUBLE: {
void *buf = taosMemoryMalloc(MAX_NUM_STR_SIZE);
if (NULL == buf) {
return NULL;
}
sprintf(buf, "%e", pNode->datum.d);
return buf;
}
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY: {
int32_t bufSize = varDataLen(pNode->datum.p) + 2 + 1;
void *buf = taosMemoryMalloc(bufSize);
if (NULL == buf) {
return NULL;
}
snprintf(buf, bufSize, "'%s'", varDataVal(pNode->datum.p));
return buf;
}
default:
break;
}
return NULL;
}
bool nodesIsExprNode(const SNode* pNode) {
ENodeType type = nodeType(pNode);
return (QUERY_NODE_COLUMN == type || QUERY_NODE_VALUE == type || QUERY_NODE_OPERATOR == type || QUERY_NODE_FUNCTION == type);

View File

@ -71,6 +71,7 @@ typedef enum ETableOptionType {
typedef struct SAlterOption {
int32_t type;
SToken val;
SNodeList* pKeep;
} SAlterOption;
extern SToken nil_token;
@ -121,6 +122,8 @@ SNode* createSetOperator(SAstCreateContext* pCxt, ESetOperatorType type, SNode*
SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt);
SNode* createDefaultAlterDatabaseOptions(SAstCreateContext* pCxt);
SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOptionType type, const SToken* pVal);
SNode* setDatabaseKeepOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList* pKeep);
SNode* setDatabaseAlterOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOption* pAlterOption);
SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pDbName, SNode* pOptions);
SNode* createDropDatabaseStmt(SAstCreateContext* pCxt, bool ignoreNotExists, SToken* pDbName);
SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode* pOptions);
@ -129,6 +132,8 @@ SNode* createDefaultAlterTableOptions(SAstCreateContext* pCxt);
SNode* setTableOption(SAstCreateContext* pCxt, SNode* pOptions, ETableOptionType type, const SToken* pVal);
SNode* setTableSmaOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList* pSma);
SNode* setTableRollupOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList* pFuncs);
SNode* setTableKeepOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList* pKeep);
SNode* setTableAlterOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOption* pAlterOption);
SNode* createColumnDefNode(SAstCreateContext* pCxt, const SToken* pColName, SDataType dataType, const SToken* pComment);
SDataType createDataType(uint8_t type);
SDataType createVarLenDataType(uint8_t type, const SToken* pLen);

View File

@ -137,7 +137,7 @@ db_options(A) ::= db_options(B) DAYS NK_INTEGER(C).
db_options(A) ::= db_options(B) FSYNC NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_FSYNC, &C); }
db_options(A) ::= db_options(B) MAXROWS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_MAXROWS, &C); }
db_options(A) ::= db_options(B) MINROWS NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_MINROWS, &C); }
db_options(A) ::= db_options(B) KEEP NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_KEEP, &C); }
db_options(A) ::= db_options(B) KEEP integer_list(C). { A = setDatabaseKeepOption(pCxt, B, C); }
db_options(A) ::= db_options(B) PRECISION NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_PRECISION, &C); }
db_options(A) ::= db_options(B) QUORUM NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_QUORUM, &C); }
db_options(A) ::= db_options(B) REPLICA NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_REPLICA, &C); }
@ -148,17 +148,23 @@ db_options(A) ::= db_options(B) SINGLE_STABLE NK_INTEGER(C).
db_options(A) ::= db_options(B) STREAM_MODE NK_INTEGER(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_STREAM_MODE, &C); }
db_options(A) ::= db_options(B) RETENTIONS NK_STRING(C). { A = setDatabaseOption(pCxt, B, DB_OPTION_RETENTIONS, &C); }
alter_db_options(A) ::= alter_db_option(B). { A = createDefaultAlterDatabaseOptions(pCxt); A = setDatabaseOption(pCxt, A, B.type, &B.val); }
alter_db_options(A) ::= alter_db_options(B) alter_db_option(C). { A = setDatabaseOption(pCxt, B, C.type, &C.val); }
alter_db_options(A) ::= alter_db_option(B). { A = createDefaultAlterDatabaseOptions(pCxt); A = setDatabaseAlterOption(pCxt, A, &B); }
alter_db_options(A) ::= alter_db_options(B) alter_db_option(C). { A = setDatabaseAlterOption(pCxt, B, &C); }
%type alter_db_option { SAlterOption }
%destructor alter_db_option { }
alter_db_option(A) ::= BLOCKS NK_INTEGER(B). { A.type = DB_OPTION_BLOCKS; A.val = B; }
alter_db_option(A) ::= FSYNC NK_INTEGER(B). { A.type = DB_OPTION_FSYNC; A.val = B; }
alter_db_option(A) ::= KEEP NK_INTEGER(B). { A.type = DB_OPTION_KEEP; A.val = B; }
alter_db_option(A) ::= KEEP integer_list(B). { A.type = DB_OPTION_KEEP; A.pKeep = B; }
alter_db_option(A) ::= WAL NK_INTEGER(B). { A.type = DB_OPTION_WAL; A.val = B; }
alter_db_option(A) ::= QUORUM NK_INTEGER(B). { A.type = DB_OPTION_QUORUM; A.val = B; }
alter_db_option(A) ::= CACHELAST NK_INTEGER(B). { A.type = DB_OPTION_CACHELAST; A.val = B; }
alter_db_option(A) ::= REPLICA NK_INTEGER(B). { A.type = DB_OPTION_REPLICA; A.val = B; }
%type integer_list { SNodeList* }
%destructor integer_list { nodesDestroyList($$); }
integer_list(A) ::= NK_INTEGER(B). { A = createNodeList(pCxt, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &B)); }
integer_list(A) ::= integer_list(B) NK_COMMA NK_INTEGER(C). { A = addNodeToList(pCxt, B, createValueNode(pCxt, TSDB_DATA_TYPE_BIGINT, &C)); }
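// With the integer_list rule above, KEEP now accepts one to three comma-separated integers,
// e.g. (illustrative SQL, values still subject to the keep range checks):
//   CREATE DATABASE db KEEP 3650            -- expands to keep0 = keep1 = keep2
//   CREATE DATABASE db KEEP 3650,7300,7300
// Each NK_INTEGER is wrapped in a BIGINT value node and validated later in checkAndSetKeepOption.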
/************************************************ create/drop table/stable ********************************************/
cmd ::= CREATE TABLE not_exists_opt(A) full_table_name(B)
@ -259,20 +265,20 @@ tags_def(A) ::= TAGS NK_LP column_def_list(B) NK_RP.
table_options(A) ::= . { A = createDefaultTableOptions(pCxt); }
table_options(A) ::= table_options(B) COMMENT NK_STRING(C). { A = setTableOption(pCxt, B, TABLE_OPTION_COMMENT, &C); }
table_options(A) ::= table_options(B) KEEP NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_KEEP, &C); }
table_options(A) ::= table_options(B) KEEP integer_list(C). { A = setTableKeepOption(pCxt, B, C); }
table_options(A) ::= table_options(B) TTL NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_TTL, &C); }
table_options(A) ::= table_options(B) SMA NK_LP col_name_list(C) NK_RP. { A = setTableSmaOption(pCxt, B, C); }
table_options(A) ::= table_options(B) ROLLUP NK_LP func_name_list(C) NK_RP. { A = setTableRollupOption(pCxt, B, C); }
table_options(A) ::= table_options(B) FILE_FACTOR NK_FLOAT(C). { A = setTableOption(pCxt, B, TABLE_OPTION_FILE_FACTOR, &C); }
table_options(A) ::= table_options(B) DELAY NK_INTEGER(C). { A = setTableOption(pCxt, B, TABLE_OPTION_DELAY, &C); }
alter_table_options(A) ::= alter_table_option(B). { A = createDefaultAlterTableOptions(pCxt); A = setTableOption(pCxt, A, B.type, &B.val); }
alter_table_options(A) ::= alter_table_options(B) alter_table_option(C). { A = setTableOption(pCxt, B, C.type, &C.val); }
alter_table_options(A) ::= alter_table_option(B). { A = createDefaultAlterTableOptions(pCxt); A = setTableAlterOption(pCxt, A, &B); }
alter_table_options(A) ::= alter_table_options(B) alter_table_option(C). { A = setTableAlterOption(pCxt, B, &C); }
%type alter_table_option { SAlterOption }
%destructor alter_table_option { }
alter_table_option(A) ::= COMMENT NK_STRING(B). { A.type = TABLE_OPTION_COMMENT; A.val = B; }
alter_table_option(A) ::= KEEP NK_INTEGER(B). { A.type = TABLE_OPTION_KEEP; A.val = B; }
alter_table_option(A) ::= KEEP integer_list(B). { A.type = TABLE_OPTION_KEEP; A.pKeep = B; }
alter_table_option(A) ::= TTL NK_INTEGER(B). { A.type = TABLE_OPTION_TTL; A.val = B; }
%type col_name_list { SNodeList* }

View File

@ -138,18 +138,6 @@ static SDatabaseOptions* setDbMinRows(SAstCreateContext* pCxt, SDatabaseOptions*
return pOptions;
}
static SDatabaseOptions* setDbKeep(SAstCreateContext* pCxt, SDatabaseOptions* pOptions, const SToken* pVal) {
int64_t val = strtol(pVal->z, NULL, 10);
if (val < TSDB_MIN_KEEP || val > TSDB_MAX_KEEP) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen,
"invalid db option keep: %"PRId64" valid range: [%d, %d]", val, TSDB_MIN_KEEP, TSDB_MAX_KEEP);
pCxt->valid = false;
return pOptions;
}
pOptions->keep = val;
return pOptions;
}
static SDatabaseOptions* setDbPrecision(SAstCreateContext* pCxt, SDatabaseOptions* pOptions, const SToken* pVal) {
char val[10] = {0};
trimString(pVal->z, pVal->n, val, sizeof(val));
@ -180,9 +168,9 @@ static SDatabaseOptions* setDbQuorum(SAstCreateContext* pCxt, SDatabaseOptions*
static SDatabaseOptions* setDbReplica(SAstCreateContext* pCxt, SDatabaseOptions* pOptions, const SToken* pVal) {
int64_t val = strtol(pVal->z, NULL, 10);
if (val < TSDB_MIN_DB_REPLICA_OPTION || val > TSDB_MAX_DB_REPLICA_OPTION) {
if (!(val == TSDB_MIN_DB_REPLICA_OPTION || val == TSDB_MAX_DB_REPLICA_OPTION)) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen,
"invalid db option replications: %"PRId64" valid range: [%d, %d]", val, TSDB_MIN_DB_REPLICA_OPTION, TSDB_MAX_DB_REPLICA_OPTION);
"invalid db option replications: %"PRId64", only 1, 3 allowed", val);
pCxt->valid = false;
return pOptions;
}
@ -291,7 +279,6 @@ static void initSetDatabaseOptionFp() {
setDbOptionFuncs[DB_OPTION_FSYNC] = setDbFsync;
setDbOptionFuncs[DB_OPTION_MAXROWS] = setDbMaxRows;
setDbOptionFuncs[DB_OPTION_MINROWS] = setDbMinRows;
setDbOptionFuncs[DB_OPTION_KEEP] = setDbKeep;
setDbOptionFuncs[DB_OPTION_PRECISION] = setDbPrecision;
setDbOptionFuncs[DB_OPTION_QUORUM] = setDbQuorum;
setDbOptionFuncs[DB_OPTION_REPLICA] = setDbReplica;
@ -303,18 +290,6 @@ static void initSetDatabaseOptionFp() {
setDbOptionFuncs[DB_OPTION_RETENTIONS] = setDbRetentions;
}
static STableOptions* setTableKeep(SAstCreateContext* pCxt, STableOptions* pOptions, const SToken* pVal) {
int64_t val = strtol(pVal->z, NULL, 10);
if (val < TSDB_MIN_KEEP || val > TSDB_MAX_KEEP) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen,
"invalid table option keep: %"PRId64" valid range: [%d, %d]", val, TSDB_MIN_KEEP, TSDB_MAX_KEEP);
pCxt->valid = false;
return pOptions;
}
pOptions->keep = val;
return pOptions;
}
static STableOptions* setTableTtl(SAstCreateContext* pCxt, STableOptions* pOptions, const SToken* pVal) {
int64_t val = strtol(pVal->z, NULL, 10);
if (val < TSDB_MIN_DB_TTL_OPTION) {
@ -363,7 +338,6 @@ static STableOptions* setTableDelay(SAstCreateContext* pCxt, STableOptions* pOpt
}
static void initSetTableOptionFp() {
setTableOptionFuncs[TABLE_OPTION_KEEP] = setTableKeep;
setTableOptionFuncs[TABLE_OPTION_TTL] = setTableTtl;
setTableOptionFuncs[TABLE_OPTION_COMMENT] = setTableComment;
setTableOptionFuncs[TABLE_OPTION_FILE_FACTOR] = setTableFileFactor;
@ -397,7 +371,9 @@ static bool checkUserName(SAstCreateContext* pCxt, SToken* pUserName) {
pCxt->valid = false;
}
}
if (pCxt->valid) {
trimEscape(pUserName);
}
return pCxt->valid;
}
@ -472,45 +448,50 @@ static bool checkPort(SAstCreateContext* pCxt, const SToken* pPortToken, int32_t
static bool checkDbName(SAstCreateContext* pCxt, SToken* pDbName, bool query) {
if (NULL == pDbName) {
pCxt->valid = (query ? NULL != pCxt->pQueryCxt->db : true);
if (!pCxt->valid) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "db not specified");
if (query && NULL == pCxt->pQueryCxt->db) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_DB_NOT_SPECIFIED);
pCxt->valid = false;
}
} else {
pCxt->valid = pDbName->n < TSDB_DB_NAME_LEN ? true : false;
if (pDbName->n >= TSDB_DB_NAME_LEN) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, pDbName->z);
pCxt->valid = false;
}
}
if (pCxt->valid) {
trimEscape(pDbName);
}
return pCxt->valid;
}
static bool checkTableName(SAstCreateContext* pCxt, SToken* pTableName) {
if (NULL == pTableName) {
pCxt->valid = true;
} else {
pCxt->valid = pTableName->n < TSDB_TABLE_NAME_LEN ? true : false;
if (NULL != pTableName && pTableName->n >= TSDB_TABLE_NAME_LEN) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, pTableName->z);
pCxt->valid = false;
return false;
}
trimEscape(pTableName);
return pCxt->valid;
return true;
}
static bool checkColumnName(SAstCreateContext* pCxt, SToken* pColumnName) {
if (NULL == pColumnName) {
pCxt->valid = true;
} else {
pCxt->valid = pColumnName->n < TSDB_COL_NAME_LEN ? true : false;
if (NULL != pColumnName && pColumnName->n >= TSDB_COL_NAME_LEN) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, pColumnName->z);
pCxt->valid = false;
return false;
}
trimEscape(pColumnName);
return pCxt->valid;
return true;
}
static bool checkIndexName(SAstCreateContext* pCxt, SToken* pIndexName) {
if (NULL == pIndexName) {
if (NULL != pIndexName && pIndexName->n >= TSDB_INDEX_NAME_LEN) {
generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, pIndexName->z);
pCxt->valid = false;
} else {
pCxt->valid = pIndexName->n < TSDB_INDEX_NAME_LEN ? true : false;
return false;
}
trimEscape(pIndexName);
return pCxt->valid;
return true;
}
SNode* createRawExprNode(SAstCreateContext* pCxt, const SToken* pToken, SNode* pNode) {
@ -885,7 +866,9 @@ SNode* createDefaultDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->fsyncPeriod = TSDB_DEFAULT_FSYNC_PERIOD;
pOptions->maxRowsPerBlock = TSDB_DEFAULT_MAX_ROW_FBLOCK;
pOptions->minRowsPerBlock = TSDB_DEFAULT_MIN_ROW_FBLOCK;
pOptions->keep = TSDB_DEFAULT_KEEP;
pOptions->keep0 = TSDB_DEFAULT_KEEP;
pOptions->keep1 = TSDB_DEFAULT_KEEP;
pOptions->keep2 = TSDB_DEFAULT_KEEP;
pOptions->precision = TSDB_TIME_PRECISION_MILLI;
pOptions->quorum = TSDB_DEFAULT_DB_QUORUM_OPTION;
pOptions->replica = TSDB_DEFAULT_DB_REPLICA_OPTION;
@ -908,7 +891,9 @@ SNode* createDefaultAlterDatabaseOptions(SAstCreateContext* pCxt) {
pOptions->fsyncPeriod = -1;
pOptions->maxRowsPerBlock = -1;
pOptions->minRowsPerBlock = -1;
pOptions->keep = -1;
pOptions->keep0 = -1;
pOptions->keep1 = -1;
pOptions->keep2= -1;
pOptions->precision = -1;
pOptions->quorum = -1;
pOptions->replica = -1;
@ -924,6 +909,48 @@ SNode* setDatabaseOption(SAstCreateContext* pCxt, SNode* pOptions, EDatabaseOpti
return (SNode*)setDbOptionFuncs[type](pCxt, (SDatabaseOptions*)pOptions, pVal);
}
static bool checkAndSetKeepOption(SAstCreateContext* pCxt, SNodeList* pKeep, int32_t* pKeep0, int32_t* pKeep1, int32_t* pKeep2) {
int32_t numOfKeep = LIST_LENGTH(pKeep);
if (numOfKeep > 3 || numOfKeep < 1) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "invalid number of keep options");
return false;
}
int32_t daysToKeep0 = strtol(((SValueNode*)nodesListGetNode(pKeep, 0))->literal, NULL, 10);
int32_t daysToKeep1 = numOfKeep > 1 ? strtol(((SValueNode*)nodesListGetNode(pKeep, 1))->literal, NULL, 10) : daysToKeep0;
int32_t daysToKeep2 = numOfKeep > 2 ? strtol(((SValueNode*)nodesListGetNode(pKeep, 2))->literal, NULL, 10) : daysToKeep1;
if (daysToKeep0 < TSDB_MIN_KEEP || daysToKeep1 < TSDB_MIN_KEEP || daysToKeep2 < TSDB_MIN_KEEP ||
daysToKeep0 > TSDB_MAX_KEEP || daysToKeep1 > TSDB_MAX_KEEP || daysToKeep2 > TSDB_MAX_KEEP) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen,
"invalid option keep: %d, %d, %d valid range: [%d, %d]", daysToKeep0, daysToKeep1, daysToKeep2, TSDB_MIN_KEEP, TSDB_MAX_KEEP);
return false;
}
if (!((daysToKeep0 <= daysToKeep1) && (daysToKeep1 <= daysToKeep2))) {
snprintf(pCxt->pQueryCxt->pMsg, pCxt->pQueryCxt->msgLen, "invalid keep value, should be keep0 <= keep1 <= keep2");
return false;
}
*pKeep0 = daysToKeep0;
*pKeep1 = daysToKeep1;
*pKeep2 = daysToKeep2;
return true;
}
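/* Sketch of how the list expands in the code above: KEEP 100 -> keep0 = keep1 = keep2 = 100;
 * KEEP 100,200 -> 100, 200, 200; KEEP 100,200,300 -> 100, 200, 300. Each value must fall in
 * [TSDB_MIN_KEEP, TSDB_MAX_KEEP] and the resulting triple must be non-decreasing. */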
SNode* setDatabaseKeepOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList* pKeep) {
SDatabaseOptions* pOp = (SDatabaseOptions*)pOptions;
pCxt->valid = checkAndSetKeepOption(pCxt, pKeep, &pOp->keep0, &pOp->keep1, &pOp->keep2);
return pOptions;
}
SNode* setDatabaseAlterOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOption* pAlterOption) {
if (DB_OPTION_KEEP == pAlterOption->type) {
return setDatabaseKeepOption(pCxt, pOptions, pAlterOption->pKeep);
} else {
return setDatabaseOption(pCxt, pOptions, pAlterOption->type, &pAlterOption->val);
}
}
SNode* createCreateDatabaseStmt(SAstCreateContext* pCxt, bool ignoreExists, SToken* pDbName, SNode* pOptions) {
if (!checkDbName(pCxt, pDbName, false)) {
return NULL;
@ -961,7 +988,9 @@ SNode* createAlterDatabaseStmt(SAstCreateContext* pCxt, SToken* pDbName, SNode*
SNode* createDefaultTableOptions(SAstCreateContext* pCxt) {
STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->keep = TSDB_DEFAULT_KEEP;
pOptions->keep0 = TSDB_DEFAULT_KEEP;
pOptions->keep1 = TSDB_DEFAULT_KEEP;
pOptions->keep2 = TSDB_DEFAULT_KEEP;
pOptions->ttl = TSDB_DEFAULT_DB_TTL_OPTION;
pOptions->filesFactor = TSDB_DEFAULT_DB_FILE_FACTOR;
pOptions->delay = TSDB_DEFAULT_DB_DELAY;
@ -971,7 +1000,9 @@ SNode* createDefaultTableOptions(SAstCreateContext* pCxt) {
SNode* createDefaultAlterTableOptions(SAstCreateContext* pCxt) {
STableOptions* pOptions = nodesMakeNode(QUERY_NODE_TABLE_OPTIONS);
CHECK_OUT_OF_MEM(pOptions);
pOptions->keep = -1;
pOptions->keep0 = -1;
pOptions->keep1 = -1;
pOptions->keep2 = -1;
pOptions->ttl = -1;
pOptions->filesFactor = -1;
pOptions->delay = -1;
@ -997,6 +1028,20 @@ SNode* setTableRollupOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList*
return pOptions;
}
SNode* setTableKeepOption(SAstCreateContext* pCxt, SNode* pOptions, SNodeList* pKeep) {
STableOptions* pOp = (STableOptions*)pOptions;
pCxt->valid = checkAndSetKeepOption(pCxt, pKeep, &pOp->keep0, &pOp->keep1, &pOp->keep2);
return pOptions;
}
SNode* setTableAlterOption(SAstCreateContext* pCxt, SNode* pOptions, SAlterOption* pAlterOption) {
if (TABLE_OPTION_KEEP == pAlterOption->type) {
return setTableKeepOption(pCxt, pOptions, pAlterOption->pKeep);
} else {
return setTableOption(pCxt, pOptions, pAlterOption->type, &pAlterOption->val);
}
}
SNode* createColumnDefNode(SAstCreateContext* pCxt, const SToken* pColName, SDataType dataType, const SToken* pComment) {
SColumnDefNode* pCol = (SColumnDefNode*)nodesMakeNode(QUERY_NODE_COLUMN_DEF);
CHECK_OUT_OF_MEM(pCol);

View File

@ -956,9 +956,9 @@ static int32_t buildCreateDbReq(STranslateContext* pCxt, SCreateDatabaseStmt* pS
pReq->cacheBlockSize = pStmt->pOptions->cacheBlockSize;
pReq->totalBlocks = pStmt->pOptions->numOfBlocks;
pReq->daysPerFile = pStmt->pOptions->daysPerFile;
pReq->daysToKeep0 = pStmt->pOptions->keep;
pReq->daysToKeep1 = -1;
pReq->daysToKeep2 = -1;
pReq->daysToKeep0 = pStmt->pOptions->keep0;
pReq->daysToKeep1 = pStmt->pOptions->keep1;
pReq->daysToKeep2 = pStmt->pOptions->keep2;
pReq->minRows = pStmt->pOptions->minRowsPerBlock;
pReq->maxRows = pStmt->pOptions->maxRowsPerBlock;
pReq->commitTime = -1;
@ -1041,13 +1041,14 @@ static void buildAlterDbReq(STranslateContext* pCxt, SAlterDatabaseStmt* pStmt,
tNameSetDbName(&name, pCxt->pParseCxt->acctId, pStmt->dbName, strlen(pStmt->dbName));
tNameGetFullDbName(&name, pReq->db);
pReq->totalBlocks = pStmt->pOptions->numOfBlocks;
pReq->daysToKeep0 = pStmt->pOptions->keep;
pReq->daysToKeep1 = -1;
pReq->daysToKeep2 = -1;
pReq->daysToKeep0 = pStmt->pOptions->keep0;
pReq->daysToKeep1 = pStmt->pOptions->keep1;
pReq->daysToKeep2 = pStmt->pOptions->keep2;
pReq->fsyncPeriod = pStmt->pOptions->fsyncPeriod;
pReq->walLevel = pStmt->pOptions->walLevel;
pReq->quorum = pStmt->pOptions->quorum;
pReq->cacheLastRow = pStmt->pOptions->cachelast;
pReq->replications = pStmt->pOptions->replica;
return;
}
@ -1119,7 +1120,7 @@ static const SColumnDefNode* findColDef(const SNodeList* pCols, const SColumnNod
return NULL;
}
static int32_t checkCreateTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
static int32_t checkCreateSuperTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
if (NULL != pStmt->pOptions->pSma) {
SNode* pNode = NULL;
FOREACH(pNode, pStmt->pOptions->pSma) {
@ -1148,7 +1149,7 @@ static int32_t getAggregationMethod(SNodeList* pFuncs) {
}
static int32_t translateCreateSuperTable(STranslateContext* pCxt, SCreateTableStmt* pStmt) {
int32_t code = checkCreateTable(pCxt, pStmt);
int32_t code = checkCreateSuperTable(pCxt, pStmt);
if (TSDB_CODE_SUCCESS != code) {
return code;
}
@ -1217,6 +1218,9 @@ static int32_t translateDropTable(STranslateContext* pCxt, SDropTableStmt* pStmt
SName tableName;
int32_t code = getTableMetaImpl(
pCxt, toName(pCxt->pParseCxt->acctId, pClause->dbName, pClause->tableName, &tableName), &pTableMeta);
if ((TSDB_CODE_TDB_INVALID_TABLE_ID == code || TSDB_CODE_VND_TB_NOT_EXIST == code) && pClause->ignoreNotExists) {
return TSDB_CODE_SUCCESS;
}
if (TSDB_CODE_SUCCESS == code) {
if (TSDB_SUPER_TABLE == pTableMeta->tableType) {
code = doTranslateDropSuperTable(pCxt, &tableName, pClause->ignoreNotExists);
@ -1949,6 +1953,7 @@ static int32_t extractExplainResultSchema(int32_t* numOfCols, SSchema** pSchema)
}
(*pSchema)[0].type = TSDB_DATA_TYPE_BINARY;
(*pSchema)[0].bytes = TSDB_EXPLAIN_RESULT_ROW_SIZE;
strcpy((*pSchema)[0].name, TSDB_EXPLAIN_RESULT_COLUMN_NAME);
return TSDB_CODE_SUCCESS;
}
@ -2444,9 +2449,19 @@ static int32_t buildKVRowForAllTags(STranslateContext* pCxt, SCreateSubTableClau
return TSDB_CODE_SUCCESS;
}
static int32_t checkCreateSubTable(STranslateContext* pCxt, SCreateSubTableClause* pStmt) {
if (0 != strcmp(pStmt->dbName, pStmt->useDbName)) {
return generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_CORRESPONDING_STABLE_ERR);
}
return TSDB_CODE_SUCCESS;
}
static int32_t rewriteCreateSubTable(STranslateContext* pCxt, SCreateSubTableClause* pStmt, SHashObj* pVgroupHashmap) {
int32_t code = checkCreateSubTable(pCxt, pStmt);
STableMeta* pSuperTableMeta = NULL;
int32_t code = getTableMeta(pCxt, pStmt->useDbName, pStmt->useTableName, &pSuperTableMeta);
if (TSDB_CODE_SUCCESS == code) {
code = getTableMeta(pCxt, pStmt->useDbName, pStmt->useTableName, &pSuperTableMeta);
}
SKVRowBuilder kvRowBuilder = {0};
if (TSDB_CODE_SUCCESS == code) {
@ -2592,8 +2607,10 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
break;
default:
pQuery->directRpc = true;
if (NULL != pCxt->pCmdMsg) {
TSWAP(pQuery->pCmdMsg, pCxt->pCmdMsg, SCmdMsgInfo*);
pQuery->msgType = pQuery->pCmdMsg->msgType;
}
break;
}

View File

@ -61,6 +61,12 @@ static char* getSyntaxErrFormat(int32_t errCode) {
return "This statement is no longer supported";
case TSDB_CODE_PAR_INTERVAL_VALUE_TOO_SMALL:
return "This interval value is too small : %s";
case TSDB_CODE_PAR_DB_NOT_SPECIFIED:
return "db not specified";
case TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME:
return "Invalid identifier name : %s";
case TSDB_CODE_PAR_CORRESPONDING_STABLE_ERR:
return "corresponding super table not in this db";
case TSDB_CODE_OUT_OF_MEMORY:
return "Out of memory";
default:

File diff suppressed because it is too large

View File

@ -288,7 +288,7 @@ static int32_t createJoinLogicNode(SLogicPlanContext* pCxt, SSelectStmt* pSelect
// set the output
if (TSDB_CODE_SUCCESS == code) {
pJoin->node.pTargets = nodesCloneList(pLeft->pTargets);
if (NULL == pJoin->pOnConditions) {
if (NULL == pJoin->node.pTargets) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
if (TSDB_CODE_SUCCESS == code) {

View File

@ -719,6 +719,11 @@ static int32_t createStreamScanPhysiNodeByExchange(SPhysiPlanContext* pCxt, SExc
if (NULL == pScan->pScanCols) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
if (TSDB_CODE_SUCCESS == code) {
code = sortScanCols(pScan->pScanCols);
}
if (TSDB_CODE_SUCCESS == code) {
code = sortScanCols(pScan->pScanCols);
}

View File

@ -8,7 +8,7 @@ target_include_directories(
target_link_libraries(
qcom
PRIVATE os util transport
PRIVATE os util transport nodes
)
if(${BUILD_TEST})

View File

@ -21,7 +21,6 @@ extern "C" {
#endif
#ifdef __cplusplus
}
#endif

View File

@ -36,8 +36,6 @@ extern "C" {
#define FILTER_DUMMY_EMPTY_OPTR 127
#define MAX_NUM_STR_SIZE 40
#define FILTER_RM_UNIT_MIN_ROWS 100
enum {

View File

@ -9,7 +9,7 @@ target_include_directories(
target_link_libraries(
scheduler
PUBLIC os util nodes planner qcom common catalog transport
PUBLIC os util nodes planner qcom common catalog transport command
)
if(${BUILD_TEST})

View File

@ -142,7 +142,7 @@ typedef struct SSchTask {
} SSchTask;
typedef struct SSchJobAttr {
bool needFetch;
EExplainMode explainMode;
bool syncSchedule;
bool queryJob;
bool needFlowCtrl;

View File

@ -19,6 +19,7 @@
#include "tmsg.h"
#include "tref.h"
#include "trpc.h"
#include "command.h"
SSchedulerMgmt schMgmt = {0};
@ -126,11 +127,13 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
return TSDB_CODE_SUCCESS;
case TDMT_VND_QUERY_RSP: // query_rsp may be processed later than ready_rsp
if (lastMsgType != reqMsgType && -1 != lastMsgType && TDMT_VND_FETCH != lastMsgType) {
SCH_TASK_DLOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType));
SCH_TASK_DLOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType),
TMSG_INFO(msgType));
}
if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_TASK_DLOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType));
SCH_TASK_DLOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus),
TMSG_INFO(msgType));
}
SCH_SET_TASK_LASTMSG_TYPE(pTask, -1);
@ -138,12 +141,14 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
case TDMT_VND_RES_READY_RSP:
reqMsgType = TDMT_VND_QUERY;
if (lastMsgType != reqMsgType && -1 != lastMsgType) {
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", (lastMsgType > 0 ? TMSG_INFO(lastMsgType) : "null"), TMSG_INFO(msgType));
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s",
(lastMsgType > 0 ? TMSG_INFO(lastMsgType) : "null"), TMSG_INFO(msgType));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType));
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus),
TMSG_INFO(msgType));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
@ -151,12 +156,14 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
return TSDB_CODE_SUCCESS;
case TDMT_VND_FETCH_RSP:
if (lastMsgType != reqMsgType && -1 != lastMsgType) {
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType));
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType),
TMSG_INFO(msgType));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType));
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus),
TMSG_INFO(msgType));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
@ -171,12 +178,14 @@ int32_t schValidateTaskReceivedMsgType(SSchJob *pJob, SSchTask *pTask, int32_t m
}
if (lastMsgType != reqMsgType) {
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType), TMSG_INFO(msgType));
SCH_TASK_ELOG("rsp msg type mis-match, last sent msgType:%s, rspType:%s", TMSG_INFO(lastMsgType),
TMSG_INFO(msgType));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
if (taskStatus != JOB_TASK_STATUS_EXECUTING && taskStatus != JOB_TASK_STATUS_PARTIAL_SUCCEED) {
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus), TMSG_INFO(msgType));
SCH_TASK_ELOG("rsp msg conflicted with task status, status:%s, rspType:%s", jobTaskStatusStr(taskStatus),
TMSG_INFO(msgType));
SCH_ERR_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
@ -654,7 +663,8 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo
if (SCH_IS_DATA_SRC_TASK(pTask)) {
if (pTask->tryTimes >= SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode)) {
*needRetry = false;
SCH_TASK_DLOG("task no more retry since all ep tried, tryTimes:%d, epNum:%d", pTask->tryTimes, SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
SCH_TASK_DLOG("task no more retry since all ep tried, tryTimes:%d, epNum:%d", pTask->tryTimes,
SCH_TASK_NUM_OF_EPS(&pTask->plan->execNode));
return TSDB_CODE_SUCCESS;
}
} else {
@ -662,7 +672,8 @@ int32_t schTaskCheckSetRetry(SSchJob *pJob, SSchTask *pTask, int32_t errCode, bo
if ((pTask->candidateIdx + 1) >= candidateNum) {
*needRetry = false;
SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d", pTask->candidateIdx, candidateNum);
SCH_TASK_DLOG("task no more retry since all candiates tried, candidateIdx:%d, candidateNum:%d",
pTask->candidateIdx, candidateNum);
return TSDB_CODE_SUCCESS;
}
}
@ -706,9 +717,8 @@ int32_t schUpdateHbConnection(SQueryNodeEpId *epId, SSchTrans *trans) {
memcpy(&hb->trans, trans, sizeof(*trans));
SCH_UNLOCK(SCH_WRITE, &hb->lock);
qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, instance:%p, handle:%p",
schMgmt.sId, epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->transInst,
trans->transHandle);
qDebug("hb connection updated, sId:%" PRIx64 ", nodeId:%d, fqdn:%s, port:%d, instance:%p, handle:%p", schMgmt.sId,
epId->nodeId, epId->ep.fqdn, epId->ep.port, trans->transInst, trans->transHandle);
return TSDB_CODE_SUCCESS;
}
@ -965,7 +975,8 @@ int32_t schHandleResponseMsg(SSchJob *pJob, SSchTask *pTask, int32_t msgType, ch
int8_t status = 0;
if (schJobNeedToStop(pJob, &status)) {
SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status), rspCode);
SCH_TASK_ELOG("rsp not processed cause of job status, job status:%s, rspCode:0x%x", jobTaskStatusStr(status),
rspCode);
SCH_RET(atomic_load_32(&pJob->errCode));
}
@ -1173,7 +1184,8 @@ int32_t schHandleHbCallback(void *param, const SDataBuf *pMsg, int32_t code) {
SCH_ERR_RET(schUpdateHbConnection(&rsp.epId, &trans));
int32_t taskNum = (int32_t)taosArrayGetSize(rsp.taskStatus);
qDebug("%d task status in hb rsp, nodeId:%d, fqdn:%s, port:%d", taskNum, rsp.epId.nodeId, rsp.epId.ep.fqdn, rsp.epId.ep.port);
qDebug("%d task status in hb rsp, nodeId:%d, fqdn:%s, port:%d", taskNum, rsp.epId.nodeId, rsp.epId.ep.fqdn,
rsp.epId.ep.port);
for (int32_t i = 0; i < taskNum; ++i) {
STaskStatus *taskStatus = taosArrayGet(rsp.taskStatus, i);
@ -1188,7 +1200,8 @@ int32_t schHandleHbCallback(void *param, const SDataBuf *pMsg, int32_t code) {
// TODO
SCH_JOB_DLOG("TID:0x%" PRIx64 " task status in server: %s", taskStatus->taskId, jobTaskStatusStr(taskStatus->status));
SCH_JOB_DLOG("TID:0x%" PRIx64 " task status in server: %s", taskStatus->taskId,
jobTaskStatusStr(taskStatus->status));
schReleaseJob(taskStatus->refId);
}
@ -1219,7 +1232,6 @@ int32_t schHandleLinkBrokenCallback(void *param, const SDataBuf *pMsg, int32_t c
return TSDB_CODE_SUCCESS;
}
int32_t schGetCallbackFp(int32_t msgType, __async_send_cb_fn_t *fp) {
switch (msgType) {
case TDMT_VND_CREATE_TABLE:
@ -1301,7 +1313,6 @@ int32_t schMakeHbCallbackParam(SSchJob *pJob, SSchTask *pTask, void **pParam) {
return TSDB_CODE_SUCCESS;
}
int32_t schMakeBrokenLinkVal(SSchJob *pJob, SSchTask *pTask, SRpcBrokenlinkVal *brokenVal, bool isHb) {
int32_t code = 0;
SMsgSendInfo *pMsgSendInfo = NULL;
@ -1450,7 +1461,6 @@ _return:
SCH_RET(code);
}
int32_t schRegisterHbConnection(SSchJob *pJob, SSchTask *pTask, SQueryNodeEpId *epId, bool *exist) {
int32_t code = 0;
SSchHbTrans hb = {0};
@ -1475,8 +1485,6 @@ int32_t schRegisterHbConnection(SSchJob *pJob, SSchTask *pTask, SQueryNodeEpId *
return TSDB_CODE_SUCCESS;
}
int32_t schCloneCallbackParam(SSchCallbackParamHeader *pSrc, SSchCallbackParamHeader **pDst) {
if (pSrc->isHbParam) {
SSchHbCallbackParam *dst = taosMemoryMalloc(sizeof(SSchHbCallbackParam));
@ -1568,8 +1576,8 @@ _return:
SCH_RET(code);
}
int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, void *transport, SEpSet* epSet, int32_t msgType, void *msg, uint32_t msgSize, bool persistHandle, SRpcCtx *ctx) {
int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, void *transport, SEpSet *epSet, int32_t msgType, void *msg,
uint32_t msgSize, bool persistHandle, SRpcCtx *ctx) {
int32_t code = 0;
SSchTrans *trans = (SSchTrans *)transport;
@ -1601,9 +1609,9 @@ int32_t schAsyncSendMsg(SSchJob *pJob, SSchTask *pTask, void *transport, SEpSet*
pMsgSendInfo->msgType = msgType;
pMsgSendInfo->fp = fp;
qDebug("start to send %s msg to node[%d,%s,%d], refId:%" PRIx64 "instance:%p, handle:%p",
TMSG_INFO(msgType), ntohl(((SMsgHead *)msg)->vgId), epSet->eps[epSet->inUse].fqdn, epSet->eps[epSet->inUse].port,
pJob->refId, trans->transInst, trans->transHandle);
qDebug("start to send %s msg to node[%d,%s,%d], refId:%" PRIx64 "instance:%p, handle:%p", TMSG_INFO(msgType),
ntohl(((SMsgHead *)msg)->vgId), epSet->eps[epSet->inUse].fqdn, epSet->eps[epSet->inUse].port, pJob->refId,
trans->transInst, trans->transHandle);
int64_t transporterId = 0;
code = asyncSendMsgToServerExt(trans->transInst, epSet, &transporterId, pMsgSendInfo, persistHandle, ctx);
@ -1628,13 +1636,14 @@ int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId) {
SSchTrans trans = {0};
int32_t msgType = TDMT_VND_QUERY_HEARTBEAT;
req.header.vgId = htonl(nodeEpId->nodeId);
req.header.vgId = nodeEpId->nodeId;
req.sId = schMgmt.sId;
memcpy(&req.epId, nodeEpId, sizeof(SQueryNodeEpId));
SSchHbTrans *hb = taosHashGet(schMgmt.hbConnections, nodeEpId, sizeof(SQueryNodeEpId));
if (NULL == hb) {
qError("taosHashGet hb connection failed, nodeId:%d, fqdn:%s, port:%d", nodeEpId->nodeId, nodeEpId->ep.fqdn, nodeEpId->ep.port);
qError("taosHashGet hb connection failed, nodeId:%d, fqdn:%s, port:%d", nodeEpId->nodeId, nodeEpId->ep.fqdn,
nodeEpId->ep.port);
SCH_ERR_RET(code);
}
@ -1689,12 +1698,13 @@ int32_t schBuildAndSendHbMsg(SQueryNodeEpId *nodeEpId) {
SEpSet epSet = {.inUse = 0, .numOfEps = 1};
memcpy(&epSet.eps[0], &nodeEpId->ep, sizeof(nodeEpId->ep));
qDebug("start to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d", trans.transInst, trans.transHandle, nodeEpId->ep.fqdn, nodeEpId->ep.port);
qDebug("start to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d", trans.transInst, trans.transHandle,
nodeEpId->ep.fqdn, nodeEpId->ep.port);
code = asyncSendMsgToServerExt(trans.transInst, &epSet, &transporterId, pMsgSendInfo, true, &rpcCtx);
if (code) {
qError("fail to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d, error:%x - %s",
trans.transInst, trans.transHandle, nodeEpId->ep.fqdn, nodeEpId->ep.port, code, tstrerror(code));
qError("fail to send hb msg, instance:%p, handle:%p, fqdn:%s, port:%d, error:%x - %s", trans.transInst,
trans.transHandle, nodeEpId->ep.fqdn, nodeEpId->ep.port, code, tstrerror(code));
SCH_ERR_JRET(code);
}
@ -1856,7 +1866,8 @@ int32_t schBuildAndSendMsg(SSchJob *pJob, SSchTask *pTask, SQueryNodeAddr *addr,
SCH_SET_TASK_LASTMSG_TYPE(pTask, msgType);
SSchTrans trans = {.transInst = pJob->transport, .transHandle = SCH_GET_TASK_HANDLE(pTask)};
SCH_ERR_JRET(schAsyncSendMsg(pJob, pTask, &trans, &epSet, msgType, msg, msgSize, persistHandle, (rpcCtx.args ? &rpcCtx : NULL)));
SCH_ERR_JRET(schAsyncSendMsg(pJob, pTask, &trans, &epSet, msgType, msg, msgSize, persistHandle,
(rpcCtx.args ? &rpcCtx : NULL)));
if (msgType == TDMT_VND_QUERY) {
SCH_ERR_RET(schRecordTaskExecNode(pJob, pTask, addr, trans.transHandle));
@ -2093,6 +2104,7 @@ static int32_t schExecJobImpl(void *transport, SArray *pNodeList, SQueryPlan *pD
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pJob->attr.explainMode = pDag->explainInfo.mode;
pJob->attr.syncSchedule = syncSchedule;
pJob->transport = transport;
pJob->sql = sql;
@ -2126,19 +2138,24 @@ static int32_t schExecJobImpl(void *transport, SArray *pNodeList, SQueryPlan *pD
tsem_init(&pJob->rspSem, 0, 0);
pJob->refId = taosAddRef(schMgmt.jobRef, pJob);
if (pJob->refId < 0) {
SCH_JOB_ELOG("taosHashPut job failed, error:%s", tstrerror(terrno));
int64_t refId = taosAddRef(schMgmt.jobRef, pJob);
if (refId < 0) {
SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno));
SCH_ERR_JRET(terrno);
}
if (NULL == schAcquireJob(refId)) {
SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId);
SCH_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
pJob->refId = refId;
SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId);
pJob->status = JOB_TASK_STATUS_NOT_START;
SCH_ERR_JRET(schLaunchJob(pJob));
schAcquireJob(pJob->refId);
*job = pJob->refId;
if (syncSchedule) {
@ -2158,6 +2175,54 @@ _return:
SCH_RET(code);
}
int32_t schExecStaticExplain(void *transport, SArray *pNodeList, SQueryPlan *pDag, int64_t *job, const char *sql,
bool syncSchedule) {
qDebug("QID:0x%" PRIx64 " job started", pDag->queryId);
int32_t code = 0;
SSchJob *pJob = taosMemoryCalloc(1, sizeof(SSchJob));
if (NULL == pJob) {
qError("QID:%" PRIx64 " calloc %d failed", pDag->queryId, (int32_t)sizeof(SSchJob));
SCH_ERR_RET(TSDB_CODE_QRY_OUT_OF_MEMORY);
}
pJob->sql = sql;
pJob->attr.queryJob = true;
pJob->attr.explainMode = pDag->explainInfo.mode;
pJob->queryId = pDag->queryId;
pJob->subPlans = pDag->pSubplans;
SCH_ERR_JRET(qExecStaticExplain(pDag, (SRetrieveTableRsp **)&pJob->resData));
int64_t refId = taosAddRef(schMgmt.jobRef, pJob);
if (refId < 0) {
SCH_JOB_ELOG("taosAddRef job failed, error:%s", tstrerror(terrno));
SCH_ERR_JRET(terrno);
}
if (NULL == schAcquireJob(refId)) {
SCH_JOB_ELOG("schAcquireJob job failed, refId:%" PRIx64, refId);
SCH_RET(TSDB_CODE_SCH_STATUS_ERROR);
}
pJob->refId = refId;
SCH_JOB_DLOG("job refId:%" PRIx64, pJob->refId);
pJob->status = JOB_TASK_STATUS_PARTIAL_SUCCEED;
*job = pJob->refId;
SCH_JOB_DLOG("job exec done, job status:%s", SCH_GET_JOB_STATUS_STR(pJob));
schReleaseJob(pJob->refId);
return TSDB_CODE_SUCCESS;
_return:
schFreeJobImpl(pJob);
SCH_RET(code);
}
int32_t schedulerInit(SSchedulerCfg *cfg) {
if (schMgmt.jobRef) {
qError("scheduler already initialized");
@ -2206,7 +2271,11 @@ int32_t schedulerExecJob(void *transport, SArray *nodeList, SQueryPlan *pDag, in
SCH_ERR_RET(TSDB_CODE_QRY_INVALID_INPUT);
}
if (EXPLAIN_MODE_STATIC == pDag->explainInfo.mode) {
SCH_ERR_RET(schExecStaticExplain(transport, nodeList, pDag, pJob, sql, true));
} else {
SCH_ERR_RET(schExecJobImpl(transport, nodeList, pDag, pJob, sql, true));
}
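/* Sketch of the split above: a statically-explained query never reaches the vnodes --
 * schExecStaticExplain builds the EXPLAIN result locally via qExecStaticExplain and leaves
 * the job in JOB_TASK_STATUS_PARTIAL_SUCCEED, so a later schedulerFetchRows returns the
 * cached resData without calling schFetchFromRemote; every other plan still goes through
 * schExecJobImpl. */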
SSchJob *job = schAcquireJob(*pJob);
@ -2389,10 +2458,14 @@ int32_t schedulerFetchRows(int64_t job, void **pData) {
SCH_JOB_DLOG("job already succeed, status:%s", jobTaskStatusStr(status));
goto _return;
} else if (status == JOB_TASK_STATUS_PARTIAL_SUCCEED) {
if (!(pJob->attr.explainMode == EXPLAIN_MODE_STATIC)) {
SCH_ERR_JRET(schFetchFromRemote(pJob));
}
tsem_wait(&pJob->rspSem);
}
} else {
SCH_JOB_ELOG("job status error for fetch, status:%s", jobTaskStatusStr(status));
SCH_ERR_JRET(TSDB_CODE_SCH_STATUS_ERROR);
}
status = SCH_GET_JOB_STATUS(pJob);

View File

@ -38,7 +38,7 @@ static int32_t streamBuildDispatchMsg(SStreamTask* pTask, SArray* data, SRpcMsg*
req.taskId = pTask->fixedEpDispatcher.taskId;
} else if (pTask->dispatchType == TASK_DISPATCH__SHUFFLE) {
// TODO fix tbname issue
// TODO use general name rule of schemaless
char ctbName[TSDB_TABLE_FNAME_LEN + 22];
// all groupId must be the same in an array
SSDataBlock* pBlock = taosArrayGet(data, 0);

View File

@ -30,6 +30,9 @@ SWalReadHandle *walOpenReadHandle(SWal *pWal) {
pRead->curFileFirstVer = -1;
pRead->capacity = 0;
pRead->status = 0;
taosThreadMutexInit(&pRead->mutex, NULL);
pRead->pHead = taosMemoryMalloc(sizeof(SWalHead));
if (pRead->pHead == NULL) {
terrno = TSDB_CODE_WAL_OUT_OF_MEMORY;
@ -135,6 +138,22 @@ static int32_t walReadSeekVer(SWalReadHandle *pRead, int64_t ver) {
return 0;
}
int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalReadHead **ppHead) {
taosThreadMutexLock(&pRead->mutex);
if (walReadWithHandle(pRead, ver) < 0) {
taosThreadMutexUnlock(&pRead->mutex);
return -1;
}
*ppHead = taosMemoryMalloc(sizeof(SWalReadHead) + pRead->pHead->head.len);
if (*ppHead == NULL) {
taosThreadMutexUnlock(&pRead->mutex);
return -1;
}
memcpy(*ppHead, &pRead->pHead->head, sizeof(SWalReadHead) + pRead->pHead->head.len);
taosThreadMutexUnlock(&pRead->mutex);
return 0;
}
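/* walReadWithHandle_s is the thread-safe wrapper: it serializes access with pRead->mutex,
 * lets walReadWithHandle fill pRead->pHead, and then returns a private copy of the head plus
 * body in *ppHead (sized sizeof(SWalReadHead) + head.len). A usage sketch, assuming the
 * caller owns and frees the copy:
 *   SWalReadHead *pHead = NULL;
 *   if (walReadWithHandle_s(pRead, ver, &pHead) == 0) { ...; taosMemoryFree(pHead); }
 */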
int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver) {
int code;
// TODO: check wal life
@ -145,7 +164,9 @@ int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver) {
}
}
if (!taosValidFile(pRead->pReadLogTFile)) return -1;
if (!taosValidFile(pRead->pReadLogTFile)) {
return -1;
}
code = taosReadFile(pRead->pReadLogTFile, pRead->pHead, sizeof(SWalHead));
if (code != sizeof(SWalHead)) {

View File

@ -23,12 +23,27 @@ int32_t taosNewProc(char **args) {
int32_t pid = fork();
if (pid == 0) {
args[0] = tsProcPath;
close(STDIN_FILENO);
close(STDOUT_FILENO);
close(STDERR_FILENO);
return execvp(tsProcPath, args);
} else {
return pid;
}
}
void taosWaitProc(int32_t pid) {
int32_t status = 0;
waitpid(pid, &status, 0);
}
void taosKillProc(int32_t pid) { kill(pid, SIGINT); }
bool taosProcExist(int32_t pid) {
int32_t p = getpgid(pid);
return p >= 0;
}
// the length of the new name must be less than the original name to take effect
void taosSetProcName(int32_t argc, char **argv, const char *name) {
prctl(PR_SET_NAME, name);
@ -45,8 +60,3 @@ void taosSetProcName(int32_t argc, char **argv, const char *name) {
}
void taosSetProcPath(int32_t argc, char **argv) { tsProcPath = argv[0]; }
bool taosProcExists(int32_t pid) {
int32_t p = getpgid(pid);
return p >= 0;
}

View File

@ -590,12 +590,12 @@ void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) {
}
int32_t cfgLoadFromEnvVar(SConfig *pConfig) {
uInfo("load from global env variables success");
uInfo("load from env variables not implemented yet");
return 0;
}
int32_t cfgLoadFromEnvFile(SConfig *pConfig, const char *filepath) {
uInfo("load from env file [%s] success", filepath);
uInfo("load from env file not implemented yet");
return 0;
}
@ -649,11 +649,11 @@ int32_t cfgLoadFromCfgFile(SConfig *pConfig, const char *filepath) {
taosCloseFile(&pFile);
if (line != NULL) taosMemoryFreeClear(line);
uInfo("load from cfg file [%s] success", filepath);
uInfo("load from cfg file %s success", filepath);
return 0;
}
int32_t cfgLoadFromApollUrl(SConfig *pConfig, const char *url) {
uInfo("load from apoll url [%s] success", url);
uInfo("load from apoll url not implemented yet");
return 0;
}

View File

@ -121,7 +121,8 @@ int32_t tjsonAddItem(SJson* pJson, FToJson func, const void* pObj) {
return tjsonAddItemToArray(pJson, pJobj);
}
int32_t tjsonAddArray(SJson* pJson, const char* pName, FToJson func, const void* pArray, int32_t itemSize, int32_t num) {
int32_t tjsonAddArray(SJson* pJson, const char* pName, FToJson func, const void* pArray, int32_t itemSize,
int32_t num) {
if (num > 0) {
SJson* pJsonArray = tjsonAddArrayToObject(pJson, pName);
if (NULL == pJsonArray) {
@ -168,7 +169,7 @@ int32_t tjsonGetBigIntValue(const SJson* pJson, const char* pName, int64_t* pVal
}
*pVal = strtol(p, NULL, 10);
return (errno == EINVAL || errno == ERANGE) ? TSDB_CODE_FAILED:TSDB_CODE_SUCCESS;
return TSDB_CODE_SUCCESS;
}
int32_t tjsonGetIntValue(const SJson* pJson, const char* pName, int32_t* pVal) {
@ -199,7 +200,7 @@ int32_t tjsonGetUBigIntValue(const SJson* pJson, const char* pName, uint64_t* pV
}
*pVal = strtoul(p, NULL, 10);
return (errno == ERANGE||errno == EINVAL) ? TSDB_CODE_FAILED:TSDB_CODE_SUCCESS;
return TSDB_CODE_SUCCESS;
}
int32_t tjsonGetUIntValue(const SJson* pJson, const char* pName, uint32_t* pVal) {

View File

@ -140,14 +140,16 @@ static void taosProcDestroySem(SProcQueue *pQueue) {
pQueue->sem = NULL;
}
}
#endif
static void taosProcCleanupQueue(SProcQueue *pQueue) {
#if 0
if (pQueue != NULL) {
taosProcDestroyMutex(pQueue);
taosProcDestroySem(pQueue);
}
}
#endif
}
static int32_t taosProcQueuePush(SProcQueue *pQueue, const char *pHead, int16_t rawHeadLen, const char *pBody,
int32_t rawBodyLen, ProcFuncType ftype) {
@ -222,7 +224,6 @@ static int32_t taosProcQueuePop(SProcQueue *pQueue, void **ppHead, int16_t *pHea
taosThreadMutexLock(&pQueue->mutex);
if (pQueue->total - pQueue->avail <= 0) {
taosThreadMutexUnlock(&pQueue->mutex);
tsem_post(&pQueue->sem);
terrno = TSDB_CODE_OUT_OF_SHM_MEM;
return 0;
}
@ -317,7 +318,7 @@ SProcObj *taosProcInit(const SProcCfg *pCfg) {
pProc->pChildQueue = taosProcInitQueue(pCfg->name, pCfg->isChild, (char *)pCfg->shm.ptr + cstart, csize);
pProc->pParentQueue = taosProcInitQueue(pCfg->name, pCfg->isChild, (char *)pCfg->shm.ptr + pstart, psize);
if (pProc->pChildQueue == NULL || pProc->pParentQueue == NULL) {
// taosProcCleanupQueue(pProc->pChildQueue);
taosProcCleanupQueue(pProc->pChildQueue);
taosMemoryFree(pProc);
return NULL;
}
@ -370,13 +371,13 @@ static void taosProcThreadLoop(SProcObj *pProc) {
freeBodyFp = pProc->parentFreeBodyFp;
}
uDebug("proc:%s, start to get msg from queue:%p, isChild:%d", pProc->name, pQueue, pProc->isChild);
uDebug("proc:%s, start to get msg from queue:%p", pProc->name, pQueue);
while (1) {
int32_t numOfMsgs = taosProcQueuePop(pQueue, &pHead, &headLen, &pBody, &bodyLen, &ftype, mallocHeadFp, freeHeadFp,
mallocBodyFp, freeBodyFp);
if (numOfMsgs == 0) {
uInfo("proc:%s, get no msg from queue:%p and exit the proc thread", pProc->name, pQueue);
uDebug("proc:%s, get no msg from queue:%p and exit the proc thread", pProc->name, pQueue);
break;
} else if (numOfMsgs < 0) {
uTrace("proc:%s, get no msg from queue:%p since %s", pProc->name, pQueue, terrstr());
@ -406,7 +407,7 @@ int32_t taosProcRun(SProcObj *pProc) {
static void taosProcStop(SProcObj *pProc) {
if (!taosCheckPthreadValid(pProc->thread)) return;
uDebug("proc:%s, start to join thread:%" PRId64 ", isChild:%d", pProc->name, pProc->thread, pProc->isChild);
uDebug("proc:%s, start to join thread:%" PRId64, pProc->name, pProc->thread);
SProcQueue *pQueue;
if (pProc->isChild) {
pQueue = pProc->pChildQueue;
@ -421,9 +422,9 @@ void taosProcCleanup(SProcObj *pProc) {
if (pProc != NULL) {
uDebug("proc:%s, start to clean up", pProc->name);
taosProcStop(pProc);
taosProcCleanupQueue(pProc->pChildQueue);
taosProcCleanupQueue(pProc->pParentQueue);
uDebug("proc:%s, is cleaned up", pProc->name);
// taosProcCleanupQueue(pProc->pChildQueue);
// taosProcCleanupQueue(pProc->pParentQueue);
taosMemoryFree(pProc);
}
}

View File

@ -5,6 +5,7 @@
./test.sh -f tsim/user/basic1.sim
# ---- db
./test.sh -f tsim/db/alter_option.sim
./test.sh -f tsim/db/basic1.sim
./test.sh -f tsim/db/basic2.sim
./test.sh -f tsim/db/basic3.sim

View File

@ -59,24 +59,14 @@ endi
print ============= create database
#database_option: {
# BLOCKS value [3~1000, default: 6]
# | CACHE value [default: 16M]
# | CACHELAST value [0, 1, 2, 3]
# | COMP [0 | 1 | 2]
# | DAYS value [unit is minutes]
# | FSYNC value [0 ~ 180000 ms]
# | MAXROWS value [default: 4096]
# | MINROWS value [default: 100]
# | KEEP value [days, 365000]
# | PRECISION ['ms' | 'us' | 'ns']
# | QUORUM value [1 | 2]
# | REPLICA value [1 | 3]
# | TTL value [unit is day, min=1]
# | WAL value [1 | 2]
# | VGROUPS value [default: 2]
# | SINGLE_STABLE [0 | 1]
# | STREAM_MODE [0 | 1]
sql create database db BLOCKS 7 CACHE 3 CACHELAST 3 COMP 0 DAYS 240 FSYNC 1000 MAXROWS 8000 MINROWS 10 KEEP 1000 PRECISION 'ns' QUORUM 1 REPLICA 3 TTL 7 WAL 2 VGROUPS 6 SINGLE_STABLE 1 STREAM_MODE 1
sql create database db BLOCKS 7 CACHE 3 CACHELAST 3 COMP 0 DAYS 345600 FSYNC 1000 MAXROWS 8000 MINROWS 10 KEEP 1440000 PRECISION 'ns' QUORUM 1 REPLICA 3 TTL 7 WAL 2 VGROUPS 6 SINGLE_STABLE 1 STREAM_MODE 1
sql show databases
print rows: $rows
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
@ -102,10 +92,10 @@ endi
if $data5_db != 1 then # quorum
return -1
endi
if $data6_db != 240 then # days
if $data6_db != 345600 then # days
return -1
endi
if $data7_db != 1000,1000,1000 then # keep
if $data7_db != 1440000,1440000,1440000 then # keep
return -1
endi
if $data8_db != 3 then # cache
@ -187,28 +177,16 @@ sql_error alter database db quorum 4
sql_error alter database db quorum 5
#print ============== modify days
#sql alter database db days 480
#sql show databases
#print days $data6_db
#if $data6_db != 480 then # days
# return -1
#endi
#sql alter database db days 360
#sql show databases
#print days $data6_db
#if $data6_db != 360 then # days
# return -1
#endi
sql_error alter database db days 480
sql_error alter database db days 360
sql_error alter database db days 0
#sql_error alter database db days 14400 # set over than keep
sql_error alter database db days 14400 # set larger than keep
print ============== modify keep
sql alter database db keep 2000
sql show databases
print keep $data7_db
if $data7_db != 1000,1000,2000 then
if $data7_db != 2000,2000,2000 then
return -1
endi
@ -230,25 +208,14 @@ sql_error alter database db keep -1
#sql_error alter database db keep 365001
print ============== modify cache
#sql alter database db cache 12
#sql show databases
#print cache $data8_db
#if $data8_db != 12 then
# return -1
#endi
#sql alter database db cache 1
#sql show databases
#print cache $data8_db
#if $data8_db != 6 then
# return -1
#endi
#
#sql_error alter database db cache 60
#sql_error alter database db cache 50
#sql_error alter database db cache 20
#sql_error alter database db cache 3
#sql_error alter database db cache 129
#sql_error alter database db cache 300
sql_error alter database db cache 12
sql_error alter database db cache 1
sql_error alter database db cache 60
sql_error alter database db cache 50
sql_error alter database db cache 20
sql_error alter database db cache 3
sql_error alter database db cache 129
sql_error alter database db cache 300
sql_error alter database db cache 0
sql_error alter database db cache -1
@ -276,45 +243,18 @@ sql_error alter database db blocks 0
sql_error alter database db blocks -1
sql_error alter database db blocks 10001
#print ============== modify minrows
#sql alter database db minrows 8
#sql show databases
#print minrows $data10_db
#if $data10_db != 8 then
# return -1
#endi
#sql alter database db minrows 200
#sql show databases
#print minrows $data10_db
#if $data10_db != 200 then
# return -1
#endi
#
#sql alter database db minrows 11
#sql show databases
#print minrows $data10_db
#if $data10_db != 11 then
# return -1
#endi
#sql_error alter database db minrows 8000
#sql_error alter database db minrows 8001
print ============== modify minrows
sql_error alter database db minrows 8
sql_error alter database db minrows 200
sql_error alter database db minrows 11
sql_error alter database db minrows 8000
sql_error alter database db minrows 8001
#print ============== modify maxrows
#sql alter database db maxrows 1000
#sql show databases
#print maxrows $data11_db
#if $data11_db != 1000 then
# return -1
#endi
#sql alter database db maxrows 2000
#sql show databases
#print maxrows $data11_db
#if $data11_db != 2000 then
# return -1
#endi
#
#sql_error alter database db maxrows 11 # equal minrows
#sql_error alter database db maxrows 10 # little than minrows
print ============== modify maxrows
sql_error alter database db maxrows 1000
sql_error alter database db maxrows 2000
sql_error alter database db maxrows 11 # equal to minrows
sql_error alter database db maxrows 10 # less than minrows
print ============== step wal
sql alter database db wal 1
@ -330,7 +270,7 @@ if $data12_db != 2 then
return -1
endi
sql_error alter database db wal 0
#sql_error alter database db wal 0 # TD-14436
sql_error alter database db wal 3
sql_error alter database db wal 100
sql_error alter database db wal -1
@ -348,35 +288,20 @@ print fsync $data13_db
if $data13_db != 500 then
return -1
endi
sql_error alter database db fsync 0
sql alter database db fsync 0
sql show databases
print fsync $data13_db
if $data13_db != 0 then
return -1
endi
sql_error alter database db fsync 180001
sql_error alter database db fsync -1
print ============== modify comp
sql alter database db comp 1
sql show databases
print comp $data14_db
if $data14_db != 1 then
return -1
endi
sql alter database db comp 2
sql show databases
print comp $data14_db
if $data14_db != 2 then
return -1
endi
sql alter database db comp 1
sql show databases
print comp $data14_db
if $data14_db != 1 then
return -1
endi
sql alter database db comp 0
sql show databases
print comp $data14_db
if $data14_db != 0 then
return -1
endi
sql_error alter database db comp 1
sql_error alter database db comp 2
sql_error alter database db comp 1
sql_error alter database db comp 0
sql_error alter database db comp 3
sql_error alter database db comp 4
sql_error alter database db comp 5
@ -414,30 +339,15 @@ if $data15_db != 3 then
return -1
endi
sql_error alter database db comp 4
sql_error alter database db comp 10
sql_error alter database db comp -1
sql_error alter database db cachelast 4
sql_error alter database db cachelast 10
sql_error alter database db cachelast -1
print ============== modify precision
sql alter database db precision 'ms'
sql show databases
print precision $data16_db
if $data16_db != ms then
return -1
endi
sql alter database db precision 'us'
sql show databases
print precision $data16_db
if $data16_db != us then
return -1
endi
sql alter database db precision 'ns'
sql show databases
print precision $data16_db
if $data16_db != ns then
return -1
endi
sql_error alter database db precision 'ms'
sql_error alter database db precision 'us'
sql_error alter database db precision 'ns'
sql_error alter database db precision 'ys'
sql_error alter database db prec 'xs'
#system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -15,7 +15,7 @@ $tb = $tbPrefix . $i
print =============== step1
# quorum precision
sql create database $db vgroups 8 replica 1 days 20 keep 3650 cache 32 blocks 12 minrows 80 maxrows 10000 wal 2 fsync 1000 comp 0 cachelast 2 precision 'us'
sql create database $db vgroups 8 replica 1 days 2880 keep 3650 cache 32 blocks 12 minrows 80 maxrows 10000 wal 2 fsync 1000 comp 0 cachelast 2 precision 'us'
sql show databases
print $data0_1 $data1_1 $data2_1 $data3_1 $data4_1 $data5_1 $data6_1 $data7_1 $data8_1 $data9_1
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
@ -35,7 +35,7 @@ endi
if $data04 != 1 then
return -1
endi
if $data06 != 20 then
if $data06 != 2880 then
return -1
endi
if $data07 != 3650,3650,3650 then
@ -67,7 +67,7 @@ print =============== step4
sql_error drop database $db
print =============== step5
sql create database $db replica 1 days 15 keep 1500
sql create database $db replica 1 days 21600 keep 2160000
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07
if $data00 != $db then
@ -79,7 +79,7 @@ endi
if $data04 != 1 then
return -1
endi
if $data06 != 15 then
if $data06 != 21600 then
return -1
endi

View File

@ -73,39 +73,45 @@ sql insert into ct4 values ( '2022-12-01 01:01:30.000', 8 )
sql insert into ct4 values ( '2022-12-31 01:01:36.000', 9 )
print ================ start query ======================
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct1 interval(10s, 2s)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct1 interval(10s, 2s)
sql select _wstartts, _wendts, _wduration, _qstartts, _qendts, count(*) from ct1 interval(10s, 2s)
print ===> select _wstartts, _wendts, _wduration, _qstartts, _qendts, count(*) from ct1 interval(10s, 2s)
print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
print ===> rows0: $data00 $data01 $data02 $data05
print ===> rows1: $data10 $data11 $data12 $data15
print ===> rows2: $data20 $data21 $data22 $data25
print ===> rows3: $data30 $data31 $data32 $data35
print ===> rows4: $data40 $data41 $data42 $data45
if $rows != 5 then
return -1
endi
if $data00 != 1 then
if $data00 != @22-01-01 01:00:52.000@ then
return -1
endi
if $data40 != 1 then
if $data02 != 10000 then
return -1
endi
if $data45 != 1 then
return -1
endi
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct1 interval(10s, 2s) sliding(10s)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct1 interval(10s, 2s) sliding(10s)
sql select _wstartts, _wendts, _wduration, _qstartts, _qendts, count(*) from ct1 interval(10s, 2s) sliding(10s)
print ===> select _wstartts, _wendts, _wduration, _qstartts, _qendts, count(*) from ct1 interval(10s, 2s) sliding(10s)
print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
print ===> rows0: $data00 $data01 $data02 $data05
print ===> rows1: $data10 $data11 $data12 $data15
print ===> rows2: $data20 $data21 $data22 $data25
print ===> rows3: $data30 $data31 $data32 $data35
print ===> rows4: $data40 $data41 $data42 $data45
if $rows != 5 then
return -1
endi
if $data00 != 1 then
if $data00 != @22-01-01 01:00:52.000@ then
return -1
endi
if $data40 != 1 then
if $data02 != 10000 then
return -1
endi
if $data45 != 1 then
return -1
endi
@ -176,80 +182,81 @@ if $data70 != 1 then
return -1
endi
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct3 interval(1n, 1w)
sql select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct3 interval(1n, 1w)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct3 interval(1n, 1w)
print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
#if $rows != 5 then
# return -1
#endi
#if $data00 != 1 then
# return -1
#endi
#if $data40 != 1 then
# return -1
#endi
if $rows != 4 then
return -1
endi
if $data00 != @21-12-08 00:00:00.000@ then
return -1
endi
if $data31 != 1 then
return -1
endi
if $data34 != $data31 then
return -1
endi
if $data02 != 2678400000 then
return -1
endi
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct3 interval(1n, 1w) sliding(2w)
sql select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct3 interval(1n, 1w) sliding(2w)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct3 interval(1n, 1w) sliding(2w)
print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
#if $rows != 5 then
# return -1
#endi
#if $data00 != 1 then
# return -1
#endi
#if $data40 != 1 then
# return -1
#endi
if $rows != 4 then
return -1
endi
if $data00 != @21-11-30 08:00:00.000@ then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data31 != $data34 then
return -1
endi
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct3 interval(1n, 1w) sliding(4w)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct3 interval(1n, 1w) sliding(4w)
sql select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct3 interval(1n, 1w) sliding(4w)
print ===> select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct3 interval(1n, 1w) sliding(4w)
print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
print ===> rows5: $data50 $data51 $data52 $data53 $data54
print ===> rows6: $data60 $data61 $data62 $data63 $data64
print ===> rows7: $data70 $data71 $data72 $data73 $data74
#if $rows != 8 then
# return -1
#endi
#if $data00 != 2 then
# return -1
#endi
#if $data70 != 1 then
# return -1
#endi
if $rows != 4 then
return -1
endi
if $data01 != NULL then
return -1
endi
if $data04 != 1 then
return -1
endi
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct4 interval(1y, 6n)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct4 interval(1y, 6n)
sql select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct4 interval(1y, 6n)
print ===> select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct4 interval(1y, 6n)
print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
#if $rows != 5 then
# return -1
#endi
#if $data00 != 1 then
# return -1
#endi
#if $data40 != 1 then
# return -1
#endi
if $rows != 3 then
return -1
endi
if $data01 != 2 then
return -1
endi
if $data04 != 2 then
return -1
endi
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct4 interval(1y, 6n) sliding(6n)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct4 interval(1y, 6n) sliding(6n)
@ -257,38 +264,31 @@ print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
#if $rows != 5 then
# return -1
#endi
#if $data00 != 1 then
# return -1
#endi
#if $data40 != 1 then
# return -1
#endi
if $rows != 3 then
return -1
endi
if $data00 != 2 then
return -1
endi
if $data04 != 2 then
return -1
endi
sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct4 interval(1y, 6n) sliding(12n)
print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(*) from ct4 interval(1y, 6n) sliding(12n)
sql select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct4 interval(1y, 6n) sliding(12n)
print ===> select _wstartts, count(tbcol), _wduration, _wstartts, count(*) from ct4 interval(1y, 6n) sliding(12n)
print ===> rows: $rows
print ===> rows0: $data00 $data01 $data02 $data03 $data04
print ===> rows1: $data10 $data11 $data12 $data13 $data14
print ===> rows2: $data20 $data21 $data22 $data23 $data24
print ===> rows3: $data30 $data31 $data32 $data33 $data34
print ===> rows4: $data40 $data41 $data42 $data43 $data44
print ===> rows5: $data50 $data51 $data52 $data53 $data54
print ===> rows6: $data60 $data61 $data62 $data63 $data64
print ===> rows7: $data70 $data71 $data72 $data73 $data74
#if $rows != 8 then
# return -1
#endi
#if $data00 != 2 then
# return -1
#endi
#if $data70 != 1 then
# return -1
#endi
if $rows != 3 then
return -1
endi
if $data01 != 2 then
return -1
endi
if $data04 != 2 then
return -1
endi
#=================================================
print =============== stop and restart taosd
@ -322,9 +322,4 @@ endi
#sql select count(*) from car where ts > '2019-05-14 00:00:00' interval(1y, 5d)
#system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -59,7 +59,7 @@ if $data04 != 1 then
return -1
endi
#print =============== step3
print =============== step3
#$cc = 4 * 60000
#$ms = 1601481600000 + $cc
#sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms interval(1m)
@ -75,25 +75,25 @@ endi
# return -1
#endi
#print =============== step4
print =============== step4
#$cc = 40 * 60000
#$ms = 1601481600000 + $cc
#$cc = 1 * 60000
#$ms2 = 1601481600000 - $cc
#sql select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m)
#print ===> select count(tbcol), sum(tbcol), max(tbcol), min(tbcol), count(tbcol) from $tb where ts <= $ms and ts > $ms2 interval(1m)
#print ===> $rows $data01 $data05
#if $rows != 20 then
# return -1
#endi
#if $data00 != 1 then
# return -1
#endi
#if $data04 != 1 then
# return -1
#endi
sql select _wstartts, _wendts, _wduration, _qstartts, _qendts, count(tbcol) from $tb interval(1m)
print ===> select _wstartts, _wendts, _wduration, _qstartts, _qendts, count(tbcol) from $tb interval(1m)
print ===> $rows $data01 $data05
if $rows != $rowNum then
return -1
endi
if $data05 != 1 then
return -1
endi
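# _wduration of a 1m window is expected to be 60000 (the window duration is reported in milliseconds here)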
if $data02 != 60000 then
return -1
endi
#print =============== step5
#$cc = 40 * 60000

View File

@ -1,6 +1,7 @@
run tsim/user/basic1.sim
run tsim/db/alter_option.sim
run tsim/db/basic1.sim
run tsim/db/basic2.sim
run tsim/db/basic3.sim

View File

@ -0,0 +1,224 @@
#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
# scene1: vgroups=1, one topic for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# scene2: vgroups=1, multi topics for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# scene3: vgroups=4, one topic for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# scene4: vgroups=4, multi topics for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
#
# notes2: aggregate functions (such as sum/count/min/max) and time windows (interval) are not supported.
#
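# illustration only (a hedged sketch, not executed by this case; column names follow the stb schema created below):
#   create topic topic_stb_sin as select ts, sin(c2) from stb where ts > '2022-01-01 00:00:00.000' and sin(c1) > 0.5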
######## ######## ######## ######## ######## ######## ######## ######## ######## ########
######## This test case include scene2 and scene4
######## ######## ######## ######## ######## ######## ######## ######## ######## ########
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1
system sh/exec.sh -n dnode1 -s start
$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
return -1
endi
if $data04 != ready then
goto check_dnode_ready
endi
sql connect
$loop_cnt = 0
$vgroups = 1
$dbNamme = d0
loop_vgroups:
print =============== create database $dbNamme vgroups $vgroups
sql create database $dbNamme vgroups $vgroups
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19
print $data20 $data21 $data22 $data23 $data24 $data25 $data26 $data27 $data28 $data29
if $loop_cnt == 0 then
if $rows != 2 then
return -1
endi
if $data02 != 1 then # vgroups
print vgroups: $data02
return -1
endi
else
if $rows != 3 then
return -1
endi
if $data00 == d1 then
if $data02 != 4 then # vgroups
print vgroups: $data02
return -1
endi
else
if $data12 != 4 then # vgroups
print vgroups: $data12
return -1
endi
endi
endi
sql use $dbNamme
print =============== create super table
sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
sql show stables
if $rows != 1 then
return -1
endi
print =============== create child table
$tbPrefix = ct
$tbNum = 100
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using stb tags( $i )
$i = $i + 1
endw
print =============== create normal table
sql create table ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
print =============== create multi topics. notes: now only support:
print =============== 1. columns from stb/ctb/ntb; 2. * from ctb/ntb; 3. function from stb/ctb/ntb
print =============== will support: * from stb
sql create topic topic_stb_column as select ts, c1, c3 from stb
#sql create topic topic_stb_all as select * from stb
sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
sql create topic topic_ctb_column as select ts, c1, c3 from ct0
sql create topic topic_ctb_all as select * from ct0
sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ct0
sql create topic topic_ntb_column as select ts, c1, c3 from ntb
sql create topic topic_ntb_all as select * from ntb
sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb
sql show tables
if $rows != 101 then
return -1
endi
print =============== insert data
$rowNum = 100
$tstart = 1640966400000 # 2022-01-01 00:00:00.000
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
$x = 0
while $x < $rowNum
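# the next three lines compute $c = $x mod 10 via integer division; $c is used as the int column value and in the binary string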
$c = $x / 10
$c = $c * 10
$c = $x - $c
$binary = ' . binary
$binary = $binary . $c
$binary = $binary . '
sql insert into $tb values ($tstart , $c , $x , $binary )
sql insert into ntb values ($tstart , $c , $x , $binary )
$tstart = $tstart + 1
$x = $x + 1
endw
$i = $i + 1
$tstart = 1640966400000
endw
#root@trd02 /home $ tmq_sim --help
# -c Configuration directory, default is
# -d The name of the database for consumer, no default
# -t The topic string for consumer, no default
# -k The key-value string for consumer, no default
# -g showMsgFlag, default is 0
#
$totalMsgCnt = $rowNum * $tbNum
print inserted totalMsgCnt: $totalMsgCnt
# supported key:
# group.id:<xxx>
# enable.auto.commit:<true | false>
# auto.offset.reset:<earliest | latest | none>
# td.connect.ip:<fqdn | ipaddress>
# td.connect.user:root
# td.connect.pass:taosdata
# td.connect.port:6030
# td.connect.db:db
$numOfTopics = 2
$totalMsgCntOfmultiTopics = $totalMsgCnt * $numOfTopics
$expect_result = @{consume success: @
$expect_result = $expect_result . $totalMsgCntOfmultiTopics
$expect_result = $expect_result . @, @
$expect_result = $expect_result . 0}
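# e.g. with $rowNum=100 and $tbNum=100, totalMsgCnt is 10000, so two stb topics should yield: {consume success: 20000, 0}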
print expect_result----> $expect_result
#print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_column, topic_stb_function, topic_stb_all" -k "group.id:tg2"
#system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_column, topic_stb_function, topic_stb_all" -k "group.id:tg2"
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_column, topic_stb_function" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_column, topic_stb_function" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 20000, 0}@ then
if $system_content != $expect_result then
return -1
endi
$numOfTopics = 3
$totalMsgCntOfmultiTopics = $rowNum * $numOfTopics
$expect_result = @{consume success: @
$expect_result = $expect_result . $totalMsgCntOfmultiTopics
$expect_result = $expect_result . @, @
$expect_result = $expect_result . 0}
print expect_result----> $expect_result
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_column, topic_ctb_function, topic_ctb_all" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_column, topic_ctb_function, topic_ctb_all" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 300, 0}@ then
if $system_content != $expect_result then
return -1
endi
$numOfTopics = 3
$totalMsgCntOfmultiTopics = $totalMsgCnt * $numOfTopics
$expect_result = @{consume success: @
$expect_result = $expect_result . $totalMsgCntOfmultiTopics
$expect_result = $expect_result . @, @
$expect_result = $expect_result . 0}
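# e.g. three ntb topics over the 10000 inserted rows should yield: {consume success: 30000, 0}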
print expect_result----> $expect_result
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_column, topic_ntb_all, topic_ntb_function" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_column, topic_ntb_all, topic_ntb_function" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 30000, 0}@ then
if $system_content != $expect_result then
return -1
endi
if $loop_cnt == 0 then
$loop_cnt = 1
$vgroups = 4
$dbNamme = d1
goto loop_vgroups
endi
#system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -0,0 +1,264 @@
#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
# scene1: vgroups=1, one topic for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# scene2: vgroups=1, multi topics for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# scene3: vgroups=4, one topic for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# scene4: vgroups=4, multi topics for one consumer, include: columns from stb/ctb/ntb, * from stb/ctb/ntb, Scalar function from stb/ctb/ntb
# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
#
# notes2: aggregate functions (such as sum/count/min/max) and time windows (interval) are not supported.
#
######## ######## ######## ######## ######## ######## ######## ######## ######## ########
######## This test case include scene1 and scene3
######## ######## ######## ######## ######## ######## ######## ######## ######## ########
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/cfg.sh -n dnode1
system sh/exec.sh -n dnode1 -s start
$loop_cnt = 0
check_dnode_ready:
$loop_cnt = $loop_cnt + 1
sleep 200
if $loop_cnt == 10 then
print ====> dnode not ready!
return -1
endi
sql show dnodes
print ===> $rows $data00 $data01 $data02 $data03 $data04 $data05
if $data00 != 1 then
return -1
endi
if $data04 != ready then
goto check_dnode_ready
endi
sql connect
$loop_cnt = 0
$vgroups = 1
$dbNamme = d0
loop_vgroups:
print =============== create database $dbNamme vgroups $vgroups
sql create database $dbNamme vgroups $vgroups
sql show databases
print $data00 $data01 $data02 $data03 $data04 $data05 $data06 $data07 $data08 $data09
print $data10 $data11 $data12 $data13 $data14 $data15 $data16 $data17 $data18 $data19
print $data20 $data21 $data22 $data23 $data24 $data25 $data26 $data27 $data28 $data29
if $loop_cnt == 0 then
if $rows != 2 then
return -1
endi
if $data02 != 1 then # vgroups
print vgroups: $data02
return -1
endi
else
if $rows != 3 then
return -1
endi
if $data00 == d1 then
if $data02 != 4 then # vgroups
print vgroups: $data02
return -1
endi
else
if $data12 != 4 then # vgroups
print vgroups: $data12
return -1
endi
endi
endi
sql use $dbNamme
print =============== create super table
sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 binary(10)) tags (t1 int)
sql show stables
if $rows != 1 then
return -1
endi
print =============== create child table
$tbPrefix = ct
$tbNum = 100
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
sql create table $tb using stb tags( $i )
$i = $i + 1
endw
print =============== create normal table
sql create table ntb (ts timestamp, c1 int, c2 float, c3 binary(10))
print =============== create multi topics. notes: now only support:
print =============== 1. columns from stb/ctb/ntb; 2. * from ctb/ntb; 3. function from stb/ctb/ntb
print =============== will support: * from stb
sql create topic topic_stb_column as select ts, c1, c3 from stb
#sql create topic topic_stb_all as select * from stb
sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
sql create topic topic_ctb_column as select ts, c1, c3 from ct0
sql create topic topic_ctb_all as select * from ct0
sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ct0
sql create topic topic_ntb_column as select ts, c1, c3 from ntb
sql create topic topic_ntb_all as select * from ntb
sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb
sql show tables
if $rows != 101 then
return -1
endi
print =============== insert data
$rowNum = 100
$tstart = 1640966400000 # 2022-01-01 00:00:00.000
$i = 0
while $i < $tbNum
$tb = $tbPrefix . $i
$x = 0
while $x < $rowNum
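# the next three lines compute $c = $x mod 10 via integer division; $c is used as the int column value and in the binary string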
$c = $x / 10
$c = $c * 10
$c = $x - $c
$binary = ' . binary
$binary = $binary . $c
$binary = $binary . '
sql insert into $tb values ($tstart , $c , $x , $binary )
sql insert into ntb values ($tstart , $c , $x , $binary )
$tstart = $tstart + 1
$x = $x + 1
endw
$i = $i + 1
$tstart = 1640966400000
endw
#root@trd02 /home $ tmq_sim --help
# -c Configuration directory, default is
# -d The name of the database for consumer, no default
# -t The topic string for consumer, no default
# -k The key-value string for consumer, no default
# -g showMsgFlag, default is 0
#
$totalMsgCnt = $rowNum * $tbNum
print inserted totalMsgCnt: $totalMsgCnt
# supported key:
# group.id:<xxx>
# enable.auto.commit:<true | false>
# auto.offset.reset:<earliest | latest | none>
# td.connect.ip:<fqdn | ipaddress>
# td.connect.user:root
# td.connect.pass:taosdata
# td.connect.port:6030
# td.connect.db:db
$expect_result = @{consume success: @
$expect_result = $expect_result . $totalMsgCnt
$expect_result = $expect_result . @, @
$expect_result = $expect_result . 0}
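# e.g. with 10000 inserted rows, a single stb topic should yield: {consume success: 10000, 0}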
print expect_result----> $expect_result
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_column" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_column" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 10000, 0}@ then
if $system_content != $expect_result then
return -1
endi
#print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_all" -k "group.id:tg2"
#system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_all" -k "group.id:tg2"
#print cmd result----> $system_content
##if $system_content != @{consume success: 10000, 0}@ then
#if $system_content != $expect_result then
# return -1
#endi
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_function" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_stb_function" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 10000, 0}@ then
if $system_content != $expect_result then
return -1
endi
$expect_result = @{consume success: @
$expect_result = $expect_result . $rowNum
$expect_result = $expect_result . @, @
$expect_result = $expect_result . 0}
print expect_result----> $expect_result
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_column" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_column" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 100, 0}@ then
if $system_content != $expect_result then
return -1
endi
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_all" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_all" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 100, 0}@ then
if $system_content != $expect_result then
return -1
endi
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_function" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ctb_function" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 100, 0}@ then
if $system_content != $expect_result then
return -1
endi
$expect_result = @{consume success: @
$expect_result = $expect_result . $totalMsgCnt
$expect_result = $expect_result . @, @
$expect_result = $expect_result . 0}
print expect_result----> $expect_result
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_column" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_column" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 10000, 0}@ then
if $system_content != $expect_result then
return -1
endi
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_all" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_all" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 10000, 0}@ then
if $system_content != $expect_result then
return -1
endi
print cmd===> system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_function" -k "group.id:tg2"
system_content ../../debug/tests/test/c/tmq_sim -c ../../sim/tsim/cfg -d $dbNamme -t "topic_ntb_function" -k "group.id:tg2"
print cmd result----> $system_content
#if $system_content != @{consume success: 10000, 0}@ then
if $system_content != $expect_result then
return -1
endi
if $loop_cnt == 0 then
$loop_cnt = 1
$vgroups = 4
$dbNamme = d1
goto loop_vgroups
endi
#system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -226,7 +226,7 @@ void loop_consume(tmq_t* tmq) {
int32_t totalRows = 0;
int32_t skipLogNum = 0;
while (running) {
tmq_message_t* tmqMsg = tmq_consumer_poll(tmq, 6000);
tmq_message_t* tmqMsg = tmq_consumer_poll(tmq, 3000);
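// wait up to 3000 (assumed to be milliseconds) for the next message; NULL is returned when nothing arrives in time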
if (tmqMsg) {
totalMsgs++;