Merge branch '3.0' of https://github.com/taosdata/TDengine into feature/3.0_mhli

Author: Minghao Li
Date:   2022-06-23 15:01:36 +08:00
Commit: 8de46b3eaf
101 changed files with 2718 additions and 619 deletions

View File

@ -76,19 +76,41 @@ int32_t init_env() {
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct1 using st1 tags(2000)");
pRes = taos_query(pConn, "insert into ct0 values(now, 1, 2, 'a')");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu2, reason:%s\n", taos_errstr(pRes));
printf("failed to insert into ct0, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct1 using st1 tags(2000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table ct1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct1 values(now, 3, 4, 'b')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct1, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "create table if not exists ct3 using st1 tags(3000)");
if (taos_errno(pRes) != 0) {
printf("failed to create child table tu3, reason:%s\n", taos_errstr(pRes));
printf("failed to create child table ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
pRes = taos_query(pConn, "insert into ct3 values(now, 5, 6, 'c')");
if (taos_errno(pRes) != 0) {
printf("failed to insert into ct3, reason:%s\n", taos_errstr(pRes));
return -1;
}
taos_free_result(pRes);
return 0;
}
@ -168,6 +190,9 @@ tmq_t* build_consumer() {
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "msg.with.table.name", "true");
tmq_conf_set(conf, "enable.auto.commit", "true");
tmq_conf_set(conf, "experiment.use.snapshot", "true");
tmq_conf_set_auto_commit_cb(conf, tmq_commit_cb_print, NULL);
tmq_t* tmq = tmq_consumer_new(conf, NULL, 0);
assert(tmq);
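/* Illustrative sketch (not part of this diff): a minimal consumer that turns on the
 * new snapshot option. The group id and connection keys below are assumptions; only
 * "experiment.use.snapshot" is the key introduced by this change. */
tmq_conf_t* conf = tmq_conf_new();
tmq_conf_set(conf, "group.id", "snapshot_demo");        /* hypothetical group id */
tmq_conf_set(conf, "td.connect.user", "root");
tmq_conf_set(conf, "td.connect.pass", "taosdata");
tmq_conf_set(conf, "experiment.use.snapshot", "true");  /* consume existing data from a tsdb snapshot before the WAL */
tmq_t* consumer = tmq_consumer_new(conf, NULL, 0);
tmq_conf_destroy(conf);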

View File

@ -52,6 +52,9 @@ typedef enum EStreamType {
typedef struct {
SArray* pTableList;
SHashObj* map; // speedup acquire the tableQueryInfo by table uid
void* pTagCond;
void* pTagIndexCond;
uint64_t suid;
} STableListInfo;
typedef struct SColumnDataAgg {
@ -113,8 +116,8 @@ typedef struct SQueryTableDataCond {
int32_t type; // data block load type:
int32_t numOfTWindows;
STimeWindow* twindows;
int32_t startVersion;
int32_t endVersion;
int64_t startVersion;
int64_t endVersion;
} SQueryTableDataCond;
void* blockDataDestroy(SSDataBlock* pBlock);

View File

@ -71,7 +71,8 @@ SEpSet getEpSet_s(SCorEpSet* pEpSet);
#define colDataGetData(p1_, r_) \
((IS_VAR_DATA_TYPE((p1_)->info.type)) ? colDataGetVarData(p1_, r_) : colDataGetNumData(p1_, r_))
#define IS_JSON_NULL(type, data) ((type) == TSDB_DATA_TYPE_JSON && *(data) == TSDB_DATA_TYPE_NULL)
#define IS_JSON_NULL(type, data) ((type) == TSDB_DATA_TYPE_JSON && \
(*(data) == TSDB_DATA_TYPE_NULL || tTagIsJsonNull(data)))
static FORCE_INLINE bool colDataIsNull_s(const SColumnInfoData* pColumnInfoData, uint32_t row) {
if (!pColumnInfoData->hasNull) {

View File

@ -70,6 +70,8 @@ int32_t tGetTSRow(uint8_t *p, STSRow2 *pRow);
// STag
int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag);
void tTagFree(STag *pTag);
bool tTagIsJson(const void *pTag);
bool tTagIsJsonNull(void *tagVal);
bool tTagGet(const STag *pTag, STagVal *pTagVal);
char *tTagValToData(const STagVal *pTagVal, bool isJson);
int32_t tEncodeTag(SEncoder *pEncoder, const STag *pTag);

View File

@ -137,6 +137,8 @@ extern bool tsSmlDataFormat;
// internal
extern int32_t tsTransPullupInterval;
extern int32_t tsMqRebalanceInterval;
extern int32_t tsTtlUnit;
extern int32_t tsTtlPushInterval;
#define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)

View File

@ -696,12 +696,12 @@ typedef struct {
typedef STableCfg STableCfgRsp;
int32_t tSerializeSTableCfgReq(void *buf, int32_t bufLen, STableCfgReq *pReq);
int32_t tDeserializeSTableCfgReq(void *buf, int32_t bufLen, STableCfgReq *pReq);
int32_t tSerializeSTableCfgReq(void* buf, int32_t bufLen, STableCfgReq* pReq);
int32_t tDeserializeSTableCfgReq(void* buf, int32_t bufLen, STableCfgReq* pReq);
int32_t tSerializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp);
int32_t tDeserializeSTableCfgRsp(void *buf, int32_t bufLen, STableCfgRsp *pRsp);
void tFreeSTableCfgRsp(STableCfgRsp *pRsp);
int32_t tSerializeSTableCfgRsp(void* buf, int32_t bufLen, STableCfgRsp* pRsp);
int32_t tDeserializeSTableCfgRsp(void* buf, int32_t bufLen, STableCfgRsp* pRsp);
void tFreeSTableCfgRsp(STableCfgRsp* pRsp);
typedef struct {
char db[TSDB_DB_FNAME_LEN];
@ -834,6 +834,14 @@ typedef struct {
int32_t tSerializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);
int32_t tDeserializeSQnodeListReq(void* buf, int32_t bufLen, SQnodeListReq* pReq);
typedef struct {
int32_t rowNum;
} SDnodeListReq;
int32_t tSerializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq);
int32_t tDeserializeSDnodeListReq(void* buf, int32_t bufLen, SDnodeListReq* pReq);
typedef struct SQueryNodeAddr {
int32_t nodeId; // vgId or qnodeId
SEpSet epSet;
@ -852,6 +860,15 @@ int32_t tSerializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
int32_t tDeserializeSQnodeListRsp(void* buf, int32_t bufLen, SQnodeListRsp* pRsp);
void tFreeSQnodeListRsp(SQnodeListRsp* pRsp);
typedef struct {
SArray* dnodeList; // SArray<SEpSet>
} SDnodeListRsp;
int32_t tSerializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp);
int32_t tDeserializeSDnodeListRsp(void* buf, int32_t bufLen, SDnodeListRsp* pRsp);
void tFreeSDnodeListRsp(SDnodeListRsp* pRsp);
typedef struct {
SArray* pArray; // Array of SUseDbRsp
} SUseDbBatchRsp;
@ -2652,6 +2669,7 @@ typedef struct {
SMsgHead head;
char subKey[TSDB_SUBSCRIBE_KEY_LEN];
int8_t withTbName;
int8_t useSnapshot;
int32_t epoch;
uint64_t reqId;
int64_t consumerId;

View File

@ -81,6 +81,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_DND_SERVER_STATUS, "server-status", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_NET_TEST, "net-test", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_CONFIG_DNODE, "config-dnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_DND_SYSTABLE_RETRIEVE, "dnode-retrieve", NULL, NULL)
TD_NEW_MSG_SEG(TDMT_MND_MSG)
TD_DEF_MSG_TYPE(TDMT_MND_CONNECT, "connect", NULL, NULL)
@ -101,6 +102,7 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_QNODE, "alter-qnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_QNODE, "drop-qnode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_QNODE_LIST, "qnode-list", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DNODE_LIST, "dnode-list", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_CREATE_SNODE, "create-snode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_ALTER_SNODE, "alter-snode", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_DROP_SNODE, "drop-snode", NULL, NULL)
@ -145,13 +147,14 @@ enum {
TD_DEF_MSG_TYPE(TDMT_MND_MQ_TIMER, "mq-tmr", SMTimerReq, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TELEM_TIMER, "telem-tmr", SMTimerReq, SMTimerReq)
TD_DEF_MSG_TYPE(TDMT_MND_TRANS_TIMER, "trans-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_TTL_TIMER, "ttl-tmr", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_TRANS, "kill-trans", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_QUERY, "kill-query", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_KILL_CONN, "kill-conn", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_HEARTBEAT, "heartbeat", SClientHbBatchReq, SClientHbBatchRsp)
TD_DEF_MSG_TYPE(TDMT_MND_STATUS, "status", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SHOW, "show", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_SYSTABLE_RETRIEVE, "mnd-retrieve", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_GRANT, "grant", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_AUTH, "auth", NULL, NULL)
TD_DEF_MSG_TYPE(TDMT_MND_APPLY_MSG, "mnode-apply", NULL, NULL)

View File

@ -33,7 +33,7 @@ struct SDataSink;
struct SSDataBlock;
typedef struct SDeleterRes {
uint64_t uid;
uint64_t suid;
SArray* uidList;
int64_t skey;
int64_t ekey;
@ -41,7 +41,8 @@ typedef struct SDeleterRes {
} SDeleterRes;
typedef struct SDeleterParam {
SArray* pUidList;
uint64_t suid;
SArray* pUidList;
} SDeleterParam;
typedef struct SDataSinkStat {

View File

@ -36,11 +36,13 @@ typedef struct SReadHandle {
void* vnode;
void* mnd;
SMsgCb* pMsgCb;
int8_t initTsdbReader;
} SReadHandle;
enum {
STREAM_DATA_TYPE_SUBMIT_BLOCK = 1,
STREAM_DATA_TYPE_SSDATA_BLOCK = 2,
STREAM_DATA_TYPE_FROM_SNAPSHOT = 3,
};
typedef enum {
@ -56,6 +58,13 @@ typedef enum {
*/
qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle);
/**
* Switch the stream scan to snapshot mode
* @param tinfo
* @return
*/
int32_t qStreamScanSnapshot(qTaskInfo_t tinfo);
/**
* Set the input data block for the stream scan.
* @param tinfo

View File

@ -36,6 +36,10 @@ extern "C" {
#define SHOW_CREATE_TB_RESULT_FIELD1_LEN (TSDB_TABLE_NAME_LEN + VARSTR_HEADER_SIZE)
#define SHOW_CREATE_TB_RESULT_FIELD2_LEN (TSDB_MAX_BINARY_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_COLS 2
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN (TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE)
#define SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN (TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE)
#define PRIVILEGE_TYPE_MASK(n) (1 << n)

View File

@ -226,6 +226,7 @@ typedef enum ENodeType {
QUERY_NODE_PHYSICAL_PLAN_EXCHANGE,
QUERY_NODE_PHYSICAL_PLAN_MERGE,
QUERY_NODE_PHYSICAL_PLAN_SORT,
QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT,
QUERY_NODE_PHYSICAL_PLAN_HASH_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_INTERVAL,
QUERY_NODE_PHYSICAL_PLAN_MERGE_ALIGNED_INTERVAL,

View File

@ -66,6 +66,7 @@ typedef struct SScanLogicNode {
int8_t intervalUnit;
int8_t slidingUnit;
SNode* pTagCond;
SNode* pTagIndexCond;
int8_t triggerType;
int64_t watermark;
int16_t tsColId;
@ -420,6 +421,8 @@ typedef struct SSortPhysiNode {
SNodeList* pTargets;
} SSortPhysiNode;
typedef SSortPhysiNode SGroupSortPhysiNode;
typedef struct SPartitionPhysiNode {
SPhysiNode node;
SNodeList* pExprs; // these are expression list of partition_by_clause
@ -466,6 +469,7 @@ typedef struct SSubplan {
SPhysiNode* pNode; // physical plan of current subplan
SDataSinkNode* pDataSink; // data of the subplan flow into the datasink
SNode* pTagCond;
SNode* pTagIndexCond;
} SSubplan;
typedef enum EExplainMode { EXPLAIN_MODE_DISABLE = 1, EXPLAIN_MODE_STATIC, EXPLAIN_MODE_ANALYZE } EExplainMode;

View File

@ -288,11 +288,11 @@ typedef enum ESqlClause {
} ESqlClause;
typedef struct SDeleteStmt {
ENodeType type; // QUERY_NODE_DELETE_STMT
SNode* pFromTable; // FROM clause
SNode* pWhere; // WHERE clause
SNode* pCountFunc; // count the number of rows affected
SNode* pTagIndexCond; // pWhere divided into pTagIndexCond and timeRange
ENodeType type; // QUERY_NODE_DELETE_STMT
SNode* pFromTable; // FROM clause
SNode* pWhere; // WHERE clause
SNode* pCountFunc; // count the number of rows affected
SNode* pTagCond; // pWhere divided into pTagCond and timeRange
STimeWindow timeRange;
uint8_t precision;
bool deleteZeroRows;
@ -397,7 +397,8 @@ void nodesValueNodeToVariant(const SValueNode* pNode, SVariant* pVal);
char* nodesGetFillModeString(EFillMode mode);
int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc);
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagCond, SNode** pOtherCond);
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
SNode** pOtherCond);
#ifdef __cplusplus
}

View File

@ -32,7 +32,7 @@ enum {
};
typedef struct SDeleteRes {
uint64_t uid;
uint64_t suid;
SArray* uidList;
int64_t skey;
int64_t ekey;

View File

@ -239,17 +239,14 @@ typedef struct {
struct SStreamTask {
int64_t streamId;
int32_t taskId;
int8_t inputType;
int8_t taskStatus;
int8_t execStatus;
int8_t isDataScan;
int8_t execType;
int8_t sinkType;
int8_t dispatchType;
int16_t dispatchMsgType;
int8_t isDataScan;
int8_t taskStatus;
int8_t execStatus;
// node info
int32_t selfChildId;

View File

@ -195,7 +195,6 @@ void walCloseReadHandle(SWalReadHandle *);
int32_t walReadWithHandle(SWalReadHandle *pRead, int64_t ver);
// only for tq usage
// int32_t walReadWithHandle_s(SWalReadHandle *pRead, int64_t ver, SWalReadHead **ppHead);
void walSetReaderCapacity(SWalReadHandle *pRead, int32_t capacity);
int32_t walFetchHead(SWalReadHandle *pRead, int64_t ver, SWalHead *pHead);
int32_t walFetchBody(SWalReadHandle *pRead, SWalHead **ppHead);
@ -211,13 +210,8 @@ void walCloseRef(SWalRef *);
int32_t walRefVer(SWalRef *, int64_t ver);
int32_t walUnrefVer(SWal *);
// deprecated
#if 0
int32_t walRead(SWal *, SWalHead **, int64_t ver);
int32_t walReadWithFp(SWal *, FWalWrite writeFp, int64_t verStart, int32_t readNum);
#endif
// lifecycle check
bool walIsEmpty(SWal *);
int64_t walGetFirstVer(SWal *);
int64_t walGetSnapshotVer(SWal *);
int64_t walGetLastVer(SWal *);

View File

@ -104,8 +104,6 @@ extern "C" {
#include "osTimezone.h"
#include "osEnv.h"
void osDefaultInit();
#ifdef __cplusplus
}
#endif

View File

@ -104,6 +104,8 @@ int32_t cfgAddTimezone(SConfig *pCfg, const char *name, const char *defaultVal);
const char *cfgStypeStr(ECfgSrcType type);
const char *cfgDtypeStr(ECfgDataType type);
void cfgDumpItemValue(SConfigItem *pItem, char* buf, int32_t bufSize, int32_t* pLen);
void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump);
int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl);

View File

@ -443,8 +443,8 @@ enum {
#define VNODE_HANDLE -3
#define BNODE_HANDLE -4
#define TSDB_CONFIG_OPTION_LEN 16
#define TSDB_CONIIG_VALUE_LEN 48
#define TSDB_CONFIG_OPTION_LEN 32
#define TSDB_CONFIG_VALUE_LEN 64
#define TSDB_CONFIG_NUMBER 8
#define QUERY_ID_SIZE 20

View File

@ -1507,7 +1507,7 @@ static int32_t estimateJsonLen(SReqResultInfo* pResultInfo, int32_t numOfCols, i
char* jsonInnerData = data + CHAR_BYTES;
if (jsonInnerType == TSDB_DATA_TYPE_NULL) {
len += (VARSTR_HEADER_SIZE + strlen(TSDB_DATA_NULL_STR_L));
} else if (jsonInnerType & TD_TAG_JSON) {
} else if (tTagIsJson(data)) {
len += (VARSTR_HEADER_SIZE + ((const STag*)(data))->len);
} else if (jsonInnerType == TSDB_DATA_TYPE_NCHAR) { // value -> "value"
len += varDataTLen(jsonInnerData) + CHAR_BYTES * 2;
@ -1592,7 +1592,7 @@ static int32_t doConvertJson(SReqResultInfo* pResultInfo, int32_t numOfCols, int
if (jsonInnerType == TSDB_DATA_TYPE_NULL) {
sprintf(varDataVal(dst), "%s", TSDB_DATA_NULL_STR_L);
varDataSetLen(dst, strlen(varDataVal(dst)));
} else if (jsonInnerType & TD_TAG_JSON) {
} else if (tTagIsJson(data)) {
char* jsonString = parseTagDatatoJson(data);
STR_TO_VARSTR(dst, jsonString);
taosMemoryFree(jsonString);

View File

@ -54,6 +54,7 @@ struct tmq_conf_t {
int8_t autoCommit;
int8_t resetOffset;
int8_t withTbName;
int8_t useSnapshot;
uint16_t port;
int32_t autoCommitInterval;
char* ip;
@ -69,6 +70,7 @@ struct tmq_t {
char groupId[TSDB_CGROUP_LEN];
char clientId[256];
int8_t withTbName;
int8_t useSnapshot;
int8_t autoCommit;
int32_t autoCommitInterval;
int32_t resetOffsetCfg;
@ -282,6 +284,18 @@ tmq_conf_res_t tmq_conf_set(tmq_conf_t* conf, const char* key, const char* value
}
}
if (strcmp(key, "experiment.use.snapshot") == 0) {
if (strcmp(value, "true") == 0) {
conf->useSnapshot = true;
return TMQ_CONF_OK;
} else if (strcmp(value, "false") == 0) {
conf->useSnapshot = false;
return TMQ_CONF_OK;
} else {
return TMQ_CONF_INVALID;
}
}
if (strcmp(key, "td.connect.ip") == 0) {
conf->ip = strdup(value);
return TMQ_CONF_OK;
@ -953,6 +967,7 @@ tmq_t* tmq_consumer_new(tmq_conf_t* conf, char* errstr, int32_t errstrLen) {
strcpy(pTmq->clientId, conf->clientId);
strcpy(pTmq->groupId, conf->groupId);
pTmq->withTbName = conf->withTbName;
pTmq->useSnapshot = conf->useSnapshot;
pTmq->autoCommit = conf->autoCommit;
pTmq->autoCommitInterval = conf->autoCommitInterval;
pTmq->commitCb = conf->commitCb;
@ -1534,6 +1549,8 @@ SMqPollReq* tmqBuildConsumeReqImpl(tmq_t* tmq, int64_t timeout, SMqClientTopic*
pReq->currentOffset = reqOffset;
pReq->reqId = generateRequestId();
pReq->useSnapshot = tmq->useSnapshot;
pReq->head.vgId = htonl(pVg->vgId);
pReq->head.contLen = htonl(sizeof(SMqPollReq));
return pReq;

View File

@ -231,7 +231,13 @@ static const SSysDbTableSchema transSchema[] = {
static const SSysDbTableSchema configSchema[] = {
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "value", .bytes = TSDB_CONIIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysDbTableSchema variablesSchema[] = {
{.name = "dnode_id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
{.name = "name", .bytes = TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
{.name = "value", .bytes = TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
};
static const SSysTableMeta infosMeta[] = {
@ -253,6 +259,7 @@ static const SSysTableMeta infosMeta[] = {
{TSDB_INS_TABLE_LICENCES, grantsSchema, tListLen(grantsSchema)},
{TSDB_INS_TABLE_VGROUPS, vgroupsSchema, tListLen(vgroupsSchema)},
{TSDB_INS_TABLE_CONFIGS, configSchema, tListLen(configSchema)},
{TSDB_INS_TABLE_DNODE_VARIABLES, variablesSchema, tListLen(variablesSchema)},
};
static const SSysDbTableSchema connectionsSchema[] = {

View File

@ -110,7 +110,7 @@ int32_t getJsonValueLen(const char* data) {
dataLen = DOUBLE_BYTES + CHAR_BYTES;
} else if (*data == TSDB_DATA_TYPE_BOOL) {
dataLen = CHAR_BYTES + CHAR_BYTES;
} else if (*data & TD_TAG_JSON) { // json string
} else if (tTagIsJson(data)) { // json string
dataLen = ((STag*)(data))->len;
} else {
ASSERT(0);

View File

@ -924,6 +924,18 @@ static int32_t tGetTagVal(uint8_t *p, STagVal *pTagVal, int8_t isJson) {
return n;
}
bool tTagIsJson(const void *pTag){
return (((const STag *)pTag)->flags & TD_TAG_JSON);
}
bool tTagIsJsonNull(void *data){
STag *pTag = (STag*)data;
int8_t isJson = tTagIsJson(pTag);
if(!isJson) return false;
return ((STag*)data)->nTag == 0;
}
int32_t tTagNew(SArray *pArray, int32_t version, int8_t isJson, STag **ppTag) {
int32_t code = 0;
uint8_t *p = NULL;

View File

@ -187,6 +187,9 @@ bool tsStartUdfd = true;
// internal
int32_t tsTransPullupInterval = 2;
int32_t tsMqRebalanceInterval = 2;
int32_t tsTtlUnit = 86400;
int32_t tsTtlPushInterval = 60;
void taosAddDataDir(int32_t index, char *v1, int32_t level, int32_t primary) {
tstrncpy(tsDiskCfg[index].dir, v1, TSDB_FILENAME_LEN);
@ -467,6 +470,8 @@ static int32_t taosAddServerCfg(SConfig *pCfg) {
if (cfgAddInt32(pCfg, "transPullupInterval", tsTransPullupInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "mqRebalanceInterval", tsMqRebalanceInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlUnit", tsTtlUnit, 1, 86400*365, 1) != 0) return -1;
if (cfgAddInt32(pCfg, "ttlPushInterval", tsTtlPushInterval, 1, 10000, 1) != 0) return -1;
if (cfgAddBool(pCfg, "udf", tsStartUdfd, 0) != 0) return -1;
return 0;
@ -619,6 +624,8 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
tsTransPullupInterval = cfgGetItem(pCfg, "transPullupInterval")->i32;
tsMqRebalanceInterval = cfgGetItem(pCfg, "mqRebalanceInterval")->i32;
tsTtlUnit = cfgGetItem(pCfg, "ttlUnit")->i32;
tsTtlPushInterval = cfgGetItem(pCfg, "ttlPushInterval")->i32;
tsStartUdfd = cfgGetItem(pCfg, "udf")->bval;
@ -631,7 +638,7 @@ static int32_t taosSetServerCfg(SConfig *pCfg) {
int32_t taosCreateLog(const char *logname, int32_t logFileNum, const char *cfgDir, const char **envCmd,
const char *envFile, char *apolloUrl, SArray *pArgs, bool tsc) {
osDefaultInit();
if (tsCfg == NULL) osDefaultInit();
SConfig *pCfg = cfgInit();
if (pCfg == NULL) return -1;

View File

@ -2194,6 +2194,32 @@ int32_t tDeserializeSQnodeListReq(void *buf, int32_t bufLen, SQnodeListReq *pReq
return 0;
}
int32_t tSerializeSDnodeListReq(void *buf, int32_t bufLen, SDnodeListReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
if (tEncodeI32(&encoder, pReq->rowNum) < 0) return -1;
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSDnodeListReq(void *buf, int32_t bufLen, SDnodeListReq *pReq) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
if (tDecodeI32(&decoder, &pReq->rowNum) < 0) return -1;
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
int32_t tSerializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
@ -2237,6 +2263,50 @@ int32_t tDeserializeSQnodeListRsp(void *buf, int32_t bufLen, SQnodeListRsp *pRsp
void tFreeSQnodeListRsp(SQnodeListRsp *pRsp) { taosArrayDestroy(pRsp->qnodeList); }
int32_t tSerializeSDnodeListRsp(void *buf, int32_t bufLen, SDnodeListRsp *pRsp) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);
if (tStartEncode(&encoder) < 0) return -1;
int32_t num = taosArrayGetSize(pRsp->dnodeList);
if (tEncodeI32(&encoder, num) < 0) return -1;
for (int32_t i = 0; i < num; ++i) {
SEpSet *pEpSet = taosArrayGet(pRsp->dnodeList, i);
if (tEncodeSEpSet(&encoder, pEpSet) < 0) return -1;
}
tEndEncode(&encoder);
int32_t tlen = encoder.pos;
tEncoderClear(&encoder);
return tlen;
}
int32_t tDeserializeSDnodeListRsp(void *buf, int32_t bufLen, SDnodeListRsp *pRsp) {
SDecoder decoder = {0};
tDecoderInit(&decoder, buf, bufLen);
if (tStartDecode(&decoder) < 0) return -1;
int32_t num = 0;
if (tDecodeI32(&decoder, &num) < 0) return -1;
if (NULL == pRsp->dnodeList) {
pRsp->dnodeList = taosArrayInit(num, sizeof(SEpSet));
if (NULL == pRsp->dnodeList) return -1;
}
for (int32_t i = 0; i < num; ++i) {
SEpSet epSet = {0};
if (tDecodeSEpSet(&decoder, &epSet) < 0) return -1;
taosArrayPush(pRsp->dnodeList, &epSet);
}
tEndDecode(&decoder);
tDecoderClear(&decoder);
return 0;
}
void tFreeSDnodeListRsp(SDnodeListRsp *pRsp) { taosArrayDestroy(pRsp->dnodeList); }
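/* Round-trip sketch (illustrative, not from this commit; the endpoint values are made
 * up): size the buffer with a NULL first call, encode, then decode back. */
SDnodeListRsp rspOut = {0};
rspOut.dnodeList = taosArrayInit(1, sizeof(SEpSet));
SEpSet ep = {.numOfEps = 1};
tstrncpy(ep.eps[0].fqdn, "localhost", TSDB_FQDN_LEN);   /* hypothetical fqdn */
ep.eps[0].port = 6030;                                  /* hypothetical port */
taosArrayPush(rspOut.dnodeList, &ep);

int32_t len = tSerializeSDnodeListRsp(NULL, 0, &rspOut);  /* NULL buffer returns the required length */
void   *buf = taosMemoryMalloc(len);
tSerializeSDnodeListRsp(buf, len, &rspOut);

SDnodeListRsp rspIn = {0};
tDeserializeSDnodeListRsp(buf, len, &rspIn);              /* rebuilds the SArray<SEpSet> */

taosMemoryFree(buf);
tFreeSDnodeListRsp(&rspOut);
tFreeSDnodeListRsp(&rspIn);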
int32_t tSerializeSCompactDbReq(void *buf, int32_t bufLen, SCompactDbReq *pReq) {
SEncoder encoder = {0};
tEncoderInit(&encoder, buf, bufLen);

View File

@ -45,6 +45,7 @@ int32_t dmProcessConfigReq(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessAuthRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessGrantRsp(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessServerRunStatus(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);
// dmWorker.c
int32_t dmPutNodeMsgToMgmtQueue(SDnodeMgmt *pMgmt, SRpcMsg *pMsg);

View File

@ -15,6 +15,10 @@
#define _DEFAULT_SOURCE
#include "dmInt.h"
#include "systable.h"
extern SConfig *tsCfg;
static void dmUpdateDnodeCfg(SDnodeMgmt *pMgmt, SDnodeCfg *pCfg) {
if (pMgmt->pData->dnodeId == 0 || pMgmt->pData->clusterId == 0) {
@ -175,6 +179,130 @@ int32_t dmProcessServerRunStatus(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
return 0;
}
SSDataBlock* dmBuildVariablesBlock(void) {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
size_t size = 0;
const SSysTableMeta* pMeta = NULL;
getInfosDbMeta(&pMeta, &size);
int32_t index = 0;
for (int32_t i = 0; i < size; ++i) {
if (strcmp(pMeta[i].name, TSDB_INS_TABLE_DNODE_VARIABLES) == 0) {
index = i;
break;
}
}
pBlock->pDataBlock = taosArrayInit(pMeta[index].colNum, sizeof(SColumnInfoData));
for (int32_t i = 0; i < pMeta[index].colNum; ++i) {
SColumnInfoData colInfoData = {0};
colInfoData.info.colId = i + 1;
colInfoData.info.type = pMeta[index].schema[i].type;
colInfoData.info.bytes = pMeta[index].schema[i].bytes;
taosArrayPush(pBlock->pDataBlock, &colInfoData);
}
pBlock->info.numOfCols = pMeta[index].colNum;
pBlock->info.hasVarCol = true;
return pBlock;
}
int32_t dmAppendVariablesToBlock(SSDataBlock* pBlock, int32_t dnodeId) {
int32_t numOfCfg = taosArrayGetSize(tsCfg->array);
int32_t numOfRows = 0;
blockDataEnsureCapacity(pBlock, numOfCfg);
for (int32_t i = 0, c = 0; i < numOfCfg; ++i, c = 0) {
SConfigItem *pItem = taosArrayGet(tsCfg->array, i);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataAppend(pColInfo, i, (const char *)&dnodeId, false);
char name[TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(name, pItem->name, TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE);
pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataAppend(pColInfo, i, name, false);
char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t valueLen = 0;
cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_VALUE_LEN, &valueLen);
varDataSetLen(value, valueLen);
pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataAppend(pColInfo, i, value, false);
numOfRows++;
}
pBlock->info.rows = numOfRows;
return TSDB_CODE_SUCCESS;
}
int32_t dmProcessRetrieve(SDnodeMgmt *pMgmt, SRpcMsg *pMsg) {
int32_t size = 0;
int32_t rowsRead = 0;
SRetrieveTableReq retrieveReq = {0};
if (tDeserializeSRetrieveTableReq(pMsg->pCont, pMsg->contLen, &retrieveReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
if (strcasecmp(retrieveReq.tb, TSDB_INS_TABLE_DNODE_VARIABLES)) {
terrno = TSDB_CODE_INVALID_MSG;
return -1;
}
SSDataBlock* pBlock = dmBuildVariablesBlock();
dmAppendVariablesToBlock(pBlock, pMgmt->pData->dnodeId);
size = sizeof(SRetrieveMetaTableRsp) + sizeof(int32_t) + sizeof(SSysTableSchema) * pBlock->info.numOfCols +
blockDataGetSize(pBlock) + blockDataGetSerialMetaSize(pBlock->info.numOfCols);
SRetrieveMetaTableRsp *pRsp = rpcMallocCont(size);
if (pRsp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
dError("failed to retrieve data since %s", terrstr());
blockDataDestroy(pBlock);
return -1;
}
char *pStart = pRsp->data;
*(int32_t *)pStart = htonl(pBlock->info.numOfCols);
pStart += sizeof(int32_t); // number of columns
for (int32_t i = 0; i < pBlock->info.numOfCols; ++i) {
SSysTableSchema *pSchema = (SSysTableSchema *)pStart;
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, i);
pSchema->bytes = htonl(pColInfo->info.bytes);
pSchema->colId = htons(pColInfo->info.colId);
pSchema->type = pColInfo->info.type;
pStart += sizeof(SSysTableSchema);
}
int32_t len = 0;
blockCompressEncode(pBlock, pStart, &len, pBlock->info.numOfCols, false);
pRsp->numOfRows = htonl(pBlock->info.rows);
pRsp->precision = TSDB_TIME_PRECISION_MILLI; // millisecond time precision
pRsp->completed = 1;
pMsg->info.rsp = pRsp;
pMsg->info.rspLen = size;
dDebug("dnode variables retrieve completed");
blockDataDestroy(pBlock);
return TSDB_CODE_SUCCESS;
}
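/* Wire-layout note (sketch of what the code above assembles): the response is
 *   [SRetrieveMetaTableRsp header][int32 column count][SSysTableSchema x numOfCols][compressed block payload]
 * with htonl/htons applied to the multi-byte header and schema fields before sending. */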
SArray *dmGetMsgHandles() {
int32_t code = -1;
SArray *pArray = taosArrayInit(16, sizeof(SMgmtHandle));
@ -191,6 +319,7 @@ SArray *dmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_DND_DROP_BNODE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_CONFIG_DNODE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_SERVER_STATUS, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_DND_SYSTABLE_RETRIEVE, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;
// Requests handled by MNODE
if (dmSetMgmtHandle(pArray, TDMT_MND_GRANT_RSP, dmPutNodeMsgToMgmtQueue, 0) == NULL) goto _OVER;

View File

@ -141,6 +141,9 @@ static void dmProcessMgmtQueue(SQueueInfo *pInfo, SRpcMsg *pMsg) {
case TDMT_DND_SERVER_STATUS:
code = dmProcessServerRunStatus(pMgmt, pMsg);
break;
case TDMT_DND_SYSTABLE_RETRIEVE:
code = dmProcessRetrieve(pMgmt, pMsg);
break;
default:
terrno = TSDB_CODE_MSG_NOT_PROCESSED;
break;

View File

@ -161,6 +161,7 @@ SArray *mmGetMsgHandles() {
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_QNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_QNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_QNODE_LIST, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DNODE_LIST, mmPutMsgToReadQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_SNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_DROP_SNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;
if (dmSetMgmtHandle(pArray, TDMT_MND_CREATE_BNODE, mmPutMsgToWriteQueue, 0) == NULL) goto _OVER;

View File

@ -85,6 +85,7 @@ static void dmProcessRpcMsg(SDnode *pDnode, SRpcMsg *pRpc, SEpSet *pEpSet) {
dmProcessNetTestReq(pDnode, pRpc);
return;
case TDMT_MND_SYSTABLE_RETRIEVE_RSP:
case TDMT_DND_SYSTABLE_RETRIEVE_RSP:
case TDMT_VND_FETCH_RSP:
qWorkerProcessFetchRsp(NULL, NULL, pRpc, 0);
return;

View File

@ -47,6 +47,7 @@ static SSdbRow *mndDnodeActionDecode(SSdbRaw *pRaw);
static int32_t mndDnodeActionInsert(SSdb *pSdb, SDnodeObj *pDnode);
static int32_t mndDnodeActionDelete(SSdb *pSdb, SDnodeObj *pDnode);
static int32_t mndDnodeActionUpdate(SSdb *pSdb, SDnodeObj *pOld, SDnodeObj *pNew);
static int32_t mndProcessDnodeListReq(SRpcMsg *pReq);
static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq);
static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq);
@ -76,6 +77,7 @@ int32_t mndInitDnode(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_MND_CONFIG_DNODE, mndProcessConfigDnodeReq);
mndSetMsgHandle(pMnode, TDMT_DND_CONFIG_DNODE_RSP, mndProcessConfigDnodeRsp);
mndSetMsgHandle(pMnode, TDMT_MND_STATUS, mndProcessStatusReq);
mndSetMsgHandle(pMnode, TDMT_MND_DNODE_LIST, mndProcessDnodeListReq);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndRetrieveConfigs);
mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CONFIGS, mndCancelGetNextConfig);
@ -499,6 +501,60 @@ _OVER:
return code;
}
static int32_t mndProcessDnodeListReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
SDnodeObj *pObj = NULL;
void *pIter = NULL;
SDnodeListRsp rsp = {0};
int32_t code = -1;
rsp.dnodeList = taosArrayInit(5, sizeof(SEpSet));
if (NULL == rsp.dnodeList) {
mError("failed to alloc epSet while process dnode list req");
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
while (1) {
pIter = sdbFetch(pSdb, SDB_DNODE, pIter, (void **)&pObj);
if (pIter == NULL) break;
SEpSet epSet = {0};
epSet.numOfEps = 1;
tstrncpy(epSet.eps[0].fqdn, pObj->fqdn, TSDB_FQDN_LEN);
epSet.eps[0].port = pObj->port;
(void)taosArrayPush(rsp.dnodeList, &epSet);
sdbRelease(pSdb, pObj);
}
int32_t rspLen = tSerializeSDnodeListRsp(NULL, 0, &rsp);
void *pRsp = rpcMallocCont(rspLen);
if (pRsp == NULL) {
terrno = TSDB_CODE_OUT_OF_MEMORY;
goto _OVER;
}
tSerializeSDnodeListRsp(pRsp, rspLen, &rsp);
pReq->info.rspLen = rspLen;
pReq->info.rsp = pRsp;
code = 0;
_OVER:
if (code != 0) {
mError("failed to get dnode list since %s", terrstr());
}
tFreeSDnodeListRsp(&rsp);
return code;
}
static int32_t mndProcessCreateDnodeReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
@ -580,6 +636,7 @@ static int32_t mndDropDnode(SMnode *pMnode, SRpcMsg *pReq, SDnodeObj *pDnode, SM
mInfo("trans:%d, %d vnodes on dnode:%d will be dropped", pTrans->id, numOfVnodes, pDnode->id);
if (mndSetMoveVgroupsInfoToTrans(pMnode, pTrans, pDnode->id) != 0) goto _OVER;
}
if (mndTransPrepare(pMnode, pTrans) != 0) goto _OVER;
code = 0;
@ -591,30 +648,30 @@ _OVER:
}
static int32_t mndProcessDropDnodeReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
SDnodeObj *pDnode = NULL;
SMnodeObj *pMObj = NULL;
SQnodeObj *pQObj = NULL;
SSnodeObj *pSObj = NULL;
SMDropMnodeReq dropReq = {0};
SMnode *pMnode = pReq->info.node;
int32_t code = -1;
SDnodeObj *pDnode = NULL;
SMnodeObj *pMObj = NULL;
SQnodeObj *pQObj = NULL;
SSnodeObj *pSObj = NULL;
SDropDnodeReq dropReq = {0};
if (tDeserializeSCreateDropMQSBNodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
if (tDeserializeSDropDnodeReq(pReq->pCont, pReq->contLen, &dropReq) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
mInfo("dnode:%d, start to drop", dropReq.dnodeId);
if (dropReq.dnodeId <= 0) {
terrno = TSDB_CODE_MND_INVALID_DNODE_ID;
goto _OVER;
}
mInfo("dnode:%d, start to drop, ep:%s:%d", dropReq.dnodeId, dropReq.fqdn, dropReq.port);
pDnode = mndAcquireDnode(pMnode, dropReq.dnodeId);
if (pDnode == NULL) {
terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
goto _OVER;
char ep[TSDB_EP_LEN + 1] = {0};
snprintf(ep, sizeof(ep), "%s:%d", dropReq.fqdn, dropReq.port);
pDnode = mndAcquireDnodeByEp(pMnode, ep);
if (pDnode == NULL) {
terrno = TSDB_CODE_MND_DNODE_NOT_EXIST;
goto _OVER;
}
}
pQObj = mndAcquireQnode(pMnode, dropReq.dnodeId);
@ -669,6 +726,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
return -1;
}
mInfo("dnode:%d, start to config, option:%s, value:%s", cfgReq.dnodeId, cfgReq.config, cfgReq.value);
SDnodeObj *pDnode = mndAcquireDnode(pMnode, cfgReq.dnodeId);
if (pDnode == NULL) {
mError("dnode:%d, failed to config since %s ", cfgReq.dnodeId, terrstr());
@ -685,7 +743,7 @@ static int32_t mndProcessConfigDnodeReq(SRpcMsg *pReq) {
tSerializeSMCfgDnodeReq(pBuf, bufLen, &cfgReq);
mDebug("dnode:%d, send config req to dnode, app:%p", cfgReq.dnodeId, pReq->info.ahandle);
SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen, .info = pReq->info};
SRpcMsg rpcMsg = {.msgType = TDMT_DND_CONFIG_DNODE, .pCont = pBuf, .contLen = bufLen};
return tmsgSendReq(&epSet, &rpcMsg);
}
@ -699,28 +757,28 @@ static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
int32_t totalRows = 0;
int32_t numOfRows = 0;
char *cfgOpts[TSDB_CONFIG_NUMBER] = {0};
char cfgVals[TSDB_CONFIG_NUMBER][TSDB_CONIIG_VALUE_LEN + 1] = {0};
char cfgVals[TSDB_CONFIG_NUMBER][TSDB_CONFIG_VALUE_LEN + 1] = {0};
char *pWrite = NULL;
int32_t cols = 0;
cfgOpts[totalRows] = "statusInterval";
snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%d", tsStatusInterval);
snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%d", tsStatusInterval);
totalRows++;
cfgOpts[totalRows] = "timezone";
snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%s", tsTimezoneStr);
snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%s", tsTimezoneStr);
totalRows++;
cfgOpts[totalRows] = "locale";
snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%s", tsLocale);
snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%s", tsLocale);
totalRows++;
cfgOpts[totalRows] = "charset";
snprintf(cfgVals[totalRows], TSDB_CONIIG_VALUE_LEN, "%s", tsCharset);
snprintf(cfgVals[totalRows], TSDB_CONFIG_VALUE_LEN, "%s", tsCharset);
totalRows++;
char buf[TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE] = {0};
char bufVal[TSDB_CONIIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0};
char bufVal[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0};
for (int32_t i = 0; i < totalRows; i++) {
cols = 0;
@ -729,7 +787,7 @@ static int32_t mndRetrieveConfigs(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *p
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)buf, false);
STR_WITH_MAXSIZE_TO_VARSTR(bufVal, cfgVals[i], TSDB_CONIIG_VALUE_LEN);
STR_WITH_MAXSIZE_TO_VARSTR(bufVal, cfgVals[i], TSDB_CONFIG_VALUE_LEN);
pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
colDataAppend(pColInfo, numOfRows, (const char *)bufVal, false);

View File

@ -65,6 +65,13 @@ static void mndPullupTrans(SMnode *pMnode) {
}
}
static void mndTtlTimer(SMnode *pMnode) {
int32_t contLen = 0;
void *pReq = mndBuildTimerMsg(&contLen);
SRpcMsg rpcMsg = {.msgType = TDMT_MND_TTL_TIMER, .pCont = pReq, .contLen = contLen};
tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
}
static void mndCalMqRebalance(SMnode *pMnode) {
int32_t contLen = 0;
void *pReq = mndBuildTimerMsg(&contLen);
@ -83,41 +90,6 @@ static void mndPullupTelem(SMnode *pMnode) {
}
}
static void mndPushTtlTime(SMnode *pMnode) {
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
int32_t contLen = sizeof(SMsgHead) + sizeof(int32_t);
SMsgHead *pHead = rpcMallocCont(contLen);
if (pHead == NULL) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pVgroup);
continue;
}
pHead->contLen = htonl(contLen);
pHead->vgId = htonl(pVgroup->vgId);
int32_t t = taosGetTimestampSec();
*(int32_t *)(POINTER_SHIFT(pHead, sizeof(SMsgHead))) = htonl(t);
SRpcMsg rpcMsg = {.msgType = TDMT_VND_DROP_TTL_TABLE, .pCont = pHead, .contLen = contLen};
SEpSet epSet = mndGetVgroupEpset(pMnode, pVgroup);
int32_t code = tmsgSendReq(&epSet, &rpcMsg);
if (code != 0) {
mError("failed to send ttl time seed msg, code:0x%x", code);
} else {
mInfo("send ttl time seed msg, time:%d", t);
}
sdbRelease(pSdb, pVgroup);
}
}
static void *mndThreadFp(void *param) {
SMnode *pMnode = param;
int64_t lastTime = 0;
@ -125,14 +97,13 @@ static void *mndThreadFp(void *param) {
while (1) {
lastTime++;
if (lastTime % (864000) == 0) { // sleep 1 day for ttl
mndPushTtlTime(pMnode);
}
taosMsleep(100);
if (mndGetStop(pMnode)) break;
if (lastTime % (tsTransPullupInterval * 10) == 1) {
mndTtlTimer(pMnode);
}
if (lastTime % (tsTransPullupInterval * 10) == 0) {
mndPullupTrans(pMnode);
}
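/* Cadence sketch: the loop sleeps 100 ms per pass, so lastTime ticks ten times per
 * second; with the default tsTransPullupInterval of 2, the "== 1" branch above posts
 * TDMT_MND_TTL_TIMER roughly every 2 seconds, one tick offset from the trans pullup. */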
@ -558,12 +529,12 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
if (!IsReq(pMsg)) return 0;
if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0;
if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER ||
pMsg->msgType == TDMT_MND_TRANS_TIMER) {
pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) {
return -1;
}
const STraceId *trace = &pMsg->info.traceId;
mGError("msg:%p, failed to check mnode state since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pMsg->msgType));
mError("msg:%p, failed to check mnode state since %s, type:%s", pMsg, terrstr(), TMSG_INFO(pMsg->msgType));
SEpSet epSet = {0};
mndGetMnodeEpSet(pMsg->info.node, &epSet);
@ -584,7 +555,7 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
static int32_t mndCheckMsgContent(SRpcMsg *pMsg) {
if (!IsReq(pMsg)) return 0;
if (pMsg->contLen != 0 && pMsg->pCont != NULL) return 0;
const STraceId *trace = &pMsg->info.traceId;
mGError("msg:%p, failed to check msg, cont:%p contLen:%d, app:%p type:%s", pMsg, pMsg->pCont, pMsg->contLen,
pMsg->info.ahandle, TMSG_INFO(pMsg->msgType));

View File

@ -270,10 +270,8 @@ int32_t mndAddShuffleSinkTasksToStream(SMnode* pMnode, STrans* pTrans, SStreamOb
pTask->nodeId = pVgroup->vgId;
pTask->epSet = mndGetVgroupEpset(pMnode, pVgroup);
pTask->isDataScan = 0;
// source
pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
pTask->isDataScan = 0;
// exec
pTask->execType = TASK_EXEC__NONE;
@ -320,10 +318,8 @@ int32_t mndAddFixedSinkTaskToStream(SMnode* pMnode, STrans* pTrans, SStreamObj*
#endif
pTask->epSet = mndGetVgroupEpset(pMnode, &pStream->fixedSinkVg);
pTask->isDataScan = 0;
// source
pTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
pTask->isDataScan = 0;
// exec
pTask->execType = TASK_EXEC__NONE;
@ -392,12 +388,10 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
pInnerTask = tNewSStreamTask(pStream->uid);
mndAddTaskToTaskSet(taskInnerLevel, pInnerTask);
pInnerTask->isDataScan = 0;
pInnerTask->childEpInfo = taosArrayInit(0, sizeof(void*));
// input
pInnerTask->inputType = TASK_INPUT_TYPE__DATA_BLOCK;
// source
pInnerTask->isDataScan = 0;
// trigger
pInnerTask->triggerParam = pStream->triggerParam;
@ -458,11 +452,9 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
mndAddTaskToTaskSet(taskSourceLevel, pTask);
// source
pTask->isDataScan = 1;
// input
pTask->inputType = TASK_INPUT_TYPE__SUMBIT_BLOCK;
// add fixed vg dispatch
pTask->sinkType = TASK_SINK__NONE;
pTask->dispatchMsgType = TDMT_STREAM_TASK_DISPATCH;
@ -517,11 +509,9 @@ int32_t mndScheduleStream(SMnode* pMnode, STrans* pTrans, SStreamObj* pStream) {
SStreamTask* pTask = tNewSStreamTask(pStream->uid);
mndAddTaskToTaskSet(taskOneLevel, pTask);
// source
pTask->isDataScan = 1;
// input
pTask->inputType = TASK_INPUT_TYPE__SUMBIT_BLOCK;
// trigger
pTask->triggerParam = pStream->triggerParam;

View File

@ -37,6 +37,7 @@ static SSdbRow *mndStbActionDecode(SSdbRaw *pRaw);
static int32_t mndStbActionInsert(SSdb *pSdb, SStbObj *pStb);
static int32_t mndStbActionDelete(SSdb *pSdb, SStbObj *pStb);
static int32_t mndStbActionUpdate(SSdb *pSdb, SStbObj *pOld, SStbObj *pNew);
static int32_t mndProcessTtlTimer(SRpcMsg *pReq);
static int32_t mndProcessCreateStbReq(SRpcMsg *pReq);
static int32_t mndProcessAlterStbReq(SRpcMsg *pReq);
static int32_t mndProcessDropStbReq(SRpcMsg *pReq);
@ -63,6 +64,7 @@ int32_t mndInitStb(SMnode *pMnode) {
mndSetMsgHandle(pMnode, TDMT_VND_ALTER_STB_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_VND_DROP_STB_RSP, mndTransProcessRsp);
mndSetMsgHandle(pMnode, TDMT_MND_TABLE_META, mndProcessTableMetaReq);
mndSetMsgHandle(pMnode, TDMT_MND_TTL_TIMER, mndProcessTtlTimer);
mndSetMsgHandle(pMnode, TDMT_MND_TABLE_CFG, mndProcessTableCfgReq);
mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_STB, mndRetrieveStb);
@ -799,6 +801,43 @@ int32_t mndAddStbToTrans(SMnode *pMnode, STrans *pTrans, SDbObj *pDb, SStbObj *p
return 0;
}
static int32_t mndProcessTtlTimer(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
SSdb *pSdb = pMnode->pSdb;
SVgObj *pVgroup = NULL;
void *pIter = NULL;
while (1) {
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
int32_t contLen = sizeof(SMsgHead) + sizeof(int32_t);
SMsgHead *pHead = rpcMallocCont(contLen);
if (pHead == NULL) {
sdbCancelFetch(pSdb, pIter);
sdbRelease(pSdb, pVgroup);
continue;
}
pHead->contLen = htonl(contLen);
pHead->vgId = htonl(pVgroup->vgId);
int32_t t = taosGetTimestampSec();
*(int32_t *)((char *)pHead + sizeof(SMsgHead)) = htonl(t);
SRpcMsg rpcMsg = {.msgType = TDMT_VND_DROP_TTL_TABLE, .pCont = pHead, .contLen = contLen};
SEpSet epSet = mndGetVgroupEpset(pMnode, pVgroup);
int32_t code = tmsgSendReq(&epSet, &rpcMsg);
if (code != 0) {
mError("failed to send ttl time seed, code:0x%x", code);
} else {
mDebug("send ttl time seed success, time:%d", t);
}
sdbRelease(pSdb, pVgroup);
}
return 0;
}
static int32_t mndProcessCreateStbReq(SRpcMsg *pReq) {
SMnode *pMnode = pReq->info.node;
int32_t code = -1;

View File

@ -56,6 +56,7 @@ static bool mndCannotExecuteTransAction(SMnode *pMnode) { return !pMnode->dep
static void mndTransSendRpcRsp(SMnode *pMnode, STrans *pTrans);
static int32_t mndProcessTransReq(SRpcMsg *pReq);
static int32_t mndProcessTtl(SRpcMsg *pReq);
static int32_t mndProcessKillTransReq(SRpcMsg *pReq);
static int32_t mndRetrieveTrans(SRpcMsg *pReq, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);

View File

@ -1553,10 +1553,11 @@ static int32_t mndSetBalanceVgroupInfoToTrans(SMnode *pMnode, STrans *pTrans, SD
static int32_t mndBalanceVgroupBetweenDnode(SMnode *pMnode, STrans *pTrans, SDnodeObj *pSrc, SDnodeObj *pDst) {
void *pIter = NULL;
int32_t code = -1;
SSdb *pSdb = pMnode->pSdb;
while (1) {
SVgObj *pVgroup = NULL;
pIter = sdbFetch(pMnode->pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
pIter = sdbFetch(pSdb, SDB_VGROUP, pIter, (void **)&pVgroup);
if (pIter == NULL) break;
bool existInSrc = false;
@ -1568,13 +1569,15 @@ static int32_t mndBalanceVgroupBetweenDnode(SMnode *pMnode, STrans *pTrans, SDno
}
if (!existInSrc || existInDst) {
sdbRelease(pMnode->pSdb, pVgroup);
sdbRelease(pSdb, pVgroup);
continue;
}
SDbObj *pDb = mndAcquireDb(pMnode, pVgroup->dbName);
code = mndSetBalanceVgroupInfoToTrans(pMnode, pTrans, pDb, pVgroup, pSrc, pDst);
mndReleaseDb(pMnode, pDb);
sdbRelease(pMnode->pSdb, pVgroup);
sdbRelease(pSdb, pVgroup);
sdbCancelFetch(pSdb, pIter);
break;
}
@ -1593,15 +1596,25 @@ static int32_t mndBalanceVgroup(SMnode *pMnode, SRpcMsg *pReq, SArray *pArray) {
while (1) {
taosArraySort(pArray, (__compar_fn_t)mndCompareDnodeVnodes);
SDnodeObj *pSrc = taosArrayGet(pArray, 0);
SDnodeObj *pDst = taosArrayGet(pArray, taosArrayGetSize(pArray) - 1);
for (int32_t i = 0; i < taosArrayGetSize(pArray); ++i) {
SDnodeObj *pDnode = taosArrayGet(pArray, i);
mDebug("dnode:%d, equivalent vnodes:%d support:%d, score:%f", pDnode->id, pDnode->numOfVnodes,
pDnode->numOfSupportVnodes, (float)pDnode->numOfVnodes / pDnode->numOfSupportVnodes);
}
SDnodeObj *pSrc = taosArrayGet(pArray, taosArrayGetSize(pArray) - 1);
SDnodeObj *pDst = taosArrayGet(pArray, 0);
float srcScore = (float)(pSrc->numOfVnodes - 1) / pSrc->numOfSupportVnodes;
float dstScore = (float)(pDst->numOfVnodes + 1) / pDst->numOfSupportVnodes;
if (srcScore + 0.0001 < dstScore) {
mDebug("trans:%d, balance vgroup from dnode:%d to dnode:%d", pTrans->id, pSrc->id, pDst->id);
mDebug("trans:%d, after balance, src dnode:%d score:%f, dst dnode:%d score:%f", pTrans->id, pSrc->id, srcScore,
pDst->id, dstScore);
if (srcScore > dstScore - 0.000001) {
code = mndBalanceVgroupBetweenDnode(pMnode, pTrans, pSrc, pDst);
if (code == 0) {
pSrc->numOfVnodes--;
pDst->numOfVnodes++;
numOfVgroups++;
continue;
} else {
@ -1635,7 +1648,13 @@ static int32_t mndProcessBalanceVgroupMsg(SRpcMsg *pReq) {
void *pIter = NULL;
int64_t curMs = taosGetTimestampMs();
mDebug("start to balance vgroup");
SBalanceVgroupReq req = {0};
if (tDeserializeSBalanceVgroupReq(pReq->pCont, pReq->contLen, &req) != 0) {
terrno = TSDB_CODE_INVALID_MSG;
goto _OVER;
}
mInfo("start to balance vgroup");
if (mndCheckOperAuth(pMnode, pReq->info.conn.user, MND_OPER_BALANCE_VGROUP) != 0) goto _OVER;

View File

@ -150,6 +150,7 @@ int64_t tqFetchLog(STQ* pTq, STqHandle* pHandle, int64_t* fetchOffset, SWalHead*
// tqExec
int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId);
int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataBlkRsp* pRsp, int32_t workerId);
int32_t tqSendPollRsp(STQ* pTq, const SRpcMsg* pMsg, const SMqPollReq* pReq, const SMqDataBlkRsp* pRsp);
// tqMeta

View File

@ -219,11 +219,9 @@ _err:
}
int metaTtlSmaller(SMeta *pMeta, uint64_t ttl, SArray *uidList){
metaRLock(pMeta);
TBC * pCur;
int ret = tdbTbcOpen(pMeta->pTtlIdx, &pCur, NULL);
if (ret < 0) {
metaULock(pMeta);
return ret;
}
@ -249,6 +247,7 @@ int metaTtlSmaller(SMeta *pMeta, uint64_t ttl, SArray *uidList){
tdbTbcClose(pCur);
tdbFree(pKey);
return 0;
}
@ -613,9 +612,6 @@ const void *metaGetTableTagVal(SMetaEntry *pEntry, int16_t type, STagVal *val) {
ASSERT(pEntry->type == TSDB_CHILD_TABLE);
STag *tag = (STag *)pEntry->ctbEntry.pTags;
if (type == TSDB_DATA_TYPE_JSON) {
if (tag->nTag == 0) {
return NULL;
}
return tag;
}
bool find = tTagGet(tag, val);

View File

@ -375,6 +375,7 @@ int metaTtlDropTable(SMeta *pMeta, int64_t ttl, SArray *tbUids) {
metaWLock(pMeta);
int ret = metaTtlSmaller(pMeta, ttl, tbUids);
if(ret != 0){
metaULock(pMeta);
return ret;
}
for (int i = 0; i < taosArrayGetSize(tbUids); ++i) {
@ -400,8 +401,7 @@ static void metaBuildTtlIdxKey(STtlIdxKey *ttlKey, const SMetaEntry *pME){
if (ttlDays <= 0) return;
ttlKey->dtime = ctime / 1000 + ttlDays * 24 * 60 * 60;
// ttlKey->dtime = ctime / 1000 + ttlDays;
ttlKey->dtime = ctime / 1000 + ttlDays * tsTtlUnit;
ttlKey->uid = pME->uid;
}
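/* Worked example (hypothetical values): with ctime = 1656000000000 ms, ttlDays = 3
 * and the default tsTtlUnit = 86400 s, the index key becomes
 * dtime = 1656000000000 / 1000 + 3 * 86400 = 1656259200, a Unix timestamp in seconds. */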

View File

@ -227,19 +227,16 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
consumerEpoch = atomic_val_compare_exchange_32(&pHandle->epoch, consumerEpoch, reqEpoch);
}
SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048);
if (pHeadWithCkSum == NULL) {
return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
SMqDataBlkRsp rsp = {0};
rsp.reqOffset = pReq->currentOffset;
rsp.blockData = taosArrayInit(0, sizeof(void*));
rsp.blockDataLen = taosArrayInit(0, sizeof(int32_t));
if (rsp.blockData == NULL || rsp.blockDataLen == NULL) {
return -1;
}
rsp.withTbName = pReq->withTbName;
if (rsp.withTbName) {
rsp.blockTbName = taosArrayInit(0, sizeof(void*));
@ -253,6 +250,30 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
rsp.blockSchema = taosArrayInit(0, sizeof(void*));
}
#if 1
if (pReq->useSnapshot) {
tqInfo("retrieve using snapshot");
int64_t lastVer = walGetCommittedVer(pTq->pWal);
if (rsp.reqOffset < lastVer) {
tqScanSnapshot(pTq, &pHandle->execHandle, &rsp, workerId);
if (rsp.blockNum != 0) {
rsp.withTbName = false;
rsp.rspOffset = lastVer;
tqInfo("direct send by snapshot rsp offset %ld", lastVer);
goto SEND_RSP;
}
}
}
#endif
SWalHead* pHeadWithCkSum = taosMemoryMalloc(sizeof(SWalHead) + 2048);
if (pHeadWithCkSum == NULL) {
return -1;
}
walSetReaderCapacity(pHandle->pWalReader, 2048);
while (1) {
consumerEpoch = atomic_load_32(&pHandle->epoch);
if (consumerEpoch > reqEpoch) {
@ -292,6 +313,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
metaRsp.metaRsp = pHead->body;
if (tqSendMetaPollRsp(pTq, pMsg, pReq, &metaRsp) < 0) {
code = -1;
goto OVER;
}
code = 0;
goto OVER;
@ -308,6 +330,7 @@ int32_t tqProcessPollReq(STQ* pTq, SRpcMsg* pMsg, int32_t workerId) {
taosMemoryFree(pHeadWithCkSum);
SEND_RSP:
ASSERT(taosArrayGetSize(rsp.blockData) == rsp.blockNum);
ASSERT(taosArrayGetSize(rsp.blockDataLen) == rsp.blockNum);
if (rsp.withSchema) {
@ -376,6 +399,8 @@ int32_t tqProcessVgChangeReq(STQ* pTq, char* msg, int32_t msgLen) {
SReadHandle handle = {
.reader = pHandle->execHandle.pExecReader[i],
.meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode,
.initTsdbReader = 1,
};
pHandle->execHandle.execCol.task[i] = qCreateStreamExecTaskInfo(pHandle->execHandle.execCol.qmsg, &handle);
ASSERT(pHandle->execHandle.execCol.task[i]);
@ -426,6 +451,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
ASSERT(0);
}
tDecoderClear(&decoder);
ASSERT(pTask->isDataScan == 0 || pTask->isDataScan == 1);
pTask->execStatus = TASK_EXEC_STATUS__IDLE;
@ -447,6 +473,7 @@ int32_t tqProcessTaskDeployReq(STQ* pTq, char* msg, int32_t msgLen) {
.reader = pStreamReader,
.meta = pTq->pVnode->pMeta,
.vnode = pTq->pVnode,
.initTsdbReader = 1,
};
/*pTask->exec.inputHandle = pStreamReader;*/
pTask->exec.executor = qCreateStreamExecTaskInfo(pTask->exec.qmsg, &handle);
@ -505,7 +532,7 @@ int32_t tqProcessStreamTrigger(STQ* pTq, SSubmitReq* pReq) {
if (atomic_load_8(&pTask->taskStatus) == TASK_STATUS__DROPPING) {
continue;
}
if (pTask->inputType != STREAM_INPUT__DATA_SUBMIT) continue;
if (!pTask->isDataScan) continue;
if (!failed) {
if (streamTaskInput(pTask, (SStreamQueueItem*)pSubmit) < 0) {

View File

@ -60,6 +60,30 @@ static int32_t tqAddTbNameToRsp(const STQ* pTq, const STqExecHandle* pExec, SMqD
return 0;
}
int32_t tqScanSnapshot(STQ* pTq, const STqExecHandle* pExec, SMqDataBlkRsp* pRsp, int32_t workerId) {
ASSERT(pExec->subType == TOPIC_SUB_TYPE__COLUMN);
qTaskInfo_t task = pExec->execCol.task[workerId];
if (qStreamScanSnapshot(task) < 0) {
ASSERT(0);
}
while (1) {
SSDataBlock* pDataBlock = NULL;
uint64_t ts = 0;
if (qExecTask(task, &pDataBlock, &ts) < 0) {
ASSERT(0);
}
if (pDataBlock == NULL) break;
ASSERT(pDataBlock->info.rows != 0);
ASSERT(pDataBlock->info.numOfCols != 0);
tqAddBlockDataToRsp(pDataBlock, pRsp);
pRsp->blockNum++;
}
return 0;
}
int32_t tqDataExec(STQ* pTq, STqExecHandle* pExec, SSubmitReq* pReq, SMqDataBlkRsp* pRsp, int32_t workerId) {
if (pExec->subType == TOPIC_SUB_TYPE__COLUMN) {
qTaskInfo_t task = pExec->execCol.task[workerId];

View File

@ -345,8 +345,8 @@ int32_t tqUpdateTbUidList(STQ* pTq, const SArray* tbUidList, bool isAdd) {
while (1) {
pIter = taosHashIterate(pTq->pStreamTasks, pIter);
if (pIter == NULL) break;
SStreamTask* pTask = (SStreamTask*)pIter;
if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
SStreamTask* pTask = *(SStreamTask**)pIter;
if (pTask->isDataScan) {
int32_t code = qUpdateQualifiedTableId(pTask->exec.executor, tbUidList, isAdd);
ASSERT(code == 0);
}

View File

@ -315,7 +315,7 @@ static int32_t vnodeProcessDropTtlTbReq(SVnode *pVnode, int64_t version, void *p
if (tbUids == NULL) return TSDB_CODE_OUT_OF_MEMORY;
int32_t t = ntohl(*(int32_t *)pReq);
vError("rec ttl time:%d", t);
vDebug("vgId:%d, recv ttl msg, time:%d", pVnode->config.vgId, t);
int32_t ret = metaTtlDropTable(pVnode->pMeta, t, tbUids);
if (ret != 0) {
goto end;

View File

@ -65,6 +65,7 @@ enum {
typedef enum {
CTG_TASK_GET_QNODE = 0,
CTG_TASK_GET_DNODE,
CTG_TASK_GET_DB_VGROUP,
CTG_TASK_GET_DB_CFG,
CTG_TASK_GET_DB_INFO,
@ -216,6 +217,7 @@ typedef struct SCtgJob {
int32_t dbVgNum;
int32_t udfNum;
int32_t qnodeNum;
int32_t dnodeNum;
int32_t dbCfgNum;
int32_t indexNum;
int32_t userNum;
@ -565,6 +567,7 @@ int32_t ctgGetTbHashVgroupFromCache(SCatalog *pCtg, const SName *pTableName, SVg
int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize, int32_t rspCode, char* target);
int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTask* pTask);
int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask);
int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray **out, SCtgTask* pTask);
int32_t ctgGetDBCfgFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *dbFName, SDbCfgInfo *out, SCtgTask* pTask);
int32_t ctgGetIndexInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, const char *indexName, SIndexInfo *out, SCtgTask* pTask);
int32_t ctgGetTbIndexFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SName *name, STableIndex* out, SCtgTask* pTask);

View File

@ -1099,8 +1099,19 @@ _return:
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
int32_t catalogGetDnodeList(SCatalog* pCatalog, SRequestConnInfo* pConn, SArray** pDnodeList) {
return TSDB_CODE_CTG_INVALID_INPUT;
int32_t catalogGetDnodeList(SCatalog* pCtg, SRequestConnInfo* pConn, SArray** pDnodeList) {
CTG_API_ENTER();
int32_t code = 0;
if (NULL == pCtg || NULL == pConn || NULL == pDnodeList) {
CTG_API_LEAVE(TSDB_CODE_CTG_INVALID_INPUT);
}
CTG_ERR_JRET(ctgGetDnodeListFromMnode(pCtg, pConn, pDnodeList, NULL));
_return:
CTG_API_LEAVE(TSDB_CODE_SUCCESS);
}
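/* Caller sketch (illustrative only; pCtg and conn are assumed to be set up elsewhere):
 * fetch the dnode endpoint list through the new synchronous path. */
SArray *pDnodeList = NULL;
if (catalogGetDnodeList(pCtg, &conn, &pDnodeList) == TSDB_CODE_SUCCESS) {
  for (int32_t i = 0; i < taosArrayGetSize(pDnodeList); ++i) {
    SEpSet *pEpSet = taosArrayGet(pDnodeList, i);  /* one SEpSet per dnode, filled by mndProcessDnodeListReq */
    (void)pEpSet;
  }
  taosArrayDestroy(pDnodeList);
}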
int32_t catalogGetExpiredSTables(SCatalog* pCtg, SSTableVersion **stables, uint32_t *num) {

View File

@ -168,6 +168,21 @@ int32_t ctgInitGetQnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetDnodeTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
SCtgTask task = {0};
task.type = CTG_TASK_GET_DNODE;
task.taskId = taskIdx;
task.pJob = pJob;
task.taskCtx = NULL;
taosArrayPush(pJob->pTasks, &task);
qDebug("QID:0x%" PRIx64 " the %d task type %s initialized", pJob->queryId, taskIdx, ctgTaskTypeStr(task.type));
return TSDB_CODE_SUCCESS;
}
int32_t ctgInitGetIndexTask(SCtgJob *pJob, int32_t taskIdx, void* param) {
char *name = (char*)param;
SCtgTask task = {0};
@ -405,6 +420,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
int32_t tbHashNum = (int32_t)taosArrayGetSize(pReq->pTableHash);
int32_t udfNum = (int32_t)taosArrayGetSize(pReq->pUdf);
int32_t qnodeNum = pReq->qNodeRequired ? 1 : 0;
int32_t dnodeNum = pReq->dNodeRequired ? 1 : 0;
int32_t dbCfgNum = (int32_t)taosArrayGetSize(pReq->pDbCfg);
int32_t indexNum = (int32_t)taosArrayGetSize(pReq->pIndex);
int32_t userNum = (int32_t)taosArrayGetSize(pReq->pUser);
@ -412,7 +428,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
int32_t tbIndexNum = (int32_t)taosArrayGetSize(pReq->pTableIndex);
int32_t tbCfgNum = (int32_t)taosArrayGetSize(pReq->pTableCfg);
*taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum;
*taskNum = tbMetaNum + dbVgNum + udfNum + tbHashNum + qnodeNum + dnodeNum + dbCfgNum + indexNum + userNum + dbInfoNum + tbIndexNum + tbCfgNum;
if (*taskNum <= 0) {
ctgDebug("Empty input for job, no need to retrieve meta, reqId:0x%" PRIx64, reqId);
return TSDB_CODE_SUCCESS;
@ -435,6 +451,7 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
pJob->tbMetaNum = tbMetaNum;
pJob->tbHashNum = tbHashNum;
pJob->qnodeNum = qnodeNum;
pJob->dnodeNum = dnodeNum;
pJob->dbVgNum = dbVgNum;
pJob->udfNum = udfNum;
pJob->dbCfgNum = dbCfgNum;
@ -509,6 +526,10 @@ int32_t ctgInitJob(SCatalog* pCtg, SRequestConnInfo *pConn, SCtgJob** job, uint6
CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_QNODE, NULL, NULL));
}
if (dnodeNum) {
CTG_ERR_JRET(ctgInitTask(pJob, CTG_TASK_GET_DNODE, NULL, NULL));
}
pJob->refId = taosAddRef(gCtgMgmt.jobPool, pJob);
if (pJob->refId < 0) {
ctgError("add job to ref failed, error: %s", tstrerror(terrno));
@ -631,6 +652,22 @@ int32_t ctgDumpQnodeRes(SCtgTask* pTask) {
return TSDB_CODE_SUCCESS;
}
int32_t ctgDumpDnodeRes(SCtgTask* pTask) {
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDnodeList) {
pJob->jobRes.pDnodeList = taosArrayInit(1, sizeof(SMetaRes));
if (NULL == pJob->jobRes.pDnodeList) {
CTG_ERR_RET(TSDB_CODE_OUT_OF_MEMORY);
}
}
SMetaRes res = {.code = pTask->code, .pRes = pTask->res};
taosArrayPush(pJob->jobRes.pDnodeList, &res);
return TSDB_CODE_SUCCESS;
}
int32_t ctgDumpDbCfgRes(SCtgTask* pTask) {
SCtgJob* pJob = pTask->pJob;
if (NULL == pJob->jobRes.pDbCfg) {
@ -1036,6 +1073,19 @@ _return:
CTG_RET(code);
}
int32_t ctgHandleGetDnodeRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
int32_t code = 0;
CTG_ERR_JRET(ctgProcessRspMsg(&pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
TSWAP(pTask->res, pTask->msgCtx.out);
_return:
ctgHandleTaskEnd(pTask, code);
CTG_RET(code);
}
int32_t ctgHandleGetIndexRsp(SCtgTask* pTask, int32_t reqType, const SDataBuf *pMsg, int32_t rspCode) {
int32_t code = 0;
CTG_ERR_JRET(ctgProcessRspMsg(pTask->msgCtx.out, reqType, pMsg->pData, pMsg->len, rspCode, pTask->msgCtx.target));
@ -1311,6 +1361,15 @@ int32_t ctgLaunchGetQnodeTask(SCtgTask *pTask) {
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetDnodeTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
CTG_ERR_RET(ctgGetDnodeListFromMnode(pCtg, pConn, NULL, pTask));
return TSDB_CODE_SUCCESS;
}
int32_t ctgLaunchGetDbCfgTask(SCtgTask *pTask) {
SCatalog* pCtg = pTask->pJob->pCtg;
SRequestConnInfo* pConn = &pTask->pJob->conn;
@ -1462,6 +1521,7 @@ int32_t ctgCloneDbVg(SCtgTask* pTask, void** pRes) {
SCtgAsyncFps gCtgAsyncFps[] = {
{ctgInitGetQnodeTask, ctgLaunchGetQnodeTask, ctgHandleGetQnodeRsp, ctgDumpQnodeRes, NULL, NULL},
{ctgInitGetDnodeTask, ctgLaunchGetDnodeTask, ctgHandleGetDnodeRsp, ctgDumpDnodeRes, NULL, NULL},
{ctgInitGetDbVgTask, ctgLaunchGetDbVgTask, ctgHandleGetDbVgRsp, ctgDumpDbVgRes, ctgCompDbVgTasks, ctgCloneDbVg},
{ctgInitGetDbCfgTask, ctgLaunchGetDbCfgTask, ctgHandleGetDbCfgRsp, ctgDumpDbCfgRes, NULL, NULL},
{ctgInitGetDbInfoTask, ctgLaunchGetDbInfoTask, ctgHandleGetDbInfoRsp, ctgDumpDbInfoRes, NULL, NULL},
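For context: each catalog task type is driven by a row of function pointers (init, launch, handle-response, dump) looked up in gCtgAsyncFps by task type, which is why adding the dnode list only needs the four new functions plus one table row. A minimal standalone sketch of that dispatch-table pattern, using illustrative names and stdio rather than the real catalog types:

#include <stdio.h>

typedef enum { TASK_GET_QNODE = 0, TASK_GET_DNODE, TASK_TYPE_MAX } ETaskType;
typedef struct { ETaskType type; int id; } Task;

typedef int (*taskInitFp)(Task *t);
typedef int (*taskLaunchFp)(Task *t);
typedef struct { taskInitFp initFp; taskLaunchFp launchFp; } TaskFps;

static int initQnodeTask(Task *t)   { printf("init qnode task %d\n", t->id);   return 0; }
static int launchQnodeTask(Task *t) { printf("launch qnode task %d\n", t->id); return 0; }
static int initDnodeTask(Task *t)   { printf("init dnode task %d\n", t->id);   return 0; }
static int launchDnodeTask(Task *t) { printf("launch dnode task %d\n", t->id); return 0; }

/* one row of function pointers per task type; adding a task type means adding a row */
static const TaskFps gTaskFps[TASK_TYPE_MAX] = {
  [TASK_GET_QNODE] = {initQnodeTask, launchQnodeTask},
  [TASK_GET_DNODE] = {initDnodeTask, launchDnodeTask},
};

int main(void) {
  Task t = {TASK_GET_DNODE, 1};
  gTaskFps[t.type].initFp(&t);    /* dispatch purely by task type */
  gTaskFps[t.type].launchFp(&t);
  return 0;
}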

View File

@ -40,6 +40,21 @@ int32_t ctgProcessRspMsg(void* out, int32_t reqType, char* msg, int32_t msgSize,
qDebug("Got qnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(out));
break;
}
case TDMT_MND_DNODE_LIST: {
if (TSDB_CODE_SUCCESS != rspCode) {
qError("error rsp for dnode list, error:%s", tstrerror(rspCode));
CTG_ERR_RET(rspCode);
}
code = queryProcessMsgRsp[TMSG_INDEX(reqType)](out, msg, msgSize);
if (code) {
qError("Process dnode list rsp failed, error:%s", tstrerror(rspCode));
CTG_ERR_RET(code);
}
qDebug("Got dnode list from mnode, listNum:%d", (int32_t)taosArrayGetSize(*(SArray**)out));
break;
}
case TDMT_MND_USE_DB: {
if (TSDB_CODE_SUCCESS != rspCode) {
qError("error rsp for use db, error:%s, dbFName:%s", tstrerror(rspCode), target);
@ -309,9 +324,6 @@ _return:
CTG_RET(code);
}
int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray *out, SCtgTask* pTask) {
char *msg = NULL;
int32_t msgLen = 0;
@ -349,6 +361,39 @@ int32_t ctgGetQnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray
return TSDB_CODE_SUCCESS;
}
int32_t ctgGetDnodeListFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SArray **out, SCtgTask* pTask) {
char *msg = NULL;
int32_t msgLen = 0;
int32_t reqType = TDMT_MND_DNODE_LIST;
void*(*mallocFp)(int32_t) = pTask ? taosMemoryMalloc : rpcMallocCont;
ctgDebug("try to get dnode list from mnode, mgmtEpInUse:%d", pConn->mgmtEps.inUse);
int32_t code = queryBuildMsg[TMSG_INDEX(reqType)](NULL, &msg, 0, &msgLen, mallocFp);
if (code) {
ctgError("Build dnode list msg failed, error:%s", tstrerror(code));
CTG_ERR_RET(code);
}
if (pTask) {
CTG_ERR_RET(ctgUpdateMsgCtx(&pTask->msgCtx, reqType, NULL, NULL));
CTG_RET(ctgAsyncSendMsg(pCtg, pConn, pTask, reqType, msg, msgLen));
}
SRpcMsg rpcMsg = {
.msgType = reqType,
.pCont = msg,
.contLen = msgLen,
};
SRpcMsg rpcRsp = {0};
rpcSendRecv(pConn->pTrans, &pConn->mgmtEps, &rpcMsg, &rpcRsp);
CTG_ERR_RET(ctgProcessRspMsg(out, reqType, rpcRsp.pCont, rpcRsp.contLen, rpcRsp.code, NULL));
return TSDB_CODE_SUCCESS;
}
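ctgGetDnodeListFromMnode follows the catalog's dual-mode convention: when a task is supplied the request is sent asynchronously and the reply is later handled by ctgHandleGetDnodeRsp, otherwise it blocks in rpcSendRecv and processes the reply inline. A small self-contained sketch of that sync-versus-async shape, assuming a toy transport (fakeSend and the other names below are illustrative, not TDengine APIs):

#include <stdio.h>

typedef void (*rspCb)(void *param, const char *rsp);

/* toy transport: the "reply" is produced inline so the example stays self-contained */
static void fakeSend(const char *req, rspCb cb, void *param, char *syncRsp, int syncRspLen) {
  char rsp[64];
  snprintf(rsp, sizeof(rsp), "reply-to:%s", req);
  if (cb != NULL) {
    cb(param, rsp);                            /* async path: hand the reply to the callback */
  } else {
    snprintf(syncRsp, syncRspLen, "%s", rsp);  /* sync path: copy the reply back to the caller */
  }
}

static void onDnodeListRsp(void *param, const char *rsp) {
  printf("task %d got %s\n", *(int *)param, rsp);
}

/* with a task id the call returns immediately and the callback handles the reply;
 * without one it behaves synchronously, like the pTask check above */
static void getDnodeList(int *taskId, char *out, int outLen) {
  if (taskId != NULL) {
    fakeSend("dnode-list", onDnodeListRsp, taskId, NULL, 0);
    return;
  }
  fakeSend("dnode-list", NULL, NULL, out, outLen);
}

int main(void) {
  int taskId = 7;
  char buf[64];
  getDnodeList(&taskId, NULL, 0);        /* async style */
  getDnodeList(NULL, buf, sizeof(buf));  /* sync style */
  printf("sync call got %s\n", buf);
  return 0;
}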
int32_t ctgGetDBVgInfoFromMnode(SCatalog* pCtg, SRequestConnInfo *pConn, SBuildUseDBInput *input, SUseDbOutput *out, SCtgTask* pTask) {
char *msg = NULL;

View File

@ -23,6 +23,8 @@ char *ctgTaskTypeStr(CTG_TASK_TYPE type) {
switch (type) {
case CTG_TASK_GET_QNODE:
return "[get qnode list]";
case CTG_TASK_GET_DNODE:
return "[get dnode list]";
case CTG_TASK_GET_DB_VGROUP:
return "[get db vgroup]";
case CTG_TASK_GET_DB_CFG:
@ -349,6 +351,11 @@ void ctgFreeTaskRes(CTG_TASK_TYPE type, void **pRes) {
*pRes = NULL;
break;
}
case CTG_TASK_GET_DNODE: {
taosArrayDestroy((SArray*)*pRes);
*pRes = NULL;
break;
}
case CTG_TASK_GET_TB_META: {
taosMemoryFreeClear(*pRes);
break;
@ -413,6 +420,11 @@ void ctgFreeSubTaskRes(CTG_TASK_TYPE type, void **pRes) {
*pRes = NULL;
break;
}
case CTG_TASK_GET_DNODE: {
taosArrayDestroy((SArray*)*pRes);
*pRes = NULL;
break;
}
case CTG_TASK_GET_TB_META: {
taosMemoryFreeClear(*pRes);
break;

View File

@ -18,6 +18,7 @@
#include "tdatablock.h"
#include "tglobal.h"
extern SConfig *tsCfg;
static int32_t getSchemaBytes(const SSchema* pSchema) {
switch (pSchema->type) {
case TSDB_DATA_TYPE_BINARY:
@ -551,7 +552,85 @@ static int32_t execShowCreateSTable(SShowCreateTableStmt* pStmt, SRetrieveTableR
static int32_t execAlterLocal(SAlterLocalStmt* pStmt) { return TSDB_CODE_FAILED; }
static int32_t execShowLocalVariables() { return TSDB_CODE_FAILED; }
static SSDataBlock* buildLocalVariablesResultDataBlock() {
SSDataBlock* pBlock = taosMemoryCalloc(1, sizeof(SSDataBlock));
pBlock->info.numOfCols = SHOW_LOCAL_VARIABLES_RESULT_COLS;
pBlock->info.hasVarCol = true;
pBlock->pDataBlock = taosArrayInit(pBlock->info.numOfCols, sizeof(SColumnInfoData));
SColumnInfoData infoData = {0};
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
infoData.info.bytes = SHOW_LOCAL_VARIABLES_RESULT_FIELD1_LEN;
taosArrayPush(pBlock->pDataBlock, &infoData);
infoData.info.type = TSDB_DATA_TYPE_VARCHAR;
infoData.info.bytes = SHOW_LOCAL_VARIABLES_RESULT_FIELD2_LEN;
taosArrayPush(pBlock->pDataBlock, &infoData);
return pBlock;
}
int32_t setLocalVariablesResultIntoDataBlock(SSDataBlock* pBlock) {
int32_t numOfCfg = taosArrayGetSize(tsCfg->array);
int32_t numOfRows = 0;
blockDataEnsureCapacity(pBlock, numOfCfg);
for (int32_t i = 0, c = 0; i < numOfCfg; ++i, c = 0) {
SConfigItem *pItem = taosArrayGet(tsCfg->array, i);
char name[TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE] = {0};
STR_WITH_MAXSIZE_TO_VARSTR(name, pItem->name, TSDB_CONFIG_OPTION_LEN + VARSTR_HEADER_SIZE);
SColumnInfoData *pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataAppend(pColInfo, i, name, false);
char value[TSDB_CONFIG_VALUE_LEN + VARSTR_HEADER_SIZE] = {0};
int32_t valueLen = 0;
cfgDumpItemValue(pItem, &value[VARSTR_HEADER_SIZE], TSDB_CONFIG_VALUE_LEN, &valueLen);
varDataSetLen(value, valueLen);
pColInfo = taosArrayGet(pBlock->pDataBlock, c++);
colDataAppend(pColInfo, i, value, false);
numOfRows++;
}
pBlock->info.rows = numOfRows;
return TSDB_CODE_SUCCESS;
}
static int32_t execShowLocalVariables(SRetrieveTableRsp** pRsp) {
SSDataBlock* pBlock = buildLocalVariablesResultDataBlock();
int32_t code = setLocalVariablesResultIntoDataBlock(pBlock);
if (code) {
return code;
}
size_t rspSize = sizeof(SRetrieveTableRsp) + blockGetEncodeSize(pBlock);
*pRsp = taosMemoryCalloc(1, rspSize);
if (NULL == *pRsp) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*pRsp)->useconds = 0;
(*pRsp)->completed = 1;
(*pRsp)->precision = 0;
(*pRsp)->compressed = 0;
(*pRsp)->compLen = 0;
(*pRsp)->numOfRows = htonl(pBlock->info.rows);
(*pRsp)->numOfCols = htonl(SHOW_LOCAL_VARIABLES_RESULT_COLS);
int32_t len = 0;
blockCompressEncode(pBlock, (*pRsp)->data, &len, SHOW_LOCAL_VARIABLES_RESULT_COLS, false);
ASSERT(len == rspSize - sizeof(SRetrieveTableRsp));
blockDataDestroy(pBlock);
return TSDB_CODE_SUCCESS;
}
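The name and value columns above are written as length-prefixed varstrings via STR_WITH_MAXSIZE_TO_VARSTR and varDataSetLen. A minimal sketch of that encoding, assuming a 2-byte length header in front of the payload as those macros imply:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* illustrative stand-in for VARSTR_HEADER_SIZE / varDataSetLen / varDataVal:
 * a varstring is a 2-byte length header followed by the payload bytes */
#define VARSTR_HDR 2

static void toVarstr(char *buf, const char *s, uint16_t maxPayload) {
  uint16_t len = (uint16_t)strnlen(s, maxPayload);
  memcpy(buf, &len, VARSTR_HDR);      /* write the length header */
  memcpy(buf + VARSTR_HDR, s, len);   /* copy the payload after the header */
}

int main(void) {
  char name[VARSTR_HDR + 32]  = {0};
  char value[VARSTR_HDR + 32] = {0};
  toVarstr(name, "charset", 32);
  toVarstr(value, "UTF-8", 32);
  uint16_t nlen, vlen;
  memcpy(&nlen, name, VARSTR_HDR);
  memcpy(&vlen, value, VARSTR_HDR);
  printf("%.*s = %.*s\n", nlen, name + VARSTR_HDR, vlen, value + VARSTR_HDR);
  return 0;
}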
int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) {
switch (nodeType(pStmt)) {
@ -568,7 +647,7 @@ int32_t qExecCommand(SNode* pStmt, SRetrieveTableRsp** pRsp) {
case QUERY_NODE_ALTER_LOCAL_STMT:
return execAlterLocal((SAlterLocalStmt*)pStmt);
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
return execShowLocalVariables();
return execShowLocalVariables(pRsp);
default:
break;
}

View File

@ -106,7 +106,8 @@ int32_t getNumOfTotalRes(SGroupResInfo* pGroupResInfo);
SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode);
int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond);
EDealRes doTranslateTagExpr(SNode** pNode, void* pContext);
int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo);
SArray* createSortInfo(SNodeList* pNodeList);
SArray* extractPartitionColInfo(SNodeList* pNodeList);
SArray* extractColMatchInfo(SNodeList* pNodeList, SDataBlockDescNode* pOutputNodeList, int32_t* numOfOutputCols, int32_t type);

View File

@ -282,7 +282,6 @@ typedef struct STagScanInfo {
int32_t curPos;
SReadHandle readHandle;
STableListInfo *pTableList;
SNode* pFilterNode; // filter info,
} STagScanInfo;
typedef enum EStreamScanMode {
@ -839,13 +838,11 @@ int32_t finalizeResultRowIntoResultDataBlock(SDiskbasedBuf* pBuf, SResultRowPosi
SSDataBlock* pBlock, SExecTaskInfo* pTaskInfo);
int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId,
SNode* pTagCond);
int32_t doCreateMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId,
uint64_t taskId);
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SArray* dataReaders,
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo);
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId);
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode,
SExecTaskInfo* pTaskInfo);
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo *pTableListInfo,
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo, uint64_t queryId, uint64_t taskId);
void copyUpdateDataBlock(SSDataBlock* pDest, SSDataBlock* pSource, int32_t tsColIndex);

View File

@ -86,7 +86,7 @@ static void toDataCacheEntry(SDataDeleterHandle* pHandle, const SInputData* pInp
SColumnInfoData* pColRes = (SColumnInfoData*)taosArrayGet(pInput->pData->pDataBlock, 0);
SDeleterRes* pRes = (SDeleterRes*)pEntry->data;
pRes->uid = pHandle->pDeleter->tableId;
pRes->suid = pHandle->pParam->suid;
pRes->uidList = pHandle->pParam->pUidList;
pRes->skey = pHandle->pDeleter->deleteTimeRange.skey;
pRes->ekey = pHandle->pDeleter->deleteTimeRange.ekey;

View File

@ -214,28 +214,113 @@ SSDataBlock* createResDataBlock(SDataBlockDescNode* pNode) {
return pBlock;
}
int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo, SNode* pTagCond) {
EDealRes doTranslateTagExpr(SNode** pNode, void* pContext) {
SMetaReader* mr = (SMetaReader*)pContext;
if(nodeType(*pNode) == QUERY_NODE_COLUMN){
SColumnNode* pSColumnNode = *(SColumnNode**)pNode;
SValueNode *res = (SValueNode *)nodesMakeNode(QUERY_NODE_VALUE);
if (NULL == res) {
return DEAL_RES_ERROR;
}
res->translate = true;
res->node.resType = pSColumnNode->node.resType;
STagVal tagVal = {0};
tagVal.cid = pSColumnNode->colId;
const char* p = metaGetTableTagVal(&mr->me, pSColumnNode->node.resType.type, &tagVal);
if (p == NULL) {
res->node.resType.type = TSDB_DATA_TYPE_NULL;
}else if (pSColumnNode->node.resType.type == TSDB_DATA_TYPE_JSON) {
int32_t len = ((const STag*)p) -> len;
res->datum.p = taosMemoryCalloc(len + 1, 1);
memcpy(res->datum.p, p, len);
} else if (IS_VAR_DATA_TYPE(pSColumnNode->node.resType.type)) {
res->datum.p = taosMemoryCalloc(tagVal.nData + VARSTR_HEADER_SIZE + 1, 1);
memcpy(varDataVal(res->datum.p), tagVal.pData, tagVal.nData);
varDataSetLen(res->datum.p, tagVal.nData);
} else {
nodesSetValueNodeValue(res, &(tagVal.i64));
}
nodesDestroyNode(*pNode);
*pNode = (SNode*)res;
}else if (nodeType(*pNode) == QUERY_NODE_FUNCTION){
SFunctionNode * pFuncNode = *(SFunctionNode**)pNode;
if(pFuncNode->funcType == FUNCTION_TYPE_TBNAME){
SValueNode *res = (SValueNode *)nodesMakeNode(QUERY_NODE_VALUE);
if (NULL == res) {
return DEAL_RES_ERROR;
}
res->translate = true;
res->node.resType = pFuncNode->node.resType;
int32_t len = strlen(mr->me.name);
res->datum.p = taosMemoryCalloc(len + VARSTR_HEADER_SIZE + 1, 1);
memcpy(varDataVal(res->datum.p), mr->me.name, len);
varDataSetLen(res->datum.p, len);
nodesDestroyNode(*pNode);
*pNode = (SNode*)res;
}
}
return DEAL_RES_CONTINUE;
}
static bool isTableOk(STableKeyInfo* info, SNode *pTagCond, SMeta *metaHandle){
SMetaReader mr = {0};
metaReaderInit(&mr, metaHandle, 0);
metaGetTableEntryByUid(&mr, info->uid);
SNode *pTagCondTmp = nodesCloneNode(pTagCond);
nodesRewriteExprPostOrder(&pTagCondTmp, doTranslateTagExpr, &mr);
metaReaderClear(&mr);
SNode* pNew = NULL;
int32_t code = scalarCalculateConstants(pTagCondTmp, &pNew);
if (TSDB_CODE_SUCCESS != code) {
nodesDestroyNode(pTagCondTmp);
return false;
}
ASSERT(nodeType(pNew) == QUERY_NODE_VALUE);
SValueNode *pValue = (SValueNode *)pNew;
ASSERT(pValue->node.resType.type == TSDB_DATA_TYPE_BOOL);
bool result = pValue->datum.b;
nodesDestroyNode(pNew);
return result;
}
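isTableOk clones the tag condition per table, rewrites tag-column and tbname leaves into constant value nodes read from the table's metadata, folds the expression to a constant, and reads the resulting boolean. A toy standalone version of that substitute-then-evaluate idea (the condition and metadata structs here are illustrative, not the planner's node types):

#include <stdbool.h>
#include <stdio.h>

/* toy "tag condition" (tag OP constant) evaluated per table by substituting the
 * table's tag value, mirroring the clone/rewrite/fold flow in isTableOk */
typedef enum { OP_GT, OP_EQ } EOp;
typedef struct { int tagIdx; EOp op; int constVal; } TagCond;
typedef struct { long uid; int tagVals[2]; } TableMeta;

static bool evalTagCond(const TagCond *cond, const TableMeta *tbl) {
  int v = tbl->tagVals[cond->tagIdx];  /* "rewrite" the tag column into its concrete value */
  return (cond->op == OP_GT) ? (v > cond->constVal) : (v == cond->constVal);  /* fold to a boolean */
}

int main(void) {
  TagCond cond = {.tagIdx = 0, .op = OP_GT, .constVal = 4};  /* e.g. tag1 > 4 */
  TableMeta tables[] = {{1001, {2, 0}}, {1002, {7, 0}}};
  for (int i = 0; i < 2; ++i) {
    if (!evalTagCond(&cond, &tables[i])) continue;   /* tables failing the condition are skipped */
    printf("keep table uid=%ld\n", tables[i].uid);
  }
  return 0;
}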
int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo* pListInfo) {
int32_t code = TSDB_CODE_SUCCESS;
pListInfo->pTableList = taosArrayInit(8, sizeof(STableKeyInfo));
uint64_t tableUid = pScanNode->uid;
pListInfo->suid = pScanNode->suid;
SNode* pTagCond = (SNode*)pListInfo->pTagCond;
SNode* pTagIndexCond = (SNode*)pListInfo->pTagIndexCond;
if (pScanNode->tableType == TSDB_SUPER_TABLE) {
if (pTagCond) {
if (pTagIndexCond) {
SIndexMetaArg metaArg = {
.metaEx = metaHandle, .idx = tsdbGetIdx(metaHandle), .ivtIdx = tsdbGetIvtIdx(metaHandle), .suid = tableUid};
SArray* res = taosArrayInit(8, sizeof(uint64_t));
code = doFilterTag(pTagCond, &metaArg, res);
if (code == TSDB_CODE_INDEX_REBUILDING) { // todo
// doFilter();
//code = doFilterTag(pTagIndexCond, &metaArg, res);
code = TSDB_CODE_INDEX_REBUILDING;
if (code == TSDB_CODE_INDEX_REBUILDING) {
code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
} else if (code != TSDB_CODE_SUCCESS) {
qError("failed to get tableIds, reason: %s, suid: %" PRIu64 "", tstrerror(code), tableUid);
taosArrayDestroy(res);
terrno = code;
return code;
} else {
qDebug("success to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
qDebug("sucess to get tableIds, size: %d, suid: %" PRIu64 "", (int)taosArrayGetSize(res), tableUid);
}
for (int i = 0; i < taosArrayGetSize(res); i++) {
@ -246,7 +331,20 @@ int32_t getTableList(void* metaHandle, SScanPhysiNode* pScanNode, STableListInfo
} else {
code = tsdbGetAllTableList(metaHandle, tableUid, pListInfo->pTableList);
}
} else { // Create one table group.
if(pTagCond){
int32_t i = 0;
while(i < taosArrayGetSize(pListInfo->pTableList)) {
STableKeyInfo* info = taosArrayGet(pListInfo->pTableList, i);
bool isOk = isTableOk(info, pTagCond, metaHandle);
if(!isOk){
taosArrayRemove(pListInfo->pTableList, i);
continue;
}
i++;
}
}
}else { // Create one table group.
STableKeyInfo info = {.lastKey = 0, .uid = tableUid, .groupId = 0};
taosArrayPush(pListInfo->pTableList, &info);
}
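The non-indexed tag filter above removes entries in place: after a removal the index is intentionally not advanced, because the next element has shifted into the current slot. The same loop shape over a plain array:

#include <stdio.h>

/* in-place filter: after a removal the index is NOT advanced, since the next
 * element has just been shifted into the current slot */
int main(void) {
  long uids[] = {1001, 1002, 1003, 1004};
  int n = 4, i = 0;
  while (i < n) {
    int keep = (uids[i] % 2 == 0);                            /* stand-in for the per-table check */
    if (!keep) {
      for (int j = i; j < n - 1; ++j) uids[j] = uids[j + 1];  /* shift the tail down */
      --n;
      continue;                                               /* re-test the same index */
    }
    ++i;
  }
  for (i = 0; i < n; ++i) printf("%ld\n", uids[i]);
  return 0;
}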

View File

@ -40,7 +40,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
SStreamBlockScanInfo* pInfo = pOperator->info;
pInfo->assignBlockUid = assignUid;
// the block type can not be changed in the streamscan operators
// no need to check
#if 0
if (pInfo->blockType == 0) {
pInfo->blockType = type;
@ -49,10 +49,7 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
return TSDB_CODE_QRY_APP_ERROR;
}
#endif
// rollup sma, the same qTaskInfo is used to insert data by SubmitReq and fetch result by SSDataBlock
if (pInfo->blockType != type) {
pInfo->blockType = type;
}
pInfo->blockType = type;
if (type == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
if (tqReadHandleSetMsg(pInfo->streamBlockReader, input, 0) < 0) {
@ -70,6 +67,9 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
taosArrayAddAll(p->pDataBlock, pDataBlock->pDataBlock);
taosArrayPush(pInfo->pBlockLists, &p);
}
} else if (type == STREAM_DATA_TYPE_FROM_SNAPSHOT) {
// do nothing
ASSERT(pInfo->blockType == STREAM_DATA_TYPE_FROM_SNAPSHOT);
} else {
ASSERT(0);
}
@ -78,6 +78,14 @@ static int32_t doSetStreamBlock(SOperatorInfo* pOperator, void* input, size_t nu
}
}
int32_t qStreamScanSnapshot(qTaskInfo_t tinfo) {
if (tinfo == NULL) {
return TSDB_CODE_QRY_APP_ERROR;
}
SExecTaskInfo* pTaskInfo = (SExecTaskInfo*)tinfo;
return doSetStreamBlock(pTaskInfo->pRoot, NULL, 0, STREAM_DATA_TYPE_FROM_SNAPSHOT, 0, NULL);
}
int32_t qSetStreamInput(qTaskInfo_t tinfo, const void* input, int32_t type, bool assignUid) {
return qSetMultiStreamInput(tinfo, input, 1, type, assignUid);
}
@ -109,14 +117,6 @@ qTaskInfo_t qCreateStreamExecTaskInfo(void* msg, void* streamReadHandle) {
return NULL;
}
// print those info into log
#if 0
pMsg->sId = pMsg->sId;
pMsg->queryId = pMsg->queryId;
pMsg->taskId = pMsg->taskId;
pMsg->contentLen = pMsg->contentLen;
#endif
/*qDebugL("stream task string %s", (const char*)msg);*/
struct SSubplan* plan = NULL;

View File

@ -3872,8 +3872,7 @@ static SExecTaskInfo* createExecTaskInfo(uint64_t queryId, uint64_t taskId, EOPT
}
static tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId,
SNode* pTagCond);
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId);
static SArray* extractColumnInfo(SNodeList* pNodeList);
@ -3987,7 +3986,7 @@ int32_t generateGroupIdMap(STableListInfo* pTableListInfo, SReadHandle* pHandle,
}
SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo, SReadHandle* pHandle,
uint64_t queryId, uint64_t taskId, STableListInfo* pTableListInfo, SNode* pTagCond) {
uint64_t queryId, uint64_t taskId, STableListInfo* pTableListInfo) {
int32_t type = nodeType(pPhyNode);
if (pPhyNode->pChildren == NULL || LIST_LENGTH(pPhyNode->pChildren) == 0) {
@ -3995,7 +3994,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
STableScanPhysiNode* pTableScanNode = (STableScanPhysiNode*)pPhyNode;
tsdbReaderT pDataReader =
doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId);
if (pDataReader == NULL && terrno != 0) {
pTaskInfo->code = terrno;
return NULL;
@ -4023,14 +4022,9 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
return pOperator;
} else if (QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN == type) {
STableMergeScanPhysiNode* pTableScanNode = (STableMergeScanPhysiNode*)pPhyNode;
SArray* dataReaders = taosArrayInit(8, POINTER_BYTES);
createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId, pTagCond);
doCreateMultipleDataReaders(pTableScanNode, pHandle, pTableListInfo, dataReaders, queryId, taskId);
createScanTableListInfo(pTableScanNode, pHandle, pTableListInfo, queryId, taskId);
extractTableSchemaVersion(pHandle, pTableScanNode->scan.uid, pTaskInfo);
SOperatorInfo* pOperator = createTableMergeScanOperatorInfo(pTableScanNode, dataReaders, pHandle, pTaskInfo);
SOperatorInfo* pOperator = createTableMergeScanOperatorInfo(pTableScanNode, pTableListInfo, pHandle, pTaskInfo, queryId, taskId);
STableScanInfo* pScanInfo = pOperator->info;
pTaskInfo->cost.pRecoder = &pScanInfo->readRecorder;
return pOperator;
@ -4047,16 +4041,17 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
tsdbReaderT pDataReader = NULL;
if (pHandle) {
if (pHandle->vnode) {
// for stram
if (pHandle->initTsdbReader) {
// for stream
ASSERT(pHandle->vnode);
pDataReader =
doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId, pTagCond);
doCreateDataReader(pTableScanNode, pHandle, pTableListInfo, (uint64_t)queryId, taskId);
} else {
// for tq
getTableList(pHandle->meta, pScanPhyNode, pTableListInfo, pTagCond);
ASSERT(pHandle->meta);
getTableList(pHandle->meta, pScanPhyNode, pTableListInfo);
}
}
if (pDataReader == NULL && terrno != 0) {
qDebug("%s pDataReader is NULL", GET_TASKID(pTaskInfo));
// return NULL;
@ -4081,7 +4076,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
} else if (QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN == type) {
STagScanPhysiNode* pScanPhyNode = (STagScanPhysiNode*)pPhyNode;
int32_t code = getTableList(pHandle->meta, pScanPhyNode, pTableListInfo, pScanPhyNode->node.pConditions);
int32_t code = getTableList(pHandle->meta, pScanPhyNode, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
pTaskInfo->code = terrno;
return NULL;
@ -4139,7 +4134,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
SOperatorInfo** ops = taosMemoryCalloc(size, POINTER_BYTES);
for (int32_t i = 0; i < size; ++i) {
SPhysiNode* pChildNode = (SPhysiNode*)nodesListGetNode(pPhyNode->pChildren, i);
ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableListInfo, pTagCond);
ops[i] = createOperatorTree(pChildNode, pTaskInfo, pHandle, queryId, taskId, pTableListInfo);
if (ops[i] == NULL) {
return NULL;
}
@ -4230,6 +4225,8 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
pOptr = createStreamFinalIntervalOperatorInfo(ops[0], pPhyNode, pTaskInfo, children);
} else if (QUERY_NODE_PHYSICAL_PLAN_SORT == type) {
pOptr = createSortOperatorInfo(ops[0], (SSortPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT == type) {
pOptr = createGroupSortOperatorInfo(ops[0], (SGroupSortPhysiNode*)pPhyNode, pTaskInfo);
} else if (QUERY_NODE_PHYSICAL_PLAN_MERGE == type) {
SMergePhysiNode* pMergePhyNode = (SMergePhysiNode*)pPhyNode;
@ -4341,8 +4338,8 @@ SArray* extractColumnInfo(SNodeList* pNodeList) {
}
tsdbReaderT doCreateDataReader(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId, SNode* pTagCond) {
int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond);
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId) {
int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -4468,6 +4465,7 @@ int32_t createDataSinkParam(SDataSinkNode* pNode, void** pParam, qTaskInfo_t* pT
return TSDB_CODE_OUT_OF_MEMORY;
}
int32_t tbNum = taosArrayGetSize(pTask->tableqinfoList.pTableList);
pDeleterParam->suid = pTask->tableqinfoList.suid;
pDeleterParam->pUidList = taosArrayInit(tbNum, sizeof(uint64_t));
if (NULL == pDeleterParam->pUidList) {
taosMemoryFree(pDeleterParam);
@ -4500,8 +4498,10 @@ int32_t createExecTaskInfoImpl(SSubplan* pPlan, SExecTaskInfo** pTaskInfo, SRead
}
(*pTaskInfo)->sql = sql;
(*pTaskInfo)->tableqinfoList.pTagCond = pPlan->pTagCond;
(*pTaskInfo)->tableqinfoList.pTagIndexCond = pPlan->pTagIndexCond;
(*pTaskInfo)->pRoot = createOperatorTree(pPlan->pNode, *pTaskInfo, pHandle, queryId, taskId,
&(*pTaskInfo)->tableqinfoList, pPlan->pTagCond);
&(*pTaskInfo)->tableqinfoList);
if (NULL == (*pTaskInfo)->pRoot) {
code = (*pTaskInfo)->code;
goto _complete;

View File

@ -337,7 +337,7 @@ void addTagPseudoColumnData(SReadHandle* pHandle, SExprInfo* pPseudoExpr, int32_
}
for (int32_t i = 0; i < pBlock->info.rows; ++i) {
colDataAppend(pColInfoData, i, data, (data == NULL));
colDataAppend(pColInfoData, i, data, (data == NULL) || (pColInfoData->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data)));
}
if (data && (pColInfoData->info.type != TSDB_DATA_TYPE_JSON) && p != NULL &&
@ -537,7 +537,7 @@ SOperatorInfo* createTableScanOperatorInfo(STableScanPhysiNode* pTableScanNode,
// taosSsleep(20);
SDataBlockDescNode* pDescNode = pTableScanNode->scan.node.pOutputDataBlockDesc;
int32_t numOfCols = 0;
int32_t numOfCols = 0;
SArray* pColList = extractColMatchInfo(pTableScanNode->scan.pScanCols, pDescNode, &numOfCols, COL_MATCH_FROM_COL_ID);
int32_t code = initQueryTableDataCond(&pInfo->cond, pTableScanNode);
@ -916,6 +916,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
size_t total = taosArrayGetSize(pInfo->pBlockLists);
// TODO: refactor
if (pInfo->blockType == STREAM_DATA_TYPE_SSDATA_BLOCK) {
if (pInfo->validBlockIndex >= total) {
/*doClearBufferedBlocks(pInfo);*/
@ -927,7 +928,7 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
SSDataBlock* pBlock = taosArrayGetP(pInfo->pBlockLists, current);
blockDataUpdateTsWindow(pBlock, 0);
return pBlock;
} else {
} else if (pInfo->blockType == STREAM_DATA_TYPE_SUBMIT_BLOCK) {
if (pInfo->scanMode == STREAM_SCAN_FROM_RES) {
blockDataDestroy(pInfo->pUpdateRes);
pInfo->scanMode = STREAM_SCAN_FROM_READERHANDLE;
@ -1061,6 +1062,15 @@ static SSDataBlock* doStreamBlockScan(SOperatorInfo* pOperator) {
}
return (pBlockInfo->rows == 0) ? NULL : pInfo->pRes;
} else if (pInfo->blockType == STREAM_DATA_TYPE_FROM_SNAPSHOT) {
SSDataBlock* pResult = doTableScan(pInfo->pOperatorDumy);
if (pResult) {
return pResult->info.rows > 0 ? pResult : NULL;
}
return NULL;
} else {
ASSERT(0);
return NULL;
}
}
@ -1547,11 +1557,14 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
return NULL;
}
int32_t msgType = (strcasecmp(name, TSDB_INS_TABLE_DNODE_VARIABLES) == 0) ? TDMT_DND_SYSTABLE_RETRIEVE : TDMT_MND_SYSTABLE_RETRIEVE;
pMsgSendInfo->param = pOperator;
pMsgSendInfo->msgInfo.pData = buf1;
pMsgSendInfo->msgInfo.len = contLen;
pMsgSendInfo->msgType = TDMT_MND_SYSTABLE_RETRIEVE;
pMsgSendInfo->msgType = msgType;
pMsgSendInfo->fp = loadSysTableCallback;
pMsgSendInfo->requestId = pTaskInfo->id.queryId;
int64_t transporterId = 0;
int32_t code =
@ -1586,6 +1599,8 @@ static SSDataBlock* doSysTableScan(SOperatorInfo* pOperator) {
taosMemoryFree(pRsp);
if (pInfo->pRes->info.rows > 0) {
return pInfo->pRes;
} else if (pOperator->status == OP_EXEC_DONE) {
return NULL;
}
}
}
@ -1828,7 +1843,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
} else {
data = (char*)p;
}
colDataAppend(pDst, count, data, (data == NULL));
colDataAppend(pDst, count, data, (data == NULL) || (pDst->info.type == TSDB_DATA_TYPE_JSON && tTagIsJsonNull(data)));
if (pDst->info.type != TSDB_DATA_TYPE_JSON && p != NULL && IS_VAR_DATA_TYPE(((const STagVal*)p)->type) &&
data != NULL) {
@ -1851,9 +1866,7 @@ static SSDataBlock* doTagScan(SOperatorInfo* pOperator) {
}
pRes->info.rows = count;
doFilter(pInfo->pFilterNode, pRes);
pOperator->resultInfo.totalRows += pRes->info.rows;
pOperator->resultInfo.totalRows += count;
return (pRes->info.rows == 0) ? NULL : pInfo->pRes;
}
@ -1883,13 +1896,11 @@ SOperatorInfo* createTagScanOperatorInfo(SReadHandle* pReadHandle, STagScanPhysi
goto _error;
}
pInfo->pTableList = pTableListInfo;
pInfo->pColMatchInfo = colList;
pInfo->pRes = createResDataBlock(pDescNode);
pInfo->readHandle = *pReadHandle;
pInfo->curPos = 0;
pInfo->pFilterNode = pPhyNode->node.pConditions;
pInfo->pTableList = pTableListInfo;
pInfo->pColMatchInfo = colList;
pInfo->pRes = createResDataBlock(pDescNode);
pInfo->readHandle = *pReadHandle;
pInfo->curPos = 0;
pOperator->name = "TagScanOperator";
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TAG_SCAN;
@ -1914,6 +1925,12 @@ _error:
}
typedef struct STableMergeScanInfo {
STableListInfo* tableListInfo;
int32_t tableStartIndex;
int32_t tableEndIndex;
bool hasGroupId;
uint64_t groupId;
SArray* dataReaders; // array of tsdbReaderT*
SReadHandle readHandle;
@ -1926,11 +1943,9 @@ typedef struct STableMergeScanInfo {
SSDataBlock* pSortInputBlock;
int64_t startTs; // sort start time
bool hasGroupId;
uint64_t groupId;
STupleHandle* prefetchedTuple;
SArray* sortSourceParams;
SArray* sortSourceParams;
uint64_t queryId;
uint64_t taskId;
SFileBlockLoadRecorder readRecorder;
int64_t numOfRows;
@ -1958,8 +1973,6 @@ typedef struct STableMergeScanInfo {
// window to check if current data block needs to be loaded.
SSampleExecInfo sample; // sample execution info
int32_t curTWinIdx;
} STableMergeScanInfo;
int32_t compareTableKeyInfoByGid(const void* p1, const void* p2) {
@ -1969,9 +1982,8 @@ int32_t compareTableKeyInfoByGid(const void* p1, const void* p2) {
}
int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId,
SNode* pTagCond) {
int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo, pTagCond);
STableListInfo* pTableListInfo, uint64_t queryId, uint64_t taskId) {
int32_t code = getTableList(pHandle->meta, &pTableScanNode->scan, pTableListInfo);
if (code != TSDB_CODE_SUCCESS) {
return code;
}
@ -1990,11 +2002,10 @@ int32_t createScanTableListInfo(STableScanPhysiNode* pTableScanNode, SReadHandle
}
int32_t doCreateMultipleDataReaders(STableScanPhysiNode* pTableScanNode, SReadHandle* pHandle,
STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId,
uint64_t taskId) {
STableListInfo* pTableListInfo, SArray* arrayReader, uint64_t queryId,
uint64_t taskId) {
SQueryTableDataCond cond = {0};
int32_t code = initQueryTableDataCond(&cond, pTableScanNode);
int32_t code = initQueryTableDataCond(&cond, pTableScanNode);
if (code != TSDB_CODE_SUCCESS) {
goto _error;
}
@ -2017,6 +2028,24 @@ _error:
return code;
}
int32_t createMultipleDataReaders(SQueryTableDataCond* pQueryCond, SReadHandle* pHandle, STableListInfo* pTableListInfo,
int32_t tableStartIdx, int32_t tableEndIdx, SArray* arrayReader, uint64_t queryId,
uint64_t taskId) {
for (int32_t i = tableStartIdx; i <= tableEndIdx; ++i) {
STableListInfo* subListInfo = taosMemoryCalloc(1, sizeof(STableListInfo));  // allocate the struct itself, not the size of the pointer
subListInfo->pTableList = taosArrayInit(1, sizeof(STableKeyInfo));
taosArrayPush(subListInfo->pTableList, taosArrayGet(pTableListInfo->pTableList, i));
tsdbReaderT* pReader = tsdbReaderOpen(pHandle->vnode, pQueryCond, subListInfo, queryId, taskId);
taosArrayPush(arrayReader, &pReader);
taosArrayDestroy(subListInfo->pTableList);
taosMemoryFree(subListInfo);
}
return TSDB_CODE_SUCCESS;
}
static int32_t loadDataBlockFromOneTable(SOperatorInfo* pOperator, STableMergeScanInfo* pTableScanInfo,
int32_t readerIdx, SSDataBlock* pBlock, uint32_t* status) {
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
@ -2196,22 +2225,47 @@ SArray* generateSortByTsInfo(int32_t order) {
return pList;
}
int32_t doOpenTableMergeScanOperator(SOperatorInfo* pOperator) {
int32_t startGroupTableMergeScan(SOperatorInfo* pOperator) {
STableMergeScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
if (OPTR_IS_OPENED(pOperator)) {
return TSDB_CODE_SUCCESS;
{
size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList);
int32_t i = pInfo->tableStartIndex + 1;
for (; i < tableListSize; ++i) {
STableKeyInfo* tableKeyInfo = taosArrayGet(pInfo->tableListInfo->pTableList, i);
if (tableKeyInfo->groupId != pInfo->groupId) {
break;
}
}
pInfo->tableEndIndex = i - 1;
}
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
int32_t tableStartIdx = pInfo->tableStartIndex;
int32_t tableEndIdx = pInfo->tableEndIndex;
STableListInfo* tableListInfo = pInfo->tableListInfo;
createMultipleDataReaders(&pInfo->cond, &pInfo->readHandle, tableListInfo, tableStartIdx, tableEndIdx,
pInfo->dataReaders, pInfo->queryId, pInfo->taskId);
// todo the total available buffer should be determined by total capacity of buffer of this task.
// the additional one is reserved for merge result
pInfo->sortBufSize = pInfo->bufPageSize * (tableEndIdx - tableStartIdx + 1 + 1);
int32_t numOfBufPage = pInfo->sortBufSize / pInfo->bufPageSize;
pInfo->pSortHandle = tsortCreateSortHandle(pInfo->pSortInfo, SORT_MULTISOURCE_MERGE, pInfo->bufPageSize, numOfBufPage,
pInfo->pSortInputBlock, pTaskInfo->id.str);
tsortSetFetchRawDataFp(pInfo->pSortHandle, getTableDataBlock, NULL, NULL);
size_t numReaders = taosArrayGetSize(pInfo->dataReaders);
for (int32_t i = 0; i < numReaders; ++i) {
STableMergeScanSortSourceParam param = {0};
param.readerIdx = i;
param.pOperator = pOperator;
param.inputBlock = createOneDataBlock(pInfo->pResBlock, false);
taosArrayPush(pInfo->sortSourceParams, &param);
}
for (int32_t i = 0; i < numReaders; ++i) {
SSortSource* ps = taosMemoryCalloc(1, sizeof(SSortSource));
STableMergeScanSortSourceParam* param = taosArrayGet(pInfo->sortSourceParams, i);
@ -2225,9 +2279,22 @@ int32_t doOpenTableMergeScanOperator(SOperatorInfo* pOperator) {
longjmp(pTaskInfo->env, terrno);
}
pOperator->status = OP_RES_TO_RETURN;
return TSDB_CODE_SUCCESS;
}
int32_t stopGroupTableMergeScan(SOperatorInfo* pOperator) {
STableMergeScanInfo* pInfo = pOperator->info;
SExecTaskInfo* pTaskInfo = pOperator->pTaskInfo;
tsortDestroySortHandle(pInfo->pSortHandle);
taosArrayClear(pInfo->sortSourceParams);
for (int32_t i = 0; i < taosArrayGetSize(pInfo->dataReaders); ++i) {
tsdbReaderT* reader = taosArrayGetP(pInfo->dataReaders, i);
tsdbCleanupReadHandle(reader);
}
taosArrayDestroy(pInfo->dataReaders);
OPTR_SET_OPENED(pOperator);
return TSDB_CODE_SUCCESS;
}
@ -2270,14 +2337,38 @@ SSDataBlock* doTableMergeScan(SOperatorInfo* pOperator) {
if (code != TSDB_CODE_SUCCESS) {
longjmp(pTaskInfo->env, code);
}
size_t tableListSize = taosArrayGetSize(pInfo->tableListInfo->pTableList);
if (!pInfo->hasGroupId) {
pInfo->hasGroupId = true;
SSDataBlock* pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator);
if (pBlock != NULL) {
pOperator->resultInfo.totalRows += pBlock->info.rows;
} else {
doSetOperatorCompleted(pOperator);
if (tableListSize == 0) {
doSetOperatorCompleted(pOperator);
return NULL;
}
pInfo->tableStartIndex = 0;
pInfo->groupId = ((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId;
startGroupTableMergeScan(pOperator);
}
SSDataBlock* pBlock = NULL;
while (pInfo->tableStartIndex < tableListSize) {
pBlock = getSortedTableMergeScanBlockData(pInfo->pSortHandle, pOperator->resultInfo.capacity, pOperator);
if (pBlock != NULL) {
pBlock->info.groupId = pInfo->groupId;
pOperator->resultInfo.totalRows += pBlock->info.rows;
return pBlock;
} else {
stopGroupTableMergeScan(pOperator);
if (pInfo->tableEndIndex >= tableListSize - 1) {
doSetOperatorCompleted(pOperator);
break;
}
pInfo->tableStartIndex = pInfo->tableEndIndex + 1;
pInfo->groupId =
((STableKeyInfo*)taosArrayGet(pInfo->tableListInfo->pTableList, pInfo->tableStartIndex))->groupId;
startGroupTableMergeScan(pOperator);
}
}
return pBlock;
}
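doTableMergeScan now walks the table list one group at a time: the list is ordered by groupId, startGroupTableMergeScan finds the end of the current group and opens one reader per table in that range, and once the group is exhausted the scan advances to the next group. A compact sketch of the grouping walk, assuming only that the list is sorted by groupId:

#include <stdio.h>

typedef struct { long uid; unsigned long groupId; } TableKey;

/* the table list is ordered by groupId; scan it one group at a time by finding
 * the [start, end) range of the current group before moving on */
int main(void) {
  TableKey tables[] = {{1, 10}, {2, 10}, {3, 20}, {4, 30}, {5, 30}};
  int n = 5, start = 0;
  while (start < n) {
    unsigned long gid = tables[start].groupId;
    int end = start + 1;
    while (end < n && tables[end].groupId == gid) ++end;  /* end is one past the last table of the group */
    printf("group %lu: table indexes [%d, %d]\n", gid, start, end - 1);
    /* ...open one reader per table in the range, sort-merge, then tear down... */
    start = end;                                          /* advance to the next group */
  }
  return 0;
}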
@ -2285,17 +2376,10 @@ void destroyTableMergeScanOperatorInfo(void* param, int32_t numOfOutput) {
STableMergeScanInfo* pTableScanInfo = (STableMergeScanInfo*)param;
cleanupQueryTableDataCond(&pTableScanInfo->cond);
for (int32_t i = 0; i < taosArrayGetSize(pTableScanInfo->dataReaders); ++i) {
tsdbReaderT* reader = taosArrayGetP(pTableScanInfo->dataReaders, i);
tsdbCleanupReadHandle(reader);
}
taosArrayDestroy(pTableScanInfo->dataReaders);
if (pTableScanInfo->pColMatchInfo != NULL) {
taosArrayDestroy(pTableScanInfo->pColMatchInfo);
}
taosArrayDestroy(pTableScanInfo->sortSourceParams);
pTableScanInfo->pResBlock = blockDataDestroy(pTableScanInfo->pResBlock);
pTableScanInfo->pSortInputBlock = blockDataDestroy(pTableScanInfo->pSortInputBlock);
@ -2321,8 +2405,9 @@ int32_t getTableMergeScanExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExpla
return TSDB_CODE_SUCCESS;
}
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, SArray* dataReaders,
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo) {
SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanNode, STableListInfo* pTableListInfo,
SReadHandle* readHandle, SExecTaskInfo* pTaskInfo, uint64_t queryId,
uint64_t taskId) {
STableMergeScanInfo* pInfo = taosMemoryCalloc(1, sizeof(STableMergeScanInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL) {
@ -2352,22 +2437,16 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
pInfo->sample.seed = taosGetTimestampSec();
pInfo->dataBlockLoadFlag = pTableScanNode->dataRequired;
pInfo->pFilterNode = pTableScanNode->scan.node.pConditions;
pInfo->dataReaders = dataReaders;
pInfo->tableListInfo = pTableListInfo;
pInfo->scanFlag = MAIN_SCAN;
pInfo->pColMatchInfo = pColList;
pInfo->curTWinIdx = 0;
pInfo->pResBlock = createResDataBlock(pDescNode);
pInfo->dataReaders = taosArrayInit(64, POINTER_BYTES);
pInfo->queryId = queryId;
pInfo->taskId = taskId;
pInfo->sortSourceParams = taosArrayInit(taosArrayGetSize(dataReaders), sizeof(STableMergeScanSortSourceParam));
for (int32_t i = 0; i < taosArrayGetSize(dataReaders); ++i) {
STableMergeScanSortSourceParam* param = taosMemoryCalloc(1, sizeof(STableMergeScanSortSourceParam));
param->readerIdx = i;
param->pOperator = pOperator;
param->inputBlock = createOneDataBlock(pInfo->pResBlock, false);
taosArrayPush(pInfo->sortSourceParams, param);
taosMemoryFree(param);
}
pInfo->sortSourceParams = taosArrayInit(64, sizeof(STableMergeScanSortSourceParam));
pInfo->pSortInfo = generateSortByTsInfo(pInfo->cond.order);
pInfo->pSortInputBlock = createOneDataBlock(pInfo->pResBlock, false);
@ -2375,14 +2454,7 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
int32_t rowSize = pInfo->pResBlock->info.rowSize;
pInfo->bufPageSize = getProperSortPageSize(rowSize);
// todo the total available buffer should be determined by total capacity of buffer of this task.
// the additional one is reserved for merge result
pInfo->sortBufSize = pInfo->bufPageSize * (taosArrayGetSize(dataReaders) + 1);
pInfo->hasGroupId = false;
pInfo->prefetchedTuple = NULL;
pOperator->name = "TableMergeScanOperator";
// TODO : change it
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_TABLE_MERGE_SCAN;
pOperator->blocking = false;
pOperator->status = OP_NOT_OPENED;
@ -2392,8 +2464,8 @@ SOperatorInfo* createTableMergeScanOperatorInfo(STableScanPhysiNode* pTableScanN
initResultSizeInfo(pOperator, 1024);
pOperator->fpSet =
createOperatorFpSet(doOpenTableMergeScanOperator, doTableMergeScan, NULL, NULL, destroyTableMergeScanOperatorInfo,
NULL, NULL, getTableMergeScanExplainExecInfo);
createOperatorFpSet(operatorDummyOpenFn, doTableMergeScan, NULL, NULL, destroyTableMergeScanOperatorInfo, NULL,
NULL, getTableMergeScanExplainExecInfo);
pOperator->cost.openCost = 0;
return pOperator;

View File

@ -424,10 +424,17 @@ int32_t getGroupSortExplainExecInfo(SOperatorInfo* pOptr, void** pOptrExplain, u
return TSDB_CODE_SUCCESS;
}
// TODO:
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SSortPhysiNode* pSortPhyNode,
void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) {
SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
taosArrayDestroy(pInfo->pSortInfo);
taosArrayDestroy(pInfo->pColMatchInfo);
}
SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SGroupSortPhysiNode* pSortPhyNode,
SExecTaskInfo* pTaskInfo) {
SSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SSortOperatorInfo));
SGroupSortOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SGroupSortOperatorInfo));
SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
if (pInfo == NULL || pOperator == NULL /* || rowSize > 100 * 1024 * 1024*/) {
goto _error;
@ -452,8 +459,7 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SSortPhysi
;
pInfo->pColMatchInfo = pColMatchColInfo;
pOperator->name = "GroupSortOperator";
// TODO
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_SORT;
pOperator->operatorType = QUERY_NODE_PHYSICAL_PLAN_GROUP_SORT;
pOperator->blocking = true;
pOperator->status = OP_NOT_OPENED;
pOperator->info = pInfo;
@ -461,7 +467,7 @@ SOperatorInfo* createGroupSortOperatorInfo(SOperatorInfo* downstream, SSortPhysi
pOperator->exprSupp.numOfExprs = numOfCols;
pOperator->pTaskInfo = pTaskInfo;
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doGroupSort, NULL, NULL, destroyOrderOperatorInfo, NULL,
pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doGroupSort, NULL, NULL, destroyGroupSortOperatorInfo, NULL,
NULL, getGroupSortExplainExecInfo);
int32_t code = appendDownstream(pOperator, &downstream, 1);
@ -478,18 +484,6 @@ _error:
return NULL;
}
void destroyGroupSortOperatorInfo(void* param, int32_t numOfOutput) {
SGroupSortOperatorInfo* pInfo = (SGroupSortOperatorInfo*)param;
pInfo->binfo.pRes = blockDataDestroy(pInfo->binfo.pRes);
taosArrayDestroy(pInfo->pSortInfo);
taosArrayDestroy(pInfo->pColMatchInfo);
}
// TODO: sort group
// TODO: msortCompare compare group id in multiway merge sort.
// TODO: table merge scan, group first, then for each group, multiple readers
//=====================================================================================
// Multiway Sort Merge operator
typedef struct SMultiwaySortMergeOperatorInfo {

View File

@ -1656,10 +1656,9 @@ static SSDataBlock* doSessionWindowAgg(SOperatorInfo* pOperator) {
doBuildResultDatablock(pOperator, pBInfo, &pInfo->groupResInfo, pInfo->aggSup.pResultBuf);
if (pBInfo->pRes->info.rows == 0 || !hasDataInGroupInfo(&pInfo->groupResInfo)) {
doSetOperatorCompleted(pOperator);
return NULL;
}
return pBInfo->pRes;
return pBInfo->pRes->info.rows > 0 ? pBInfo->pRes : NULL;
}
int64_t st = taosGetTimestampUs();

View File

@ -351,6 +351,7 @@ static SNode* logicScanCopy(const SScanLogicNode* pSrc, SScanLogicNode* pDst) {
COPY_SCALAR_FIELD(intervalUnit);
COPY_SCALAR_FIELD(slidingUnit);
CLONE_NODE_FIELD(pTagCond);
CLONE_NODE_FIELD(pTagIndexCond);
COPY_SCALAR_FIELD(triggerType);
COPY_SCALAR_FIELD(watermark);
COPY_SCALAR_FIELD(tsColId);

View File

@ -2326,6 +2326,7 @@ static const char* jkSubplanNodeAddr = "NodeAddr";
static const char* jkSubplanRootNode = "RootNode";
static const char* jkSubplanDataSink = "DataSink";
static const char* jkSubplanTagCond = "TagCond";
static const char* jkSubplanTagIndexCond = "TagIndexCond";
static int32_t subplanToJson(const void* pObj, SJson* pJson) {
const SSubplan* pNode = (const SSubplan*)pObj;
@ -2355,6 +2356,9 @@ static int32_t subplanToJson(const void* pObj, SJson* pJson) {
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkSubplanTagCond, nodeToJson, pNode->pTagCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkSubplanTagIndexCond, nodeToJson, pNode->pTagIndexCond);
}
return code;
}
@ -2388,6 +2392,9 @@ static int32_t jsonToSubplan(const SJson* pJson, void* pObj) {
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkSubplanTagCond, (SNode**)&pNode->pTagCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkSubplanTagIndexCond, (SNode**)&pNode->pTagIndexCond);
}
return code;
}
@ -3954,7 +3961,7 @@ static int32_t deleteStmtToJson(const void* pObj, SJson* pJson) {
code = tjsonAddObject(pJson, jkDeleteStmtCountFunc, nodeToJson, pNode->pCountFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddObject(pJson, jkDeleteStmtTagIndexCond, nodeToJson, pNode->pTagIndexCond);
code = tjsonAddObject(pJson, jkDeleteStmtTagIndexCond, nodeToJson, pNode->pTagCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonAddIntegerToObject(pJson, jkDeleteStmtTimeRangeStartKey, pNode->timeRange.skey);
@ -3983,7 +3990,7 @@ static int32_t jsonToDeleteStmt(const SJson* pJson, void* pObj) {
code = jsonToNodeObject(pJson, jkDeleteStmtCountFunc, &pNode->pCountFunc);
}
if (TSDB_CODE_SUCCESS == code) {
code = jsonToNodeObject(pJson, jkDeleteStmtTagIndexCond, &pNode->pTagIndexCond);
code = jsonToNodeObject(pJson, jkDeleteStmtTagIndexCond, &pNode->pTagCond);
}
if (TSDB_CODE_SUCCESS == code) {
code = tjsonGetBigIntValue(pJson, jkDeleteStmtTimeRangeStartKey, &pNode->timeRange.skey);

View File

@ -669,7 +669,7 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyNode(pStmt->pFromTable);
nodesDestroyNode(pStmt->pWhere);
nodesDestroyNode(pStmt->pCountFunc);
nodesDestroyNode(pStmt->pTagIndexCond);
nodesDestroyNode(pStmt->pTagCond);
break;
}
case QUERY_NODE_QUERY: {
@ -688,7 +688,13 @@ void nodesDestroyNode(SNode* pNode) {
SScanLogicNode* pLogicNode = (SScanLogicNode*)pNode;
destroyLogicNode((SLogicNode*)pLogicNode);
nodesDestroyList(pLogicNode->pScanCols);
nodesDestroyList(pLogicNode->pScanPseudoCols);
taosMemoryFreeClear(pLogicNode->pVgroupList);
nodesDestroyList(pLogicNode->pDynamicScanFuncs);
nodesDestroyNode(pLogicNode->pTagCond);
nodesDestroyNode(pLogicNode->pTagIndexCond);
taosArrayDestroy(pLogicNode->pSmaIndexes);
nodesDestroyList(pLogicNode->pPartTags);
break;
}
case QUERY_NODE_LOGIC_PLAN_JOIN: {
@ -897,6 +903,8 @@ void nodesDestroyNode(SNode* pNode) {
nodesDestroyList(pSubplan->pChildren);
nodesDestroyNode((SNode*)pSubplan->pNode);
nodesDestroyNode((SNode*)pSubplan->pDataSink);
nodesDestroyNode((SNode*)pSubplan->pTagCond);
nodesDestroyNode((SNode*)pSubplan->pTagIndexCond);
nodesClearList(pSubplan->pParents);
break;
}
@ -1130,6 +1138,7 @@ void* nodesGetValueFromNode(SValueNode* pNode) {
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_VARCHAR:
case TSDB_DATA_TYPE_VARBINARY:
case TSDB_DATA_TYPE_JSON:
return (void*)pNode->datum.p;
default:
break;
@ -1659,6 +1668,7 @@ int32_t nodesMergeConds(SNode** pDst, SNodeList** pSrc) {
typedef struct SClassifyConditionCxt {
bool hasPrimaryKey;
bool hasTagIndexCol;
bool hasTagCol;
bool hasOtherCol;
} SClassifyConditionCxt;
@ -1670,6 +1680,9 @@ static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) {
pCxt->hasPrimaryKey = true;
} else if (pCol->hasIndex) {
pCxt->hasTagIndexCol = true;
pCxt->hasTagCol = true;
} else if (COLUMN_TYPE_TAG == pCol->colType) {
pCxt->hasTagCol = true;
} else {
pCxt->hasOtherCol = true;
}
@ -1678,23 +1691,31 @@ static EDealRes classifyConditionImpl(SNode* pNode, void* pContext) {
return DEAL_RES_CONTINUE;
}
typedef enum EConditionType { COND_TYPE_PRIMARY_KEY = 1, COND_TYPE_TAG_INDEX, COND_TYPE_NORMAL } EConditionType;
typedef enum EConditionType {
COND_TYPE_PRIMARY_KEY = 1,
COND_TYPE_TAG_INDEX,
COND_TYPE_TAG,
COND_TYPE_NORMAL
} EConditionType;
static EConditionType classifyCondition(SNode* pNode) {
SClassifyConditionCxt cxt = {.hasPrimaryKey = false, .hasTagIndexCol = false, .hasOtherCol = false};
nodesWalkExpr(pNode, classifyConditionImpl, &cxt);
return cxt.hasOtherCol ? COND_TYPE_NORMAL
: (cxt.hasPrimaryKey && cxt.hasTagIndexCol
: (cxt.hasPrimaryKey && cxt.hasTagCol
? COND_TYPE_NORMAL
: (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY : COND_TYPE_TAG_INDEX));
: (cxt.hasPrimaryKey ? COND_TYPE_PRIMARY_KEY
: (cxt.hasTagIndexCol ? COND_TYPE_TAG_INDEX : COND_TYPE_TAG)));
}
static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagCond, SNode** pOtherCond) {
static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
SNode** pOtherCond) {
SLogicConditionNode* pLogicCond = (SLogicConditionNode*)(*pCondition);
int32_t code = TSDB_CODE_SUCCESS;
SNodeList* pPrimaryKeyConds = NULL;
SNodeList* pTagIndexConds = NULL;
SNodeList* pTagConds = NULL;
SNodeList* pOtherConds = NULL;
SNode* pCond = NULL;
@ -1706,6 +1727,14 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S
}
break;
case COND_TYPE_TAG_INDEX:
if (NULL != pTagIndexCond) {
code = nodesListMakeAppend(&pTagIndexConds, nodesCloneNode(pCond));
}
if (NULL != pTagCond) {
code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond));
}
break;
case COND_TYPE_TAG:
if (NULL != pTagCond) {
code = nodesListMakeAppend(&pTagConds, nodesCloneNode(pCond));
}
@ -1723,11 +1752,15 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S
}
SNode* pTempPrimaryKeyCond = NULL;
SNode* pTempTagIndexCond = NULL;
SNode* pTempTagCond = NULL;
SNode* pTempOtherCond = NULL;
if (TSDB_CODE_SUCCESS == code) {
code = nodesMergeConds(&pTempPrimaryKeyCond, &pPrimaryKeyConds);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesMergeConds(&pTempTagIndexCond, &pTagIndexConds);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesMergeConds(&pTempTagCond, &pTagConds);
}
@ -1739,6 +1772,9 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S
if (NULL != pPrimaryKeyCond) {
*pPrimaryKeyCond = pTempPrimaryKeyCond;
}
if (NULL != pTagIndexCond) {
*pTagIndexCond = pTempTagIndexCond;
}
if (NULL != pTagCond) {
*pTagCond = pTempTagCond;
}
@ -1749,9 +1785,11 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S
*pCondition = NULL;
} else {
nodesDestroyList(pPrimaryKeyConds);
nodesDestroyList(pTagIndexConds);
nodesDestroyList(pTagConds);
nodesDestroyList(pOtherConds);
nodesDestroyNode(pTempPrimaryKeyCond);
nodesDestroyNode(pTempTagIndexCond);
nodesDestroyNode(pTempTagCond);
nodesDestroyNode(pTempOtherCond);
}
@ -1759,10 +1797,11 @@ static int32_t partitionLogicCond(SNode** pCondition, SNode** pPrimaryKeyCond, S
return code;
}
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagCond, SNode** pOtherCond) {
int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode** pTagIndexCond, SNode** pTagCond,
SNode** pOtherCond) {
if (QUERY_NODE_LOGIC_CONDITION == nodeType(*pCondition) &&
LOGIC_COND_TYPE_AND == ((SLogicConditionNode*)*pCondition)->condType) {
return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagCond, pOtherCond);
return partitionLogicCond(pCondition, pPrimaryKeyCond, pTagIndexCond, pTagCond, pOtherCond);
}
switch (classifyCondition(*pCondition)) {
@ -1772,6 +1811,21 @@ int32_t nodesPartitionCond(SNode** pCondition, SNode** pPrimaryKeyCond, SNode**
}
break;
case COND_TYPE_TAG_INDEX:
if (NULL != pTagIndexCond) {
*pTagIndexCond = *pCondition;
}
if (NULL != pTagCond) {
SNode* pTempCond = *pCondition;
if (NULL != pTagIndexCond) {
pTempCond = nodesCloneNode(*pCondition);
if (NULL == pTempCond) {
return TSDB_CODE_OUT_OF_MEMORY;
}
}
*pTagCond = pTempCond;
}
break;
case COND_TYPE_TAG:
if (NULL != pTagCond) {
*pTagCond = *pCondition;
}
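nodesPartitionCond now classifies each AND-ed condition as primary-key, indexed-tag, plain-tag, or other, and an indexed-tag condition is also placed into the plain tag bucket so the executor can re-check it. A small standalone illustration of that bucketing (the condition strings and the pre-assigned classes are stand-ins for classifyCondition):

#include <stdio.h>

typedef enum { COND_PRIMARY_KEY = 0, COND_TAG_INDEX, COND_TAG, COND_OTHER } ECondType;
typedef struct { const char *expr; ECondType type; } Cond;

/* partition an AND list of conditions into per-class buckets; an indexed tag
 * condition is also appended to the plain tag bucket, as in partitionLogicCond */
int main(void) {
  Cond conds[] = {
    {"ts > '2022-06-01'", COND_PRIMARY_KEY},
    {"tag1 > 4",          COND_TAG_INDEX},
    {"tag2 = 'hello'",    COND_TAG},
    {"c1 < 10",           COND_OTHER},
  };
  const char *buckets[4][4] = {{NULL}};
  int counts[4] = {0};
  for (int i = 0; i < 4; ++i) {
    buckets[conds[i].type][counts[conds[i].type]++] = conds[i].expr;
    if (conds[i].type == COND_TAG_INDEX) {
      buckets[COND_TAG][counts[COND_TAG]++] = conds[i].expr;  /* indexed tag conds are tag conds too */
    }
  }
  const char *names[4] = {"primary-key", "tag-index", "tag", "other"};
  for (int t = 0; t < 4; ++t) {
    printf("%s:", names[t]);
    for (int i = 0; i < counts[t]; ++i) printf(" (%s)", buckets[t][i]);
    printf("\n");
  }
  return 0;
}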

View File

@ -137,6 +137,9 @@ static int32_t collectMetaKeyFromRealTableImpl(SCollectMetaKeyCxt* pCxt, SRealTa
code = reserveTableIndexInCache(pCxt->pParseCxt->acctId, pRealTable->table.dbName, pRealTable->table.tableName,
pCxt->pMetaCache);
}
if (TSDB_CODE_SUCCESS == code && (0 == strcmp(pRealTable->table.tableName, TSDB_INS_TABLE_DNODE_VARIABLES))) {
code = reserveDnodeRequiredInCache(pCxt->pMetaCache);
}
return code;
}

View File

@ -2013,7 +2013,7 @@ static int32_t getFillTimeRange(STranslateContext* pCxt, SNode* pWhere, STimeWin
}
SNode* pPrimaryKeyCond = NULL;
nodesPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL);
nodesPartitionCond(&pCond, &pPrimaryKeyCond, NULL, NULL, NULL);
int32_t code = TSDB_CODE_SUCCESS;
if (NULL != pPrimaryKeyCond) {
@ -2161,9 +2161,6 @@ static EDealRes checkStateExpr(SNode* pNode, void* pContext) {
if (COLUMN_TYPE_TAG == pCol->colType) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_STATE_WIN_COL);
}
if (TSDB_SUPER_TABLE == pCol->tableType) {
return generateDealNodeErrMsg(pCxt, TSDB_CODE_PAR_INVALID_STATE_WIN_TABLE);
}
}
return DEAL_RES_CONTINUE;
}
@ -2685,7 +2682,7 @@ static int32_t partitionDeleteWhere(STranslateContext* pCxt, SDeleteStmt* pDelet
SNode* pPrimaryKeyCond = NULL;
SNode* pOtherCond = NULL;
int32_t code = nodesPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, &pDelete->pTagIndexCond, &pOtherCond);
int32_t code = nodesPartitionCond(&pDelete->pWhere, &pPrimaryKeyCond, NULL, &pDelete->pTagCond, &pOtherCond);
if (TSDB_CODE_SUCCESS == code && NULL != pOtherCond) {
code = generateSyntaxErrMsg(&pCxt->msgBuf, TSDB_CODE_PAR_INVALID_DELETE_WHERE);
}
@ -4614,6 +4611,25 @@ static int32_t extractShowCreateTableResultSchema(int32_t* numOfCols, SSchema**
return TSDB_CODE_SUCCESS;
}
static int32_t extractShowLocalVariablesResultSchema(int32_t* numOfCols, SSchema** pSchema) {
*numOfCols = 2;
*pSchema = taosMemoryCalloc((*numOfCols), sizeof(SSchema));
if (NULL == (*pSchema)) {
return TSDB_CODE_OUT_OF_MEMORY;
}
(*pSchema)[0].type = TSDB_DATA_TYPE_BINARY;
(*pSchema)[0].bytes = TSDB_CONFIG_OPTION_LEN;
strcpy((*pSchema)[0].name, "name");
(*pSchema)[1].type = TSDB_DATA_TYPE_BINARY;
(*pSchema)[1].bytes = TSDB_CONFIG_VALUE_LEN;
strcpy((*pSchema)[1].name, "value");
return TSDB_CODE_SUCCESS;
}
int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pSchema) {
if (NULL == pRoot) {
return TSDB_CODE_SUCCESS;
@ -4632,6 +4648,8 @@ int32_t extractResultSchema(const SNode* pRoot, int32_t* numOfCols, SSchema** pS
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
return extractShowCreateTableResultSchema(numOfCols, pSchema);
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
return extractShowLocalVariablesResultSchema(numOfCols, pSchema);
default:
break;
}
@ -5948,12 +5966,12 @@ static int32_t setQuery(STranslateContext* pCxt, SQuery* pQuery) {
case QUERY_NODE_SHOW_CREATE_DATABASE_STMT:
case QUERY_NODE_SHOW_CREATE_TABLE_STMT:
case QUERY_NODE_SHOW_CREATE_STABLE_STMT:
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
pQuery->execMode = QUERY_EXEC_MODE_LOCAL;
pQuery->haveResultSet = true;
break;
case QUERY_NODE_RESET_QUERY_CACHE_STMT:
case QUERY_NODE_ALTER_LOCAL_STMT:
case QUERY_NODE_SHOW_LOCAL_VARIABLES_STMT:
pQuery->execMode = QUERY_EXEC_MODE_LOCAL;
break;
default:

View File

@ -928,7 +928,12 @@ int32_t reserveDnodeRequiredInCache(SParseMetaCache* pMetaCache) {
}
int32_t getDnodeListFromCache(SParseMetaCache* pMetaCache, SArray** pDnodes) {
*pDnodes = taosArrayDup(pMetaCache->pDnodes);
SMetaRes* pRes = taosArrayGet(pMetaCache->pDnodes, 0);
if (pRes->code) {
return pRes->code;
}
*pDnodes = taosArrayDup((SArray*)pRes->pRes);
if (NULL == *pDnodes) {
return TSDB_CODE_OUT_OF_MEMORY;
}

View File

@ -166,10 +166,13 @@ class MockCatalogServiceImpl {
}
int32_t catalogGetDnodeList(SArray** pDnodes) const {
*pDnodes = taosArrayInit(dnode_.size(), sizeof(SEpSet));
SMetaRes res = {0};
res.pRes = taosArrayInit(dnode_.size(), sizeof(SEpSet));
for (const auto& dnode : dnode_) {
taosArrayPush(*pDnodes, &dnode.second);
taosArrayPush((SArray*)res.pRes, &dnode.second);
}
*pDnodes = taosArrayInit(1, sizeof(SMetaRes));
taosArrayPush(*pDnodes, &res);
return TSDB_CODE_SUCCESS;
}

View File

@ -1160,8 +1160,8 @@ static int32_t createDeleteScanLogicNode(SLogicPlanContext* pCxt, SDeleteStmt* p
}
}
if (TSDB_CODE_SUCCESS == code && NULL != pDelete->pTagIndexCond) {
pScan->pTagCond = nodesCloneNode(pDelete->pTagIndexCond);
if (TSDB_CODE_SUCCESS == code && NULL != pDelete->pTagCond) {
pScan->pTagCond = nodesCloneNode(pDelete->pTagCond);
if (NULL == pScan->pTagCond) {
code = TSDB_CODE_OUT_OF_MEMORY;
}

View File

@ -15,7 +15,6 @@
#include "filter.h"
#include "functionMgt.h"
#include "index.h"
#include "planInt.h"
#include "ttime.h"
@ -309,32 +308,6 @@ static int32_t cpdCalcTimeRange(SOptimizeContext* pCxt, SScanLogicNode* pScan, S
return code;
}
static int32_t cpdApplyTagIndex(SScanLogicNode* pScan, SNode** pTagCond, SNode** pOtherCond) {
int32_t code = TSDB_CODE_SUCCESS;
SIdxFltStatus idxStatus = idxGetFltStatus(*pTagCond);
switch (idxStatus) {
case SFLT_NOT_INDEX:
code = cpdCondAppend(pOtherCond, pTagCond);
break;
case SFLT_COARSE_INDEX:
pScan->pTagCond = nodesCloneNode(*pTagCond);
if (NULL == pScan->pTagCond) {
code = TSDB_CODE_OUT_OF_MEMORY;
break;
}
code = cpdCondAppend(pOtherCond, pTagCond);
break;
case SFLT_ACCURATE_INDEX:
pScan->pTagCond = *pTagCond;
*pTagCond = NULL;
break;
default:
code = TSDB_CODE_FAILED;
break;
}
return code;
}
static int32_t cpdOptimizeScanCondition(SOptimizeContext* pCxt, SScanLogicNode* pScan) {
if (NULL == pScan->node.pConditions || OPTIMIZE_FLAG_TEST_MASK(pScan->node.optimizedFlag, OPTIMIZE_FLAG_CPD) ||
TSDB_SYSTEM_TABLE == pScan->tableType) {
@ -342,15 +315,12 @@ static int32_t cpdOptimizeScanCondition(SOptimizeContext* pCxt, SScanLogicNode*
}
SNode* pPrimaryKeyCond = NULL;
SNode* pTagCond = NULL;
SNode* pOtherCond = NULL;
int32_t code = nodesPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pTagCond, &pOtherCond);
int32_t code = nodesPartitionCond(&pScan->node.pConditions, &pPrimaryKeyCond, &pScan->pTagIndexCond, &pScan->pTagCond,
&pOtherCond);
if (TSDB_CODE_SUCCESS == code && NULL != pPrimaryKeyCond) {
code = cpdCalcTimeRange(pCxt, pScan, &pPrimaryKeyCond, &pOtherCond);
}
if (TSDB_CODE_SUCCESS == code && NULL != pTagCond) {
code = cpdApplyTagIndex(pScan, &pTagCond, &pOtherCond);
}
if (TSDB_CODE_SUCCESS == code) {
pScan->node.pConditions = pOtherCond;
}

View File

@ -436,6 +436,15 @@ static int32_t createScanPhysiNodeFinalize(SPhysiPlanContext* pCxt, SSubplan* pS
}
}
if (TSDB_CODE_SUCCESS == code) {
if (NULL != pScanLogicNode->pTagIndexCond) {
pSubplan->pTagIndexCond = nodesCloneNode(pScanLogicNode->pTagIndexCond);
if (NULL == pSubplan->pTagIndexCond) {
code = TSDB_CODE_OUT_OF_MEMORY;
}
}
}
if (TSDB_CODE_SUCCESS == code) {
*pPhyNode = (SPhysiNode*)pScanPhysiNode;
} else {

View File

@ -166,6 +166,31 @@ static bool stbSplHasMultiTbScan(bool streamQuery, SLogicNode* pNode) {
return (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pChild) && stbSplIsMultiTbScan(streamQuery, (SScanLogicNode*)pChild));
}
static bool stbSplNeedSplitWindow(bool streamQuery, SLogicNode* pNode) {
SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode;
if (WINDOW_TYPE_INTERVAL == pWindow->winType) {
return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
}
if (WINDOW_TYPE_SESSION == pWindow->winType) {
if (!streamQuery) {
return stbSplHasMultiTbScan(streamQuery, pNode);
} else {
return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
}
}
if (WINDOW_TYPE_STATE == pWindow->winType) {
if (!streamQuery) {
return stbSplHasMultiTbScan(streamQuery, pNode);
} else {
return false;
}
}
return false;
}
static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) {
switch (nodeType(pNode)) {
case QUERY_NODE_LOGIC_PLAN_SCAN:
@ -174,13 +199,8 @@ static bool stbSplNeedSplit(bool streamQuery, SLogicNode* pNode) {
return !(((SJoinLogicNode*)pNode)->isSingleTableJoin);
case QUERY_NODE_LOGIC_PLAN_AGG:
return !stbSplHasGatherExecFunc(((SAggLogicNode*)pNode)->pAggFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
case QUERY_NODE_LOGIC_PLAN_WINDOW: {
SWindowLogicNode* pWindow = (SWindowLogicNode*)pNode;
if (WINDOW_TYPE_STATE == pWindow->winType || (!streamQuery && WINDOW_TYPE_SESSION == pWindow->winType)) {
return false;
}
return !stbSplHasGatherExecFunc(pWindow->pFuncs) && stbSplHasMultiTbScan(streamQuery, pNode);
}
case QUERY_NODE_LOGIC_PLAN_WINDOW:
return stbSplNeedSplitWindow(streamQuery, pNode);
case QUERY_NODE_LOGIC_PLAN_SORT:
return stbSplHasMultiTbScan(streamQuery, pNode);
default:
@ -477,11 +497,64 @@ static int32_t stbSplSplitSessionForStream(SSplitContext* pCxt, SStableSplitInfo
return code;
}
static void splSetTableScanType(SLogicNode* pNode, EScanType scanType) {
if (QUERY_NODE_LOGIC_PLAN_SCAN == nodeType(pNode)) {
((SScanLogicNode*)pNode)->scanType = scanType;
} else {
if (1 == LIST_LENGTH(pNode->pChildren)) {
splSetTableScanType((SLogicNode*)nodesListGetNode(pNode->pChildren, 0), scanType);
}
}
}
static int32_t stbSplSplitSessionOrStateForBatch(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
SLogicNode* pWindow = pInfo->pSplitNode;
SLogicNode* pChild = (SLogicNode*)nodesListGetNode(pWindow->pChildren, 0);
SNodeList* pMergeKeys = NULL;
int32_t code = stbSplCreateMergeKeysByPrimaryKey(((SWindowLogicNode*)pWindow)->pTspk, &pMergeKeys);
if (TSDB_CODE_SUCCESS == code) {
code = stbSplCreateMergeNode(pCxt, pInfo->pSubplan, pChild, pMergeKeys, (SLogicNode*)pChild);
}
if (TSDB_CODE_SUCCESS == code) {
code = nodesListMakeStrictAppend(&pInfo->pSubplan->pChildren,
(SNode*)splCreateScanSubplan(pCxt, pChild, SPLIT_FLAG_STABLE_SPLIT));
}
if (TSDB_CODE_SUCCESS == code) {
splSetTableScanType(pChild, SCAN_TYPE_TABLE_MERGE);
++(pCxt->groupId);
}
if (TSDB_CODE_SUCCESS == code) {
pInfo->pSubplan->subplanType = SUBPLAN_TYPE_MERGE;
SPLIT_FLAG_SET_MASK(pInfo->pSubplan->splitFlag, SPLIT_FLAG_STABLE_SPLIT);
} else {
nodesDestroyList(pMergeKeys);
}
return code;
}
static int32_t stbSplSplitSession(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
if (pCxt->pPlanCxt->streamQuery) {
return stbSplSplitSessionForStream(pCxt, pInfo);
} else {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
return stbSplSplitSessionOrStateForBatch(pCxt, pInfo);
}
}
static int32_t stbSplSplitStateForStream(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
return TSDB_CODE_PLAN_INTERNAL_ERROR;
}
static int32_t stbSplSplitState(SSplitContext* pCxt, SStableSplitInfo* pInfo) {
if (pCxt->pPlanCxt->streamQuery) {
return stbSplSplitStateForStream(pCxt, pInfo);
} else {
return stbSplSplitSessionOrStateForBatch(pCxt, pInfo);
}
}
@ -511,6 +584,8 @@ static int32_t stbSplSplitWindowForMergeTable(SSplitContext* pCxt, SStableSplitI
return stbSplSplitInterval(pCxt, pInfo);
case WINDOW_TYPE_SESSION:
return stbSplSplitSession(pCxt, pInfo);
case WINDOW_TYPE_STATE:
return stbSplSplitState(pCxt, pInfo);
default:
break;
}

View File

@ -40,6 +40,8 @@ TEST_F(PlanOptimizeTest, ConditionPushDown) {
run("SELECT ts, c1 FROM st1 WHERE tag1 > 4 or tag1 < 2");
run("SELECT ts, c1 FROM st1 WHERE tag1 > 4 AND tag2 = 'hello'");
run("SELECT ts, c1 FROM st1 WHERE tag1 > 4 AND tag2 = 'hello' AND c1 > 10");
}
TEST_F(PlanOptimizeTest, orderByPrimaryKey) {

View File

@ -34,3 +34,13 @@ TEST_F(PlanSessionTest, selectFunc) {
// select function along with the columns of select row, and with SESSION clause
run("SELECT MAX(c1), c2 FROM t1 SESSION(ts, 10s)");
}
TEST_F(PlanSessionTest, stable) {
useDb("root", "test");
// select function for SESSION clause
run("SELECT MAX(c1), MIN(c1) FROM st1 SESSION(ts, 10s)");
// select function along with the columns of select row, and with SESSION clause
run("SELECT MAX(c1), c2 FROM st1 SESSION(ts, 10s)");
run("SELECT count(ts) FROM st1 PARTITION BY c1 SESSION(ts, 10s)");
}

View File

@ -40,3 +40,12 @@ TEST_F(PlanStateTest, selectFunc) {
// select function along with the columns of select row, and with STATE_WINDOW clause
run("SELECT MAX(c1), c2 FROM t1 STATE_WINDOW(c3)");
}
TEST_F(PlanStateTest, stable) {
useDb("root", "test");
// select function for STATE_WINDOW clause
run("SELECT MAX(c1), MIN(c1) FROM st1 STATE_WINDOW(c2)");
// select function along with the columns of select row, and with STATE_WINDOW clause
run("SELECT MAX(c1), c2 FROM st1 STATE_WINDOW(c2)");
}

View File

@ -126,6 +126,25 @@ int32_t queryBuildQnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t
return TSDB_CODE_SUCCESS;
}
int32_t queryBuildDnodeListMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
}
SDnodeListReq dnodeListReq = {0};
dnodeListReq.rowNum = -1;
int32_t bufLen = tSerializeSDnodeListReq(NULL, 0, &dnodeListReq);
void *pBuf = (*mallcFp)(bufLen);
tSerializeSDnodeListReq(pBuf, bufLen, &dnodeListReq);
*msg = pBuf;
*msgLen = bufLen;
return TSDB_CODE_SUCCESS;
}
int32_t queryBuildGetDBCfgMsg(void *input, char **msg, int32_t msgSize, int32_t *msgLen, void*(*mallcFp)(int32_t)) {
if (NULL == msg || NULL == msgLen) {
return TSDB_CODE_TSC_INVALID_INPUT;
@ -428,6 +447,27 @@ int32_t queryProcessQnodeListRsp(void *output, char *msg, int32_t msgSize) {
return code;
}
int32_t queryProcessDnodeListRsp(void *output, char *msg, int32_t msgSize) {
SDnodeListRsp out = {0};
int32_t code = 0;
if (NULL == output || NULL == msg || msgSize <= 0) {
code = TSDB_CODE_TSC_INVALID_INPUT;
return code;
}
if (tDeserializeSDnodeListRsp(msg, msgSize, &out) != 0) {
qError("invalid dnode list rsp msg, msgSize:%d", msgSize);
code = TSDB_CODE_INVALID_MSG;
return code;
}
*(SArray**)output = out.dnodeList;
return code;
}
int32_t queryProcessGetDbCfgRsp(void *output, char *msg, int32_t msgSize) {
SDbCfgRsp out = {0};
@ -535,6 +575,7 @@ void initQueryModuleMsgHandle() {
queryBuildMsg[TMSG_INDEX(TDMT_MND_TABLE_META)] = queryBuildTableMetaReqMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_USE_DB)] = queryBuildUseDbMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_QNODE_LIST)] = queryBuildQnodeListMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_DNODE_LIST)] = queryBuildDnodeListMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_DB_CFG)] = queryBuildGetDBCfgMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_GET_INDEX)] = queryBuildGetIndexMsg;
queryBuildMsg[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)] = queryBuildRetrieveFuncMsg;
@ -547,6 +588,7 @@ void initQueryModuleMsgHandle() {
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_TABLE_META)] = queryProcessTableMetaRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_USE_DB)] = queryProcessUseDBRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_QNODE_LIST)] = queryProcessQnodeListRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_DNODE_LIST)] = queryProcessDnodeListRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_DB_CFG)] = queryProcessGetDbCfgRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_GET_INDEX)] = queryProcessGetIndexRsp;
queryProcessMsgRsp[TMSG_INDEX(TDMT_MND_RETRIEVE_FUNC)] = queryProcessRetrieveFuncRsp;

View File

@ -271,7 +271,7 @@ int32_t qwGetDeleteResFromSink(QW_FPARAMS_DEF, SQWTaskCtx *ctx, int32_t *dataLen
SDeleterRes* pDelRes = (SDeleterRes*)output.pData;
rsp.affectedRows = pDelRes->affectedRows;
pRes->uid = pDelRes->uid;
pRes->suid = pDelRes->suid;
pRes->uidList = pDelRes->uidList;
pRes->skey = pDelRes->skey;
pRes->ekey = pDelRes->ekey;

View File

@ -23,6 +23,7 @@
#include "sclvector.h"
#include "tcompare.h"
#include "tdatablock.h"
#include "tdataformat.h"
#include "ttypes.h"
#include "ttime.h"
@ -506,6 +507,16 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t
}
}
// if the types are not comparable
if((IS_NUMERIC_TYPE(typeLeft) && !IS_NUMERIC_TYPE(typeRight)) ||
(IS_NUMERIC_TYPE(typeRight) && !IS_NUMERIC_TYPE(typeLeft)) ||
(IS_VAR_DATA_TYPE(typeLeft) && !IS_VAR_DATA_TYPE(typeRight)) ||
(IS_VAR_DATA_TYPE(typeRight) && !IS_VAR_DATA_TYPE(typeLeft)) ||
((typeLeft == TSDB_DATA_TYPE_BOOL) && (typeRight != TSDB_DATA_TYPE_BOOL)) ||
((typeRight == TSDB_DATA_TYPE_BOOL) && (typeLeft != TSDB_DATA_TYPE_BOOL)))
return false;
if(typeLeft == TSDB_DATA_TYPE_NULL || typeRight == TSDB_DATA_TYPE_NULL){
*isNull = true;
return true;
@ -519,24 +530,28 @@ bool convertJsonValue(__compar_fn_t *fp, int32_t optr, int8_t typeLeft, int8_t t
*fp = filterGetCompFunc(type, optr);
if(IS_NUMERIC_TYPE(type) || IS_FLOAT_TYPE(type)){
if(IS_NUMERIC_TYPE(type)){
if(typeLeft == TSDB_DATA_TYPE_NCHAR) {
convertNcharToDouble(*pLeftData, pLeftOut);
*pLeftData = pLeftOut;
ASSERT(0);
// convertNcharToDouble(*pLeftData, pLeftOut);
// *pLeftData = pLeftOut;
} else if(typeLeft == TSDB_DATA_TYPE_BINARY) {
convertBinaryToDouble(*pLeftData, pLeftOut);
*pLeftData = pLeftOut;
ASSERT(0);
// convertBinaryToDouble(*pLeftData, pLeftOut);
// *pLeftData = pLeftOut;
} else if(typeLeft != type) {
convertNumberToNumber(*pLeftData, pLeftOut, typeLeft, type);
*pLeftData = pLeftOut;
}
if(typeRight == TSDB_DATA_TYPE_NCHAR) {
convertNcharToDouble(*pRightData, pRightOut);
*pRightData = pRightOut;
ASSERT(0);
// convertNcharToDouble(*pRightData, pRightOut);
// *pRightData = pRightOut;
} else if(typeRight == TSDB_DATA_TYPE_BINARY) {
convertBinaryToDouble(*pRightData, pRightOut);
*pRightData = pRightOut;
ASSERT(0);
// convertBinaryToDouble(*pRightData, pRightOut);
// *pRightData = pRightOut;
} else if(typeRight != type) {
convertNumberToNumber(*pRightData, pRightOut, typeRight, type);
*pRightData = pRightOut;
@ -1693,6 +1708,13 @@ void vectorIsTrue(SScalarParam* pLeft, SScalarParam* pRight, SScalarParam *pOut,
STagVal getJsonValue(char *json, char *key, bool *isExist) {
STagVal val = {.pKey = key};
if (tTagIsJson((const STag *)json) == false){
if(isExist){
*isExist = false;
}
return val;
}
bool find = tTagGet(((const STag *)json), &val); // a null json value and a non-existent key are different cases
if(isExist){
*isExist = find;

View File

@ -1310,11 +1310,11 @@ TEST(columnTest, json_column_logic_op) {
printf("--------------------json string--0 {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n");
key = "k2";
bool eRes1[len+len1] = {false, false, true, true, false, false, false, true, false, true, false, true, true};
bool eRes1[len+len1] = {false, false, false, false, false, false, false, true, false, true, false, true, true};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes1[i], op[i], false);
}
bool eRes_1[len0] = {true, true, false, false, false, false};
bool eRes_1[len0] = {false, false, false, false, false, false};
for(int i = 0; i < len0; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_1[i], op[i], true);
}
@ -1346,11 +1346,11 @@ TEST(columnTest, json_column_logic_op) {
printf("--------------------json bool--1 {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n");
key = "k4";
bool eRes3[len+len1] = {false, false, true, true, false, true, false, true, true, false, false, false, false};
bool eRes3[len+len1] = {false, false, false, false, false, false, false, true, true, false, false, false, false};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes3[i], op[i], false);
}
bool eRes_3[len0] = {false, true, false, false, false, true};
bool eRes_3[len0] = {false, false, false, false, false, false};
for(int i = 0; i < len0; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_3[i], op[i], true);
}
@ -1419,11 +1419,11 @@ TEST(columnTest, json_column_logic_op) {
printf("--------------------json bool-- 0 {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n");
key = "k8";
bool eRes7[len+len1] = {false, false, true, true, false, false, false, true, false, false, false, false, false};
bool eRes7[len+len1] = {false, false, false, false, false, false, false, true, false, false, false, false, false};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes7[i], op[i], false);
}
bool eRes_7[len0] = {true, true, false, false, false, false};
bool eRes_7[len0] = {false, false, false, false, false, false};
for(int i = 0; i < len0; i++) {
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_7[i], op[i], true);
}
@ -1438,11 +1438,11 @@ TEST(columnTest, json_column_logic_op) {
printf("--------------------json string-- 6.6hello {1, 8, 2, 2, 3, 0, 0, 0, 0}-------------------\n");
key = "k9";
bool eRes8[len+len1] = {true, false, false, false, false, true, false, true, true, false, true, false, true};
bool eRes8[len+len1] = {false, false, false, false, false, false, false, true, true, false, true, false, true};
for(int i = 0; i < len; i++){
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes8[i], op[i], false);
}
bool eRes_8[len0] = {false, true, true, true, false, true};
bool eRes_8[len0] = {false, false, false, false, false, false};
for(int i = 0; i < len0; i++) {
makeCalculate(row, key, TSDB_DATA_TYPE_INT, &input[i], eRes_8[i], op[i], true);
}

View File

@ -24,13 +24,12 @@ static int32_t streamTaskExecImpl(SStreamTask* pTask, void* data, SArray* pRes)
SStreamTrigger* pTrigger = (SStreamTrigger*)data;
qSetMultiStreamInput(exec, pTrigger->pBlock, 1, STREAM_DATA_TYPE_SSDATA_BLOCK, false);
} else if (pItem->type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->isDataScan);
SStreamDataSubmit* pSubmit = (SStreamDataSubmit*)data;
ASSERT(pTask->inputType == STREAM_INPUT__DATA_SUBMIT);
qSetStreamInput(exec, pSubmit->data, STREAM_DATA_TYPE_SUBMIT_BLOCK, false);
} else if (pItem->type == STREAM_INPUT__DATA_BLOCK) {
SStreamDataBlock* pBlock = (SStreamDataBlock*)data;
ASSERT(pTask->inputType == STREAM_INPUT__DATA_BLOCK);
SArray* blocks = pBlock->blocks;
SArray* blocks = pBlock->blocks;
qSetMultiStreamInput(exec, blocks->pData, blocks->size, STREAM_DATA_TYPE_SSDATA_BLOCK, false);
} else if (pItem->type == STREAM_INPUT__DROP) {
// TODO exec drop
@ -81,7 +80,6 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
}
qRes->type = STREAM_INPUT__DATA_BLOCK;
qRes->blocks = pRes;
/*qRes->sourceVg = pTask->nodeId;*/
if (streamTaskOutput(pTask, qRes) < 0) {
streamQueueProcessFail(pTask->inputQueue);
taosArrayDestroy(pRes);
@ -89,17 +87,17 @@ static SArray* streamExecForQall(SStreamTask* pTask, SArray* pRes) {
return NULL;
}
if (((SStreamQueueItem*)data)->type == STREAM_INPUT__TRIGGER) {
int8_t type = ((SStreamQueueItem*)data)->type;
if (type == STREAM_INPUT__TRIGGER) {
blockDataDestroy(((SStreamTrigger*)data)->pBlock);
taosFreeQitem(data);
} else {
if (pTask->inputType == STREAM_INPUT__DATA_SUBMIT) {
streamDataSubmitRefDec((SStreamDataSubmit*)data);
taosFreeQitem(data);
} else {
taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock);
taosFreeQitem(data);
}
} else if (type == STREAM_INPUT__DATA_BLOCK) {
taosArrayDestroyEx(((SStreamDataBlock*)data)->blocks, (FDelete)tDeleteSSDataBlock);
taosFreeQitem(data);
} else if (type == STREAM_INPUT__DATA_SUBMIT) {
ASSERT(pTask->isDataScan);
streamDataSubmitRefDec((SStreamDataSubmit*)data);
taosFreeQitem(data);
}
streamQueueProcessSuccess(pTask->inputQueue);
return taosArrayInit(0, sizeof(SSDataBlock));

View File

@ -50,14 +50,14 @@ int32_t tEncodeSStreamTask(SEncoder* pEncoder, const SStreamTask* pTask) {
/*if (tStartEncode(pEncoder) < 0) return -1;*/
if (tEncodeI64(pEncoder, pTask->streamId) < 0) return -1;
if (tEncodeI32(pEncoder, pTask->taskId) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->inputType) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->taskStatus) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->execStatus) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->isDataScan) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->execType) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->sinkType) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->dispatchType) < 0) return -1;
if (tEncodeI16(pEncoder, pTask->dispatchMsgType) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->isDataScan) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->taskStatus) < 0) return -1;
if (tEncodeI8(pEncoder, pTask->execStatus) < 0) return -1;
if (tEncodeI32(pEncoder, pTask->selfChildId) < 0) return -1;
if (tEncodeI32(pEncoder, pTask->nodeId) < 0) return -1;
@ -106,14 +106,14 @@ int32_t tDecodeSStreamTask(SDecoder* pDecoder, SStreamTask* pTask) {
/*if (tStartDecode(pDecoder) < 0) return -1;*/
if (tDecodeI64(pDecoder, &pTask->streamId) < 0) return -1;
if (tDecodeI32(pDecoder, &pTask->taskId) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->inputType) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->taskStatus) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->execStatus) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->isDataScan) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->execType) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->sinkType) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->dispatchType) < 0) return -1;
if (tDecodeI16(pDecoder, &pTask->dispatchMsgType) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->isDataScan) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->taskStatus) < 0) return -1;
if (tDecodeI8(pDecoder, &pTask->execStatus) < 0) return -1;
if (tDecodeI32(pDecoder, &pTask->selfChildId) < 0) return -1;
if (tDecodeI32(pDecoder, &pTask->nodeId) < 0) return -1;

View File

@ -857,20 +857,20 @@ static int tdbBtreeBalance(SBTC *pBtc) {
}
// TDB_BTREE_BALANCE
static int tdbFetchOvflPage(SPager *pPager, SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) {
static int tdbFetchOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) {
int ret = 0;
*pPgno = 0;
SBtreeInitPageArg iArg;
iArg.pBt = pBt;
iArg.flags = TDB_FLAG_ADD(0, TDB_BTREE_OVFL);
ret = tdbPagerFetchPage(pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn);
ret = tdbPagerFetchPage(pBt->pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn);
if (ret < 0) {
return -1;
}
// mark dirty
ret = tdbPagerWrite(pPager, *ppOfp);
ret = tdbPagerWrite(pBt->pPager, *ppOfp);
if (ret < 0) {
ASSERT(0);
return -1;
@ -879,13 +879,13 @@ static int tdbFetchOvflPage(SPager *pPager, SPgno *pPgno, SPage **ppOfp, TXN *pT
return ret;
}
static int tdbLoadOvflPage(SPager *pPager, SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) {
static int tdbLoadOvflPage(SPgno *pPgno, SPage **ppOfp, TXN *pTxn, SBTree *pBt) {
int ret = 0;
SBtreeInitPageArg iArg;
iArg.pBt = pBt;
iArg.flags = TDB_FLAG_ADD(0, TDB_BTREE_OVFL);
ret = tdbPagerFetchPage(pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn);
ret = tdbPagerFetchPage(pBt->pPager, pPgno, ppOfp, tdbBtreeInitPage, &iArg, pTxn);
if (ret < 0) {
return -1;
}
@ -922,7 +922,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const
SPgno pgno = 0;
SPage *ofp, *nextOfp;
ret = tdbFetchOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt);
ret = tdbFetchOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
@ -962,7 +962,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const
// fetch next ofp if not last page
if (!lastPage) {
// fetch a new ofp and make it dirty
ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt);
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
tdbFree(pBuf);
return -1;
@ -1019,14 +1019,14 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const
nLeft -= lastKeyPageSpace;
// fetch next ofp, a new ofp and make it dirty
ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt);
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
return -1;
}
}
} else {
// fetch next ofp, a new ofp and make it dirty
ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt);
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
return -1;
}
@ -1057,7 +1057,7 @@ static int tdbBtreeEncodePayload(SPage *pPage, SCell *pCell, int nHeader, const
// fetch next ofp if not last page
if (!lastPage) {
// fetch a new ofp and make it dirty
ret = tdbFetchOvflPage(pPage->pPager, &pgno, &nextOfp, pTxn, pBt);
ret = tdbFetchOvflPage(&pgno, &nextOfp, pTxn, pBt);
if (ret < 0) {
tdbFree(pBuf);
return -1;
@ -1198,7 +1198,7 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
// unpack left val data from ovpages
while (pgno != 0) {
ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt);
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
@ -1235,7 +1235,7 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
int lastKeyPageSpace = 0;
// load left key & val to ovpages
while (pgno != 0) {
ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt);
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
@ -1280,7 +1280,7 @@ static int tdbBtreeDecodePayload(SPage *pPage, const SCell *pCell, int nHeader,
}
while (nLeft > 0) {
ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt);
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
@ -1411,7 +1411,7 @@ static int tdbBtreeCellSize(const SPage *pPage, SCell *pCell, int dropOfp, TXN *
int bytes;
while (pgno != 0) {
ret = tdbLoadOvflPage(pPage->pPager, &pgno, &ofp, pTxn, pBt);
ret = tdbLoadOvflPage(&pgno, &ofp, pTxn, pBt);
if (ret < 0) {
return -1;
}
@ -2023,7 +2023,7 @@ int tdbBtcMoveTo(SBTC *pBtc, const void *pKey, int kLen, int *pCRst) {
// check if key <= current position
if (idx < nCells) {
pCell = tdbPageGetCell(pPage, idx);
tdbBtreeDecodeCell(pPage, pCell, &cd, pBtc->pTxn, pBtc->pBt);
tdbBtreeDecodeCell(pPage, pCell, &cd);
c = pBt->kcmpr(pKey, kLen, cd.pKey, cd.kLen);
if (c > 0) break;
}

View File

@ -19,6 +19,8 @@
#include "tref.h"
#include "walInt.h"
bool FORCE_INLINE walIsEmpty(SWal* pWal) { return pWal->vers.firstVer == -1; }
int64_t FORCE_INLINE walGetFirstVer(SWal* pWal) { return pWal->vers.firstVer; }
int64_t FORCE_INLINE walGetSnaphostVer(SWal* pWal) { return pWal->vers.snapshotVer; }

View File

@ -141,7 +141,7 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
// validate offset
SWalHead head;
ASSERT(taosValidFile(pLogTFile));
int size = taosReadFile(pLogTFile, &head, sizeof(SWalHead));
int64_t size = taosReadFile(pLogTFile, &head, sizeof(SWalHead));
if (size != sizeof(SWalHead)) {
return -1;
}
@ -149,22 +149,33 @@ int32_t walRollback(SWal *pWal, int64_t ver) {
ASSERT(code == 0);
if (code != 0) {
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
if (head.head.version != ver) {
// TODO
ASSERT(0);
terrno = TSDB_CODE_WAL_FILE_CORRUPTED;
return -1;
}
// truncate old files
code = taosFtruncateFile(pLogTFile, entry.offset);
if (code < 0) {
ASSERT(0);
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
code = taosFtruncateFile(pIdxTFile, idxOff);
if (code < 0) {
ASSERT(0);
terrno = TAOS_SYSTEM_ERROR(errno);
return -1;
}
pWal->vers.lastVer = ver - 1;
if (pWal->vers.lastVer < pWal->vers.firstVer) {
ASSERT(pWal->vers.lastVer == pWal->vers.firstVer - 1);
pWal->vers.firstVer = -1;
}
((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->lastVer = ver - 1;
((SWalFileInfo *)taosArrayGetLast(pWal->fileInfoSet))->fileSize = entry.offset;
taosCloseFile(&pIdxTFile);

View File

@ -88,11 +88,11 @@ void taosSetSystemLocale(const char *inLocale, const char *inCharSet) {
void taosGetSystemLocale(char *outLocale, char *outCharset) {
#ifdef WINDOWS
char *locale = setlocale(LC_CTYPE, "chs");
char *locale = setlocale(LC_CTYPE, "en_US.UTF-8");
if (locale != NULL) {
tstrncpy(outLocale, locale, TD_LOCALE_LEN);
}
strcpy(outCharset, "cp936");
strcpy(outCharset, "UTF-8");
#elif defined(_TD_DARWIN_64)
/*

View File

@ -503,6 +503,38 @@ const char *cfgDtypeStr(ECfgDataType type) {
}
}
void cfgDumpItemValue(SConfigItem *pItem, char* buf, int32_t bufSize, int32_t* pLen) {
int32_t len = 0;
switch (pItem->dtype) {
case CFG_DTYPE_BOOL:
len = snprintf(buf, bufSize, "%u", pItem->bval);
break;
case CFG_DTYPE_INT32:
len = snprintf(buf, bufSize, "%d", pItem->i32);
break;
case CFG_DTYPE_INT64:
len = snprintf(buf, bufSize, "%" PRId64, pItem->i64);
break;
case CFG_DTYPE_FLOAT:
len = snprintf(buf, bufSize, "%f", pItem->fval);
break;
case CFG_DTYPE_STRING:
case CFG_DTYPE_DIR:
case CFG_DTYPE_LOCALE:
case CFG_DTYPE_CHARSET:
case CFG_DTYPE_TIMEZONE:
case CFG_DTYPE_NONE:
len = snprintf(buf, bufSize, "%s", pItem->str);
break;
}
if (len > bufSize) {
len = bufSize;
}
*pLen = len;
}
void cfgDumpCfg(SConfig *pCfg, bool tsc, bool dump) {
if (dump) {
printf(" global config");
@ -996,4 +1028,4 @@ int32_t cfgGetApollUrl(const char **envCmd, const char *envFile, char* apolloUrl
uInfo("fail get apollo url from cmd env file");
return -1;
}
}

View File

@ -12,6 +12,8 @@
## ---- db
./test.sh -f tsim/db/create_all_options.sim
./test.sh -f tsim/db/alter_option.sim
./test.sh -f tsim/db/alter_replica_13.sim
#./test.sh -f tsim/db/alter_replica_31.sim
./test.sh -f tsim/db/basic1.sim
./test.sh -f tsim/db/basic2.sim
./test.sh -f tsim/db/basic3.sim
@ -21,6 +23,7 @@
./test.sh -f tsim/db/taosdlog.sim
# ---- dnode
./test.sh -f tsim/dnode/balance_replica1.sim
./test.sh -f tsim/dnode/create_dnode.sim
./test.sh -f tsim/dnode/drop_dnode_has_mnode.sim
./test.sh -f tsim/dnode/drop_dnode_has_qnode_snode.sim
@ -28,6 +31,8 @@
#./test.sh -f tsim/dnode/drop_dnode_has_vnode_replica3.sim
#./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica1.sim
#./test.sh -f tsim/dnode/drop_dnode_has_multi_vnode_replica3.sim
#./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_leader.sim
./test.sh -f tsim/dnode/redistribute_vgroup_replica3_v1_follower.sim
# ---- insert
./test.sh -f tsim/insert/basic0.sim
@ -104,6 +109,7 @@
./test.sh -f tsim/tmq/basic4Of2Cons.sim
./test.sh -f tsim/tmq/basic2Of2ConsOverlap.sim
./test.sh -f tsim/tmq/topic.sim
./test.sh -f tsim/tmq/snapshot.sim
# --- stable
./test.sh -f tsim/stable/disk.sim

View File

@ -0,0 +1,123 @@
system sh/stop_dnodes.sh
system sh/deploy.sh -n dnode1 -i 1
system sh/deploy.sh -n dnode2 -i 2
system sh/deploy.sh -n dnode3 -i 3
system sh/cfg.sh -n dnode1 -c supportVnodes -v 0
system sh/exec.sh -n dnode1 -s start
system sh/exec.sh -n dnode2 -s start
sql connect
print =============== step1 create dnode2
# not enough vnodes
sql balance vgroup
sql create dnode $hostname port 7200
sql create dnode $hostname port 7300
$x = 0
step1:
$x = $x + 1
sleep 1000
if $x == 10 then
print ---> dnode not online!
return -1
endi
sql show dnodes
print ---> $data00 $data01 $data02 $data03 $data04 $data05
print ---> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 3 then
return -1
endi
if $data(1)[4] != ready then
goto step1
endi
if $data(2)[4] != ready then
goto step1
endi
if $data(3)[4] != offline then
goto step1
endi
print =============== step2 create database
sql create database d1 vgroups 2
sql use d1
sql create table d1.st (ts timestamp, i int) tags (j int)
sql create table d1.c1 using st tags(1)
sql create table d1.c2 using st tags(1)
sql create table d1.c3 using st tags(1)
sql create table d1.c4 using st tags(1)
sql create table d1.c5 using st tags(1)
sql create table d1.c6 using st tags(1)
sql show d1.tables
if $rows != 6 then
return -1
endi
sql show d1.vgroups
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
if $rows != 2 then
return -1
endi
if $data(2)[3] != 2 then
return -1
endi
if $data(3)[3] != 2 then
return -1
endi
print =============== step3: balance vgroup
# has offline dnode
sql_error balance vgroup
system sh/exec.sh -n dnode3 -s start
$x = 0
step3:
$x = $x + 1
sleep 1000
if $x == 10 then
print ---> dnode not online!
return -1
endi
sql show dnodes
print ---> $data00 $data01 $data02 $data03 $data04 $data05
print ---> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 3 then
return -1
endi
if $data(1)[4] != ready then
goto step3
endi
if $data(2)[4] != ready then
goto step3
endi
if $data(3)[4] != ready then
goto step3
endi
print =============== step4: balance
sql balance vgroup
print show d1.vgroups
sql show d1.vgroups
print $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4]
if $rows != 2 then
return -1
endi
if $data(2)[3] != 3 then
return -1
endi
if $data(3)[3] != 2 then
return -1
endi
print =============== step7: select data
sql show d1.tables
print rows $rows
if $rows != 6 then
return -1
endi
return
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT

View File

@ -28,7 +28,7 @@ step1:
sql show dnodes
print ===> $data00 $data01 $data02 $data03 $data04 $data05
print ===> $data10 $data11 $data12 $data13 $data14 $data15
if $rows != 3 then
if $rows != 5 then
return -1
endi
if $data(1)[4] != ready then
@ -72,6 +72,13 @@ if $data(2)[7] != 4 then
return -1
endi
system sh/exec.sh -n dnode4 -s stop -x SIGINT
system sh/exec.sh -n dnode3 -s stop -x SIGINT
return
print =============== step4: drop dnode 2
system sh/exec.sh -n dnode5 -s start
$x = 0

View File

@ -176,6 +176,8 @@ if $rows != 1 then
return -1
endi
return
print =============== step33: move follower1
print redistribute vgroup 2 dnode $leaderVnode dnode $follower1 dnode 5
sql redistribute vgroup 2 dnode $leaderVnode dnode $follower1 dnode 5

View File

@ -233,6 +233,7 @@ if $data(1)[3] != dropping then
endi
print =============== step9: start mnode1 and wait it dropped
sleep 3000
system sh/exec.sh -n dnode1 -s start
$x = 0

View File

@ -99,7 +99,7 @@ if $rows != 1 then
endi
#sql select * from information_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 30 then
if $rows != 31 then
return -1
endi
#sql select * from information_schema.user_table_distributed
@ -197,7 +197,7 @@ if $rows != 1 then
endi
#sql select * from performance_schema.`streams`
sql select * from information_schema.user_tables
if $rows != 30 then
if $rows != 31 then
return -1
endi
#sql select * from information_schema.user_table_distributed
@ -227,5 +227,20 @@ endi
sql_error show create stable t0;
sql show variables;
if $rows != 4 then
return -1
endi
sql show dnode 1 variables;
if $rows <= 0 then
return -1
endi
sql show local variables;
if $rows <= 0 then
return -1
endi
system sh/exec.sh -n dnode1 -s stop -x SIGINT
system sh/exec.sh -n dnode2 -s stop -x SIGINT

View File

@ -17,8 +17,9 @@ VALGRIND=0
SIGNAL=SIGINT
SHOW_MSG=0
SHOW_ROW=0
EXP_USE_SNAPSHOT=0
while getopts "d:s:v:y:x:g:r:w:" arg
while getopts "d:s:v:y:x:g:r:w:e:" arg
do
case $arg in
d)
@ -45,6 +46,9 @@ do
w)
CDB_NAME=$OPTARG
;;
e)
EXP_USE_SNAPSHOT=$OPTARG
;;
?)
echo "unkown argument"
;;
@ -91,8 +95,8 @@ if [ "$EXEC_OPTON" = "start" ]; then
echo nohup valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${LOG_DIR}/valgrind-tmq_sim.log $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW > /dev/null 2>&1 &
nohup valgrind --tool=memcheck --leak-check=full --show-reachable=no --track-origins=yes --show-leak-kinds=all -v --workaround-gcc296-bugs=yes --log-file=${LOG_DIR}/valgrind-tmq_sim.log $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW > /dev/null 2>&1 &
else
echo "nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME > /dev/null 2>&1 &"
nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME > /dev/null 2>&1 &
echo "nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME -e $EXP_USE_SNAPSHOT > /dev/null 2>&1 &"
nohup $PROGRAM -c $CFG_DIR -y $POLL_DELAY -d $DB_NAME -g $SHOW_MSG -r $SHOW_ROW -w $CDB_NAME -e $EXP_USE_SNAPSHOT > /dev/null 2>&1 &
fi
else
PID=`ps -ef|grep tmq_sim | grep -v grep | awk '{print $2}'`

View File

@ -0,0 +1,289 @@
#### test scenario, please refer to https://jira.taosdata.com:18090/pages/viewpage.action?pageId=135120406
#basic1.sim: vgroups=1, one topic for one consumer, first insert data, then start consuming. Includes six topics
#basic2.sim: vgroups=1, multi topics for one consumer, first insert data, then start consuming. Includes six topics
#basic3.sim: vgroups=4, one topic for one consumer, first insert data, then start consuming. Includes six topics
#basic4.sim: vgroups=4, multi topics for one consumer, first insert data, then start consuming. Includes six topics
# notes1: Scalar function: ABS/ACOS/ASIN/ATAN/CEIL/COS/FLOOR/LOG/POW/ROUND/SIN/SQRT/TAN
# The above use cases are combined with where filter conditions, such as: where ts > "2017-08-12 18:25:58.128Z" and sin(a) > 0.5;
#
# notes2: aggregate functions (such as sum/count/min/max) and time windows (interval) are not supported.
#
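# For illustration only (not part of the original script): combining a scalar function with a
# where filter, as mentioned in notes1, could use a hypothetical topic such as
#   create topic topic_stb_filter as select ts, sin(c2) from stb where ts > '2017-08-12 18:25:58.128' and sin(c2) > 0.5
# The topics actually created below do not apply where filters.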
run tsim/tmq/prepareBasicEnv-1vgrp.sim
#---- global parameters start ----#
$dbName = db
$vgroups = 1
$stbPrefix = stb
$ctbPrefix = ctb
$ntbPrefix = ntb
$stbNum = 1
$ctbNum = 10
$ntbNum = 10
$rowsPerCtb = 10
$tstart = 1640966400000 # 2022-01-01 00:00:00.000
#---- global parameters end ----#
$pullDelay = 3
$ifcheckdata = 1
$ifmanualcommit = 1
$showMsg = 1
$showRow = 0
sql connect
sql use $dbName
print == create topics from super table
sql create topic topic_stb_column as select ts, c3 from stb
sql create topic topic_stb_all as select ts, c1, c2, c3 from stb
sql create topic topic_stb_function as select ts, abs(c1), sin(c2) from stb
print == create topics from child table
sql create topic topic_ctb_column as select ts, c3 from ctb0
sql create topic topic_ctb_all as select * from ctb0
sql create topic topic_ctb_function as select ts, abs(c1), sin(c2) from ctb0
print == create topics from normal table
sql create topic topic_ntb_column as select ts, c3 from ntb0
sql create topic topic_ntb_all as select * from ntb0
sql create topic topic_ntb_function as select ts, abs(c1), sin(c2) from ntb0
#sql show topics
#if $rows != 9 then
# return -1
#endi
#'group.id:cgrp1,enable.auto.commit:false,auto.commit.interval.ms:6000,auto.offset.reset:earliest'
$keyList = ' . group.id:cgrp1
$keyList = $keyList . ,
$keyList = $keyList . enable.auto.commit:false
#$keyList = $keyList . ,
#$keyList = $keyList . auto.commit.interval.ms:6000
#$keyList = $keyList . ,
#$keyList = $keyList . auto.offset.reset:earliest
$keyList = $keyList . '
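# With the assignments above, $keyList expands to 'group.id:cgrp1,enable.auto.commit:false'
# (including the surrounding quotes); the auto.commit.interval.ms and auto.offset.reset
# entries remain commented out.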
print ========== key list: $keyList
$cdb_index = 0
#=============================== start consume =============================#
print ================ test consume from stb
$loop_cnt = 0
loop_consume_diff_topic_from_stb:
#######################################################################################
# clear consume info and consume result
#run tsim/tmq/clearConsume.sim
# because the drop table function is not yet stable, create a new db for consume info and result. Modify it later
$cdb_index = $cdb_index + 1
$cdbName = cdb . $cdb_index
sql create database $cdbName vgroups 1
sleep 500
sql use $cdbName
print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
sql show tables
if $rows != 2 then
return -1
endi
#######################################################################################
if $loop_cnt == 0 then
print == scenario 1: topic_stb_column
$topicList = ' . topic_stb_column
$topicList = $topicList . '
elif $loop_cnt == 1 then
print == scenario 2: topic_stb_all
$topicList = ' . topic_stb_all
$topicList = $topicList . '
elif $loop_cnt == 2 then
print == scenario 3: topic_stb_function
$topicList = ' . topic_stb_function
$topicList = $topicList . '
else
goto loop_consume_diff_topic_from_stb_end
endi
$consumerId = 0
$totalMsgOfStb = $ctbNum * $rowsPerCtb
$expectmsgcnt = 1
$expectrowcnt = 100
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from stb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -e 1 -s start
system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -e 1 -s start
print == check consume result
wait_consumer_end_from_stb:
sql select * from consumeresult
print ==> rows: $rows
print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
if $rows != 1 then
sleep 1000
goto wait_consumer_end_from_stb
endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != $expectmsgcnt then
return -1
endi
if $data[0][3] != $expectrowcnt then
return -1
endi
$loop_cnt = $loop_cnt + 1
goto loop_consume_diff_topic_from_stb
loop_consume_diff_topic_from_stb_end:
print ================ test consume from ctb
$loop_cnt = 0
loop_consume_diff_topic_from_ctb:
#######################################################################################
# clear consume info and consume result
#run tsim/tmq/clearConsume.sim
# because the drop table function is not yet stable, create a new db for consume info and result. Modify it later
$cdb_index = $cdb_index + 1
$cdbName = cdb . $cdb_index
sql create database $cdbName vgroups 1
sleep 500
sql use $cdbName
print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
sql show tables
if $rows != 2 then
return -1
endi
#######################################################################################
if $loop_cnt == 0 then
print == scenario 1: topic_ctb_column
$topicList = ' . topic_ctb_column
$topicList = $topicList . '
elif $loop_cnt == 1 then
print == scenario 2: topic_ctb_all
$topicList = ' . topic_ctb_all
$topicList = $topicList . '
elif $loop_cnt == 2 then
print == scenario 3: topic_ctb_function
$topicList = ' . topic_ctb_function
$topicList = $topicList . '
else
goto loop_consume_diff_topic_from_ctb_end
endi
$consumerId = 0
$totalMsgOfCtb = $rowsPerCtb
$expectmsgcnt = 1
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ctb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start -e 1
system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start -e 1
print == check consume result
wait_consumer_end_from_ctb:
sql select * from consumeresult
print ==> rows: $rows
print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
if $rows != 1 then
sleep 1000
goto wait_consumer_end_from_ctb
endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != 1 then
return -1
endi
if $data[0][3] != 10 then
return -1
endi
$loop_cnt = $loop_cnt + 1
goto loop_consume_diff_topic_from_ctb
loop_consume_diff_topic_from_ctb_end:
print ================ test consume from ntb
$loop_cnt = 0
loop_consume_diff_topic_from_ntb:
#######################################################################################
# clear consume info and consume result
#run tsim/tmq/clearConsume.sim
# because the drop table function is not yet stable, create a new db for consume info and result. Modify it later
$cdb_index = $cdb_index + 1
$cdbName = cdb . $cdb_index
sql create database $cdbName vgroups 1
sleep 500
sql use $cdbName
print == create consume info table and consume result table
sql create table consumeinfo (ts timestamp, consumerid int, topiclist binary(1024), keylist binary(1024), expectmsgcnt bigint, ifcheckdata int, ifmanualcommit int)
sql create table consumeresult (ts timestamp, consumerid int, consummsgcnt bigint, consumrowcnt bigint, checkresult int)
sql show tables
if $rows != 2 then
return -1
endi
#######################################################################################
if $loop_cnt == 0 then
print == scenario 1: topic_ntb_column
$topicList = ' . topic_ntb_column
$topicList = $topicList . '
elif $loop_cnt == 1 then
print == scenario 2: topic_ntb_all
$topicList = ' . topic_ntb_all
$topicList = $topicList . '
elif $loop_cnt == 2 then
print == scenario 3: topic_ntb_function
$topicList = ' . topic_ntb_function
$topicList = $topicList . '
else
goto loop_consume_diff_topic_from_ntb_end
endi
$consumerId = 0
$totalMsgOfNtb = $rowsPerCtb
$expectmsgcnt = $totalMsgOfNtb
sql insert into consumeinfo values (now , $consumerId , $topicList , $keyList , $expectmsgcnt , $ifcheckdata , $ifmanualcommit )
print == start consumer to pull msgs from ntb
print == tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -s start -e 1
system tsim/tmq/consume.sh -d $dbName -y $pullDelay -g $showMsg -r $showRow -w $cdbName -s start -e 1
print == check consume result from ntb
wait_consumer_end_from_ntb:
sql select * from consumeresult
print ==> rows: $rows
print ==> rows[0]: $data[0][0] $data[0][1] $data[0][2] $data[0][3] $data[0][4] $data[0][5] $data[0][6]
if $rows != 1 then
sleep 1000
goto wait_consumer_end_from_ntb
endi
if $data[0][1] != $consumerId then
return -1
endi
if $data[0][2] != 1 then
return -1
endi
if $data[0][3] != $totalMsgOfNtb then
return -1
endi
$loop_cnt = $loop_cnt + 1
goto loop_consume_diff_topic_from_ntb
loop_consume_diff_topic_from_ntb_end:
#------ no need to stop the consumer, because it exits after pulling more msgs than expected
#system tsim/tmq/consume.sh -s stop -x SIGINT
system sh/exec.sh -n dnode1 -s stop -x SIGINT

View File

@ -52,7 +52,7 @@ echo wal 0 >> %TAOS_CFG%
echo asyncLog 0 >> %TAOS_CFG%
echo locale en_US.UTF-8 >> %TAOS_CFG%
echo enableCoreFile 1 >> %TAOS_CFG%
echo charset cp65001 >> %TAOS_CFG%
echo charset UTF-8 >> %TAOS_CFG%
set "FILE_NAME=testSuite.sim"
if "%1" == "-f" set "FILE_NAME=%2"

View File

@ -68,7 +68,7 @@ class TDTestCase:
tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('[1,true]')")
tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{222}')")
tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"fe\"}')")
#
# test invalid json key: the key must consist of printable ascii chars
tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":[1,true]}')")
tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"tag1\":{}}')")
@ -79,7 +79,7 @@ class TDTestCase:
# test invalid json value: numeric values can not be inf or nan TD-12166
tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":1.8e308}')")
tdSql.error("CREATE TABLE if not exists jsons1_14 using jsons1 tags('{\"k\":-1.8e308}')")
#
#test length limit
char1= ''.join(['abcd']*64)
char3= ''.join(['abcd']*1021)
@ -87,15 +87,15 @@ class TDTestCase:
tdSql.error("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s1\":5}')" % char1) # len(key)=257
tdSql.execute("CREATE TABLE if not exists jsons1_15 using jsons1 tags('{\"%s\":5}')" % char1) # len(key)=256
tdSql.error("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSSS\":\"%s\"}')" % char3) # len(object)=4096
#tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095
tdSql.execute("CREATE TABLE if not exists jsons1_16 using jsons1 tags('{\"TSSS\":\"%s\"}')" % char3) # len(object)=4095
tdSql.execute("drop table if exists jsons1_15")
tdSql.execute("drop table if exists jsons1_16")
#
print("============== STEP 2 ===== alter table json tag")
tdSql.error("ALTER STABLE jsons1 add tag tag2 nchar(20)")
tdSql.error("ALTER STABLE jsons1 drop tag jtag")
tdSql.error("ALTER TABLE jsons1 MODIFY TAG jtag nchar(128)")
#
tdSql.execute("ALTER TABLE jsons1_1 SET TAG jtag='{\"tag1\":\"femail\",\"tag2\":35,\"tag3\":true}'")
tdSql.query("select jtag from jsons1_1")
tdSql.checkData(0, 0, '{"tag1":"femail","tag2":35,"tag3":true}')
@ -105,9 +105,9 @@ class TDTestCase:
tdSql.execute("create table st(ts timestamp, i int) tags(t int)")
tdSql.error("ALTER STABLE st add tag jtag json")
tdSql.error("ALTER STABLE st add column jtag json")
#
# print("============== STEP 3 ===== query table")
# # test error syntax
print("============== STEP 3 ===== query table")
# test error syntax
tdSql.error("select * from jsons1 where jtag->tag1='beijing'")
tdSql.error("select -> from jsons1")
tdSql.error("select * from jsons1 where contains")
@ -115,17 +115,17 @@ class TDTestCase:
tdSql.error("select jtag->location from jsons1")
tdSql.error("select jtag contains location from jsons1")
tdSql.error("select * from jsons1 where jtag contains location")
#tdSql.error("select * from jsons1 where jtag contains''")
tdSql.query("select * from jsons1 where jtag contains''")
tdSql.error("select * from jsons1 where jtag contains 'location'='beijing'")
#
# # test function error
# test function error
tdSql.error("select avg(jtag->'tag1') from jsons1")
tdSql.error("select avg(jtag) from jsons1")
tdSql.error("select min(jtag->'tag1') from jsons1")
tdSql.error("select min(jtag) from jsons1")
tdSql.error("select ceil(jtag->'tag1') from jsons1")
tdSql.error("select ceil(jtag) from jsons1")
#
#test scalar operation
tdSql.query("select jtag contains 'tag1',jtag->'tag1' from jsons1 order by jtag->'tag1'")
@ -158,10 +158,11 @@ class TDTestCase:
tdSql.checkData(0, 0, None)
tdSql.checkData(0, 1, False)
tdSql.checkData(7, 0, "false")
tdSql.checkData(7, 1, True)
tdSql.checkData(7, 1, False)
tdSql.checkData(8, 1, False)
tdSql.checkData(12, 1, True)
# # test select normal column
# test select normal column
tdSql.query("select dataint from jsons1 order by dataint")
tdSql.checkRows(9)
tdSql.checkData(1, 0, 1)
@ -180,7 +181,7 @@ class TDTestCase:
tdSql.query("select jtag from jsons1_9")
tdSql.checkData(0, 0, None)
# # test select json tag->'key', value is string
# test select json tag->'key', value is string
tdSql.query("select jtag->'tag1' from jsons1_1")
tdSql.checkData(0, 0, '"femail"')
tdSql.query("select jtag->'tag2' from jsons1_6")
@ -200,7 +201,7 @@ class TDTestCase:
# test select json tag->'key', key is not exist
tdSql.query("select jtag->'tag10' from jsons1_4")
tdSql.checkData(0, 0, None)
#
tdSql.query("select jtag->'tag1' from jsons1")
tdSql.checkRows(13)
# test header name
@ -210,24 +211,25 @@ class TDTestCase:
tdSql.checkColNameList(res, cname_list)
# # test where with json tag
# test where with json tag
tdSql.query("select * from jsons1_1 where jtag is not null")
# tdSql.error("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
# tdSql.error("select * from jsons1 where jtag->'tag1'={}")
#
# # where json value is string
# tdSql.query("select * from jsons1 where jtag='{\"tag1\":11,\"tag2\":\"\"}'")
tdSql.error("select * from jsons1 where jtag->'tag1'={}")
# where json value is string
tdSql.query("select * from jsons1 where jtag->'tag2'='beijing'")
tdSql.checkRows(2)
tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing'")
tdSql.query("select dataint,tbname,jtag->'tag1',jtag from jsons1 where jtag->'tag2'='beijing' order by dataint")
tdSql.checkRows(2)
# out of order, cannot compare value
#tdSql.checkData(0, 0, 2)
#tdSql.checkData(0, 1, 'jsons1_2')
#tdSql.checkData(0, 2, 5)
#tdSql.checkData(0, 3, '{"tag1":5,"tag2":"beijing"}')
#tdSql.checkData(1, 0, 3)
#tdSql.checkData(1, 1, 'jsons1_3')
#tdSql.checkData(1, 2, 'false')
tdSql.checkData(0, 0, 2)
tdSql.checkData(0, 1, 'jsons1_2')
tdSql.checkData(0, 2, "5.000000000")
tdSql.checkData(0, 3, '{"tag1":5,"tag2":"beijing"}')
tdSql.checkData(1, 0, 3)
tdSql.checkData(1, 1, 'jsons1_3')
tdSql.checkData(1, 2, 'false')
tdSql.query("select * from jsons1 where jtag->'tag1'='beijing'")
tdSql.checkRows(0)
tdSql.query("select * from jsons1 where jtag->'tag1'='收到货'")
@ -236,72 +238,73 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.query("select * from jsons1 where jtag->'tag2'>='beijing'")
tdSql.checkRows(3)
# open
#tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'")
#tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'")
tdSql.query("select * from jsons1 where jtag->'tag2'<'beijing'")
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag2'<='beijing'")
tdSql.checkRows(4)
tdSql.query("select * from jsons1 where jtag->'tag2'!='beijing'")
tdSql.checkRows(5)
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag2'=''")
tdSql.checkRows(2)
#
# # where json value is int
# where json value is int
tdSql.query("select * from jsons1 where jtag->'tag1'=5")
tdSql.checkRows(1)
tdSql.checkData(0, 1, 2)
tdSql.query("select * from jsons1 where jtag->'tag1'=10")
tdSql.checkRows(0)
tdSql.query("select * from jsons1 where jtag->'tag1'<54")
tdSql.checkRows(4)
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag1'<=11")
tdSql.checkRows(4)
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag1'>4")
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag1'>=5")
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag1'!=5")
tdSql.checkRows(6)
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag1'!=55")
tdSql.checkRows(7)
#
# # where json value is double
tdSql.checkRows(3)
# where json value is double
tdSql.query("select * from jsons1 where jtag->'tag1'=1.232")
tdSql.checkRows(1)
tdSql.query("select * from jsons1 where jtag->'tag1'<1.232")
tdSql.checkRows(1)
tdSql.checkRows(0)
tdSql.query("select * from jsons1 where jtag->'tag1'<=1.232")
tdSql.checkRows(2)
tdSql.checkRows(1)
tdSql.query("select * from jsons1 where jtag->'tag1'>1.23")
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag1'>=1.232")
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag1'!=1.232")
tdSql.checkRows(6)
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag1'!=3.232")
tdSql.checkRows(7)
#tdSql.error("select * from jsons1 where jtag->'tag1'/0=3")
#tdSql.error("select * from jsons1 where jtag->'tag1'/5=1")
#
# # where json value is bool
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag1'/0=3")
tdSql.checkRows(0)
tdSql.query("select * from jsons1 where jtag->'tag1'/5=1")
tdSql.checkRows(1)
# where json value is bool
tdSql.query("select * from jsons1 where jtag->'tag1'=true")
tdSql.checkRows(0)
#tdSql.query("select * from jsons1 where jtag->'tag1'=false")
#tdSql.checkRows(1)
tdSql.query("select * from jsons1 where jtag->'tag1'=false")
tdSql.checkRows(1)
tdSql.query("select * from jsons1 where jtag->'tag1'!=false")
tdSql.checkRows(3)
#tdSql.error("select * from jsons1 where jtag->'tag1'>false")
#
# # where json value is null
# open
#tdSql.query("select * from jsons1 where jtag->'tag1'=null") # only json suport =null. This synatx will change later.
#tdSql.checkRows(1)
#
# # where json key is null
tdSql.checkRows(0)
tdSql.query("select * from jsons1 where jtag->'tag1'>false")
tdSql.checkRows(0)
# where json value is null
tdSql.query("select * from jsons1 where jtag->'tag1'=null")
tdSql.checkRows(0)
# where json key is null
tdSql.query("select * from jsons1 where jtag->'tag_no_exist'=3")
tdSql.checkRows(0)
#
# # where json value is not exist
# where json value is not exist
tdSql.query("select * from jsons1 where jtag->'tag1' is null")
tdSql.checkData(0, 0, 'jsons1_9')
tdSql.checkRows(2)
@ -309,16 +312,16 @@ class TDTestCase:
tdSql.checkRows(9)
tdSql.query("select * from jsons1 where jtag->'tag3' is not null")
tdSql.checkRows(3)
#
# # test contains
# test contains
tdSql.query("select * from jsons1 where jtag contains 'tag1'")
tdSql.checkRows(7)
tdSql.checkRows(8)
tdSql.query("select * from jsons1 where jtag contains 'tag3'")
tdSql.checkRows(3)
tdSql.checkRows(4)
tdSql.query("select * from jsons1 where jtag contains 'tag_no_exist'")
tdSql.checkRows(0)
#
# # test json tag in where condition with and/or
# test json tag in where condition with and/or
tdSql.query("select * from jsons1 where jtag->'tag1'=false and jtag->'tag2'='beijing'")
tdSql.checkRows(1)
tdSql.query("select * from jsons1 where jtag->'tag1'=false or jtag->'tag2'='beijing'")
@ -335,15 +338,15 @@ class TDTestCase:
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag1'='femail' and jtag contains 'tag3'")
tdSql.checkRows(2)
#
#
# # test with between and
# test with between and
tdSql.query("select * from jsons1 where jtag->'tag1' between 1 and 30")
tdSql.checkRows(3)
tdSql.query("select * from jsons1 where jtag->'tag1' between 'femail' and 'beijing'")
tdSql.checkRows(2)
#
# # test with tbname/normal column
# test with tbname/normal column
tdSql.query("select * from jsons1 where tbname = 'jsons1_1'")
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3'")
@ -352,20 +355,18 @@ class TDTestCase:
tdSql.checkRows(0)
tdSql.query("select * from jsons1 where tbname = 'jsons1_1' and jtag contains 'tag3' and dataint=23")
tdSql.checkRows(1)
#
#
# # test where condition like
# open
# syntax error
#tdSql.query("select *,tbname from jsons1 where jtag->'tag2' like 'bei%'")
#tdSql.checkRows(2)
#tdSql.query("select *,tbname from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null")
#tdSql.checkRows(2)
#
# # test where condition in no support in
# test where condition like
tdSql.query("select * from jsons1 where jtag->'tag2' like 'bei%'")
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag1' like 'fe%' and jtag->'tag2' is not null")
tdSql.checkRows(2)
# test where condition with 'in' ('in' is not supported)
# tdSql.error("select * from jsons1 where jtag->'tag1' in ('beijing')")
#
# # test where condition match/nmath
# test where condition match/nmatch
tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma'")
tdSql.checkRows(2)
tdSql.query("select * from jsons1 where jtag->'tag1' match 'ma$'")
@ -376,23 +377,22 @@ class TDTestCase:
tdSql.checkRows(1)
tdSql.query("select * from jsons1 where jtag->'tag1' nmatch 'ma'")
tdSql.checkRows(1)
#
# # test distinct
# test distinct
tdSql.execute("insert into jsons1_14 using jsons1 tags('{\"tag1\":\"收到货\",\"tag2\":\"\",\"tag3\":null}') values(1591062628000, 2, NULL, '你就会', 'dws')")
tdSql.query("select distinct jtag->'tag1' from jsons1")
tdSql.checkRows(8)
tdSql.query("select distinct jtag from jsons1")
tdSql.checkRows(9)
#
# #test dumplicate key with normal colomn
#test duplicate key with normal column
tdSql.execute("INSERT INTO jsons1_15 using jsons1 tags('{\"tbname\":\"tt\",\"databool\":true,\"datastr\":\"是是是\"}') values(1591060828000, 4, false, 'jjsf', \"你就会\")")
#tdSql.query("select *,tbname,jtag from jsons1 where jtag->'datastr' match '是' and datastr match 'js'")
#tdSql.checkRows(1)
# open
#tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'")
#tdSql.checkRows(0)
#
# # test join
tdSql.query("select * from jsons1 where jtag->'datastr' match '' and datastr match 'js'")
tdSql.checkRows(1)
# tdSql.query("select tbname,jtag->'tbname' from jsons1 where jtag->'tbname'='tt' and tbname='jsons1_14'")
# tdSql.checkRows(1)
# test join
tdSql.execute("create table if not exists jsons2(ts timestamp, dataInt int, dataBool bool, dataStr nchar(50), dataStrBin binary(150)) tags(jtag json)")
tdSql.execute("insert into jsons2_1 using jsons2 tags('{\"tag1\":\"fff\",\"tag2\":5, \"tag3\":true}') values(1591060618000, 2, false, 'json2', '你是2')")
tdSql.execute("insert into jsons2_2 using jsons2 tags('{\"tag1\":5,\"tag2\":null}') values (1591060628000, 2, true, 'json2', 'sss')")
@ -460,19 +460,18 @@ class TDTestCase:
tdSql.checkColNameList(res, cname_list)
# test top/bottom with group by json tag
# random failure
#tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
#tdSql.checkRows(11)
#tdSql.checkData(0, 1, None)
#tdSql.checkData(2, 0, 4)
#tdSql.checkData(3, 0, 3)
#tdSql.checkData(3, 1, "false")
#tdSql.checkData(10, 0, 23)
#tdSql.checkData(10, 1, '"femail"')
# tdSql.query("select top(dataint,2),jtag->'tag1' from jsons1 group by jtag->'tag1' order by jtag->'tag1'")
# tdSql.checkRows(11)
# tdSql.checkData(0, 1, None)
# tdSql.checkData(2, 0, 4)
# tdSql.checkData(3, 0, 3)
# tdSql.checkData(3, 1, "false")
# tdSql.checkData(8, 0, 2)
# tdSql.checkData(10, 1, '"femail"')
# test having
#tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1")
#tdSql.checkRows(3)
tdSql.query("select count(*),jtag->'tag1' from jsons1 group by jtag->'tag1' having count(*) > 1")
tdSql.checkRows(3)
# subquery with json tag
tdSql.query("select * from (select jtag, dataint from jsons1) order by dataint")
@ -480,22 +479,13 @@ class TDTestCase:
tdSql.checkData(1, 1, 1)
tdSql.checkData(5, 0, '{"tag1":false,"tag2":"beijing"}')
# tdSql.query("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)")
# tdSql.checkRows(11)
# tdSql.checkData(1, 0, '"femail"')
# tdSql.checkData(2, 0, 5)
#
# res = tdSql.getColNameList("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)")
# cname_list = []
# cname_list.append("jtag->'tag1'")
# tdSql.checkColNameList(res, cname_list)
#
# tdSql.query("select ts,tbname,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)")
tdSql.error("select jtag->'tag1' from (select jtag->'tag1', dataint from jsons1)")
# tdSql.query("select ts,jtag->'tag1' from (select jtag->'tag1',tbname,ts from jsons1 order by ts)")
# tdSql.checkRows(11)
# tdSql.checkData(1, 1, "jsons1_1")
# tdSql.checkData(1, 2, '"femail"')
#
# # union all
# union all
tdSql.query("select jtag->'tag1' from jsons1 union all select jtag->'tag2' from jsons2")
tdSql.checkRows(17)
tdSql.query("select jtag->'tag1' from jsons1_1 union all select jtag->'tag2' from jsons2_1")
@ -508,24 +498,25 @@ class TDTestCase:
tdSql.query("select dataint,jtag,tbname from jsons1 union all select dataint,jtag,tbname from jsons2")
tdSql.checkRows(13)
# #show create table
# tdSql.query("show create table jsons1")
# tdSql.checkData(0, 1, 'CREATE TABLE `jsons1` (`ts` TIMESTAMP,`dataint` INT,`databool` BOOL,`datastr` NCHAR(50),`datastrbin` BINARY(150)) TAGS (`jtag` JSON)')
#
# #test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares
#show create table
tdSql.query("show create table jsons1")
tdSql.checkData(0, 1, 'CREATE STABLE `jsons1` (`ts` TIMESTAMP, `dataint` INT, `databool` BOOL, `datastr` NCHAR(50), `datastrbin` VARCHAR(150)) TAGS (`jtag` JSON) WATERMARK 5000a, 5000a')
#test aggregate function:count/avg/twa/irate/sum/stddev/leastsquares
tdSql.query("select count(*) from jsons1 where jtag is not null")
tdSql.checkData(0, 0, 10)
tdSql.query("select avg(dataint) from jsons1 where jtag is not null")
tdSql.checkData(0, 0, 5.3)
#tdSql.error("select twa(dataint) from jsons1 where jtag is not null")
tdSql.query("select irate(dataint) from jsons1 where jtag is not null")
#tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null")
#tdSql.checkData(0, 0, 49)
# tdSql.query("select twa(dataint) from jsons1 where jtag is not null")
# tdSql.checkData(0, 0, 36)
# tdSql.error("select irate(dataint) from jsons1 where jtag is not null")
tdSql.query("select sum(dataint) from jsons1 where jtag->'tag1' is not null")
tdSql.checkData(0, 0, 45)
tdSql.query("select stddev(dataint) from jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 4.496912521)
#tdSql.error("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null")
#
# #test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp
tdSql.query("SELECT LEASTSQUARES(dataint, 1, 1) from jsons1 where jtag is not null")
#test selection function:min/max/first/last/top/bottom/percentile/apercentile/last_row/interp
tdSql.query("select min(dataint) from jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 1)
tdSql.query("select max(dataint) from jsons1 where jtag->'tag1'>1")
@ -541,13 +532,16 @@ class TDTestCase:
tdSql.query("select percentile(dataint,20) from jsons1 where jtag->'tag1'>1")
tdSql.query("select apercentile(dataint, 50) from jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 1.5)
#tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1")
#tdSql.checkData(0, 0, 11)
#tdSql.error("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1")
#
# #test calculation function:diff/derivative/spread/ceil/floor/round/
#tdSql.error("select diff(dataint) from jsons1 where jtag->'tag1'>1")
#tdSql.error("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1")
# tdSql.query("select last_row(dataint) from jsons1 where jtag->'tag1'>1")
# tdSql.query("select interp(dataint) from jsons1 where ts = '2020-06-02 09:17:08.000' and jtag->'tag1'>1")
#test calculation function:diff/derivative/spread/ceil/floor/round/
tdSql.query("select diff(dataint) from jsons1 where jtag->'tag1'>1")
# tdSql.checkRows(2)
# tdSql.checkData(0, 0, -1)
# tdSql.checkData(1, 0, 10)
tdSql.query("select derivative(dataint, 10m, 0) from jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, -2)
tdSql.query("select spread(dataint) from jsons1 where jtag->'tag1'>1")
tdSql.checkData(0, 0, 10)
tdSql.query("select ceil(dataint) from jsons1 where jtag->'tag1'>1")

View File

@ -0,0 +1,182 @@
import taos
import sys
import time
import socket
import os
import threading
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
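# checkFileContent: re-run the topic's query through the taos shell, dump the result rows to a file,
# and compare that file line by line with the rows file written by the tmq_sim consumer.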
def checkFileContent(self, consumerId, queryString):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip the first line because it is the schema header
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db1',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 1,
'rowsPerTbl': 10000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1}
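# Case flow: create db/stable/child tables, insert rows, then for each of the three topics
# (each with a different projection and filter) start tmq_sim and check the consumed row count
# against a direct query of the same SQL (row content is also checked for the first two topics).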
topicNameList = ['topic1', 'topic2', 'topic3']
expectRowsList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdLog.info("create stb")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb")
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts, log(c1), ceil(pow(c1,3)) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
self.checkFileContent(consumerId, queryString)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, log(c1), cos(c1) from %s.%s where c1 > 3169" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("1 tmq consume rows error!")
self.checkFileContent(consumerId, queryString)
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, log(c1), atan(c1) from %s.%s where ts >= %d" %(paraDict['dbName'], paraDict['stbName'], paraDict["startTs"]+6137)
sqlString = "create topic %s as %s" %(topicNameList[2], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 2
topicList = topicNameList[2]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
# if expectRowsList[2] != resultList[0]:
# tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[2], resultList[0]))
# tdLog.exit("2 tmq consume rows error!")
# self.checkFileContent(consumerId, queryString)
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
tdSql.prepare()
self.tmqCase1()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -0,0 +1,194 @@
from distutils.log import error
import taos
import sys
import time
import socket
import os
import threading
import subprocess
import platform
from util.log import *
from util.sql import *
from util.cases import *
from util.dnodes import *
from util.common import *
sys.path.append("./7-tmq")
from tmqCommon import *
class TDTestCase:
def init(self, conn, logSql):
tdLog.debug(f"start to excute {__file__}")
tdSql.init(conn.cursor())
#tdSql.init(conn.cursor(), logSql) # output sql.txt file
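# prepare_udf_so: locate the compiled libudf1.so under the project's build tree (with a separate
# search and remote-copy path on Windows) so it can be registered with "create function udf1".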
def prepare_udf_so(self):
selfPath = os.path.dirname(os.path.realpath(__file__))
if ("community" in selfPath):
projPath = selfPath[:selfPath.find("community")]
else:
projPath = selfPath[:selfPath.find("tests")]
print(projPath)
if platform.system().lower() == 'windows':
self.libudf1 = subprocess.Popen('(for /r %s %%i in ("udf1.d*") do @echo %%i)|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
if (not tdDnodes.dnodes[0].remoteIP == ""):
tdDnodes.dnodes[0].remote_conn.get(tdDnodes.dnodes[0].config["path"]+'/debug/build/lib/libudf1.so',projPath+"\\debug\\build\\lib\\")
self.libudf1 = self.libudf1.replace('udf1.dll','libudf1.so')
else:
self.libudf1 = subprocess.Popen('find %s -name "libudf1.so"|grep lib|head -n1'%projPath , shell=True, stdout=subprocess.PIPE,stderr=subprocess.STDOUT).stdout.read().decode("utf-8")
self.libudf1 = self.libudf1.replace('\r','').replace('\n','')
return
def create_udf_function(self):
# create scalar functions
tdSql.execute("create function udf1 as '%s' outputtype int bufSize 8;"%self.libudf1)
functions = tdSql.getResult("show functions")
function_nums = len(functions)
if function_nums == 1:
tdLog.info("create one udf functions success ")
else:
tdLog.exit("create udf functions fail")
return
def checkFileContent(self, consumerId, queryString):
buildPath = tdCom.getBuildPath()
cfgPath = tdCom.getClientCfgPath()
dstFile = '%s/../log/dstrows_%d.txt'%(cfgPath, consumerId)
cmdStr = '%s/build/bin/taos -c %s -s "%s >> %s"'%(buildPath, cfgPath, queryString, dstFile)
tdLog.info(cmdStr)
os.system(cmdStr)
consumeRowsFile = '%s/../log/consumerid_%d.txt'%(cfgPath, consumerId)
tdLog.info("rows file: %s, %s"%(consumeRowsFile, dstFile))
consumeFile = open(consumeRowsFile, mode='r')
queryFile = open(dstFile, mode='r')
# skip the first line because it is the schema header
queryFile.readline()
while True:
dst = queryFile.readline()
src = consumeFile.readline()
if dst:
if dst != src:
tdLog.exit("consumerId %d consume rows is not match the rows by direct query"%consumerId)
else:
break
return
def tmqCase1(self):
tdLog.printNoPrefix("======== test case 1: ")
paraDict = {'dbName': 'db1',
'dropFlag': 1,
'event': '',
'vgroups': 4,
'stbName': 'stb',
'colPrefix': 'c',
'tagPrefix': 't',
'colSchema': [{'type': 'INT', 'count':2}, {'type': 'binary', 'len':20, 'count':1}],
'tagSchema': [{'type': 'INT', 'count':1}, {'type': 'binary', 'len':20, 'count':1}],
'ctbPrefix': 'ctb',
'ctbNum': 1,
'rowsPerTbl': 1000,
'batchNum': 10,
'startTs': 1640966400000, # 2022-01-01 00:00:00.000
'pollDelay': 10,
'showMsg': 1,
'showRow': 1}
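# Same flow as the previous tmq check-data case, but the topic queries call the scalar UDF udf1
# (alone and nested inside sin/log) so UDF results are verified across the tmq path as well.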
topicNameList = ['topic1', 'topic2']
expectRowsList = []
tmqCom.initConsumerTable()
tdCom.create_database(tdSql, paraDict["dbName"],paraDict["dropFlag"], vgroups=4,replica=1)
tdLog.info("create stb")
tdCom.create_stable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"], column_elm_list=paraDict['colSchema'], tag_elm_list=paraDict['tagSchema'])
tdLog.info("create ctb")
tdCom.create_ctable(tdSql, dbname=paraDict["dbName"],stbname=paraDict["stbName"],tag_elm_list=paraDict['tagSchema'],count=paraDict["ctbNum"], default_ctbname_prefix=paraDict['ctbPrefix'])
tdLog.info("insert data")
tmqCom.insert_data_1(tdSql,paraDict["dbName"],paraDict["ctbPrefix"],paraDict["ctbNum"],paraDict["rowsPerTbl"],paraDict["batchNum"],paraDict["startTs"])
tdLog.info("create topics from stb with filter")
queryString = "select ts, c1,udf1(c1),c2,udf1(c2) from %s.%s where c1 %% 7 == 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[0], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
# init consume info, and start tmq_sim, then check consume result
tdLog.info("insert consume info to consume processor")
consumerId = 0
expectrowcnt = paraDict["rowsPerTbl"] * paraDict["ctbNum"]
topicList = topicNameList[0]
ifcheckdata = 1
ifManualCommit = 1
keyList = 'group.id:cgrp1, enable.auto.commit:false, auto.commit.interval.ms:6000, auto.offset.reset:earliest'
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[0] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[0], resultList[0]))
tdLog.exit("0 tmq consume rows error!")
self.checkFileContent(consumerId, queryString)
tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
# reinit consume info, and start tmq_sim, then check consume result
tmqCom.initConsumerTable()
queryString = "select ts, c1,udf1(c1),sin(udf1(c2)), log(udf1(c2)) from %s.%s where udf1(c1) == 88 or sin(udf1(c1)) > 0" %(paraDict['dbName'], paraDict['stbName'])
sqlString = "create topic %s as %s" %(topicNameList[1], queryString)
tdLog.info("create topic sql: %s"%sqlString)
tdSql.execute(sqlString)
tdSql.query(queryString)
expectRowsList.append(tdSql.getRows())
consumerId = 1
topicList = topicNameList[1]
tmqCom.insertConsumerInfo(consumerId, expectrowcnt,topicList,keyList,ifcheckdata,ifManualCommit)
tdLog.info("start consume processor")
tmqCom.startTmqSimProcess(paraDict['pollDelay'],paraDict["dbName"],paraDict['showMsg'], paraDict['showRow'])
tdLog.info("wait the consume result")
expectRows = 1
resultList = tmqCom.selectConsumeResult(expectRows)
if expectRowsList[1] != resultList[0]:
tdLog.info("expect consume rows: %d, act consume rows: %d"%(expectRowsList[1], resultList[0]))
tdLog.exit("1 tmq consume rows error!")
self.checkFileContent(consumerId, queryString)
tdLog.printNoPrefix("consumerId %d check data ok!"%(consumerId))
time.sleep(10)
for i in range(len(topicNameList)):
tdSql.query("drop topic %s"%topicNameList[i])
tdLog.printNoPrefix("======== test case 1 end ...... ")
def run(self):
tdSql.prepare()
self.prepare_udf_so()
self.create_udf_function()
self.tmqCase1()
def stop(self):
tdSql.close()
tdLog.success(f"{__file__} successfully executed")
event = threading.Event()
tdCases.addLinux(__file__, TDTestCase())
tdCases.addWindows(__file__, TDTestCase())

View File

@ -132,3 +132,5 @@ python3 ./test.py -f 7-tmq/db.py
python3 ./test.py -f 7-tmq/tmqError.py
python3 ./test.py -f 7-tmq/schema.py
python3 ./test.py -f 7-tmq/stbFilter.py
python3 ./test.py -f 7-tmq/tmqCheckData.py
python3 ./test.py -f 7-tmq/tmqUdf.py

View File

@ -5,7 +5,7 @@ python3 .\test.py -f 0-others\taosShellNetChk.py
python3 .\test.py -f 0-others\telemetry.py
python3 .\test.py -f 0-others\taosdMonitor.py
python3 .\test.py -f 0-others\udfTest.py
python3 .\test.py -f 0-others\udf_create.py
@REM python3 .\test.py -f 0-others\udf_create.py
@REM python3 .\test.py -f 0-others\udf_restart_taosd.py
@REM python3 .\test.py -f 0-others\cachelast.py

View File

@ -22,8 +22,10 @@
#include <time.h>
#include "taos.h"
#include "taosdef.h"
#include "taoserror.h"
#include "tlog.h"
#include "types.h"
#define GREEN "\033[1;32m"
#define NC "\033[0m"
@ -34,11 +36,7 @@
#define MAX_CONSUMER_THREAD_CNT (16)
#define MAX_VGROUP_CNT (32)
typedef enum {
NOTIFY_CMD_START_CONSUM,
NOTIFY_CMD_START_COMMIT,
NOTIFY_CMD_ID_BUTT
}NOTIFY_CMD_ID;
typedef enum { NOTIFY_CMD_START_CONSUM, NOTIFY_CMD_START_COMMIT, NOTIFY_CMD_ID_BUTT } NOTIFY_CMD_ID;
typedef struct {
TdThread thread;
@ -49,8 +47,9 @@ typedef struct {
// char autoCommit[8]; // true, false
// char autoOffsetRest[16]; // none, earliest, latest
int32_t ifCheckData;
int64_t expectMsgCnt;
TdFilePtr pConsumeRowsFile;
int32_t ifCheckData;
int64_t expectMsgCnt;
int64_t consumeMsgCnt;
int64_t consumeRowCnt;
@ -86,6 +85,7 @@ typedef struct {
int32_t saveRowFlag;
int32_t consumeDelay; // unit s
int32_t numOfThread;
int32_t useSnapshot;
SThreadInfo stThreads[MAX_CONSUMER_THREAD_CNT];
} SConfInfo;
@ -93,6 +93,8 @@ static SConfInfo g_stConfInfo;
TdFilePtr g_fp = NULL;
static int running = 1;
int8_t useSnapshot = 0;
// char* g_pRowValue = NULL;
// TdFilePtr g_fp = NULL;
@ -129,7 +131,6 @@ void initLogFile() {
char tmpString[128];
sprintf(filename, "%s/../log/tmqlog_%s.txt", configDir, getCurrentTimeString(tmpString));
// sprintf(filename, "%s/../log/tmqlog.txt", configDir);
#ifdef WINDOWS
for (int i = 2; i < sizeof(filename); i++) {
if (filename[i] == ':') filename[i] = '-';
@ -203,6 +204,8 @@ void parseArgument(int32_t argc, char* argv[]) {
g_stConfInfo.saveRowFlag = atol(argv[++i]);
} else if (strcmp(argv[i], "-y") == 0) {
g_stConfInfo.consumeDelay = atol(argv[++i]);
} else if (strcmp(argv[i], "-e") == 0) {
useSnapshot = (int8_t)atol(argv[++i]);
} else {
pError("%s unknow para: %s %s", GREEN, argv[++i], NC);
exit(-1);
@ -296,17 +299,148 @@ int32_t saveConsumeContentToTbl(SThreadInfo* pInfo, char* buf) {
return 0;
}
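// Format an epoch value as "YYYY-MM-DD HH:MM:SS" plus a fractional part whose width (3, 6 or 9 digits) follows the timestamp precision.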
static char* shellFormatTimestamp(char* buf, int64_t val, int32_t precision) {
// if (shell.args.is_raw_time) {
// sprintf(buf, "%" PRId64, val);
// return buf;
// }
time_t tt;
int32_t ms = 0;
if (precision == TSDB_TIME_PRECISION_NANO) {
tt = (time_t)(val / 1000000000);
ms = val % 1000000000;
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
tt = (time_t)(val / 1000000);
ms = val % 1000000;
} else {
tt = (time_t)(val / 1000);
ms = val % 1000;
}
/*
comment out as it makes testcases like select_with_tags.sim fail.
but on windows, this may cause the call to localtime to crash if tt < 0;
need to find a better solution.
if (tt < 0) {
tt = 0;
}
*/
#ifdef WINDOWS
if (tt < 0) tt = 0;
#endif
if (tt <= 0 && ms < 0) {
tt--;
if (precision == TSDB_TIME_PRECISION_NANO) {
ms += 1000000000;
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
ms += 1000000;
} else {
ms += 1000;
}
}
struct tm* ptm = taosLocalTime(&tt, NULL);
size_t pos = strftime(buf, 35, "%Y-%m-%d %H:%M:%S", ptm);
if (precision == TSDB_TIME_PRECISION_NANO) {
sprintf(buf + pos, ".%09d", ms);
} else if (precision == TSDB_TIME_PRECISION_MICRO) {
sprintf(buf + pos, ".%06d", ms);
} else {
sprintf(buf + pos, ".%03d", ms);
}
return buf;
}
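// Print a single column value to the rows file in a type-appropriate textual form: strings are quoted, timestamps go through shellFormatTimestamp, NULL becomes TSDB_DATA_NULL_STR.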
static void shellDumpFieldToFile(TdFilePtr pFile, const char* val, TAOS_FIELD* field, int32_t length,
int32_t precision) {
if (val == NULL) {
taosFprintfFile(pFile, "%s", TSDB_DATA_NULL_STR);
return;
}
int n;
char buf[TSDB_MAX_BYTES_PER_ROW];
switch (field->type) {
case TSDB_DATA_TYPE_BOOL:
taosFprintfFile(pFile, "%d", ((((int32_t)(*((char*)val))) == 1) ? 1 : 0));
break;
case TSDB_DATA_TYPE_TINYINT:
taosFprintfFile(pFile, "%d", *((int8_t*)val));
break;
case TSDB_DATA_TYPE_UTINYINT:
taosFprintfFile(pFile, "%u", *((uint8_t*)val));
break;
case TSDB_DATA_TYPE_SMALLINT:
taosFprintfFile(pFile, "%d", *((int16_t*)val));
break;
case TSDB_DATA_TYPE_USMALLINT:
taosFprintfFile(pFile, "%u", *((uint16_t*)val));
break;
case TSDB_DATA_TYPE_INT:
taosFprintfFile(pFile, "%d", *((int32_t*)val));
break;
case TSDB_DATA_TYPE_UINT:
taosFprintfFile(pFile, "%u", *((uint32_t*)val));
break;
case TSDB_DATA_TYPE_BIGINT:
taosFprintfFile(pFile, "%" PRId64, *((int64_t*)val));
break;
case TSDB_DATA_TYPE_UBIGINT:
taosFprintfFile(pFile, "%" PRIu64, *((uint64_t*)val));
break;
case TSDB_DATA_TYPE_FLOAT:
taosFprintfFile(pFile, "%.5f", GET_FLOAT_VAL(val));
break;
case TSDB_DATA_TYPE_DOUBLE:
n = snprintf(buf, TSDB_MAX_BYTES_PER_ROW, "%*.9f", length, GET_DOUBLE_VAL(val));
if (n > TMAX(25, length)) {
taosFprintfFile(pFile, "%*.15e", length, GET_DOUBLE_VAL(val));
} else {
taosFprintfFile(pFile, "%s", buf);
}
break;
case TSDB_DATA_TYPE_BINARY:
case TSDB_DATA_TYPE_NCHAR:
case TSDB_DATA_TYPE_JSON:
memcpy(buf, val, length);
buf[length] = 0;
taosFprintfFile(pFile, "\'%s\'", buf);
break;
case TSDB_DATA_TYPE_TIMESTAMP:
shellFormatTimestamp(buf, *(int64_t*)val, precision);
taosFprintfFile(pFile, "'%s'", buf);
break;
default:
break;
}
}
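// Dump every field of a fetched row to the per-consumer rows file; the python tmq cases later compare this file against a taos-shell dump of the same query.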
static void dumpToFileForCheck(TdFilePtr pFile, TAOS_ROW row, TAOS_FIELD* fields, int32_t* length, int32_t num_fields,
int32_t precision) {
for (int32_t i = 0; i < num_fields; i++) {
if (i > 0) {
taosFprintfFile(pFile, "\n");
}
shellDumpFieldToFile(pFile, (const char*)row[i], fields + i, length[i], precision);
}
taosFprintfFile(pFile, "\n");
}
static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex) {
char buf[1024];
int32_t totalRows = 0;
// printf("topic: %s\n", tmq_get_topic_name(msg));
int32_t vgroupId = tmq_get_vgroup_id(msg);
int32_t vgroupId = tmq_get_vgroup_id(msg);
const char* dbName = tmq_get_db_name(msg);
taosFprintfFile(g_fp, "msg index:%" PRId64 ", consumerId: %d\n", msgIndex, pInfo->consumerId);
// taosFprintfFile(g_fp, "topic: %s, vgroupId: %d, tableName: %s\n", tmq_get_topic_name(msg), vgroupId,
// tmq_get_table_name(msg));
taosFprintfFile(g_fp, "topic: %s, vgroupId: %d\n", tmq_get_topic_name(msg), vgroupId);
taosFprintfFile(g_fp, "consumerId: %d, msg index:%" PRId64 "\n", pInfo->consumerId, msgIndex);
taosFprintfFile(g_fp, "dbName: %s, topic: %s, vgroupId: %d\n", dbName != NULL ? dbName : "invalid table",
tmq_get_topic_name(msg), vgroupId);
while (1) {
TAOS_ROW row = taos_fetch_row(msg);
@ -315,16 +449,18 @@ static int32_t msg_process(TAOS_RES* msg, SThreadInfo* pInfo, int32_t msgIndex)
TAOS_FIELD* fields = taos_fetch_fields(msg);
int32_t numOfFields = taos_field_count(msg);
taos_print_row(buf, row, fields, numOfFields);
int32_t* length = taos_fetch_lengths(msg);
int32_t precision = taos_result_precision(msg);
const char* tbName = tmq_get_table_name(msg);
dumpToFileForCheck(pInfo->pConsumeRowsFile, row, fields, length, numOfFields, precision);
taos_print_row(buf, row, fields, numOfFields);
if (0 != g_stConfInfo.showRowFlag) {
taosFprintfFile(g_fp, "tbname:%s, rows[%d]: %s\n", (tbName != NULL ? tbName : "null table"), totalRows, buf);
if (0 != g_stConfInfo.saveRowFlag) {
saveConsumeContentToTbl(pInfo, buf);
}
// if (0 != g_stConfInfo.saveRowFlag) {
// saveConsumeContentToTbl(pInfo, buf);
// }
}
totalRows++;
@ -347,8 +483,7 @@ int queryDB(TAOS* taos, char* command) {
return 0;
}
static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) {
}
static void appNothing(void* param, TAOS_RES* res, int32_t numOfRows) {}
int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) {
char sqlStr[1024] = {0};
@ -356,11 +491,8 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) {
int64_t now = taosGetTimestampMs();
// schema: ts timestamp, consumerid int, consummsgcnt bigint, checkresult int
sprintf(sqlStr, "insert into %s.notifyinfo values (%"PRId64", %d, %d)",
g_stConfInfo.cdbName,
now,
cmdId,
pInfo->consumerId);
sprintf(sqlStr, "insert into %s.notifyinfo values (%" PRId64 ", %d, %d)", g_stConfInfo.cdbName, now, cmdId,
pInfo->consumerId);
taos_query_a(pInfo->taos, sqlStr, appNothing, NULL);
@ -370,12 +502,12 @@ int32_t notifyMainScript(SThreadInfo* pInfo, int32_t cmdId) {
}
static int32_t g_once_commit_flag = 0;
static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
pError("tmq_commit_cb_print() commit %d\n", code);
static void tmq_commit_cb_print(tmq_t* tmq, int32_t code, void* param) {
pError("tmq_commit_cb_print() commit %d\n", code);
if (0 == g_once_commit_flag) {
g_once_commit_flag = 1;
notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT);
notifyMainScript((SThreadInfo*)param, (int32_t)NOTIFY_CMD_START_COMMIT);
}
taosFprintfFile(g_fp, "tmq_commit_cb_print() be called\n");
}
@ -409,6 +541,10 @@ void build_consumer(SThreadInfo* pInfo) {
// tmq_conf_set(conf, "auto.offset.reset", "none");
// tmq_conf_set(conf, "auto.offset.reset", "earliest");
// tmq_conf_set(conf, "auto.offset.reset", "latest");
//
if (useSnapshot) {
tmq_conf_set(conf, "experiment.use.snapshot", "true");
}
pInfo->tmq = tmq_consumer_new(conf, NULL, 0);
@ -464,6 +600,19 @@ void loop_consume(SThreadInfo* pInfo) {
pInfo->ts = taosGetTimestampMs();
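// When data checking is enabled, every consumed row is also written to ../log/consumerid_<id>.txt so the test driver can diff it against a direct query of the topic's SQL.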
if (pInfo->ifCheckData) {
char filename[256] = {0};
char tmpString[128];
// sprintf(filename, "%s/../log/consumerid_%d_%s.txt", configDir, pInfo->consumerId,
// getCurrentTimeString(tmpString));
sprintf(filename, "%s/../log/consumerid_%d.txt", configDir, pInfo->consumerId);
pInfo->pConsumeRowsFile = taosOpenFile(filename, TD_FILE_CREATE | TD_FILE_WRITE | TD_FILE_TRUNC | TD_FILE_STREAM);
if (pInfo->pConsumeRowsFile == NULL) {
taosFprintfFile(g_fp, "%s create file fail for save rows content\n", getCurrentTimeString(tmpString));
return;
}
}
while (running) {
TAOS_RES* tmqMsg = tmq_consumer_poll(pInfo->tmq, g_stConfInfo.consumeDelay * 1000);
if (tmqMsg) {
@ -475,10 +624,10 @@ void loop_consume(SThreadInfo* pInfo) {
totalMsgs++;
if (0 == once_flag) {
if (0 == once_flag) {
once_flag = 1;
notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM);
}
notifyMainScript(pInfo, NOTIFY_CMD_START_CONSUM);
}
if (totalRows >= pInfo->expectMsgCnt) {
char tmpString[128];
@ -507,7 +656,7 @@ void* consumeThreadFunc(void* param) {
pInfo->taos = taos_connect(NULL, "root", "taosdata", NULL, 0);
if (pInfo->taos == NULL) {
taosFprintfFile(g_fp, "taos_connect() fail, can not notify and save consume result to main scripte\n");
exit(-1);
exit(-1);
}
build_consumer(pInfo);

View File

@ -21,6 +21,7 @@
#define UP 3
#define DOWN 4
#define PSIZE shell.info.promptSize
#define SHELL_INPUT_MAX_COMMAND_SIZE 10000
typedef struct {
char *buffer;
@ -227,6 +228,7 @@ void shellPrintChar(char c, int32_t times) {
}
void shellPositionCursor(int32_t step, int32_t direction) {
#ifndef WINDOWS
if (step > 0) {
if (direction == LEFT) {
fprintf(stdout, "\033[%dD", step);
@ -239,6 +241,7 @@ void shellPositionCursor(int32_t step, int32_t direction) {
}
fflush(stdout);
}
#endif
}
void shellUpdateBuffer(SShellCmd *cmd) {
@ -330,10 +333,14 @@ void shellClearScreen(int32_t ecmd_pos, int32_t cursor_pos) {
int32_t command_x = ecmd_pos / ws_col;
shellPositionCursor(cursor_y, LEFT);
shellPositionCursor(command_x - cursor_x, DOWN);
#ifndef WINDOWS
fprintf(stdout, "\033[2K");
#endif
for (int32_t i = 0; i < command_x; i++) {
shellPositionCursor(1, UP);
#ifndef WINDOWS
fprintf(stdout, "\033[2K");
#endif
}
fflush(stdout);
}
@ -394,6 +401,41 @@ void shellShowOnScreen(SShellCmd *cmd) {
fflush(stdout);
}
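/* On Windows, read wide characters from the console via ReadConsoleW, convert each one to UTF-8 with WideCharToMultiByte and return the bytes one at a time (skipping '\r'); on other platforms this is simply getchar(). */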
char taosGetConsoleChar() {
#ifdef WINDOWS
static void *console = NULL;
if (console == NULL) {
console = GetStdHandle(STD_INPUT_HANDLE);
}
static TdWchar buf[SHELL_INPUT_MAX_COMMAND_SIZE];
static char mbStr[5];
static unsigned long bufLen = 0;
static uint16_t bufIndex = 0, mbStrIndex = 0, mbStrLen = 0;
if (bufLen == 0) {
ReadConsoleW(console, buf, SHELL_INPUT_MAX_COMMAND_SIZE, &bufLen, NULL);
bufIndex = 0;
}
if (mbStrLen == 0){
if (buf[bufIndex] == '\r') {
bufIndex++;
}
mbStrLen = WideCharToMultiByte(CP_UTF8, 0, &buf[bufIndex], 1, mbStr, sizeof(mbStr), NULL, NULL);
mbStrIndex = 0;
bufIndex++;
}
mbStrIndex++;
if (mbStrIndex == mbStrLen) {
mbStrLen = 0;
if (bufIndex == bufLen) {
bufLen = 0;
}
}
return mbStr[mbStrIndex-1];
#else
return (char)getchar(); // getchar() returns an 'int32_t' value
#endif
}
int32_t shellReadCommand(char *command) {
SShellHistory *pHistory = &shell.history;
SShellCmd cmd = {0};
@ -407,7 +449,7 @@ int32_t shellReadCommand(char *command) {
// Read input.
char c;
while (1) {
c = (char)getchar(); // getchar() return an 'int32_t' value
c = taosGetConsoleChar();
if (c == EOF) {
return c;
@ -417,7 +459,7 @@ int32_t shellReadCommand(char *command) {
int32_t count = shellCountPrefixOnes(c);
utf8_array[0] = c;
for (int32_t k = 1; k < count; k++) {
c = (char)getchar();
c = taosGetConsoleChar();
utf8_array[k] = c;
}
shellInsertChar(&cmd, utf8_array, count);
@ -472,10 +514,10 @@ int32_t shellReadCommand(char *command) {
break;
}
} else if (c == '\033') {
c = (char)getchar();
c = taosGetConsoleChar();
switch (c) {
case '[':
c = (char)getchar();
c = taosGetConsoleChar();
switch (c) {
case 'A': // Up arrow
if (hist_counter != pHistory->hstart) {
@ -502,35 +544,35 @@ int32_t shellReadCommand(char *command) {
shellMoveCursorLeft(&cmd);
break;
case '1':
if ((c = (char)getchar()) == '~') {
if ((c = taosGetConsoleChar()) == '~') {
// Home key
shellPositionCursorHome(&cmd);
}
break;
case '2':
if ((c = (char)getchar()) == '~') {
if ((c = taosGetConsoleChar()) == '~') {
// Insert key
}
break;
case '3':
if ((c = (char)getchar()) == '~') {
if ((c = taosGetConsoleChar()) == '~') {
// Delete key
shellDeleteChar(&cmd);
}
break;
case '4':
if ((c = (char)getchar()) == '~') {
if ((c = taosGetConsoleChar()) == '~') {
// End key
shellPositionCursorEnd(&cmd);
}
break;
case '5':
if ((c = (char)getchar()) == '~') {
if ((c = taosGetConsoleChar()) == '~') {
// Page up key
}
break;
case '6':
if ((c = (char)getchar()) == '~') {
if ((c = taosGetConsoleChar()) == '~') {
// Page down key
}
break;

Some files were not shown because too many files have changed in this diff Show More