From 4845ca7f9921e2a2638b4eba639132d7a6fe06fd Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 28 Oct 2021 09:56:20 +0800 Subject: [PATCH 1/7] [raft]refactor raft interface,add log store methods --- include/libs/sync/sync.h | 50 ++++++++++++++++++++++++++++++++-------- 1 file changed, 40 insertions(+), 10 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index a0602ec1b0..30583686c5 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -22,7 +22,6 @@ extern "C" { #include #include "taosdef.h" -#include "wal.h" typedef int64_t SyncNodeId; typedef int32_t SyncGroupId; typedef int64_t SyncIndex; @@ -41,6 +40,7 @@ typedef struct { } SSyncBuffer; typedef struct { + SyncNodeId nodeId; uint16_t nodePort; // node sync Port char nodeFqdn[TSDB_FQDN_LEN]; // node FQDN } SNodeInfo; @@ -83,11 +83,38 @@ typedef struct SSyncFSM { } SSyncFSM; +typedef struct SSyncLogStore { + void* pData; + + // write log with given index + int32_t (*logWrite)(struct SSyncLogStore* logStore, SyncIndex index, SSyncBuffer* pBuf); + + // mark log with given index as having been committed + int32_t (*logCommit)(struct SSyncLogStore* logStore, SyncIndex index); + + // prune log before given index + int32_t (*logPrune)(struct SSyncLogStore* logStore, SyncIndex index); + + // rollback log after given index + int32_t (*logRollback)(struct SSyncLogStore* logStore, SyncIndex index); +} SSyncLogStore; + typedef struct SSyncServerState { - SNodeInfo voteFor; + SyncNodeId voteFor; SSyncTerm term; } SSyncServerState; +typedef struct SSyncClusterConfig { + // Log index number of current cluster config. + SyncIndex index; + + // Log index number of previous cluster config. + SyncIndex prevIndex; + + // current cluster + const SSyncCluster* cluster; +} SSyncClusterConfig; + typedef struct SStateManager { void* pData; @@ -95,35 +122,38 @@ typedef struct SStateManager { const SSyncServerState* (*readServerState)(struct SStateManager* stateMng); - void (*saveCluster)(struct SStateManager* stateMng, const SSyncCluster* cluster); + void (*saveCluster)(struct SStateManager* stateMng, const SSyncClusterConfig* cluster); - const SSyncCluster* (*readCluster)(struct SStateManager* stateMng); + const SSyncClusterConfig* (*readCluster)(struct SStateManager* stateMng); } SStateManager; typedef struct { SyncGroupId vgId; - twalh walHandle; - SyncIndex snapshotIndex; SSyncCluster syncCfg; SSyncFSM fsm; + SSyncLogStore logStore; + SStateManager stateManager; } SSyncInfo; +struct SSyncNode; +typedef struct SSyncNode SSyncNode; + int32_t syncInit(); void syncCleanUp(); -SyncNodeId syncStart(const SSyncInfo*); +SSyncNode syncStart(const SSyncInfo*); void syncStop(SyncNodeId); -int32_t syncPropose(SyncNodeId nodeId, SSyncBuffer buffer, void* pData, bool isWeak); +int32_t syncPropose(SSyncNode syncNode, SSyncBuffer buffer, void* pData, bool isWeak); -int32_t syncAddNode(SyncNodeId nodeId, const SNodeInfo *pNode); +int32_t syncAddNode(SSyncNode syncNode, const SNodeInfo *pNode); -int32_t syncRemoveNode(SyncNodeId nodeId, const SNodeInfo *pNode); +int32_t syncRemoveNode(SSyncNode syncNode, const SNodeInfo *pNode); extern int32_t syncDebugFlag; From c50a21cb79f9642bf1a694359798c164dcf991f4 Mon Sep 17 00:00:00 2001 From: lichuang Date: Thu, 28 Oct 2021 10:03:52 +0800 Subject: [PATCH 2/7] merge from 3.0 From d84f44c1499642c0bc975fbd50d40cd8cb01aaec Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 28 Oct 2021 11:36:51 +0800 Subject: [PATCH 3/7] minor changes --- include/util/taoserror.h | 8 +-- source/server/vnode/inc/vnodeInt.h | 2 -
source/server/vnode/src/vnodeFile.c | 83 ++++++++++++---------------- source/server/vnode/src/vnodeInt.c | 35 ++++++++++++ source/server/vnode/src/vnodeMain.c | 36 ------------ source/server/vnode/src/vnodeWrite.c | 2 - source/util/src/terror.c | 8 +-- 7 files changed, 78 insertions(+), 96 deletions(-) diff --git a/include/util/taoserror.h b/include/util/taoserror.h index cf8cd510c5..76c5f575a5 100644 --- a/include/util/taoserror.h +++ b/include/util/taoserror.h @@ -233,11 +233,11 @@ int32_t* taosGetErrno(); #define TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR TAOS_DEF_ERROR_CODE(0, 0x0507) //"Missing data file") #define TSDB_CODE_VND_OUT_OF_MEMORY TAOS_DEF_ERROR_CODE(0, 0x0508) //"Out of memory") #define TSDB_CODE_VND_APP_ERROR TAOS_DEF_ERROR_CODE(0, 0x0509) //"Unexpected generic error in vnode") -#define TSDB_CODE_VND_INVALID_VRESION_FILE TAOS_DEF_ERROR_CODE(0, 0x050A) //"Invalid version file") -#define TSDB_CODE_VND_IS_FULL TAOS_DEF_ERROR_CODE(0, 0x050B) //"Database memory is full for commit failed") -#define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full for waiting commit") +#define TSDB_CODE_VND_INVALID_CFG_FILE TAOS_DEF_ERROR_CODE(0, 0x050A) //"Invalid config file) +#define TSDB_CODE_VND_INVALID_TERM_FILE TAOS_DEF_ERROR_CODE(0, 0x050B) //"Invalid term file") +#define TSDB_CODE_VND_IS_FLOWCTRL TAOS_DEF_ERROR_CODE(0, 0x050C) //"Database memory is full") #define TSDB_CODE_VND_IS_DROPPING TAOS_DEF_ERROR_CODE(0, 0x050D) //"Database is dropping") -#define TSDB_CODE_VND_IS_BALANCING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is balancing") +#define TSDB_CODE_VND_IS_UPDATING TAOS_DEF_ERROR_CODE(0, 0x050E) //"Database is updating") #define TSDB_CODE_VND_IS_CLOSING TAOS_DEF_ERROR_CODE(0, 0x0510) //"Database is closing") #define TSDB_CODE_VND_NOT_SYNCED TAOS_DEF_ERROR_CODE(0, 0x0511) //"Database suspended") #define TSDB_CODE_VND_NO_WRITE_AUTH TAOS_DEF_ERROR_CODE(0, 0x0512) //"Database write operation denied") diff --git a/source/server/vnode/inc/vnodeInt.h b/source/server/vnode/inc/vnodeInt.h index 245455ef18..d94c5ba2b5 100644 --- a/source/server/vnode/inc/vnodeInt.h +++ b/source/server/vnode/inc/vnodeInt.h @@ -77,8 +77,6 @@ typedef struct SVnodeCfg { SSyncCfg sync; } SVnodeCfg; - - typedef struct { int32_t vgId; // global vnode group ID int32_t refCount; // reference count diff --git a/source/server/vnode/src/vnodeFile.c b/source/server/vnode/src/vnodeFile.c index 8453c985c3..9835e3e0fb 100644 --- a/source/server/vnode/src/vnodeFile.c +++ b/source/server/vnode/src/vnodeFile.c @@ -280,90 +280,77 @@ int32_t vnodeWriteCfg(int32_t vgId, SVnodeCfg *pCfg) { return TSDB_CODE_SUCCESS; } -int32_t vnodeReadTerm(int32_t vgId, SSyncServerState *pState){ -#if 0 +int32_t vnodeReadTerm(int32_t vgId, SSyncServerState *pState) { + int32_t ret = TSDB_CODE_VND_APP_ERROR; int32_t len = 0; int32_t maxLen = 100; - char * content = calloc(1, maxLen + 1); - cJSON * root = NULL; - FILE * fp = NULL; + char *content = calloc(1, maxLen + 1); + cJSON *root = NULL; + FILE *fp = NULL; - terrno = TSDB_CODE_VND_INVALID_VRESION_FILE; - char file[TSDB_FILENAME_LEN + 30] = {0}; - sprintf(file, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId); - - fp = fopen(file, "r"); - if (!fp) { - if (errno != ENOENT) { - vError("vgId:%d, failed to read %s, error:%s", pVnode->vgId, file, strerror(errno)); - terrno = TAOS_SYSTEM_ERROR(errno); - } else { - terrno = TSDB_CODE_SUCCESS; - } - goto PARSE_VER_ERROR; - } + char file[PATH_MAX + 30] = {0}; + sprintf(file, "%s/vnode%d/term.json", tsVnodeDir, vgId); len = 
(int32_t)fread(content, 1, maxLen, fp); if (len <= 0) { - vError("vgId:%d, failed to read %s, content is null", pVnode->vgId, file); - goto PARSE_VER_ERROR; + vError("vgId:%d, failed to read %s since content is null", vgId, file); + goto PARSE_TERM_ERROR; } root = cJSON_Parse(content); if (root == NULL) { - vError("vgId:%d, failed to read %s, invalid json format", pVnode->vgId, file); - goto PARSE_VER_ERROR; + vError("vgId:%d, failed to read %s since invalid json format", vgId, file); + goto PARSE_TERM_ERROR; } - cJSON *ver = cJSON_GetObjectItem(root, "version"); - if (!ver || ver->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, version not found", pVnode->vgId, file); - goto PARSE_VER_ERROR; + cJSON *term = cJSON_GetObjectItem(root, "term"); + if (!term || term->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since term not found", vgId, file); + goto PARSE_TERM_ERROR; } -#if 0 - pVnode->version = (uint64_t)ver->valueint; + pState->term = (uint64_t)term->valueint; - terrno = TSDB_CODE_SUCCESS; - vInfo("vgId:%d, read %s successfully, fver:%" PRIu64, pVnode->vgId, file, pVnode->version); -#endif + cJSON *voteFor = cJSON_GetObjectItem(root, "voteFor"); + if (!voteFor || voteFor->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since voteFor not found", vgId, file); + goto PARSE_TERM_ERROR; + } + pState->voteFor = (int64_t)voteFor->valueint; -PARSE_VER_ERROR: + vInfo("vgId:%d, read %s success, voteFor:%" PRIu64 ", term:%" PRIu64, vgId, file, pState->voteFor, pState->term); + +PARSE_TERM_ERROR: if (content != NULL) free(content); if (root != NULL) cJSON_Delete(root); if (fp != NULL) fclose(fp); - return terrno; -#endif - return 0; + return ret; } -int32_t vnodeWriteTerm(int32_t vgid, SSyncServerState *pState) { -#if 0 - char file[TSDB_FILENAME_LEN + 30] = {0}; - sprintf(file, "%s/vnode%d/version.json", tsVnodeDir, pVnode->vgId); +int32_t vnodeWriteTerm(int32_t vgId, SSyncServerState *pState) { + char file[PATH_MAX + 30] = {0}; + sprintf(file, "%s/vnode%d/term.json", tsVnodeDir, vgId); FILE *fp = fopen(file, "w"); if (!fp) { - vError("vgId:%d, failed to write %s, reason:%s", pVnode->vgId, file, strerror(errno)); + vError("vgId:%d, failed to write %s since %s", vgId, file, strerror(errno)); return -1; } int32_t len = 0; int32_t maxLen = 100; - char * content = calloc(1, maxLen + 1); + char *content = calloc(1, maxLen + 1); -#if 0 len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"version\": %" PRIu64 "\n", pVnode->fversion); + len += snprintf(content + len, maxLen - len, " \"term\": %" PRIu64 "\n", pState->term); + len += snprintf(content + len, maxLen - len, " \"voteFor\": %" PRIu64 "\n", pState->voteFor); len += snprintf(content + len, maxLen - len, "}\n"); -#endif + fwrite(content, 1, len, fp); taosFsyncFile(fileno(fp)); fclose(fp); free(content); - terrno = 0; - // vInfo("vgId:%d, successed to write %s, fver:%" PRIu64, pVnode->vgId, file, pVnode->fversion); -#endif + vInfo("vgId:%d, write %s success, voteFor:%" PRIu64 ", term:%" PRIu64, vgId, file, pState->voteFor, pState->term); return TSDB_CODE_SUCCESS; } \ No newline at end of file diff --git a/source/server/vnode/src/vnodeInt.c b/source/server/vnode/src/vnodeInt.c index 5a5ba4df01..ed295160ba 100644 --- a/source/server/vnode/src/vnodeInt.c +++ b/source/server/vnode/src/vnodeInt.c @@ -24,6 +24,7 @@ static struct { SSteps *steps; SVnodeFp fp; + void (*msgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *); } tsVint; void vnodeGetDnodeEp(int32_t dnodeId, char *ep, 
char *fqdn, uint16_t *port) { @@ -36,7 +37,41 @@ void vnodeSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg) { void vnodeSendMsgToMnode(struct SRpcMsg *rpcMsg) { return (*tsVint.fp.SendMsgToMnode)(rpcMsg); } +void vnodeProcessMsg(SRpcMsg *pMsg) { + if (tsVint.msgFp[pMsg->msgType]) { + (*tsVint.msgFp[pMsg->msgType])(pMsg); + } else { + assert(0); + } +} + +static void vnodeInitMsgFp() { + tsVint.msgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = vnodeProcessMgmtMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = vnodeProcessMgmtMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = vnodeProcessMgmtMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = vnodeProcessMgmtMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = vnodeProcessMgmtMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = vnodeProcessMgmtMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessWriteMsg; + // mq related + tsVint.msgFp[TSDB_MSG_TYPE_MQ_CONNECT] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MQ_DISCONNECT] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MQ_ACK] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MQ_RESET] = vnodeProcessWriteMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessReadMsg; + tsVint.msgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessReadMsg; + // mq related end + tsVint.msgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessReadMsg; + tsVint.msgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessReadMsg; +} + int32_t vnodeInit(SVnodePara para) { + vnodeInitMsgFp(); tsVint.fp = para.fp; struct SSteps *steps = taosStepInit(8, NULL); diff --git a/source/server/vnode/src/vnodeMain.c b/source/server/vnode/src/vnodeMain.c index bfe2df9e43..5143f04c5b 100644 --- a/source/server/vnode/src/vnodeMain.c +++ b/source/server/vnode/src/vnodeMain.c @@ -44,7 +44,6 @@ static struct { SHashObj *hash; int32_t openVnodes; int32_t totalVnodes; - void (*msgFp[TSDB_MSG_TYPE_MAX])(SRpcMsg *); } tsVnode; static bool vnodeSetInitStatus(SVnode *pVnode) { @@ -566,34 +565,7 @@ void vnodeRelease(SVnode *pVnode) { } } -static void vnodeInitMsgFp() { - tsVnode.msgFp[TSDB_MSG_TYPE_MD_CREATE_VNODE] = vnodeProcessMgmtMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_ALTER_VNODE] = vnodeProcessMgmtMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_SYNC_VNODE] = vnodeProcessMgmtMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_COMPACT_VNODE] = vnodeProcessMgmtMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_DROP_VNODE] = vnodeProcessMgmtMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_ALTER_STREAM] = vnodeProcessMgmtMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_CREATE_TABLE] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_DROP_TABLE] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_ALTER_TABLE] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MD_DROP_STABLE] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_SUBMIT] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_UPDATE_TAG_VAL] = vnodeProcessWriteMsg; - // mq related - tsVnode.msgFp[TSDB_MSG_TYPE_MQ_CONNECT] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MQ_DISCONNECT] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MQ_ACK] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MQ_RESET] = vnodeProcessWriteMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_MQ_QUERY] = vnodeProcessReadMsg; - 
tsVnode.msgFp[TSDB_MSG_TYPE_MQ_CONSUME] = vnodeProcessReadMsg; - // mq related end - tsVnode.msgFp[TSDB_MSG_TYPE_QUERY] = vnodeProcessReadMsg; - tsVnode.msgFp[TSDB_MSG_TYPE_FETCH] = vnodeProcessReadMsg; -} - int32_t vnodeInitMain() { - vnodeInitMsgFp(); - tsVnode.hash = taosHashInit(TSDB_MIN_VNODES, taosGetDefaultHashFunction(TSDB_DATA_TYPE_INT), true, HASH_ENTRY_LOCK); if (tsVnode.hash == NULL) { vError("failed to init vnode mgmt"); @@ -654,11 +626,3 @@ void vnodeSetAccess(SVgroupAccess *pAccess, int32_t numOfVnodes) { } } } - -void vnodeProcessMsg(SRpcMsg *pMsg) { - if (tsVnode.msgFp[pMsg->msgType]) { - (*tsVnode.msgFp[pMsg->msgType])(pMsg); - } else { - assert(0); - } -} diff --git a/source/server/vnode/src/vnodeWrite.c b/source/server/vnode/src/vnodeWrite.c index 70fa9bff80..f3258af0bf 100644 --- a/source/server/vnode/src/vnodeWrite.c +++ b/source/server/vnode/src/vnodeWrite.c @@ -15,8 +15,6 @@ #define _DEFAULT_SOURCE #include "os.h" -#include "tqueue.h" -#include "tworker.h" #include "vnodeMain.h" #include "vnodeWrite.h" #include "vnodeWriteMsg.h" diff --git a/source/util/src/terror.c b/source/util/src/terror.c index 68cd067fb9..8e5d7a47fd 100644 --- a/source/util/src/terror.c +++ b/source/util/src/terror.c @@ -245,11 +245,11 @@ TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_DISK_PERMISSIONS, "No write permission f TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_SUCH_FILE_OR_DIR, "Missing data file") TAOS_DEFINE_ERROR(TSDB_CODE_VND_OUT_OF_MEMORY, "Out of memory") TAOS_DEFINE_ERROR(TSDB_CODE_VND_APP_ERROR, "Unexpected generic error in vnode") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_VRESION_FILE, "Invalid version file") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FULL, "Database memory is full for commit failed") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, "Database memory is full for waiting commit") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_CFG_FILE, "Invalid config file") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_INVALID_TERM_FILE, "Invalid term file") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_FLOWCTRL, "Database memory is full") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_DROPPING, "Database is dropping") -TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_BALANCING, "Database is balancing") +TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_UPDATING, "Database is updating") TAOS_DEFINE_ERROR(TSDB_CODE_VND_IS_CLOSING, "Database is closing") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NOT_SYNCED, "Database suspended") TAOS_DEFINE_ERROR(TSDB_CODE_VND_NO_WRITE_AUTH, "Database write operation denied") From 18697d841b9898d7e1be64f85de031637a748e3a Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 28 Oct 2021 11:46:27 +0800 Subject: [PATCH 4/7] add progress step --- include/server/dnode/dnode.h | 5 +++++ include/server/vnode/vnode.h | 5 +++++ source/server/dnode/src/dnodeInt.c | 3 ++- source/server/vnode/inc/vnodeInt.h | 1 + source/server/vnode/src/vnodeInt.c | 2 ++ 5 files changed, 15 insertions(+), 1 deletion(-) diff --git a/include/server/dnode/dnode.h b/include/server/dnode/dnode.h index 3499913afa..bc0d1e89b0 100644 --- a/include/server/dnode/dnode.h +++ b/include/server/dnode/dnode.h @@ -67,6 +67,11 @@ void dnodeSendRedirectMsg(struct SRpcMsg *rpcMsg, bool forShell); */ void dnodeGetEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); +/** + * Report the startup progress. 
+ */ +void dnodeReportStartup(char *name, char *desc); + #ifdef __cplusplus } #endif diff --git a/include/server/vnode/vnode.h b/include/server/vnode/vnode.h index 00decfe338..ecb1412b06 100644 --- a/include/server/vnode/vnode.h +++ b/include/server/vnode/vnode.h @@ -46,6 +46,11 @@ typedef struct { */ void (*GetDnodeEp)(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); + /** + * Report the startup progress. + */ + void (*ReportStartup)(char *name, char *desc); + } SVnodeFp; typedef struct { diff --git a/source/server/dnode/src/dnodeInt.c b/source/server/dnode/src/dnodeInt.c index d294143e57..7b0b87368e 100644 --- a/source/server/dnode/src/dnodeInt.c +++ b/source/server/dnode/src/dnodeInt.c @@ -37,7 +37,7 @@ EDnStat dnodeGetRunStat() { return tsDnode.runStatus; } void dnodeSetRunStat(EDnStat stat) { tsDnode.runStatus = stat; } -static void dnodeReportStartup(char *name, char *desc) { +void dnodeReportStartup(char *name, char *desc) { SStartupStep *startup = &tsDnode.startup; tstrncpy(startup->name, name, strlen(startup->name)); tstrncpy(startup->desc, desc, strlen(startup->desc)); @@ -58,6 +58,7 @@ static int32_t dnodeInitVnode() { para.fp.GetDnodeEp = dnodeGetEp; para.fp.SendMsgToDnode = dnodeSendMsgToDnode; para.fp.SendMsgToMnode = dnodeSendMsgToMnode; + para.fp.ReportStartup = dnodeReportStartup; return vnodeInit(para); } diff --git a/source/server/vnode/inc/vnodeInt.h b/source/server/vnode/inc/vnodeInt.h index d94c5ba2b5..3c7487f681 100644 --- a/source/server/vnode/inc/vnodeInt.h +++ b/source/server/vnode/inc/vnodeInt.h @@ -112,6 +112,7 @@ typedef struct { void vnodeSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg); void vnodeSendMsgToMnode(struct SRpcMsg *rpcMsg); void vnodeGetDnodeEp(int32_t dnodeId, char *ep, char *fqdn, uint16_t *port); +void vnodeReportStartup(char *name, char *desc); #ifdef __cplusplus } diff --git a/source/server/vnode/src/vnodeInt.c b/source/server/vnode/src/vnodeInt.c index ed295160ba..9e1739a68e 100644 --- a/source/server/vnode/src/vnodeInt.c +++ b/source/server/vnode/src/vnodeInt.c @@ -37,6 +37,8 @@ void vnodeSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg) { void vnodeSendMsgToMnode(struct SRpcMsg *rpcMsg) { return (*tsVint.fp.SendMsgToMnode)(rpcMsg); } +void vnodeReportStartup(char *name, char *desc) { (*tsVint.fp.ReportStartup)(name, desc); } + void vnodeProcessMsg(SRpcMsg *pMsg) { if (tsVint.msgFp[pMsg->msgType]) { (*tsVint.msgFp[pMsg->msgType])(pMsg); From 8ad7c2fd26ee7502d81381b9209f14ea29c1cd96 Mon Sep 17 00:00:00 2001 From: Shengliang Guan Date: Thu, 28 Oct 2021 15:50:53 +0800 Subject: [PATCH 5/7] add sync interface --- include/common/taosmsg.h | 5 +- include/libs/sync/sync.h | 59 +++--- source/libs/sync/src/sync.c | 11 +- source/server/mnode/inc/mnodeInt.h | 2 +- source/server/mnode/src/mnodeTelem.c | 7 +- source/server/mnode/src/mondeInt.c | 12 +- source/server/vnode/inc/vnodeInt.h | 21 +- source/server/vnode/src/vnodeFile.c | 278 ++++++++++++++------------- source/server/vnode/src/vnodeMain.c | 40 +++- source/server/vnode/src/vnodeMgmt.c | 9 +- 10 files changed, 249 insertions(+), 195 deletions(-) diff --git a/include/common/taosmsg.h b/include/common/taosmsg.h index 50594fac00..d571153c1a 100644 --- a/include/common/taosmsg.h +++ b/include/common/taosmsg.h @@ -721,6 +721,8 @@ typedef struct { int32_t daysToKeep2; int32_t minRowsPerFileBlock; int32_t maxRowsPerFileBlock; + int32_t fsyncPeriod; + int8_t reserved[16]; int8_t precision; int8_t compression; int8_t cacheLastRow; @@ -728,8 +730,7 @@ typedef struct { 
int8_t walLevel; int8_t replica; int8_t quorum; - int8_t reserved[9]; - int32_t fsyncPeriod; + int8_t selfIndex; SVnodeDesc nodes[TSDB_MAX_REPLICA]; } SCreateVnodeMsg, SAlterVnodeMsg; diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index 30583686c5..e8a8dee866 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -23,9 +23,9 @@ extern "C" { #include #include "taosdef.h" -typedef int64_t SyncNodeId; -typedef int32_t SyncGroupId; -typedef int64_t SyncIndex; +typedef int32_t SyncNodeId; +typedef int32_t SyncGroupId; +typedef int64_t SyncIndex; typedef uint64_t SSyncTerm; typedef enum { @@ -41,21 +41,21 @@ typedef struct { typedef struct { SyncNodeId nodeId; - uint16_t nodePort; // node sync Port - char nodeFqdn[TSDB_FQDN_LEN]; // node FQDN + uint16_t nodePort; // node sync Port + char nodeFqdn[TSDB_FQDN_LEN]; // node FQDN } SNodeInfo; typedef struct { - int selfIndex; - int nNode; - SNodeInfo* nodeInfo; + int selfIndex; + int replica; + SNodeInfo nodeInfo[TSDB_MAX_REPLICA]; } SSyncCluster; typedef struct { - int32_t selfIndex; - int nNode; - SNodeInfo* node; - ESyncRole* role; + int32_t selfIndex; + int replica; + SNodeInfo node[TSDB_MAX_REPLICA]; + ESyncRole role[TSDB_MAX_REPLICA]; } SNodesRole; typedef struct SSyncFSM { @@ -101,13 +101,13 @@ typedef struct SSyncLogStore { typedef struct SSyncServerState { SyncNodeId voteFor; - SSyncTerm term; + SSyncTerm term; } SSyncServerState; typedef struct SSyncClusterConfig { // Log index number of current cluster config. SyncIndex index; - + // Log index number of previous cluster config. SyncIndex prevIndex; @@ -122,21 +122,17 @@ typedef struct SStateManager { const SSyncServerState* (*readServerState)(struct SStateManager* stateMng); - void (*saveCluster)(struct SStateManager* stateMng, const SSyncClusterConfig* cluster); + // void (*saveCluster)(struct SStateManager* stateMng, const SSyncClusterConfig* cluster); - const SSyncClusterConfig* (*readCluster)(struct SStateManager* stateMng); + // const SSyncClusterConfig* (*readCluster)(struct SStateManager* stateMng); } SStateManager; typedef struct { - SyncGroupId vgId; - - SyncIndex snapshotIndex; - SSyncCluster syncCfg; - - SSyncFSM fsm; - + SyncGroupId vgId; + SyncIndex snapshotIndex; + SSyncCluster syncCfg; + SSyncFSM fsm; SSyncLogStore logStore; - SStateManager stateManager; } SSyncInfo; @@ -146,19 +142,20 @@ typedef struct SSyncNode SSyncNode; int32_t syncInit(); void syncCleanUp(); -SSyncNode syncStart(const SSyncInfo*); -void syncStop(SyncNodeId); +SSyncNode* syncStart(const SSyncInfo*); +void syncReconfig(const SSyncNode*, const SSyncCluster*); +void syncStop(const SSyncNode*); -int32_t syncPropose(SSyncNode syncNode, SSyncBuffer buffer, void* pData, bool isWeak); +int32_t syncPropose(SSyncNode* syncNode, SSyncBuffer buffer, void* pData, bool isWeak); -int32_t syncAddNode(SSyncNode syncNode, const SNodeInfo *pNode); +//int32_t syncAddNode(SSyncNode syncNode, const SNodeInfo *pNode); -int32_t syncRemoveNode(SSyncNode syncNode, const SNodeInfo *pNode); +//int32_t syncRemoveNode(SSyncNode syncNode, const SNodeInfo *pNode); -extern int32_t syncDebugFlag; +extern int32_t syncDebugFlag; #ifdef __cplusplus } #endif -#endif /*_TD_LIBS_SYNC_H*/ +#endif /*_TD_LIBS_SYNC_H*/ diff --git a/source/libs/sync/src/sync.c b/source/libs/sync/src/sync.c index 4b3ca11e4b..879f2d4f6d 100644 --- a/source/libs/sync/src/sync.c +++ b/source/libs/sync/src/sync.c @@ -15,5 +15,12 @@ #include "sync.h" -int32_t syncInit() {return 0;} -void syncCleanUp() {} \ No newline at end of file 
+int32_t syncInit() { return 0; } + +void syncCleanUp() {} + +SSyncNode* syncStart(const SSyncInfo* pInfo) { return NULL; } + +void syncStop(const SSyncNode* pNode) {} + +void syncReconfig(const SSyncNode* pNode, const SSyncCluster* pCfg) {} \ No newline at end of file diff --git a/source/server/mnode/inc/mnodeInt.h b/source/server/mnode/inc/mnodeInt.h index 42d3c53fa2..0ce47cbe36 100644 --- a/source/server/mnode/inc/mnodeInt.h +++ b/source/server/mnode/inc/mnodeInt.h @@ -24,7 +24,7 @@ extern "C" { tmr_h mnodeGetTimer(); int32_t mnodeGetDnodeId(); -char *mnodeGetClusterId(); +int64_t mnodeGetClusterId(); EMnStatus mnodeGetStatus(); void mnodeSendMsgToDnode(struct SRpcEpSet *epSet, struct SRpcMsg *rpcMsg); diff --git a/source/server/mnode/src/mnodeTelem.c b/source/server/mnode/src/mnodeTelem.c index cb292342c7..8b8e4f9ce0 100644 --- a/source/server/mnode/src/mnodeTelem.c +++ b/source/server/mnode/src/mnodeTelem.c @@ -202,12 +202,13 @@ static void mnodeSendTelemetryReport() { return; } - char clusterId[TSDB_CLUSTER_ID_LEN] = {0}; - mnodeGetClusterId(clusterId); + int64_t clusterId = mnodeGetClusterId(); + char clusterIdStr[20] = {0}; + snprintf(clusterIdStr, sizeof(clusterIdStr), "%" PRId64, clusterId); SBufferWriter bw = tbufInitWriter(NULL, false); mnodeBeginObject(&bw); - mnodeAddStringField(&bw, "instanceId", clusterId); + mnodeAddStringField(&bw, "instanceId", clusterIdStr); mnodeAddIntField(&bw, "reportVersion", 1); mnodeAddOsInfo(&bw); mnodeAddCpuInfo(&bw); diff --git a/source/server/mnode/src/mondeInt.c b/source/server/mnode/src/mondeInt.c index 37af26f604..343384ba67 100644 --- a/source/server/mnode/src/mondeInt.c +++ b/source/server/mnode/src/mondeInt.c @@ -39,7 +39,7 @@ static struct { int32_t state; int32_t dnodeId; - char clusterId[TSDB_CLUSTER_ID_LEN]; + int64_t clusterId; tmr_h timer; SMnodeFp fp; SSteps * steps1; @@ -50,7 +50,7 @@ tmr_h mnodeGetTimer() { return tsMint.timer; } int32_t mnodeGetDnodeId() { return tsMint.dnodeId; } -char *mnodeGetClusterId() { return tsMint.clusterId; } +int64_t mnodeGetClusterId() { return tsMint.clusterId; } EMnStatus mnodeGetStatus() { return tsMint.state; } @@ -71,12 +71,14 @@ int32_t mnodeGetStatistics(SMnodeStat *stat) { return 0; } static int32_t mnodeSetPara(SMnodePara para) { tsMint.fp = para.fp; tsMint.dnodeId = para.dnodeId; - strncpy(tsMint.clusterId, para.clusterId, TSDB_CLUSTER_ID_LEN); + tsMint.clusterId = para.clusterId; if (tsMint.fp.SendMsgToDnode == NULL) return -1; if (tsMint.fp.SendMsgToMnode == NULL) return -1; if (tsMint.fp.SendRedirectMsg == NULL) return -1; + if (tsMint.fp.GetDnodeEp == NULL) return -1; if (tsMint.dnodeId < 0) return -1; + if (tsMint.clusterId < 0) return -1; return 0; } @@ -141,7 +143,7 @@ static void mnodeCleanupStep2() { taosStepCleanup(tsMint.steps2); } static bool mnodeNeedDeploy() { if (tsMint.dnodeId > 0) return false; - if (tsMint.clusterId[0] != 0) return false; + if (tsMint.clusterId > 0) return false; if (strcmp(tsFirst, tsLocalEp) != 0) return false; return true; } @@ -154,7 +156,7 @@ int32_t mnodeDeploy() { tsMint.state = MN_STATUS_INIT; } - if (tsMint.dnodeId <= 0 || tsMint.clusterId[0] == 0) { + if (tsMint.dnodeId <= 0 || tsMint.clusterId <= 0) { mError("failed to deploy mnode since cluster not ready"); return TSDB_CODE_MND_NOT_READY; } diff --git a/source/server/vnode/inc/vnodeInt.h b/source/server/vnode/inc/vnodeInt.h index 3c7487f681..ac6c77041f 100644 --- a/source/server/vnode/inc/vnodeInt.h +++ b/source/server/vnode/inc/vnodeInt.h @@ -62,19 +62,14 @@ typedef struct STsdbCfg { typedef 
struct SMetaCfg { } SMetaCfg; -typedef struct SSyncCluster { - int8_t replica; - int8_t quorum; - SNodeInfo nodes[TSDB_MAX_REPLICA]; -} SSyncCfg; - typedef struct SVnodeCfg { - char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; - int8_t dropped; - SWalCfg wal; - STsdbCfg tsdb; - SMetaCfg meta; - SSyncCfg sync; + char db[TSDB_ACCT_ID_LEN + TSDB_DB_NAME_LEN]; + int8_t dropped; + int8_t quorum; + SWalCfg wal; + STsdbCfg tsdb; + SMetaCfg meta; + SSyncCluster sync; } SVnodeCfg; typedef struct { @@ -86,7 +81,7 @@ typedef struct { STQ *pTQ; twalh pWal; void *pQuery; - SyncNodeId syncNode; + SSyncNode *pSync; taos_queue pWriteQ; // write queue taos_queue pQueryQ; // read query queue taos_queue pFetchQ; // read fetch/cancel queue diff --git a/source/server/vnode/src/vnodeFile.c b/source/server/vnode/src/vnodeFile.c index 9835e3e0fb..a77c99ec34 100644 --- a/source/server/vnode/src/vnodeFile.c +++ b/source/server/vnode/src/vnodeFile.c @@ -30,149 +30,156 @@ int32_t vnodeReadCfg(int32_t vgId, SVnodeCfg *pCfg) { fp = fopen(file, "r"); if (!fp) { - vError("vgId:%d, failed to open vnode cfg file:%s to read, error:%s", vgId, file, strerror(errno)); + vError("vgId:%d, failed to open vnode cfg file:%s to read since %s", vgId, file, strerror(errno)); ret = TAOS_SYSTEM_ERROR(errno); goto PARSE_VCFG_ERROR; } len = (int32_t)fread(content, 1, maxLen, fp); if (len <= 0) { - vError("vgId:%d, failed to read %s, content is null", vgId, file); + vError("vgId:%d, failed to read %s since content is null", vgId, file); goto PARSE_VCFG_ERROR; } content[len] = 0; root = cJSON_Parse(content); if (root == NULL) { - vError("vgId:%d, failed to read %s, invalid json format", vgId, file); + vError("vgId:%d, failed to read %s since invalid json format", vgId, file); goto PARSE_VCFG_ERROR; } cJSON *db = cJSON_GetObjectItem(root, "db"); if (!db || db->type != cJSON_String || db->valuestring == NULL) { - vError("vgId:%d, failed to read %s, db not found", vgId, file); + vError("vgId:%d, failed to read %s since db not found", vgId, file); goto PARSE_VCFG_ERROR; } tstrncpy(pCfg->db, db->valuestring, sizeof(pCfg->db)); cJSON *dropped = cJSON_GetObjectItem(root, "dropped"); if (!dropped || dropped->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, dropped not found", vgId, file); + vError("vgId:%d, failed to read %s since dropped not found", vgId, file); goto PARSE_VCFG_ERROR; } pCfg->dropped = (int32_t)dropped->valueint; - cJSON *cacheBlockSize = cJSON_GetObjectItem(root, "cacheBlockSize"); - if (!cacheBlockSize || cacheBlockSize->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, cacheBlockSize not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.cacheBlockSize = (int32_t)cacheBlockSize->valueint; - - cJSON *totalBlocks = cJSON_GetObjectItem(root, "totalBlocks"); - if (!totalBlocks || totalBlocks->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, totalBlocks not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.totalBlocks = (int32_t)totalBlocks->valueint; - - cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile"); - if (!daysPerFile || daysPerFile->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, daysPerFile not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.daysPerFile = (int32_t)daysPerFile->valueint; - - cJSON *daysToKeep0 = cJSON_GetObjectItem(root, "daysToKeep0"); - if (!daysToKeep0 || daysToKeep0->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, daysToKeep0 not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - 
pCfg->tsdb.daysToKeep0 = (int32_t)daysToKeep0->valueint; - - cJSON *daysToKeep1 = cJSON_GetObjectItem(root, "daysToKeep1"); - if (!daysToKeep1 || daysToKeep1->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, daysToKeep1 not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.daysToKeep1 = (int32_t)daysToKeep1->valueint; - - cJSON *daysToKeep2 = cJSON_GetObjectItem(root, "daysToKeep2"); - if (!daysToKeep2 || daysToKeep2->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, daysToKeep2 not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.daysToKeep2 = (int32_t)daysToKeep2->valueint; - - cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock"); - if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, minRowsPerFileBlock not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.minRowsPerFileBlock = (int32_t)minRowsPerFileBlock->valueint; - - cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock"); - if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, maxRowsPerFileBlock not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.maxRowsPerFileBlock = (int32_t)maxRowsPerFileBlock->valueint; - - cJSON *precision = cJSON_GetObjectItem(root, "precision"); - if (!precision || precision->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, precision not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.precision = (int8_t)precision->valueint; - - cJSON *compression = cJSON_GetObjectItem(root, "compression"); - if (!compression || compression->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, compression not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.compression = (int8_t)compression->valueint; - - cJSON *update = cJSON_GetObjectItem(root, "update"); - if (!update || update->type != cJSON_Number) { - vError("vgId: %d, failed to read %s, update not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.update = (int8_t)update->valueint; - - cJSON *cacheLastRow = cJSON_GetObjectItem(root, "cacheLastRow"); - if (!cacheLastRow || cacheLastRow->type != cJSON_Number) { - vError("vgId: %d, failed to read %s, cacheLastRow not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->tsdb.cacheLastRow = (int8_t)cacheLastRow->valueint; - - cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel"); - if (!walLevel || walLevel->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, walLevel not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->wal.walLevel = (int8_t)walLevel->valueint; - - cJSON *fsyncPeriod = cJSON_GetObjectItem(root, "fsyncPeriod"); - if (!walLevel || walLevel->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, fsyncPeriod not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->wal.fsyncPeriod = (int32_t)fsyncPeriod->valueint; - - cJSON *replica = cJSON_GetObjectItem(root, "replica"); - if (!replica || replica->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, replica not found", vgId, file); - goto PARSE_VCFG_ERROR; - } - pCfg->sync.replica = (int8_t)replica->valueint; - cJSON *quorum = cJSON_GetObjectItem(root, "quorum"); if (!quorum || quorum->type != cJSON_Number) { vError("vgId: %d, failed to read %s, quorum not found", vgId, file); goto PARSE_VCFG_ERROR; } - pCfg->sync.quorum = (int8_t)quorum->valueint; + pCfg->quorum = (int8_t)quorum->valueint; + + cJSON 
*cacheBlockSize = cJSON_GetObjectItem(root, "cacheBlockSize"); + if (!cacheBlockSize || cacheBlockSize->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since cacheBlockSize not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.cacheBlockSize = (int32_t)cacheBlockSize->valueint; + + cJSON *totalBlocks = cJSON_GetObjectItem(root, "totalBlocks"); + if (!totalBlocks || totalBlocks->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since totalBlocks not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.totalBlocks = (int32_t)totalBlocks->valueint; + + cJSON *daysPerFile = cJSON_GetObjectItem(root, "daysPerFile"); + if (!daysPerFile || daysPerFile->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since daysPerFile not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.daysPerFile = (int32_t)daysPerFile->valueint; + + cJSON *daysToKeep0 = cJSON_GetObjectItem(root, "daysToKeep0"); + if (!daysToKeep0 || daysToKeep0->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since daysToKeep0 not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.daysToKeep0 = (int32_t)daysToKeep0->valueint; + + cJSON *daysToKeep1 = cJSON_GetObjectItem(root, "daysToKeep1"); + if (!daysToKeep1 || daysToKeep1->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since daysToKeep1 not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.daysToKeep1 = (int32_t)daysToKeep1->valueint; + + cJSON *daysToKeep2 = cJSON_GetObjectItem(root, "daysToKeep2"); + if (!daysToKeep2 || daysToKeep2->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since daysToKeep2 not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.daysToKeep2 = (int32_t)daysToKeep2->valueint; + + cJSON *minRowsPerFileBlock = cJSON_GetObjectItem(root, "minRowsPerFileBlock"); + if (!minRowsPerFileBlock || minRowsPerFileBlock->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since minRowsPerFileBlock not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.minRowsPerFileBlock = (int32_t)minRowsPerFileBlock->valueint; + + cJSON *maxRowsPerFileBlock = cJSON_GetObjectItem(root, "maxRowsPerFileBlock"); + if (!maxRowsPerFileBlock || maxRowsPerFileBlock->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since maxRowsPerFileBlock not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.maxRowsPerFileBlock = (int32_t)maxRowsPerFileBlock->valueint; + + cJSON *precision = cJSON_GetObjectItem(root, "precision"); + if (!precision || precision->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since precision not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.precision = (int8_t)precision->valueint; + + cJSON *compression = cJSON_GetObjectItem(root, "compression"); + if (!compression || compression->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since compression not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.compression = (int8_t)compression->valueint; + + cJSON *update = cJSON_GetObjectItem(root, "update"); + if (!update || update->type != cJSON_Number) { + vError("vgId: %d, failed to read %s since update not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->tsdb.update = (int8_t)update->valueint; + + cJSON *cacheLastRow = cJSON_GetObjectItem(root, "cacheLastRow"); + if (!cacheLastRow || cacheLastRow->type != cJSON_Number) { + vError("vgId: %d, failed to read %s since cacheLastRow not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + 
pCfg->tsdb.cacheLastRow = (int8_t)cacheLastRow->valueint; + + cJSON *walLevel = cJSON_GetObjectItem(root, "walLevel"); + if (!walLevel || walLevel->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since walLevel not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->wal.walLevel = (int8_t)walLevel->valueint; + + cJSON *fsyncPeriod = cJSON_GetObjectItem(root, "fsyncPeriod"); + if (!walLevel || walLevel->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since fsyncPeriod not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->wal.fsyncPeriod = (int32_t)fsyncPeriod->valueint; + + cJSON *selfIndex = cJSON_GetObjectItem(root, "selfIndex"); + if (!selfIndex || selfIndex->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since selfIndex not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->sync.selfIndex = selfIndex->valueint; + + cJSON *replica = cJSON_GetObjectItem(root, "replica"); + if (!replica || replica->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since replica not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + pCfg->sync.replica = replica->valueint; cJSON *nodes = cJSON_GetObjectItem(root, "nodes"); if (!nodes || nodes->type != cJSON_Array) { @@ -182,28 +189,35 @@ int32_t vnodeReadCfg(int32_t vgId, SVnodeCfg *pCfg) { int size = cJSON_GetArraySize(nodes); if (size != pCfg->sync.replica) { - vError("vgId:%d, failed to read %s, nodes size not matched", vgId, file); + vError("vgId:%d, failed to read %s since nodes size not matched", vgId, file); goto PARSE_VCFG_ERROR; } for (int i = 0; i < size; ++i) { cJSON *nodeInfo = cJSON_GetArrayItem(nodes, i); if (nodeInfo == NULL) continue; - SNodeInfo *node = &pCfg->sync.nodes[i]; + SNodeInfo *node = &pCfg->sync.nodeInfo[i]; - cJSON *port = cJSON_GetObjectItem(nodeInfo, "port"); - if (!port || port->type != cJSON_Number) { - vError("vgId:%d, failed to read %s, port not found", vgId, file); + cJSON *nodeId = cJSON_GetObjectItem(nodeInfo, "id"); + if (!nodeId || nodeId->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since nodeId not found", vgId, file); goto PARSE_VCFG_ERROR; } - node->nodePort = (uint16_t)port->valueint; + node->nodeId = nodeId->valueint; - cJSON *fqdn = cJSON_GetObjectItem(nodeInfo, "fqdn"); - if (!fqdn || fqdn->type != cJSON_String || fqdn->valuestring == NULL) { - vError("vgId:%d, failed to read %s, fqdn not found", vgId, file); + cJSON *nodePort = cJSON_GetObjectItem(nodeInfo, "port"); + if (!nodePort || nodePort->type != cJSON_Number) { + vError("vgId:%d, failed to read %s since nodePort not found", vgId, file); goto PARSE_VCFG_ERROR; } - tstrncpy(node->nodeFqdn, fqdn->valuestring, TSDB_FQDN_LEN); + node->nodePort = (uint16_t)nodePort->valueint; + + cJSON *nodeFqdn = cJSON_GetObjectItem(nodeInfo, "fqdn"); + if (!nodeFqdn || nodeFqdn->type != cJSON_String || nodeFqdn->valuestring == NULL) { + vError("vgId:%d, failed to read %s since nodeFqdn not found", vgId, file); + goto PARSE_VCFG_ERROR; + } + tstrncpy(node->nodeFqdn, nodeFqdn->valuestring, TSDB_FQDN_LEN); } ret = TSDB_CODE_SUCCESS; @@ -238,6 +252,7 @@ int32_t vnodeWriteCfg(int32_t vgId, SVnodeCfg *pCfg) { len += snprintf(content + len, maxLen - len, " \"vgId\": %d,\n", vgId); len += snprintf(content + len, maxLen - len, " \"db\": \"%s\",\n", pCfg->db); len += snprintf(content + len, maxLen - len, " \"dropped\": %d,\n", pCfg->dropped); + len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pCfg->quorum); // tsdb len += snprintf(content + len, maxLen - len, " 
\"cacheBlockSize\": %d,\n", pCfg->tsdb.cacheBlockSize); len += snprintf(content + len, maxLen - len, " \"totalBlocks\": %d,\n", pCfg->tsdb.totalBlocks); @@ -255,11 +270,12 @@ int32_t vnodeWriteCfg(int32_t vgId, SVnodeCfg *pCfg) { len += snprintf(content + len, maxLen - len, " \"walLevel\": %d,\n", pCfg->wal.walLevel); len += snprintf(content + len, maxLen - len, " \"fsyncPeriod\": %d,\n", pCfg->wal.fsyncPeriod); // sync - len += snprintf(content + len, maxLen - len, " \"quorum\": %d,\n", pCfg->sync.quorum); len += snprintf(content + len, maxLen - len, " \"replica\": %d,\n", pCfg->sync.replica); + len += snprintf(content + len, maxLen - len, " \"selfIndex\": %d,\n", pCfg->sync.selfIndex); len += snprintf(content + len, maxLen - len, " \"nodes\": [{\n"); for (int32_t i = 0; i < pCfg->sync.replica; i++) { - SNodeInfo *node = &pCfg->sync.nodes[i]; + SNodeInfo *node = &pCfg->sync.nodeInfo[i]; + len += snprintf(content + len, maxLen - len, " \"id\": %d,\n", node->nodeId); len += snprintf(content + len, maxLen - len, " \"port\": %u,\n", node->nodePort); len += snprintf(content + len, maxLen - len, " \"fqdn\": \"%s\"\n", node->nodeFqdn); if (i < pCfg->sync.replica - 1) { @@ -304,20 +320,20 @@ int32_t vnodeReadTerm(int32_t vgId, SSyncServerState *pState) { } cJSON *term = cJSON_GetObjectItem(root, "term"); - if (!term || term->type != cJSON_Number) { + if (!term || term->type != cJSON_String) { vError("vgId:%d, failed to read %s since term not found", vgId, file); goto PARSE_TERM_ERROR; } - pState->term = (uint64_t)term->valueint; + pState->term = atoll(term->valuestring); cJSON *voteFor = cJSON_GetObjectItem(root, "voteFor"); - if (!voteFor || voteFor->type != cJSON_Number) { + if (!voteFor || voteFor->type != cJSON_String) { vError("vgId:%d, failed to read %s since voteFor not found", vgId, file); goto PARSE_TERM_ERROR; } - pState->voteFor = (int64_t)voteFor->valueint; + pState->voteFor = atoi(voteFor->valuestring); - vInfo("vgId:%d, read %s success, voteFor:%" PRIu64 ", term:%" PRIu64, vgId, file, pState->voteFor, pState->term); + vInfo("vgId:%d, read %s success, voteFor:%d, term:%" PRIu64, vgId, file, pState->voteFor, pState->term); PARSE_TERM_ERROR: if (content != NULL) free(content); @@ -342,8 +358,8 @@ int32_t vnodeWriteTerm(int32_t vgId, SSyncServerState *pState) { char *content = calloc(1, maxLen + 1); len += snprintf(content + len, maxLen - len, "{\n"); - len += snprintf(content + len, maxLen - len, " \"term\": %" PRIu64 "\n", pState->term); - len += snprintf(content + len, maxLen - len, " \"voteFor\": %" PRIu64 "\n", pState->voteFor); + len += snprintf(content + len, maxLen - len, " \"term\": \"%" PRIu64 "\",\n", pState->term); + len += snprintf(content + len, maxLen - len, " \"voteFor\": \"%d\"\n", pState->voteFor); len += snprintf(content + len, maxLen - len, "}\n"); fwrite(content, 1, len, fp); @@ -351,6 +367,6 @@ int32_t vnodeWriteTerm(int32_t vgId, SSyncServerState *pState) { fclose(fp); free(content); - vInfo("vgId:%d, write %s success, voteFor:%" PRIu64 ", term:%" PRIu64, vgId, file, pState->voteFor, pState->term); + vInfo("vgId:%d, write %s success, voteFor:%d, term:%" PRIu64, vgId, file, pState->voteFor, pState->term); return TSDB_CODE_SUCCESS; } \ No newline at end of file diff --git a/source/server/vnode/src/vnodeMain.c b/source/server/vnode/src/vnodeMain.c index 5143f04c5b..c08ae7708a 100644 --- a/source/server/vnode/src/vnodeMain.c +++ b/source/server/vnode/src/vnodeMain.c @@ -108,6 +108,11 @@ static void vnodeDestroyVnode(SVnode *pVnode) { int32_t code = 0; int32_t vgId = 
pVnode->vgId; + if (pVnode->pSync != NULL) { + syncStop(pVnode->pSync); + pVnode->pSync = NULL; + } + if (pVnode->pQuery) { // todo } @@ -177,6 +182,9 @@ static int32_t vnodeOpenVnode(int32_t vgId) { pVnode->role = TAOS_SYNC_ROLE_CANDIDATE; pthread_mutex_init(&pVnode->statusMutex, NULL); + vDebug("vgId:%d, vnode is opened", pVnode->vgId); + taosHashPut(tsVnode.hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnode *)); + code = vnodeReadCfg(vgId, &pVnode->cfg); if (code != TSDB_CODE_SUCCESS) { vError("vgId:%d, failed to read config file, set cfgVersion to 0", pVnode->vgId); @@ -209,8 +217,34 @@ static int32_t vnodeOpenVnode(int32_t vgId) { return terrno; } - vDebug("vgId:%d, vnode is opened", pVnode->vgId); - taosHashPut(tsVnode.hash, &pVnode->vgId, sizeof(int32_t), &pVnode, sizeof(SVnode *)); + // create sync node + SSyncInfo syncInfo = {0}; + syncInfo.vgId = vgId; + syncInfo.snapshotIndex = 0; // todo, from tsdb + memcpy(&syncInfo.syncCfg, &pVnode->cfg.sync, sizeof(SSyncCluster)); + syncInfo.fsm.pData = pVnode; + syncInfo.fsm.applyLog = NULL; + syncInfo.fsm.onClusterChanged = NULL; + syncInfo.fsm.getSnapshot = NULL; + syncInfo.fsm.applySnapshot = NULL; + syncInfo.fsm.onRestoreDone = NULL; + syncInfo.fsm.onRollback = NULL; + syncInfo.logStore.pData = pVnode; + syncInfo.logStore.logWrite = NULL; + syncInfo.logStore.logCommit = NULL; + syncInfo.logStore.logPrune = NULL; + syncInfo.logStore.logRollback = NULL; + syncInfo.stateManager.pData = pVnode; + syncInfo.stateManager.saveServerState = NULL; + syncInfo.stateManager.readServerState = NULL; + // syncInfo.stateManager.saveCluster = NULL; + // syncInfo.stateManager.readCluster = NULL; + + pVnode->pSync = syncStart(&syncInfo); + if (pVnode->pSync == NULL) { + vnodeCleanupVnode(pVnode); + return terrno; + } vnodeSetReadyStatus(pVnode); return TSDB_CODE_SUCCESS; @@ -313,7 +347,7 @@ int32_t vnodeAlterVnode(SVnode * pVnode, SVnodeCfg *pCfg) { } if (syncChanged) { - // todo + syncReconfig(pVnode->pSync, &pVnode->cfg.sync); } vnodeRelease(pVnode); diff --git a/source/server/vnode/src/vnodeMgmt.c b/source/server/vnode/src/vnodeMgmt.c index d20e36641e..e0e76d5b56 100644 --- a/source/server/vnode/src/vnodeMgmt.c +++ b/source/server/vnode/src/vnodeMgmt.c @@ -31,6 +31,7 @@ static int32_t vnodeParseCreateVnodeReq(SRpcMsg *rpcMsg, int32_t *vgId, SVnodeCf *vgId = htonl(pCreate->vgId); pCfg->dropped = 0; + pCfg->quorum = pCreate->quorum; tstrncpy(pCfg->db, pCreate->db, sizeof(pCfg->db)); pCfg->tsdb.cacheBlockSize = htonl(pCreate->cacheBlockSize); @@ -50,11 +51,11 @@ static int32_t vnodeParseCreateVnodeReq(SRpcMsg *rpcMsg, int32_t *vgId, SVnodeCf pCfg->wal.walLevel = pCreate->walLevel; pCfg->sync.replica = pCreate->replica; - pCfg->sync.quorum = pCreate->quorum; - + pCfg->sync.selfIndex = pCreate->selfIndex; + for (int32_t j = 0; j < pCreate->replica; ++j) { - pCfg->sync.nodes[j].nodePort = htons(pCreate->nodes[j].port); - tstrncpy(pCfg->sync.nodes[j].nodeFqdn, pCreate->nodes[j].fqdn, TSDB_FQDN_LEN); + pCfg->sync.nodeInfo[j].nodePort = htons(pCreate->nodes[j].port); + tstrncpy(pCfg->sync.nodeInfo[j].nodeFqdn, pCreate->nodes[j].fqdn, TSDB_FQDN_LEN); } return 0; From aa0647094374d9b37a0df6161712f6dd60fb1f39 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 28 Oct 2021 16:16:42 +0800 Subject: [PATCH 6/7] [td-10564] fix memory leak in unit test and refactor some codes. 
--- include/common/tdataformat.h | 2 +- include/common/ttime.h | 3 +- include/libs/function/function.h | 29 +- include/libs/parser/parser.h | 18 +- include/util/tdef.h | 2 +- source/common/src/ttime.c | 11 +- source/libs/function/CMakeLists.txt | 4 +- source/libs/function/inc/taggfunction.h | 2 - source/libs/function/src/taggfunction.c | 6 +- source/libs/function/src/texpr.c | 16 +- source/libs/function/src/tfunction.c | 87 +- source/libs/parser/inc/astGenerator.h | 2 +- source/libs/parser/inc/parserInt.h | 10 +- source/libs/parser/inc/queryInfoUtil.h | 4 +- source/libs/parser/src/astGenerator.c | 8 +- source/libs/parser/src/astValidate.c | 954 +++++++++++++++++++--- source/libs/parser/src/parser.c | 11 +- source/libs/parser/src/parserUtil.c | 11 +- source/libs/parser/src/queryInfoUtil.c | 64 +- source/libs/parser/test/CMakeLists.txt | 8 +- source/libs/parser/test/parserTests.cpp | 242 +++--- source/libs/parser/test/tokenizerTest.cpp | 102 +-- 22 files changed, 1192 insertions(+), 404 deletions(-) diff --git a/include/common/tdataformat.h b/include/common/tdataformat.h index 2eef9db064..a3de2452ac 100644 --- a/include/common/tdataformat.h +++ b/include/common/tdataformat.h @@ -308,7 +308,7 @@ static FORCE_INLINE void tdCopyColOfRowBySchema(SDataRow dst, STSchema *pDstSche SET_DOUBLE_PTR(pData, value); break; case TSDB_DATA_TYPE_TIMESTAMP: - if (pSrcSchema->columns[srcIdx].colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pSrcSchema->columns[srcIdx].colId == PRIMARYKEY_TIMESTAMP_COL_ID) { *(TSKEY *)pData = tdGetKey(*(TKEY *)value); } else { *(TSKEY *)pData = *(TSKEY *)value; diff --git a/include/common/ttime.h b/include/common/ttime.h index 0d9b89b6f9..926d23c239 100644 --- a/include/common/ttime.h +++ b/include/common/ttime.h @@ -23,6 +23,8 @@ extern "C" { #include "taosdef.h" #include "taosmsg.h" +#define TIME_IS_VAR_DURATION(_t) ((_t) == 'n' || (_t) == 'y' || (_t) == 'N' || (_t) == 'Y') + /* * @return timestamp decided by global conf variable, tsTimePrecision * if precision == TSDB_TIME_PRECISION_MICRO, it returns timestamp in microsecond. @@ -50,7 +52,6 @@ void deltaToUtcInitOnce(); int64_t convertTimePrecision(int64_t time, int32_t fromPrecision, int32_t toPrecision); - #ifdef __cplusplus } #endif diff --git a/include/libs/function/function.h b/include/libs/function/function.h index 8c290dbced..fecb2e87fc 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#ifndef TDENGINE_TFUNCTION_H -#define TDENGINE_TFUNCTION_H +#ifndef TDENGINE_FUNCTION_H +#define TDENGINE_FUNCTION_H #ifdef __cplusplus extern "C" { @@ -24,6 +24,8 @@ extern "C" { #include "tvariant.h" #include "tbuffer.h" +#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results + #define FUNCTION_SCALAR 1 #define FUNCTION_AGG 2 @@ -184,6 +186,25 @@ typedef struct SResultDataInfo { int32_t intermediateBytes; } SResultDataInfo; + +typedef struct SMultiFunctionsDesc { + bool stableQuery; + bool groupbyColumn; + bool simpleAgg; + bool arithmeticOnAgg; + bool projectionQuery; + bool hasFilter; + bool onlyTagQuery; + bool orderProjectQuery; + bool stateWindow; + bool globalMerge; + bool multigroupResult; + bool blockDistribution; + bool timewindow; + bool topbotQuery; + bool interpQuery; +} SMultiFunctionsDesc; + int32_t getResultDataInfo(int32_t dataType, int32_t dataBytes, int32_t functionId, int32_t param, SResultDataInfo* pInfo, int16_t extLength, bool isSuperTable); @@ -199,8 +220,10 @@ bool qIsValidUdf(SArray* pUdfInfo, const char* name, int32_t len, int32_t* funct const char* qGetFunctionName(int32_t functionId); +void extractFunctionDesc(SArray* pFunctionIdList, SMultiFunctionsDesc* pDesc); + #ifdef __cplusplus } #endif -#endif // TDENGINE_TFUNCTION_H +#endif // TDENGINE_FUNCTION_H diff --git a/include/libs/parser/parser.h b/include/libs/parser/parser.h index b80eda0b86..262479ee4a 100644 --- a/include/libs/parser/parser.h +++ b/include/libs/parser/parser.h @@ -24,6 +24,7 @@ extern "C" { #include "common.h" #include "tname.h" #include "tvariant.h" +#include "function.h" typedef struct SColumn { uint64_t tableUid; @@ -130,20 +131,6 @@ typedef struct STableMetaInfo { SArray *tagColList; // SArray, involved tag columns } STableMetaInfo; -typedef struct SQueryAttrInfo { - bool stableQuery; - bool groupbyColumn; - bool simpleAgg; - bool arithmeticOnAgg; - bool projectionQuery; - bool hasFilter; - bool onlyTagQuery; - bool orderProjectQuery; - bool stateWindow; - bool globalMerge; - bool multigroupResult; -} SQueryAttrInfo; - typedef struct SQueryStmtInfo { int16_t command; // the command may be different for each subclause, so keep it seperately. uint32_t type; // query/insert type @@ -177,7 +164,6 @@ typedef struct SQueryStmtInfo { int32_t udColumnId; // current user-defined constant output field column id, monotonically decreases from TSDB_UD_COLUMN_INDEX bool distinct; // distinct tag or not - bool onlyHasTagCond; int32_t bufLen; char* buf; SArray *pUdfInfo; @@ -186,7 +172,7 @@ typedef struct SQueryStmtInfo { SArray *pUpstream; // SArray struct SQueryStmtInfo *pDownstream; int32_t havingFieldNum; - SQueryAttrInfo info; + SMultiFunctionsDesc info; } SQueryStmtInfo; typedef struct SColumnIndex { diff --git a/include/util/tdef.h b/include/util/tdef.h index 21a70c9d91..170502aa9d 100644 --- a/include/util/tdef.h +++ b/include/util/tdef.h @@ -303,7 +303,7 @@ do { \ #define TSDB_MAX_FIELD_LEN 16384 #define TSDB_MAX_BINARY_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384 #define TSDB_MAX_NCHAR_LEN (TSDB_MAX_FIELD_LEN-TSDB_KEYSIZE) // keep 16384 -#define PRIMARYKEY_TIMESTAMP_COL_INDEX 0 +#define PRIMARYKEY_TIMESTAMP_COL_ID 0 #define TSDB_MAX_RPC_THREADS 5 diff --git a/source/common/src/ttime.c b/source/common/src/ttime.c index 2686dcd205..6c795bb0cc 100644 --- a/source/common/src/ttime.c +++ b/source/common/src/ttime.c @@ -48,7 +48,7 @@ * An encoding of midnight at the end of the day as 24:00:00 - ie. 
midnight * tomorrow - (allowable under ISO 8601) is supported. */ -int64_t user_mktime64(const unsigned int year0, const unsigned int mon0, +static int64_t user_mktime64(const unsigned int year0, const unsigned int mon0, const unsigned int day, const unsigned int hour, const unsigned int min, const unsigned int sec, int64_t time_zone) { @@ -79,19 +79,18 @@ void deltaToUtcInitOnce() { (void)strptime("1970-01-01 00:00:00", (const char *)("%Y-%m-%d %H:%M:%S"), &tm); m_deltaUtc = (int64_t)mktime(&tm); //printf("====delta:%lld\n\n", seconds); - return; } static int64_t parseFraction(char* str, char** end, int32_t timePrec); static int32_t parseTimeWithTz(char* timestr, int64_t* time, int32_t timePrec, char delim); static int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec); -static int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec); +static int32_t parseLocaltimeDst(char* timestr, int64_t* time, int32_t timePrec); static char* forwardToTimeStringEnd(char* str); static bool checkTzPresent(char *str, int32_t len); static int32_t (*parseLocaltimeFp[]) (char* timestr, int64_t* time, int32_t timePrec) = { parseLocaltime, - parseLocaltimeWithDst + parseLocaltimeDst }; int32_t taosParseTime(char* timestr, int64_t* time, int32_t len, int32_t timePrec, int8_t day_light) { @@ -116,8 +115,8 @@ bool checkTzPresent(char *str, int32_t len) { } c--; } - return false; + return false; } char* forwardToTimeStringEnd(char* str) { @@ -344,7 +343,7 @@ int32_t parseLocaltime(char* timestr, int64_t* time, int32_t timePrec) { return 0; } -int32_t parseLocaltimeWithDst(char* timestr, int64_t* time, int32_t timePrec) { +int32_t parseLocaltimeDst(char* timestr, int64_t* time, int32_t timePrec) { *time = 0; struct tm tm = {0}; tm.tm_isdst = -1; diff --git a/source/libs/function/CMakeLists.txt b/source/libs/function/CMakeLists.txt index 9fbfc82e3c..a4aa7025e4 100644 --- a/source/libs/function/CMakeLists.txt +++ b/source/libs/function/CMakeLists.txt @@ -7,6 +7,6 @@ target_include_directories( ) target_link_libraries( - function - PRIVATE os util common + function + PRIVATE os util common ) \ No newline at end of file diff --git a/source/libs/function/inc/taggfunction.h b/source/libs/function/inc/taggfunction.h index 0ebba4cd8e..c5e7ea12a5 100644 --- a/source/libs/function/inc/taggfunction.h +++ b/source/libs/function/inc/taggfunction.h @@ -56,8 +56,6 @@ typedef struct SResultRowCellInfo { #define QUERY_DESC_FORWARD_STEP -1 #define GET_FORWARD_DIRECTION_FACTOR(ord) (((ord) == TSDB_ORDER_ASC) ? 
QUERY_ASC_FORWARD_STEP : QUERY_DESC_FORWARD_STEP) - -#define MAX_INTERVAL_TIME_WINDOW 1000000 // maximum allowed time windows in final results #define TOP_BOTTOM_QUERY_LIMIT 100 enum { diff --git a/source/libs/function/src/taggfunction.c b/source/libs/function/src/taggfunction.c index c130e6244b..c8585a7b64 100644 --- a/source/libs/function/src/taggfunction.c +++ b/source/libs/function/src/taggfunction.c @@ -513,7 +513,7 @@ static void count_func_merge(SQLFunctionCtx *pCtx) { * @return */ int32_t countRequired(SQLFunctionCtx *pCtx, STimeWindow* w, int32_t colId) { - if (colId == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (colId == PRIMARYKEY_TIMESTAMP_COL_ID) { return BLK_DATA_NO_NEEDED; } else { return BLK_DATA_STATIS_NEEDED; @@ -2303,10 +2303,10 @@ static void top_bottom_func_finalizer(SQLFunctionCtx *pCtx) { tValuePair **tvp = pRes->res; // user specify the order of output by sort the result according to timestamp - if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_INDEX) { + if (pCtx->param[1].i64 == PRIMARYKEY_TIMESTAMP_COL_ID) { __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resAscComparFn : resDescComparFn; qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator); - } else /*if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_INDEX)*/ { + } else /*if (pCtx->param[1].i64 > PRIMARYKEY_TIMESTAMP_COL_ID)*/ { __compar_fn_t comparator = (pCtx->param[2].i64 == TSDB_ORDER_ASC) ? resDataAscComparFn : resDataDescComparFn; qsort(tvp, (size_t)pResInfo->numOfRes, POINTER_BYTES, comparator); } diff --git a/source/libs/function/src/texpr.c b/source/libs/function/src/texpr.c index 32fe3d2912..cfddb81683 100644 --- a/source/libs/function/src/texpr.c +++ b/source/libs/function/src/texpr.c @@ -128,18 +128,26 @@ static void doExprTreeDestroy(tExprNode **pExpr, void (*fp)(void *)) { if (*pExpr == NULL) { return; } - - if ((*pExpr)->nodeType == TEXPR_BINARYEXPR_NODE) { + + int32_t type = (*pExpr)->nodeType; + if (type == TEXPR_BINARYEXPR_NODE) { doExprTreeDestroy(&(*pExpr)->_node.pLeft, fp); doExprTreeDestroy(&(*pExpr)->_node.pRight, fp); if (fp != NULL) { fp((*pExpr)->_node.info); } - } else if ((*pExpr)->nodeType == TEXPR_VALUE_NODE) { + } else if (type == TEXPR_UNARYEXPR_NODE) { + doExprTreeDestroy(&(*pExpr)->_node.pLeft, fp); + if (fp != NULL) { + fp((*pExpr)->_node.info); + } + + assert((*pExpr)->_node.pRight == NULL); + } else if (type == TEXPR_VALUE_NODE) { taosVariantDestroy((*pExpr)->pVal); free((*pExpr)->pVal); - } else if ((*pExpr)->nodeType == TEXPR_COL_NODE) { + } else if (type == TEXPR_COL_NODE) { free((*pExpr)->pSchema); } diff --git a/source/libs/function/src/tfunction.c b/source/libs/function/src/tfunction.c index 7154c48d2a..0136bbf493 100644 --- a/source/libs/function/src/tfunction.c +++ b/source/libs/function/src/tfunction.c @@ -52,7 +52,7 @@ bool isTagsQuery(SArray* pFunctionIdList) { int16_t f = *(int16_t*) taosArrayGet(pFunctionIdList, i); // "select count(tbname)" query -// if (functId == FUNCTION_COUNT && pExpr->base.colInfo.colId == TSDB_TBNAME_COLUMN_INDEX) { +// if (functId == FUNCTION_COUNT && pExpr->base.colpDesc->colId == TSDB_TBNAME_COLUMN_INDEX) { // continue; // } @@ -80,19 +80,6 @@ bool isTagsQuery(SArray* pFunctionIdList) { // return false; //} -bool isBlockInfoQuery(SArray* pFunctionIdList) { - int32_t num = (int32_t) taosArrayGetSize(pFunctionIdList); - for (int32_t i = 0; i < num; ++i) { - int32_t f = *(int16_t*) taosArrayGet(pFunctionIdList, i); - - if (f == FUNCTION_BLKINFO) { - return true; - } - } - - return false; -} - bool 
isProjectionQuery(SArray* pFunctionIdList) { int32_t num = (int32_t) taosArrayGetSize(pFunctionIdList); for (int32_t i = 0; i < num; ++i) { @@ -101,8 +88,12 @@ bool isProjectionQuery(SArray* pFunctionIdList) { continue; } - if (f != FUNCTION_PRJ && f != FUNCTION_TAGPRJ && f != FUNCTION_TAG && - f != FUNCTION_TS && f != FUNCTION_ARITHM && f != FUNCTION_DIFF && + if (f != FUNCTION_PRJ && + f != FUNCTION_TAGPRJ && + f != FUNCTION_TAG && + f != FUNCTION_TS && + f != FUNCTION_ARITHM && + f != FUNCTION_DIFF && f != FUNCTION_DERIVATIVE) { return false; } @@ -111,7 +102,7 @@ bool isProjectionQuery(SArray* pFunctionIdList) { return true; } -bool isDiffDerivQuery(SArray* pFunctionIdList) { +bool isDiffDerivativeQuery(SArray* pFunctionIdList) { int32_t num = (int32_t) taosArrayGetSize(pFunctionIdList); for (int32_t i = 0; i < num; ++i) { int32_t f = *(int16_t*) taosArrayGet(pFunctionIdList, i); @@ -127,7 +118,7 @@ bool isDiffDerivQuery(SArray* pFunctionIdList) { return false; } -bool isPointInterpQuery(SArray* pFunctionIdList) { +bool isInterpQuery(SArray* pFunctionIdList) { int32_t num = (int32_t) taosArrayGetSize(pFunctionIdList); for (int32_t i = 0; i < num; ++i) { int32_t f = *(int16_t*) taosArrayGet(pFunctionIdList, i); @@ -264,8 +255,6 @@ bool needReverseScan(SArray* pFunctionIdList) { } bool isSimpleAggregateRv(SArray* pFunctionIdList) { - assert(0); - // if (pQueryInfo->interval.interval > 0 || pQueryInfo->sessionWindow.gap > 0) { // return false; // } @@ -380,33 +369,17 @@ bool isProjectionQueryOnSTable(SArray* pFunctionIdList, int32_t tableIndex) { } bool hasTagValOutput(SArray* pFunctionIdList) { -// size_t numOfExprs = getNumOfExprs(pQueryInfo); -// SExprInfo* pExpr1 = getExprInfo(pQueryInfo, 0); -// -// if (numOfExprs == 1 && pExpr1->base.functionId == FUNCTION_TS_COMP) { + size_t size = taosArrayGetSize(pFunctionIdList); + + // if (numOfExprs == 1 && pExpr1->base.functionId == FUNCTION_TS_COMP) { // return true; -// } -// -// for (int32_t i = 0; i < numOfExprs; ++i) { -// SExprInfo* pExpr = getExprInfo(pQueryInfo, i); -// if (pExpr == NULL) { -// continue; -// } -// -// // ts_comp column required the tag value for join filter -// if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { -// return true; -// } // } - return false; -} + for (int32_t i = 0; i < size; ++i) { + int32_t functionId = *(int16_t*) taosArrayGet(pFunctionIdList, i); -bool timeWindowInterpoRequired(SArray* pFunctionIdList) { - int32_t num = (int32_t) taosArrayGetSize(pFunctionIdList); - for (int32_t i = 0; i < num; ++i) { - int32_t f = *(int16_t*) taosArrayGet(pFunctionIdList, i); - if (f == FUNCTION_TWA || f == FUNCTION_INTERP) { + // ts_comp column required the tag value for join filter + if (functionId == FUNCTION_TAG || functionId == FUNCTION_TAGPRJ) { return true; } } @@ -414,8 +387,28 @@ bool timeWindowInterpoRequired(SArray* pFunctionIdList) { return false; } -//SQueryAttrInfo setQueryType(SArray* pFunctionIdList) { -// assert(pFunctionIdList != NULL); +//bool timeWindowInterpoRequired(SArray* pFunctionIdList) { +// int32_t num = (int32_t) taosArrayGetSize(pFunctionIdList); +// for (int32_t i = 0; i < num; ++i) { +// int32_t f = *(int16_t*) taosArrayGet(pFunctionIdList, i); +// if (f == FUNCTION_TWA || f == FUNCTION_INTERP) { +// return true; +// } +// } // -// -//} \ No newline at end of file +// return false; +//} + +void extractFunctionDesc(SArray* pFunctionIdList, SMultiFunctionsDesc* pDesc) { + assert(pFunctionIdList != NULL); + + + pDesc->blockDistribution = isBlockDistQuery(pFunctionIdList); + if 
(pDesc->blockDistribution) { + return; + } + + pDesc->projectionQuery = isProjectionQuery(pFunctionIdList); + pDesc->onlyTagQuery = isTagsQuery(pFunctionIdList); + pDesc->interpQuery = isInterpQuery(pFunctionIdList); +} diff --git a/source/libs/parser/inc/astGenerator.h b/source/libs/parser/inc/astGenerator.h index f7c7b9d6cc..829112baf3 100644 --- a/source/libs/parser/inc/astGenerator.h +++ b/source/libs/parser/inc/astGenerator.h @@ -298,7 +298,7 @@ void* destroyCreateTableSql(SCreateTableSql* pCreate); void setDropFuncInfo(SSqlInfo *pInfo, int32_t type, SToken* pToken); void setCreateFuncInfo(SSqlInfo *pInfo, int32_t type, SToken *pName, SToken *pPath, SField *output, SToken* bufSize, int32_t funcType); -void SqlInfoDestroy(SSqlInfo *pInfo); +void destroySqlInfo(SSqlInfo *pInfo); void setDCLSqlElems(SSqlInfo *pInfo, int32_t type, int32_t nParams, ...); void setDropDbTableInfo(SSqlInfo *pInfo, int32_t type, SToken* pToken, SToken* existsCheck,int16_t dbType,int16_t tableType); diff --git a/source/libs/parser/inc/parserInt.h b/source/libs/parser/inc/parserInt.h index 27c9140bcd..ca02165382 100644 --- a/source/libs/parser/inc/parserInt.h +++ b/source/libs/parser/inc/parserInt.h @@ -73,7 +73,9 @@ int32_t evaluateSqlNode(SSqlNode* pNode, int32_t tsPrecision, SMsgBuf* pMsgBuf); int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* pMsgBuf); -void initQueryInfo(SQueryStmtInfo* pQueryInfo); +SQueryStmtInfo* createQueryInfo(); + +void destroyQueryInfo(SQueryStmtInfo* pQueryInfo); int32_t checkForInvalidExpr(SQueryStmtInfo* pQueryInfo, SMsgBuf* pMsgBuf); @@ -87,6 +89,12 @@ int32_t checkForInvalidExpr(SQueryStmtInfo* pQueryInfo, SMsgBuf* pMsgBuf); */ int32_t qParserExtractRequestedMetaInfo(const SSqlInfo* pSqlInfo, SMetaReq* pMetaInfo, char* msg, int32_t msgBufLen); +/** + * Destroy the meta data request structure. 
+ * @param pMetaInfo + */ +void qParserClearupMetaRequestInfo(SMetaReq* pMetaInfo); + #ifdef __cplusplus } #endif diff --git a/source/libs/parser/inc/queryInfoUtil.h b/source/libs/parser/inc/queryInfoUtil.h index f1515189b7..fc892762cd 100644 --- a/source/libs/parser/inc/queryInfoUtil.h +++ b/source/libs/parser/inc/queryInfoUtil.h @@ -30,9 +30,7 @@ SSchema *getTableTagSchema(const STableMeta* pTableMeta); SSchema *getOneColumnSchema(const STableMeta* pTableMeta, int32_t colIndex); size_t getNumOfExprs(SQueryStmtInfo* pQueryInfo); -//SExprInfo* createExprInfo(STableMetaInfo* pTableMetaInfo, int16_t functionId, SColumnIndex* pColIndex, struct tExprNode* pParamExpr, SSchema* pResSchema, int16_t interSize); SExprInfo* createBinaryExprInfo(struct tExprNode* pNode, SSchema* pResSchema); -void destroyExprInfoList(); void addExprInfo(SQueryStmtInfo* pQueryInfo, int32_t index, SExprInfo* pExprInfo); void updateExprInfo(SExprInfo* pExprInfo, int16_t functionId, int32_t colId, int16_t srcColumnIndex, int16_t resType, int16_t resSize); @@ -42,9 +40,11 @@ int32_t copyAllExprInfo(SArray* dst, const SArray* src, bool deepcopy); void addExprInfoParam(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes); +int32_t getExprFunctionId(SExprInfo *pExprInfo); void cleanupFieldInfo(SFieldInfo* pFieldInfo); STableComInfo getTableInfo(const STableMeta* pTableMeta); +SArray* extractFunctionIdList(SArray* pExprInfoList); #ifdef __cplusplus } diff --git a/source/libs/parser/src/astGenerator.c b/source/libs/parser/src/astGenerator.c index e9517758d1..6228212c4b 100644 --- a/source/libs/parser/src/astGenerator.c +++ b/source/libs/parser/src/astGenerator.c @@ -242,7 +242,6 @@ tSqlExpr *tSqlExprClone(tSqlExpr *pSrc) { } void tSqlExprCompact(tSqlExpr **pExpr) { - if (*pExpr == NULL || tSqlExprIsParentOfLeaf(*pExpr)) { return; } @@ -770,8 +769,11 @@ void setCreateFuncInfo(SSqlInfo *pInfo, int32_t type, SToken *pName, SToken *pPa } } -void SqlInfoDestroy(SSqlInfo *pInfo) { - if (pInfo == NULL) return;; +void destroySqlInfo(SSqlInfo *pInfo) { + if (pInfo == NULL) { + return; + } + taosArrayDestroy(pInfo->funcs); if (pInfo->type == TSDB_SQL_SELECT) { destroyAllSqlNode(pInfo->list); diff --git a/source/libs/parser/src/astValidate.c b/source/libs/parser/src/astValidate.c index 9d5ee21f72..aaecf5c8a5 100644 --- a/source/libs/parser/src/astValidate.c +++ b/source/libs/parser/src/astValidate.c @@ -14,6 +14,7 @@ */ #include +#include #include "astGenerator.h" #include "function.h" #include "parserInt.h" @@ -28,6 +29,8 @@ #define DEFAULT_PRIMARY_TIMESTAMP_COL_NAME "_c0" #define VALID_COLUMN_INDEX(index) (((index).tableIndex >= 0) && ((index).columnIndex >= TSDB_TBNAME_COLUMN_INDEX)) +#define TSWINDOW_IS_EQUAL(t1, t2) (((t1).skey == (t2).skey) && ((t1).ekey == (t2).ekey)) + // -1 is tbname column index, so here use the -2 as the initial value #define COLUMN_INDEX_INITIAL_VAL (-2) #define COLUMN_INDEX_INITIALIZER { COLUMN_INDEX_INITIAL_VAL, COLUMN_INDEX_INITIAL_VAL } @@ -213,7 +216,9 @@ static STableMeta* extractTempTableMetaFromSubquery(SQueryStmtInfo* pUpstream) { return meta; } -void initQueryInfo(SQueryStmtInfo* pQueryInfo) { +SQueryStmtInfo *createQueryInfo() { + SQueryStmtInfo* pQueryInfo = calloc(1, sizeof(SQueryStmtInfo)); + pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField)); pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES); pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); @@ -225,6 +230,58 @@ void initQueryInfo(SQueryStmtInfo* pQueryInfo) { pQueryInfo->slimit.offset = 0; 
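/*
 * A minimal usage sketch for the new createQueryInfo()/destroyQueryInfo() pair that
 * replaces initQueryInfo(): the struct is heap-allocated here and must be released
 * with destroyQueryInfo(), which also frees the sibling chain and the pUpstream
 * sub-queries. pSqlNode and msgBuf are assumed to come from the surrounding parser
 * code; only the two lifecycle calls and validateSqlNode() are taken from this patch.
 *
 *   SQueryStmtInfo* pQueryInfo = createQueryInfo();
 *   if (pQueryInfo == NULL) {
 *     return TSDB_CODE_TSC_OUT_OF_MEMORY;
 *   }
 *   int32_t code = validateSqlNode(pSqlNode, pQueryInfo, &msgBuf);
 *   destroyQueryInfo(pQueryInfo);  // releases upstream/sibling SQueryStmtInfo nodes as well
 */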
pQueryInfo->pUpstream = taosArrayInit(4, POINTER_BYTES); pQueryInfo->window = TSWINDOW_INITIALIZER; + + return pQueryInfo; +} + +static void destroyQueryInfoImpl(SQueryStmtInfo* pQueryInfo) { + cleanupTagCond(&pQueryInfo->tagCond); + cleanupColumnCond(&pQueryInfo->colCond); + cleanupFieldInfo(&pQueryInfo->fieldsInfo); + + dropAllExprInfo(pQueryInfo->exprList); + pQueryInfo->exprList = NULL; + + if (pQueryInfo->exprList1 != NULL) { + dropAllExprInfo(pQueryInfo->exprList1); + pQueryInfo->exprList1 = NULL; + } + + columnListDestroy(pQueryInfo->colList); + pQueryInfo->colList = NULL; + + if (pQueryInfo->groupbyExpr.columnInfo != NULL) { + taosArrayDestroy(pQueryInfo->groupbyExpr.columnInfo); + pQueryInfo->groupbyExpr.columnInfo = NULL; + } + + pQueryInfo->fillType = 0; + + tfree(pQueryInfo->fillVal); + tfree(pQueryInfo->buf); + + taosArrayDestroy(pQueryInfo->pUpstream); + pQueryInfo->pUpstream = NULL; + pQueryInfo->bufLen = 0; +} + +void destroyQueryInfo(SQueryStmtInfo* pQueryInfo) { + while (pQueryInfo != NULL) { + SQueryStmtInfo* p = pQueryInfo->sibling; + + size_t numOfUpstream = taosArrayGetSize(pQueryInfo->pUpstream); + for (int32_t i = 0; i < numOfUpstream; ++i) { + SQueryStmtInfo* pUpQueryInfo = taosArrayGetP(pQueryInfo->pUpstream, i); + destroyQueryInfoImpl(pUpQueryInfo); + clearAllTableMetaInfo(pUpQueryInfo, false, 0); + tfree(pUpQueryInfo); + } + + destroyQueryInfoImpl(pQueryInfo); + clearAllTableMetaInfo(pQueryInfo, false, 0); + tfree(pQueryInfo); + pQueryInfo = p; + } } static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SQueryStmtInfo* pQueryInfo, SMsgBuf* pMsgBuf) { @@ -236,8 +293,7 @@ static int32_t doValidateSubquery(SSqlNode* pSqlNode, int32_t index, SQueryStmtI return buildInvalidOperationMsg(pMsgBuf, "not support union in subquery"); } - SQueryStmtInfo* pSub = calloc(1, sizeof(SQueryStmtInfo)); - initQueryInfo(pSub); + SQueryStmtInfo* pSub = createQueryInfo(); SArray *pUdfInfo = NULL; if (pQueryInfo->pUdfInfo) { @@ -391,9 +447,9 @@ int32_t doGetColumnIndexByName(SToken* pToken, SQueryStmtInfo* pQueryInfo, SColu pIndex->type = TSDB_COL_TAG; } else if (strlen(DEFAULT_PRIMARY_TIMESTAMP_COL_NAME) == pToken->n && strncasecmp(pToken->z, DEFAULT_PRIMARY_TIMESTAMP_COL_NAME, pToken->n) == 0) { - pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest + pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_ID; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest } else if (pToken->n == 0) { - pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest + pIndex->columnIndex = PRIMARYKEY_TIMESTAMP_COL_ID; // just make runtime happy, need fix java test case InsertSpecialCharacterJniTest } else { // not specify the table name, try to locate the table index by column name if (pIndex->tableIndex == COLUMN_INDEX_INITIAL_VAL) { @@ -458,39 +514,32 @@ int32_t validateGroupbyNode(SQueryStmtInfo* pQueryInfo, SArray* pList, SMsgBuf* return buildInvalidOperationMsg(pMsgBuf, msg4); } - SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr; + SGroupbyExpr* pGroupExpr = &(pQueryInfo->groupbyExpr); + pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex)); if (pGroupExpr->columnInfo == NULL) { - pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex)); - } - - if (pQueryInfo->colList == NULL) { - pQueryInfo->colList = taosArrayInit(4, POINTER_BYTES); - } - - if (pGroupExpr->columnInfo == NULL || 
pQueryInfo->colList == NULL) { return TSDB_CODE_TSC_OUT_OF_MEMORY; } - int32_t numOfGroupCols = (int16_t) taosArrayGetSize(pList); - if (numOfGroupCols > TSDB_MAX_TAGS) { + size_t num = taosArrayGetSize(pList); + if (num > TSDB_MAX_TAGS) { return buildInvalidOperationMsg(pMsgBuf, msg1); } - SSchema *pSchema = NULL; - int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; + int32_t numOfGroupbyCols = 0; + SSchema *pSchema = NULL; + int32_t tableIndex = COLUMN_INDEX_INITIAL_VAL; - size_t num = taosArrayGetSize(pList); for (int32_t i = 0; i < num; ++i) { SListItem * pItem = taosArrayGet(pList, i); SVariant* pVar = &pItem->pVar; - SToken token = {pVar->nLen, pVar->nType, pVar->pz}; - SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SToken token = {pVar->nLen, pVar->nType, pVar->pz}; if (getColumnIndexByName(&token, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { return buildInvalidOperationMsg(pMsgBuf, msg2); } + // Group by multiple tables is not supported. if (tableIndex == COLUMN_INDEX_INITIAL_VAL) { tableIndex = index.tableIndex; } else if (tableIndex != index.tableIndex) { @@ -506,9 +555,7 @@ int32_t validateGroupbyNode(SQueryStmtInfo* pQueryInfo, SArray* pList, SMsgBuf* pSchema = getOneColumnSchema(pTableMeta, index.columnIndex); } - int32_t numOfCols = getNumOfColumns(pTableMeta); - bool groupTag = (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX || index.columnIndex >= numOfCols); - + bool groupTag = TSDB_COL_IS_TAG(index.type); if (groupTag) { if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { return buildInvalidOperationMsg(pMsgBuf, msg6); @@ -516,7 +563,7 @@ int32_t validateGroupbyNode(SQueryStmtInfo* pQueryInfo, SArray* pList, SMsgBuf* int32_t relIndex = index.columnIndex; if (index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) { - relIndex -= numOfCols; + relIndex -= getNumOfColumns(pTableMeta); } SColIndex colIndex = { .colIndex = relIndex, .flag = TSDB_COL_TAG, .colId = pSchema->colId, }; @@ -527,7 +574,7 @@ int32_t validateGroupbyNode(SQueryStmtInfo* pQueryInfo, SArray* pList, SMsgBuf* columnListInsert(pTableMetaInfo->tagColList, index.columnIndex, pTableMeta->uid, pSchema); } else { // check if the column type is valid, here only support the bool/tinyint/smallint/bigint group by - if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) { + if (pSchema->type == TSDB_DATA_TYPE_FLOAT || pSchema->type == TSDB_DATA_TYPE_DOUBLE) { return buildInvalidOperationMsg(pMsgBuf, msg5); } @@ -537,14 +584,15 @@ int32_t validateGroupbyNode(SQueryStmtInfo* pQueryInfo, SArray* pList, SMsgBuf* strncpy(colIndex.name, pSchema->name, tListLen(colIndex.name)); taosArrayPush(pGroupExpr->columnInfo, &colIndex); - pQueryInfo->groupbyExpr.orderType = TSDB_ORDER_ASC; - numOfGroupCols++; + + numOfGroupbyCols++; + pQueryInfo->info.groupbyColumn = true; } } // 1. only one normal column allowed in the group by clause - // 2. the normal column in the group by clause can only located in the end position - if (numOfGroupCols > 1) { + // 2. 
the normal column in the group by clause can only located at the end position + if (numOfGroupbyCols > 1) { return buildInvalidOperationMsg(pMsgBuf, msg7); } @@ -555,7 +603,8 @@ int32_t validateGroupbyNode(SQueryStmtInfo* pQueryInfo, SArray* pList, SMsgBuf* } } - pQueryInfo->groupbyExpr.tableIndex = tableIndex; + pGroupExpr->orderType = TSDB_ORDER_ASC; + pGroupExpr->tableIndex = tableIndex; return TSDB_CODE_SUCCESS; } @@ -589,18 +638,249 @@ int32_t validateWhereNode(SQueryStmtInfo *pQueryInfo, tSqlExpr* pWhereExpr, SMsg return 0; } -// validate the interval info -int32_t validateIntervalNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { - return 0; +static int32_t parseIntervalOffset(SQueryStmtInfo* pQueryInfo, SToken* offsetToken, int32_t precision, SMsgBuf* pMsgBuf) { + const char* msg1 = "interval offset cannot be negative"; + const char* msg2 = "interval offset should be shorter than interval"; + const char* msg3 = "cannot use 'year' as offset when interval is 'month'"; + + SToken* t = offsetToken; + if (t->n == 0) { + pQueryInfo->interval.offsetUnit = pQueryInfo->interval.intervalUnit; + pQueryInfo->interval.offset = 0; + return TSDB_CODE_SUCCESS; + } + + if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.offset, &pQueryInfo->interval.offsetUnit, precision) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + if (pQueryInfo->interval.offset < 0) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + if (!TIME_IS_VAR_DURATION(pQueryInfo->interval.offsetUnit)) { + if (!TIME_IS_VAR_DURATION(pQueryInfo->interval.intervalUnit)) { + if (pQueryInfo->interval.offset > pQueryInfo->interval.interval) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + } + } else if (pQueryInfo->interval.offsetUnit == pQueryInfo->interval.intervalUnit) { + if (pQueryInfo->interval.offset >= pQueryInfo->interval.interval) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + } else if (pQueryInfo->interval.intervalUnit == 'n' && pQueryInfo->interval.offsetUnit == 'y') { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } else if (pQueryInfo->interval.intervalUnit == 'y' && pQueryInfo->interval.offsetUnit == 'n') { + if (pQueryInfo->interval.interval * 12 <= pQueryInfo->interval.offset) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + } else { + // TODO: offset should be shorter than interval, but how to check + // conflicts like 30days offset and 1 month interval + } + + return TSDB_CODE_SUCCESS; } -int32_t validateSessionNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { - return 0; +static int32_t parseSlidingClause(SQueryStmtInfo* pQueryInfo, SToken* pSliding, int32_t precision, SMsgBuf* pMsgBuf) { + const char* msg1 = "sliding value no larger than the interval value"; + const char* msg2 = "sliding value can not less than 1% of interval value"; + const char* msg3 = "does not support sliding when interval is natural month/year"; + const char* msg4 = "sliding value too small"; + + const static int32_t INTERVAL_SLIDING_FACTOR = 100; + + SInterval* pInterval = &pQueryInfo->interval; + if (pSliding->n == 0) { + pInterval->slidingUnit = pInterval->intervalUnit; + pInterval->sliding = pInterval->interval; + return TSDB_CODE_SUCCESS; + } + + if (TIME_IS_VAR_DURATION(pInterval->intervalUnit)) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + + parseAbsoluteDuration(pSliding->z, pSliding->n, &pInterval->sliding, &pInterval->slidingUnit, precision); + + // less than the threshold + if (pInterval->sliding < 
convertTimePrecision(tsMinSlidingTime, TSDB_TIME_PRECISION_MILLI, precision)) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + + if (pInterval->sliding > pInterval->interval) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + if ((pInterval->interval != 0) && (pInterval->interval/pInterval->sliding > INTERVAL_SLIDING_FACTOR)) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + + return TSDB_CODE_SUCCESS; +} + +// validate the interval info +int32_t validateIntervalNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { + const char* msg1 = "sliding cannot be used without interval"; + const char* msg2 = "only point interpolation query requires keyword EVERY"; + const char* msg3 = "interval value is too small"; + + STableMetaInfo* pTableMetaInfo = getMetaInfo(pQueryInfo, 0); + STableComInfo tinfo = getTableInfo(pTableMetaInfo->pTableMeta); + + if (!TPARSER_HAS_TOKEN(pSqlNode->interval.interval)) { + if (TPARSER_HAS_TOKEN(pSqlNode->sliding)) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } else { + return TSDB_CODE_SUCCESS; + } + } + + // orderby column not set yet, set it to be the primary timestamp column + if (pQueryInfo->order.orderColId == INT32_MIN) { + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_ID; + } + + // interval is not null + SToken *t = &pSqlNode->interval.interval; + if (parseNatualDuration(t->z, t->n, &pQueryInfo->interval.interval, + &pQueryInfo->interval.intervalUnit, tinfo.precision) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + if (pQueryInfo->interval.interval <= 0) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + + if (!TIME_IS_VAR_DURATION(pQueryInfo->interval.intervalUnit)) { + // interval cannot be less than 10 milliseconds + if (convertTimePrecision(pQueryInfo->interval.interval, tinfo.precision, TSDB_TIME_PRECISION_MICRO) < tsMinIntervalTime) { + char msg[50] = {0}; + snprintf(msg, 50, "interval time window can not be less than %d %s", tsMinIntervalTime, TSDB_TIME_PRECISION_MICRO_STR); + return buildInvalidOperationMsg(pMsgBuf, msg); + } + } + + if (parseIntervalOffset(pQueryInfo, &pSqlNode->interval.offset, tinfo.precision, pMsgBuf) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + if (parseSlidingClause(pQueryInfo, &pSqlNode->sliding, tinfo.precision, pMsgBuf) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + // It is a time window query + pQueryInfo->info.timewindow = true; + return TSDB_CODE_SUCCESS; + // disable it temporarily +// bool interpQuery = tscIsPointInterpQuery(pQueryInfo); +// if ((pSqlNode->interval.token == TK_EVERY && (!interpQuery)) || (pSqlNode->interval.token == TK_INTERVAL && interpQuery)) { +// return buildInvalidOperationMsg(pMsgBuf, msg4); +// } + + // The following part is used to check for the invalid query expression. 
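/*
 * A rough summary of the window constraints enforced above by parseIntervalOffset()
 * and parseSlidingClause(), with purely illustrative values (SQL syntax assumed; the
 * exact tsMinSlidingTime lower bound depends on configuration):
 *
 *   INTERVAL(10m) SLIDING(5m)   -- accepted: 5m <= 10m and 10m / 5m = 2 <= 100
 *   INTERVAL(10m) SLIDING(1s)   -- rejected: 10m / 1s = 600 > INTERVAL_SLIDING_FACTOR (100)
 *   INTERVAL(1n)  SLIDING(10d)  -- rejected: sliding not allowed with natural month/year
 *   INTERVAL(10m, 15m)          -- rejected: offset must be shorter than the interval
 */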
+// return checkInvalidExprForTimeWindow(pCmd, pQueryInfo); + +} + +int32_t validateSessionNode(SQueryStmtInfo *pQueryInfo, SSessionWindowVal* pSession, int32_t precision, SMsgBuf* pMsgBuf) { + const char* msg1 = "gap should be fixed time window"; + const char* msg2 = "only one type time window allowed"; + const char* msg3 = "invalid column name"; + const char* msg4 = "invalid time window"; + + // no session window + if (!TPARSER_HAS_TOKEN(pSession->gap)) { + return TSDB_CODE_SUCCESS; + } + + SToken* col = &pSession->col; + SToken* gap = &pSession->gap; + + char timeUnit = 0; + if (parseNatualDuration(gap->z, gap->n, &pQueryInfo->sessionWindow.gap, &timeUnit, precision) != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + + if (TIME_IS_VAR_DURATION(timeUnit)) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + if (pQueryInfo->sessionWindow.gap != 0 && pQueryInfo->interval.interval != 0) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + + if (pQueryInfo->sessionWindow.gap == 0) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if ((getColumnIndexByName(col, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS)) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_ID) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + + pQueryInfo->sessionWindow.primaryColId = PRIMARYKEY_TIMESTAMP_COL_ID; + return TSDB_CODE_SUCCESS; + // The following part is used to check for the invalid query expression. +// return checkInvalidExprForTimeWindow(pCmd, pQueryInfo); } // parse the window_state -int32_t validateStateWindowNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { -return 0; +int32_t validateStateWindowNode(SQueryStmtInfo *pQueryInfo, SWindowStateVal* pWindowState, SMsgBuf* pMsgBuf) { + const char* msg1 = "invalid column name"; + const char* msg2 = "invalid column type"; + const char* msg3 = "not support state_window with group by "; + const char* msg4 = "function not support for super table query"; + const char* msg5 = "not support state_window on tag column"; + + SToken *col = &(pWindowState->col) ; + if (!TPARSER_HAS_TOKEN(*col)) { + return TSDB_CODE_SUCCESS; + } + + SGroupbyExpr* pGroupExpr = &pQueryInfo->groupbyExpr; + if (taosArrayGetSize(pGroupExpr->columnInfo) > 0) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if (getColumnIndexByName(col, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + STableMetaInfo *pTableMetaInfo = getMetaInfo(pQueryInfo, index.tableIndex); + STableMeta* pTableMeta = pTableMetaInfo->pTableMeta; + + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + + if (TSDB_COL_IS_TAG(index.type)) { + return buildInvalidOperationMsg(pMsgBuf, msg5); + } + + if (pGroupExpr->columnInfo == NULL) { + pGroupExpr->columnInfo = taosArrayInit(4, sizeof(SColIndex)); + } + + SSchema* pSchema = getOneColumnSchema(pTableMeta, index.columnIndex); + if (pSchema->type == TSDB_DATA_TYPE_TIMESTAMP || IS_FLOAT_TYPE(pSchema->type)) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + + columnListInsert(pQueryInfo->colList, index.columnIndex, pTableMeta->uid, pSchema); + SColIndex colIndex = { .colIndex = index.columnIndex, .flag = TSDB_COL_NORMAL, .colId = pSchema->colId }; + + //TODO use group by routine? 
state window query not support stable query. + taosArrayPush(pGroupExpr->columnInfo, &colIndex); + pGroupExpr->orderType = TSDB_ORDER_ASC; + pQueryInfo->info.stateWindow = true; + + return TSDB_CODE_SUCCESS; } // parse the having clause in the first place @@ -609,16 +889,533 @@ int32_t validateHavingNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgB } int32_t validateLimitNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { - return 0; + STableMetaInfo* pTableMetaInfo = getMetaInfo(pQueryInfo, 0); + + const char* msg1 = "slimit/soffset only available for STable query"; + const char* msg2 = "slimit/soffset can not apply to projection query"; + const char* msg3 = "soffset/offset can not be less than 0"; + + // handle the limit offset value, validate the limit + pQueryInfo->limit = pSqlNode->limit; + pQueryInfo->slimit = pSqlNode->slimit; + +// tscDebug("0x%"PRIx64" limit:%" PRId64 ", offset:%" PRId64 " slimit:%" PRId64 ", soffset:%" PRId64, pSql->self, +// pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->slimit.limit, pQueryInfo->slimit.offset); + + if (pQueryInfo->slimit.offset < 0 || pQueryInfo->limit.offset < 0) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + + if (pQueryInfo->limit.limit == 0) { +// tscDebug("0x%"PRIx64" limit 0, no output result", pSql->self); + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; + } + + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { +// if (!tscQueryTags(pQueryInfo)) { // local handle the super table tag query +// if (tscIsProjectionQueryOnSTable(pQueryInfo, 0)) { +// if (pQueryInfo->slimit.limit > 0 || pQueryInfo->slimit.offset > 0) { +// return buildInvalidOperationMsg(pMsgBuf, msg2); +// } +// +// // for projection query on super table, all queries are subqueries +// if (tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && +// !TSDB_QUERY_HAS_TYPE(pQueryInfo->type, TSDB_QUERY_TYPE_JOIN_QUERY)) { +// pQueryInfo->type |= TSDB_QUERY_TYPE_SUBQUERY; +// } +// } +// } + + if (pQueryInfo->slimit.limit == 0) { +// tscDebug("0x%"PRIx64" slimit 0, no output result", pSql->self); + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; + } + + // No tables included. No results generated. Query results are empty. + if (pTableMetaInfo->vgroupList->numOfVgroups == 0) { +// tscDebug("0x%"PRIx64" no table in super table, no output result", pSql->self); + pQueryInfo->command = TSDB_SQL_RETRIEVE_EMPTY_RESULT; + return TSDB_CODE_SUCCESS; + } + } else { + if (pQueryInfo->slimit.limit != -1 || pQueryInfo->slimit.offset != 0) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + } } -// set order by info +static void setTsOutputExprInfo(SQueryStmtInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo, int32_t outputIndex, int32_t tableIndex); + + int32_t validateOrderbyNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { - return 0; + const char* msg1 = "invalid column name in orderby clause"; + const char* msg2 = "too many order by columns"; + const char* msg3 = "only one column allowed in orderby"; + const char* msg4 = "invalid order by column index"; + + if (pSqlNode->pSortOrder == NULL) { + return TSDB_CODE_SUCCESS; + } + + STableMetaInfo* pTableMetaInfo = getMetaInfo(pQueryInfo, 0); + SArray* pSortOrder = pSqlNode->pSortOrder; + + /* + * for table query, there is only one or none order option is allowed, which is the + * ts or values(top/bottom) order is supported. + * + * for super table query, the order option must be less than 3. 
+ */ + size_t size = taosArrayGetSize(pSortOrder); + if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { + if (size > 1) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + } else { + if (size > 2) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + } + + // handle the first part of order by + SVariant* pVar = taosArrayGet(pSortOrder, 0); + SSchema s = {0}; + if (pVar->nType == TSDB_DATA_TYPE_BINARY) { + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + SToken columnName = {pVar->nLen, pVar->nType, pVar->pz}; + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + s = *(SSchema*) getOneColumnSchema(pTableMetaInfo->pTableMeta, index.columnIndex); + } else { // order by [1|2|3] + if (pVar->i64 > getNumOfExprs(pQueryInfo)) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + + SExprInfo* pExprInfo = getExprInfo(pQueryInfo, pVar->i64); + s = pExprInfo->base.resSchema; + } + + SListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = s.colId; + + return TSDB_CODE_SUCCESS; +} + +#if 0 +// set order by info +int32_t checkForInvalidOrderby(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { + const char* msg0 = "only one column allowed in orderby"; + const char* msg1 = "invalid column name in orderby clause"; + const char* msg2 = "too many order by columns"; + const char* msg3 = "only primary timestamp/tbname/first tag in groupby clause allowed"; + const char* msg4 = "only tag in groupby clause allowed in order clause"; + const char* msg5 = "only primary timestamp/column in top/bottom function allowed as order column"; + const char* msg6 = "only primary timestamp allowed as the second order column"; + const char* msg7 = "only primary timestamp/column in groupby clause allowed as order column"; + const char* msg8 = "only column in groupby clause allowed as order column"; + const char* msg9 = "orderby column must projected in subquery"; + const char* msg10 = "not support distinct mixed with order by"; + +// setDefaultOrderInfo(pQueryInfo); + STableMetaInfo* pTableMetaInfo = getMetaInfo(pQueryInfo, 0); + SSchema* pSchema = getTableColumnSchema(pTableMetaInfo->pTableMeta); + int32_t numOfCols = getNumOfColumns(pTableMetaInfo->pTableMeta); + + if (pSqlNode->pSortOrder == NULL) { + return TSDB_CODE_SUCCESS; + } + + SArray* pSortOrder = pSqlNode->pSortOrder; + + /* + * for table query, there is only one or none order option is allowed, which is the + * ts or values(top/bottom) order is supported. + * + * for super table query, the order option must be less than 3. + */ + size_t size = taosArrayGetSize(pSortOrder); + if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo)) { + if (size > 1) { + return buildInvalidOperationMsg(pMsgBuf, msg0); + } + } else { + if (size > 2) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + } + +#if 0 + if (size > 0 && pQueryInfo->distinct) { + return buildInvalidOperationMsg(pMsgBuf, msg10); + } +#endif + + // handle the first part of order by + SVariant* pVar = taosArrayGet(pSortOrder, 0); + +#if 0 + // e.g., order by 1 asc, return directly with out further check. 
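/*
 * Unlike this disabled legacy path, the active validateOrderbyNode() above accepts
 * either a column name or a numeric output position; the SQL below is only an
 * assumed illustration of the two branches:
 *
 *   SELECT ts, avg(c1) FROM t INTERVAL(1m) ORDER BY ts  -- resolved via getColumnIndexByName()
 *   SELECT ts, avg(c1) FROM t INTERVAL(1m) ORDER BY 2   -- resolved via getExprInfo(pQueryInfo, 2)
 *
 * Positions larger than getNumOfExprs(pQueryInfo) are rejected with msg4; the chosen
 * column id ends up in pQueryInfo->order.orderColId.
 */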
+ if (pVar->nType >= TSDB_DATA_TYPE_TINYINT && pVar->nType <= TSDB_DATA_TYPE_BIGINT) { + return TSDB_CODE_SUCCESS; + } +#endif + + SToken columnName = {pVar->nLen, pVar->nType, pVar->pz}; + + SColumnIndex index = COLUMN_INDEX_INITIALIZER; + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { // super table query + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + bool orderByTags = false; + bool orderByTS = false; + bool orderByGroupbyCol = false; + + if (TSDB_COL_IS_TAG(index.type) && index.columnIndex != TSDB_TBNAME_COLUMN_INDEX) { + // it is a tag column + if (pQueryInfo->groupbyExpr.columnInfo == NULL) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + + int32_t relTagIndex = index.columnIndex - numOfCols; + SColIndex* pColIndex = taosArrayGet(pQueryInfo->groupbyExpr.columnInfo, 0); + if (relTagIndex == pColIndex->colIndex) { + orderByTags = true; + } + } else if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { + orderByTags = true; + } + + if (PRIMARYKEY_TIMESTAMP_COL_ID == index.columnIndex) { + orderByTS = true; + } + + SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; + if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { + SColIndex* pColIndex = taosArrayGet(columnInfo, 0); + if (PRIMARYKEY_TIMESTAMP_COL_ID != index.columnIndex && pColIndex->colIndex == index.columnIndex) { + orderByGroupbyCol = true; + } + } + + if (!(orderByTags || orderByTS || orderByGroupbyCol) /*&& !isTopBottomQuery(pQueryInfo)*/) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } else { // order by top/bottom result value column is not supported in case of interval query. + assert(!(orderByTags && orderByTS && orderByGroupbyCol)); + } + + size_t s = taosArrayGetSize(pSortOrder); + if (s == 1) { + if (orderByTags) { + pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - numOfCols; + + SListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->groupbyExpr.orderType = p1->sortOrder; + } else if (orderByGroupbyCol) { + SListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + + pQueryInfo->groupbyExpr.orderType = p1->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + } else if (isTopBottomQuery(pQueryInfo)) { + /* order of top/bottom query in interval is not valid */ + int32_t pos = tscExprTopBottomIndex(pQueryInfo); + assert(pos > 0); + + SExprInfo* pExpr = getExprInfo(pQueryInfo, pos - 1); +// assert(getExprFunctionId(pExpr) == FUNCTION_TS); + + pExpr = getExprInfo(pQueryInfo, pos); + + // other tag are not allowed + if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_ID) { + return buildInvalidOperationMsg(pMsgBuf, msg5); + } + + SListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = p1->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + return TSDB_CODE_SUCCESS; + } else { + SListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + + pQueryInfo->order.order = p1->sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_ID; + + // orderby ts query on super table + if (tscOrderedProjectionQueryOnSTable(pQueryInfo, 0)) { + bool found = false; + for (int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { + SExprInfo* pExpr = getExprInfo(pQueryInfo, i); + if (getExprFunctionId(pExpr) == FUNCTION_PRJ && pExpr->base.colInfo.colId == PRIMARYKEY_TIMESTAMP_COL_ID) { + found = true; + break; + } + } + + if (!found && 
pQueryInfo->pDownstream) { + return buildInvalidOperationMsg(pMsgBuf, msg9); + } + + // this is a invisible output column, in order to used to sort the result. + setTsOutputExprInfo(pQueryInfo, pTableMetaInfo, 0, index.tableIndex); + } + } + } else { + SListItem *pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + if (orderByTags) { + pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - numOfCols; + pQueryInfo->groupbyExpr.orderType = pItem->sortOrder; + } else if (orderByGroupbyCol) { + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = index.columnIndex; + } else { + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_ID; + } + + pItem = taosArrayGet(pSqlNode->pSortOrder, 1); + SVariant* pVar2 = &pItem->pVar; + SToken cname = {pVar2->nLen, pVar2->nType, pVar2->pz}; + if (getColumnIndexByName(&cname, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_ID) { + return buildInvalidOperationMsg(pMsgBuf, msg6); + } else { + SListItem* p1 = taosArrayGet(pSortOrder, 1); + pQueryInfo->order.order = p1->sortOrder; + pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_ID; + } + } + + } else if (UTIL_TABLE_IS_NORMAL_TABLE(pTableMetaInfo) || UTIL_TABLE_IS_CHILD_TABLE(pTableMetaInfo)) { // check order by clause for normal table & temp table + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_ID && !isTopBottomQuery(pQueryInfo)) { + bool validOrder = false; + SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; + if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { + SColIndex* pColIndex = taosArrayGet(columnInfo, 0); + validOrder = (pColIndex->colIndex == index.columnIndex); + } + + if (!validOrder) { + return buildInvalidOperationMsg(pMsgBuf, msg7); + } + + SListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; + pQueryInfo->groupbyExpr.orderType = p1->sortOrder; + } + + if (isTopBottomQuery(pQueryInfo)) { + SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; + if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { + SColIndex* pColIndex = taosArrayGet(columnInfo, 0); + + if (pColIndex->colIndex == index.columnIndex) { + return buildInvalidOperationMsg(pMsgBuf, msg8); + } + } else { + int32_t pos = tscExprTopBottomIndex(pQueryInfo); + assert(pos > 0); + SExprInfo* pExpr = getExprInfo(pQueryInfo, pos - 1); + assert(getExprFunctionId(pExpr) == FUNCTION_TS); + + pExpr = getExprInfo(pQueryInfo, pos); + + if (pExpr->base.colInfo.colIndex != index.columnIndex && index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_ID) { + return buildInvalidOperationMsg(pMsgBuf, msg5); + } + } + + SListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + return TSDB_CODE_SUCCESS; + } + + SListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + } else { + // handle the temp table order by clause. You can order by any single column in case of the temp table, created by + // inner subquery. 
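/*
 * checkQueryRangeForFill() below bounds fill queries by MAX_INTERVAL_TIME_WINDOW
 * (1000000, now defined in function.h): timeRange / interval must stay below that
 * limit, so with, say, INTERVAL(1s) the query window may span fewer than 1,000,000
 * seconds (roughly 11.6 days) before the "time range too large" error is returned.
 * The interval value here is illustrative only.
 */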
+ assert(UTIL_TABLE_IS_TMP_TABLE(pTableMetaInfo) && taosArrayGetSize(pSqlNode->pSortOrder) == 1); + + if (getColumnIndexByName(&columnName, pQueryInfo, &index, pMsgBuf) != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + SListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; + } + + return TSDB_CODE_SUCCESS; +} +#endif + +static int32_t checkQueryRangeForFill(SQueryStmtInfo* pQueryInfo, SMsgBuf* pMsgBuf) { + const char* msg3 = "start(end) time of time range required or time range too large"; + + if (pQueryInfo->interval.interval == 0) { + return TSDB_CODE_SUCCESS; + } + + bool initialWindows = TSWINDOW_IS_EQUAL(pQueryInfo->window, TSWINDOW_INITIALIZER); + if (initialWindows) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + + int64_t timeRange = ABS(pQueryInfo->window.skey - pQueryInfo->window.ekey); + + int64_t intervalRange = 0; + if (!TIME_IS_VAR_DURATION(pQueryInfo->interval.intervalUnit)) { + intervalRange = pQueryInfo->interval.interval; + + // number of result is not greater than 10,000,000 + if ((timeRange == 0) || (timeRange / intervalRange) >= MAX_INTERVAL_TIME_WINDOW) { + return buildInvalidOperationMsg(pMsgBuf, msg3); + } + } + + return TSDB_CODE_SUCCESS; } int32_t validateFillNode(SQueryStmtInfo *pQueryInfo, SSqlNode* pSqlNode, SMsgBuf* pMsgBuf) { - return 0; + SArray* pFillToken = pSqlNode->fillType; + if (pSqlNode->fillType == NULL) { + return TSDB_CODE_SUCCESS; + } + + SListItem* pItem = taosArrayGet(pFillToken, 0); + + const int32_t START_INTERPO_COL_IDX = 1; + + const char* msg1 = "value is expected"; + const char* msg2 = "invalid fill option"; + const char* msg3 = "top/bottom not support fill"; + const char* msg4 = "illegal value or data overflow"; + const char* msg5 = "fill only available for interval query"; + const char* msg6 = "not supported function now"; + +// if ((!isTimeWindowQuery(pQueryInfo)) && (!tscIsPointInterpQuery(pQueryInfo))) { +// return buildInvalidOperationMsg(pMsgBuf, msg5); +// } + + /* + * fill options are set at the end position, when all columns are set properly + * the columns may be increased due to group by operation + */ + if (checkQueryRangeForFill(pQueryInfo, pMsgBuf) != TSDB_CODE_SUCCESS) { + return TSDB_CODE_TSC_INVALID_OPERATION; + } + + + if (pItem->pVar.nType != TSDB_DATA_TYPE_BINARY) { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + + int32_t numOfFields = (int32_t) getNumOfFields(&pQueryInfo->fieldsInfo); + + pQueryInfo->fillVal = calloc(numOfFields, sizeof(int64_t)); + if (pQueryInfo->fillVal == NULL) { + return TSDB_CODE_TSC_OUT_OF_MEMORY; + } + + pQueryInfo->numOfFillVal = (int32_t)numOfFields; + if (strncasecmp(pItem->pVar.pz, "none", 4) == 0 && pItem->pVar.nLen == 4) { + pQueryInfo->fillType = TSDB_FILL_NONE; + } else if (strncasecmp(pItem->pVar.pz, "null", 4) == 0 && pItem->pVar.nLen == 4) { + pQueryInfo->fillType = TSDB_FILL_NULL; + for (int32_t i = START_INTERPO_COL_IDX; i < numOfFields; ++i) { + TAOS_FIELD* pField = &getInternalField(&pQueryInfo->fieldsInfo, i)->field; + setNull((char*)&pQueryInfo->fillVal[i], pField->type, pField->bytes); + } + } else if (strncasecmp(pItem->pVar.pz, "prev", 4) == 0 && pItem->pVar.nLen == 4) { + pQueryInfo->fillType = TSDB_FILL_PREV; +// if (pQueryInfo->info.interpQuery && pQueryInfo->order.order == TSDB_ORDER_DESC) { +// return buildInvalidOperationMsg(pMsgBuf, msg6); +// } + } else if (strncasecmp(pItem->pVar.pz, "next", 4) == 0 
&& pItem->pVar.nLen == 4) { + pQueryInfo->fillType = TSDB_FILL_NEXT; + } else if (strncasecmp(pItem->pVar.pz, "linear", 6) == 0 && pItem->pVar.nLen == 6) { + pQueryInfo->fillType = TSDB_FILL_LINEAR; + } else if (strncasecmp(pItem->pVar.pz, "value", 5) == 0 && pItem->pVar.nLen == 5) { + pQueryInfo->fillType = TSDB_FILL_SET_VALUE; + + size_t num = taosArrayGetSize(pFillToken); + if (num == 1) { // no actual value, return with error code + return buildInvalidOperationMsg(pMsgBuf, msg1); + } + + int32_t startPos = 1; + int32_t numOfFillVal = (int32_t)(num - 1); + + // for point interpolation query, we do not have the timestamp column + if (pQueryInfo->info.interpQuery) { + startPos = 0; + if (numOfFillVal > numOfFields) { + numOfFillVal = numOfFields; + } + } else { + numOfFillVal = MIN(num, numOfFields); + } + + int32_t j = 1; + + for (int32_t i = startPos; i < numOfFillVal; ++i, ++j) { + TAOS_FIELD* pField = &getInternalField(&pQueryInfo->fieldsInfo, i)->field; + if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull((char*) &pQueryInfo->fillVal[i], pField->type); + continue; + } + + SVariant* p = taosArrayGet(pFillToken, j); + int32_t ret = taosVariantDump(p, (char*)&pQueryInfo->fillVal[i], pField->type, true); + if (ret != TSDB_CODE_SUCCESS) { + return buildInvalidOperationMsg(pMsgBuf, msg4); + } + } + + if ((num < numOfFields) || ((num - 1 < numOfFields) && (pQueryInfo->info.interpQuery))) { + SListItem* lastItem = taosArrayGetLast(pFillToken); + + for (int32_t i = numOfFillVal; i < numOfFields; ++i) { + TAOS_FIELD* pField = &getInternalField(&pQueryInfo->fieldsInfo, i)->field; + + if (pField->type == TSDB_DATA_TYPE_BINARY || pField->type == TSDB_DATA_TYPE_NCHAR) { + setVardataNull((char*) &pQueryInfo->fillVal[i], pField->type); + } else { + taosVariantDump(&lastItem->pVar, (char*)&pQueryInfo->fillVal[i], pField->type, true); + } + } + } + } else { + return buildInvalidOperationMsg(pMsgBuf, msg2); + } + +// for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { +// SExprInfo* pExpr = getExprInfo(pQueryInfo, i); +// +// int32_t functionId = pExpr->pExpr->_node.functionId; +// if (functionId == FUNCTION_TOP || functionId == FUNCTION_BOTTOM) { +// return buildInvalidOperationMsg(pMsgBuf, msg3); +// } +// } + + return TSDB_CODE_SUCCESS; } int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* pMsgBuf) { @@ -679,6 +1476,7 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* STableMeta* pTableMeta = getMetaInfo(pQueryInfo, 0)->pTableMeta; SSchema* pSchema = getOneColumnSchema(pTableMeta, 0); + int32_t precision = pTableMeta->tableInfo.precision; if (pSchema->type != TSDB_DATA_TYPE_TIMESTAMP) { int32_t numOfExprs = (int32_t)tscNumOfExprs(pQueryInfo); @@ -708,32 +1506,19 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* if (validateIntervalNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } else { - if (validateSessionNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { + if (validateSessionNode(pQueryInfo, &pSqlNode->sessionVal, precision, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } // parse the window_state - if (validateStateWindowNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { + if (validateStateWindowNode(pQueryInfo, &pSqlNode->windowstateVal, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - -// if 
(isTimeWindowQuery(pQueryInfo)) { -// // check if the first column of the nest query result is timestamp column -// SColumn* pCol = taosArrayGetP(pQueryInfo->colList, 0); -// if (pCol->info.type != TSDB_DATA_TYPE_TIMESTAMP) { -// return buildInvalidOperationMsg(pMsgBuf, msg4); -// } -// -// if (validateFunctionsInIntervalOrGroupbyQuery(pCmd, pQueryInfo) != TSDB_CODE_SUCCESS) { -// return TSDB_CODE_TSC_INVALID_OPERATION; -// } -// } } // parse the having clause in the first place int32_t joinQuery = (pSqlNode->from != NULL && taosArrayGetSize(pSqlNode->from->list) > 1); - if (validateHavingNode(pQueryInfo, pSqlNode, pMsgBuf) != - TSDB_CODE_SUCCESS) { + if (validateHavingNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -758,6 +1543,8 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* } STableMetaInfo* pTableMetaInfo = getMetaInfo(pQueryInfo, 0); + int32_t precision = pTableMetaInfo->pTableMeta->tableInfo.precision; + bool isSTable = UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo); int32_t type = isSTable? TSDB_QUERY_TYPE_STABLE_QUERY:TSDB_QUERY_TYPE_TABLE_QUERY; @@ -767,14 +1554,12 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* if (validateGroupbyNode(pQueryInfo, pSqlNode->pGroupby, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - pQueryInfo->onlyHasTagCond = true; // set where info if (pSqlNode->pWhere != NULL) { if (validateWhereNode(pQueryInfo, pSqlNode->pWhere, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - pSqlNode->pWhere = NULL; } else { if (taosArrayGetSize(pSqlNode->from->list) > 1) { // Cross join not allowed yet return buildInvalidOperationMsg(pMsgBuf, "cross join not supported yet"); @@ -786,18 +1571,13 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* return TSDB_CODE_TSC_INVALID_OPERATION; } - if (isSTable && (pQueryInfo) && pQueryInfo->distinct && !pQueryInfo->onlyHasTagCond) { - return TSDB_CODE_TSC_INVALID_OPERATION; - } - // parse the window_state - if (validateStateWindowNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { + if (validateStateWindowNode(pQueryInfo, &pSqlNode->windowstateVal, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } // set order by info - if (validateOrderbyNode(pQueryInfo, pSqlNode, pMsgBuf) != - TSDB_CODE_SUCCESS) { + if (validateOrderbyNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -807,8 +1587,7 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* } // parse the having clause in the first place - if (validateHavingNode(pQueryInfo, pSqlNode, pMsgBuf) != - TSDB_CODE_SUCCESS) { + if (validateHavingNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -816,7 +1595,7 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* * transfer sql functions that need secondary merge into another format * in dealing with super table queries such as: count/first/last */ - if (validateSessionNode(pQueryInfo, pSqlNode, pMsgBuf) != TSDB_CODE_SUCCESS) { + if (validateSessionNode(pQueryInfo, &pSqlNode->sessionVal, precision, pMsgBuf) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -839,18 +1618,6 @@ int32_t validateSqlNode(SSqlNode* pSqlNode, SQueryStmtInfo* pQueryInfo, SMsgBuf* } } - // set the query info - SExprInfo** p = NULL; - int32_t 
numOfExpr = 0; - STableMetaInfo* pTableMetaInfo = getMetaInfo(pQueryInfo, 0); - code = createProjectionExpr(pQueryInfo, pTableMetaInfo, &p, &numOfExpr); - if (pQueryInfo->exprList1 == NULL) { - pQueryInfo->exprList1 = taosArrayInit(4, POINTER_BYTES); - } - - taosArrayAddBatch(pQueryInfo->exprList1, (void*) p, numOfExpr); - tfree(p); - return TSDB_CODE_SUCCESS; // Does not build query message here } @@ -947,7 +1714,7 @@ void doAddSourceColumnAndResColumn(SQueryStmtInfo* pQueryInfo, SColumnIndex* ind } static int32_t addOneExprInfo(SQueryStmtInfo* pQueryInfo, tSqlExprItem* pItem, int32_t functionId, - int32_t resColIdx, SColumnIndex* pColIndex, bool finalResult, SMsgBuf* pMsgBuf) { + int32_t outputColIndex, SColumnIndex* pColIndex, tExprNode* pNode, bool finalResult, SMsgBuf* pMsgBuf) { const char* msg1 = "not support column types"; STableMetaInfo* pTableMetaInfo = getMetaInfo(pQueryInfo, pColIndex->tableIndex); @@ -967,7 +1734,7 @@ static int32_t addOneExprInfo(SQueryStmtInfo* pQueryInfo, tSqlExprItem* pItem, i getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resInfo, 0, false); SSchema resultSchema = createSchema(resInfo.type, resInfo.bytes, getNewResColId(), name); - doAddOneExprInfo(pQueryInfo, resColIdx, functionId, pColIndex, pSchema, &resultSchema, NULL, resInfo.intermediateBytes, name); + doAddOneExprInfo(pQueryInfo, outputColIndex, functionId, pColIndex, pSchema, &resultSchema, pNode, resInfo.intermediateBytes, name); return TSDB_CODE_SUCCESS; } @@ -1005,7 +1772,7 @@ static int64_t getTickPerSecond(SVariant* pVariant, int32_t precision, int64_t* // set the first column ts for top/bottom query static void setTsOutputExprInfo(SQueryStmtInfo* pQueryInfo, STableMetaInfo* pTableMetaInfo, int32_t outputIndex, int32_t tableIndex) { - SColumnIndex indexTS = {.tableIndex = tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX, .type = TSDB_COL_NORMAL}; + SColumnIndex indexTS = {.tableIndex = tableIndex, .columnIndex = PRIMARYKEY_TIMESTAMP_COL_ID, .type = TSDB_COL_NORMAL}; SSchema s = createSchema(TSDB_DATA_TYPE_TIMESTAMP, TSDB_KEYSIZE, getNewResColId(), "ts"); SExprInfo* pExpr = createExprInfo(pTableMetaInfo, FUNCTION_TS_DUMMY, &indexTS, NULL, &s, TSDB_KEYSIZE); @@ -1038,7 +1805,7 @@ static int32_t setColumnIndex(SQueryStmtInfo* pQueryInfo, SArray* pParamList, SC return buildInvalidOperationMsg(pMsgBuf, msg4); } - index->columnIndex = PRIMARYKEY_TIMESTAMP_COL_INDEX; + index->columnIndex = PRIMARYKEY_TIMESTAMP_COL_ID; } else { // count the number of table created according to the super table if (getColumnIndexByName(pToken, pQueryInfo, index, pMsgBuf) != TSDB_CODE_SUCCESS) { @@ -1046,7 +1813,7 @@ static int32_t setColumnIndex(SQueryStmtInfo* pQueryInfo, SArray* pParamList, SC } } } else { // count(*) is equalled to count(primary_timestamp_key) - *index = (SColumnIndex) {0, PRIMARYKEY_TIMESTAMP_COL_INDEX, false}; + *index = (SColumnIndex) {0, PRIMARYKEY_TIMESTAMP_COL_ID, false}; } return TSDB_CODE_SUCCESS; @@ -1057,7 +1824,7 @@ static int32_t doAddAllColumnExprInSelectClause(SQueryStmtInfo *pQueryInfo, STab for (int32_t i = 0; i < getNumOfColumns(pTableMetaInfo->pTableMeta); ++i) { SColumnIndex index = {.tableIndex = tableIndex, .columnIndex = i, .type = TSDB_COL_NORMAL}; - if (addOneExprInfo(pQueryInfo, pItem, functionId, *colIndex, &index, finalResult, pMsgBuf) != 0) { + if (addOneExprInfo(pQueryInfo, pItem, functionId, *colIndex, &index, NULL, finalResult, pMsgBuf) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } @@ -1102,7 +1869,7 @@ static int32_t 
doHandleOneParam(SQueryStmtInfo *pQueryInfo, tSqlExprItem* pItem, return buildInvalidOperationMsg(pMsgBuf, msg6); } - if (addOneExprInfo(pQueryInfo, pItem, functionId, (*outputIndex)++, &index, finalResult, pMsgBuf) != 0) { + if (addOneExprInfo(pQueryInfo, pItem, functionId, (*outputIndex)++, &index, pNode, finalResult, pMsgBuf) != 0) { return TSDB_CODE_TSC_INVALID_OPERATION; } } @@ -1156,6 +1923,8 @@ int32_t extractFunctionParameterInfo(SQueryStmtInfo* pQueryInfo, int32_t tokenId pIndex->tableIndex = 0; multiColumnListInsert(pQueryInfo, pColumnList, pMsgBuf); + taosArrayDestroy(colList); + taosArrayDestroy(pColumnList); } else { assert(0); } @@ -1421,7 +2190,6 @@ int32_t addExprAndResColumn(SQueryStmtInfo* pQueryInfo, int32_t colIndex, tSqlEx return buildInvalidOperationMsg(pMsgBuf, msg5); } - getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resInfo, 0, false); /* @@ -2129,13 +2897,11 @@ static int32_t createComplexExpr(SQueryStmtInfo* pQueryInfo, int32_t exprIndex, char* c = tbufGetData(&bw, false); // set the serialized binary string as the parameter of arithmetic expression - SColumnIndex* index1 = taosArrayGet(pColumnList, 0); addExprInfoParam(&pExpr->base, c, TSDB_DATA_TYPE_BINARY, (int32_t)len); addResColumnInfo(pQueryInfo, exprIndex, &pExpr->base.resSchema, pExpr); tbufCloseWriter(&bw); taosArrayDestroy(colList); - tExprTreeDestroy(pNode, NULL); } else { SColumnIndex columnIndex = {0}; @@ -2174,6 +2940,7 @@ static int32_t createComplexExpr(SQueryStmtInfo* pQueryInfo, int32_t exprIndex, // tbufCloseWriter(&bw); // TODO there is a memory leak } + taosArrayDestroy(pColumnList); return TSDB_CODE_SUCCESS; } @@ -2756,6 +3523,9 @@ int32_t qParserValidateSqlNode(struct SCatalog* pCatalog, SSqlInfo* pInfo, SQuer validateSqlNode(p, pQueryInfo, &buf); } + SArray* functionList = extractFunctionIdList(pQueryInfo->exprList); + extractFunctionDesc(functionList, &pQueryInfo->info); + if ((code = checkForInvalidExpr(pQueryInfo, &buf)) != TSDB_CODE_SUCCESS) { return code; } diff --git a/source/libs/parser/src/parser.c b/source/libs/parser/src/parser.c index ff48f9c749..051d19ccbf 100644 --- a/source/libs/parser/src/parser.c +++ b/source/libs/parser/src/parser.c @@ -185,4 +185,13 @@ int32_t qParserExtractRequestedMetaInfo(const SSqlInfo* pSqlInfo, SMetaReq* pMet } return code; -} \ No newline at end of file +} + +void qParserClearupMetaRequestInfo(SMetaReq* pMetaReq) { + if (pMetaReq == NULL) { + return; + } + + taosArrayDestroy(pMetaReq->pTableName); + taosArrayDestroy(pMetaReq->pUdf); +} diff --git a/source/libs/parser/src/parserUtil.c b/source/libs/parser/src/parserUtil.c index bb473e9a73..c970283ca7 100644 --- a/source/libs/parser/src/parserUtil.c +++ b/source/libs/parser/src/parserUtil.c @@ -576,13 +576,6 @@ TAOS_FIELD* getFieldInfo(SFieldInfo* pFieldInfo, int32_t index) { return &((SInternalField*)TARRAY_GET_ELEM(pFieldInfo->internalField, index))->field; } -int16_t getFieldInfoOffset(SQueryStmtInfo* pQueryInfo, int32_t index) { - SInternalField* pInfo = getInternalField(&pQueryInfo->fieldsInfo, index); - assert(pInfo != NULL && pInfo->pExpr->pExpr == NULL); - return 0; -// return pInfo->pExpr->base.offset; -} - int32_t fieldInfoCompare(const SFieldInfo* pFieldInfo1, const SFieldInfo* pFieldInfo2, int32_t *diffSize) { assert(pFieldInfo1 != NULL && pFieldInfo2 != NULL); @@ -780,8 +773,8 @@ SColumn* columnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t uid } SColumn* insertPrimaryTsColumn(SArray* pColumnList, uint64_t tableUid) { - SSchema s = {.type = 
TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE, .colId = PRIMARYKEY_TIMESTAMP_COL_INDEX}; - return columnListInsert(pColumnList, PRIMARYKEY_TIMESTAMP_COL_INDEX, tableUid, &s); + SSchema s = {.type = TSDB_DATA_TYPE_TIMESTAMP, .bytes = TSDB_KEYSIZE, .colId = PRIMARYKEY_TIMESTAMP_COL_ID}; + return columnListInsert(pColumnList, PRIMARYKEY_TIMESTAMP_COL_ID, tableUid, &s); } void columnCopy(SColumn* pDest, const SColumn* pSrc); diff --git a/source/libs/parser/src/queryInfoUtil.c b/source/libs/parser/src/queryInfoUtil.c index 4dd45fd54c..8abbb14e78 100644 --- a/source/libs/parser/src/queryInfoUtil.c +++ b/source/libs/parser/src/queryInfoUtil.c @@ -1,4 +1,5 @@ #include "queryInfoUtil.h" +#include #include "astGenerator.h" #include "function.h" #include "os.h" @@ -55,7 +56,6 @@ SSchema* getTableTagSchema(const STableMeta* pTableMeta) { } static tExprNode* createUnaryFunctionExprNode(int32_t functionId, SSchema* pSchema, tExprNode* pColumnNode) { - if (pColumnNode == NULL) { pColumnNode = calloc(1, sizeof(tExprNode)); pColumnNode->nodeType = TEXPR_COL_NODE; @@ -167,6 +167,10 @@ SExprInfo* getExprInfo(SQueryStmtInfo* pQueryInfo, int32_t index) { void destroyExprInfo(SExprInfo* pExprInfo) { tExprTreeDestroy(pExprInfo->pExpr, NULL); + + for(int32_t i = 0; i < pExprInfo->base.numOfParams; ++i) { + taosVariantDestroy(&pExprInfo->base.param[i]); + } tfree(pExprInfo); } @@ -192,6 +196,11 @@ void addExprInfoParam(SSqlExpr* pExpr, char* argument, int32_t type, int32_t byt assert(pExpr->numOfParams <= 3); } +int32_t getExprFunctionId(SExprInfo *pExprInfo) { + assert(pExprInfo != NULL && pExprInfo->pExpr != NULL && pExprInfo->pExpr->nodeType == TEXPR_UNARYEXPR_NODE); + return pExprInfo->pExpr->_node.functionId; +} + void assignExprInfo(SExprInfo* dst, const SExprInfo* src) { assert(dst != NULL && src != NULL); @@ -284,62 +293,11 @@ int32_t getResRowLength(SArray* pExprList) { return size; } -static void freeQueryInfoImpl(SQueryStmtInfo* pQueryInfo) { - cleanupTagCond(&pQueryInfo->tagCond); - cleanupColumnCond(&pQueryInfo->colCond); - cleanupFieldInfo(&pQueryInfo->fieldsInfo); - - dropAllExprInfo(pQueryInfo->exprList); - pQueryInfo->exprList = NULL; - - if (pQueryInfo->exprList1 != NULL) { - dropAllExprInfo(pQueryInfo->exprList1); - pQueryInfo->exprList1 = NULL; - } - - columnListDestroy(pQueryInfo->colList); - pQueryInfo->colList = NULL; - - if (pQueryInfo->groupbyExpr.columnInfo != NULL) { - taosArrayDestroy(pQueryInfo->groupbyExpr.columnInfo); - pQueryInfo->groupbyExpr.columnInfo = NULL; - } - - pQueryInfo->fillType = 0; - - tfree(pQueryInfo->fillVal); - tfree(pQueryInfo->buf); - - taosArrayDestroy(pQueryInfo->pUpstream); - pQueryInfo->pUpstream = NULL; - pQueryInfo->bufLen = 0; -} - -void freeQueryInfo(SQueryStmtInfo* pQueryInfo, bool removeCachedMeta, uint64_t id) { - while(pQueryInfo != NULL) { - SQueryStmtInfo* p = pQueryInfo->sibling; - - size_t numOfUpstream = taosArrayGetSize(pQueryInfo->pUpstream); - for(int32_t i = 0; i < numOfUpstream; ++i) { - SQueryStmtInfo* pUpQueryInfo = taosArrayGetP(pQueryInfo->pUpstream, i); - freeQueryInfoImpl(pUpQueryInfo); - clearAllTableMetaInfo(pUpQueryInfo, removeCachedMeta, id); - tfree(pUpQueryInfo); - } - - freeQueryInfoImpl(pQueryInfo); - clearAllTableMetaInfo(pQueryInfo, removeCachedMeta, id); - - tfree(pQueryInfo); - pQueryInfo = p; - } -} - SArray* extractFunctionIdList(SArray* pExprInfoList) { assert(pExprInfoList != NULL); size_t len = taosArrayGetSize(pExprInfoList); - SArray* p = taosArrayInit(len, sizeof(int16_t)); + SArray* p = taosArrayInit(len, 
sizeof(int32_t)); for(int32_t i = 0; i < len; ++i) { SExprInfo* pExprInfo = taosArrayGetP(pExprInfoList, i); taosArrayPush(p, &pExprInfo->pExpr->_node.functionId); diff --git a/source/libs/parser/test/CMakeLists.txt b/source/libs/parser/test/CMakeLists.txt index 184d44a53b..7c90cab6d9 100644 --- a/source/libs/parser/test/CMakeLists.txt +++ b/source/libs/parser/test/CMakeLists.txt @@ -5,14 +5,14 @@ MESSAGE(STATUS "build parser unit test") SET(CMAKE_CXX_STANDARD 11) AUX_SOURCE_DIRECTORY(${CMAKE_CURRENT_SOURCE_DIR} SOURCE_LIST) -ADD_EXECUTABLE(astTest ${SOURCE_LIST}) +ADD_EXECUTABLE(parserTest ${SOURCE_LIST}) TARGET_LINK_LIBRARIES( - astTest - PUBLIC os util common parser catalog transport gtest + parserTest + PUBLIC os util common parser catalog transport gtest function ) TARGET_INCLUDE_DIRECTORIES( - astTest + parserTest PUBLIC "${CMAKE_SOURCE_DIR}/include/libs/parser/" PRIVATE "${CMAKE_SOURCE_DIR}/source/libs/parser/inc" ) diff --git a/source/libs/parser/test/parserTests.cpp b/source/libs/parser/test/parserTests.cpp index 1f4203e9bf..d02df60498 100644 --- a/source/libs/parser/test/parserTests.cpp +++ b/source/libs/parser/test/parserTests.cpp @@ -13,6 +13,7 @@ * along with this program. If not, see . */ +#include #include #include #pragma GCC diagnostic ignored "-Wwrite-strings" @@ -65,61 +66,64 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { } } -//TEST(testCase, validateAST_test) { -// SSqlInfo info1 = doGenerateAST("select a a1111, a+b + 22, tbname from `t.1abc` where tsexprList; -// ASSERT_EQ(taosArrayGetSize(pExprList), 3); -// -// SExprInfo* p1 = (SExprInfo*) taosArrayGetP(pExprList, 0); -// ASSERT_EQ(p1->base.uid, 110); -// ASSERT_EQ(p1->base.numOfParams, 0); -// ASSERT_EQ(p1->base.resSchema.type, TSDB_DATA_TYPE_INT); -// ASSERT_STRCASEEQ(p1->base.resSchema.name, "a1111"); -// ASSERT_STRCASEEQ(p1->base.colInfo.name, "t.1abc.a"); +TEST(testCase, validateAST_test) { + SSqlInfo info1 = doGenerateAST("select a a1111, a+b + 22, tbname from `t.1abc` where tsexprList; + ASSERT_EQ(taosArrayGetSize(pExprList), 3); + + SExprInfo* p1 = (SExprInfo*) taosArrayGetP(pExprList, 0); + ASSERT_EQ(p1->base.uid, 110); + ASSERT_EQ(p1->base.numOfParams, 0); + ASSERT_EQ(p1->base.resSchema.type, TSDB_DATA_TYPE_INT); + ASSERT_STRCASEEQ(p1->base.resSchema.name, "a1111"); + ASSERT_STRCASEEQ(p1->base.colInfo.name, "t.1abc.a"); + ASSERT_EQ(p1->base.colInfo.colId, 1); + ASSERT_EQ(p1->base.colInfo.flag, TSDB_COL_NORMAL); + ASSERT_STRCASEEQ(p1->base.token, "a"); + + ASSERT_EQ(taosArrayGetSize(pExprList), 3); + + SExprInfo* p2 = (SExprInfo*) taosArrayGetP(pExprList, 1); + ASSERT_EQ(p2->base.uid, 0); + ASSERT_EQ(p2->base.numOfParams, 1); // it is the serialized binary string of expression. + ASSERT_EQ(p2->base.resSchema.type, TSDB_DATA_TYPE_DOUBLE); + ASSERT_STRCASEEQ(p2->base.resSchema.name, "a+b + 22"); + +// ASSERT_STRCASEEQ(p2->base.colInfo.name, "t.1abc.a"); // ASSERT_EQ(p1->base.colInfo.colId, 1); // ASSERT_EQ(p1->base.colInfo.flag, TSDB_COL_NORMAL); -// ASSERT_STRCASEEQ(p1->base.token, "a"); -// -// ASSERT_EQ(taosArrayGetSize(pExprList), 3); -// -// SExprInfo* p2 = (SExprInfo*) taosArrayGetP(pExprList, 1); -// ASSERT_EQ(p2->base.uid, 0); -// ASSERT_EQ(p2->base.numOfParams, 1); // it is the serialized binary string of expression. 
-// ASSERT_EQ(p2->base.resSchema.type, TSDB_DATA_TYPE_DOUBLE); -// ASSERT_STRCASEEQ(p2->base.resSchema.name, "a+b + 22"); -// -//// ASSERT_STRCASEEQ(p2->base.colInfo.name, "t.1abc.a"); -//// ASSERT_EQ(p1->base.colInfo.colId, 1); -//// ASSERT_EQ(p1->base.colInfo.flag, TSDB_COL_NORMAL); -// ASSERT_STRCASEEQ(p2->base.token, "a+b + 22"); -// -// ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 3); -// ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 3); -//} -// + ASSERT_STRCASEEQ(p2->base.token, "a+b + 22"); + + ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 3); + ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 3); + + destroyQueryInfo(pQueryInfo); + qParserClearupMetaRequestInfo(&req); + destroySqlInfo(&info1); +} + //TEST(testCase, function_Test) { // SSqlInfo info1 = doGenerateAST("select count(a) from `t.1abc`"); // ASSERT_EQ(info1.valid, true); @@ -138,8 +142,7 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // ASSERT_EQ(ret, 0); // ASSERT_EQ(taosArrayGetSize(req.pTableName), 1); // -// SQueryStmtInfo* pQueryInfo = (SQueryStmtInfo*)calloc(1, sizeof(SQueryStmtInfo)); -// initQueryInfo(pQueryInfo); +// SQueryStmtInfo* pQueryInfo = createQueryInfo(); // setTableMetaInfo(pQueryInfo, &req); // // SSqlNode* pSqlNode = (SSqlNode*)taosArrayGetP(info1.list, 0); @@ -161,6 +164,10 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // // ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 2); // ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 1); +// +// destroyQueryInfo(pQueryInfo); +// qParserClearupMetaRequestInfo(&req); +// destroySqlInfo(&info1); //} // //TEST(testCase, function_Test2) { @@ -181,8 +188,7 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // ASSERT_EQ(ret, 0); // ASSERT_EQ(taosArrayGetSize(req.pTableName), 1); // -// SQueryStmtInfo* pQueryInfo = (SQueryStmtInfo*)calloc(1, sizeof(SQueryStmtInfo)); -// initQueryInfo(pQueryInfo); +// SQueryStmtInfo* pQueryInfo = createQueryInfo(); // setTableMetaInfo(pQueryInfo, &req); // // SSqlNode* pSqlNode = (SSqlNode*)taosArrayGetP(info1.list, 0); @@ -204,6 +210,10 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // // ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 2); // ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 1); +// +// destroyQueryInfo(pQueryInfo); +// qParserClearupMetaRequestInfo(&req); +// destroySqlInfo(&info1); //} // //TEST(testCase, function_Test3) { @@ -224,8 +234,7 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // ASSERT_EQ(ret, 0); // ASSERT_EQ(taosArrayGetSize(req.pTableName), 1); // -// SQueryStmtInfo* pQueryInfo = (SQueryStmtInfo*)calloc(1, sizeof(SQueryStmtInfo)); -// initQueryInfo(pQueryInfo); +// SQueryStmtInfo* pQueryInfo = createQueryInfo(); // setTableMetaInfo(pQueryInfo, &req); // // SSqlNode* pSqlNode = (SSqlNode*)taosArrayGetP(info1.list, 0); @@ -246,6 +255,10 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // ASSERT_EQ(p1->base.interBytes, 24); // // ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 4); +// +// destroyQueryInfo(pQueryInfo); +// qParserClearupMetaRequestInfo(&req); +// destroySqlInfo(&info1); //} // //TEST(testCase, function_Test4) { @@ -266,8 +279,7 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // ASSERT_EQ(ret, 0); // ASSERT_EQ(taosArrayGetSize(req.pTableName), 1); // -// SQueryStmtInfo* pQueryInfo = (SQueryStmtInfo*)calloc(1, sizeof(SQueryStmtInfo)); -// initQueryInfo(pQueryInfo); +// SQueryStmtInfo* pQueryInfo = createQueryInfo(); // 
setTableMetaInfo(pQueryInfo, &req); // // SSqlNode* pSqlNode = (SSqlNode*)taosArrayGetP(info1.list, 0); @@ -289,6 +301,10 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // // ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 1); // ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 1); +// +// destroyQueryInfo(pQueryInfo); +// qParserClearupMetaRequestInfo(&req); +// destroySqlInfo(&info1); //} // //TEST(testCase, function_Test5) { @@ -309,8 +325,7 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // ASSERT_EQ(ret, 0); // ASSERT_EQ(taosArrayGetSize(req.pTableName), 1); // -// SQueryStmtInfo* pQueryInfo = (SQueryStmtInfo*)calloc(1, sizeof(SQueryStmtInfo)); -// initQueryInfo(pQueryInfo); +// SQueryStmtInfo* pQueryInfo = createQueryInfo(); // setTableMetaInfo(pQueryInfo, &req); // // SSqlNode* pSqlNode = (SSqlNode*)taosArrayGetP(info1.list, 0); @@ -333,46 +348,63 @@ void setTableMetaInfo(SQueryStmtInfo* pQueryInfo, SMetaReq *req) { // // ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 3); // ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 1); +// +// destroyQueryInfo(pQueryInfo); +// qParserClearupMetaRequestInfo(&req); +// destroySqlInfo(&info1); //} - -TEST(testCase, function_Test6) { - SSqlInfo info1 = doGenerateAST("select sum(a+b) as a1, first(b*a) from `t.1abc`"); - ASSERT_EQ(info1.valid, true); - - char msg[128] = {0}; - SMsgBuf buf; - buf.len = 128; - buf.buf = msg; - - SSqlNode* pNode = (SSqlNode*) taosArrayGetP(((SArray*)info1.list), 0); - int32_t code = evaluateSqlNode(pNode, TSDB_TIME_PRECISION_NANO, &buf); - ASSERT_EQ(code, 0); - - SMetaReq req = {0}; - int32_t ret = qParserExtractRequestedMetaInfo(&info1, &req, msg, 128); - ASSERT_EQ(ret, 0); - ASSERT_EQ(taosArrayGetSize(req.pTableName), 1); - - SQueryStmtInfo* pQueryInfo = (SQueryStmtInfo*)calloc(1, sizeof(SQueryStmtInfo)); - initQueryInfo(pQueryInfo); - setTableMetaInfo(pQueryInfo, &req); - - SSqlNode* pSqlNode = (SSqlNode*)taosArrayGetP(info1.list, 0); - ret = validateSqlNode(pSqlNode, pQueryInfo, &buf); - ASSERT_EQ(ret, 0); - - SArray* pExprList = pQueryInfo->exprList; - ASSERT_EQ(taosArrayGetSize(pExprList), 2); - - SExprInfo* p1 = (SExprInfo*) taosArrayGetP(pExprList, 0); - ASSERT_EQ(p1->base.uid, 110); - ASSERT_EQ(p1->base.numOfParams, 0); - ASSERT_EQ(p1->base.resSchema.type, TSDB_DATA_TYPE_DOUBLE); - ASSERT_STRCASEEQ(p1->base.resSchema.name, "a1"); - ASSERT_EQ(p1->base.colInfo.flag, TSDB_COL_NORMAL); - ASSERT_STRCASEEQ(p1->base.token, "sum(a+b)"); - ASSERT_EQ(p1->base.interBytes, 16); - - ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 3); - ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 2); -} \ No newline at end of file +// +//TEST(testCase, function_Test6) { +// SSqlInfo info1 = doGenerateAST("select sum(a+b) as a1, first(b*a) from `t.1abc` interval(10s, 1s)"); +// ASSERT_EQ(info1.valid, true); +// +// char msg[128] = {0}; +// SMsgBuf buf; +// buf.len = 128; +// buf.buf = msg; +// +// SSqlNode* pNode = (SSqlNode*) taosArrayGetP(((SArray*)info1.list), 0); +// int32_t code = evaluateSqlNode(pNode, TSDB_TIME_PRECISION_NANO, &buf); +// ASSERT_EQ(code, 0); +// +// SMetaReq req = {0}; +// int32_t ret = qParserExtractRequestedMetaInfo(&info1, &req, msg, 128); +// ASSERT_EQ(ret, 0); +// ASSERT_EQ(taosArrayGetSize(req.pTableName), 1); +// +// SQueryStmtInfo* pQueryInfo = createQueryInfo(); +// setTableMetaInfo(pQueryInfo, &req); +// +// SSqlNode* pSqlNode = (SSqlNode*)taosArrayGetP(info1.list, 0); +// ret = validateSqlNode(pSqlNode, pQueryInfo, &buf); +// ASSERT_EQ(ret, 0); +// +// SArray* 
pExprList = pQueryInfo->exprList; +// ASSERT_EQ(taosArrayGetSize(pExprList), 2); +// +// SExprInfo* p1 = (SExprInfo*) taosArrayGetP(pExprList, 0); +// ASSERT_EQ(p1->base.uid, 110); +// ASSERT_EQ(p1->base.numOfParams, 0); +// ASSERT_EQ(p1->base.resSchema.type, TSDB_DATA_TYPE_DOUBLE); +// ASSERT_STRCASEEQ(p1->base.resSchema.name, "a1"); +// ASSERT_EQ(p1->base.colInfo.flag, TSDB_COL_NORMAL); +// ASSERT_STRCASEEQ(p1->base.token, "sum(a+b)"); +// ASSERT_EQ(p1->base.interBytes, 16); +// ASSERT_EQ(p1->pExpr->nodeType, TEXPR_UNARYEXPR_NODE); +// ASSERT_EQ(p1->pExpr->_node.functionId, FUNCTION_SUM); +// ASSERT_TRUE(p1->pExpr->_node.pRight == NULL); +// +// tExprNode* pParam = p1->pExpr->_node.pLeft; +// +// ASSERT_EQ(pParam->nodeType, TEXPR_BINARYEXPR_NODE); +// ASSERT_EQ(pParam->_node.optr, TSDB_BINARY_OP_ADD); +// ASSERT_EQ(pParam->_node.pLeft->nodeType, TEXPR_COL_NODE); +// ASSERT_EQ(pParam->_node.pRight->nodeType, TEXPR_COL_NODE); +// +// ASSERT_EQ(taosArrayGetSize(pQueryInfo->colList), 3); +// ASSERT_EQ(pQueryInfo->fieldsInfo.numOfOutput, 2); +// +// destroyQueryInfo(pQueryInfo); +// qParserClearupMetaRequestInfo(&req); +// destroySqlInfo(&info1); +//} \ No newline at end of file diff --git a/source/libs/parser/test/tokenizerTest.cpp b/source/libs/parser/test/tokenizerTest.cpp index 3527e27eb4..2296ede80b 100644 --- a/source/libs/parser/test/tokenizerTest.cpp +++ b/source/libs/parser/test/tokenizerTest.cpp @@ -667,51 +667,59 @@ TEST(testCase, isValidNumber_test) { EXPECT_EQ(tGetNumericStringType(&t1), TK_FLOAT); } -TEST(testCase, generateAST_test) { - SSqlInfo info = doGenerateAST("select * from t1 where ts < now"); - ASSERT_EQ(info.valid, true); - - SSqlInfo info1 = doGenerateAST("select * from `t.1abc` where ts Date: Thu, 28 Oct 2021 16:57:02 +0800 Subject: [PATCH 7/7] Call wal and sync code in vnode --- include/libs/sync/sync.h | 24 ++++----- include/libs/wal/wal.h | 58 ++++++++++---------- source/libs/wal/src/wal.c | 16 +++++- source/server/vnode/inc/vnodeFile.h | 4 +- source/server/vnode/inc/vnodeInt.h | 2 +- source/server/vnode/src/vnodeFile.c | 8 +-- source/server/vnode/src/vnodeMain.c | 84 +++++++++++++++++++++++------ 7 files changed, 129 insertions(+), 67 deletions(-) diff --git a/include/libs/sync/sync.h b/include/libs/sync/sync.h index e8a8dee866..9ffd74c229 100644 --- a/include/libs/sync/sync.h +++ b/include/libs/sync/sync.h @@ -46,14 +46,14 @@ typedef struct { } SNodeInfo; typedef struct { - int selfIndex; - int replica; + int32_t selfIndex; + int32_t replica; SNodeInfo nodeInfo[TSDB_MAX_REPLICA]; } SSyncCluster; typedef struct { int32_t selfIndex; - int replica; + int32_t replica; SNodeInfo node[TSDB_MAX_REPLICA]; ESyncRole role[TSDB_MAX_REPLICA]; } SNodesRole; @@ -62,20 +62,20 @@ typedef struct SSyncFSM { void* pData; // apply committed log, bufs will be free by raft module - int (*applyLog)(struct SSyncFSM* fsm, SyncIndex index, const SSyncBuffer* buf, void* pData); + int32_t (*applyLog)(struct SSyncFSM* fsm, SyncIndex index, const SSyncBuffer* buf, void* pData); // cluster commit callback - int (*onClusterChanged)(struct SSyncFSM* fsm, const SSyncCluster* cluster, void* pData); + int32_t (*onClusterChanged)(struct SSyncFSM* fsm, const SSyncCluster* cluster, void* pData); // fsm return snapshot in ppBuf, bufs will be free by raft module // TODO: getSnapshot SHOULD be async? 
- int (*getSnapshot)(struct SSyncFSM* fsm, SSyncBuffer** ppBuf, int* objId, bool* isLast); + int32_t (*getSnapshot)(struct SSyncFSM* fsm, SSyncBuffer** ppBuf, int32_t* objId, bool* isLast); // fsm apply snapshot with pBuf data - int (*applySnapshot)(struct SSyncFSM* fsm, SSyncBuffer* pBuf, int objId, bool isLast); + int32_t (*applySnapshot)(struct SSyncFSM* fsm, SSyncBuffer* pBuf, int32_t objId, bool isLast); // call when restore snapshot and log done - int (*onRestoreDone)(struct SSyncFSM* fsm); + int32_t (*onRestoreDone)(struct SSyncFSM* fsm); void (*onRollback)(struct SSyncFSM* fsm, SyncIndex index, const SSyncBuffer* buf); @@ -118,9 +118,9 @@ typedef struct SSyncClusterConfig { typedef struct SStateManager { void* pData; - void (*saveServerState)(struct SStateManager* stateMng, const SSyncServerState* state); + int32_t (*saveServerState)(struct SStateManager* stateMng, SSyncServerState* state); - const SSyncServerState* (*readServerState)(struct SStateManager* stateMng); + int32_t (*readServerState)(struct SStateManager* stateMng, SSyncServerState* state); // void (*saveCluster)(struct SStateManager* stateMng, const SSyncClusterConfig* cluster); @@ -148,9 +148,9 @@ void syncStop(const SSyncNode*); int32_t syncPropose(SSyncNode* syncNode, SSyncBuffer buffer, void* pData, bool isWeak); -//int32_t syncAddNode(SSyncNode syncNode, const SNodeInfo *pNode); +// int32_t syncAddNode(SSyncNode syncNode, const SNodeInfo *pNode); -//int32_t syncRemoveNode(SSyncNode syncNode, const SNodeInfo *pNode); +// int32_t syncRemoveNode(SSyncNode syncNode, const SNodeInfo *pNode); extern int32_t syncDebugFlag; diff --git a/include/libs/wal/wal.h b/include/libs/wal/wal.h index 9a3310922d..143bdf0710 100644 --- a/include/libs/wal/wal.h +++ b/include/libs/wal/wal.h @@ -44,41 +44,41 @@ typedef struct { EWalType walLevel; // wal level } SWalCfg; -typedef void * twalh; // WAL HANDLE -typedef int32_t FWalWrite(void *ahandle, void *pHead, int32_t qtype, void *pMsg); +struct SWal; +typedef struct SWal SWal; // WAL HANDLE +typedef int32_t (*FWalWrite)(void *ahandle, void *pHead, int32_t qtype, void *pMsg); -//module initialization -int32_t walInit(); -void walCleanUp(); +// module initialization +int32_t walInit(); +void walCleanUp(); -//handle open and ctl -twalh walOpen(char *path, SWalCfg *pCfg); -int32_t walAlter(twalh, SWalCfg *pCfg); -void walStop(twalh); -void walClose(twalh); +// handle open and ctl +SWal *walOpen(char *path, SWalCfg *pCfg); +int32_t walAlter(SWal *, SWalCfg *pCfg); +void walClose(SWal *); -//write -//int64_t walWriteWithMsgType(twalh, int8_t msgType, void* body, int32_t bodyLen); -int64_t walWrite(twalh, void* body, int32_t bodyLen); -int64_t walWriteBatch(twalh, void** bodies, int32_t* bodyLen, int32_t batchSize); +// write +// int64_t walWriteWithMsgType(SWal*, int8_t msgType, void* body, int32_t bodyLen); +int64_t walWrite(SWal *, int64_t index, void *body, int32_t bodyLen); +int64_t walWriteBatch(SWal *, void **bodies, int32_t *bodyLen, int32_t batchSize); -//apis for lifecycle management -void walFsync(twalh, bool force); -int32_t walCommit(twalh, int64_t ver); -//truncate after -int32_t walRollback(twalh, int64_t ver); -//notify that previous log can be pruned safely -int32_t walPrune(twalh, int64_t ver); +// apis for lifecycle management +void walFsync(SWal *, bool force); +int32_t walCommit(SWal *, int64_t ver); +// truncate after +int32_t walRollback(SWal *, int64_t ver); +// notify that previous log can be pruned safely +int32_t walPrune(SWal *, int64_t ver); -//read -int32_t 
walRead(twalh, SWalHead **, int64_t ver); -int32_t walReadWithFp(twalh, FWalWrite writeFp, int64_t verStart, int readNum); +// read +int32_t walRead(SWal *, SWalHead **, int64_t ver); +int32_t walReadWithFp(SWal *, FWalWrite writeFp, int64_t verStart, int32_t readNum); -//lifecycle check -int32_t walFirstVer(twalh); -int32_t walPersistedVer(twalh); -int32_t walLastVer(twalh); -//int32_t walDataCorrupted(twalh); +// lifecycle check +int32_t walFirstVer(SWal *); +int32_t walPersistedVer(SWal *); +int32_t walLastVer(SWal *); +// int32_t walDataCorrupted(SWal*); #ifdef __cplusplus } diff --git a/source/libs/wal/src/wal.c b/source/libs/wal/src/wal.c index 8c0fc2b775..9331cce20b 100644 --- a/source/libs/wal/src/wal.c +++ b/source/libs/wal/src/wal.c @@ -19,6 +19,18 @@ int32_t walInit() { return 0; } void walCleanUp() {} -twalh walOpen(char *path, SWalCfg *pCfg) { return NULL; } +SWal *walOpen(char *path, SWalCfg *pCfg) { return NULL; } -int32_t walAlter(twalh pWal, SWalCfg *pCfg) { return 0; } \ No newline at end of file +int32_t walAlter(SWal *pWal, SWalCfg *pCfg) { return 0; } + +void walClose(SWal *pWal) {} + +void walFsync(SWal *pWal, bool force) {} + +int64_t walWrite(SWal *pWal, int64_t index, void *body, int32_t bodyLen) {} + +int32_t walCommit(SWal *pWal, int64_t ver) { return 0; } + +int32_t walRollback(SWal *pWal, int64_t ver) { return 0; } + +int32_t walPrune(SWal *pWal, int64_t ver) { return 0; } \ No newline at end of file diff --git a/source/server/vnode/inc/vnodeFile.h b/source/server/vnode/inc/vnodeFile.h index 31364d8c03..bea28324ee 100644 --- a/source/server/vnode/inc/vnodeFile.h +++ b/source/server/vnode/inc/vnodeFile.h @@ -23,8 +23,8 @@ extern "C" { int32_t vnodeReadCfg(int32_t vgId, SVnodeCfg *pCfg); int32_t vnodeWriteCfg(int32_t vgId, SVnodeCfg *pCfg); -int32_t vnodeReadTerm(int32_t vgId, SSyncServerState *pState); -int32_t vnodeWriteTerm(int32_t vgid, SSyncServerState *pState); +int32_t vnodeReadState(int32_t vgId, SSyncServerState *pState); +int32_t vnodeSaveState(int32_t vgid, SSyncServerState *pState); #ifdef __cplusplus } diff --git a/source/server/vnode/inc/vnodeInt.h b/source/server/vnode/inc/vnodeInt.h index ac6c77041f..90d9e7105e 100644 --- a/source/server/vnode/inc/vnodeInt.h +++ b/source/server/vnode/inc/vnodeInt.h @@ -79,7 +79,7 @@ typedef struct { SMeta *pMeta; STsdb *pTsdb; STQ *pTQ; - twalh pWal; + SWal *pWal; void *pQuery; SSyncNode *pSync; taos_queue pWriteQ; // write queue diff --git a/source/server/vnode/src/vnodeFile.c b/source/server/vnode/src/vnodeFile.c index a77c99ec34..ddcbd2689d 100644 --- a/source/server/vnode/src/vnodeFile.c +++ b/source/server/vnode/src/vnodeFile.c @@ -296,7 +296,7 @@ int32_t vnodeWriteCfg(int32_t vgId, SVnodeCfg *pCfg) { return TSDB_CODE_SUCCESS; } -int32_t vnodeReadTerm(int32_t vgId, SSyncServerState *pState) { +int32_t vnodeReadState(int32_t vgId, SSyncServerState *pState) { int32_t ret = TSDB_CODE_VND_APP_ERROR; int32_t len = 0; int32_t maxLen = 100; @@ -305,7 +305,7 @@ int32_t vnodeReadTerm(int32_t vgId, SSyncServerState *pState) { FILE *fp = NULL; char file[PATH_MAX + 30] = {0}; - sprintf(file, "%s/vnode%d/term.json", tsVnodeDir, vgId); + sprintf(file, "%s/vnode%d/state.json", tsVnodeDir, vgId); len = (int32_t)fread(content, 1, maxLen, fp); if (len <= 0) { @@ -343,9 +343,9 @@ PARSE_TERM_ERROR: return ret; } -int32_t vnodeWriteTerm(int32_t vgId, SSyncServerState *pState) { +int32_t vnodeSaveState(int32_t vgId, SSyncServerState *pState) { char file[PATH_MAX + 30] = {0}; - sprintf(file, "%s/vnode%d/term.json", tsVnodeDir, 
vgId); + sprintf(file, "%s/vnode%d/state.json", tsVnodeDir, vgId); FILE *fp = fopen(file, "w"); if (!fp) { diff --git a/source/server/vnode/src/vnodeMain.c b/source/server/vnode/src/vnodeMain.c index c08ae7708a..ced93ea6a7 100644 --- a/source/server/vnode/src/vnodeMain.c +++ b/source/server/vnode/src/vnodeMain.c @@ -130,7 +130,8 @@ static void vnodeDestroyVnode(SVnode *pVnode) { } if (pVnode->pWal) { - // todo + walClose(pVnode->pWal); + pVnode->pWal = NULL; } if (pVnode->allocator) { @@ -166,6 +167,56 @@ static void vnodeCleanupVnode(SVnode *pVnode) { vnodeRelease(pVnode); } +static inline int32_t vnodeLogWrite(struct SSyncLogStore *logStore, SyncIndex index, SSyncBuffer *pBuf) { + SVnode *pVnode = logStore->pData; // vnode status can be checked here + return walWrite(pVnode->pWal, index, pBuf->data, (int32_t)pBuf->len); +} + +static inline int32_t vnodeLogCommit(struct SSyncLogStore *logStore, SyncIndex index) { + SVnode *pVnode = logStore->pData; // vnode status can be checked here + return walCommit(pVnode->pWal, index); +} + +static inline int32_t vnodeLogPrune(struct SSyncLogStore *logStore, SyncIndex index) { + SVnode *pVnode = logStore->pData; // vnode status can be checked here + return walPrune(pVnode->pWal, index); +} + +static inline int32_t vnodeLogRollback(struct SSyncLogStore *logStore, SyncIndex index) { + SVnode *pVnode = logStore->pData; // vnode status can be checked here + return walRollback(pVnode->pWal, index); +} + +static inline int32_t vnodeSaveServerState(struct SStateManager *stateMng, SSyncServerState *pState) { + SVnode *pVnode = stateMng->pData; + return vnodeSaveState(pVnode->vgId, pState); +} + +static inline int32_t vnodeReadServerState(struct SStateManager *stateMng, SSyncServerState *pState) { + SVnode *pVnode = stateMng->pData; + return vnodeReadState(pVnode->vgId, pState); +} + +static inline int32_t vnodeApplyLog(struct SSyncFSM *fsm, SyncIndex index, const SSyncBuffer *buf, void *pData) { + return 0; +} + +static inline int32_t vnodeOnClusterChanged(struct SSyncFSM *fsm, const SSyncCluster *cluster, void *pData) { return 0; } + +static inline int32_t vnodeGetSnapshot(struct SSyncFSM *fsm, SSyncBuffer **ppBuf, int32_t *objId, bool *isLast) { + return 0; +} + +static inline int32_t vnodeApplySnapshot(struct SSyncFSM *fsm, SSyncBuffer *pBuf, int32_t objId, bool isLast) { + return 0; +} + +static inline int32_t vnodeOnRestoreDone(struct SSyncFSM *fsm) { return 0; } + +static inline void vnodeOnRollback(struct SSyncFSM *fsm, SyncIndex index, const SSyncBuffer *buf) {} + +static inline void vnodeOnRoleChanged(struct SSyncFSM *fsm, const SNodesRole *pRole) {} + static int32_t vnodeOpenVnode(int32_t vgId) { int32_t code = 0; @@ -193,7 +244,7 @@ static int32_t vnodeOpenVnode(int32_t vgId) { return 0; } - code = vnodeReadTerm(vgId, &pVnode->term); + code = vnodeReadState(vgId, &pVnode->term); if (code != TSDB_CODE_SUCCESS) { vError("vgId:%d, failed to read term file since %s", pVnode->vgId, tstrerror(code)); pVnode->cfg.dropped = 1; @@ -220,25 +271,24 @@ static int32_t vnodeOpenVnode(int32_t vgId) { // create sync node SSyncInfo syncInfo = {0}; syncInfo.vgId = vgId; - syncInfo.snapshotIndex = 0; // todo, from tsdb + syncInfo.snapshotIndex = 0; // todo, from tsdb memcpy(&syncInfo.syncCfg, &pVnode->cfg.sync, sizeof(SSyncCluster)); syncInfo.fsm.pData = pVnode; - syncInfo.fsm.applyLog = NULL; - syncInfo.fsm.onClusterChanged = NULL; - syncInfo.fsm.getSnapshot = NULL; - syncInfo.fsm.applySnapshot = NULL; - syncInfo.fsm.onRestoreDone = NULL; - syncInfo.fsm.onRollback 
= NULL; + syncInfo.fsm.applyLog = vnodeApplyLog; + syncInfo.fsm.onClusterChanged = vnodeOnClusterChanged; + syncInfo.fsm.getSnapshot = vnodeGetSnapshot; + syncInfo.fsm.applySnapshot = vnodeApplySnapshot; + syncInfo.fsm.onRestoreDone = vnodeOnRestoreDone; + syncInfo.fsm.onRollback = vnodeOnRollback; + syncInfo.fsm.onRoleChanged = vnodeOnRoleChanged; syncInfo.logStore.pData = pVnode; - syncInfo.logStore.logWrite = NULL; - syncInfo.logStore.logCommit = NULL; - syncInfo.logStore.logPrune = NULL; - syncInfo.logStore.logRollback = NULL; + syncInfo.logStore.logWrite = vnodeLogWrite; + syncInfo.logStore.logCommit = vnodeLogCommit; + syncInfo.logStore.logPrune = vnodeLogPrune; + syncInfo.logStore.logRollback = vnodeLogRollback; syncInfo.stateManager.pData = pVnode; - syncInfo.stateManager.saveServerState = NULL; - syncInfo.stateManager.readServerState = NULL; - // syncInfo.stateManager.saveCluster = NULL; - // syncInfo.stateManager.readCluster = NULL; + syncInfo.stateManager.saveServerState = vnodeSaveServerState; + syncInfo.stateManager.readServerState = vnodeReadServerState; pVnode->pSync = syncStart(&syncInfo); if (pVnode->pSync == NULL) {
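
For orientation, the following minimal sketch (not part of the patch) illustrates how the callbacks wired up above would be exercised on the write path. The helper name vnodeProcessWriteReq and the exact types of the SSyncBuffer fields are assumptions; only syncPropose(), pVnode->pSync and the data/len members used by vnodeLogWrite() come from the interfaces shown in this patch series.

// Hypothetical sketch: hand a serialized write request to the sync module.
// The sync module is then expected to persist the entry through the
// logStore callbacks (vnodeLogWrite -> walWrite) and, once the entry is
// committed, drive vnodeLogCommit and the FSM callback vnodeApplyLog.
static int32_t vnodeProcessWriteReq(SVnode *pVnode, void *pCont, int32_t contLen, void *pMsg) {
  SSyncBuffer buffer = {0};
  buffer.data = pCont;   // serialized write request body
  buffer.len  = contLen; // body length in bytes

  // isWeak = false: propose a normal (fully replicated) entry
  return syncPropose(pVnode->pSync, buffer, pMsg, false);
}

With this arrangement the vnode never advances the WAL on its own: walCommit(), walRollback() and walPrune() are reached only through vnodeLogCommit(), vnodeLogRollback() and vnodeLogPrune() when the sync module decides to call them.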