From 3626a2de181af79b9bf13adce6102b0ad7941e8f Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Wed, 23 Jun 2021 10:12:45 +0800 Subject: [PATCH 01/71] update docker cluster script --- .../OneMnodeMultipleVnodesTest.py | 5 ++-- tests/pytest/dockerCluster/basic.py | 27 ++++++++++++++----- 2 files changed, 22 insertions(+), 10 deletions(-) diff --git a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py index ee663f89b0..43e281f437 100644 --- a/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py +++ b/tests/pytest/dockerCluster/OneMnodeMultipleVnodesTest.py @@ -12,9 +12,6 @@ # -*- coding: utf-8 -*- from basic import * -from util.sql import tdSql - - class TDTestCase: @@ -36,4 +33,6 @@ td = TDTestCase() td.init() +## usage: python3 OneMnodeMultipleVnodesTest.py + diff --git a/tests/pytest/dockerCluster/basic.py b/tests/pytest/dockerCluster/basic.py index 50914b0be9..871d69790d 100644 --- a/tests/pytest/dockerCluster/basic.py +++ b/tests/pytest/dockerCluster/basic.py @@ -44,7 +44,16 @@ class BuildDockerCluser: "jnidebugFlag":"135", "qdebugFlag":"135", "maxSQLLength":"1048576" - } + } + cmd = "mkdir -p %s" % self.dockerDir + self.execCmd(cmd) + + cmd = "cp *.yml %s" % self.dockerDir + self.execCmd(cmd) + + cmd = "cp Dockerfile %s" % self.dockerDir + self.execCmd(cmd) + # execute command, and return the output # ref: https://blog.csdn.net/wowocpp/article/details/80775650 @@ -81,7 +90,7 @@ class BuildDockerCluser: def removeFile(self, rootDir, index, dir): cmd = "rm -rf %s/node%d/%s/*" % (rootDir, index, dir) self.execCmd(cmd) - + def clearEnv(self): cmd = "cd %s && docker-compose down --remove-orphans" % self.dockerDir self.execCmd(cmd) @@ -108,10 +117,14 @@ class BuildDockerCluser: self.execCmd(cmd) def updateLocalhosts(self): - cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts" + cmd = "grep '172.27.0.7 *tdnode1' /etc/hosts | sed 's: ::g'" result = self.execCmdAndGetOutput(cmd) - if result and not result.isspace(): + print(result) + if result is None or result.isspace(): + print("==========") cmd = "echo '172.27.0.7 tdnode1' >> /etc/hosts" + display = "echo %s" % cmd + self.execCmd(display) self.execCmd(cmd) def deploy(self): @@ -138,13 +151,13 @@ class BuildDockerCluser: if self.numOfNodes < 2 or self.numOfNodes > 10: print("the number of nodes must be between 2 and 10") exit(0) - self.clearEnv() - self.createDirs() self.updateLocalhosts() self.deploy() def run(self): - cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir) + cmd = "./buildClusterEnv.sh -n %d -v %s -d %s" % (self.numOfNodes, self.getTaosdVersion(), self.dockerDir) + display = "echo %s" % cmd + self.execCmd(display) self.execCmd(cmd) self.getConnection() self.createDondes() From 15014c2e19e51353e1a2f49a5d1bf6d3a9729733 Mon Sep 17 00:00:00 2001 From: wpan Date: Thu, 24 Jun 2021 10:50:10 +0800 Subject: [PATCH 02/71] fix bug --- src/client/src/tscSubquery.c | 2 +- src/query/inc/qExecutor.h | 1 + src/query/src/qExecutor.c | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSubquery.c b/src/client/src/tscSubquery.c index c3df4773e1..4d97fef52f 100644 --- a/src/client/src/tscSubquery.c +++ b/src/client/src/tscSubquery.c @@ -3604,10 +3604,10 @@ void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGr // todo refactor: filter should not be applied here. 
createFilterInfo(pQueryAttr, 0); - pQueryAttr->numOfFilterCols = 0; SArray* pa = NULL; if (stage == MASTER_SCAN) { + pQueryAttr->createFilterOperator = false; // no need for parent query pa = createExecOperatorPlan(pQueryAttr); } else { pa = createGlobalMergePlan(pQueryAttr); diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 9cd1c5b033..dbee1a7812 100644 --- a/src/query/inc/qExecutor.h +++ b/src/query/inc/qExecutor.h @@ -192,6 +192,7 @@ typedef struct SQueryAttr { bool needReverseScan; // need reverse scan bool distinctTag; // distinct tag query bool stateWindow; // window State on sub/normal table + bool createFilterOperator; // if filter operator is needed int32_t interBufSize; // intermediate buffer sizse int32_t havingNum; // having expr number diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 97a6cf807c..63e0025550 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -7045,6 +7045,8 @@ int32_t createFilterInfo(SQueryAttr* pQueryAttr, uint64_t qId) { doCreateFilterInfo(pQueryAttr->tableCols, pQueryAttr->numOfCols, pQueryAttr->numOfFilterCols, &pQueryAttr->pFilterInfo, qId); + pQueryAttr->createFilterOperator = true; + return TSDB_CODE_SUCCESS; } From d02ce1e2dfb6a885afef0b13b0b601bc75cb7e75 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 24 Jun 2021 15:23:33 +0800 Subject: [PATCH 03/71] Hotfix/sangshuduo/td 4823 taosdemo gettablename (#6606) * [TD-4823]: taosdemo getTableName return empty. * fix typo. * check table name is empty in early stage. --- src/kit/taosdemo/taosdemo.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6b12e66cb9..6314aa6a00 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -2497,6 +2497,13 @@ static int getChildNameOfSuperTableWithLimitAndOffset(TAOS * taos, char* pTblName = childTblName; while((row = taos_fetch_row(res)) != NULL) { int32_t* len = taos_fetch_lengths(res); + + if (0 == strlen((char *)row[0])) { + errorPrint("%s() LN%d, No.%"PRId64" table return empty name\n", + __func__, __LINE__, count); + exit(-1); + } + tstrncpy(pTblName, (char *)row[0], len[0]+1); //printf("==== sub table name: %s\n", pTblName); count++; @@ -6296,16 +6303,6 @@ static void startMultiThreadInsertData(int threads, char* db_name, } } - // read sample data from file first - if ((superTblInfo) && (0 == strncasecmp(superTblInfo->dataSource, - "sample", strlen("sample")))) { - if (0 != prepareSampleDataForSTable(superTblInfo)) { - errorPrint("%s() LN%d, prepare sample data for stable failed!\n", - __func__, __LINE__); - exit(-1); - } - } - TAOS* taos0 = taos_connect( g_Dbs.host, g_Dbs.user, g_Dbs.password, db_name, g_Dbs.port); From eed57b8a5b46aeaed9231973fcd214eef03603ff Mon Sep 17 00:00:00 2001 From: wpan Date: Thu, 24 Jun 2021 15:28:26 +0800 Subject: [PATCH 04/71] fix bug --- src/query/src/qPlan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index ee587a515d..d2eb9afb22 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -623,7 +623,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); } } else { // diff/add/multiply/subtract/division - if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->vgId == 0) { // todo refactor + if (pQueryAttr->numOfFilterCols > 0 && pQueryAttr->createFilterOperator && pQueryAttr->vgId == 0) { // todo refactor op = OP_Filter; taosArrayPush(plan, 
&op); } else { From b945a7fd333dbabb6bf0f8216e5a9cea03c0e5c2 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Thu, 24 Jun 2021 16:33:57 +0800 Subject: [PATCH 05/71] [td-4791] check sql function in outer query. --- src/client/src/tscSQLParser.c | 58 ++++++++++-------- src/query/inc/qExecutor.h | 8 +-- src/query/src/qAggMain.c | 1 - src/query/src/qExecutor.c | 54 ++++++++--------- src/query/src/qPlan.c | 14 ++--- tests/script/general/parser/nestquery.sim | 72 +++++++++++++++++++++-- 6 files changed, 138 insertions(+), 69 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e94f77d254..815442a038 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2069,33 +2069,29 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS const char* name, int32_t resColIdx, SColumnIndex* pColIndex, bool finalResult) { const char* msg1 = "not support column types"; - int16_t type = 0; - int16_t bytes = 0; - int32_t functionID = cvtFunc.execFuncId; - - if (functionID == TSDB_FUNC_SPREAD) { + int32_t f = cvtFunc.execFuncId; + if (f == TSDB_FUNC_SPREAD) { int32_t t1 = pSchema->type; - if (t1 == TSDB_DATA_TYPE_BINARY || t1 == TSDB_DATA_TYPE_NCHAR || t1 == TSDB_DATA_TYPE_BOOL) { + if (IS_VAR_DATA_TYPE(t1) || t1 == TSDB_DATA_TYPE_BOOL) { invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); return -1; - } else { - type = TSDB_DATA_TYPE_DOUBLE; - bytes = tDataTypes[type].bytes; } - } else { - type = pSchema->type; - bytes = pSchema->bytes; } - SExprInfo* pExpr = tscExprAppend(pQueryInfo, functionID, pColIndex, type, bytes, getNewResColId(pCmd), bytes, false); + int16_t resType = 0; + int16_t resBytes = 0; + int32_t interBufSize = 0; + + getResultDataInfo(pSchema->type, pSchema->bytes, f, 0, &resType, &resBytes, &interBufSize, 0, false); + SExprInfo* pExpr = tscExprAppend(pQueryInfo, f, pColIndex, resType, resBytes, getNewResColId(pCmd), interBufSize, false); tstrncpy(pExpr->base.aliasName, name, tListLen(pExpr->base.aliasName)); - if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != functionID) { + if (cvtFunc.originFuncId == TSDB_FUNC_LAST_ROW && cvtFunc.originFuncId != f) { pExpr->base.colInfo.flag |= TSDB_COL_NULL; } // set reverse order scan data blocks for last query - if (functionID == TSDB_FUNC_LAST) { + if (f == TSDB_FUNC_LAST) { pExpr->base.numOfParams = 1; pExpr->base.param[0].i64 = TSDB_ORDER_DESC; pExpr->base.param[0].nType = TSDB_DATA_TYPE_INT; @@ -2108,7 +2104,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS // if it is not in the final result, do not add it SColumnList ids = createColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex); if (finalResult) { - insertResultField(pQueryInfo, resColIdx, &ids, bytes, (int8_t)type, pExpr->base.aliasName, pExpr); + insertResultField(pQueryInfo, resColIdx, &ids, pSchema->bytes, (int8_t)pSchema->type, pExpr->base.aliasName, pExpr); } else { tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } @@ -2557,8 +2553,9 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tVariant* pVariant = &pParamElem[1].pNode->value; - int8_t resultType = pSchema->type; - int16_t resultSize = pSchema->bytes; + int16_t resultType = pSchema->type; + int16_t resultSize = pSchema->bytes; + int32_t interResult = 0; char val[8] = {0}; @@ -2571,8 +2568,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col return 
invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg5); } - resultSize = sizeof(double); - resultType = TSDB_DATA_TYPE_DOUBLE; + getResultDataInfo(pSchema->type, pSchema->bytes, functionId, 0, &resultType, &resultSize, &interResult, 0, false); /* * sql function transformation @@ -2582,7 +2578,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col tscInsertPrimaryTsSourceColumn(pQueryInfo, pTableMetaInfo->pTableMeta->id.uid); colIndex += 1; // the first column is ts - pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), resultSize, false); + pExpr = tscExprAppend(pQueryInfo, functionId, &index, resultType, resultSize, getNewResColId(pCmd), interResult, false); tscExprAddParams(&pExpr->base, val, TSDB_DATA_TYPE_DOUBLE, sizeof(double)); } else { tVariantDump(pVariant, val, TSDB_DATA_TYPE_BIGINT, true); @@ -7784,8 +7780,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg3 = "start(end) time of query range required or time range too large"; const char* msg4 = "interval query not supported, since the result of sub query not include valid timestamp column"; const char* msg5 = "only tag query not compatible with normal column filter"; - const char* msg6 = "not support stddev/percentile in outer query yet"; - const char* msg7 = "drivative requires timestamp column exists in subquery"; + const char* msg6 = "not support stddev/percentile/interp in the outer query yet"; + const char* msg7 = "derivative/twa/irate requires timestamp column exists in subquery"; int32_t code = TSDB_CODE_SUCCESS; @@ -7828,15 +7824,17 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateSelectNodeList(pCmd, pQueryInfo, pSqlNode->pSelNodeList, false, false, timeWindowQuery) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } + // parse the window_state if (validateStateWindowNode(pCmd, pQueryInfo, pSqlNode, false) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } + // todo NOT support yet for(int32_t i = 0; i < tscNumOfExprs(pQueryInfo); ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); int32_t f = pExpr->base.functionId; - if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT) { + if (f == TSDB_FUNC_STDDEV || f == TSDB_FUNC_PERCT || f == TSDB_FUNC_INTERP) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg6); } @@ -7851,9 +7849,17 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, 0); - if (tscNumOfExprs(pQueryInfo) > 1) { + int32_t numOfExprs = tscNumOfExprs(pQueryInfo); + if (numOfExprs == 1) { + SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); + int32_t f = pExpr->base.functionId; + if (f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); + } + } else { SExprInfo* pExpr = tscExprGet(pQueryInfo, 1); - if (pExpr->base.functionId == TSDB_FUNC_DERIVATIVE && pSchema->type != TSDB_DATA_TYPE_TIMESTAMP) { + int32_t f = pExpr->base.functionId; + if ((f == TSDB_FUNC_DERIVATIVE || f == TSDB_FUNC_TWA || f == TSDB_FUNC_IRATE) && pSchema->type != TSDB_DATA_TYPE_TIMESTAMP) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg7); } } diff --git a/src/query/inc/qExecutor.h b/src/query/inc/qExecutor.h index 9cd1c5b033..1f1f9f3e60 100644 --- a/src/query/inc/qExecutor.h +++ 
b/src/query/inc/qExecutor.h @@ -285,7 +285,7 @@ enum OPERATOR_TYPE_E { OP_TagScan = 4, OP_TableBlockInfoScan= 5, OP_Aggregate = 6, - OP_Arithmetic = 7, + OP_Project = 7, OP_Groupby = 8, OP_Limit = 9, OP_SLimit = 10, @@ -413,13 +413,13 @@ typedef struct SAggOperatorInfo { uint32_t seed; } SAggOperatorInfo; -typedef struct SArithOperatorInfo { +typedef struct SProjectOperatorInfo { SOptrBasicInfo binfo; int32_t bufCapacity; uint32_t seed; SSDataBlock *existDataBlock; -} SArithOperatorInfo; +} SProjectOperatorInfo; typedef struct SLimitOperatorInfo { int64_t limit; @@ -513,7 +513,7 @@ SOperatorInfo* createTableScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* SOperatorInfo* createTableSeqScanOperator(void* pTsdbQueryHandle, SQueryRuntimeEnv* pRuntimeEnv); SOperatorInfo* createAggregateOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); -SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); +SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); SOperatorInfo* createLimitOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream); SOperatorInfo* createTimeIntervalOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); SOperatorInfo* createSWindowOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput); diff --git a/src/query/src/qAggMain.c b/src/query/src/qAggMain.c index 8efc4aad4c..676e5b6ce6 100644 --- a/src/query/src/qAggMain.c +++ b/src/query/src/qAggMain.c @@ -74,7 +74,6 @@ } while (0); void noop1(SQLFunctionCtx *UNUSED_PARAM(pCtx)) {} -void noop2(SQLFunctionCtx *UNUSED_PARAM(pCtx), int32_t UNUSED_PARAM(index)) {} void doFinalizer(SQLFunctionCtx *pCtx) { RESET_RESULT_INFO(GET_RES_INFO(pCtx)); } diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 97a6cf807c..49ab0af680 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -184,7 +184,7 @@ static int32_t getNumOfScanTimes(SQueryAttr* pQueryAttr); static void destroyBasicOperatorInfo(void* param, int32_t numOfOutput); static void destroySFillOperatorInfo(void* param, int32_t numOfOutput); static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput); -static void destroyArithOperatorInfo(void* param, int32_t numOfOutput); +static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput); static void destroyTagScanOperatorInfo(void* param, int32_t numOfOutput); static void destroySWindowOperatorInfo(void* param, int32_t numOfOutput); static void destroyStateWindowOperatorInfo(void* param, int32_t numOfOutput); @@ -912,7 +912,7 @@ void setInputDataBlock(SOperatorInfo* pOperator, SQLFunctionCtx* pCtx, SSDataBlo doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order); } } else { - if (/*pCtx[0].pInput == NULL && */pBlock->pDataBlock != NULL) { + if (pBlock->pDataBlock != NULL) { doSetInputDataBlock(pOperator, pCtx, pBlock, order); } else { doSetInputDataBlockInfo(pOperator, pCtx, pBlock, order); @@ -978,7 +978,7 @@ static void doAggregateImpl(SOperatorInfo* pOperator, TSKEY startTs, SQLFunction } } -static void arithmeticApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) { +static void projectApplyFunctions(SQueryRuntimeEnv *pRuntimeEnv, SQLFunctionCtx *pCtx, int32_t numOfOutput) { SQueryAttr *pQueryAttr = 
pRuntimeEnv->pQueryAttr; for (int32_t k = 0; k < numOfOutput; ++k) { @@ -1806,17 +1806,17 @@ static int32_t setupQueryRuntimeEnv(SQueryRuntimeEnv *pRuntimeEnv, int32_t numOf break; } - case OP_Arithmetic: { // TODO refactor to remove arith operator. + case OP_Project: { // TODO refactor to remove arith operator. SOperatorInfo* prev = pRuntimeEnv->proot; if (i == 0) { - pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); + pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr1, pQueryAttr->numOfOutput); if (pRuntimeEnv->proot != NULL && prev->operatorType != OP_DummyInput && prev->operatorType != OP_Join) { // TODO refactor setTableScanFilterOperatorInfo(prev->info, pRuntimeEnv->proot); } } else { prev = pRuntimeEnv->proot; assert(pQueryAttr->pExpr2 != NULL); - pRuntimeEnv->proot = createArithOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2); + pRuntimeEnv->proot = createProjectOperatorInfo(pRuntimeEnv, prev, pQueryAttr->pExpr2, pQueryAttr->numOfExpr2); } break; } @@ -4578,8 +4578,8 @@ void setTableScanFilterOperatorInfo(STableScanInfo* pTableScanInfo, SOperatorInf pTableScanInfo->pResultRowInfo = &pInfo->resultRowInfo; pTableScanInfo->rowCellInfoOffset = pInfo->rowCellInfoOffset; - } else if (pDownstream->operatorType == OP_Arithmetic) { - SArithOperatorInfo *pInfo = pDownstream->info; + } else if (pDownstream->operatorType == OP_Project) { + SProjectOperatorInfo *pInfo = pDownstream->info; pTableScanInfo->pCtx = pInfo->binfo.pCtx; pTableScanInfo->pResultRowInfo = &pInfo->binfo.resultRowInfo; @@ -4934,23 +4934,23 @@ static SSDataBlock* doSTableAggregate(void* param, bool* newgroup) { return pInfo->pRes; } -static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { +static SSDataBlock* doProjectOperation(void* param, bool* newgroup) { SOperatorInfo* pOperator = (SOperatorInfo*) param; - SArithOperatorInfo* pArithInfo = pOperator->info; + SProjectOperatorInfo* pProjectInfo = pOperator->info; SQueryRuntimeEnv* pRuntimeEnv = pOperator->pRuntimeEnv; - SOptrBasicInfo *pInfo = &pArithInfo->binfo; + SOptrBasicInfo *pInfo = &pProjectInfo->binfo; SSDataBlock* pRes = pInfo->pRes; int32_t order = pRuntimeEnv->pQueryAttr->order.order; pRes->info.rows = 0; - if (pArithInfo->existDataBlock) { // TODO refactor + if (pProjectInfo->existDataBlock) { // TODO refactor STableQueryInfo* pTableQueryInfo = pRuntimeEnv->current; - SSDataBlock* pBlock = pArithInfo->existDataBlock; - pArithInfo->existDataBlock = NULL; + SSDataBlock* pBlock = pProjectInfo->existDataBlock; + pProjectInfo->existDataBlock = NULL; *newgroup = true; // todo dynamic set tags @@ -4960,9 +4960,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order); - updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); + updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows); - arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); + projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); if (pTableQueryInfo != NULL) { updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); } @@ -4990,7 +4990,7 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { // Return result of the previous group in the firstly. 
if (*newgroup) { if (pRes->info.rows > 0) { - pArithInfo->existDataBlock = pBlock; + pProjectInfo->existDataBlock = pBlock; clearNumOfRes(pInfo->pCtx, pOperator->numOfOutput); return pInfo->pRes; } else { // init output buffer for a new group data @@ -5010,9 +5010,9 @@ static SSDataBlock* doArithmeticOperation(void* param, bool* newgroup) { // the pDataBlock are always the same one, no need to call this again setInputDataBlock(pOperator, pInfo->pCtx, pBlock, order); - updateOutputBuf(&pArithInfo->binfo, &pArithInfo->bufCapacity, pBlock->info.rows); + updateOutputBuf(&pProjectInfo->binfo, &pProjectInfo->bufCapacity, pBlock->info.rows); - arithmeticApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); + projectApplyFunctions(pRuntimeEnv, pInfo->pCtx, pOperator->numOfOutput); if (pTableQueryInfo != NULL) { updateTableIdInfo(pTableQueryInfo, pBlock, pRuntimeEnv->pTableRetrieveTsMap, order); } @@ -5649,8 +5649,8 @@ static void destroyGroupbyOperatorInfo(void* param, int32_t numOfOutput) { tfree(pInfo->prevData); } -static void destroyArithOperatorInfo(void* param, int32_t numOfOutput) { - SArithOperatorInfo* pInfo = (SArithOperatorInfo*) param; +static void destroyProjectOperatorInfo(void* param, int32_t numOfOutput) { + SProjectOperatorInfo* pInfo = (SProjectOperatorInfo*) param; doDestroyBasicInfo(&pInfo->binfo, numOfOutput); } @@ -5696,8 +5696,8 @@ SOperatorInfo* createMultiTableAggOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SO return pOperator; } -SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { - SArithOperatorInfo* pInfo = calloc(1, sizeof(SArithOperatorInfo)); +SOperatorInfo* createProjectOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorInfo* upstream, SExprInfo* pExpr, int32_t numOfOutput) { + SProjectOperatorInfo* pInfo = calloc(1, sizeof(SProjectOperatorInfo)); pInfo->seed = rand(); pInfo->bufCapacity = pRuntimeEnv->resultInfo.capacity; @@ -5710,8 +5710,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI setDefaultOutputBuf(pRuntimeEnv, pBInfo, pInfo->seed, MASTER_SCAN); SOperatorInfo* pOperator = calloc(1, sizeof(SOperatorInfo)); - pOperator->name = "ArithmeticOperator"; - pOperator->operatorType = OP_Arithmetic; + pOperator->name = "ProjectOperator"; + pOperator->operatorType = OP_Project; pOperator->blockingOptr = false; pOperator->status = OP_IN_EXECUTING; pOperator->info = pInfo; @@ -5719,8 +5719,8 @@ SOperatorInfo* createArithOperatorInfo(SQueryRuntimeEnv* pRuntimeEnv, SOperatorI pOperator->numOfOutput = numOfOutput; pOperator->pRuntimeEnv = pRuntimeEnv; - pOperator->exec = doArithmeticOperation; - pOperator->cleanup = destroyArithOperatorInfo; + pOperator->exec = doProjectOperation; + pOperator->cleanup = destroyProjectOperatorInfo; appendUpstream(pOperator, upstream); return pOperator; diff --git a/src/query/src/qPlan.c b/src/query/src/qPlan.c index ee587a515d..214ef32cd4 100644 --- a/src/query/src/qPlan.c +++ b/src/query/src/qPlan.c @@ -565,7 +565,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } @@ -585,7 +585,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { } if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else if (pQueryAttr->sw.gap > 0) { @@ -593,7 +593,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { 
taosArrayPush(plan, &op); if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else if (pQueryAttr->stateWindow) { @@ -601,7 +601,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { taosArrayPush(plan, &op); if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else if (pQueryAttr->simpleAgg) { @@ -619,7 +619,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { } if (pQueryAttr->pExpr2 != NULL && !pQueryAttr->stableQuery) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } else { // diff/add/multiply/subtract/division @@ -627,7 +627,7 @@ SArray* createExecOperatorPlan(SQueryAttr* pQueryAttr) { op = OP_Filter; taosArrayPush(plan, &op); } else { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } @@ -665,7 +665,7 @@ SArray* createGlobalMergePlan(SQueryAttr* pQueryAttr) { } if (pQueryAttr->pExpr2 != NULL) { - op = OP_Arithmetic; + op = OP_Project; taosArrayPush(plan, &op); } } diff --git a/tests/script/general/parser/nestquery.sim b/tests/script/general/parser/nestquery.sim index 8249d9197f..fd56a91dd6 100644 --- a/tests/script/general/parser/nestquery.sim +++ b/tests/script/general/parser/nestquery.sim @@ -180,20 +180,82 @@ if $data21 != 49.500000000 then endi #define TSDB_FUNC_APERCT 7 -#define TSDB_FUNC_LAST_ROW 10 #define TSDB_FUNC_TWA 14 #define TSDB_FUNC_LEASTSQR 15 -#define TSDB_FUNC_ARITHM 23 #define TSDB_FUNC_DIFF 24 #define TSDB_FUNC_INTERP 28 -#define TSDB_FUNC_RATE 29 #define TSDB_FUNC_IRATE 30 #define TSDB_FUNC_DERIVATIVE 32 sql_error select stddev(c1) from (select c1 from nest_tb0); sql_error select percentile(c1, 20) from (select * from nest_tb0); +sql_error select interp(c1) from (select * from nest_tb0); +sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0); +sql_error select twa(c1) from (select c1 from nest_tb0); +sql_error select irate(c1) from (select c1 from nest_tb0); +sql_error select diff(c1), twa(c1) from (select * from nest_tb0); +sql_error select irate(c1), interp(c1), twa(c1) from (select * from nest_tb0); + +sql select apercentile(c1, 50) from (select * from nest_tb0) interval(1d) +sql select twa(c1) from (select * from nest_tb0); +sql select leastsquares(c1, 1, 1) from (select * from nest_tb0); +sql select irate(c1) from (select * from nest_tb0); sql select avg(c1),sum(c2), max(c3), min(c4), count(*), first(c7), last(c7),spread(c6) from (select * from nest_tb0) interval(1d); +if $rows != 7 then + return -1 +endi + +if $data00 != @20-09-15 00:00:00.000@ then + return -1 +endi + +if $data01 != 48.666666667 then + print expect 48.666666667, actual: $data01 + return -1 +endi + +if $data02 != 70080.000000000 then + return -1 +endi + +if $data03 != 99 then + return -1 +endi + +if $data04 != 0 then + return -1 +endi + +if $data05 != 1440 then + return -1 +endi + +if $data06 != 0 then + print $data06 + return -1 +endi + +if $data07 != 1 then + return -1 +endi + +if $data08 != 99.000000000 then + print expect 99.000000000, actual: $data08 + return -1 +endi + +if $data10 != @20-09-16 00:00:00.000@ then + return -1 +endi + +if $data11 != 49.777777778 then + return -1 +endi + +if $data12 != 71680.000000000 then + return -1 +endi sql select top(x, 20) from (select c1 x from nest_tb0); @@ -207,6 +269,9 @@ print ===================> group by + having +print =========================> ascending order/descending order + + print =========================> nest query join @@ -273,7 +338,6 @@ if 
$data03 != @20-09-15 00:00:00.000@ then return -1 endi -sql_error select derivative(val, 1s, 0) from (select c1 val from nest_tb0); sql select diff(val) from (select c1 val from nest_tb0); if $rows != 9999 then return -1 From 44621d8a0f8b1da94bae86b9e3b145ab513baed9 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Thu, 24 Jun 2021 22:44:46 +0800 Subject: [PATCH 06/71] Hotfix/sangshuduo/td 4892 taosdemo sub fetch (#6614) * [TD-4892]: taosdemo subscribe fetch result. * fix stbname length. * restrict prefix length. * submit empty * fix minor code. --- src/kit/taosdemo/taosdemo.c | 127 ++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 71 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 6314aa6a00..abf3c74436 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -79,10 +79,9 @@ enum TEST_MODE { #define MAX_SQL_SIZE 65536 #define BUFFER_SIZE (65536*2) -#define COND_BUF_LEN BUFFER_SIZE - 30 +#define COND_BUF_LEN (BUFFER_SIZE - 30) #define MAX_USERNAME_SIZE 64 #define MAX_PASSWORD_SIZE 64 -#define MAX_DB_NAME_SIZE 64 #define MAX_HOSTNAME_SIZE 64 #define MAX_TB_NAME_SIZE 64 #define MAX_DATA_SIZE (16*1024)+20 // max record len: 16*1024, timestamp string and ,('') need extra space @@ -90,7 +89,7 @@ enum TEST_MODE { #define OPT_ABORT 1 /* –abort */ #define STRING_LEN 60000 #define MAX_PREPARED_RAND 1000000 -#define MAX_FILE_NAME_LEN 256 +#define MAX_FILE_NAME_LEN 256 // max file name length on linux is 255. #define MAX_SAMPLES_ONCE_FROM_FILE 10000 #define MAX_NUM_DATATYPE 10 @@ -195,13 +194,6 @@ enum _describe_table_index { TSDB_MAX_DESCRIBE_METRIC }; -typedef struct { - char field[TSDB_COL_NAME_LEN + 1]; - char type[16]; - int length; - char note[128]; -} SColDes; - /* Used by main to communicate with parse_opt. 
*/ static char *g_dupstr = NULL; @@ -247,16 +239,16 @@ typedef struct SArguments_S { } SArguments; typedef struct SColumn_S { - char field[TSDB_COL_NAME_LEN + 1]; - char dataType[MAX_TB_NAME_SIZE]; + char field[TSDB_COL_NAME_LEN]; + char dataType[16]; uint32_t dataLen; char note[128]; } StrColumn; typedef struct SSuperTable_S { - char sTblName[MAX_TB_NAME_SIZE+1]; - char dataSource[MAX_TB_NAME_SIZE+1]; // rand_gen or sample - char childTblPrefix[MAX_TB_NAME_SIZE]; + char sTblName[TSDB_TABLE_NAME_LEN]; + char dataSource[MAX_TB_NAME_SIZE]; // rand_gen or sample + char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq char insertMode[MAX_TB_NAME_SIZE]; // taosc, rest uint16_t childTblExists; int64_t childTblCount; @@ -277,8 +269,8 @@ typedef struct SSuperTable_S { int64_t timeStampStep; char startTimestamp[MAX_TB_NAME_SIZE]; char sampleFormat[MAX_TB_NAME_SIZE]; // csv, json - char sampleFile[MAX_FILE_NAME_LEN+1]; - char tagsFile[MAX_FILE_NAME_LEN+1]; + char sampleFile[MAX_FILE_NAME_LEN]; + char tagsFile[MAX_FILE_NAME_LEN]; uint32_t columnCount; StrColumn columns[MAX_COLUMN_COUNT]; @@ -305,7 +297,7 @@ typedef struct SSuperTable_S { } SSuperTable; typedef struct { - char name[TSDB_DB_NAME_LEN + 1]; + char name[TSDB_DB_NAME_LEN]; char create_time[32]; int64_t ntables; int32_t vgroups; @@ -341,11 +333,11 @@ typedef struct SDbCfg_S { int cache; int blocks; int quorum; - char precision[MAX_TB_NAME_SIZE]; + char precision[8]; } SDbCfg; typedef struct SDataBase_S { - char dbName[MAX_DB_NAME_SIZE]; + char dbName[TSDB_DB_NAME_LEN]; bool drop; // 0: use exists, 1: if exists, drop then new create SDbCfg dbCfg; uint64_t superTblCount; @@ -353,14 +345,14 @@ typedef struct SDataBase_S { } SDataBase; typedef struct SDbs_S { - char cfgDir[MAX_FILE_NAME_LEN+1]; + char cfgDir[MAX_FILE_NAME_LEN]; char host[MAX_HOSTNAME_SIZE]; struct sockaddr_in serv_addr; uint16_t port; char user[MAX_USERNAME_SIZE]; char password[MAX_PASSWORD_SIZE]; - char resultFile[MAX_FILE_NAME_LEN+1]; + char resultFile[MAX_FILE_NAME_LEN]; bool use_metric; bool insert_only; bool do_aggreFunc; @@ -387,7 +379,7 @@ typedef struct SpecifiedQueryInfo_S { bool subscribeRestart; int subscribeKeepProgress; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume[MAX_QUERY_SQL_COUNT]; int endAfterConsume[MAX_QUERY_SQL_COUNT]; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; @@ -398,7 +390,7 @@ typedef struct SpecifiedQueryInfo_S { } SpecifiedQueryInfo; typedef struct SuperQueryInfo_S { - char sTblName[MAX_TB_NAME_SIZE+1]; + char sTblName[TSDB_TABLE_NAME_LEN]; uint64_t queryInterval; // 0: unlimit > 0 loop/s uint32_t threadCnt; uint32_t asyncMode; // 0: sync, 1: async @@ -407,10 +399,10 @@ typedef struct SuperQueryInfo_S { int subscribeKeepProgress; uint64_t queryTimes; int64_t childTblCount; - char childTblPrefix[MAX_TB_NAME_SIZE]; + char childTblPrefix[TSDB_TABLE_NAME_LEN - 20]; // 20 characters reserved for seq int sqlCount; char sql[MAX_QUERY_SQL_COUNT][MAX_QUERY_SQL_LENGTH+1]; - char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN+1]; + char result[MAX_QUERY_SQL_COUNT][MAX_FILE_NAME_LEN]; int resubAfterConsume; int endAfterConsume; TAOS_SUB* tsub[MAX_QUERY_SQL_COUNT]; @@ -420,13 +412,13 @@ typedef struct SuperQueryInfo_S { } SuperQueryInfo; typedef struct SQueryMetaInfo_S { - char cfgDir[MAX_FILE_NAME_LEN+1]; + char cfgDir[MAX_FILE_NAME_LEN]; char host[MAX_HOSTNAME_SIZE]; uint16_t port; struct sockaddr_in 
serv_addr; char user[MAX_USERNAME_SIZE]; char password[MAX_PASSWORD_SIZE]; - char dbName[MAX_DB_NAME_SIZE+1]; + char dbName[TSDB_DB_NAME_LEN]; char queryMode[MAX_TB_NAME_SIZE]; // taosc, rest SpecifiedQueryInfo specifiedQueryInfo; @@ -438,11 +430,11 @@ typedef struct SThreadInfo_S { TAOS * taos; TAOS_STMT *stmt; int threadID; - char db_name[MAX_DB_NAME_SIZE+1]; + char db_name[TSDB_DB_NAME_LEN]; uint32_t time_precision; char filePath[4096]; FILE *fp; - char tb_prefix[MAX_TB_NAME_SIZE]; + char tb_prefix[TSDB_TABLE_NAME_LEN]; uint64_t start_table_from; uint64_t end_table_to; int64_t ntables; @@ -608,7 +600,7 @@ SArguments g_args = { 1, // query_times 0, // interlace_rows; 30000, // num_of_RPR - (1024*1024), // max_sql_len + (1024*1024), // max_sql_len 10000, // num_of_tables 10000, // num_of_DPT 0, // abort @@ -3038,7 +3030,7 @@ static int startMultiThreadCreateChildTable( for (int64_t i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE); + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); pThreadInfo->superTblInfo = superTblInfo; verbosePrint("%s() %d db_name: %s\n", __func__, __LINE__, db_name); pThreadInfo->taos = taos_connect( @@ -3329,7 +3321,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( goto PARSE_OVER; } //tstrncpy(superTbls->columns[k].dataType, dataType->valuestring, MAX_TB_NAME_SIZE); - tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1); cJSON* dataLen = cJSON_GetObjectItem(column, "len"); if (dataLen && dataLen->type == cJSON_Number) { @@ -3344,7 +3336,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( for (int n = 0; n < count; ++n) { tstrncpy(superTbls->columns[index].dataType, - columnCase.dataType, MAX_TB_NAME_SIZE); + columnCase.dataType, strlen(columnCase.dataType) + 1); superTbls->columns[index].dataLen = columnCase.dataLen; index++; } @@ -3400,7 +3392,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( __func__, __LINE__); goto PARSE_OVER; } - tstrncpy(columnCase.dataType, dataType->valuestring, MAX_TB_NAME_SIZE); + tstrncpy(columnCase.dataType, dataType->valuestring, strlen(dataType->valuestring) + 1); cJSON* dataLen = cJSON_GetObjectItem(tag, "len"); if (dataLen && dataLen->type == cJSON_Number) { @@ -3415,7 +3407,7 @@ static bool getColumnAndTagTypeFromInsertJsonFile( for (int n = 0; n < count; ++n) { tstrncpy(superTbls->tags[index].dataType, columnCase.dataType, - MAX_TB_NAME_SIZE); + strlen(columnCase.dataType) + 1); superTbls->tags[index].dataLen = columnCase.dataLen; index++; } @@ -3638,7 +3630,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { printf("ERROR: failed to read json, db name not found\n"); goto PARSE_OVER; } - tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].dbName, dbName->valuestring, TSDB_DB_NAME_LEN); cJSON *drop = cJSON_GetObjectItem(dbinfo, "drop"); if (drop && drop->type == cJSON_String && drop->valuestring != NULL) { @@ -3659,10 +3651,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (precision && precision->type == cJSON_String && precision->valuestring != NULL) { tstrncpy(g_Dbs.db[i].dbCfg.precision, precision->valuestring, - MAX_DB_NAME_SIZE); + 8); } else if (!precision) { - //tstrncpy(g_Dbs.db[i].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); - memset(g_Dbs.db[i].dbCfg.precision, 0, MAX_DB_NAME_SIZE); + memset(g_Dbs.db[i].dbCfg.precision, 0, 8); } else { 
printf("ERROR: failed to read json, precision not found\n"); goto PARSE_OVER; @@ -3839,7 +3830,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].sTblName, stbName->valuestring, - MAX_TB_NAME_SIZE); + TSDB_TABLE_NAME_LEN); cJSON *prefix = cJSON_GetObjectItem(stbInfo, "childtable_prefix"); if (!prefix || prefix->type != cJSON_String || prefix->valuestring == NULL) { @@ -3847,7 +3838,7 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { goto PARSE_OVER; } tstrncpy(g_Dbs.db[i].superTbls[j].childTblPrefix, prefix->valuestring, - MAX_DB_NAME_SIZE); + TSDB_TABLE_NAME_LEN - 20); cJSON *autoCreateTbl = cJSON_GetObjectItem(stbInfo, "auto_create_table"); if (autoCreateTbl @@ -3915,9 +3906,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (dataSource && dataSource->type == cJSON_String && dataSource->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, - dataSource->valuestring, MAX_DB_NAME_SIZE); + dataSource->valuestring, TSDB_DB_NAME_LEN); } else if (!dataSource) { - tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].dataSource, "rand", TSDB_DB_NAME_LEN); } else { errorPrint("%s() LN%d, failed to read json, data_source not found\n", __func__, __LINE__); @@ -3975,10 +3966,10 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { cJSON *ts = cJSON_GetObjectItem(stbInfo, "start_timestamp"); if (ts && ts->type == cJSON_String && ts->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - ts->valuestring, MAX_DB_NAME_SIZE); + ts->valuestring, TSDB_DB_NAME_LEN); } else if (!ts) { tstrncpy(g_Dbs.db[i].superTbls[j].startTimestamp, - "now", MAX_DB_NAME_SIZE); + "now", TSDB_DB_NAME_LEN); } else { printf("ERROR: failed to read json, start_timestamp not found\n"); goto PARSE_OVER; @@ -3998,9 +3989,9 @@ static bool getMetaFromInsertJsonFile(cJSON* root) { if (sampleFormat && sampleFormat->type == cJSON_String && sampleFormat->valuestring != NULL) { tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, - sampleFormat->valuestring, MAX_DB_NAME_SIZE); + sampleFormat->valuestring, TSDB_DB_NAME_LEN); } else if (!sampleFormat) { - tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[i].superTbls[j].sampleFormat, "csv", TSDB_DB_NAME_LEN); } else { printf("ERROR: failed to read json, sample_format not found\n"); goto PARSE_OVER; @@ -4245,7 +4236,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { cJSON* dbs = cJSON_GetObjectItem(root, "databases"); if (dbs && dbs->type == cJSON_String && dbs->valuestring != NULL) { - tstrncpy(g_queryInfo.dbName, dbs->valuestring, MAX_DB_NAME_SIZE); + tstrncpy(g_queryInfo.dbName, dbs->valuestring, TSDB_DB_NAME_LEN); } else if (!dbs) { printf("ERROR: failed to read json, databases not found\n"); goto PARSE_OVER; @@ -4495,7 +4486,7 @@ static bool getMetaFromQueryJsonFile(cJSON* root) { if (stblname && stblname->type == cJSON_String && stblname->valuestring != NULL) { tstrncpy(g_queryInfo.superQueryInfo.sTblName, stblname->valuestring, - MAX_TB_NAME_SIZE); + TSDB_TABLE_NAME_LEN); } else { errorPrint("%s() LN%d, failed to read json, super table name input error\n", __func__, __LINE__); @@ -6408,7 +6399,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; - tstrncpy(pThreadInfo->db_name, db_name, MAX_DB_NAME_SIZE); + tstrncpy(pThreadInfo->db_name, db_name, 
TSDB_DB_NAME_LEN); pThreadInfo->time_precision = timePrec; pThreadInfo->superTblInfo = superTblInfo; @@ -6852,7 +6843,7 @@ static void *specifiedTableQuery(void *sarg) { } } - char sqlStr[MAX_DB_NAME_SIZE + 5]; + char sqlStr[TSDB_DB_NAME_LEN + 5]; sprintf(sqlStr, "use %s", g_queryInfo.dbName); if (0 != queryDbExec(pThreadInfo->taos, sqlStr, NO_INSERT_TYPE, false)) { taos_close(pThreadInfo->taos); @@ -7328,12 +7319,6 @@ static void *superSubscribe(void *sarg) { performancePrint("st: %"PRIu64" et: %"PRIu64" delta: %"PRIu64"\n", st, et, (et - st)); if (res) { - if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { - sprintf(pThreadInfo->filePath, "%s-%d", - g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], - pThreadInfo->threadID); - fetchResult(res, pThreadInfo); - } if (g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq][0] != 0) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.superQueryInfo.result[pThreadInfo->querySeq], @@ -7440,10 +7425,10 @@ static void *specifiedSubscribe(void *sarg) { sprintf(pThreadInfo->filePath, "%s-%d", g_queryInfo.specifiedQueryInfo.result[pThreadInfo->querySeq], pThreadInfo->threadID); - fetchResult( - g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], - pThreadInfo); } + fetchResult( + g_queryInfo.specifiedQueryInfo.res[pThreadInfo->threadID], + pThreadInfo); g_queryInfo.specifiedQueryInfo.consumed[pThreadInfo->threadID] ++; if ((g_queryInfo.specifiedQueryInfo.resubAfterConsume[pThreadInfo->querySeq] != -1) @@ -7680,9 +7665,9 @@ static void setParaFromArg(){ g_Dbs.dbCount = 1; g_Dbs.db[0].drop = true; - tstrncpy(g_Dbs.db[0].dbName, g_args.database, MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].dbName, g_args.database, TSDB_DB_NAME_LEN); g_Dbs.db[0].dbCfg.replica = g_args.replica; - tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", MAX_DB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].dbCfg.precision, "ms", 8); tstrncpy(g_Dbs.resultFile, g_args.output_file, MAX_FILE_NAME_LEN); @@ -7704,7 +7689,7 @@ static void setParaFromArg(){ if (g_args.use_metric) { g_Dbs.db[0].superTblCount = 1; - tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", MAX_TB_NAME_SIZE); + tstrncpy(g_Dbs.db[0].superTbls[0].sTblName, "meters", TSDB_TABLE_NAME_LEN); g_Dbs.db[0].superTbls[0].childTblCount = g_args.num_of_tables; g_Dbs.threadCount = g_args.num_of_threads; g_Dbs.threadCountByCreateTbl = g_args.num_of_threads; @@ -7715,7 +7700,7 @@ static void setParaFromArg(){ g_Dbs.db[0].superTbls[0].disorderRange = g_args.disorderRange; g_Dbs.db[0].superTbls[0].disorderRatio = g_args.disorderRatio; tstrncpy(g_Dbs.db[0].superTbls[0].childTblPrefix, - g_args.tb_prefix, MAX_TB_NAME_SIZE); + g_args.tb_prefix, TSDB_TABLE_NAME_LEN - 20); tstrncpy(g_Dbs.db[0].superTbls[0].dataSource, "rand", MAX_TB_NAME_SIZE); g_Dbs.db[0].superTbls[0].iface = g_args.iface; tstrncpy(g_Dbs.db[0].superTbls[0].startTimestamp, @@ -7732,7 +7717,7 @@ static void setParaFromArg(){ } tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - data_type[i], MAX_TB_NAME_SIZE); + data_type[i], strlen(data_type[i]) + 1); g_Dbs.db[0].superTbls[0].columns[i].dataLen = g_args.len_of_binary; g_Dbs.db[0].superTbls[0].columnCount++; } @@ -7743,18 +7728,18 @@ static void setParaFromArg(){ for (int i = g_Dbs.db[0].superTbls[0].columnCount; i < g_args.num_of_CPR; i++) { tstrncpy(g_Dbs.db[0].superTbls[0].columns[i].dataType, - "INT", MAX_TB_NAME_SIZE); + "INT", strlen("INT") + 1); g_Dbs.db[0].superTbls[0].columns[i].dataLen = 0; g_Dbs.db[0].superTbls[0].columnCount++; } } tstrncpy(g_Dbs.db[0].superTbls[0].tags[0].dataType, 
- "INT", MAX_TB_NAME_SIZE); + "INT", strlen("INT") + 1); g_Dbs.db[0].superTbls[0].tags[0].dataLen = 0; tstrncpy(g_Dbs.db[0].superTbls[0].tags[1].dataType, - "BINARY", MAX_TB_NAME_SIZE); + "BINARY", strlen("BINARY") + 1); g_Dbs.db[0].superTbls[0].tags[1].dataLen = g_args.len_of_binary; g_Dbs.db[0].superTbls[0].tagCount = 2; } else { @@ -7890,11 +7875,11 @@ static void queryResult() { pThreadInfo->end_table_to = g_Dbs.db[0].superTbls[0].childTblCount - 1; pThreadInfo->superTblInfo = &g_Dbs.db[0].superTbls[0]; tstrncpy(pThreadInfo->tb_prefix, - g_Dbs.db[0].superTbls[0].childTblPrefix, MAX_TB_NAME_SIZE); + g_Dbs.db[0].superTbls[0].childTblPrefix, TSDB_TABLE_NAME_LEN - 20); } else { pThreadInfo->ntables = g_args.num_of_tables; pThreadInfo->end_table_to = g_args.num_of_tables -1; - tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, MAX_TB_NAME_SIZE); + tstrncpy(pThreadInfo->tb_prefix, g_args.tb_prefix, TSDB_TABLE_NAME_LEN); } pThreadInfo->taos = taos_connect( From 5628cb20913c50e2607325742fb6f5c764391e36 Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Fri, 25 Jun 2021 10:16:18 +0800 Subject: [PATCH 07/71] [TD-4895]: more checking for creating mnode --- src/balance/src/bnMain.c | 21 ++++++++++++++------- src/inc/tbn.h | 1 + src/mnode/src/mnodeDnode.c | 4 ++-- 3 files changed, 17 insertions(+), 9 deletions(-) diff --git a/src/balance/src/bnMain.c b/src/balance/src/bnMain.c index f022fff6d8..9997d44ca5 100644 --- a/src/balance/src/bnMain.c +++ b/src/balance/src/bnMain.c @@ -637,6 +637,19 @@ int32_t bnDropDnode(SDnodeObj *pDnode) { return TSDB_CODE_SUCCESS; } +int32_t bnDnodeCanCreateMnode(struct SDnodeObj *pDnode) { + if (pDnode == NULL) + return 0; + + if (pDnode->isMgmt || pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_VNODE + || pDnode->status == TAOS_DN_STATUS_DROPPING + || pDnode->status == TAOS_DN_STATUS_OFFLINE) { + return 0; + } else { + return 1; + } +} + static void bnMonitorDnodeModule() { int32_t numOfMnodes = mnodeGetMnodesNum(); if (numOfMnodes >= tsNumOfMnodes) return; @@ -645,13 +658,7 @@ static void bnMonitorDnodeModule() { SDnodeObj *pDnode = tsBnDnodes.list[i]; if (pDnode == NULL) break; - if (pDnode->isMgmt || pDnode->status == TAOS_DN_STATUS_DROPPING || pDnode->status == TAOS_DN_STATUS_OFFLINE) { - continue; - } - - if (pDnode->alternativeRole == TAOS_DN_ALTERNATIVE_ROLE_VNODE) { - continue; - } + if (!bnDnodeCanCreateMnode(pDnode)) continue; mLInfo("dnode:%d, numOfMnodes:%d expect:%d, create mnode in this dnode", pDnode->dnodeId, numOfMnodes, tsNumOfMnodes); mnodeCreateMnode(pDnode->dnodeId, pDnode->dnodeEp, true); diff --git a/src/inc/tbn.h b/src/inc/tbn.h index b9f4e3c608..b35f90eb15 100644 --- a/src/inc/tbn.h +++ b/src/inc/tbn.h @@ -31,6 +31,7 @@ void bnReset(); int32_t bnAllocVnodes(struct SVgObj *pVgroup); int32_t bnAlterDnode(struct SDnodeObj *pDnode, int32_t vnodeId, int32_t dnodeId); int32_t bnDropDnode(struct SDnodeObj *pDnode); +int32_t bnDnodeCanCreateMnode(struct SDnodeObj *pDnode); #ifdef __cplusplus } diff --git a/src/mnode/src/mnodeDnode.c b/src/mnode/src/mnodeDnode.c index ce21af49c2..2325122830 100644 --- a/src/mnode/src/mnodeDnode.c +++ b/src/mnode/src/mnodeDnode.c @@ -16,7 +16,6 @@ #define _DEFAULT_SOURCE #include "os.h" #include "tgrant.h" -#include "tbn.h" #include "tglobal.h" #include "tconfig.h" #include "tutil.h" @@ -632,7 +631,8 @@ static int32_t mnodeProcessDnodeStatusMsg(SMnodeMsg *pMsg) { } int32_t numOfMnodes = mnodeGetMnodesNum(); - if (numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum() && !pDnode->isMgmt) { + if 
(numOfMnodes < tsNumOfMnodes && numOfMnodes < mnodeGetOnlineDnodesNum() + && bnDnodeCanCreateMnode(pDnode)) { bnNotify(); } From 795bd8c07676bac3d0a163b862be7ed2d2f4c1e6 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Fri, 25 Jun 2021 10:39:17 +0800 Subject: [PATCH 08/71] [TD-4098] unsigned date type not found --- src/client/src/tscSQLParser.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e94f77d254..5107884422 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -165,6 +165,7 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, } else { tbufWriteUint32(&bw, colType); } + tbufWriteInt32(&bw, (int32_t)(pList->size)); for (int32_t i = 0; i < (int32_t)pList->size; i++) { @@ -181,10 +182,11 @@ bool serializeExprListToVariant(SArray* pList, tVariant **dst, int16_t colType, } tbufWriteInt64(&bw, var->i64); } else if (IS_UNSIGNED_NUMERIC_TYPE(colType)) { - if (IS_SIGNED_NUMERIC_TYPE(var->nType) && IS_UNSIGNED_NUMERIC_TYPE(var->nType)) { + if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) { + tbufWriteUint64(&bw, var->u64); + } else { break; - } - tbufWriteUint64(&bw, var->u64); + } } else if (colType == TSDB_DATA_TYPE_DOUBLE || colType == TSDB_DATA_TYPE_FLOAT) { if (IS_SIGNED_NUMERIC_TYPE(var->nType) || IS_UNSIGNED_NUMERIC_TYPE(var->nType)) { tbufWriteDouble(&bw, (double)(var->i64)); From 71bc5842d0ecb3de19da2711374c88281dea4e34 Mon Sep 17 00:00:00 2001 From: zhaoyanggh Date: Fri, 25 Jun 2021 15:02:59 +0800 Subject: [PATCH 09/71] [TD-4868]add taosdump test to full test --- tests/pytest/fulltest.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/pytest/fulltest.sh b/tests/pytest/fulltest.sh index c66ccc5477..88c5909863 100755 --- a/tests/pytest/fulltest.sh +++ b/tests/pytest/fulltest.sh @@ -150,6 +150,7 @@ python3 ./test.py -f import_merge/importCSV.py #======================p2-start=============== # tools python3 test.py -f tools/taosdumpTest.py +python3 test.py -f tools/taosdumpTest2.py python3 test.py -f tools/taosdemoTest.py python3 test.py -f tools/taosdemoTestWithoutMetric.py From 2555665a8e33a8a8774b41acbb181f6fd265f036 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 25 Jun 2021 15:22:22 +0800 Subject: [PATCH 10/71] [TD-4897] : clients should process result set ASAP after calling "taos_consume()". 
--- documentation20/cn/08.connector/docs.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/documentation20/cn/08.connector/docs.md b/documentation20/cn/08.connector/docs.md index 2d76c866d1..aa5fa50b66 100644 --- a/documentation20/cn/08.connector/docs.md +++ b/documentation20/cn/08.connector/docs.md @@ -427,12 +427,15 @@ TDengine提供时间驱动的实时流式计算API。可以每隔一指定的时 * res:查询结果集,注意结果集中可能没有记录 * param:调用 `taos_subscribe`时客户程序提供的附加参数 * code:错误码 + **注意**:在这个回调函数里不可以做耗时过长的处理,尤其是对于返回的结果集中数据较多的情况,否则有可能导致客户端阻塞等异常状态。如果必须进行复杂计算,则建议在另外的线程中进行处理。 * `TAOS_RES *taos_consume(TAOS_SUB *tsub)` 同步模式下,该函数用来获取订阅的结果。 用户应用程序将其置于一个循环之中。 如两次调用`taos_consume`的间隔小于订阅的轮询周期,API将会阻塞,直到时间间隔超过此周期。 如果数据库有新记录到达,该API将返回该最新的记录,否则返回一个没有记录的空结果集。 如果返回值为 `NULL`,说明系统出错。 异步模式下,用户程序不应调用此API。 + **注意**:在调用 `taos_consume()` 之后,用户应用应确保尽快调用 `taos_fetch_row()` 或 `taos_fetch_block()` 来处理订阅结果,否则服务端会持续缓存查询结果数据等待客户端读取,极端情况下会导致服务端内存消耗殆尽,影响服务稳定性。 + * `void taos_unsubscribe(TAOS_SUB *tsub, int keepProgress)` 取消订阅。 如参数 `keepProgress` 不为0,API会保留订阅的进度信息,后续调用 `taos_subscribe` 时可以基于此进度继续;否则将删除进度信息,后续只能重新开始读取数据。 From 022d6ea765ec90e23968cedb95eabac6fbd3f2e4 Mon Sep 17 00:00:00 2001 From: bryanchang0603 Date: Fri, 25 Jun 2021 15:41:47 +0800 Subject: [PATCH 11/71] taosdemoCfg adding how to use and minor bug fix --- tests/pytest/util/taosdemoCfg.py | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/tests/pytest/util/taosdemoCfg.py b/tests/pytest/util/taosdemoCfg.py index 5071e915a5..d211f86b81 100644 --- a/tests/pytest/util/taosdemoCfg.py +++ b/tests/pytest/util/taosdemoCfg.py @@ -25,6 +25,21 @@ from multiprocessing import cpu_count # TODO: fully test the function. Handle exceptions. # Handle json format not accepted by taosdemo + +### How to use TaosdemoCfg: +# Before you start: +# Make sure you understand how is taosdemo's JSON file structured. Because the python used does +# not support directory in directory for self objects, the config is being tear to different parts. +# Please make sure you understand which directory represent which part of which type of the file +# This module will reassemble the parts when creating the JSON file. 
+# +# Basic use example +# step 1:use self.append_sql_stb() to append the insert/query/subscribe directory into the module +# you can append many insert/query/subscribe directory, but pay attention about taosdemo's limit +# step 2:use alter function to alter the specific config +# step 3:use the generation function to generate the files +# +# step 1 and step 2 can be replaced with using import functions class TDTaosdemoCfg: def __init__(self): self.insert_cfg = { @@ -264,7 +279,7 @@ class TDTaosdemoCfg: elif key == "super_table_query": self.query_cfg["super_table_query"] = self.stable_query else: - self.table_query[key] = value + self.query_cfg[key] = value def alter_sub_cfg(self, key, value): if key == "specified_table_query": @@ -272,7 +287,7 @@ class TDTaosdemoCfg: elif key == "super_table_query": self.sub_cfg["super_table_query"] = self.stable_sub else: - self.table_query[key] = value + self.sub_cfg[key] = value def alter_sub_stb(self, key, value): if key == "sqls": From d8570135179a1c237997dbad40100106b9ec6111 Mon Sep 17 00:00:00 2001 From: wpan Date: Fri, 25 Jun 2021 17:05:37 +0800 Subject: [PATCH 12/71] fix double free issue --- src/client/src/tscSQLParser.c | 7 ++++++- src/client/src/tscUtil.c | 5 ++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index e94f77d254..b388c21599 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7786,7 +7786,8 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg5 = "only tag query not compatible with normal column filter"; const char* msg6 = "not support stddev/percentile in outer query yet"; const char* msg7 = "drivative requires timestamp column exists in subquery"; - + const char* msg8 = "condition missing for join query"; + int32_t code = TSDB_CODE_SUCCESS; SSqlCmd* pCmd = &pSql->cmd; @@ -7863,6 +7864,10 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf if (validateWhereNode(pQueryInfo, &pSqlNode->pWhere, pSql) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } + } else { + if (pQueryInfo->numOfTables > 1) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg8); + } } // validate the interval info diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 74dbe42eeb..83be33ffef 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4077,7 +4077,10 @@ SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo) { size_t size = sizeof(SVgroupInfo) * pVgroupsInfo->numOfVgroups + sizeof(SVgroupsInfo); SVgroupsInfo* pInfo = calloc(1, size); - memcpy(pInfo, pVgroupsInfo, size); + pInfo->numOfVgroups = pVgroupsInfo->numOfVgroups; + for (int32_t m = 0; m < pVgroupsInfo->numOfVgroups; ++m) { + tscSVgroupInfoCopy(&pInfo->vgroups[m], &pVgroupsInfo->vgroups[m]); + } return pInfo; } From 0ecaba7e9ea4050d2ab813e35469164d6c0efb8a Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Mon, 21 Jun 2021 11:18:17 +0800 Subject: [PATCH 13/71] [TD-4756]: nanosecond precison on gcBuildQueryJson in http plugin --- src/plugins/http/src/httpGcJson.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/plugins/http/src/httpGcJson.c b/src/plugins/http/src/httpGcJson.c index 397791706d..f33a994465 100644 --- a/src/plugins/http/src/httpGcJson.c +++ b/src/plugins/http/src/httpGcJson.c @@ -228,13 +228,11 @@ bool gcBuildQueryJson(HttpContext *pContext, HttpSqlCmd *cmd, TAOS_RES *result, case 
TSDB_DATA_TYPE_NCHAR: httpJsonStringForTransMean(jsonBuf, (char *)row[i], fields[i].bytes); break; - case TSDB_DATA_TYPE_TIMESTAMP: - if (precision == TSDB_TIME_PRECISION_MILLI) { // ms - httpJsonInt64(jsonBuf, *((int64_t *)row[i])); - } else { - httpJsonInt64(jsonBuf, *((int64_t *)row[i]) / 1000); - } + case TSDB_DATA_TYPE_TIMESTAMP: { + int64_t ts = convertTimePrecision(*((int64_t *)row[i]), precision, TSDB_TIME_PRECISION_MILLI); + httpJsonInt64(jsonBuf, ts); break; + } default: httpJsonString(jsonBuf, "-", 1); break; From 2361040efe22c4abeeb17f5f2ceef6e60e6c387b Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 25 Jun 2021 18:09:37 +0800 Subject: [PATCH 14/71] [TD-4793] : describe SQL function IRATE(). --- documentation20/cn/12.taos-sql/docs.md | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 5904abbbaa..5ab055bcda 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -854,7 +854,23 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 - 适用于:**表**。 + 适用于:**表、(超级表)**。 + + 说明:从 2.1.3.0 版本开始,TWA 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 + +- **IRATE** + ```mysql + SELECT IRATE(field_name) FROM tb_name WHERE clause; + ``` + 功能说明:计算瞬时增长率。使用时间区间中最后两个样本数据来计算瞬时增长速率;如果这两个值呈递减关系,那么只取最后一个数用于计算,而不是使用二者差值。 + + 返回结果数据类型:双精度浮点数Double。 + + 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 + + 适用于:**表、(超级表)**。 + + 说明:从 2.1.3.0 版本开始新增此函数。IRATE 可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 - **SUM** ```mysql From 41e1d2dd9689cd93fc1a65c88ef5f8d3f9d54953 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Fri, 25 Jun 2021 18:55:01 +0800 Subject: [PATCH 15/71] [TD-4555] : describe SQL function DERIVATIVE(). 
--- documentation20/cn/12.taos-sql/docs.md | 21 ++++++++++++++++++--- 1 file changed, 18 insertions(+), 3 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 5ab055bcda..844865f6db 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -870,7 +870,7 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 适用于:**表、(超级表)**。 - 说明:从 2.1.3.0 版本开始新增此函数。IRATE 可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 + 说明:(从 2.1.3.0 版本开始新增此函数)IRATE 可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 - **SUM** ```mysql @@ -1219,13 +1219,14 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 ``` ### 计算函数 + - **DIFF** ```mysql SELECT DIFF(field_name) FROM tb_name [WHERE clause]; ``` 功能说明:统计表中某列的值与前一行对应值的差。 - 返回结果数据类型: 同应用字段。 + 返回结果数据类型:同应用字段。 应用字段:不能应用在timestamp、binary、nchar、bool类型字段。 @@ -1243,13 +1244,27 @@ TDengine支持针对数据的聚合查询。提供支持的聚合和选择函数 Query OK, 2 row(s) in set (0.001162s) ``` +- **DERIVATIVE** + ```mysql + SELECT DERIVATIVE(field_name, time_interval, ignore_negative) FROM tb_name [WHERE clause]; + ``` + 功能说明:统计表中某列数值的单位变化率。其中单位时间区间的长度可以通过 time_interval 参数指定,最小可以是 1 秒(1s);ignore_negative 参数的值可以是 0 或 1,为 1 时表示忽略负值。 + + 返回结果数据类型:双精度浮点数。 + + 应用字段:不能应用在 timestamp、binary、nchar、bool 类型字段。 + + 适用于:**表、(超级表)**。 + + 说明:(从 2.1.3.0 版本开始新增此函数)输出结果行数是范围内总行数减一,第一行没有结果输出。DERIVATIVE 函数可以在由 GROUP BY 划分出单独时间线的情况下用于超级表(也即 GROUP BY tbname)。 + - **SPREAD** ```mysql SELECT SPREAD(field_name) FROM { tb_name | stb_name } [WHERE clause]; ``` 功能说明:统计表/超级表中某列的最大值和最小值之差。 - 返回结果数据类型: 双精度浮点数。 + 返回结果数据类型:双精度浮点数。 应用字段:不能应用在binary、nchar、bool类型字段。 From 11761d016d01159a0c5bb56c1b646bd9ffa996a9 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 25 Jun 2021 23:10:10 +0800 Subject: [PATCH 16/71] [td-4894]:fix bug in group by normal columns. --- src/query/src/qExecutor.c | 18 ++++------- tests/script/general/parser/groupby.sim | 40 +++++++++++++++++++++++++ 2 files changed, 46 insertions(+), 12 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 0742d3eb81..e7f63a027d 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -1282,11 +1282,8 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn return; } - int64_t* tsList = NULL; SColumnInfoData* pFirstColData = taosArrayGet(pSDataBlock->pDataBlock, 0); - if (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP) { - tsList = (int64_t*) pFirstColData->pData; - } + int64_t* tsList = (pFirstColData->info.type == TSDB_DATA_TYPE_TIMESTAMP)? 
(int64_t*) pFirstColData->pData:NULL; STimeWindow w = TSWINDOW_INITIALIZER; @@ -1319,12 +1316,10 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn } if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { - setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData, - bytes); + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, pInfo->prevData, bytes); } - int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes, - item->groupIndex); + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, pInfo->prevData, type, bytes, item->groupIndex); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); } @@ -1340,17 +1335,16 @@ static void doHashGroupbyAgg(SOperatorInfo* pOperator, SGroupbyOperatorInfo *pIn memcpy(pInfo->prevData, val, bytes); if (pQueryAttr->stableQuery && pQueryAttr->stabledev && (pRuntimeEnv->prevResult != NULL)) { - setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, - bytes); + setParamForStableStddevByColData(pRuntimeEnv, pInfo->binfo.pCtx, pOperator->numOfOutput, pOperator->pExpr, val, bytes); } - int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, - item->groupIndex); + int32_t ret = setGroupResultOutputBuf(pRuntimeEnv, &(pInfo->binfo), pOperator->numOfOutput, val, type, bytes, item->groupIndex); if (ret != TSDB_CODE_SUCCESS) { // null data, too many state code longjmp(pRuntimeEnv->env, TSDB_CODE_QRY_APP_ERROR); } doApplyFunctions(pRuntimeEnv, pInfo->binfo.pCtx, &w, pSDataBlock->info.rows - num, num, tsList, pSDataBlock->info.rows, pOperator->numOfOutput); + tfree(pInfo->prevData); } } diff --git a/tests/script/general/parser/groupby.sim b/tests/script/general/parser/groupby.sim index 507431f536..6ae5d420d8 100644 --- a/tests/script/general/parser/groupby.sim +++ b/tests/script/general/parser/groupby.sim @@ -741,4 +741,44 @@ if $data14 != 2 then return -1 endi +sql create table m1 (ts timestamp, k int, f1 int) tags(a int); +sql create table tm0 using m1 tags(0); +sql create table tm1 using m1 tags(1); + +sql insert into tm0 values('2020-1-1 1:1:1', 1, 10); +sql insert into tm0 values('2020-1-1 1:1:2', 1, 20); +sql insert into tm1 values('2020-2-1 1:1:1', 2, 10); +sql insert into tm1 values('2020-2-1 1:1:2', 2, 20); + +system sh/exec.sh -n dnode1 -s stop -x SIGINT +sleep 100 +system sh/exec.sh -n dnode1 -s start +sleep 100 + +sql connect +sleep 100 +sql use group_db0; + +print =========================>TD-4894 +sql select count(*),k from m1 group by k; +if $rows != 2 then + return -1 +endi + +if $data00 != 2 then + return -1 +endi + +if $data01 != 1 then + return -1 +endi + +if $data10 != 2 then + return -1 +endi + +if $data11 != 2 then + return -1 +endi + system sh/exec.sh -n dnode1 -s stop -x SIGINT From 782e498d1fac32243305ea646f45da8ba2935e8c Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 25 Jun 2021 23:22:22 +0800 Subject: [PATCH 17/71] [td-225]fix compiler error. 
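The TD-4894 change above lives in doHashGroupbyAgg, whose loop compares each row's group-by value against the previously seen one (pInfo->prevData) and only resolves a new output buffer when the value changes; the hash lookup in setGroupResultOutputBuf handles keys that reappear later in the block. Reduced to its control flow, the prevData part of the loop behaves like the run-length sketch below, which is an editorial paraphrase rather than code from the patch:

```c
#include <stdio.h>

/* Stand-in for doApplyFunctions(): handle 'rows' consecutive rows of one group. */
static void apply_to_group(int key, int rows) {
  printf("group key=%d rows=%d\n", key, rows);
}

int main(void) {
  int keys[] = {1, 1, 2, 2, 2, 3};           /* the group-by column of one data block */
  int n = (int)(sizeof(keys) / sizeof(keys[0]));

  int prev  = keys[0];                        /* plays the role of pInfo->prevData */
  int start = 0;
  for (int i = 1; i < n; ++i) {
    if (keys[i] != prev) {                    /* memcmp(prevData, val, bytes) != 0 */
      apply_to_group(prev, i - start);        /* flush the finished run */
      prev  = keys[i];
      start = i;
    }
  }
  apply_to_group(prev, n - start);            /* last run of the block */
  return 0;
}
```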
--- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 815442a038..805fd9dfe4 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2613,7 +2613,7 @@ int32_t addExprAndResultField(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, int32_t col SColumnList ids = createColumnList(1, index.tableIndex, index.columnIndex); if (finalResult) { - insertResultField(pQueryInfo, colIndex, &ids, resultSize, resultType, pExpr->base.aliasName, pExpr); + insertResultField(pQueryInfo, colIndex, &ids, resultSize, (int8_t)resultType, pExpr->base.aliasName, pExpr); } else { assert(ids.num == 1); tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); From 9094bf8849d50721dc1996d2798ad938864b97ae Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Fri, 25 Jun 2021 23:42:15 +0800 Subject: [PATCH 18/71] [td-225]fix compiler error. --- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 805fd9dfe4..da02d63f1e 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -7849,7 +7849,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf STableMeta* pTableMeta = tscGetMetaInfo(pQueryInfo, 0)->pTableMeta; SSchema* pSchema = tscGetTableColumnSchema(pTableMeta, 0); - int32_t numOfExprs = tscNumOfExprs(pQueryInfo); + int32_t numOfExprs = (int32_t) tscNumOfExprs(pQueryInfo); if (numOfExprs == 1) { SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); int32_t f = pExpr->base.functionId; From 2f37ebf686889b600398987d628f84b375e3c4e7 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 26 Jun 2021 09:08:59 +0800 Subject: [PATCH 19/71] [TD-4825] order by col after group-by --- src/client/src/tscSQLParser.c | 40 +++++++++++++++++---- src/query/src/qExecutor.c | 67 +++++++++++++++++++++++++++++++++-- src/util/inc/tarray.h | 13 +++++++ src/util/src/tarray.c | 52 ++++++++++++++++++++++++++- 4 files changed, 163 insertions(+), 9 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index b2b188661d..76a56a79d0 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5110,7 +5110,7 @@ static void setDefaultOrderInfo(SQueryInfo* pQueryInfo) { int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSqlNode, SSchema* pSchema) { const char* msg0 = "only support order by primary timestamp"; const char* msg1 = "invalid column name"; - const char* msg2 = "order by primary timestamp or first tag in groupby clause allowed"; + const char* msg2 = "order by primary timestamp, first tag or groupby column in groupby clause allowed"; const char* msg3 = "invalid column in order by clause, only primary timestamp or first tag in groupby clause allowed"; setDefaultOrderInfo(pQueryInfo); @@ -5163,6 +5163,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq bool orderByTags = false; bool orderByTS = false; + bool orderByGroupbyCol = false; if (index.columnIndex >= tscGetNumOfColumns(pTableMetaInfo->pTableMeta)) { int32_t relTagIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); @@ -5182,11 +5183,18 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (PRIMARYKEY_TIMESTAMP_COL_INDEX == index.columnIndex) { orderByTS = true; } - - if (!(orderByTags || orderByTS) && 
!isTopBottomQuery(pQueryInfo)) { + + SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; + if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { + SColIndex* pColIndex = taosArrayGet(columnInfo, 0); + if (pColIndex->colIndex == index.columnIndex) { + orderByGroupbyCol = true; + } + } + if (!(orderByTags || orderByTS || orderByGroupbyCol) && !isTopBottomQuery(pQueryInfo)) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg3); } else { // order by top/bottom result value column is not supported in case of interval query. - assert(!(orderByTags && orderByTS)); + assert(!(orderByTags && orderByTS && orderByGroupbyCol)); } size_t s = taosArrayGetSize(pSortorder); @@ -5196,6 +5204,11 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->groupbyExpr.orderType = p1->sortOrder; + } else if (orderByGroupbyCol) { + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + + pQueryInfo->groupbyExpr.orderType = p1->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } else if (isTopBottomQuery(pQueryInfo)) { /* order of top/bottom query in interval is not valid */ SExprInfo* pExpr = tscExprGet(pQueryInfo, 0); @@ -5228,6 +5241,9 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (orderByTags) { pQueryInfo->groupbyExpr.orderIndex = index.columnIndex - tscGetNumOfColumns(pTableMetaInfo->pTableMeta); pQueryInfo->groupbyExpr.orderType = pItem->sortOrder; + } else if (orderByGroupbyCol){ + pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = index.columnIndex; } else { pQueryInfo->order.order = pItem->sortOrder; pQueryInfo->order.orderColId = PRIMARYKEY_TIMESTAMP_COL_INDEX; @@ -5253,9 +5269,20 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq if (getColumnIndexByName(pCmd, &columnName, pQueryInfo, &index) != TSDB_CODE_SUCCESS) { return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg1); } - if (index.columnIndex != PRIMARYKEY_TIMESTAMP_COL_INDEX && !isTopBottomQuery(pQueryInfo)) { - return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + bool validOrder = false; + SArray *columnInfo = pQueryInfo->groupbyExpr.columnInfo; + if (columnInfo != NULL && taosArrayGetSize(columnInfo) > 0) { + SColIndex* pColIndex = taosArrayGet(columnInfo, 0); + validOrder = (pColIndex->colIndex == index.columnIndex); + } + if (!validOrder) { + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg2); + } + tVariantListItem* p1 = taosArrayGet(pSqlNode->pSortOrder, 0); + pQueryInfo->groupbyExpr.orderIndex = pSchema[index.columnIndex].colId; + pQueryInfo->groupbyExpr.orderType = p1->sortOrder; + } if (isTopBottomQuery(pQueryInfo)) { @@ -5276,6 +5303,7 @@ int32_t validateOrderbyNode(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SSqlNode* pSq tVariantListItem* pItem = taosArrayGet(pSqlNode->pSortOrder, 0); pQueryInfo->order.order = pItem->sortOrder; + pQueryInfo->order.orderColId = pSchema[index.columnIndex].colId; } return TSDB_CODE_SUCCESS; diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 69b17504d3..c0fe7b128a 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -16,6 +16,7 @@ #include "qFill.h" #include "taosmsg.h" #include "tglobal.h" +#include "talgo.h" #include "exception.h" #include "hash.h" @@ -26,6 +27,7 @@ #include "queryLog.h" #include "tlosertree.h" #include "ttype.h" +#include "tcompare.h" #include "tscompression.h" 
#define IS_MASTER_SCAN(runtime) ((runtime)->scanFlag == MASTER_SCAN) @@ -207,7 +209,65 @@ static void doSetTableGroupOutputBuf(SQueryRuntimeEnv* pRuntimeEnv, SResultRowIn SQLFunctionCtx* pCtx, int32_t* rowCellInfoOffset, int32_t numOfOutput, int32_t groupIndex); -// setup the output buffer for each operator +SArray* getOrderCheckColumns(SQueryAttr* pQuery); + + +typedef struct SRowCompSupporter { + SQueryRuntimeEnv *pRuntimeEnv; + int16_t dataOffset; + __compar_fn_t comFunc; +} SRowCompSupporter; + +static int compareRowData(const void *a, const void *b, const void *userData) { + const SResultRow *pRow1 = (const SResultRow *)a; + const SResultRow *pRow2 = (const SResultRow *)b; + + SRowCompSupporter *supporter = (SRowCompSupporter *)userData; + SQueryRuntimeEnv* pRuntimeEnv = supporter->pRuntimeEnv; + + tFilePage *page1 = getResBufPage(pRuntimeEnv->pResultBuf, pRow1->pageId); + tFilePage *page2 = getResBufPage(pRuntimeEnv->pResultBuf, pRow2->pageId); + + int16_t offset = supporter->dataOffset; + char *in1 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page1, pRow1->offset, offset); + char *in2 = getPosInResultPage(pRuntimeEnv->pQueryAttr, page2, pRow2->offset, offset); + + return (in1 != NULL && in2 != NULL) ? supporter->comFunc(in1, in2) : 0; +} + +static void sortGroupResByOrderList(SGroupResInfo *pGroupResInfo, SQueryRuntimeEnv *pRuntimeEnv, SSDataBlock* pDataBlock) { + SArray *columnOrderList = getOrderCheckColumns(pRuntimeEnv->pQueryAttr); + if (taosArrayGetSize(columnOrderList) <= 0) { + return; + } + int32_t orderId = pRuntimeEnv->pQueryAttr->order.orderColId; + if (orderId <= 0) { + return; + } + bool found = false; + int16_t dataOffset = 0; + + //SColIndex *index = taosArrayGet(columnOrderList, 0); + for (int32_t j = 0; j < pDataBlock->info.numOfCols; ++j) { + SColumnInfoData* pColInfoData = (SColumnInfoData *)taosArrayGet(pDataBlock->pDataBlock, j); + if (orderId == j) { + found = true; + break; + } + dataOffset += pColInfoData->info.bytes; + } + + if (found == false) { + return; + } + int16_t type = pRuntimeEnv->pQueryAttr->pExpr1[orderId].base.resType; + + SRowCompSupporter support = {.pRuntimeEnv = pRuntimeEnv, .dataOffset = dataOffset, .comFunc = getComparFunc(type, 0)}; + + return taosArraySortPWithExt(pGroupResInfo->pRows, compareRowData, &support); + +} +//setup the output buffer for each operator SSDataBlock* createOutputBuf(SExprInfo* pExpr, int32_t numOfOutput, int32_t numOfRows) { const static int32_t minSize = 8; @@ -5416,8 +5476,11 @@ static SSDataBlock* hashGroupbyAggregate(void* param, bool* newgroup) { } initGroupResInfo(&pRuntimeEnv->groupResInfo, &pInfo->binfo.resultRowInfo); + if (!pRuntimeEnv->pQueryAttr->stableQuery) { + sortGroupResByOrderList(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); + } toSSDataBlock(&pRuntimeEnv->groupResInfo, pRuntimeEnv, pInfo->binfo.pRes); - + if (pInfo->binfo.pRes->info.rows == 0 || !hasRemainDataInCurrentGroup(&pRuntimeEnv->groupResInfo)) { pOperator->status = OP_EXEC_DONE; } diff --git a/src/util/inc/tarray.h b/src/util/inc/tarray.h index fc7b6b8584..63cadf39a3 100644 --- a/src/util/inc/tarray.h +++ b/src/util/inc/tarray.h @@ -197,8 +197,21 @@ void* taosArraySearch(const SArray* pArray, const void* key, __compar_fn_t compa */ char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t comparFn, int flags); + +/** + * sort the pointer data in the array + * @param pArray + * @param compar + * @param param + * @return + */ + +void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const 
void *param); + #ifdef __cplusplus } #endif + + #endif // TDENGINE_TAOSARRAY_H diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 5e7d9d14da..a59e36d29f 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -15,6 +15,7 @@ #include "os.h" #include "tarray.h" +#include "talgo.h" void* taosArrayInit(size_t size, size_t elemSize) { assert(elemSize > 0); @@ -249,4 +250,53 @@ char* taosArraySearchString(const SArray* pArray, const char* key, __compar_fn_t return NULL; } return *(char**)p; -} \ No newline at end of file +} + +static int taosArrayPartition(SArray *pArray, int i, int j, __ext_compar_fn_t fn, const void *userData) { + void* key = taosArrayGetP(pArray, i); + while (i < j) { + while (i < j && fn(taosArrayGetP(pArray, j), key, userData) >= 0) { j--; } + if (i < j) { + void *a = taosArrayGetP(pArray, j); + taosArraySet(pArray, i, &a); + } + while (i < j && fn(taosArrayGetP(pArray, i), key, userData) <= 0) { i++;} + if (i < j) { + void *a = taosArrayGetP(pArray, i); + taosArraySet(pArray, j, &a); + } + } + taosArraySet(pArray, i, &key); + return i; +} + +static void taosArrayQuicksortHelper(SArray *pArray, int low, int high, __ext_compar_fn_t fn, const void *param) { + if (low < high) { + int idx = taosArrayPartition(pArray, low, high, fn, param); + taosArrayQuicksortHelper(pArray, low, idx - 1, fn, param); + taosArrayQuicksortHelper(pArray, idx + 1, high, fn, param); + } +} + +static void taosArrayQuickSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { + return taosArrayQuicksortHelper(pArray, 0, taosArrayGetSize(pArray) - 1, fn, param); +} +static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { + for (int i = 1; i <= pArray->size - 1; ++i) { + for (int j = i; j > 0; --j) { + if (fn(taosArrayGetP(pArray, j), taosArrayGetP(pArray, j - 1), param) == -1) { + void *a = taosArrayGetP(pArray, j); + void *b = taosArrayGetP(pArray, j - 1); + taosArraySet(pArray, j - 1, &a); + taosArraySet(pArray, j, &b); + } else { + break; + } + } + } + +} +void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void *param) { + return taosArrayGetSize(pArray) > 8 ? + taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param); +} From e6273a38dbd9a1dc1971cf3644488a213891fe01 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sat, 26 Jun 2021 13:43:48 +0800 Subject: [PATCH 20/71] [TD-4915] fix show create database except --- src/client/src/tscLocal.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/client/src/tscLocal.c b/src/client/src/tscLocal.c index 2c9717306f..668a9e9406 100644 --- a/src/client/src/tscLocal.c +++ b/src/client/src/tscLocal.c @@ -485,6 +485,7 @@ static int32_t tscGetDBInfo(SCreateBuilder *builder, char *result) { char buf[TSDB_DB_NAME_LEN + 64] = {0}; do { + memset(buf, 0, sizeof(buf)); int32_t* lengths = taos_fetch_lengths(pSql); int32_t ret = tscGetNthFieldResult(row, fields, lengths, 0, buf); if (0 == ret && STR_NOCASE_EQUAL(buf, strlen(buf), builder->buf, strlen(builder->buf))) { From 14d9473b5a56200e43a4c86c8349b6a7bb3cb115 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Sat, 26 Jun 2021 15:43:08 +0800 Subject: [PATCH 21/71] [td-225]fix the bug found by regression test. 
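The TD-4825 patch above adds taosArraySortPWithExt, which sorts an array of pointers with a caller-supplied comparator that also receives an opaque parameter (that is how compareRowData gets at its SRowCompSupporter). The header comment is terse, so a minimal usage sketch may help; it assumes only the declarations visible in the hunks (taosArrayInit, taosArrayPush, taosArrayGetP, taosArrayGetSize) plus taosArrayDestroy for cleanup, and is not code from the patch:

```c
#include <stdio.h>
#include "tarray.h"   /* SArray, taosArrayInit/Push/GetP/GetSize, taosArraySortPWithExt */

/* Comparator over the stored int* elements; 'param' flips the direction. */
static int cmpIntPtr(const void *a, const void *b, const void *param) {
  int dir = *(const int *)param;
  int va  = *(const int *)a;
  int vb  = *(const int *)b;
  if (va == vb) return 0;
  return (va < vb ? -1 : 1) * dir;      /* keep the result in {-1, 0, 1} */
}

int main(void) {
  SArray *arr = taosArrayInit(4, sizeof(int *));   /* element type is a pointer */
  int values[] = {42, 7, 19};
  for (int i = 0; i < 3; ++i) {
    int *p = &values[i];
    taosArrayPush(arr, &p);                        /* the pointer itself is stored */
  }

  int ascending = 1;                               /* forwarded as the third argument */
  taosArraySortPWithExt(arr, cmpIntPtr, &ascending);

  for (int i = 0; i < (int)taosArrayGetSize(arr); ++i) {
    printf("%d\n", *(int *)taosArrayGetP(arr, i));
  }
  taosArrayDestroy(arr);                           /* assumed tarray.h cleanup helper */
  return 0;
}
```

Keeping the comparator's return value in {-1, 0, 1} matters because the insertion-sort path added in tarray.c tests `fn(...) == -1` rather than `< 0`.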
--- src/client/src/tscSQLParser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index da02d63f1e..cf72b7abd9 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -2104,7 +2104,7 @@ static int32_t setExprInfoForFunctions(SSqlCmd* pCmd, SQueryInfo* pQueryInfo, SS // if it is not in the final result, do not add it SColumnList ids = createColumnList(1, pColIndex->tableIndex, pColIndex->columnIndex); if (finalResult) { - insertResultField(pQueryInfo, resColIdx, &ids, pSchema->bytes, (int8_t)pSchema->type, pExpr->base.aliasName, pExpr); + insertResultField(pQueryInfo, resColIdx, &ids, resBytes, (int8_t)resType, pExpr->base.aliasName, pExpr); } else { tscColumnListInsert(pQueryInfo->colList, ids.ids[0].columnIndex, pExpr->base.uid, pSchema); } From a2c1754168c35840a2c31c765c674d5465f1a9e7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 26 Jun 2021 22:34:30 +0800 Subject: [PATCH 22/71] Hotfix/sangshuduo/td 4892 taosdemo sub fetch (#6634) * [TD-4892]: taosdemo subscribe fetch result. * fix stbname length. * restrict prefix length. * submit empty * fix minor code. * fix crash if no result file. --- src/kit/taosdemo/taosdemo.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index abf3c74436..b5781c6070 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1204,23 +1204,24 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { return ; } - int totalLen = 0; - char temp[16000]; + int64_t totalLen = 0; // fetch the records row by row while((row = taos_fetch_row(res))) { - if ((strlen(pThreadInfo->filePath) > 0) - && (totalLen >= 100*1024*1024 - 32000)) { - appendResultBufToFile(databuf, pThreadInfo); + if (totalLen >= 100*1024*1024 - 32000) { + if (strlen(pThreadInfo->filePath) > 0) + appendResultBufToFile(databuf, pThreadInfo); totalLen = 0; memset(databuf, 0, 100*1024*1024); } num_rows++; + char temp[16000] = {0}; int len = taos_print_row(temp, row, fields, num_fields); len += sprintf(temp + len, "\n"); //printf("query result:%s\n", temp); memcpy(databuf + totalLen, temp, len); totalLen += len; + debugPrint("totalLen: %"PRId64"\n", totalLen); } verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", From 09ae4a83f0209c4809fbbf56fd96044d40c655ec Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sat, 26 Jun 2021 22:34:50 +0800 Subject: [PATCH 23/71] Hotfix/sangshuduo/td 4892 taosdemo sub fetch for develop (#6635) * fix crash if no result file. * make taosdemo same as master branch. 
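The fetchResult() fix above makes the flush-and-reset of the 100 MB result buffer happen whenever it is close to full, and only the file append stays conditional on a configured result file, so the buffer can no longer overflow when results are merely consumed. The shape of that loop is the usual bounded-append pattern; a stripped-down sketch with illustrative names and much smaller sizes (not taosdemo code):

```c
#include <stdio.h>
#include <string.h>
#include <stdlib.h>

#define BUF_CAP 1024   /* stands in for the 100 MB result buffer */
#define ROW_MAX   64   /* stands in for the 16000-byte per-row scratch */

static void flush(const char *buf, FILE *out) {
  if (out != NULL) fputs(buf, out);   /* append only if a result file is configured */
}

int main(void) {
  char  *buf  = calloc(1, BUF_CAP);
  size_t used = 0;
  FILE  *out  = NULL;                 /* no result file: the buffer is still recycled */

  for (int row = 0; row < 1000; ++row) {
    char temp[ROW_MAX] = {0};                      /* fresh scratch for every row */
    int  len = snprintf(temp, sizeof(temp), "row %d\n", row);

    if (used >= BUF_CAP - ROW_MAX) {               /* flush before it can overflow */
      flush(buf, out);
      used = 0;
      memset(buf, 0, BUF_CAP);
    }
    memcpy(buf + used, temp, (size_t)len);
    used += (size_t)len;
  }
  flush(buf, out);                                 /* final partial buffer */
  free(buf);
  return 0;
}
```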
--- src/kit/taosdemo/taosdemo.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 0eac1518a7..b2ac11b2ca 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -1204,23 +1204,24 @@ static void fetchResult(TAOS_RES *res, threadInfo* pThreadInfo) { return ; } - int totalLen = 0; + int64_t totalLen = 0; // fetch the records row by row while((row = taos_fetch_row(res))) { - if ((strlen(pThreadInfo->filePath) > 0) - && (totalLen >= 100*1024*1024 - 32000)) { - appendResultBufToFile(databuf, pThreadInfo); + if (totalLen >= 100*1024*1024 - 32000) { + if (strlen(pThreadInfo->filePath) > 0) + appendResultBufToFile(databuf, pThreadInfo); totalLen = 0; memset(databuf, 0, 100*1024*1024); } num_rows++; - char temp[16000] = {0}; + char temp[16000] = {0}; int len = taos_print_row(temp, row, fields, num_fields); len += sprintf(temp + len, "\n"); //printf("query result:%s\n", temp); memcpy(databuf + totalLen, temp, len); totalLen += len; + debugPrint("totalLen: %"PRId64"\n", totalLen); } verbosePrint("%s() LN%d, databuf=%s resultFile=%s\n", From 7145bc5fc2bdb31bc6ab628263829afa3473aebd Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 27 Jun 2021 03:53:30 +0800 Subject: [PATCH 24/71] [TD-4825] order by col after group-by --- src/util/src/tarray.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index a59e36d29f..15fda4ab51 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -279,9 +279,15 @@ static void taosArrayQuicksortHelper(SArray *pArray, int low, int high, __ext_co } static void taosArrayQuickSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { - return taosArrayQuicksortHelper(pArray, 0, taosArrayGetSize(pArray) - 1, fn, param); + if (pArray->size <= 0) { + return; + } + taosArrayQuicksortHelper(pArray, 0, taosArrayGetSize(pArray) - 1, fn, param); } static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { + if (pArray->size <= 0) { + return; + } for (int i = 1; i <= pArray->size - 1; ++i) { for (int j = i; j > 0; --j) { if (fn(taosArrayGetP(pArray, j), taosArrayGetP(pArray, j - 1), param) == -1) { @@ -294,9 +300,12 @@ static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void } } } + return; } +// order array void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void *param) { - return taosArrayGetSize(pArray) > 8 ? + taosArrayGetSize(pArray) > 8 ? 
taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param); } +//TODO(yihaoDeng) order array From a2edc05ba0ee80762dadbd00736c840f28f15829 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 27 Jun 2021 04:46:51 +0800 Subject: [PATCH 25/71] [TD-4825] order by col after group-by --- src/util/src/tarray.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/util/src/tarray.c b/src/util/src/tarray.c index 15fda4ab51..fe529edaac 100644 --- a/src/util/src/tarray.c +++ b/src/util/src/tarray.c @@ -279,13 +279,13 @@ static void taosArrayQuicksortHelper(SArray *pArray, int low, int high, __ext_co } static void taosArrayQuickSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { - if (pArray->size <= 0) { + if (pArray->size <= 1) { return; } - taosArrayQuicksortHelper(pArray, 0, taosArrayGetSize(pArray) - 1, fn, param); + taosArrayQuicksortHelper(pArray, 0, (int)(taosArrayGetSize(pArray) - 1), fn, param); } static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void *param) { - if (pArray->size <= 0) { + if (pArray->size <= 1) { return; } for (int i = 1; i <= pArray->size - 1; ++i) { @@ -303,9 +303,9 @@ static void taosArrayInsertSort(SArray* pArray, __ext_compar_fn_t fn, const void return; } -// order array +// order array void taosArraySortPWithExt(SArray* pArray, __ext_compar_fn_t fn, const void *param) { taosArrayGetSize(pArray) > 8 ? taosArrayQuickSort(pArray, fn, param) : taosArrayInsertSort(pArray, fn, param); } -//TODO(yihaoDeng) order array +//TODO(yihaoDeng) add order array From b2975b7c4d4dac617f270c3940e86c6f24af8e6e Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Sun, 27 Jun 2021 05:04:33 +0800 Subject: [PATCH 26/71] [TD-4825] order by col after group-by --- src/query/src/qExecutor.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index c0fe7b128a..aa5ce4fa57 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -264,7 +264,8 @@ static void sortGroupResByOrderList(SGroupResInfo *pGroupResInfo, SQueryRuntimeE SRowCompSupporter support = {.pRuntimeEnv = pRuntimeEnv, .dataOffset = dataOffset, .comFunc = getComparFunc(type, 0)}; - return taosArraySortPWithExt(pGroupResInfo->pRows, compareRowData, &support); + taosArraySortPWithExt(pGroupResInfo->pRows, compareRowData, &support); + return; } //setup the output buffer for each operator From ae972e2fb44e63bf56a08441dd2db9eac3cc5169 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Sun, 27 Jun 2021 09:12:14 +0800 Subject: [PATCH 27/71] [TD-4926]:fix core during operator time usage calculate --- src/query/src/qExecutor.c | 37 ++++++++++++++++++++++++++++++------- 1 file changed, 30 insertions(+), 7 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 1cd50df46b..336925c797 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3792,7 +3792,9 @@ void publishOperatorProfEvent(SOperatorInfo* operatorInfo, EQueryProfEventType e event.operatorType = operatorInfo->operatorType; SQInfo* qInfo = operatorInfo->pRuntimeEnv->qinfo; - taosArrayPush(qInfo->summary.queryProfEvents, &event); + if (qInfo->summary.queryProfEvents) { + taosArrayPush(qInfo->summary.queryProfEvents, &event); + } } void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code) { @@ -3801,7 +3803,9 @@ void publishQueryAbortEvent(SQInfo* pQInfo, int32_t code) { event.eventTime = taosGetTimestampUs(); event.abortCode = code; - 
taosArrayPush(pQInfo->summary.queryProfEvents, &event); + if (pQInfo->summary.queryProfEvents) { + taosArrayPush(pQInfo->summary.queryProfEvents, &event); + } } typedef struct { @@ -3837,10 +3841,21 @@ static void doOperatorExecProfOnce(SOperatorStackItem* item, SQueryProfEvent* ev } void calculateOperatorProfResults(SQInfo* pQInfo) { + if (pQInfo->summary.queryProfEvents == NULL) { + qDebug("query prof events array is null"); + return; + } + + if (pQInfo->summary.operatorProfResults == NULL) { + qDebug("operator prof results hash is null"); + return; + } + SArray* opStack = taosArrayInit(32, sizeof(SOperatorStackItem)); if (opStack == NULL) { return; } + size_t size = taosArrayGetSize(pQInfo->summary.queryProfEvents); SHashObj* profResults = pQInfo->summary.operatorProfResults; @@ -3897,11 +3912,13 @@ void queryCostStatis(SQInfo *pQInfo) { qDebug("QInfo:0x%"PRIx64" :cost summary: winResPool size:%.2f Kb, numOfWin:%"PRId64", tableInfoSize:%.2f Kb, hashTable:%.2f Kb", pQInfo->qId, pSummary->winInfoSize/1024.0, pSummary->numOfTimeWindows, pSummary->tableInfoSize/1024.0, pSummary->hashSize/1024.0); - SOperatorProfResult* opRes = taosHashIterate(pSummary->operatorProfResults, NULL); - while (opRes != NULL) { - qDebug("QInfo:0x%"PRIx64" :cost summary: operator : %d, exec times: %"PRId64", self time: %"PRId64, pQInfo->qId, - opRes->operatorType, opRes->sumRunTimes, opRes->sumSelfTime ); - opRes = taosHashIterate(pSummary->operatorProfResults, opRes); + if (pSummary->operatorProfResults) { + SOperatorProfResult* opRes = taosHashIterate(pSummary->operatorProfResults, NULL); + while (opRes != NULL) { + qDebug("QInfo:0x%" PRIx64 " :cost summary: operator : %d, exec times: %" PRId64 ", self time: %" PRId64, + pQInfo->qId, opRes->operatorType, opRes->sumRunTimes, opRes->sumSelfTime); + opRes = taosHashIterate(pSummary->operatorProfResults, opRes); + } } } @@ -4305,8 +4322,14 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr int32_t numOfTables = (int32_t)pQueryAttr->tableGroupInfo.numOfTables; pQInfo->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo)); pQInfo->summary.queryProfEvents = taosArrayInit(512, sizeof(SQueryProfEvent)); + if (pQInfo->summary.queryProfEvents == NULL) { + qDebug("failed to allocate query prof events array"); + } pQInfo->summary.operatorProfResults = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TINYINT), true, HASH_NO_LOCK); + if (pQInfo->summary.operatorProfResults == NULL) { + qDebug("failed to allocate operator prof results hash"); + } code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQueryAttr->tableGroupInfo.numOfTables, pOperator, param); if (code != TSDB_CODE_SUCCESS) { From 3451e7fba9a7a4652232303cd38988cbbf4dfb0c Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Sun, 27 Jun 2021 14:16:22 +0800 Subject: [PATCH 28/71] [TD-4926]:follow log format convention --- src/query/src/qExecutor.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 336925c797..4aca4d3534 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -3842,12 +3842,12 @@ static void doOperatorExecProfOnce(SOperatorStackItem* item, SQueryProfEvent* ev void calculateOperatorProfResults(SQInfo* pQInfo) { if (pQInfo->summary.queryProfEvents == NULL) { - qDebug("query prof events array is null"); + qDebug("QInfo:0x%"PRIx64" query prof events array is null", pQInfo->qId); return; } if (pQInfo->summary.operatorProfResults == NULL) { - qDebug("operator 
prof results hash is null"); + qDebug("QInfo:0x%"PRIx64" operator prof results hash is null", pQInfo->qId); return; } @@ -4323,12 +4323,12 @@ int32_t doInitQInfo(SQInfo* pQInfo, STSBuf* pTsBuf, void* tsdb, void* sourceOptr pQInfo->summary.tableInfoSize += (numOfTables * sizeof(STableQueryInfo)); pQInfo->summary.queryProfEvents = taosArrayInit(512, sizeof(SQueryProfEvent)); if (pQInfo->summary.queryProfEvents == NULL) { - qDebug("failed to allocate query prof events array"); + qDebug("QInfo:0x%"PRIx64" failed to allocate query prof events array", pQInfo->qId); } pQInfo->summary.operatorProfResults = taosHashInit(8, taosGetDefaultHashFunction(TSDB_DATA_TYPE_TINYINT), true, HASH_NO_LOCK); if (pQInfo->summary.operatorProfResults == NULL) { - qDebug("failed to allocate operator prof results hash"); + qDebug("QInfo:0x%"PRIx64" failed to allocate operator prof results hash", pQInfo->qId); } code = setupQueryRuntimeEnv(pRuntimeEnv, (int32_t) pQueryAttr->tableGroupInfo.numOfTables, pOperator, param); From 9a06248432a70b29ce76b4d77331e1056ba21726 Mon Sep 17 00:00:00 2001 From: Jun Li Date: Sun, 27 Jun 2021 06:13:33 -0700 Subject: [PATCH 29/71] Rename file and add some comment/changes (#6587) 1. rename semphone to semaphore 2. add comment for tsched.h 3. change the function signature for taosSchedulerTask, changing from return int to void. We currently don't check any return code of the function. 4. add some error handlings. For fatal error, just exit the process because the program may run into a random state. --- src/os/inc/os.h | 2 +- src/os/inc/{osSemphone.h => osSemaphore.h} | 4 +- .../darwin/{dwSemphone.c => dwSemaphore.c} | 0 .../detail/{osSemphone.c => osSemaphore.c} | 0 .../src/windows/{wSemphone.c => wSemaphore.c} | 0 src/util/inc/tsched.h | 39 +++++++++-- src/util/src/tsched.c | 67 ++++++++++++------- 7 files changed, 79 insertions(+), 33 deletions(-) rename src/os/inc/{osSemphone.h => osSemaphore.h} (97%) rename src/os/src/darwin/{dwSemphone.c => dwSemaphore.c} (100%) rename src/os/src/detail/{osSemphone.c => osSemaphore.c} (100%) rename src/os/src/windows/{wSemphone.c => wSemaphore.c} (100%) diff --git a/src/os/inc/os.h b/src/os/inc/os.h index 6731ca6d7d..903e80d5c7 100644 --- a/src/os/inc/os.h +++ b/src/os/inc/os.h @@ -29,7 +29,7 @@ extern "C" { #include "osMath.h" #include "osMemory.h" #include "osRand.h" -#include "osSemphone.h" +#include "osSemaphore.h" #include "osSignal.h" #include "osSleep.h" #include "osSocket.h" diff --git a/src/os/inc/osSemphone.h b/src/os/inc/osSemaphore.h similarity index 97% rename from src/os/inc/osSemphone.h rename to src/os/inc/osSemaphore.h index fe59095205..10d14700e0 100644 --- a/src/os/inc/osSemphone.h +++ b/src/os/inc/osSemaphore.h @@ -13,8 +13,8 @@ * along with this program. If not, see . 
*/ -#ifndef TDENGINE_OS_SEMPHONE_H -#define TDENGINE_OS_SEMPHONE_H +#ifndef TDENGINE_OS_SEMAPHORE_H +#define TDENGINE_OS_SEMAPHORE_H #ifdef __cplusplus extern "C" { diff --git a/src/os/src/darwin/dwSemphone.c b/src/os/src/darwin/dwSemaphore.c similarity index 100% rename from src/os/src/darwin/dwSemphone.c rename to src/os/src/darwin/dwSemaphore.c diff --git a/src/os/src/detail/osSemphone.c b/src/os/src/detail/osSemaphore.c similarity index 100% rename from src/os/src/detail/osSemphone.c rename to src/os/src/detail/osSemaphore.c diff --git a/src/os/src/windows/wSemphone.c b/src/os/src/windows/wSemaphore.c similarity index 100% rename from src/os/src/windows/wSemphone.c rename to src/os/src/windows/wSemaphore.c diff --git a/src/util/inc/tsched.h b/src/util/inc/tsched.h index 3e481cbc32..a1591512c1 100644 --- a/src/util/inc/tsched.h +++ b/src/util/inc/tsched.h @@ -28,10 +28,41 @@ typedef struct SSchedMsg { void *thandle; } SSchedMsg; -void *taosInitScheduler(int queueSize, int numOfThreads, const char *label); -void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl); -int taosScheduleTask(void *qhandle, SSchedMsg *pMsg); -void taosCleanUpScheduler(void *param); +/** + * Create a thread-safe ring-buffer based task queue and return the instance. A thread + * pool will be created to consume the messages in the queue. + * @param capacity the queue capacity + * @param numOfThreads the number of threads for the thread pool + * @param label the label of the queue + * @return the created queue scheduler + */ +void *taosInitScheduler(int capacity, int numOfThreads, const char *label); + +/** + * Create a thread-safe ring-buffer based task queue and return the instance. + * Same as taosInitScheduler, and it also print the queue status every 1 minite. + * @param capacity the queue capacity + * @param numOfThreads the number of threads for the thread pool + * @param label the label of the queue + * @param tmrCtrl the timer controller, tmr_ctrl_t* + * @return the created queue scheduler + */ +void *taosInitSchedulerWithInfo(int capacity, int numOfThreads, const char *label, void *tmrCtrl); + +/** + * Clean up the queue scheduler instance and free the memory. + * @param queueScheduler the queue scheduler to free + */ +void taosCleanUpScheduler(void *queueScheduler); + +/** + * Schedule a new task to run, the task is described by pMsg. + * The function may be blocked if no thread is available to execute the task. + * That may happen when all threads are busy. 
+ * @param queueScheduler the queue scheduler instance + * @param pMsg the message for the task + */ +void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg); #ifdef __cplusplus } diff --git a/src/util/src/tsched.c b/src/util/src/tsched.c index f014dd0fab..16142470c9 100644 --- a/src/util/src/tsched.c +++ b/src/util/src/tsched.c @@ -108,39 +108,47 @@ void *taosInitScheduler(int queueSize, int numOfThreads, const char *label) { void *taosInitSchedulerWithInfo(int queueSize, int numOfThreads, const char *label, void *tmrCtrl) { SSchedQueue* pSched = taosInitScheduler(queueSize, numOfThreads, label); - + if (tmrCtrl != NULL && pSched != NULL) { pSched->pTmrCtrl = tmrCtrl; taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); } - + return pSched; } -void *taosProcessSchedQueue(void *param) { +void *taosProcessSchedQueue(void *scheduler) { SSchedMsg msg; - SSchedQueue *pSched = (SSchedQueue *)param; + SSchedQueue *pSched = (SSchedQueue *)scheduler; + int ret = 0; while (1) { - if (tsem_wait(&pSched->fullSem) != 0) { - uError("wait %s fullSem failed(%s)", pSched->label, strerror(errno)); + if ((ret = tsem_wait(&pSched->fullSem)) != 0) { + uFatal("wait %s fullSem failed(%s)", pSched->label, strerror(errno)); + exit(ret); } if (pSched->stop) { break; } - if (pthread_mutex_lock(&pSched->queueMutex) != 0) - uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) { + uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } msg = pSched->queue[pSched->fullSlot]; memset(pSched->queue + pSched->fullSlot, 0, sizeof(SSchedMsg)); pSched->fullSlot = (pSched->fullSlot + 1) % pSched->queueSize; - if (pthread_mutex_unlock(&pSched->queueMutex) != 0) - uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + if ((ret = pthread_mutex_unlock(&pSched->queueMutex)) != 0) { + uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } - if (tsem_post(&pSched->emptySem) != 0) - uError("post %s emptySem failed(%s)", pSched->label, strerror(errno)); + if ((ret = tsem_post(&pSched->emptySem)) != 0) { + uFatal("post %s emptySem failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } if (msg.fp) (*(msg.fp))(&msg); @@ -151,30 +159,37 @@ void *taosProcessSchedQueue(void *param) { return NULL; } -int taosScheduleTask(void *qhandle, SSchedMsg *pMsg) { - SSchedQueue *pSched = (SSchedQueue *)qhandle; +void taosScheduleTask(void *queueScheduler, SSchedMsg *pMsg) { + SSchedQueue *pSched = (SSchedQueue *)queueScheduler; + int ret = 0; + if (pSched == NULL) { uError("sched is not ready, msg:%p is dropped", pMsg); - return 0; + return; } - if (tsem_wait(&pSched->emptySem) != 0) { - uError("wait %s emptySem failed(%s)", pSched->label, strerror(errno)); + if ((ret = tsem_wait(&pSched->emptySem)) != 0) { + uFatal("wait %s emptySem failed(%s)", pSched->label, strerror(errno)); + exit(ret); } - if (pthread_mutex_lock(&pSched->queueMutex) != 0) - uError("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + if ((ret = pthread_mutex_lock(&pSched->queueMutex)) != 0) { + uFatal("lock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } pSched->queue[pSched->emptySlot] = *pMsg; pSched->emptySlot = (pSched->emptySlot + 1) % pSched->queueSize; - if (pthread_mutex_unlock(&pSched->queueMutex) != 0) - uError("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + if ((ret 
= pthread_mutex_unlock(&pSched->queueMutex)) != 0) { + uFatal("unlock %s queueMutex failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } - if (tsem_post(&pSched->fullSem) != 0) - uError("post %s fullSem failed(%s)", pSched->label, strerror(errno)); - - return 0; + if ((ret = tsem_post(&pSched->fullSem)) != 0) { + uFatal("post %s fullSem failed(%s)", pSched->label, strerror(errno)); + exit(ret); + } } void taosCleanUpScheduler(void *param) { @@ -219,4 +234,4 @@ void taosDumpSchedulerStatus(void *qhandle, void *tmrId) { } taosTmrReset(taosDumpSchedulerStatus, DUMP_SCHEDULER_TIME_WINDOW, pSched, pSched->pTmrCtrl, &pSched->pTimer); -} +} \ No newline at end of file From 55d3e7a2ea682a682b5d81ac55f9bb8dafd58ee7 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 27 Jun 2021 21:45:18 +0800 Subject: [PATCH 30/71] Hotfix/sangshuduo/td 4902 for mastere (#6623) * [TD-4902] FIX with 1 char * Fix prompt of -m @ taosdemo Co-authored-by: SunShine Chan Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index b5781c6070..743c4ac668 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -982,9 +982,9 @@ static void parse_args(int argc, char *argv[], SArguments *arguments) { arguments->len_of_binary = atoi(argv[++i]); } else if (strcmp(argv[i], "-m") == 0) { if ((argc == i+1) || - (!isStringNumber(argv[i+1]))) { + (isStringNumber(argv[i+1]))) { printHelp(); - errorPrint("%s", "\n\t-m need a number following!\n"); + errorPrint("%s", "\n\t-m need a letter-initial string following!\n"); exit(EXIT_FAILURE); } arguments->tb_prefix = argv[++i]; From f48783df4f7b20bf291f6ba9bb4e6a86b66d4040 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Sun, 27 Jun 2021 22:45:38 +0800 Subject: [PATCH 31/71] fix converity scan issue. 
(#6641) --- src/kit/taosdump/taosdump.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/kit/taosdump/taosdump.c b/src/kit/taosdump/taosdump.c index 05c6b1efbb..33118ce311 100644 --- a/src/kit/taosdump/taosdump.c +++ b/src/kit/taosdump/taosdump.c @@ -1126,7 +1126,7 @@ int taosGetTableDes( strncpy(tableDes->cols[count].field, (char *)row[TSDB_DESCRIBE_METRIC_FIELD_INDEX], fields[TSDB_DESCRIBE_METRIC_FIELD_INDEX].bytes); strncpy(tableDes->cols[count].type, (char *)row[TSDB_DESCRIBE_METRIC_TYPE_INDEX], - min(16, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes)); + min(15, fields[TSDB_DESCRIBE_METRIC_TYPE_INDEX].bytes)); tableDes->cols[count].length = *((int *)row[TSDB_DESCRIBE_METRIC_LENGTH_INDEX]); strncpy(tableDes->cols[count].note, (char *)row[TSDB_DESCRIBE_METRIC_NOTE_INDEX], fields[TSDB_DESCRIBE_METRIC_NOTE_INDEX].bytes); From 3d1f8f8bcf9d12ca220512e3ba0b13e2d13c28a2 Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 28 Jun 2021 08:33:20 +0800 Subject: [PATCH 32/71] fix mem leak --- src/client/src/tscUtil.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 83be33ffef..937fe1000a 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1304,7 +1304,7 @@ void tscResetSqlCmd(SSqlCmd* pCmd, bool clearCachedMeta) { if (pCmd->pTableMetaMap != NULL) { STableMetaVgroupInfo* p = taosHashIterate(pCmd->pTableMetaMap, NULL); while (p) { - tfree(p->pVgroupInfo); + tscVgroupInfoClear(p->pVgroupInfo); tfree(p->pTableMeta); p = taosHashIterate(pCmd->pTableMetaMap, p); } From b62a1d8baee2d23dcfd2ce149490a8001082e706 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 28 Jun 2021 10:54:03 +0800 Subject: [PATCH 33/71] [TD-2639] : fix typo in doc index. --- documentation20/cn/00.index/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/00.index/docs.md b/documentation20/cn/00.index/docs.md index c900cd373d..ed7156be17 100644 --- a/documentation20/cn/00.index/docs.md +++ b/documentation20/cn/00.index/docs.md @@ -63,7 +63,7 @@ TDengine是一个高效的存储、查询、分析时序大数据的平台,专 ## [高级功能](/advanced-features) * [连续查询(Continuous Query)](/advanced-features#continuous-query):基于滑动窗口,定时自动的对数据流进行查询计算 -* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):象典型的消息队列,应用可订阅接收到的最新数据 +* [数据订阅(Publisher/Subscriber)](/advanced-features#subscribe):类似典型的消息队列,应用可订阅接收到的最新数据 * [缓存(Cache)](/advanced-features#cache):每个设备最新的数据都会缓存在内存中,可快速获取 * [报警监测](/advanced-features#alert):根据配置规则,自动监测超限行为数据,并主动推送 From 1d5b8fa83c0457054156010bc657c7e3d70d0a56 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 28 Jun 2021 11:01:09 +0800 Subject: [PATCH 34/71] [TD-4928] : update version dependency about JDBC driver & TDengine server. 
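The tsched.h comments added in PATCH 29 above describe a ring-buffer task queue (taosInitScheduler) drained by a pool of worker threads, with taosScheduleTask enqueueing an SSchedMsg and possibly blocking while the queue is full. A minimal usage sketch, assuming the SSchedMsg field names fp and ahandle from tsched.h and the signatures shown in that patch; it is an illustration, not code from the series:

```c
#include <stdio.h>
#include <unistd.h>
#include "tsched.h"   /* SSchedMsg, taosInitScheduler, taosScheduleTask, taosCleanUpScheduler */

/* Executed by one of the scheduler's worker threads. */
static void sayHello(SSchedMsg *pMsg) {
  printf("task ran, ahandle=%s\n", (const char *)pMsg->ahandle);
}

int main(void) {
  void *queue = taosInitScheduler(128, 2, "demo");   /* capacity 128, 2 workers */

  SSchedMsg msg = {0};
  msg.fp      = sayHello;            /* callback to run */
  msg.ahandle = "hello";             /* opaque argument carried with the task */
  taosScheduleTask(queue, &msg);     /* may block while all queue slots are occupied */

  sleep(1);                          /* crude wait so a worker can pick the task up */
  taosCleanUpScheduler(queue);
  return 0;
}
```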
--- documentation20/cn/08.connector/01.java/docs.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/08.connector/01.java/docs.md b/documentation20/cn/08.connector/01.java/docs.md index fb47d79268..546e2dbcd3 100644 --- a/documentation20/cn/08.connector/01.java/docs.md +++ b/documentation20/cn/08.connector/01.java/docs.md @@ -532,8 +532,9 @@ Query OK, 1 row(s) in set (0.000141s) | taos-jdbcdriver 版本 | TDengine 版本 | JDK 版本 | | -------------------- | ----------------- | -------- | -| 2.0.22 | 2.0.18.0 及以上 | 1.8.x | -| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.0 | 1.8.x | +| 2.0.31 | 2.1.3.0 及以上 | 1.8.x | +| 2.0.22 - 20.0.30 | 2.0.18.0 - 2.1.2.x | 1.8.x | +| 2.0.12 - 2.0.21 | 2.0.8.0 - 2.0.17.x | 1.8.x | | 2.0.4 - 2.0.11 | 2.0.0.0 - 2.0.7.x | 1.8.x | | 1.0.3 | 1.6.1.x 及以上 | 1.8.x | | 1.0.2 | 1.6.1.x 及以上 | 1.8.x | From 839ad25feeb1ae8842ac2775cb95bd2c67799a05 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 28 Jun 2021 11:12:39 +0800 Subject: [PATCH 35/71] [td-4905]:fix import caused crash. --- src/client/src/tscParseInsert.c | 1 - src/client/src/tscUtil.c | 2 + tests/script/general/parser/gendata.sh | 2 + tests/script/general/parser/import_file.sim | 45 +++++++++++++++++++-- 4 files changed, 45 insertions(+), 5 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index a158162dc5..137a7be7c7 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -1580,7 +1580,6 @@ void tscImportDataFromFile(SSqlObj *pSql) { SImportFileSupport *pSupporter = calloc(1, sizeof(SImportFileSupport)); SSqlObj *pNew = createSubqueryObj(pSql, 0, parseFileSendDataBlock, pSupporter, TSDB_SQL_INSERT, NULL); - pCmd->count = 1; FILE *fp = fopen(pCmd->payload, "rb"); if (fp == NULL) { diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 74dbe42eeb..52d1092107 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -3278,6 +3278,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pnCmd->insertParam.pTableNameList = NULL; pnCmd->insertParam.pTableBlockHashList = NULL; + memset(&pnCmd->insertParam.tagData, 0, sizeof(STagData)); + if (tscAddQueryInfo(pnCmd) != TSDB_CODE_SUCCESS) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; diff --git a/tests/script/general/parser/gendata.sh b/tests/script/general/parser/gendata.sh index f56fdc3468..b2074147ca 100755 --- a/tests/script/general/parser/gendata.sh +++ b/tests/script/general/parser/gendata.sh @@ -4,3 +4,5 @@ Cur_Dir=$(pwd) echo $Cur_Dir echo "'2020-1-1 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql +echo "'2020-1-2 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql +echo "'2020-1-3 1:1:1','abc','device',123,'9876', 'abc', 'net', 'mno', 'province', 'city', 'al'" >> ~/data.sql diff --git a/tests/script/general/parser/import_file.sim b/tests/script/general/parser/import_file.sim index e9f0f1ed08..cf11194ba7 100644 --- a/tests/script/general/parser/import_file.sim +++ b/tests/script/general/parser/import_file.sim @@ -15,6 +15,8 @@ $inFileName = '~/data.csv' $numOfRows = 10000 system general/parser/gendata.sh +sql create table stbx (ts TIMESTAMP, collect_area NCHAR(12), device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) tags(a int, b binary(12)); + sql create table tbx (ts TIMESTAMP, collect_area NCHAR(12), 
device_id BINARY(16), imsi BINARY(16), imei BINARY(16), mdn BINARY(10), net_type BINARY(4), mno NCHAR(4), province NCHAR(10), city NCHAR(16), alarm BINARY(2)) print ====== create tables success, starting import data @@ -23,13 +25,48 @@ sql import into tbx file '~/data.sql' sql select count(*) from tbx if $rows != 1 then + print expect 1, actual: $rows return -1 endi -#if $data00 != $numOfRows then -# print "expect: $numOfRows, act: $data00" -# return -1 -#endi +if $data00 != 3 then + return -1 +endi + +sql drop table tbx; + +sql insert into tbx using stbx tags(1,'abc') file '~/data.sql'; +sql insert into tbx using stbx tags(1,'abc') file '~/data.sql'; + +sql select count(*) from tbx +if $rows != 1 then + return -1 +endi + +if $data00 != 3 then + return -1 +endi + +sql drop table tbx; +sql insert into tbx using stbx(b) tags('abcf') file '~/data.sql'; + +sql select ts,a,b from tbx; +if $rows != 3 then + return -1 +endi + +if $data00 != @20-01-01 01:01:01.000@ then + print expect 20-01-01 01:01:01.000 , actual: $data00 + return -1 +endi + +if $data01 != NULL then + return -1 +endi + +if $data02 != @abcf@ then + return -1 +endi system rm -f ~/data.sql From 1e592fb52bdb607f3c592457b06e386dbd707e6b Mon Sep 17 00:00:00 2001 From: Minglei Jin Date: Mon, 28 Jun 2021 12:23:26 +0800 Subject: [PATCH 36/71] [TD-4950]: fix undefined left bit shift --- src/mnode/src/mnodeTable.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/mnode/src/mnodeTable.c b/src/mnode/src/mnodeTable.c index 20edb02c38..6c96f1f0d2 100644 --- a/src/mnode/src/mnodeTable.c +++ b/src/mnode/src/mnodeTable.c @@ -1068,7 +1068,7 @@ static int32_t mnodeProcessCreateSuperTableMsg(SMnodeMsg *pMsg) { pStable->info.tableId = strdup(pCreate->tableName); pStable->info.type = TSDB_SUPER_TABLE; pStable->createdTime = taosGetTimestampMs(); - int64_t x = (us&0x000000FFFFFFFFFF); + uint64_t x = (us&0x000000FFFFFFFFFF); x = x<<24; pStable->uid = x + ((sdbGetVersion() & ((1ul << 16) - 1ul)) << 8) + (taosRand() & ((1ul << 8) - 1ul)); pStable->sversion = 0; From 6719d41b15b6d97295539d6d7a8fe680dc15c49d Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 28 Jun 2021 14:35:57 +0800 Subject: [PATCH 37/71] [TD-4821] : add new operator 'IN'. 
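One remark on the TD-4950 hunk above: the masked timestamp keeps up to 40 significant bits, so `x << 24` can move a bit into the sign position; on a signed 64-bit type that shift is undefined behaviour in C, whereas on uint64_t it is well-defined modulo-2^64 arithmetic, which is exactly why the declaration changes from int64_t to uint64_t. A small stand-alone illustration (a sketch, not the mnode code):

```c
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

int main(void) {
  int64_t us = INT64_C(0x5AF3107A4000);              /* an example microsecond timestamp */

  /* Undefined: bit 39 of the masked value would be shifted into the sign bit.
   *   int64_t bad = us & INT64_C(0x000000FFFFFFFFFF);
   *   bad = bad << 24;
   */

  /* Well-defined: the same shift on an unsigned 64-bit value wraps mod 2^64. */
  uint64_t x = (uint64_t)(us & INT64_C(0x000000FFFFFFFFFF));
  x = x << 24;

  printf("0x%016" PRIx64 "\n", x);
  return 0;
}
```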
--- documentation20/cn/12.taos-sql/docs.md | 28 ++++++++++++++------------ 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 844865f6db..19fd92d8f6 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -703,22 +703,24 @@ Query OK, 1 row(s) in set (0.001091s) ### 支持的条件过滤操作 -| **Operation** | **Note** | **Applicable Data Types** | -| --------------- | ----------------------------- | ------------------------------------- | -| > | larger than | **`timestamp`** and all numeric types | -| < | smaller than | **`timestamp`** and all numeric types | -| >= | larger than or equal to | **`timestamp`** and all numeric types | -| <= | smaller than or equal to | **`timestamp`** and all numeric types | -| = | equal to | all types | -| <> | not equal to | all types | -| between and | within a certain range | **`timestamp`** and all numeric types | -| % | match with any char sequences | **`binary`** **`nchar`** | -| _ | match with a single char | **`binary`** **`nchar`** | +| **Operation** | **Note** | **Applicable Data Types** | +| --------------- | ----------------------------- | ----------------------------------------- | +| > | larger than | **`timestamp`** and all numeric types | +| < | smaller than | **`timestamp`** and all numeric types | +| >= | larger than or equal to | **`timestamp`** and all numeric types | +| <= | smaller than or equal to | **`timestamp`** and all numeric types | +| = | equal to | all types | +| <> | not equal to | all types | +| between and | within a certain range | **`timestamp`** and all numeric types | +| in | matches any value in a set | all types except first column `timestamp` | +| % | match with any char sequences | **`binary`** **`nchar`** | +| _ | match with a single char | **`binary`** **`nchar`** | 1. <> 算子也可以写为 != ,请注意,这个算子不能用于数据表第一列的 timestamp 字段。 2. 同时进行多个字段的范围过滤,需要使用关键词 AND 来连接不同的查询条件,暂不支持 OR 连接的不同列之间的查询过滤条件。 -3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如:((value > 20 AND value < 30) OR (value < 12)) 。 -4. 从 2.0.17 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 +3. 针对单一字段的过滤,如果是时间过滤条件,则一条语句中只支持设定一个;但针对其他的(普通)列或标签列,则可以使用 `OR` 关键字进行组合条件的查询过滤。例如: `((value > 20 AND value < 30) OR (value < 12))`。 +4. 从 2.0.17.0 版本开始,条件过滤开始支持 BETWEEN AND 语法,例如 `WHERE col2 BETWEEN 1.5 AND 3.25` 表示查询条件为“1.5 ≤ col2 ≤ 3.25”。 +5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。需注意:BOOL 类型必须写作 `{true, false}` 的形式,而不能写成 0 或 1;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值相等既能匹配成功。 +5. 从 2.1.4.0 版本开始,条件过滤开始支持 IN 算子,例如 `WHERE city IN ('Beijing', 'Shanghai')`。说明:BOOL 类型写作 `{true, false}` 或 `{0, 1}` 均可,但不能写作 0、1 之外的整数;FLOAT 和 DOUBLE 类型会受到浮点数精度影响,集合内的值在精度范围内认为和数据行的值完全相等才能匹配成功。
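As a closing illustration of the IN operator this last patch documents, the same predicate can be issued through the C connector. This is an editorial sketch: the connection parameters and the `meters` table with a `city` tag are assumptions, and only the usual client entry points (taos_connect, taos_query, taos_fetch_row, taos_print_row, taos_free_result) are used:

```c
#include <stdio.h>
#include "taos.h"   /* TDengine C client */

int main(void) {
  TAOS *conn = taos_connect("127.0.0.1", "root", "taosdata", "test", 0);
  if (conn == NULL) {
    fprintf(stderr, "failed to connect\n");
    return 1;
  }

  /* IN matches any value in the set; see the restrictions on BOOL/FLOAT above. */
  TAOS_RES *res = taos_query(conn,
      "SELECT * FROM meters WHERE city IN ('Beijing', 'Shanghai')");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    int         numFields = taos_num_fields(res);
    TAOS_FIELD *fields    = taos_fetch_fields(res);
    TAOS_ROW    row;
    while ((row = taos_fetch_row(res)) != NULL) {
      char line[4096] = {0};
      taos_print_row(line, row, fields, numFields);
      printf("%s\n", line);
    }
  }
  taos_free_result(res);
  taos_close(conn);
  return 0;
}
```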