From b02219027916e9f55f8564c78f2723f465112943 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 29 Jul 2021 03:27:58 +0800 Subject: [PATCH 01/20] [TD-5664] add more state of compact --- src/inc/tsdb.h | 2 +- src/tsdb/inc/tsdbint.h | 4 ++-- src/tsdb/src/tsdbCompact.c | 8 +++++--- src/tsdb/src/tsdbMain.c | 4 ++-- src/vnode/src/vnodeMgmt.c | 2 +- 5 files changed, 11 insertions(+), 9 deletions(-) diff --git a/src/inc/tsdb.h b/src/inc/tsdb.h index 79d9029dbc..7880dc43b2 100644 --- a/src/inc/tsdb.h +++ b/src/inc/tsdb.h @@ -94,7 +94,7 @@ STsdbRepo *tsdbOpenRepo(STsdbCfg *pCfg, STsdbAppH *pAppH); int tsdbCloseRepo(STsdbRepo *repo, int toCommit); int32_t tsdbConfigRepo(STsdbRepo *repo, STsdbCfg *pCfg); int tsdbGetState(STsdbRepo *repo); -bool tsdbInCompact(STsdbRepo *repo); +int8_t tsdbGetCompactState(STsdbRepo *repo); // --------- TSDB TABLE DEFINITION typedef struct { uint64_t uid; // the unique table ID diff --git a/src/tsdb/inc/tsdbint.h b/src/tsdb/inc/tsdbint.h index dd43e39310..84c7ba4e4b 100644 --- a/src/tsdb/inc/tsdbint.h +++ b/src/tsdb/inc/tsdbint.h @@ -92,7 +92,7 @@ struct STsdbRepo { pthread_mutex_t mutex; bool repoLocked; int32_t code; // Commit code - bool inCompact; // is in compact process? + int8_t compactState; // compact state: inCompact/noCompact/waitingCompact? }; #define REPO_ID(r) (r)->config.tsdbId @@ -139,4 +139,4 @@ static FORCE_INLINE int tsdbGetNextMaxTables(int tid) { } #endif -#endif /* _TD_TSDB_INT_H_ */ \ No newline at end of file +#endif /* _TD_TSDB_INT_H_ */ diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 5211ee3c61..a85fdc25c5 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -58,6 +58,7 @@ static int tsdbCompactFSetImpl(SCompactH *pComph); static int tsdbWriteBlockToRightFile(SCompactH *pComph, STable *pTable, SDataCols *pDataCols, void **ppBuf, void **ppCBuf); +enum {TSDB_NO_COMPACT, TSDB_IN_COMPACT, TSDB_WAITING_COMPACT}; int tsdbCompact(STsdbRepo *pRepo) { return tsdbAsyncCompact(pRepo); } void *tsdbCompactImpl(STsdbRepo *pRepo) { @@ -89,16 +90,17 @@ _err: } static int tsdbAsyncCompact(STsdbRepo *pRepo) { + pRepo->compactState = TSDB_WAITING_COMPACT; tsem_wait(&(pRepo->readyToCommit)); return tsdbScheduleCommit(pRepo, COMPACT_REQ); } static void tsdbStartCompact(STsdbRepo *pRepo) { - ASSERT(!pRepo->inCompact); + assert(pRepo->compactState != TSDB_IN_COMPACT); tsdbInfo("vgId:%d start to compact!", REPO_ID(pRepo)); tsdbStartFSTxn(pRepo, 0, 0); pRepo->code = TSDB_CODE_SUCCESS; - pRepo->inCompact = true; + pRepo->compactState = TSDB_IN_COMPACT; } static void tsdbEndCompact(STsdbRepo *pRepo, int eno) { @@ -107,7 +109,7 @@ static void tsdbEndCompact(STsdbRepo *pRepo, int eno) { } else { tsdbEndFSTxn(pRepo); } - pRepo->inCompact = false; + pRepo->compactState = TSDB_NO_COMPACT; tsdbInfo("vgId:%d compact over, %s", REPO_ID(pRepo), (eno == TSDB_CODE_SUCCESS) ? 
"succeed" : "failed"); tsem_post(&(pRepo->readyToCommit)); } diff --git a/src/tsdb/src/tsdbMain.c b/src/tsdb/src/tsdbMain.c index f3a7c4b7ee..44460a7db3 100644 --- a/src/tsdb/src/tsdbMain.c +++ b/src/tsdb/src/tsdbMain.c @@ -197,7 +197,7 @@ STsdbRepoInfo *tsdbGetStatus(STsdbRepo *pRepo) { return NULL; } int tsdbGetState(STsdbRepo *repo) { return repo->state; } -bool tsdbInCompact(STsdbRepo *repo) { return repo->inCompact; } +int8_t tsdbGetCompactState(STsdbRepo *repo) { return (int8_t)(repo->compactState); } void tsdbReportStat(void *repo, int64_t *totalPoints, int64_t *totalStorage, int64_t *compStorage) { ASSERT(repo != NULL); @@ -537,7 +537,7 @@ static STsdbRepo *tsdbNewRepo(STsdbCfg *pCfg, STsdbAppH *pAppH) { pRepo->state = TSDB_STATE_OK; pRepo->code = TSDB_CODE_SUCCESS; - pRepo->inCompact = false; + pRepo->compactState = 0; pRepo->config = *pCfg; if (pAppH) { pRepo->appH = *pAppH; diff --git a/src/vnode/src/vnodeMgmt.c b/src/vnode/src/vnodeMgmt.c index e14b5a385e..7e427988b5 100644 --- a/src/vnode/src/vnodeMgmt.c +++ b/src/vnode/src/vnodeMgmt.c @@ -160,7 +160,7 @@ static void vnodeBuildVloadMsg(SVnodeObj *pVnode, SStatusMsg *pStatus) { pLoad->status = pVnode->status; pLoad->role = pVnode->role; pLoad->replica = pVnode->syncCfg.replica; - pLoad->compact = (pVnode->tsdb != NULL) && tsdbInCompact(pVnode->tsdb) ? 1 : 0; + pLoad->compact = (pVnode->tsdb != NULL) ? tsdbGetCompactState(pVnode->tsdb) : 0; } int32_t vnodeGetVnodeList(int32_t vnodeList[], int32_t *numOfVnodes) { From f25a01f83a2d4277c2adfb37cfa5b6f838d60573 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 29 Jul 2021 05:00:30 +0800 Subject: [PATCH 02/20] [TD-5664] add more state of compact --- src/tsdb/src/tsdbCompact.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 6ff0693d46..98888924ec 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -90,6 +90,10 @@ _err: } static int tsdbAsyncCompact(STsdbRepo *pRepo) { + if (pRepo->compactState != TSDB_NO_COMPACT) { + tsdbInfo("vgId:%d not compact tsdb again", REPO_ID(pRepo)); + return 0; + } pRepo->compactState = TSDB_WAITING_COMPACT; tsem_wait(&(pRepo->readyToCommit)); return tsdbScheduleCommit(pRepo, COMPACT_REQ); From bb2b2ec9f4f11c45c3db01c0f3f3b77634d8f6d9 Mon Sep 17 00:00:00 2001 From: shenglian zhou Date: Wed, 28 Jul 2021 16:59:04 +0800 Subject: [PATCH 03/20] [TD-5594]:calculate tag index in table meta when modify tag type --- src/client/src/tscSQLParser.c | 8 +++++--- tests/script/general/parser/alter_stable.sim | 5 ++++- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 1c12f19834..6360f69145 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -6078,10 +6078,12 @@ int32_t setAlterTableInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSchema* pSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); int16_t numOfTags = tscGetNumOfTags(pTableMetaInfo->pTableMeta); - int16_t i; + int32_t numOfCols = tscGetNumOfColumns(pTableMetaInfo->pTableMeta); + int32_t tagIndex = columnIndex.columnIndex - numOfCols; + assert(tagIndex>=0); uint32_t nLen = 0; - for (i = 0; i < numOfTags; ++i) { - nLen += (i != columnIndex.columnIndex) ? pSchema[i].bytes : pItem->bytes; + for (int i = 0; i < numOfTags; ++i) { + nLen += (i != tagIndex) ? 
pSchema[i].bytes : pItem->bytes; } if (nLen >= TSDB_MAX_TAGS_LEN) { return invalidOperationMsg(pMsg, msg24); diff --git a/tests/script/general/parser/alter_stable.sim b/tests/script/general/parser/alter_stable.sim index afdd7d3edf..1406af6087 100644 --- a/tests/script/general/parser/alter_stable.sim +++ b/tests/script/general/parser/alter_stable.sim @@ -35,7 +35,10 @@ sql alter table tb1 set tag name = "" sql alter table tb1 set tag name = "shenzhen" sql alter table tb1 set tag len = 379 +# case TD-5594 +sql create stable st5520(ts timestamp, f int) tags(t0 bool, t1 nchar(4093), t2 nchar(1)) +sql_error alter stable st5520 modify tag t2 nchar(2); # test end sql drop database $db -system sh/exec.sh -n dnode1 -s stop -x SIGINT \ No newline at end of file +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 95f304e4762064d24cb3964c84d86517694f3b69 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 29 Jul 2021 11:49:18 +0800 Subject: [PATCH 04/20] [TD-5664] add more state of compact --- src/tsdb/src/tsdbCompact.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/tsdb/src/tsdbCompact.c b/src/tsdb/src/tsdbCompact.c index 98888924ec..62f9e41119 100644 --- a/src/tsdb/src/tsdbCompact.c +++ b/src/tsdb/src/tsdbCompact.c @@ -58,7 +58,7 @@ static int tsdbCompactFSetImpl(SCompactH *pComph); static int tsdbWriteBlockToRightFile(SCompactH *pComph, STable *pTable, SDataCols *pDataCols, void **ppBuf, void **ppCBuf); -enum {TSDB_NO_COMPACT, TSDB_IN_COMPACT, TSDB_WAITING_COMPACT}; +enum { TSDB_NO_COMPACT, TSDB_IN_COMPACT, TSDB_WAITING_COMPACT}; int tsdbCompact(STsdbRepo *pRepo) { return tsdbAsyncCompact(pRepo); } void *tsdbCompactImpl(STsdbRepo *pRepo) { @@ -91,7 +91,7 @@ _err: static int tsdbAsyncCompact(STsdbRepo *pRepo) { if (pRepo->compactState != TSDB_NO_COMPACT) { - tsdbInfo("vgId:%d not compact tsdb again", REPO_ID(pRepo)); + tsdbInfo("vgId:%d not compact tsdb again ", REPO_ID(pRepo)); return 0; } pRepo->compactState = TSDB_WAITING_COMPACT; From d2218cacdfea2eb0e9e0168a5baaa2b790594356 Mon Sep 17 00:00:00 2001 From: markswang <792637585@qq.com> Date: Thu, 29 Jul 2021 16:46:05 +0800 Subject: [PATCH 05/20] [TD-5539] fix core dump caused by unsupport query statement --- src/client/src/tscUtil.c | 115 ++++++++++++++++++++------------------- 1 file changed, 58 insertions(+), 57 deletions(-) diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 1c610b67a5..f4f0dda8da 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1286,10 +1286,10 @@ static void tscDestroyResPointerInfo(SSqlRes* pRes) { for (int i = 0; i < pRes->numOfCols; i++) { tfree(pRes->buffer[i]); } - + pRes->numOfCols = 0; } - + tfree(pRes->pRsp); tfree(pRes->tsrow); @@ -1729,7 +1729,7 @@ int32_t tscCreateDataBlock(size_t defaultSize, int32_t rowSize, int32_t startOff if (dataBuf->nAllocSize <= dataBuf->headerSize) { dataBuf->nAllocSize = dataBuf->headerSize * 2; } - + //dataBuf->pData = calloc(1, dataBuf->nAllocSize); dataBuf->pData = malloc(dataBuf->nAllocSize); if (dataBuf->pData == NULL) { @@ -1845,7 +1845,7 @@ static SMemRow tdGenMemRowFromBuilder(SMemRowBuilder* pBuilder) { toffset += TYPE_BYTES[pSchema[j].type]; ++j; } - + #if 0 // no need anymore while (i < nColsBound) { p = payloadNextCol(p); @@ -2015,7 +2015,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl // the maximum expanded size in byte when a row-wise data is converted to SDataRow format int32_t expandSize = getRowExpandSize(pOneTableBlock->pTableMeta); STableDataBlocks* dataBuf = 
NULL; - + int32_t ret = tscGetDataBlockFromList(pVnodeDataBlockHashList, pOneTableBlock->vgId, TSDB_PAYLOAD_SIZE, INSERT_HEAD_SIZE, 0, &pOneTableBlock->tableName, pOneTableBlock->pTableMeta, &dataBuf, pVnodeDataBlockList); if (ret != TSDB_CODE_SUCCESS) { @@ -2069,7 +2069,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl pInsertParam->objectId, tNameGetTableName(&pOneTableBlock->tableName), pBlocks->tid, pBlocks->numOfRows, pBlocks->sversion, blkKeyInfo.pKeyTuple->skey, pLastKeyTuple->skey); } - + int32_t len = pBlocks->numOfRows * (pOneTableBlock->rowSize + expandSize) + sizeof(STColumn) * tscGetNumOfColumns(pOneTableBlock->pTableMeta); pBlocks->tid = htonl(pBlocks->tid); @@ -2093,7 +2093,7 @@ int32_t tscMergeTableDataBlocks(SInsertStatementParam *pInsertParam, bool freeBl }else { tscDebug("0x%"PRIx64" table %s data block is empty", pInsertParam->objectId, pOneTableBlock->tableName.tname); } - + p = taosHashIterate(pInsertParam->pTableBlockHashList, p); if (p == NULL) { break; @@ -2157,7 +2157,7 @@ int tscAllocPayload(SSqlCmd* pCmd, int size) { pCmd->payload = b; pCmd->allocSize = size; } - + memset(pCmd->payload, 0, pCmd->allocSize); } @@ -2174,7 +2174,7 @@ TAOS_FIELD tscCreateField(int8_t type, const char* name, int16_t bytes) { SInternalField* tscFieldInfoAppend(SFieldInfo* pFieldInfo, TAOS_FIELD* pField) { assert(pFieldInfo != NULL); pFieldInfo->numOfOutput++; - + struct SInternalField info = { .pExpr = NULL, .visible = true }; info.field = *pField; @@ -2266,13 +2266,13 @@ int32_t tscGetResRowLength(SArray* pExprList) { if (num == 0) { return 0; } - + int32_t size = 0; for(int32_t i = 0; i < num; ++i) { SExprInfo* pExpr = taosArrayGetP(pExprList, i); size += pExpr->base.resBytes; } - + return size; } @@ -2412,7 +2412,7 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo snprintf(p->colInfo.name, len, "%s.%s", pTableMetaInfo->aliasName, pSchema->name); } } - + p->colInfo.flag = colType; p->colInfo.colIndex = pColIndex->columnIndex; @@ -2424,7 +2424,7 @@ SExprInfo* tscExprCreate(STableMetaInfo* pTableMetaInfo, int16_t functionId, SCo if (pTableMetaInfo->pTableMeta) { p->uid = pTableMetaInfo->pTableMeta->id.uid; } - + return pExpr; } @@ -2510,18 +2510,18 @@ SExprInfo* tscExprGet(SQueryInfo* pQueryInfo, int32_t index) { */ void tscExprDestroy(SArray* pExprInfo) { size_t size = taosArrayGetSize(pExprInfo); - + for(int32_t i = 0; i < size; ++i) { SExprInfo* pExpr = taosArrayGetP(pExprInfo, i); sqlExprDestroy(pExpr); } - + taosArrayDestroy(pExprInfo); } int32_t tscExprCopy(SArray* dst, const SArray* src, uint64_t uid, bool deepcopy) { assert(src != NULL && dst != NULL); - + size_t size = taosArrayGetSize(src); for (int32_t i = 0; i < size; ++i) { SExprInfo* pExpr = taosArrayGetP(src, i); @@ -2606,7 +2606,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t if (columnIndex < 0) { return NULL; } - + size_t numOfCols = taosArrayGetSize(pColumnList); int32_t i = 0; @@ -2636,7 +2636,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t taosArrayInsert(pColumnList, i, &b); } else { SColumn* pCol = taosArrayGetP(pColumnList, i); - + if (i < numOfCols && (pCol->columnIndex > columnIndex || pCol->tableUid != uid)) { SColumn* b = calloc(1, sizeof(SColumn)); if (b == NULL) { @@ -2660,7 +2660,7 @@ SColumn* tscColumnListInsert(SArray* pColumnList, int32_t columnIndex, uint64_t SColumn* tscColumnClone(const SColumn* src) { assert(src != NULL); - + SColumn* dst = calloc(1, 
sizeof(SColumn)); if (dst == NULL) { return NULL; @@ -2689,7 +2689,7 @@ void tscColumnCopy(SColumn* pDest, const SColumn* pSrc) { void tscColumnListCopy(SArray* dst, const SArray* src, uint64_t tableUid) { assert(src != NULL && dst != NULL); - + size_t num = taosArrayGetSize(src); for (int32_t i = 0; i < num; ++i) { SColumn* pCol = taosArrayGetP(src, i); @@ -2800,18 +2800,19 @@ void tscDequoteAndTrimToken(SStrToken* pToken) { } int32_t tscValidateName(SStrToken* pToken) { - if (pToken->type != TK_STRING && pToken->type != TK_ID) { + if (pToken == NULL || pToken->z == NULL || + (pToken->type != TK_STRING && pToken->type != TK_ID)) { return TSDB_CODE_TSC_INVALID_OPERATION; } char* sep = strnchr(pToken->z, TS_PATH_DELIMITER[0], pToken->n, true); if (sep == NULL) { // single part if (pToken->type == TK_STRING) { - + tscDequoteAndTrimToken(pToken); tscStrToLower(pToken->z, pToken->n); //pToken->n = (uint32_t)strtrim(pToken->z); - + int len = tGetToken(pToken->z, &pToken->type); // single token, validate it @@ -2863,7 +2864,7 @@ int32_t tscValidateName(SStrToken* pToken) { if (pToken->type == TK_STRING && validateQuoteToken(pToken) != TSDB_CODE_SUCCESS) { return TSDB_CODE_TSC_INVALID_OPERATION; } - + // re-build the whole name string if (pStr[firstPartLen] == TS_PATH_DELIMITER[0]) { // first part do not have quote do nothing @@ -2902,7 +2903,7 @@ bool tscValidateColumnId(STableMetaInfo* pTableMetaInfo, int32_t colId, int32_t SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); STableComInfo tinfo = tscGetTableInfo(pTableMetaInfo->pTableMeta); - + int32_t numOfTotal = tinfo.numOfTags + tinfo.numOfColumns; for (int32_t i = 0; i < numOfTotal; ++i) { @@ -2947,21 +2948,21 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { dest->relType = src->relType; - + if (src->pCond == NULL) { return 0; } - + size_t s = taosArrayGetSize(src->pCond); dest->pCond = taosArrayInit(s, sizeof(SCond)); - + for (int32_t i = 0; i < s; ++i) { SCond* pCond = taosArrayGet(src->pCond, i); - + SCond c = {0}; c.len = pCond->len; c.uid = pCond->uid; - + if (pCond->len > 0) { assert(pCond->cond != NULL); c.cond = malloc(c.len); @@ -2971,7 +2972,7 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { memcpy(c.cond, pCond->cond, c.len); } - + taosArrayPush(dest->pCond, &c); } @@ -2980,14 +2981,14 @@ int32_t tscTagCondCopy(STagCond* dest, const STagCond* src) { void tscTagCondRelease(STagCond* pTagCond) { free(pTagCond->tbnameCond.cond); - + if (pTagCond->pCond != NULL) { size_t s = taosArrayGetSize(pTagCond->pCond); for (int32_t i = 0; i < s; ++i) { SCond* p = taosArrayGet(pTagCond->pCond, i); tfree(p->cond); } - + taosArrayDestroy(pTagCond->pCond); } @@ -3014,7 +3015,7 @@ void tscTagCondRelease(STagCond* pTagCond) { void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) { STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); SSchema* pSchema = tscGetTableSchema(pTableMetaInfo->pTableMeta); - + size_t numOfExprs = tscNumOfExprs(pQueryInfo); for (int32_t i = 0; i < numOfExprs; ++i) { SExprInfo* pExpr = tscExprGet(pQueryInfo, i); @@ -3022,7 +3023,7 @@ void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SQueryInfo* pQueryInfo) { if (TSDB_COL_IS_TAG(pExpr->base.colInfo.flag)) { SSchema* pTagSchema = tscGetTableTagSchema(pTableMetaInfo->pTableMeta); - + int16_t index = pExpr->base.colInfo.colIndex; pColInfo[i].type = (index != -1) ? 
pTagSchema[index].type : TSDB_DATA_TYPE_BINARY; } else { @@ -3047,7 +3048,7 @@ bool tscShouldBeFreed(SSqlObj* pSql) { if (pSql == NULL || pSql->signature != pSql) { return false; } - + STscObj* pTscObj = pSql->pTscObj; if (pSql->pStream != NULL || pTscObj->hbrid == pSql->self || pSql->pSubscription != NULL) { return false; @@ -3126,7 +3127,7 @@ STableMetaInfo* tscGetTableMetaInfoByUid(SQueryInfo* pQueryInfo, uint64_t uid, i void tscInitQueryInfo(SQueryInfo* pQueryInfo) { assert(pQueryInfo->fieldsInfo.internalField == NULL); pQueryInfo->fieldsInfo.internalField = taosArrayInit(4, sizeof(SInternalField)); - + assert(pQueryInfo->exprList == NULL); pQueryInfo->exprList = taosArrayInit(4, POINTER_BYTES); @@ -3188,7 +3189,7 @@ static void freeQueryInfoImpl(SQueryInfo* pQueryInfo) { pQueryInfo->groupbyExpr.columnInfo = NULL; pQueryInfo->groupbyExpr.numOfGroupCols = 0; } - + pQueryInfo->tsBuf = tsBufDestroy(pQueryInfo->tsBuf); tfree(pQueryInfo->fillVal); @@ -3383,7 +3384,7 @@ void clearAllTableMetaInfo(SQueryInfo* pQueryInfo, bool removeMeta) { tNameExtractFullName(&pTableMetaInfo->name, name); taosHashRemove(tscTableMetaMap, name, strnlen(name, TSDB_TABLE_FNAME_LEN)); } - + tscFreeVgroupTableInfo(pTableMetaInfo->pVgroupTables); tscClearTableMetaInfo(pTableMetaInfo); @@ -3417,11 +3418,11 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM pTableMetaInfo->pTableMeta = pTableMeta; if (pTableMetaInfo->pTableMeta == NULL) { - pTableMetaInfo->tableMetaSize = 0; + pTableMetaInfo->tableMetaSize = 0; } else { pTableMetaInfo->tableMetaSize = tscGetTableMetaSize(pTableMeta); } - + if (vgroupList != NULL) { pTableMetaInfo->vgroupList = tscVgroupInfoClone(vgroupList); } @@ -3437,7 +3438,7 @@ STableMetaInfo* tscAddTableMetaInfo(SQueryInfo* pQueryInfo, SName* name, STableM } pTableMetaInfo->pVgroupTables = tscVgroupTableInfoDup(pVgroupTables); - + pQueryInfo->numOfTables += 1; return pTableMetaInfo; } @@ -3645,14 +3646,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t goto _error; } } - + if (tscTagCondCopy(&pNewQueryInfo->tagCond, &pQueryInfo->tagCond) != 0) { terrno = TSDB_CODE_TSC_OUT_OF_MEMORY; goto _error; } if (pQueryInfo->fillType != TSDB_FILL_NONE) { - //just make memory memory sanitizer happy + //just make memory memory sanitizer happy //refator later pNewQueryInfo->fillVal = calloc(1, pQueryInfo->fieldsInfo.numOfOutput * sizeof(int64_t)); if (pNewQueryInfo->fillVal == NULL) { @@ -3701,14 +3702,14 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pTableMeta, pTableMetaInfo->vgroupList, pTableMetaInfo->tagColList, pTableMetaInfo->pVgroupTables); - + } else { // transfer the ownership of pTableMeta to the newly create sql object. 
STableMetaInfo* pPrevInfo = tscGetTableMetaInfoFromCmd(&pPrevSql->cmd, 0); if (pPrevInfo->pTableMeta && pPrevInfo->pTableMeta->tableType < 0) { terrno = TSDB_CODE_TSC_APP_ERROR; goto _error; } - + STableMeta* pPrevTableMeta = tscTableMetaDup(pPrevInfo->pTableMeta); SVgroupsInfo* pVgroupsInfo = pPrevInfo->vgroupList; pFinalInfo = tscAddTableMetaInfo(pNewQueryInfo, &pTableMetaInfo->name, pPrevTableMeta, pVgroupsInfo, pTableMetaInfo->tagColList, @@ -3728,9 +3729,9 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t goto _error; } - + assert(pNewQueryInfo->numOfTables == 1); - + if (UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo)) { assert(pFinalInfo->vgroupList != NULL); } @@ -3739,13 +3740,13 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, __async_cb_func_t if (cmd == TSDB_SQL_SELECT) { size_t size = taosArrayGetSize(pNewQueryInfo->colList); - + tscDebug("0x%"PRIx64" new subquery:0x%"PRIx64", tableIndex:%d, vgroupIndex:%d, type:%d, exprInfo:%" PRIzu ", colList:%" PRIzu "," "fieldInfo:%d, name:%s, qrang:%" PRId64 " - %" PRId64 " order:%d, limit:%" PRId64, pSql->self, pNew->self, tableIndex, pTableMetaInfo->vgroupIndex, pNewQueryInfo->type, tscNumOfExprs(pNewQueryInfo), size, pNewQueryInfo->fieldsInfo.numOfOutput, tNameGetTableName(&pFinalInfo->name), pNewQueryInfo->window.skey, pNewQueryInfo->window.ekey, pNewQueryInfo->order.order, pNewQueryInfo->limit.limit); - + tscPrintSelNodeList(pNew, 0); } else { tscDebug("0x%"PRIx64" new sub insertion: %p, vnodeIdx:%d", pSql->self, pNew, pTableMetaInfo->vgroupIndex); @@ -3971,7 +3972,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s const char* msgFormat2 = "syntax error near \'%s\' (%s)"; const char* msgFormat3 = "%s"; - const char* prefix = "syntax error"; + const char* prefix = "syntax error"; const int32_t BACKWARD_CHAR_STEP = 0; if (sql == NULL) { @@ -3986,7 +3987,7 @@ int32_t tscSQLSyntaxErrMsg(char* msg, const char* additionalInfo, const char* s if (additionalInfo != NULL) { sprintf(msg, msgFormat2, buf, additionalInfo); } else { - const char* msgFormat = (0 == strncmp(sql, prefix, strlen(prefix))) ? msgFormat3 : msgFormat1; + const char* msgFormat = (0 == strncmp(sql, prefix, strlen(prefix))) ? msgFormat3 : msgFormat1; sprintf(msg, msgFormat, buf); } @@ -4044,7 +4045,7 @@ bool hasMoreVnodesToTry(SSqlObj* pSql) { if (!UTIL_TABLE_IS_SUPER_TABLE(pTableMetaInfo) || (pTableMetaInfo->vgroupList == NULL)) { return false; } - + int32_t numOfVgroups = pTableMetaInfo->vgroupList->numOfVgroups; if (pTableMetaInfo->pVgroupTables != NULL) { numOfVgroups = (int32_t)taosArrayGetSize(pTableMetaInfo->pVgroupTables); @@ -4071,7 +4072,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { */ assert(pRes->numOfRows == 0 && tscNonOrderedProjectionQueryOnSTable(pQueryInfo, 0) && !tscHasReachLimitation(pQueryInfo, pRes)); STableMetaInfo* pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); - + int32_t totalVgroups = pTableMetaInfo->vgroupList->numOfVgroups; if (++pTableMetaInfo->vgroupIndex < totalVgroups) { tscDebug("0x%"PRIx64" results from vgroup index:%d completed, try next:%d. total vgroups:%d. 
current numOfRes:%" PRId64, pSql->self, @@ -4092,7 +4093,7 @@ void tscTryQueryNextVnode(SSqlObj* pSql, __async_cb_func_t fp) { pQueryInfo->limit.offset = pRes->offset; assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); - + tscDebug("0x%"PRIx64" new query to next vgroup, index:%d, limit:%" PRId64 ", offset:%" PRId64 ", glimit:%" PRId64, pSql->self, pTableMetaInfo->vgroupIndex, pQueryInfo->limit.limit, pQueryInfo->limit.offset, pQueryInfo->clauseLimit); @@ -4177,7 +4178,7 @@ char* strdup_throw(const char* str) { int tscSetMgmtEpSetFromCfg(const char *first, const char *second, SRpcCorEpSet *corMgmtEpSet) { corMgmtEpSet->version = 0; - // init mgmt ip set + // init mgmt ip set SRpcEpSet *mgmtEpSet = &(corMgmtEpSet->epSet); mgmtEpSet->numOfEps = 0; mgmtEpSet->inUse = 0; @@ -4352,7 +4353,7 @@ uint32_t tscGetTableMetaSize(STableMeta* pTableMeta) { if (pTableMeta->tableInfo.numOfColumns >= 0 && pTableMeta->tableInfo.numOfTags >= 0) { totalCols = pTableMeta->tableInfo.numOfColumns + pTableMeta->tableInfo.numOfTags; } - + return sizeof(STableMeta) + totalCols * sizeof(SSchema); } From 23848d45b03f880b16564f2be420fd87c9355f9d Mon Sep 17 00:00:00 2001 From: wpan Date: Mon, 2 Aug 2021 10:34:20 +0800 Subject: [PATCH 06/20] add connection info to cache when taosd reboot while taos remains open --- src/mnode/src/mnodeShow.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/mnode/src/mnodeShow.c b/src/mnode/src/mnodeShow.c index 5fe22826b7..570f5c344b 100644 --- a/src/mnode/src/mnodeShow.c +++ b/src/mnode/src/mnodeShow.c @@ -253,11 +253,15 @@ static int32_t mnodeProcessHeartBeatMsg(SMnodeMsg *pMsg) { int32_t connId = htonl(pHBMsg->connId); SConnObj *pConn = mnodeAccquireConn(connId, connInfo.user, connInfo.clientIp, connInfo.clientPort); + if (pConn == NULL) { + pHBMsg->pid = htonl(pHBMsg->pid); + pConn = mnodeCreateConn(connInfo.user, connInfo.clientIp, connInfo.clientPort, pHBMsg->pid, pHBMsg->appName); + } if (pConn == NULL) { // do not close existing links, otherwise // mError("failed to create connId, close connect"); - // pRsp->killConnection = 1; + // pRsp->killConnection = 1; } else { pRsp->connId = htonl(pConn->connId); mnodeSaveQueryStreamList(pConn, pHBMsg); From 86a6a87666d0437ae774595941a3d4bda2a78983 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 2 Aug 2021 12:57:20 +0800 Subject: [PATCH 07/20] [TD-5618] fix distinct(tbname) error --- src/query/src/qExecutor.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 3f6df2ec07..93a2535d56 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6661,19 +6661,20 @@ static SSDataBlock* hashDistinct(void* param, bool* newgroup) { if (isNull(val, type)) { continue; } - + char* p = val; size_t keyLen = 0; if (IS_VAR_DATA_TYPE(pOperator->pExpr->base.colType)) { tstr* var = (tstr*)(val); + p = var->data; keyLen = varDataLen(var); } else { keyLen = bytes; } int dummy; - void* res = taosHashGet(pInfo->pSet, val, keyLen); + void* res = taosHashGet(pInfo->pSet, p, keyLen); if (res == NULL) { - taosHashPut(pInfo->pSet, val, keyLen, &dummy, sizeof(dummy)); + taosHashPut(pInfo->pSet, p, keyLen, &dummy, sizeof(dummy)); char* start = pResultColInfoData->pData + bytes * pInfo->pRes->info.rows; memcpy(start, val, bytes); pRes->info.rows += 1; From 2e050c4714a646a3059596b51dc271048823b5fb Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 2 Aug 2021 13:21:26 +0800 Subject: [PATCH 08/20] 
[TD-5579] give tips for 3 level'select --- src/client/src/tscSQLParser.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 819f1af4f0..ed7c2eeb19 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -900,6 +900,7 @@ int32_t tscValidateSqlInfo(SSqlObj* pSql, struct SSqlInfo* pInfo) { SSqlNode* pSqlNode = taosArrayGetP(pInfo->list, i); tscTrace("0x%"PRIx64" start to parse the %dth subclause, total:%"PRIzu, pSql->self, i, size); + // normalizeSqlNode(pSqlNode); // normalize the column name in each function if ((code = validateSqlNode(pSql, pSqlNode, pQueryInfo)) != TSDB_CODE_SUCCESS) { return code; @@ -2028,7 +2029,6 @@ static SUdfInfo* isValidUdf(SArray* pUdfInfo, const char* name, int32_t len) { tscError("udfinfo is null"); return NULL; } - size_t t = taosArrayGetSize(pUdfInfo); for(int32_t i = 0; i < t; ++i) { SUdfInfo* pUdf = taosArrayGet(pUdfInfo, i); @@ -8455,6 +8455,7 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf const char* msg6 = "not support stddev/percentile/interp in the outer query yet"; const char* msg7 = "derivative/twa/irate requires timestamp column exists in subquery"; const char* msg8 = "condition missing for join query"; + const char* msg9 = "not support 3 level select"; int32_t code = TSDB_CODE_SUCCESS; @@ -8485,6 +8486,13 @@ int32_t validateSqlNode(SSqlObj* pSql, SSqlNode* pSqlNode, SQueryInfo* pQueryInf // parse the subquery in the first place int32_t numOfSub = (int32_t)taosArrayGetSize(pSqlNode->from->list); for (int32_t i = 0; i < numOfSub; ++i) { + // check if there is 3 level select + SRelElementPair* subInfo = taosArrayGet(pSqlNode->from->list, i); + SSqlNode* p = taosArrayGetP(subInfo->pSubquery, 0); + if (p->from->type == SQL_NODE_FROM_SUBQUERY){ + return invalidOperationMsg(tscGetErrorMsgPayload(pCmd), msg9); + } + code = doValidateSubquery(pSqlNode, i, pSql, pQueryInfo, tscGetErrorMsgPayload(pCmd)); if (code != TSDB_CODE_SUCCESS) { return code; From 3c9b73069c1a33929c2d0e75da413a50233bed76 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 2 Aug 2021 14:51:17 +0800 Subject: [PATCH 09/20] [TD-5650] fix long tag filter conditon error --- src/client/inc/tscUtil.h | 1 + src/client/src/tscServer.c | 6 ++++-- src/client/src/tscUtil.c | 15 +++++++++++++++ 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index a7c2862f51..687d182bb6 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -344,6 +344,7 @@ int32_t tscCreateTableMetaFromSTableMeta(STableMeta* pChild, const char* name, v STableMeta* tscTableMetaDup(STableMeta* pTableMeta); SVgroupsInfo* tscVgroupsInfoDup(SVgroupsInfo* pVgroupsInfo); +int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo); int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo); int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr); void* createQInfoFromQueryNode(SQueryInfo* pQueryInfo, STableGroupInfo* pTableGroupInfo, SOperatorInfo* pOperator, char* sql, void* addr, int32_t stage, uint64_t qId); diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index fdb1be9f4e..b9edc52e9b 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -678,6 +678,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) { int32_t srcColListSize = (int32_t)(taosArrayGetSize(pQueryInfo->colList) * sizeof(SColumnInfo)); int32_t 
srcColFilterSize = tscGetColFilterSerializeLen(pQueryInfo); + int32_t srcTagFilterSize = tscGetTagFilterSerializeLen(pQueryInfo); + size_t numOfExprs = tscNumOfExprs(pQueryInfo); int32_t exprSize = (int32_t)(sizeof(SSqlExpr) * numOfExprs * 2); @@ -698,8 +700,8 @@ static int32_t tscEstimateQueryMsgSize(SSqlObj *pSql) { tableSerialize = totalTables * sizeof(STableIdInfo); } - return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + exprSize + tsBufSize + - tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; + return MIN_QUERY_MSG_PKT_SIZE + minMsgSize() + sizeof(SQueryTableMsg) + srcColListSize + srcColFilterSize + srcTagFilterSize + + exprSize + tsBufSize + tableSerialize + sqlLen + 4096 + pQueryInfo->bufLen; } static char *doSerializeTableInfo(SQueryTableMsg *pQueryMsg, SSqlObj *pSql, STableMetaInfo *pTableMetaInfo, char *pMsg, diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 1736fa3259..741b82e124 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -4684,6 +4684,21 @@ int32_t tscGetColFilterSerializeLen(SQueryInfo* pQueryInfo) { return len; } +int32_t tscGetTagFilterSerializeLen(SQueryInfo* pQueryInfo) { + // serialize tag column query condition + if (pQueryInfo->tagCond.pCond != NULL && taosArrayGetSize(pQueryInfo->tagCond.pCond) > 0) { + STagCond* pTagCond = &pQueryInfo->tagCond; + + STableMetaInfo *pTableMetaInfo = tscGetMetaInfo(pQueryInfo, 0); + STableMeta * pTableMeta = pTableMetaInfo->pTableMeta; + SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid); + if (pCond != NULL && pCond->cond != NULL) { + return pCond->len; + } + } + return 0; +} + int32_t tscCreateQueryFromQueryInfo(SQueryInfo* pQueryInfo, SQueryAttr* pQueryAttr, void* addr) { memset(pQueryAttr, 0, sizeof(SQueryAttr)); From f73506be5645dd7a68515f6df513eb2416813c23 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 2 Aug 2021 15:32:18 +0800 Subject: [PATCH 10/20] [td-5654]: fix the bug caused by unexpected error code. 
--- src/client/src/tscParseInsert.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 40092c5860..114b74ae03 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -582,9 +582,7 @@ int32_t tsParseValues(char **str, STableDataBlocks *pDataBlock, int maxRows, SSq sToken = tStrGetToken(*str, &index, false); *str += index; if (sToken.n == 0 || sToken.type != TK_RP) { - tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str); - code = TSDB_CODE_TSC_SQL_SYNTAX_ERROR; - return -1; + return tscSQLSyntaxErrMsg(pCmd->payload, ") expected", *str); } (*numOfRows)++; From d1112228cd02122956a7e9dd076dc236dbf1f204 Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 2 Aug 2021 15:32:55 +0800 Subject: [PATCH 11/20] Test/td 5626 (#7103) * [TD-5626]: add timezone test case for jdbc * change * change * change * change * [TD-5614]: handle client and server time not synchronized * change * do not test TimeZone Case * insert two rows --- src/connector/jdbc/pom.xml | 1 + .../com/taosdata/jdbc/TSDBJNIConnector.java | 3 +- .../taosdata/jdbc/cases/QueryDataTest.java | 3 +- .../com/taosdata/jdbc/cases/TimeZoneTest.java | 71 +++++++++++++++++++ 4 files changed, 75 insertions(+), 3 deletions(-) create mode 100644 src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java diff --git a/src/connector/jdbc/pom.xml b/src/connector/jdbc/pom.xml index 3d5cf8efe3..907562fe26 100644 --- a/src/connector/jdbc/pom.xml +++ b/src/connector/jdbc/pom.xml @@ -122,6 +122,7 @@ **/TSDBJNIConnectorTest.java **/TaosInfoMonitorTest.java **/UnsignedNumberJniTest.java + **/TimeZoneTest.java true diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 051eca7e10..4fdbb308c5 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -80,7 +80,8 @@ public class TSDBJNIConnector { this.taos = this.connectImp(host, port, dbName, user, password); if (this.taos == TSDBConstants.JNI_NULL_POINTER) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + String errMsg = this.getErrMsg(0); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, errMsg); } // invoke connectImp only here taosInfo.conn_open_increment(); diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java index 535e56f7d7..3fea221446 100644 --- a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/QueryDataTest.java @@ -34,9 +34,8 @@ public class QueryDataTest { String createTableSql = "create table " + stbName + "(ts timestamp, name binary(64))"; statement.executeUpdate(createTableSql); - } catch (SQLException e) { - return; + e.printStackTrace(); } } diff --git a/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java new file mode 100644 index 0000000000..94a175ad5c --- /dev/null +++ b/src/connector/jdbc/src/test/java/com/taosdata/jdbc/cases/TimeZoneTest.java @@ -0,0 +1,71 @@ +package com.taosdata.jdbc.cases; + +import 
com.taosdata.jdbc.TSDBDriver; +import org.junit.Test; + +import java.sql.*; +import java.time.Instant; +import java.time.LocalDateTime; +import java.time.ZoneId; +import java.util.Properties; + +public class TimeZoneTest { + + private String url = "jdbc:TAOS://127.0.0.1:6030/?user=root&password=taosdata"; + + @Test + public void javaTimeZone() { + LocalDateTime localDateTime = LocalDateTime.of(1970, 1, 1, 0, 0, 0); + + Instant instant = localDateTime.atZone(ZoneId.of("UTC-8")).toInstant(); + System.out.println("UTC-8: " + instant.getEpochSecond() + "," + instant); + + instant = localDateTime.atZone(ZoneId.of("UT")).toInstant(); + System.out.println("UTC: " + instant.getEpochSecond() + "," + instant); + + + instant = localDateTime.atZone(ZoneId.of("UTC+8")).toInstant(); + System.out.println("UTC+8: " + instant.getEpochSecond() + "," + instant); + } + + @Test + public void taosTimeZone() { + // given + Properties props = new Properties(); + props.setProperty(TSDBDriver.PROPERTY_KEY_TIME_ZONE, "UTC-8"); + + // when and then + try (Connection connection = DriverManager.getConnection(url, props)) { + Statement stmt = connection.createStatement(); + + stmt.execute("drop database if exists timezone_test"); + stmt.execute("create database if not exists timezone_test keep 365000"); + stmt.execute("use timezone_test"); + stmt.execute("create table weather(ts timestamp, temperature float)"); + + stmt.execute("insert into timezone_test.weather(ts, temperature) values('1970-01-01 00:00:00', 1.0)"); + + ResultSet rs = stmt.executeQuery("select * from timezone_test.weather"); + while (rs.next()) { + Timestamp ts = rs.getTimestamp("ts"); + System.out.println("ts: " + ts.getTime() + "," + ts); + } + + stmt.execute("insert into timezone_test.weather(ts, temperature, humidity) values('1970-01-02 00:00:00', 1.0, 2.0)"); + + rs = stmt.executeQuery("select * from timezone_test.weather"); + while (rs.next()) { + Timestamp ts = rs.getTimestamp("ts"); + System.out.println("ts: " + ts.getTime() + "," + ts); + } + + + stmt.execute("drop database if exists timezone_test"); + + stmt.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + +} \ No newline at end of file From 1e19765ba387b474b5e2bf39af114843a3cb9e54 Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 2 Aug 2021 15:38:20 +0800 Subject: [PATCH 12/20] [TD-5650] fix long tag filter conditon error --- src/inc/taosmsg.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index fb5bbe6c2d..54ea17657a 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -489,7 +489,7 @@ typedef struct { int16_t numOfCols; // the number of columns will be load from vnode SInterval interval; SSessionWindow sw; // session window - uint16_t tagCondLen; // tag length in current query + uint32_t tagCondLen; // tag length in current query uint32_t tbnameCondLen; // table name filter condition string length int16_t numOfGroupCols; // num of group by columns int16_t orderByIdx; From 2e9e8434b4f6031cc25b6114b9ca86fd6ff188a3 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 2 Aug 2021 15:44:28 +0800 Subject: [PATCH 13/20] [td-5707]: fix bug in interp query while only one row exists in a table. 
--- src/tsdb/src/tsdbRead.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c index 0892182f03..be686fcffd 100644 --- a/src/tsdb/src/tsdbRead.c +++ b/src/tsdb/src/tsdbRead.c @@ -2373,7 +2373,7 @@ static void destroyHelper(void* param) { free(param); } -static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) { +static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) { if (pQueryHandle->checkFiles) { // check if the query range overlaps with the file data block bool exists = true; @@ -2385,6 +2385,7 @@ static bool loadBlockOfActiveTable(STsdbQueryHandle* pQueryHandle) { } if (exists) { + tsdbRetrieveDataBlock((TsdbQueryHandleT) pQueryHandle, NULL); if (pQueryHandle->currentLoadExternalRows && pQueryHandle->window.skey == pQueryHandle->window.ekey) { SColumnInfoData* pColInfo = taosArrayGet(pQueryHandle->pColumns, 0); assert(*(int64_t*)pColInfo->pData == pQueryHandle->window.skey); From 4b124f6ac23bb9d11ff7679b6398b778f77c50dc Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 2 Aug 2021 15:45:45 +0800 Subject: [PATCH 14/20] [TD-5650] fix long tag filter conditon error --- src/client/src/tscServer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index b9edc52e9b..c951f24ae9 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -1037,7 +1037,7 @@ int tscBuildQueryMsg(SSqlObj *pSql, SSqlInfo *pInfo) { SCond *pCond = tsGetSTableQueryCond(pTagCond, pTableMeta->id.uid); if (pCond != NULL && pCond->cond != NULL) { - pQueryMsg->tagCondLen = htons(pCond->len); + pQueryMsg->tagCondLen = htonl(pCond->len); memcpy(pMsg, pCond->cond, pCond->len); pMsg += pCond->len; From 9e6dd46611d62feb811d3c3efc922cec7cfbbcdc Mon Sep 17 00:00:00 2001 From: wangmm0220 Date: Mon, 2 Aug 2021 15:49:17 +0800 Subject: [PATCH 15/20] [TD-5650] fix long tag filter conditon error --- src/query/src/qExecutor.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c index 3f6df2ec07..36bfb1d442 100644 --- a/src/query/src/qExecutor.c +++ b/src/query/src/qExecutor.c @@ -6898,7 +6898,7 @@ int32_t convertQueryMsg(SQueryTableMsg *pQueryMsg, SQueryParam* param) { pQueryMsg->numOfCols = htons(pQueryMsg->numOfCols); pQueryMsg->numOfOutput = htons(pQueryMsg->numOfOutput); pQueryMsg->numOfGroupCols = htons(pQueryMsg->numOfGroupCols); - pQueryMsg->tagCondLen = htons(pQueryMsg->tagCondLen); + pQueryMsg->tagCondLen = htonl(pQueryMsg->tagCondLen); pQueryMsg->tsBuf.tsOffset = htonl(pQueryMsg->tsBuf.tsOffset); pQueryMsg->tsBuf.tsLen = htonl(pQueryMsg->tsBuf.tsLen); pQueryMsg->tsBuf.tsNumOfBlocks = htonl(pQueryMsg->tsBuf.tsNumOfBlocks); From c8b5aa73125e7f3de584c43eac7c679a035ff2e2 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Mon, 2 Aug 2021 17:39:04 +0800 Subject: [PATCH 16/20] [TD-5606] : change default value of "numOfMnodes" to be 1. 
--- documentation20/cn/11.administrator/docs.md | 2 +- documentation20/en/11.administrator/docs.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation20/cn/11.administrator/docs.md b/documentation20/cn/11.administrator/docs.md index 19e4b761ba..b37916d790 100644 --- a/documentation20/cn/11.administrator/docs.md +++ b/documentation20/cn/11.administrator/docs.md @@ -143,7 +143,7 @@ taosd -C TDengine集群中加入一个新的dnode时,涉及集群相关的一些参数必须与已有集群的配置相同,否则不能成功加入到集群中。会进行校验的参数如下: -- numOfMnodes:系统中管理节点个数。默认值:3。 +- numOfMnodes:系统中管理节点个数。默认值:3。(2.0 版本从 2.0.20.11 开始、2.1 及以上版本从 2.1.6.0 开始,numOfMnodes 默认值改为 1。) - mnodeEqualVnodeNum: 一个mnode等同于vnode消耗的个数。默认值:4。 - offlineThreshold: dnode离线阈值,超过该时间将导致该dnode从集群中删除。单位为秒,默认值:86400*10(即10天)。 - statusInterval: dnode向mnode报告状态时长。单位为秒,默认值:1。 diff --git a/documentation20/en/11.administrator/docs.md b/documentation20/en/11.administrator/docs.md index 90bfdbe9c6..3817a41766 100644 --- a/documentation20/en/11.administrator/docs.md +++ b/documentation20/en/11.administrator/docs.md @@ -132,7 +132,7 @@ The SQL creates a database demo, each data file stores 10 days of data, the memo When adding a new dnode to the TDengine cluster, some parameters related to the cluster must be the same as the configuration of the existing cluster, otherwise it cannot be successfully added to the cluster. The parameters that will be verified are as follows: -- numOfMnodes: the number of management nodes in the system. Default: 3. +- numOfMnodes: the number of management nodes in the system. Default: 3. (Since version 2.0.20.11 and version 2.1.6.0, the default value of "numOfMnodes" has been changed to 1.) - balance: whether to enable load balancing. 0: No, 1: Yes. Default: 1. - mnodeEqualVnodeNum: an mnode is equal to the number of vnodes consumed. Default: 4. - offlineThreshold: the threshold for a dnode to be offline, exceed which the dnode will be removed from the cluster. The unit is seconds, and the default value is 86400*10 (that is, 10 days). 
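For quick reference, the cluster-consistency parameters listed in the documentation change above are the ones set in taosd's taos.cfg. A minimal illustrative fragment, assuming the usual whitespace-separated key/value layout of taos.cfg and using only the defaults quoted in these docs (numOfMnodes now defaulting to 1; offlineThreshold 86400*10 = 864000 seconds), might look like:

    # values that must match on every dnode joining an existing cluster
    # (defaults as quoted in the documentation above; adjust per deployment)
    numOfMnodes         1
    balance             1
    mnodeEqualVnodeNum  4
    offlineThreshold    864000
    statusInterval      1
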
From 98472086d112f9371040b2668dfc8d25b6f8eadf Mon Sep 17 00:00:00 2001 From: Zhiyu Yang <69311263+zyyang-taosdata@users.noreply.github.com> Date: Mon, 2 Aug 2021 17:39:31 +0800 Subject: [PATCH 17/20] [TD-5614]: handle client and server time not synchronized (#7109) --- .../jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java index 051eca7e10..4fdbb308c5 100755 --- a/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java +++ b/src/connector/jdbc/src/main/java/com/taosdata/jdbc/TSDBJNIConnector.java @@ -80,7 +80,8 @@ public class TSDBJNIConnector { this.taos = this.connectImp(host, port, dbName, user, password); if (this.taos == TSDBConstants.JNI_NULL_POINTER) { - throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL); + String errMsg = this.getErrMsg(0); + throw TSDBError.createSQLException(TSDBErrorNumbers.ERROR_JNI_CONNECTION_NULL, errMsg); } // invoke connectImp only here taosInfo.conn_open_increment(); From ad05808c01fa9269688bcee4a563581fd71bc102 Mon Sep 17 00:00:00 2001 From: Ping Xiao Date: Tue, 3 Aug 2021 11:23:59 +0800 Subject: [PATCH 18/20] remove duplicae file: operator.py --- tests/pytest/query/operator.py | 536 --------------------------------- 1 file changed, 536 deletions(-) delete mode 100644 tests/pytest/query/operator.py diff --git a/tests/pytest/query/operator.py b/tests/pytest/query/operator.py deleted file mode 100644 index 774a1e5f42..0000000000 --- a/tests/pytest/query/operator.py +++ /dev/null @@ -1,536 +0,0 @@ -################################################################### -# Copyright (c) 2016 by TAOS Technologies, Inc. -# All rights reserved. -# -# This file is proprietary and confidential to TAOS Technologies. 
-# No part of this file may be reproduced, stored, transmitted, -# disclosed or used in any form or by any means other than as -# expressly provided by the written permission from Jianhui Tao -# -################################################################### - -# -*- coding: utf-8 -*- - -import sys -import taos -from util.log import tdLog -from util.cases import tdCases -from util.sql import tdSql -import random -import time - - -class TDTestCase: - def init(self, conn, logSql): - tdLog.debug("start to execute %s" % __file__) - tdSql.init(conn.cursor(), logSql) - - self.ts = 1600000000000 - self.num = 10 - - def run(self): - tdSql.prepare() - # test case for https://jira.taosdata.com:18080/browse/TD-5074 - - startTime = time.time() - - tdSql.execute('''create stable stable_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create stable stable_2 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) - tags(loc nchar(20) , t_int int , t_bigint bigint , t_smallint smallint , t_tinyint tinyint, - t_bool bool , t_binary binary(20) , t_nchar nchar(20) , - t_float float , t_double double , t_ts timestamp);''') - tdSql.execute('''create table table_0 using stable_1 - tags('table_0' , '0' , '0' , '0' , '0' , 0 , '0' , '0' , '0' , '0' ,'0')''') - tdSql.execute('''create table table_1 using stable_1 - tags('table_1' , '2147483647' , '9223372036854775807' , '32767' , '127' , 1 , - 'binary1' , 'nchar1' , '1' , '11' , \'1999-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_2 using stable_1 - tags('table_2' , '-2147483647' , '-9223372036854775807' , '-32767' , '-127' , false , - 'binary2' , 'nchar2nchar2' , '-2.2' , '-22.22' , \'2099-09-09 09:09:09.090\')''') - tdSql.execute('''create table table_3 using stable_1 - tags('table_3' , '3' , '3' , '3' , '3' , true , 'binary3' , 'nchar3' , '33.33' , '3333.3333' , '0')''') - tdSql.execute('''create table table_4 using stable_1 - tags('table_4' , '4' , '4' , '4' , '4' , false , 'binary4' , 'nchar4' , '-444.444' , '-444444.444444' , '0')''') - tdSql.execute('''create table table_5 using stable_1 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') - tdSql.execute('''create table table_21 using stable_2 - tags('table_5' , '5' , '5' , '5' , '5' , true , 'binary5' , 'nchar5' , '5555.5555' , '55555555.55555555' , '0')''') - #regular table - tdSql.execute('''create table regular_table_1 - (ts timestamp , q_int int , q_bigint bigint , q_smallint smallint , q_tinyint tinyint, - q_bool bool , q_binary binary(20) , q_nchar nchar(20) , - q_float float , q_double double , q_ts timestamp) ;''') - - for i in range(self.num): - tdSql.execute('''insert into table_0 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, 
random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into table_2 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into table_3 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into table_4 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_5 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into table_21 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 0, 'binary.%s', 'nchar.%s', %f, %f, %d)''' - % (self.ts + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, 1, 'binary1.%s', 'nchar1.%s', %f, %f, %d)''' - % (self.ts + 100 + i, 2147483647-i, 9223372036854775807-i, 32767-i, 127-i, - i, i, random.random(), random.random(), 1262304000001 + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary2.%s', 'nchar2nchar2.%s', %f, %f, %d)''' - % (self.ts + 200 + i, -2147483647+i, -9223372036854775807+i, -32767+i, -127+i, - i, i, random.uniform(-1,0), random.uniform(-1,0), 1577836800001 + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary3.%s', 'nchar3.%s', %f, %f, %d)''' - % (self.ts + 300 + i, random.randint(-2147483647, 2147483647), - random.randint(-9223372036854775807, 9223372036854775807), random.randint(-32767, 32767), - random.randint(-127, 127), random.randint(-100, 100), random.randint(-10000, 10000), - random.uniform(-100000,100000), random.uniform(-1000000000,1000000000), self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, true, 'binary4.%s', 'nchar4.%s', %f, %f, %d)''' - % (self.ts + 400 + i, i, i, i, i, i, i, i, i, self.ts + i)) - tdSql.execute('''insert into regular_table_1 values(%d, %d, %d, %d, %d, false, 'binary5.%s', 'nchar5.%s', %f, %f, %d)''' - % (self.ts + 500 + i, i, i, i, i, i, i, i, i, self.ts + i)) - - tdLog.info("========== operator=1(OP_TableScan) ==========") - tdLog.info("========== operator=7(OP_Project) ==========") - sql = '''select * from stable_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select * from regular_table_1''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - tdLog.info("========== operator=14(OP_MultiTableAggregate ) ==========") - sql = '''select last_row(*) from stable_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=6(OP_Aggregate) ==========") - sql = '''select last_row(*) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkData(0,1,self.num-1) - - tdLog.info("========== operator=9(OP_Limit) ==========") - sql = '''select * from 
stable_1 where loc = 'table_0' limit 5;''' - tdSql.query(sql) - tdSql.checkRows(5) - sql = '''select last_row(*) from (select * from stable_1 where loc = 'table_0');''' - tdSql.query(sql) - tdSql.checkRows(1) - - sql = '''select * from regular_table_1 ;''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - sql = '''select last_row(*) from (select * from regular_table_1);''' - tdSql.query(sql) - tdSql.checkRows(1) - tdSql.checkData(0,1,self.num-1) - - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=16(OP_DummyInput) ==========") - sql = '''select last_row(*) from - ((select last_row(*) from table_0) union all - (select last_row(*) from table_1) union all - (select last_row(*) from table_2));''' - tdSql.error(sql) - - sql = '''select last_row(*) from - ((select * from table_0 limit 5 offset 5) union all - (select * from table_1 limit 5 offset 5) union all - (select * from regular_table_1 limit 5 offset 5));''' - tdSql.error(sql) - - tdLog.info("========== operator=10(OP_SLimit) ==========") - sql = '''select count(*) from stable_1 group by loc slimit 3 soffset 2 ;''' - tdSql.query(sql) - tdSql.checkRows(3) - - sql = '''select last_row(*) from - ((select * from table_0) union all - (select * from table_1) union all - (select * from table_2));''' - tdSql.error(sql) - - tdLog.info("========== operator=20(OP_Distinct) ==========") - tdLog.info("========== operator=4(OP_TagScan) ==========") - sql = '''select distinct(t_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(2) - sql = '''select distinct(loc) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_smallint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_float) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_double) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - sql = '''select distinct(t_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(3) - sql = '''select distinct(tbname) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(6) - - tdLog.info("========== operator=2(OP_DataBlocksOptScan) ==========") - sql = '''select last(q_int),first(q_int) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_bigint),first(q_bigint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_smallint),first(q_smallint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_tinyint),first(q_tinyint) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_bool),first(q_bool) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_binary),first(q_binary) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_nchar),first(q_nchar) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_float),first(q_float) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_double),first(q_double) 
from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_ts),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint), - first(q_bool),first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from stable_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint),last(q_bool),last(q_binary),last(q_nchar), - last(q_float),last(q_double),last(q_ts),first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint),first(q_bool), - first(q_binary),first(q_nchar),first(q_float),first(q_float),first(q_double),first(q_ts) from regular_table_1;''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=8(OP_Groupby) ==========") - sql = '''select stddev(q_int) from table_0 group by q_int;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from stable_1 group by q_int;''' - tdSql.query(sql) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from table_1 group by q_bigint;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select stddev(q_int),stddev(q_bigint),stddev(q_smallint),stddev(q_tinyint),stddev(q_float),stddev(q_double) from regular_table_1 group by q_smallint;''' - tdSql.query(sql) - - tdLog.info("========== operator=11(OP_TimeWindow) ==========") - sql = '''select last(q_int) from table_0 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from stable_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select last(q_int),last(q_bigint), last(q_smallint),last(q_tinyint), - first(q_int),first(q_bigint),first(q_smallint),first(q_tinyint) from regular_table_1 interval(1m);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=12(OP_SessionWindow) ==========") - sql = '''select count(*) from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*) from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - sql = '''select count(*),sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - 
sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 session(ts,1s);''' - tdSql.query(sql) - tdSql.checkRows(1) - - tdLog.info("========== operator=13(OP_Fill) ==========") - sql = '''select sum(q_int) from table_0 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_0 where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - #TD-5190 - sql = '''select sum(q_tinyint),stddev(q_float) from stable_1 - where ts >='1970-10-01 00:00:00' and ts <=now interval(1n) fill(NULL);''' - tdSql.query(sql) - tdSql.checkData(0,1,'None') - - tdLog.info("========== operator=15(OP_MultiTableTimeInterval) ==========") - sql = '''select avg(q_int) from stable_1 where ts=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), 
avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having sum(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having avg(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having min(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having max(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having first(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - sql = '''select loc, sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - 
sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from stable_1 group by loc having last(q_int)>=0;''' - tdSql.query(sql) - tdSql.checkData(0,0,'table_0') - - tdLog.info("========== operator=21(OP_Join) ==========") - sql = '''select t1.q_int,t2.q_int from - (select ts,q_int from table_1) t1 , (select ts,q_int from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from table_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from table_0) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from stable_1) t1 , (select * from table_2) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2 - where t2.ts = t1.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select t1.*,t2.*,t3.* from - (select * from regular_table_1) t1 , (select * from stable_1) t2, (select * from table_0) t3 - where t2.ts = t1.ts and t3.ts = t1.ts and t2.ts = t3.ts;''' - tdSql.query(sql) - tdSql.checkRows(self.num) - - tdLog.info("========== operator=22(OP_StateWindow) ==========") - sql = '''select avg(q_int),sum(q_smallint) from table_1 state_window(q_int);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from table_1 state_window(q_bigint);''' - tdSql.query(sql) - tdSql.checkRows(self.num) - sql = '''select sum(q_int), avg(q_int), min(q_int), max(q_int), first(q_int), last(q_int), - sum(q_bigint), avg(q_bigint), min(q_bigint), max(q_bigint), first(q_bigint), last(q_bigint), - sum(q_smallint), avg(q_smallint), min(q_smallint), max(q_smallint), first(q_smallint), last(q_smallint), - sum(q_tinyint), avg(q_tinyint), min(q_tinyint), max(q_tinyint), first(q_tinyint), last(q_tinyint), - sum(q_float), avg(q_float), min(q_float), max(q_float), first(q_float), last(q_float), - sum(q_double), avg(q_double), min(q_double), max(q_double), first(q_double), last(q_double) - from regular_table_1 state_window(q_smallint);''' - tdSql.query(sql) - tdSql.checkRows(6*self.num) - - endTime = time.time() - print("total time %ds" % (endTime - startTime)) - - - def stop(self): - tdSql.close() - tdLog.success("%s successfully executed" % __file__) - - -tdCases.addWindows(__file__, TDTestCase()) -tdCases.addLinux(__file__, TDTestCase()) \ No newline at end of file From 07b74e1afbb5187a35eabe28ccae91c952295c55 Mon Sep 17 00:00:00 2001 From: Elias Soong Date: Tue, 3 Aug 2021 13:22:59 +0800 Subject: [PATCH 19/20] [TD-2569] : fix 
description about timestamp starting time. --- documentation20/cn/12.taos-sql/docs.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/documentation20/cn/12.taos-sql/docs.md b/documentation20/cn/12.taos-sql/docs.md index 6d39c25565..4368e5fa1d 100644 --- a/documentation20/cn/12.taos-sql/docs.md +++ b/documentation20/cn/12.taos-sql/docs.md @@ -34,7 +34,7 @@ taos> DESCRIBE meters; - 时间格式为 ```YYYY-MM-DD HH:mm:ss.MS```,默认时间分辨率为毫秒。比如:```2017-08-12 18:25:58.128``` - 内部函数 now 是客户端的当前时间 - 插入记录时,如果时间戳为 now,插入数据时使用提交这条记录的客户端的当前时间 -- Epoch Time:时间戳也可以是一个长整数,表示从 1970-01-01 08:00:00.000 开始的毫秒数 +- Epoch Time:时间戳也可以是一个长整数,表示从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的毫秒数(相应地,如果所在 Database 的时间精度设置为“微秒”,则长整型格式的时间戳含义也就对应于从格林威治时间 1970-01-01 00:00:00.000 (UTC/GMT) 开始的微秒数) - 时间可以加减,比如 now-2h,表明查询时刻向前推 2 个小时(最近 2 小时)。数字后面的时间单位可以是 u(微秒)、a(毫秒)、s(秒)、m(分)、h(小时)、d(天)、w(周)。 比如 `select * from t1 where ts > now-2w and ts <= now-1w`,表示查询两周前整整一周的数据。在指定降频操作(down sampling)的时间窗口(interval)时,时间单位还可以使用 n(自然月) 和 y(自然年)。 TDengine 缺省的时间戳是毫秒精度,但通过在 CREATE DATABASE 时传递的 PRECISION 参数就可以支持微秒。 From f5a9b37c28ef92afa3c42e68d7c90939f28b64d6 Mon Sep 17 00:00:00 2001 From: Shuduo Sang Date: Tue, 3 Aug 2021 14:14:52 +0800 Subject: [PATCH 20/20] [TD-5702]: taosdemo remove memory operation. (#7116) * [td-5654]: fix the bug caused by unexpected error code. * [td-5707]: fix bug in interp query while only one row exists in a table. * [TD-5702]: taosdemo remove memory operation. * [TD-5702]: taosdemo remove memory operation. * add remainderBufLen to check row data generation. * row data generation with remainder buffer length checking. Co-authored-by: Haojun Liao Co-authored-by: Haojun Liao Co-authored-by: Shuduo Sang --- src/kit/taosdemo/taosdemo.c | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) diff --git a/src/kit/taosdemo/taosdemo.c b/src/kit/taosdemo/taosdemo.c index 773506c5ae..f9fe66f466 100644 --- a/src/kit/taosdemo/taosdemo.c +++ b/src/kit/taosdemo/taosdemo.c @@ -5094,7 +5094,9 @@ static int getRowDataFromSample( static int64_t generateStbRowData( SSuperTable* stbInfo, - char* recBuf, int64_t timestamp) + char* recBuf, + int64_t remainderBufLen, + int64_t timestamp) { int64_t dataLen = 0; char *pstr = recBuf; @@ -5122,6 +5124,7 @@ static int64_t generateStbRowData( rand_string(buf, stbInfo->columns[i].dataLen); dataLen += snprintf(pstr + dataLen, maxLen - dataLen, "\'%s\',", buf); tmfree(buf); + } else { char *tmp; @@ -5178,6 +5181,9 @@ static int64_t generateStbRowData( tstrncpy(pstr + dataLen, ",", 2); dataLen += 1; } + + if (dataLen > remainderBufLen) + return 0; } dataLen -= 1; @@ -5384,7 +5390,7 @@ static int32_t generateDataTailWithoutStb( int32_t k = 0; for (k = 0; k < batch;) { - char data[MAX_DATA_SIZE]; + char *data = pstr; memset(data, 0, MAX_DATA_SIZE); int64_t retLen = 0; @@ -5408,7 +5414,7 @@ static int32_t generateDataTailWithoutStb( if (len > remainderBufLen) break; - pstr += sprintf(pstr, "%s", data); + pstr += retLen; k++; len += retLen; remainderBufLen -= retLen; @@ -5464,14 +5470,14 @@ static int32_t generateStbDataTail( int32_t k; for (k = 0; k < batch;) { - char data[MAX_DATA_SIZE]; - memset(data, 0, MAX_DATA_SIZE); + char *data = pstr; int64_t lenOfRow = 0; if (tsRand) { if (superTblInfo->disorderRatio > 0) { lenOfRow = generateStbRowData(superTblInfo, data, + remainderBufLen, startTime + getTSRandTail( superTblInfo->timeStampStep, k, superTblInfo->disorderRatio, @@ -5479,6 +5485,7 @@ static int32_t generateStbDataTail( ); } else { lenOfRow = 
generateStbRowData(superTblInfo, data, + remainderBufLen, startTime + superTblInfo->timeStampStep * k ); } @@ -5491,11 +5498,15 @@ static int32_t generateStbDataTail( pSamplePos); } + if (lenOfRow == 0) { + data[0] = '\0'; + break; + } if ((lenOfRow + 1) > remainderBufLen) { break; } - pstr += snprintf(pstr , lenOfRow + 1, "%s", data); + pstr += lenOfRow; k++; len += lenOfRow; remainderBufLen -= lenOfRow; @@ -6247,7 +6258,7 @@ static int32_t generateStbProgressiveData( assert(buffer != NULL); char *pstr = buffer; - memset(buffer, 0, *pRemainderBufLen); + memset(pstr, 0, *pRemainderBufLen); int64_t headLen = generateStbSQLHead( superTblInfo, @@ -6641,7 +6652,7 @@ static void* syncWriteProgressive(threadInfo *pThreadInfo) { return NULL; } - int64_t remainderBufLen = maxSqlLen; + int64_t remainderBufLen = maxSqlLen - 2000; char *pstr = pThreadInfo->buffer; int len = snprintf(pstr, @@ -6823,10 +6834,14 @@ static void callBack(void *param, TAOS_RES *res, int code) { && rand_num < pThreadInfo->superTblInfo->disorderRatio) { int64_t d = pThreadInfo->lastTs - (taosRandom() % pThreadInfo->superTblInfo->disorderRange + 1); - generateStbRowData(pThreadInfo->superTblInfo, data, d); + generateStbRowData(pThreadInfo->superTblInfo, data, + MAX_DATA_SIZE, + d); } else { generateStbRowData(pThreadInfo->superTblInfo, - data, pThreadInfo->lastTs += 1000); + data, + MAX_DATA_SIZE, + pThreadInfo->lastTs += 1000); } pstr += sprintf(pstr, "%s", data); pThreadInfo->counter++; @@ -7051,6 +7066,7 @@ static void startMultiThreadInsertData(int threads, char* db_name, for (int i = 0; i < threads; i++) { threadInfo *pThreadInfo = infos + i; pThreadInfo->threadID = i; + tstrncpy(pThreadInfo->db_name, db_name, TSDB_DB_NAME_LEN); pThreadInfo->time_precision = timePrec; pThreadInfo->superTblInfo = superTblInfo;