From 2cee0797b428bf7e7192ec58066bed70f7105ae5 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Mon, 10 Aug 2020 15:56:41 +0800
Subject: [PATCH 1/3] [td-1101]add some logs

---
 src/tsdb/src/tsdbRead.c | 23 +++++++++++++++++++++--
 1 file changed, 21 insertions(+), 2 deletions(-)

diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index ccc631fb58..2e9520c360 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -172,6 +172,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   if (pQueryHandle == NULL) {
     goto out_of_memory;
   }
+
   pQueryHandle->order = pCond->order;
   pQueryHandle->window = pCond->twindow;
   pQueryHandle->pTsdb = tsdb;
@@ -183,6 +184,7 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   pQueryHandle->qinfo = qinfo;
   pQueryHandle->outputCapacity = ((STsdbRepo*)tsdb)->config.maxRowsPerFileBlock;
   pQueryHandle->allocSize = 0;
+  pQueryHandle->locateStart = false;
   if (tsdbInitReadHelper(&pQueryHandle->rhelper, (STsdbRepo*) tsdb) != 0) {
     goto out_of_memory;
   }
@@ -193,6 +195,12 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   size_t sizeOfGroup = taosArrayGetSize(groupList->pGroupList);
   assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);
 
+  if (ASCENDING_TRAVERSE(pCond->order)) {
+    assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
+  } else {
+    assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
+  }
+
   // allocate buffer in order to load data blocks from file
   int32_t numOfCols = pCond->numOfCols;
 
@@ -243,6 +251,8 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
           info.pTableObj->type == TSDB_CHILD_TABLE || info.pTableObj->type == TSDB_STREAM_TABLE));
 
       taosArrayPush(pQueryHandle->pTableCheckInfo, &info);
+      tsdbDebug("%p check table uid:%"PRId64", tid:%d from lastKey:%"PRId64" %p", pQueryHandle, info.tableId.uid,
+                info.tableId.tid, info.lastKey, qinfo);
     }
   }
 
@@ -645,7 +655,7 @@ static bool doLoadFileDataBlock(STsdbQueryHandle* pQueryHandle, SCompBlock* pBlo
     int16_t* colIds = pQueryHandle->defaultLoadColumn->pData;
 
     int32_t ret = tsdbLoadBlockDataCols(&(pQueryHandle->rhelper), pBlock, pCheckInfo->pCompInfo, colIds, QH_GET_NUM_OF_COLS(pQueryHandle));
-    if (ret == TSDB_CODE_SUCCESS) { 
+    if (ret == TSDB_CODE_SUCCESS) {
       SDataBlockLoadInfo* pBlockLoadInfo = &pQueryHandle->dataBlockLoadInfo;
 
       pBlockLoadInfo->fileGroup = pQueryHandle->pFileGroup;
@@ -1071,6 +1081,14 @@ static void doMergeTwoLevelData(STsdbQueryHandle* pQueryHandle, STableCheckInfo*
 
   TSKEY* tsArray = pCols->cols[0].pData;
 
+  if (ASCENDING_TRAVERSE(pQueryHandle->order)) {
+    TSKEY s = tsArray[cur->pos];
+    assert(s >= pQueryHandle->window.skey && s <= pQueryHandle->window.ekey);
+  } else {
+    TSKEY s = tsArray[cur->pos];
+    assert(s <= pQueryHandle->window.skey && s >= pQueryHandle->window.ekey);
+  }
+
   // for search the endPos, so the order needs to reverse
   int32_t order = (pQueryHandle->order == TSDB_ORDER_ASC)? TSDB_ORDER_DESC:TSDB_ORDER_ASC;
 
@@ -1550,7 +1568,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
     STableCheckInfo* pCheckInfo = pBlockInfo->pTableCheckInfo;
 
     // current block is done, try next
-    if (!cur->mixBlock || cur->blockCompleted) {
+    if ((!cur->mixBlock) || cur->blockCompleted) {
       if ((cur->slot == pQueryHandle->numOfBlocks - 1 && ASCENDING_TRAVERSE(pQueryHandle->order)) ||
           (cur->slot == 0 && !ASCENDING_TRAVERSE(pQueryHandle->order))) {
         // all data blocks in current file has been checked already, try next file if exists
@@ -1569,6 +1587,7 @@ static int32_t getDataBlocksInFiles(STsdbQueryHandle* pQueryHandle, bool* exists
         return TSDB_CODE_SUCCESS;
       }
     } else {
+      tsdbDebug("%p continue in current data block, index:%d, %p", pQueryHandle, cur->slot, pQueryHandle->qinfo);
       handleDataMergeIfNeeded(pQueryHandle, pBlockInfo->compBlock, pCheckInfo);
       *exists = pQueryHandle->realNumOfRows > 0;
 

From 8aad94d7eb8d192801aa45abbe86d15b83e9aa6a Mon Sep 17 00:00:00 2001
From: eurake
Date: Mon, 10 Aug 2020 17:31:12 +0800
Subject: [PATCH 2/3] Update cluster-ch.md
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

删除多余英文字符
---
 documentation20/webdocs/markdowndocs/cluster-ch.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/documentation20/webdocs/markdowndocs/cluster-ch.md b/documentation20/webdocs/markdowndocs/cluster-ch.md
index 2df6d2cb0e..afe0272387 100644
--- a/documentation20/webdocs/markdowndocs/cluster-ch.md
+++ b/documentation20/webdocs/markdowndocs/cluster-ch.md
@@ -107,7 +107,7 @@ CREATE DATABASE demo replica 3;
 ```
 一个DB里的数据会被切片分到多个vnode group,vnode group里的vnode数目就是DB的副本数，同一个vnode group里各vnode的数据是完全一致的。为保证高可用性，vnode group里的vnode一定要分布在不同的dnode里(实际部署时，需要在不同的物理机上)，只要一个vgroup里超过半数的vnode处于工作状态，这个vgroup就能正常的对外服务。
 
-一个dnode里可能有多个DB的数据，因此一个dnode离线时，可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作，那么该vnode group就无法对外服务，无法插入或读取数据，这样会影响到它所属的DB的一部分表的d读写操作。
+一个dnode里可能有多个DB的数据，因此一个dnode离线时，可能会影响到多个DB。如果一个vnode group里的一半或一半以上的vnode不工作，那么该vnode group就无法对外服务，无法插入或读取数据，这样会影响到它所属的DB的一部分表的读写操作。
 
 因为vnode的引入，无法简单的给出结论：“集群中过半dnode工作，集群就应该工作”。但是对于简单的情形，很好下结论。比如副本数为3，只有三个dnode，那如果仅有一个节点不工作，整个集群还是可以正常工作的，但如果有两个节点不工作，那整个集群就无法正常工作了。

From e00d0728872db857a7b4eb6ce5c92ae3d1e503c3 Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Mon, 10 Aug 2020 17:38:09 +0800
Subject: [PATCH 3/3] [td-1101] add time range check, fix bugs for projection query.

---
 src/query/src/qExecutor.c | 5 +++++
 src/tsdb/src/tsdbRead.c   | 4 ++--
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/src/query/src/qExecutor.c b/src/query/src/qExecutor.c
index 1277e7bfbb..a08a5476e3 100644
--- a/src/query/src/qExecutor.c
+++ b/src/query/src/qExecutor.c
@@ -1342,6 +1342,11 @@ static int32_t tableApplyFunctionsOnBlock(SQueryRuntimeEnv *pRuntimeEnv, SDataBl
     if ((pQuery->limit.limit >= 0) && (pQuery->limit.limit + pQuery->limit.offset) <= numOfRes) {
       setQueryStatus(pQuery, QUERY_COMPLETED);
     }
+
+    if (((pTableQInfo->lastKey > pTableQInfo->win.ekey) && QUERY_IS_ASC_QUERY(pQuery)) ||
+        ((pTableQInfo->lastKey < pTableQInfo->win.ekey) && (!QUERY_IS_ASC_QUERY(pQuery)))) {
+      setQueryStatus(pQuery, QUERY_COMPLETED);
+    }
   }
 }
 
diff --git a/src/tsdb/src/tsdbRead.c b/src/tsdb/src/tsdbRead.c
index 2e9520c360..d40084be23 100644
--- a/src/tsdb/src/tsdbRead.c
+++ b/src/tsdb/src/tsdbRead.c
@@ -196,9 +196,9 @@ TsdbQueryHandleT* tsdbQueryTables(TSDB_REPO_T* tsdb, STsdbQueryCond* pCond, STab
   assert(sizeOfGroup >= 1 && pCond != NULL && pCond->numOfCols > 0);
 
   if (ASCENDING_TRAVERSE(pCond->order)) {
-    assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
-  } else {
     assert(pQueryHandle->window.skey <= pQueryHandle->window.ekey);
+  } else {
+    assert(pQueryHandle->window.skey >= pQueryHandle->window.ekey);
   }
 
   // allocate buffer in order to load data blocks from file
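
Taken together, PATCH 1/3 and PATCH 3/3 enforce one invariant and one early-exit rule: the query window must be ordered to match the traversal direction (skey <= ekey for ascending scans, skey >= ekey for descending ones, with PATCH 3/3 swapping the branches that PATCH 1/3 got backwards), and a per-table projection query is marked QUERY_COMPLETED once its lastKey has moved past win.ekey in the scan direction. The standalone C sketch below restates that logic; STimeWindow, checkWindowOrder, and queryWindowExhausted are illustrative stand-ins, not the actual TDengine definitions.

/* Minimal sketch of the checks introduced by the patches above.
 * Only the comparison logic mirrors the series; all names are hypothetical. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

typedef int64_t TSKEY;
typedef struct { TSKEY skey; TSKEY ekey; } STimeWindow;

/* Ascending scans expect skey <= ekey; descending scans receive the window
 * in reverse order, so skey >= ekey (the corrected assert in tsdbQueryTables). */
static void checkWindowOrder(const STimeWindow *win, bool ascending) {
  if (ascending) {
    assert(win->skey <= win->ekey);
  } else {
    assert(win->skey >= win->ekey);
  }
}

/* A projection query on one table is complete once the last returned key has
 * passed the end of the window in the scan direction; this mirrors the
 * QUERY_COMPLETED condition added in qExecutor.c. */
static bool queryWindowExhausted(TSKEY lastKey, const STimeWindow *win, bool ascending) {
  return ascending ? (lastKey > win->ekey) : (lastKey < win->ekey);
}

int main(void) {
  STimeWindow asc = { .skey = 100, .ekey = 200 };
  checkWindowOrder(&asc, true);
  assert(!queryWindowExhausted(150, &asc, true));
  assert(queryWindowExhausted(201, &asc, true));

  STimeWindow desc = { .skey = 200, .ekey = 100 };
  checkWindowOrder(&desc, false);
  assert(queryWindowExhausted(99, &desc, false));
  return 0;
}

The strict comparisons matter: a lastKey equal to win.ekey is still inside the window, so the query only completes once the key strictly passes the boundary.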
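The paragraph corrected by PATCH 2/3 also states the availability rule for a vnode group: it keeps serving reads and writes only while more than half of its replica vnodes are working, and stops as soon as half or more are down. A hypothetical helper (the name vgroupCanServe and its parameters are illustrative, not TDengine API) makes that boundary explicit.

#include <stdbool.h>
#include <stdint.h>

/* Illustrative majority check: with replica 3, two working vnodes keep the
 * vgroup online (2 > 3/2), while one working vnode does not (1 > 1 is false). */
static bool vgroupCanServe(int32_t workingVnodes, int32_t replica) {
  return workingVnodes > replica / 2;
}

For replica 2 this means both vnodes must stay up, which matches the documentation's point that losing half of the replicas already makes the vgroup unavailable.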