From db5e538fc5b97c0e9e8697c36efd9ec68c45486c Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 4 Dec 2019 10:26:31 +0800 Subject: [PATCH 01/17] remove some logs --- src/modules/http/src/httpHandle.c | 7 +++---- src/modules/http/src/httpServer.c | 8 ++++---- 2 files changed, 7 insertions(+), 8 deletions(-) diff --git a/src/modules/http/src/httpHandle.c b/src/modules/http/src/httpHandle.c index c736825b37..9a2067bb15 100644 --- a/src/modules/http/src/httpHandle.c +++ b/src/modules/http/src/httpHandle.c @@ -279,8 +279,7 @@ bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { httpParseChunkedBody(pContext, pParser, false); return HTTP_CHECK_BODY_SUCCESS; } else { - httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, - pContext->ipstr); + //httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr); if (!httpReadDataImp(pContext)) { httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); return HTTP_CHECK_BODY_ERROR; @@ -298,8 +297,8 @@ int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) { httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR); return HTTP_CHECK_BODY_ERROR; } else if (dataReadLen < pParser->data.len) { - httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read", - pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); + //httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read", + // pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); return HTTP_CHECK_BODY_CONTINUE; } else { return HTTP_CHECK_BODY_SUCCESS; diff --git a/src/modules/http/src/httpServer.c b/src/modules/http/src/httpServer.c index 01d3ef40f2..232119b32a 100644 --- a/src/modules/http/src/httpServer.c +++ b/src/modules/http/src/httpServer.c @@ -101,7 +101,7 @@ void httpFreeContext(HttpServer *pServer, HttpContext *pContext) { void httpCleanUpContextTimer(HttpContext *pContext) { if (pContext->timer != NULL) { taosTmrStopA(&pContext->timer); - httpTrace("context:%p, ip:%s, close timer:%p", pContext, pContext->ipstr, pContext->timer); + //httpTrace("context:%p, ip:%s, close timer:%p", pContext, pContext->ipstr, pContext->timer); pContext->timer = NULL; } } @@ -329,8 +329,6 @@ bool httpReadDataImp(HttpContext *pContext) { } pParser->buffer[pParser->bufsize] = 0; - httpTrace("context:%p, fd:%d, ip:%s, thread:%s, read size:%d", - pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pParser->bufsize); return true; } @@ -383,10 +381,12 @@ bool httpReadData(HttpThread *pThread, HttpContext *pContext) { int ret = httpCheckReadCompleted(pContext); if (ret == HTTP_CHECK_BODY_CONTINUE) { taosTmrReset(httpCloseContextByServerForExpired, HTTP_EXPIRED_TIME, pContext, pThread->pServer->timerHandle, &pContext->timer); - httpTrace("context:%p, fd:%d, ip:%s, not finished yet, try another times, timer:%p", pContext, pContext->fd, pContext->ipstr, pContext->timer); + //httpTrace("context:%p, fd:%d, ip:%s, not finished yet, try another times, timer:%p", pContext, pContext->fd, pContext->ipstr, pContext->timer); return false; } else if (ret == HTTP_CHECK_BODY_SUCCESS){ httpCleanUpContextTimer(pContext); + httpTrace("context:%p, fd:%d, ip:%s, thread:%s, read size:%d, dataLen:%d", + 
pContext, pContext->fd, pContext->ipstr, pContext->pThread->label, pContext->parser.bufsize, pContext->parser.data.len); if (httpDecompressData(pContext)) { return true; } else { From d019ee0c80c8d64dbce61884065379ac1af98845 Mon Sep 17 00:00:00 2001 From: slguan Date: Wed, 4 Dec 2019 10:48:16 +0800 Subject: [PATCH 02/17] add some logs --- src/modules/http/src/httpHandle.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/modules/http/src/httpHandle.c b/src/modules/http/src/httpHandle.c index 9a2067bb15..b46fa11cde 100644 --- a/src/modules/http/src/httpHandle.c +++ b/src/modules/http/src/httpHandle.c @@ -279,7 +279,7 @@ bool httpReadChunkedBody(HttpContext* pContext, HttpParser* pParser) { httpParseChunkedBody(pContext, pParser, false); return HTTP_CHECK_BODY_SUCCESS; } else { - //httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr); + httpTrace("context:%p, fd:%d, ip:%s, chunked body not finished, continue read", pContext, pContext->fd, pContext->ipstr); if (!httpReadDataImp(pContext)) { httpError("context:%p, fd:%d, ip:%s, read chunked request error", pContext, pContext->fd, pContext->ipstr); return HTTP_CHECK_BODY_ERROR; @@ -297,8 +297,8 @@ int httpReadUnChunkedBody(HttpContext* pContext, HttpParser* pParser) { httpSendErrorResp(pContext, HTTP_PARSE_BODY_ERROR); return HTTP_CHECK_BODY_ERROR; } else if (dataReadLen < pParser->data.len) { - //httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read", - // pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); + httpTrace("context:%p, fd:%d, ip:%s, un-chunked body not finished, read size:%d dataReadLen:%d < pContext->data.len:%d, continue read", + pContext, pContext->fd, pContext->ipstr, pContext->parser.bufsize, dataReadLen, pParser->data.len); return HTTP_CHECK_BODY_CONTINUE; } else { return HTTP_CHECK_BODY_SUCCESS; From 39ace3f38e35eb309e31e07bd5fe6a473fb86743 Mon Sep 17 00:00:00 2001 From: slguan Date: Thu, 5 Dec 2019 09:31:42 +0800 Subject: [PATCH 03/17] fix some log errors --- src/sdb/src/sdbEngine.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c index 3e7a6ac8ee..5eb644a7be 100644 --- a/src/sdb/src/sdbEngine.c +++ b/src/sdb/src/sdbEngine.c @@ -351,7 +351,7 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { return -1; } - if ((pTable->keyType != SDB_KEYTYPE_AUTO) || *((int64_t *)row)) + if ((pTable->keyType != SDB_KEYTYPE_AUTO) || *((int32_t *)row)) if (sdbGetRow(handle, row)) { if (strcmp(pTable->name, "mnode") == 0) { /* @@ -372,10 +372,10 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, taosIpStr(*(int32_t *)row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record:%d sdbVersion:%ld id:%d", pTable->name, *(int32_t *)row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to insert record:%s sdbVersion:%ld id:%d", pTable->name, sdbVersion, pTable->id); + sdbError("table:%s, failed to insert record sdbVersion:%ld id:%d", pTable->name, sdbVersion, pTable->id); break; } return -1; @@ -593,15 +593,15 @@ int sdbUpdateRow(void *handle, void 
*row, int updateSize, char isUpdated) { pTable->name, (char *) row, sdbVersion, pTable->id); break; case SDB_KEYTYPE_UINT32: //dnodes or mnodes - sdbError("table:%s, failed to update record:%s record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%s, record is not there, sdbVersion:%ld id:%d", pTable->name, taosIpStr(*(int32_t *) row), sdbVersion, pTable->id); break; case SDB_KEYTYPE_AUTO: - sdbError("table:%s, failed to update record:F%s record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record:%d, record is not there, sdbVersion:%ld id:%d", pTable->name, *(int32_t *) row, sdbVersion, pTable->id); break; default: - sdbError("table:%s, failed to update record:%s record is not there, sdbVersion:%ld id:%d", + sdbError("table:%s, failed to update record, record is not there, sdbVersion:%ld id:%d", pTable->name, sdbVersion, pTable->id); break; } From 6864dcb57f0110d766832251ef25c7ee6fae92a0 Mon Sep 17 00:00:00 2001 From: slguan Date: Thu, 5 Dec 2019 18:02:45 +0800 Subject: [PATCH 04/17] add some log for jdbc --- src/client/src/TSDBJNIConnector.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index 71f983dadb..e27313a968 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -239,7 +239,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J jbyteArray jsql, jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); + jniError("jobj:%p, connection is already closed", jobj); return JNI_CONNECTION_NULL; } @@ -252,6 +252,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J char *dst = (char *)calloc(1, sizeof(char) * (len + 1)); if (dst == NULL) { + jniError("jobj:%p, conn:%p, can not alloc memory", jobj, tscon); return JNI_OUT_OF_MEMORY; } @@ -260,6 +261,8 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J //todo handle error } + jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, sql); + int code = taos_query(tscon, dst); if (code != 0) { jniError("jobj:%p, conn:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst); @@ -271,9 +274,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J if (pSql->cmd.command == TSDB_SQL_INSERT) { affectRows = taos_affected_rows(tscon); - jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d, sql:%s", jobj, tscon, code, affectRows, dst); + jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows, dst); } else { - jniTrace("jobj:%p, conn:%p, code:%d, sql:%s", jobj, tscon, code, dst); + jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code, dst); } free(dst); @@ -307,7 +310,7 @@ JNIEXPORT jlong JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getResultSetImp( if (tscIsUpdateQuery(tscon)) { ret = 0; // for update query, no result pointer - jniTrace("jobj:%p, conn:%p, no result", jobj, tscon); + jniTrace("jobj:%p, conn:%p, no resultset", jobj, tscon); } else { ret = (jlong) taos_use_result(tscon); jniTrace("jobj:%p, conn:%p, get resultset:%p", jobj, tscon, (void *) ret); @@ -496,7 +499,7 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_closeConnectionIm jlong con) { TAOS *tscon = (TAOS *)con; if (tscon == NULL) { - jniError("jobj:%p, connection is closed", jobj); + jniError("jobj:%p, connection is already closed", jobj); return 
JNI_CONNECTION_NULL; } else { jniTrace("jobj:%p, conn:%p, close connection success", jobj, tscon); From 8de14a0c7d05c58b347e655cd7b22e565e5a9ddd Mon Sep 17 00:00:00 2001 From: hjxilinx Date: Fri, 6 Dec 2019 10:08:42 +0800 Subject: [PATCH 05/17] [tbase-1282] --- src/client/inc/tscSecondaryMerge.h | 2 +- src/client/inc/tscUtil.h | 67 ++++++++------- src/client/inc/tsclient.h | 74 ++++++++-------- src/client/src/tscAsync.c | 31 ++++--- src/client/src/tscJoinProcess.c | 130 ++++++++++++++++++++++------- src/client/src/tscParseInsert.c | 15 ++-- src/client/src/tscPrepare.c | 8 +- src/client/src/tscServer.c | 74 ++++++++-------- src/client/src/tscSql.c | 16 +++- src/client/src/tscUtil.c | 28 ++++--- 10 files changed, 267 insertions(+), 178 deletions(-) diff --git a/src/client/inc/tscSecondaryMerge.h b/src/client/inc/tscSecondaryMerge.h index 4c95994dfa..0c6472f6b3 100644 --- a/src/client/inc/tscSecondaryMerge.h +++ b/src/client/inc/tscSecondaryMerge.h @@ -94,7 +94,7 @@ typedef struct SRetrieveSupport { tOrderDescriptor *pOrderDescriptor; tColModel * pFinalColModel; // colModel for final result SSubqueryState * pState; - int32_t vnodeIdx; // index of current vnode in vnode list + int32_t subqueryIndex; // index of current vnode in vnode list SSqlObj * pParentSqlObj; tFilePage * localBuffer; // temp buffer, there is a buffer for each vnode to uint32_t numOfRetry; // record the number of retry times diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index da4ebc4e9c..41e1389c8e 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -26,11 +26,12 @@ extern "C" { #include #include #include "textbuffer.h" +#include "tscSecondaryMerge.h" #include "tsclient.h" #include "tsdb.h" -#include "tscSecondaryMerge.h" -#define UTIL_METER_IS_METRIC(metaInfo) (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC)) +#define UTIL_METER_IS_METRIC(metaInfo) \ + (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_METRIC)) #define UTIL_METER_IS_NOMRAL_METER(metaInfo) (!(UTIL_METER_IS_METRIC(metaInfo))) #define UTIL_METER_IS_CREATE_FROM_METRIC(metaInfo) \ (((metaInfo)->pMeterMeta != NULL) && ((metaInfo)->pMeterMeta->meterType == TSDB_METER_MTABLE)) @@ -62,28 +63,27 @@ typedef struct SJoinSubquerySupporter { SFieldInfo fieldsInfo; STagCond tagCond; SSqlGroupbyExpr groupbyExpr; - - struct STSBuf* pTSBuf; - - FILE* f; - char path[PATH_MAX]; + struct STSBuf* pTSBuf; // the TSBuf struct that holds the compressed timestamp array + FILE* f; // temporary file in order to create TSBuf + char path[PATH_MAX]; // temporary file path } SJoinSubquerySupporter; -void tscDestroyDataBlock(STableDataBlocks* pDataBlock); +void tscDestroyDataBlock(STableDataBlocks* pDataBlock); STableDataBlocks* tscCreateDataBlock(int32_t size); -void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks); -SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, uint32_t offset); +void tscAppendDataBlock(SDataBlockList* pList, STableDataBlocks* pBlocks); +SParamInfo* tscAddParamToDataBlock(STableDataBlocks* pDataBlock, char type, uint8_t timePrec, short bytes, + uint32_t offset); -SDataBlockList* tscCreateBlockArrayList(); -void* tscDestroyBlockArrayList(SDataBlockList* pList); -int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); -void tscFreeUnusedDataBlocks(SDataBlockList* pList); -int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList); 
+SDataBlockList* tscCreateBlockArrayList(); +void* tscDestroyBlockArrayList(SDataBlockList* pList); +int32_t tscCopyDataBlockToPayload(SSqlObj* pSql, STableDataBlocks* pDataBlock); +void tscFreeUnusedDataBlocks(SDataBlockList* pList); +int32_t tscMergeTableDataBlocks(SSqlObj* pSql, SDataBlockList* pDataList); STableDataBlocks* tscGetDataBlockFromList(void* pHashList, SDataBlockList* pDataBlockList, int64_t id, int32_t size, int32_t startOffset, int32_t rowSize, char* tableId); STableDataBlocks* tscCreateDataBlockEx(size_t size, int32_t rowSize, int32_t startOffset, char* name); -SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); +SVnodeSidList* tscGetVnodeSidList(SMetricMeta* pMetricmeta, int32_t vnodeIdx); SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); /** @@ -108,7 +108,7 @@ void tscAddSpecialColumnForSelect(SSqlCmd* pCmd, int32_t outputColIndex, int16_t void addRequiredTagColumn(SSqlCmd* pCmd, int32_t tagColIndex, int32_t tableIndex); int32_t setMeterID(SSqlObj* pSql, SSQLToken* pzTableName, int32_t tableIndex); -void tscClearInterpInfo(SSqlCmd* pCmd); +void tscClearInterpInfo(SSqlCmd* pCmd); bool tscIsInsertOrImportData(char* sqlstr); @@ -128,9 +128,9 @@ void tscFieldInfoCopy(SFieldInfo* src, SFieldInfo* dst, const int32_t* indexList void tscFieldInfoCopyAll(SFieldInfo* src, SFieldInfo* dst); TAOS_FIELD* tscFieldInfoGetField(SSqlCmd* pCmd, int32_t index); -int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index); -int32_t tscGetResRowLength(SSqlCmd* pCmd); -void tscClearFieldInfo(SFieldInfo* pFieldInfo); +int16_t tscFieldInfoGetOffset(SSqlCmd* pCmd, int32_t index); +int32_t tscGetResRowLength(SSqlCmd* pCmd); +void tscClearFieldInfo(SFieldInfo* pFieldInfo); void addExprParams(SSqlExpr* pExpr, char* argument, int32_t type, int32_t bytes, int16_t tableIndex); @@ -142,15 +142,15 @@ SSqlExpr* tscSqlExprUpdate(SSqlCmd* pCmd, int32_t index, int16_t functionId, int int16_t size); SSqlExpr* tscSqlExprGet(SSqlCmd* pCmd, int32_t index); -void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid); +void tscSqlExprCopy(SSqlExprInfo* dst, const SSqlExprInfo* src, uint64_t uid); SColumnBase* tscColumnBaseInfoInsert(SSqlCmd* pCmd, SColumnIndex* colIndex); -void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src); -void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src); +void tscColumnFilterInfoCopy(SColumnFilterInfo* dst, const SColumnFilterInfo* src); +void tscColumnBaseCopy(SColumnBase* dst, const SColumnBase* src); -void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex); +void tscColumnBaseInfoCopy(SColumnBaseInfo* dst, const SColumnBaseInfo* src, int16_t tableIndex); SColumnBase* tscColumnBaseInfoGet(SColumnBaseInfo* pColumnBaseInfo, int32_t index); -void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex); +void tscColumnBaseInfoUpdateTableIndex(SColumnBaseInfo* pColList, int16_t tableIndex); void tscColumnBaseInfoReserve(SColumnBaseInfo* pColumnBaseInfo, int32_t size); void tscColumnBaseInfoDestroy(SColumnBaseInfo* pColumnBaseInfo); @@ -163,7 +163,7 @@ bool tscValidateColumnId(SSqlCmd* pCmd, int32_t colId); // get starter position of metric query condition (query on tags) in SSqlCmd.payload SCond* tsGetMetricQueryCondPos(STagCond* pCond, uint64_t tableIndex); -void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str); +void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* 
str); void tscTagCondCopy(STagCond* dest, const STagCond* src); void tscTagCondRelease(STagCond* pCond); @@ -176,19 +176,19 @@ bool tscShouldFreeHeatBeat(SSqlObj* pHb); void tscCleanSqlCmd(SSqlCmd* pCmd); bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql); -void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache); +void tscRemoveAllMeterMetaInfo(SSqlCmd* pCmd, bool removeFromCache); SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index); SMeterMetaInfo* tscGetMeterMetaInfoByUid(SSqlCmd* pCmd, uint64_t uid, int32_t* index); -void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache); +void tscClearMeterMetaInfo(SMeterMetaInfo* pMeterMetaInfo, bool removeFromCache); SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMeta, SMetricMeta* pMetricMeta, int16_t numOfTags, int16_t* tags); SMeterMetaInfo* tscAddEmptyMeterMetaInfo(SSqlCmd* pCmd); void tscGetMetricMetaCacheKey(SSqlCmd* pCmd, char* keyStr, uint64_t uid); -int tscGetMetricMeta(SSqlObj* pSql); -int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex); -int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists); +int tscGetMetricMeta(SSqlObj* pSql); +int tscGetMeterMeta(SSqlObj* pSql, char* meterId, int32_t tableIndex); +int tscGetMeterMetaEx(SSqlObj* pSql, char* meterId, bool createIfNotExists); void tscResetForNextRetrieve(SSqlRes* pRes); @@ -212,9 +212,8 @@ void tscDoQuery(SSqlObj* pSql); * @param pPrevSql * @return */ -SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param, - SSqlObj* pPrevSql); -void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex); +SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql); +void addGroupInfoForSubquery(SSqlObj* pParentObj, SSqlObj* pSql, int32_t tableIndex); void doAddGroupColumnForSubquery(SSqlCmd* pCmd, int32_t tagIndex); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index d439ba9929..4101cbfc9e 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -107,22 +107,25 @@ enum _sql_cmd { struct SSqlInfo; typedef struct SSqlGroupbyExpr { - int16_t tableIndex; - + int16_t tableIndex; int16_t numOfGroupCols; SColIndexEx columnInfo[TSDB_MAX_TAGS]; // group by columns information - - int16_t orderIndex; // order by column index - int16_t orderType; // order by type: asc/desc + int16_t orderIndex; // order by column index + int16_t orderType; // order by type: asc/desc } SSqlGroupbyExpr; typedef struct SMeterMetaInfo { - SMeterMeta * pMeterMeta; // metermeta - SMetricMeta *pMetricMeta; // metricmeta - - char name[TSDB_METER_ID_LEN + 1]; - int16_t numOfTags; // total required tags in query, including groupby tags - int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection + SMeterMeta * pMeterMeta; // metermeta + SMetricMeta *pMetricMeta; // metricmeta + + /* + * 1. keep the vnode index during the multi-vnode super table projection query + * 2. 
keep the vnode index for multi-vnode insertion + */ + int32_t vnodeIndex; + char name[TSDB_METER_ID_LEN + 1]; // table(super table) name + int16_t numOfTags; // total required tags in query, including groupby tags + int16_t tagColumnIndex[TSDB_MAX_TAGS]; // clause + tag projection } SMeterMetaInfo; /* the structure for sql function in select clause */ @@ -188,7 +191,7 @@ typedef struct SString { typedef struct SCond { uint64_t uid; - char* cond; + char * cond; } SCond; typedef struct SJoinNode { @@ -262,15 +265,15 @@ typedef struct SDataBlockList { typedef struct { SOrderVal order; int command; - int count;// TODO refactor + int count; // TODO refactor union { - bool existsCheck; // check if the table exists - int8_t showType; // show command type + bool existsCheck; // check if the table exists + int8_t showType; // show command type }; - + int8_t isInsertFromFile; // load data from file or not - bool import; // import/insert type + bool import; // import/insert type char msgType; uint16_t type; // query type char intervalTimeUnit; @@ -296,7 +299,6 @@ typedef struct { SLimitVal slimit; int64_t globalLimit; STagCond tagCond; - int16_t vnodeIdx; // vnode index in pMetricMeta for metric query int16_t interpoType; // interpolate type int16_t numOfTables; @@ -366,25 +368,23 @@ typedef struct _sql_obj { STscObj *pTscObj; void (*fp)(); void (*fetchFp)(); - void * param; - uint32_t ip; - short vnode; - int64_t stime; - uint32_t queryId; - void * thandle; - void * pStream; - char * sqlstr; - char retry; - char maxRetry; - char index; - char freed : 4; - char listed : 4; - tsem_t rspSem; - tsem_t emptyRspSem; - - SSqlCmd cmd; - SSqlRes res; - + void * param; + uint32_t ip; + short vnode; + int64_t stime; + uint32_t queryId; + void * thandle; + void * pStream; + char * sqlstr; + char retry; + char maxRetry; + char index; + char freed : 4; + char listed : 4; + tsem_t rspSem; + tsem_t emptyRspSem; + SSqlCmd cmd; + SSqlRes res; char numOfSubs; struct _sql_obj **pSubs; struct _sql_obj * prev, *next; diff --git a/src/client/src/tscAsync.c b/src/client/src/tscAsync.c index abf91e7c43..1268844d77 100644 --- a/src/client/src/tscAsync.c +++ b/src/client/src/tscAsync.c @@ -121,7 +121,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf // sequentially retrieve data from remain vnodes first, query vnode specified by vnodeIdx if (numOfRows == 0 && tscProjectionQueryOnMetric(pCmd)) { // vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx - assert(pCmd->vnodeIdx >= 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pMeterMetaInfo->vnodeIndex >= 0); /* reach the maximum number of output rows, abort */ if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { @@ -133,8 +134,8 @@ static void tscProcessAsyncFetchRowsProxy(void *param, TAOS_RES *tres, int numOf pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; pCmd->limit.offset = pRes->offset; - if ((++(pCmd->vnodeIdx)) < tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) { - tscTrace("%p retrieve data from next vnode:%d", pSql, pCmd->vnodeIdx); + if ((++(pMeterMetaInfo->vnodeIndex)) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + tscTrace("%p retrieve data from next vnode:%d", pSql, pMeterMetaInfo->vnodeIndex); pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. 
@@ -272,7 +273,8 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) { /* * vnode is denoted by vnodeIdx, continue to query vnode specified by vnodeIdx till all vnode have been retrieved */ - assert(pCmd->vnodeIdx >= 1); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pMeterMetaInfo->vnodeIndex >= 0); /* reach the maximum number of output rows, abort */ if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { @@ -283,7 +285,7 @@ void tscProcessAsyncRetrieve(void *param, TAOS_RES *tres, int numOfRows) { /* update the limit value according to current retrieval results */ pCmd->limit.limit = pCmd->globalLimit - pRes->numOfTotal; - if ((++pCmd->vnodeIdx) <= tscGetMeterMetaInfo(pCmd, 0)->pMetricMeta->numOfVnodes) { + if ((++pMeterMetaInfo->vnodeIndex) <= pMeterMetaInfo->pMetricMeta->numOfVnodes) { pSql->cmd.command = TSDB_SQL_SELECT; // reset flag to launch query first. tscResetForNextRetrieve(pRes); @@ -404,9 +406,12 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) int32_t code = TSDB_CODE_SUCCESS; assert(!pCmd->isInsertFromFile && pSql->signature == pSql); - + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + assert(pCmd->numOfTables == 1); + SDataBlockList *pDataBlocks = pCmd->pDataBlocks; - if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { // restore user defined fp pSql->fp = pSql->fetchFp; tscTrace("%p Async insertion completed, destroy data block list", pSql); @@ -418,17 +423,17 @@ void tscAsyncInsertMultiVnodesProxy(void *param, TAOS_RES *tres, int numOfRows) (*pSql->fp)(pSql->param, tres, numOfRows); } else { do { - code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pCmd->vnodeIdx++]); + code = tscCopyDataBlockToPayload(pSql, pDataBlocks->pData[pMeterMetaInfo->vnodeIndex++]); if (code != TSDB_CODE_SUCCESS) { tscTrace("%p prepare submit data block failed in async insertion, vnodeIdx:%d, total:%d, code:%d", - pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize, code); + pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize, code); } - } while (code != TSDB_CODE_SUCCESS && pCmd->vnodeIdx < pDataBlocks->nSize); + } while (code != TSDB_CODE_SUCCESS && pMeterMetaInfo->vnodeIndex < pDataBlocks->nSize); // build submit msg may fail if (code == TSDB_CODE_SUCCESS) { - tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx - 1, pDataBlocks->nSize); + tscTrace("%p async insertion, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex - 1, pDataBlocks->nSize); tscProcessSql(pSql); } } @@ -484,11 +489,11 @@ void tscMeterMetaCallBack(void *param, TAOS_RES *res, int code) { // check if it is a sub-query of metric query first, if true, enter another routine if ((pSql->cmd.type & TSDB_QUERY_TYPE_STABLE_SUBQUERY) == TSDB_QUERY_TYPE_STABLE_SUBQUERY) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pCmd->vnodeIdx >= 0 && pSql->param != NULL); + assert(pMeterMetaInfo->pMeterMeta->numOfTags != 0 && pMeterMetaInfo->vnodeIndex >= 0 && pSql->param != NULL); SRetrieveSupport *trs = (SRetrieveSupport *)pSql->param; SSqlObj * pParObj = trs->pParentSqlObj; - assert(pParObj->signature == pParObj && trs->vnodeIdx == pCmd->vnodeIdx && + assert(pParObj->signature == pParObj && trs->subqueryIndex == pMeterMetaInfo->vnodeIndex && pMeterMetaInfo->pMeterMeta->numOfTags != 0); tscTrace("%p get metricMeta during metric 
query successfully", pSql); diff --git a/src/client/src/tscJoinProcess.c b/src/client/src/tscJoinProcess.c index ed44d54066..b470d84440 100644 --- a/src/client/src/tscJoinProcess.c +++ b/src/client/src/tscJoinProcess.c @@ -150,7 +150,7 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor tsBufDestory(pSupporter1->pTSBuf); tsBufDestory(pSupporter2->pTSBuf); - tscTrace("%p input1:%lld, input2:%lld, %lld for secondary query after ts blocks intersecting", + tscTrace("%p input1:%lld, input2:%lld, final:%lld for secondary query after ts blocks intersecting", pSql, numOfInput1, numOfInput2, output1->numOfTotal); return output1->numOfTotal; @@ -239,15 +239,20 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { pSupporter = pSql->pSubs[i]->param; pSupporter->pState->numOfCompleted = 0; + /* + * If the columns are not involved in the final select clause, the secondary query will not be launched + * for the subquery. + */ if (pSupporter->exprsInfo.numOfExprs > 0) { ++numOfSub; } } // scan all subquery, if one sub query has only ts, ignore it - int32_t j = 0; - tscTrace("%p start to launch secondary subqueries: %d", pSql, pSql->numOfSubs); + tscTrace("%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in " + "select clause", pSql, pSql->numOfSubs, numOfSub); + int32_t j = 0; for (int32_t i = 0; i < pSql->numOfSubs; ++i) { SSqlObj* pSub = pSql->pSubs[i]; pSupporter = pSub->param; @@ -259,15 +264,14 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { continue; } - SSqlObj* pNew = createSubqueryObj(pSql, 0, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL); + SSqlObj* pNew = createSubqueryObj(pSql, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { pSql->numOfSubs = i; //revise the number of subquery pSupporter->pState->numOfTotal = i; pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscDestroyJoinSupporter(pSupporter); - - return NULL; + return 0; } tscFreeSqlCmdData(&pNew->cmd); @@ -386,8 +390,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { if (numOfRows > 0) { // write the data into disk fwrite(pSql->res.data, pSql->res.numOfRows, 1, pSupporter->f); - fflush(pSupporter->f); - + fclose(pSupporter->f); + STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true); if (pBuf == NULL) { tscError("%p invalid ts comp file from vnode, abort sub query, file size:%d", pSql, numOfRows); @@ -401,7 +405,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path); pSupporter->pTSBuf = pBuf; } else { - tsBufMerge(pSupporter->pTSBuf, pBuf, pSql->cmd.vnodeIdx); + assert(pSql->cmd.numOfTables == 1); // for subquery, only one metermetaInfo + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex); tsBufDestory(pBuf); } @@ -412,6 +419,20 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { taos_fetch_rows_a(tres, joinRetrieveCallback, param); } else if (numOfRows == 0) { // no data from this vnode anymore + if (tscProjectionQueryOnMetric(&pParentSql->cmd)) { + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + assert(pSql->cmd.numOfTables == 1); + + // for projection query, need to try next vnode + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + pSql->cmd.command = TSDB_SQL_SELECT; + pSql->fp = tscJoinQueryCallback; + 
tscProcessSql(pSql); + + return; + } + } + if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { @@ -466,6 +487,8 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { void tscFetchDatablockFromSubquery(SSqlObj* pSql) { int32_t numOfFetch = 0; + assert(pSql->numOfSubs >= 1); + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param; @@ -731,7 +754,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { strncpy(pTSBuf->path, path, PATH_MAX); - pTSBuf->f = fopen(pTSBuf->path, "r"); + pTSBuf->f = fopen(pTSBuf->path, "r+"); if (pTSBuf->f == NULL) { return NULL; } @@ -797,6 +820,10 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { pTSBuf->cur.order = TSQL_SO_ASC; pTSBuf->autoDelete = autoDelete; + + tscTrace("create tsBuf from file:%s, fd:%d, size:%d, numOfVnode:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), + pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete); + return pTSBuf; } @@ -814,10 +841,21 @@ void tsBufDestory(STSBuf* pTSBuf) { fclose(pTSBuf->f); if (pTSBuf->autoDelete) { + tscTrace("tsBuf %p destroyed, delete tmp file:%s", pTSBuf, pTSBuf->path); unlink(pTSBuf->path); + } else { + tscTrace("tsBuf %p destroyed, tmp file:%s, remains", pTSBuf, pTSBuf->path); } free(pTSBuf); + +} + +static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) { + int32_t last = pTSBuf->numOfVnodes - 1; + + assert(last >= 0); + return &pTSBuf->pData[last]; } static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { @@ -836,10 +874,10 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { } if (pTSBuf->numOfVnodes > 0) { - STSVnodeBlockInfo* pPrevBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info; + STSVnodeBlockInfoEx* pPrevBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pPrevBlockInfo); + TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pPrevBlockInfoEx->info); } // set initial value for vnode block @@ -855,11 +893,11 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { pTSBuf->numOfVnodes += 1; // update the header info - STSBufFileHeader header = { - .magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; + STSBufFileHeader header = + {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; + STSBufUpdateHeader(pTSBuf, &header); - - return &pTSBuf->pData[pTSBuf->numOfVnodes - 1]; + return tsBufGetLastVnodeInfo(pTSBuf); } static void shrinkBuffer(STSList* ptsData) { @@ -905,9 +943,11 @@ static void writeDataToDisk(STSBuf* pTSBuf) { pTSBuf->fileSize += blockSize; pTSBuf->tsData.len = 0; - - pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.compLen += blockSize; - pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.numOfBlocks += 1; + + STSVnodeBlockInfoEx* pVnodeBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); + + pVnodeBlockInfoEx->info.compLen += blockSize; + pVnodeBlockInfoEx->info.numOfBlocks += 1; shrinkBuffer(&pTSBuf->tsData); } @@ -1008,13 +1048,13 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData STSVnodeBlockInfoEx* pBlockInfo = NULL; STSList* ptsData = &pTSBuf->tsData; - if (pTSBuf->numOfVnodes == 0 || pTSBuf->pData[pTSBuf->numOfVnodes - 1].info.vnode != vnodeId) { + if 
(pTSBuf->numOfVnodes == 0 || tsBufGetLastVnodeInfo(pTSBuf)->info.vnode != vnodeId) { writeDataToDisk(pTSBuf); shrinkBuffer(ptsData); pBlockInfo = addOneVnodeInfo(pTSBuf, vnodeId); } else { - pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1]; + pBlockInfo = tsBufGetLastVnodeInfo(pTSBuf); } assert(pBlockInfo->info.vnode == vnodeId); @@ -1037,6 +1077,8 @@ void tsBufAppend(STSBuf* pTSBuf, int32_t vnodeId, int64_t tag, const char* pData pTSBuf->numOfTotal += len / TSDB_KEYSIZE; + // the size of raw data exceeds the size of the default prepared buffer, so + // during getBufBlock, the output buffer needs to be large enough. if (ptsData->len >= ptsData->threshold) { writeDataToDisk(pTSBuf); shrinkBuffer(ptsData); @@ -1053,10 +1095,10 @@ void tsBufFlush(STSBuf* pTSBuf) { writeDataToDisk(pTSBuf); shrinkBuffer(&pTSBuf->tsData); - STSVnodeBlockInfo* pBlockInfo = &pTSBuf->pData[pTSBuf->numOfVnodes - 1].info; + STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); // update prev vnode length info in file - TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); + TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, &pBlockInfoEx->info); // save the ts order into header STSBufFileHeader header = { @@ -1157,11 +1199,22 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex } STSBlock* pBlock = &pTSBuf->block; + + size_t s = pBlock->numOfElem * TSDB_KEYSIZE; + + /* + * In order to accommodate all the qualified data, the actual buffer size for one block with identical tags value + * may exceed the maximum allowed size during *tsBufAppend* function by invoking expandBuffer function + */ + if (s > pTSBuf->tsData.allocSize) { + expandBuffer(&pTSBuf->tsData, s); + } + pTSBuf->tsData.len = tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf, pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize); - assert(pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem); + assert((pTSBuf->tsData.len / TSDB_KEYSIZE == pBlock->numOfElem) && (pTSBuf->tsData.allocSize >= pTSBuf->tsData.len)); pCur->vnodeIndex = vnodeIndex; pCur->blockIndex = blockIndex; @@ -1293,6 +1346,8 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) { return elem1; } + + /** * current only support ts comp data from two vnode merge * @param pDestBuf @@ -1318,7 +1373,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { tsBufFlush(pDestBuf); // compared with the last vnode id - if (vnodeId != pDestBuf->pData[pDestBuf->numOfVnodes - 1].info.vnode) { + if (vnodeId != tsBufGetLastVnodeInfo(pDestBuf)->info.vnode) { int32_t oldSize = pDestBuf->numOfVnodes; int32_t newSize = oldSize + pSrcBuf->numOfVnodes; @@ -1345,36 +1400,49 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { pDestBuf->numOfVnodes = newSize; } else { - STSVnodeBlockInfoEx* pBlockInfoEx = &pDestBuf->pData[pDestBuf->numOfVnodes - 1]; + STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pDestBuf); + pBlockInfoEx->len += pSrcBuf->pData[0].len; pBlockInfoEx->info.numOfBlocks += pSrcBuf->pData[0].info.numOfBlocks; pBlockInfoEx->info.compLen += pSrcBuf->pData[0].info.compLen; pBlockInfoEx->info.vnode = vnodeId; } - int64_t r = fseek(pDestBuf->f, 0, SEEK_END); + int32_t r = fseek(pDestBuf->f, 0, SEEK_END); assert(r == 0); int64_t offset = getDataStartOffset(); int32_t size = pSrcBuf->fileSize - offset; #ifdef LINUX - ssize_t rc = sendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size); + ssize_t rc = 
tsendfile(fileno(pDestBuf->f), fileno(pSrcBuf->f), &offset, size); #else ssize_t rc = fsendfile(pDestBuf->f, pSrcBuf->f, &offset, size); #endif + if (rc == -1) { - printf("%s\n", strerror(errno)); + tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); return -1; } if (rc != size) { - printf("%s\n", strerror(errno)); + tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); return -1; } pDestBuf->numOfTotal += pSrcBuf->numOfTotal; - + + int32_t oldSize = pDestBuf->fileSize; + + struct stat fileStat; + fstat(fileno(pDestBuf->f), &fileStat); + pDestBuf->fileSize = (uint32_t) fileStat.st_size; + + assert(pDestBuf->fileSize == oldSize + size); + + tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, vnode:%d, autoDelete:%d", pDestBuf, pDestBuf->path, + fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete); + return 0; } diff --git a/src/client/src/tscParseInsert.c b/src/client/src/tscParseInsert.c index 58cfcda17e..572b65f364 100644 --- a/src/client/src/tscParseInsert.c +++ b/src/client/src/tscParseInsert.c @@ -498,10 +498,11 @@ int tsParseValues(char **str, STableDataBlocks *pDataBlock, SMeterMeta *pMeterMe *str += index; if (numOfRows >= maxRows || pDataBlock->size + pMeterMeta->rowSize >= pDataBlock->nAllocSize) { int32_t tSize = tscAllocateMemIfNeed(pDataBlock, pMeterMeta->rowSize); - if (0 == tSize) { + if (0 == tSize) { //TODO pass the correct error code to client strcpy(error, "client out of memory"); return -1; } + maxRows += tSize; } @@ -1060,8 +1061,10 @@ int doParserInsertSql(SSqlObj *pSql, char *str) { goto _error_clean; } + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + // set the next sent data vnode index in data block arraylist - pCmd->vnodeIdx = 1; + pMeterMetaInfo->vnodeIndex = 1; } else { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); } @@ -1279,19 +1282,19 @@ void tscProcessMultiVnodesInsert(SSqlObj *pSql) { int32_t code = TSDB_CODE_SUCCESS; /* the first block has been sent to server in processSQL function */ - assert(pCmd->isInsertFromFile != -1 && pCmd->vnodeIdx >= 1 && pCmd->pDataBlocks != NULL); + assert(pCmd->isInsertFromFile != -1 && pMeterMetaInfo->vnodeIndex >= 1 && pCmd->pDataBlocks != NULL); - if (pCmd->vnodeIdx < pCmd->pDataBlocks->nSize) { + if (pMeterMetaInfo->vnodeIndex < pCmd->pDataBlocks->nSize) { SDataBlockList *pDataBlocks = pCmd->pDataBlocks; - for (int32_t i = pCmd->vnodeIdx; i < pDataBlocks->nSize; ++i) { + for (int32_t i = pMeterMetaInfo->vnodeIndex; i < pDataBlocks->nSize; ++i) { pDataBlock = pDataBlocks->pData[i]; if (pDataBlock == NULL) { continue; } if ((code = tscCopyDataBlockToPayload(pSql, pDataBlock)) != TSDB_CODE_SUCCESS) { - tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pCmd->vnodeIdx, pDataBlocks->nSize); + tscTrace("%p build submit data block failed, vnodeIdx:%d, total:%d", pSql, pMeterMetaInfo->vnodeIndex, pDataBlocks->nSize); continue; } diff --git a/src/client/src/tscPrepare.c b/src/client/src/tscPrepare.c index 532baec205..7e62afefe6 100644 --- a/src/client/src/tscPrepare.c +++ b/src/client/src/tscPrepare.c @@ -409,7 +409,9 @@ static int insertStmtReset(STscStmt* pStmt) { } } pCmd->batchSize = 0; - pCmd->vnodeIdx = 0; + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + pMeterMetaInfo->vnodeIndex = 0; return TSDB_CODE_SUCCESS; } @@ -422,6 +424,8 @@ static int insertStmtExecute(STscStmt* stmt) { 
++pCmd->batchSize; } + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + if (pCmd->pDataBlocks->nSize > 0) { // merge according to vgid int code = tscMergeTableDataBlocks(stmt->pSql, pCmd->pDataBlocks); @@ -436,7 +440,7 @@ static int insertStmtExecute(STscStmt* stmt) { } // set the next sent data vnode index in data block arraylist - pCmd->vnodeIdx = 1; + pMeterMetaInfo->vnodeIndex = 1; } else { pCmd->pDataBlocks = tscDestroyBlockArrayList(pCmd->pDataBlocks); } diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index bf175c5540..81ef5d13e6 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -222,7 +222,7 @@ void tscGetConnToVnode(SSqlObj *pSql, uint8_t *pCode) { SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { // multiple vnode query - SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *vnodeList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex); if (vnodeList != NULL) { pVPeersDesc = vnodeList->vpeerDesc; } @@ -528,7 +528,7 @@ void *tscProcessMsgFromServer(char *msg, void *ahandle, void *thandle) { if (pMeterMetaInfo->pMeterMeta) // it may be deleted pMeterMetaInfo->pMeterMeta->index = pSql->index; } else { - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pSql->cmd.vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, pMeterMetaInfo->vnodeIndex); pVnodeSidList->index = pSql->index; } } else { @@ -639,7 +639,7 @@ static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsu static int tscLaunchMetricSubQueries(SSqlObj *pSql); // todo merge with callback -int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeIdx, SJoinSubquerySupporter *pSupporter) { +int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySupporter *pSupporter) { SSqlCmd *pCmd = &pSql->cmd; pSql->res.qhandle = 0x1; @@ -652,12 +652,13 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, int16_t vnodeId } } - SSqlObj *pNew = createSubqueryObj(pSql, vnodeIdx, tableIndex, tscJoinQueryCallback, pSupporter, NULL); + SSqlObj *pNew = createSubqueryObj(pSql, tableIndex, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { return TSDB_CODE_CLI_OUT_OF_MEMORY; } - + pSql->pSubs[pSql->numOfSubs++] = pNew; + assert(pSql->numOfSubs <= pSupporter->pState->numOfTotal); if (QUERY_IS_JOIN_QUERY(pCmd->type)) { addGroupInfoForSubquery(pSql, pNew, tableIndex); @@ -774,7 +775,7 @@ int tscProcessSql(SSqlObj *pSql) { pSql->index = pMeterMetaInfo->pMeterMeta->index; } else { // it must be the parent SSqlObj for super table query if ((pSql->cmd.type & TSDB_QUERY_TYPE_SUBQUERY) != 0) { - int32_t idx = pSql->cmd.vnodeIdx; + int32_t idx = pMeterMetaInfo->vnodeIndex; SVnodeSidList *pSidList = tscGetVnodeSidList(pMeterMetaInfo->pMetricMeta, idx); pSql->index = pSidList->index; } @@ -802,7 +803,7 @@ int tscProcessSql(SSqlObj *pSql) { return pSql->res.code; } - int32_t code = tscLaunchJoinSubquery(pSql, i, 0, pSupporter); + int32_t code = tscLaunchJoinSubquery(pSql, i, pSupporter); if (code != TSDB_CODE_SUCCESS) { // failed to create subquery object, quit query tscDestroyJoinSupporter(pSupporter); pSql->res.code = TSDB_CODE_CLI_OUT_OF_MEMORY; @@ -944,7 +945,7 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { trs->pOrderDescriptor = pDesc; trs->pState = pState; trs->localBuffer = (tFilePage *)calloc(1, 
nBufferSize + sizeof(tFilePage)); - trs->vnodeIdx = i; + trs->subqueryIndex = i; trs->pParentSqlObj = pSql; trs->pFinalColModel = pModel; @@ -971,7 +972,7 @@ int tscLaunchMetricSubQueries(SSqlObj *pSql) { pNew->cmd.tsBuf = tsBufClone(pSql->cmd.tsBuf); } - tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, pNew->cmd.vnodeIdx); + tscTrace("%p sub:%p launch subquery.orderOfSub:%d", pSql, pNew, trs->subqueryIndex); tscProcessSql(pNew); } @@ -1020,7 +1021,7 @@ static void tscAbortFurtherRetryRetrieval(SRetrieveSupport *trsupport, TAOS_RES static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSql, int numOfRows) { SSqlObj *pPObj = trsupport->pParentSqlObj; - int32_t idx = trsupport->vnodeIdx; + int32_t subqueryIndex = trsupport->subqueryIndex; assert(pSql != NULL); @@ -1035,27 +1036,27 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq pSql->res.numOfRows = 0; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; // disable retry efforts tscTrace("%p query is cancelled, sub:%p, orderOfSub:%d abort retrieve, code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, trsupport->pState->code); + subqueryIndex, trsupport->pState->code); } if (numOfRows >= 0) { // current query is successful, but other sub query failed, still abort current query. - tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, idx); - tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, idx, - trsupport->pState->code); + tscTrace("%p sub:%p retrieve numOfRows:%d,orderOfSub:%d", pPObj, pSql, numOfRows, subqueryIndex); + tscError("%p sub:%p abort further retrieval due to other queries failure,orderOfSub:%d,code:%d", pPObj, pSql, + subqueryIndex, trsupport->pState->code); } else { if (trsupport->numOfRetry++ < MAX_NUM_OF_SUBQUERY_RETRY && trsupport->pState->code == TSDB_CODE_SUCCESS) { /* * current query failed, and the retry count is less than the available * count, retry query clear previous retrieved data, then launch a new sub query */ - tExtMemBufferClear(trsupport->pExtMemBuffer[idx]); + tExtMemBufferClear(trsupport->pExtMemBuffer[subqueryIndex]); // clear local saved number of results trsupport->localBuffer->numOfElems = 0; pthread_mutex_unlock(&trsupport->queryMutex); tscTrace("%p sub:%p retrieve failed, code:%d, orderOfSub:%d, retry:%d", trsupport->pParentSqlObj, pSql, numOfRows, - idx, trsupport->numOfRetry); + subqueryIndex, trsupport->numOfRetry); SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); if (pNew == NULL) { @@ -1072,7 +1073,7 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq } else { // reach the maximum retry count, abort atomic_val_compare_exchange_32(&trsupport->pState->code, TSDB_CODE_SUCCESS, numOfRows); tscError("%p sub:%p retrieve failed,code:%d,orderOfSub:%d failed.no more retry,set global code:%d", pPObj, pSql, - numOfRows, idx, trsupport->pState->code); + numOfRows, subqueryIndex, trsupport->pState->code); } } @@ -1115,13 +1116,12 @@ static void tscHandleSubRetrievalError(SRetrieveSupport *trsupport, SSqlObj *pSq void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { SRetrieveSupport *trsupport = (SRetrieveSupport *)param; - int32_t idx = trsupport->vnodeIdx; + int32_t idx = trsupport->subqueryIndex; SSqlObj * pPObj = trsupport->pParentSqlObj; tOrderDescriptor *pDesc = trsupport->pOrderDescriptor; SSqlObj *pSql = (SSqlObj *)tres; - if (pSql == NULL) 
{ - /* sql object has been released in error process, return immediately */ + if (pSql == NULL) { // sql object has been released in error process, return immediately tscTrace("%p subquery has been released, idx:%d, abort", pPObj, idx); return; } @@ -1172,7 +1172,7 @@ void tscRetrieveFromVnodeCallBack(void *param, TAOS_RES *tres, int numOfRows) { } else { // all data has been retrieved to client /* data in from current vnode is stored in cache and disk */ uint32_t numOfRowsFromVnode = - trsupport->pExtMemBuffer[pCmd->vnodeIdx]->numOfAllElems + trsupport->localBuffer->numOfElems; + trsupport->pExtMemBuffer[idx]->numOfAllElems + trsupport->localBuffer->numOfElems; tscTrace("%p sub:%p all data retrieved from ip:%u,vid:%d, numOfRows:%d, orderOfSub:%d", pPObj, pSql, pSvd->ip, pSvd->vnode, numOfRowsFromVnode, idx); @@ -1285,10 +1285,10 @@ void tscKillMetricQuery(SSqlObj *pSql) { static void tscRetrieveDataRes(void *param, TAOS_RES *tres, int retCode); static SSqlObj *tscCreateSqlObjForSubquery(SSqlObj *pSql, SRetrieveSupport *trsupport, SSqlObj *prevSqlObj) { - SSqlObj *pNew = createSubqueryObj(pSql, trsupport->vnodeIdx, 0, tscRetrieveDataRes, trsupport, prevSqlObj); + SSqlObj *pNew = createSubqueryObj(pSql, 0, tscRetrieveDataRes, trsupport, prevSqlObj); if (pNew != NULL) { // the sub query of two-stage super table query pNew->cmd.type |= TSDB_QUERY_TYPE_STABLE_SUBQUERY; - pSql->pSubs[trsupport->vnodeIdx] = pNew; + pSql->pSubs[trsupport->subqueryIndex] = pNew; } return pNew; @@ -1298,8 +1298,8 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { SRetrieveSupport *trsupport = (SRetrieveSupport *)param; SSqlObj * pSql = (SSqlObj *)tres; - int32_t idx = pSql->cmd.vnodeIdx; SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + int32_t idx = pMeterMetaInfo->vnodeIndex; SVnodeSidList *vnodeInfo = NULL; SVPeerDesc * pSvd = NULL; @@ -1317,7 +1317,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { code = trsupport->pState->code; } tscTrace("%p query cancelled or failed, sub:%p, orderOfSub:%d abort, code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, code); + trsupport->subqueryIndex, code); } /* @@ -1337,7 +1337,7 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { SSqlObj *pNew = tscCreateSqlObjForSubquery(trsupport->pParentSqlObj, trsupport, pSql); if (pNew == NULL) { tscError("%p sub:%p failed to create new subquery due to out of memory, abort retry, vid:%d, orderOfSub:%d", - trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->vnodeIdx); + trsupport->pParentSqlObj, pSql, pSvd->vnode, trsupport->subqueryIndex); trsupport->pState->code = -TSDB_CODE_CLI_OUT_OF_MEMORY; trsupport->numOfRetry = MAX_NUM_OF_SUBQUERY_RETRY; @@ -1353,17 +1353,17 @@ void tscRetrieveDataRes(void *param, TAOS_RES *tres, int code) { if (vnodeInfo != NULL) { tscTrace("%p sub:%p query failed,ip:%u,vid:%d,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, - trsupport->vnodeIdx, trsupport->pState->code); + trsupport->subqueryIndex, trsupport->pState->code); } else { tscTrace("%p sub:%p query failed,orderOfSub:%d,global code:%d", trsupport->pParentSqlObj, pSql, - trsupport->vnodeIdx, trsupport->pState->code); + trsupport->subqueryIndex, trsupport->pState->code); } tscRetrieveFromVnodeCallBack(param, tres, trsupport->pState->code); } else { // success, proceed to retrieve data from dnode tscTrace("%p sub:%p query 
complete,ip:%u,vid:%d,orderOfSub:%d,retrieve data", trsupport->pParentSqlObj, pSql, vnodeInfo->vpeerDesc[vnodeInfo->index].ip, vnodeInfo->vpeerDesc[vnodeInfo->index].vnode, - trsupport->vnodeIdx); + trsupport->subqueryIndex); taos_fetch_rows_a(tres, tscRetrieveFromVnodeCallBack, param); } @@ -1438,7 +1438,7 @@ void tscUpdateVnodeInQueryMsg(SSqlObj *pSql, char *buf) { pQueryMsg->vnode = htons(pMeterMeta->vpeerDesc[pSql->index].vnode); } else { // query on metric SMetricMeta * pMetricMeta = pMeterMetaInfo->pMetricMeta; - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); pQueryMsg->vnode = htons(pVnodeSidList->vpeerDesc[pSql->index].vnode); } } @@ -1461,7 +1461,7 @@ static int32_t tscEstimateQueryMsgSize(SSqlCmd *pCmd) { SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); int32_t meterInfoSize = (pMetricMeta->tagLen + sizeof(SMeterSidExtInfo)) * pVnodeSidList->numOfSids; int32_t outputColumnSize = pCmd->fieldsInfo.numOfOutputCols * sizeof(SSqlFuncExprMsg); @@ -1506,12 +1506,12 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->numOfTagsCols = 0; } else { // query on metric SMetricMeta *pMetricMeta = pMeterMetaInfo->pMetricMeta; - if (pCmd->vnodeIdx < 0) { - tscError("%p error vnodeIdx:%d", pSql, pCmd->vnodeIdx); + if (pMeterMetaInfo->vnodeIndex < 0) { + tscError("%p error vnodeIdx:%d", pSql, pMeterMetaInfo->vnodeIndex); return -1; } - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); uint32_t vnodeId = pVnodeSidList->vpeerDesc[pVnodeSidList->index].vnode; numOfMeters = pVnodeSidList->numOfSids; @@ -1693,7 +1693,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pQueryMsg->colNameLen = htonl(len); // set sids list - tscTrace("%p vid:%d, query on %d meters", pSql, pSql->cmd.vnodeIdx, numOfMeters); + tscTrace("%p vid:%d, query on %d meters", pSql, htons(pQueryMsg->vnode), numOfMeters); if (UTIL_METER_IS_NOMRAL_METER(pMeterMetaInfo)) { #ifdef _DEBUG_VIEW @@ -1703,7 +1703,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) { pSMeterTagInfo->sid = htonl(pMeterMeta->sid); pMsg += sizeof(SMeterSidExtInfo); } else { - SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pCmd->vnodeIdx); + SVnodeSidList *pVnodeSidList = tscGetVnodeSidList(pMetricMeta, pMeterMetaInfo->vnodeIndex); for (int32_t i = 0; i < numOfMeters; ++i) { SMeterSidExtInfo *pMeterTagInfo = (SMeterSidExtInfo *)pMsg; @@ -1774,7 +1774,7 @@ int tscBuildQueryMsg(SSqlObj *pSql) { int32_t numOfBlocks = 0; if (pCmd->tsBuf != NULL) { - STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pCmd->vnodeIdx); + STSVnodeBlockInfo *pBlockInfo = tsBufGetVnodeBlockInfo(pCmd->tsBuf, pMeterMetaInfo->vnodeIndex); assert(QUERY_IS_JOIN_QUERY(pCmd->type) && pBlockInfo != NULL); // this query should not be sent // todo refactor diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index 4d7f2734a9..fe097b15d9 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -609,7 +609,15 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { assert((pRes->offset >= 0 && pRes->numOfRows == 0) || (pRes->offset == 0 && pRes->numOfRows >= 0)); - if ((++pCmd->vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + /* + * For project 
query with super table join, the numOfSub is equalled to the number of all subqueries, so + * we need to reset the value of numOfSubs to be 0. + * + * For super table join with projection query, if anyone of the subquery is exhausted, the query completed. + */ + pSql->numOfSubs = 0; + + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { pCmd->command = TSDB_SQL_SELECT; assert(pSql->fp == NULL); tscProcessSql(pSql); @@ -617,7 +625,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { } // check!!! - if (rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { + if (rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { break; } } @@ -654,7 +662,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { pCmd->limit.offset = pRes->offset; - if ((++pSql->cmd.vnodeIdx) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { pSql->cmd.command = TSDB_SQL_SELECT; assert(pSql->fp == NULL); tscProcessSql(pSql); @@ -662,7 +670,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { } // check!!! - if (*rows != NULL || pCmd->vnodeIdx >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { + if (*rows != NULL || pMeterMetaInfo->vnodeIndex >= pMeterMetaInfo->pMetricMeta->numOfVnodes) { break; } } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 7fd3d7706b..5ca55a486f 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1474,7 +1474,11 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { * data blocks have been submit to vnode. */ SDataBlockList* pDataBlocks = pCmd->pDataBlocks; - if (pDataBlocks == NULL || pCmd->vnodeIdx >= pDataBlocks->nSize) { + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + assert(pSql->cmd.numOfTables == 1); + + if (pDataBlocks == NULL || pMeterMetaInfo->vnodeIndex >= pDataBlocks->nSize) { tscTrace("%p object should be release since all data blocks have been submit", pSql); return true; } else { @@ -1487,10 +1491,11 @@ bool tscShouldFreeAsyncSqlObj(SSqlObj* pSql) { } SMeterMetaInfo* tscGetMeterMetaInfo(SSqlCmd* pCmd, int32_t index) { - if (pCmd == NULL || index >= pCmd->numOfTables || index < 0) { + if (pCmd == NULL || pCmd->numOfTables == 0) { return NULL; } + assert(index >= 0 && index <= pCmd->numOfTables && pCmd->pMeterInfo != NULL); return pCmd->pMeterInfo[index]; } @@ -1587,13 +1592,13 @@ void tscResetForNextRetrieve(SSqlRes* pRes) { pRes->numOfRows = 0; } -SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex, void (*fp)(), void* param, - SSqlObj* pPrevSql) { +SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void* param, SSqlObj* pPrevSql) { SSqlCmd* pCmd = &pSql->cmd; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); SSqlObj* pNew = (SSqlObj*)calloc(1, sizeof(SSqlObj)); if (pNew == NULL) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); return NULL; } @@ -1602,7 +1607,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex pNew->sqlstr = strdup(pSql->sqlstr); if (pNew->sqlstr == NULL) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); free(pNew); 
return NULL; @@ -1627,15 +1632,13 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex tscTagCondCopy(&pNew->cmd.tagCond, &pCmd->tagCond); if (tscAllocPayload(&pNew->cmd, TSDB_DEFAULT_PAYLOAD_SIZE) != TSDB_CODE_SUCCESS) { - tscError("%p new subquery failed, vnodeIdx:%d, tableIndex:%d", pSql, vnodeIndex, tableIndex); + tscError("%p new subquery failed, tableIndex:%d, vnodeIndex:%d", pSql, tableIndex, pMeterMetaInfo->vnodeIndex); tscFreeSqlObj(pNew); return NULL; } tscColumnBaseInfoCopy(&pNew->cmd.colList, &pCmd->colList, (int16_t)tableIndex); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); - + // set the correct query type if (pPrevSql != NULL) { pNew->cmd.type = pPrevSql->cmd.type; @@ -1666,7 +1669,6 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex pNew->fp = fp; pNew->param = param; - pNew->cmd.vnodeIdx = vnodeIndex; SMeterMetaInfo* pMetermetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); char key[TSDB_MAX_TAGS_LEN + 1] = {0}; @@ -1695,8 +1697,8 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int32_t vnodeIndex, int16_t tableIndex assert(pFinalInfo->pMetricMeta != NULL); } - tscTrace("%p new subquery %p, vnodeIdx:%d, tableIndex:%d, type:%d", pSql, pNew, vnodeIndex, tableIndex, - pNew->cmd.type); + tscTrace("%p new subquery %p, tableIndex:%d, vnodeIdx:%d, type:%d", pSql, pNew, tableIndex, + pMeterMetaInfo->vnodeIndex, pNew->cmd.type); return pNew; } From 1a5ae0d1c3095e5a9f45f30f1fc89238f50894d0 Mon Sep 17 00:00:00 2001 From: hjxilinx Date: Fri, 6 Dec 2019 10:10:33 +0800 Subject: [PATCH 06/17] [tbase-1282] --- src/client/inc/tscUtil.h | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 41e1389c8e..473fdbb942 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -23,8 +23,7 @@ extern "C" { /* * @date 2018/09/30 */ -#include -#include +#include "os.h" #include "textbuffer.h" #include "tscSecondaryMerge.h" #include "tsclient.h" From 4246517c9f98788643828af5ee123e14fc982c68 Mon Sep 17 00:00:00 2001 From: hjxilinx Date: Sat, 7 Dec 2019 15:24:52 +0800 Subject: [PATCH 07/17] [tbase-1282] --- src/client/inc/tscUtil.h | 5 +- src/client/src/tscJoinProcess.c | 123 +++++++++---- src/client/src/tscSQLParser.c | 295 +++++++++++++++++++------------- src/client/src/tscServer.c | 8 +- src/client/src/tscSql.c | 77 ++++++--- src/client/src/tscUtil.c | 3 +- 6 files changed, 329 insertions(+), 182 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 473fdbb942..9ea6ba7c3f 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -52,7 +52,6 @@ typedef struct SParsedDataColInfo { typedef struct SJoinSubquerySupporter { SSubqueryState* pState; SSqlObj* pObj; // parent SqlObj - bool hasMore; // has data from vnode to fetch int32_t subqueryIndex; // index of sub query int64_t interval; // interval time SLimitVal limit; // limit info @@ -166,7 +165,6 @@ void tsSetMetricQueryCond(STagCond* pTagCond, uint64_t uid, const char* str); void tscTagCondCopy(STagCond* dest, const STagCond* src); void tscTagCondRelease(STagCond* pCond); -void tscTagCondSetQueryCondType(STagCond* pCond, int16_t type); void tscGetSrcColumnInfo(SSrcColumnInfo* pColInfo, SSqlCmd* pCmd); @@ -222,6 +220,9 @@ TAOS* taos_connect_a(char* ip, char* user, char* pass, char* db, uint16_t port, void* param, void** taos); void sortRemoveDuplicates(STableDataBlocks* dataBuf); + +void tscPrintSelectClause(SSqlCmd* pCmd); + #ifdef 
__cplusplus } #endif diff --git a/src/client/src/tscJoinProcess.c b/src/client/src/tscJoinProcess.c index b470d84440..3126c3a867 100644 --- a/src/client/src/tscJoinProcess.c +++ b/src/client/src/tscJoinProcess.c @@ -164,8 +164,6 @@ SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pS } pSupporter->pObj = pSql; - pSupporter->hasMore = true; - pSupporter->pState = pState; pSupporter->subqueryIndex = index; @@ -226,12 +224,6 @@ bool needSecondaryQuery(SSqlObj* pSql) { * launch secondary stage query to fetch the result that contains timestamp in set */ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { - // TODO not launch secondary stage query - // if (!needSecondaryQuery(pSql)) { - // return; - // } - - // sub query may not be necessary int32_t numOfSub = 0; SJoinSubquerySupporter* pSupporter = NULL; @@ -286,7 +278,6 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { pNew->cmd.type |= TSDB_QUERY_TYPE_JOIN_SEC_STAGE; pNew->cmd.nAggTimeInterval = pSupporter->interval; - pNew->cmd.limit = pSupporter->limit; pNew->cmd.groupbyExpr = pSupporter->groupbyExpr; tscColumnBaseInfoCopy(&pNew->cmd.colList, &pSupporter->colList, 0); @@ -305,7 +296,14 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { tscFieldInfoCalOffset(&pNew->cmd); SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); - + + /* + * When handling the projection query, the offset value will be modified for table-table join, which is changed + * during the timestamp intersection. + */ + pSupporter->limit = pSql->cmd.limit; + pNew->cmd.limit = pSupporter->limit; + // fetch the join tag column if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0); @@ -314,10 +312,12 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { int16_t tagColIndex = tscGetJoinTagColIndexByUid(&pNew->cmd.tagCond, pMeterMetaInfo->pMeterMeta->uid); pExpr->param[0].i64Key = tagColIndex; pExpr->numOfParams = 1; - - addRequiredTagColumn(&pNew->cmd, tagColIndex, 0); } +#ifdef _DEBUG_VIEW + tscPrintSelectClause(&pNew->cmd); +#endif + tscProcessSql(pNew); } @@ -471,9 +471,31 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { pSupporter->pState->code = numOfRows; tscError("%p retrieve failed, code:%d, index:%d", pSql, numOfRows, pSupporter->subqueryIndex); } - + + if (tscProjectionQueryOnMetric(&pSql->cmd) && numOfRows == 0) { + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + assert(pSql->cmd.numOfTables == 1); + + // for projection query, need to try next vnode if current vnode is exhausted + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + + pSupporter->pState->numOfCompleted = 0; + pSupporter->pState->numOfTotal = 1; + + pSql->cmd.command = TSDB_SQL_SELECT; + pSql->fp = tscJoinQueryCallback; + tscProcessSql(pSql); + + return; + } + } + if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { - tscTrace("%p secondary retrieve completed, global code:%d", tres, pParentSql->res.code); + assert(pSupporter->pState->numOfCompleted == pSupporter->pState->numOfTotal); + + tscTrace("%p all %d secondary retrieves are completed, global code:%d", tres, pSupporter->pState->numOfTotal, + pParentSql->res.code); + if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { pParentSql->res.code = abs(pSupporter->pState->code); freeSubqueryObj(pParentSql); @@ -490,11 +512,17 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) { assert(pSql->numOfSubs >= 1); for (int32_t i = 0; i < 
pSql->numOfSubs; ++i) { - SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[i]->param; - SSqlRes* pRes = &pSql->pSubs[i]->res; - if (pRes->row >= pRes->numOfRows && pSupporter->hasMore) { - numOfFetch++; + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->pSubs[i]->cmd, 0); + + if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + numOfFetch++; + } + } else { + if (pRes->row >= pRes->numOfRows) { + numOfFetch++; + } } } @@ -515,8 +543,13 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) { // wait for all subqueries completed pSupporter->pState->numOfTotal = numOfFetch; - if (pRes1->row >= pRes1->numOfRows && pSupporter->hasMore) { - tscTrace("%p subquery:%p retrieve data from vnode, index:%d", pSql, pSql1, pSupporter->subqueryIndex); + + assert(pRes1->numOfRows >= 0 && pCmd1->numOfTables == 1); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); + if (pRes1->row >= pRes1->numOfRows) { + tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1, + pSupporter->subqueryIndex, pMeterMetaInfo->vnodeIndex); tscResetForNextRetrieve(pRes1); @@ -541,7 +574,11 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; tscTrace("%p all subquery response, retrieve data", pSql); - + + if (pRes->pColumnIndex != NULL) { + return; // the column transfer support struct has been built + } + pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols); for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { @@ -631,20 +668,34 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { tscSetupOutputColumnIndex(pParentSql); - if (pParentSql->fp == NULL) { - tsem_wait(&pParentSql->emptyRspSem); - tsem_wait(&pParentSql->emptyRspSem); - - tsem_post(&pParentSql->rspSem); - } else { - // set the command flag must be after the semaphore been correctly set. - // pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; - // if (pPObj->res.code == TSDB_CODE_SUCCESS) { - // (*pPObj->fp)(pPObj->param, pPObj, 0); - // } else { - // tscQueueAsyncRes(pPObj); - // } - assert(0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + + /** + * if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of data instead of returning to its invoker + */ + if (pMeterMetaInfo->vnodeIndex > 0 && tscProjectionQueryOnMetric(&pSql->cmd)) { + assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes); + pSupporter->pState->numOfCompleted = 0; // reset the record value + + pSql->fp = joinRetrieveCallback; // continue retrieve data + pSql->cmd.command = TSDB_SQL_FETCH; + tscProcessSql(pSql); + } else { // first retrieve from vnode during the secondary stage sub-query + if (pParentSql->fp == NULL) { + tsem_wait(&pParentSql->emptyRspSem); + tsem_wait(&pParentSql->emptyRspSem); + + tsem_post(&pParentSql->rspSem); + } else { + // set the command flag must be after the semaphore been correctly set. 
+ // pPObj->cmd.command = TSDB_SQL_RETRIEVE_METRIC; + // if (pPObj->res.code == TSDB_CODE_SUCCESS) { + // (*pPObj->fp)(pPObj->param, pPObj, 0); + // } else { + // tscQueueAsyncRes(pPObj); + // } + assert(0); + } } } } @@ -1440,7 +1491,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { assert(pDestBuf->fileSize == oldSize + size); - tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, vnode:%d, autoDelete:%d", pDestBuf, pDestBuf->path, + tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf, pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete); return 0; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index d0aa290d31..128cc7f3a1 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -1020,7 +1020,10 @@ int32_t tscToSQLCmd(SSqlObj* pSql, struct SSqlInfo* pInfo) { } setColumnOffsetValueInResultset(pCmd); - updateTagColumnIndex(pCmd, 0); + + for(int32_t i = 0; i < pCmd->numOfTables; ++i) { + updateTagColumnIndex(pCmd, i); + } break; } @@ -1796,12 +1799,11 @@ int32_t addProjectionExprAndResultField(SSqlCmd* pCmd, tSQLExprItem* pItem) { } if (index.columnIndex == TSDB_TBNAME_COLUMN_INDEX) { - SColumnIndex index1 = {0, TSDB_TBNAME_COLUMN_INDEX}; SSchema colSchema = {.type = TSDB_DATA_TYPE_BINARY, .bytes = TSDB_METER_NAME_LEN}; strcpy(colSchema.name, TSQL_TBNAME_L); pCmd->type = TSDB_QUERY_TYPE_STABLE_QUERY; - tscAddSpecialColumnForSelect(pCmd, startPos, TSDB_FUNC_TAGPRJ, &index1, &colSchema, true); + tscAddSpecialColumnForSelect(pCmd, startPos, TSDB_FUNC_TAGPRJ, &index, &colSchema, true); } else { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); SMeterMeta* pMeterMeta = pMeterMetaInfo->pMeterMeta; @@ -2739,15 +2741,20 @@ static bool functionCompatibleCheck(SSqlCmd* pCmd) { void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, tableIndex); - // update tags column index for group by tags - for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { - int32_t index = pCmd->groupbyExpr.columnInfo[i].colIdx; - - for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { - int32_t tagColIndex = pMeterMetaInfo->tagColumnIndex[j]; - if (tagColIndex == index) { - pCmd->groupbyExpr.columnInfo[i].colIdx = j; - break; + /* + * update tags column index for group by tags + * group by columns belong to this table + */ + if (pCmd->groupbyExpr.numOfGroupCols > 0 && pCmd->groupbyExpr.tableIndex == tableIndex) { + for (int32_t i = 0; i < pCmd->groupbyExpr.numOfGroupCols; ++i) { + int32_t index = pCmd->groupbyExpr.columnInfo[i].colIdx; + + for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { + int32_t tagColIndex = pMeterMetaInfo->tagColumnIndex[j]; + if (tagColIndex == index) { + pCmd->groupbyExpr.columnInfo[i].colIdx = j; + break; + } } } } @@ -2755,9 +2762,15 @@ void updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { // update tags column index for expression for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + if (!TSDB_COL_IS_TAG(pExpr->colInfo.flag)) { // not tags, continue continue; } + + // not belongs to this table + if (pExpr->uid != pMeterMetaInfo->pMeterMeta->uid) { + continue; + } for (int32_t j = 0; j < pMeterMetaInfo->numOfTags; ++j) { if (pExpr->colInfo.colIdx == pMeterMetaInfo->tagColumnIndex[j]) { @@ -2766,6 +2779,32 @@ void 
updateTagColumnIndex(SSqlCmd* pCmd, int32_t tableIndex) { } } } + + // update join condition tag column index + SJoinInfo* pJoinInfo = &pCmd->tagCond.joinInfo; + if (!pJoinInfo->hasJoin) { // not join query + return; + } + + assert(pJoinInfo->left.uid != pJoinInfo->right.uid); + + // the join condition expression node belongs to this table(super table) + if (pMeterMetaInfo->pMeterMeta->uid == pJoinInfo->left.uid) { + for(int32_t i = 0; i < pMeterMetaInfo->numOfTags; ++i) { + if (pJoinInfo->left.tagCol == pMeterMetaInfo->tagColumnIndex[i]) { + pJoinInfo->left.tagCol = i; + } + } + } + + if (pMeterMetaInfo->pMeterMeta->uid == pJoinInfo->right.uid) { + for(int32_t i = 0; i < pMeterMetaInfo->numOfTags; ++i) { + if (pJoinInfo->right.tagCol == pMeterMetaInfo->tagColumnIndex[i]) { + pJoinInfo->right.tagCol = i; + } + } + } + } int32_t parseGroupbyClause(SSqlCmd* pCmd, tVariantList* pList) { @@ -2987,8 +3026,6 @@ typedef struct SCondExpr { static int32_t getTimeRange(int64_t* stime, int64_t* etime, tSQLExpr* pRight, int32_t optr, int16_t timePrecision); -static int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr); - static int32_t tSQLExprNodeToString(tSQLExpr* pExpr, char** str) { if (pExpr->nSQLOptr == TK_ID) { // column name strncpy(*str, pExpr->colInfo.z, pExpr->colInfo.n); @@ -4018,129 +4055,128 @@ static void cleanQueryExpr(SCondExpr* pCondExpr) { } } -int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { - SSqlCmd* pCmd = &pSql->cmd; +static void doAddJoinTagsColumnsIntoTagList(SSqlCmd* pCmd, SCondExpr* pCondExpr) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + if (QUERY_IS_JOIN_QUERY(pCmd->type) && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { + SColumnIndex index = {0}; + + getColumnIndexByNameEx(&pCondExpr->pJoinExpr->pLeft->colInfo, pCmd, &index); + pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + + int32_t columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); + + getColumnIndexByNameEx(&pCondExpr->pJoinExpr->pRight->colInfo, pCmd, &index); + pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); + + columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; + addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); + } +} + +static int32_t getTagQueryCondExpr(SSqlCmd* pCmd, SCondExpr* pCondExpr, tSQLExpr** pExpr) { + int32_t ret = TSDB_CODE_SUCCESS; + + if (pCondExpr->pTagCond != NULL) { + for (int32_t i = 0; i < pCmd->numOfTables; ++i) { + tSQLExpr* p1 = extractExprForSTable(pExpr, pCmd, i); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); + + char c[TSDB_MAX_TAGS_LEN] = {0}; + char* str = c; + + if ((ret = getTagCondString(pCmd, p1, &str)) != TSDB_CODE_SUCCESS) { + return ret; + } + + tsSetMetricQueryCond(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); + + doCompactQueryExpr(pExpr); + tSQLExprDestroy(p1); + } + + pCondExpr->pTagCond = NULL; + } + + return ret; +} +int32_t parseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr) { if (pExpr == NULL) { return TSDB_CODE_SUCCESS; } - + + const char* msg = "invalid filter expression"; + const char* msg1 = "invalid expression"; + + int32_t ret = TSDB_CODE_SUCCESS; + + SSqlCmd* pCmd = &pSql->cmd; pCmd->stime = 0; pCmd->etime = INT64_MAX; - int32_t ret = TSDB_CODE_SUCCESS; - - const char* msg1 = "invalid expression"; + //tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space + SStringBuilder sb = {0}; SCondExpr 
condExpr = {0}; if ((*pExpr)->pLeft == NULL || (*pExpr)->pRight == NULL) { return invalidSqlErrMsg(pCmd, msg1); } - ret = doParseWhereClause(pSql, pExpr, &condExpr); - if (ret != TSDB_CODE_SUCCESS) { + int32_t type = 0; + if ((ret = getQueryCondExpr(pCmd, pExpr, &condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { return ret; } - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); - if (QUERY_IS_JOIN_QUERY(pCmd->type) && UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - SColumnIndex index = {0}; - - getColumnIndexByNameEx(&condExpr.pJoinExpr->pLeft->colInfo, pCmd, &index); - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); - - int32_t columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); - - getColumnIndexByNameEx(&condExpr.pJoinExpr->pRight->colInfo, pCmd, &index); - pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, index.tableIndex); - - columnInfo = index.columnIndex - pMeterMetaInfo->pMeterMeta->numOfColumns; - addRequiredTagColumn(pCmd, columnInfo, index.tableIndex); - } - - cleanQueryExpr(&condExpr); - return ret; -} - -int32_t doParseWhereClause(SSqlObj* pSql, tSQLExpr** pExpr, SCondExpr* condExpr) { - const char* msg = "invalid filter expression"; - - int32_t type = 0; - SSqlCmd* pCmd = &pSql->cmd; - - /* - * tags query condition may be larger than 512bytes, therefore, we need to prepare enough large space - */ - SStringBuilder sb = {0}; - - int32_t ret = TSDB_CODE_SUCCESS; - if ((ret = getQueryCondExpr(pCmd, pExpr, condExpr, &type, (*pExpr)->nSQLOptr)) != TSDB_CODE_SUCCESS) { - return ret; - } - - doCompactQueryExpr(pExpr); - - // after expression compact, the expression tree is only include tag query condition - condExpr->pTagCond = (*pExpr); - - // 1. check if it is a join query - if ((ret = validateJoinExpr(pCmd, condExpr)) != TSDB_CODE_SUCCESS) { - return ret; - } - - // 2. get the query time range - if ((ret = getTimeRangeFromExpr(pCmd, condExpr->pTimewindow)) != TSDB_CODE_SUCCESS) { - return ret; - } - - // 3. get the tag query condition - if (condExpr->pTagCond != NULL) { - for (int32_t i = 0; i < pCmd->numOfTables; ++i) { - tSQLExpr* p1 = extractExprForSTable(pExpr, pCmd, i); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, i); - - char c[TSDB_MAX_TAGS_LEN] = {0}; - char* str = c; - if ((ret = getTagCondString(pCmd, p1, &str)) != TSDB_CODE_SUCCESS) { - return ret; - } - - tsSetMetricQueryCond(&pCmd->tagCond, pMeterMetaInfo->pMeterMeta->uid, c); - - doCompactQueryExpr(pExpr); - tSQLExprDestroy(p1); - } - - condExpr->pTagCond = NULL; - } - - // 4. get the table name query condition - if ((ret = getTablenameCond(pCmd, condExpr->pTableCond, &sb)) != TSDB_CODE_SUCCESS) { - return ret; - } - - // 5. other column query condition - if ((ret = getColumnQueryCondInfo(pCmd, condExpr->pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { - return ret; - } - - // 6. join condition - if ((ret = getJoinCondInfo(pSql, condExpr->pJoinExpr)) != TSDB_CODE_SUCCESS) { - return ret; - } - - // 7. query condition for table name - pCmd->tagCond.relType = (condExpr->relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR; - ret = setTableCondForMetricQuery(pSql, condExpr->pTableCond, condExpr->tableCondIndex, &sb); + doCompactQueryExpr(pExpr); + + // after expression compact, the expression tree is only include tag query condition + condExpr.pTagCond = (*pExpr); + + // 1. 
check if it is a join query + if ((ret = validateJoinExpr(pCmd, &condExpr)) != TSDB_CODE_SUCCESS) { + return ret; + } + + // 2. get the query time range + if ((ret = getTimeRangeFromExpr(pCmd, condExpr.pTimewindow)) != TSDB_CODE_SUCCESS) { + return ret; + } + + // 3. get the tag query condition + if ((ret = getTagQueryCondExpr(pCmd, &condExpr, pExpr)) != TSDB_CODE_SUCCESS) { + return ret; + } + + // 4. get the table name query condition + if ((ret = getTablenameCond(pCmd, condExpr.pTableCond, &sb)) != TSDB_CODE_SUCCESS) { + return ret; + } + + // 5. other column query condition + if ((ret = getColumnQueryCondInfo(pCmd, condExpr.pColumnCond, TK_AND)) != TSDB_CODE_SUCCESS) { + return ret; + } + + // 6. join condition + if ((ret = getJoinCondInfo(pSql, condExpr.pJoinExpr)) != TSDB_CODE_SUCCESS) { + return ret; + } + + // 7. query condition for table name + pCmd->tagCond.relType = (condExpr.relType == TK_AND) ? TSDB_RELATION_AND : TSDB_RELATION_OR; + + ret = setTableCondForMetricQuery(pSql, condExpr.pTableCond, condExpr.tableCondIndex, &sb); taosStringBuilderDestroy(&sb); if (!validateFilterExpr(pCmd)) { return invalidSqlErrMsg(pCmd, msg); } - + + doAddJoinTagsColumnsIntoTagList(pCmd, &condExpr); + + cleanQueryExpr(&condExpr); return ret; } @@ -5684,3 +5720,30 @@ int32_t tscCheckCreateDbParams(SSqlCmd* pCmd, SCreateDbMsg *pCreate) { return TSDB_CODE_SUCCESS; } + +// for debug purpose +void tscPrintSelectClause(SSqlCmd* pCmd) { + if (pCmd == NULL || pCmd->exprsInfo.numOfExprs == 0) { + return; + } + + char* str = calloc(1, 10240); + int32_t offset = 0; + + offset += sprintf(str, "%d [", pCmd->exprsInfo.numOfExprs); + for(int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { + SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); + + int32_t size = sprintf(str + offset, "%s(%d)", aAggs[pExpr->functionId].aName, pExpr->colInfo.colId); + offset += size; + + if (i < pCmd->exprsInfo.numOfExprs - 1) { + str[offset++] = ','; + } + } + + str[offset] = ']'; + printf("%s\n", str); + + free(str); +} diff --git a/src/client/src/tscServer.c b/src/client/src/tscServer.c index 81ef5d13e6..1805eac38d 100644 --- a/src/client/src/tscServer.c +++ b/src/client/src/tscServer.c @@ -695,8 +695,6 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySu pExpr->param->i64Key = tagColIndex; pExpr->numOfParams = 1; - addRequiredTagColumn(pCmd, tagColIndex, 0); - // add the filter tag column for (int32_t i = 0; i < pSupporter->colList.numOfCols; ++i) { SColumnBase *pColBase = &pSupporter->colList.pColList[i]; @@ -708,7 +706,11 @@ int32_t tscLaunchJoinSubquery(SSqlObj *pSql, int16_t tableIndex, SJoinSubquerySu } else { pNew->cmd.type |= TSDB_QUERY_TYPE_SUBQUERY; } - + +#ifdef _DEBUG_VIEW + tscPrintSelectClause(&pNew->cmd); +#endif + return tscProcessSql(pNew); } diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index fe097b15d9..f3d3407582 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -458,14 +458,48 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { while (1) { bool hasData = true; + if (tscProjectionQueryOnMetric(pCmd)) { + bool allSubqueryExhausted = true; + + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->pSubs[i]->cmd, 0); + if (pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + allSubqueryExhausted = false; + break; + } + } + + hasData = !allSubqueryExhausted; + } else { //otherwise, in case inner join, if any subquery exhausted, query completed. 
+ for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlRes *pRes1 = &pSql->pSubs[i]->res; + if (pRes1->numOfRows == 0) { + hasData = false; + break; + } + } + } + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { SSqlRes *pRes1 = &pSql->pSubs[i]->res; - - // in case inner join, if any subquery exhausted, query completed - if (pRes1->numOfRows == 0) { - hasData = false; - break; + SMeterMetaInfo* pMeterMeta = tscGetMeterMetaInfo(&pSql->pSubs[i]->cmd, 0); + + if (tscProjectionQueryOnMetric(pCmd)) { + //For multi-vnode projection query, the results may locate in following vnode, so we needs to go on + if (pMeterMeta->vnodeIndex < pMeterMeta->pMetricMeta->numOfVnodes) { + break; + } + } else { //otherwise, in case inner join, if any subquery exhausted, query completed. + if (pRes1->numOfRows == 0) { + hasData = false; + break; + } } +// if (pRes1->numOfRows == 0 && !tscProjectionQueryOnMetric(pCmd) || +// (pMeterMeta->vnodeIndex >= pMeterMeta->pMetricMeta->numOfVnodes && )) { +// hasData = false; +// break; +// } } if (!hasData) { // free all sub sqlobj @@ -487,34 +521,26 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { } if (pRes->tsrow == NULL) { - pRes->tsrow = malloc(sizeof(void *) * pCmd->exprsInfo.numOfExprs); + pRes->tsrow = malloc(POINTER_BYTES * pCmd->exprsInfo.numOfExprs); } bool success = false; - if (pSql->numOfSubs >= 2) { - // do merge result + if (pSql->numOfSubs >= 2) { // do merge result SSqlRes *pRes1 = &pSql->pSubs[0]->res; SSqlRes *pRes2 = &pSql->pSubs[1]->res; - while (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) { + if (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) { doSetResultRowData(pSql->pSubs[0]); doSetResultRowData(pSql->pSubs[1]); - - TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; - TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; - - if (key1 == key2) { - success = true; - pRes1->row++; - pRes2->row++; - break; - } else if (key1 < key2) { - pRes1->row++; - } else if (key1 > key2) { - pRes2->row++; - } +// TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; +// TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; +// printf("first:%lld, second:%lld\n", key1, key2); + success = true; + pRes1->row++; + pRes2->row++; } - } else { + + } else { // only one subquery SSqlRes *pRes1 = &pSql->pSubs[0]->res; doSetResultRowData(pSql->pSubs[0]); @@ -553,9 +579,12 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) { if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) { tscFetchDatablockFromSubquery(pSql); + if (pRes->code == TSDB_CODE_SUCCESS) { + tscTrace("%p data from all subqueries have been retrieved to client", pSql); return tscJoinResultsetFromBuf(pSql); } else { + tscTrace("%p retrieve data from subquery failed, code:%d", pSql, pRes->code); return NULL; } diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 5ca55a486f..4521bcb156 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -1538,7 +1538,7 @@ SMeterMetaInfo* tscAddMeterMetaInfo(SSqlCmd* pCmd, const char* name, SMeterMeta* pMeterMetaInfo->numOfTags = numOfTags; if (tags != NULL) { - memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(int16_t) * numOfTags); + memcpy(pMeterMetaInfo->tagColumnIndex, tags, sizeof(pMeterMetaInfo->tagColumnIndex[0]) * numOfTags); } pCmd->numOfTables += 1; @@ -1673,6 +1673,7 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void char key[TSDB_MAX_TAGS_LEN + 1] = {0}; tscGetMetricMetaCacheKey(pCmd, key, pMetermetaInfo->pMeterMeta->uid); + printf("-----%s\n", key); char* name = pMeterMetaInfo->name; SMeterMetaInfo* 
pFinalInfo = NULL; From 60da89b58e30be0aee8e97adb6c86903275eac5a Mon Sep 17 00:00:00 2001 From: hjxilinx Date: Sat, 7 Dec 2019 15:27:06 +0800 Subject: [PATCH 08/17] refactor some codes, change the log output info --- src/system/detail/src/mgmtShell.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/system/detail/src/mgmtShell.c b/src/system/detail/src/mgmtShell.c index 6084c5489d..fe1932b24a 100644 --- a/src/system/detail/src/mgmtShell.c +++ b/src/system/detail/src/mgmtShell.c @@ -978,12 +978,19 @@ int mgmtProcessCreateTableMsg(char *pMsg, int msgLen, SConnObj *pConn) { if (code == 1) { //mTrace("table:%s, wait vgroup create finish", pCreate->meterId, code); - } - else if (code != 0) { - mError("table:%s, failed to create table, code:%d", pCreate->meterId, code); + } else if (code != TSDB_CODE_SUCCESS) { + if (code == TSDB_CODE_TABLE_ALREADY_EXIST) { // table already created when the second attempt to create table + + STabObj* pMeter = mgmtGetMeter(pCreate->meterId); + assert(pMeter != NULL); + + mWarn("table:%s, table already created, failed to create table, ts:%lld, code:%d", pCreate->meterId, + pMeter->createdTime, code); + } else { // other errors + mError("table:%s, failed to create table, code:%d", pCreate->meterId, code); + } } else { mTrace("table:%s, table is created by %s", pCreate->meterId, pConn->pUser->user); - //mLPrint("meter:%s is created by %s", pCreate->meterId, pConn->pUser->user); } taosSendSimpleRsp(pConn->thandle, TSDB_MSG_TYPE_CREATE_TABLE_RSP, code); From e958793d1426ab1bc245aa5cd7e5b554ca9b114e Mon Sep 17 00:00:00 2001 From: slguan Date: Sat, 7 Dec 2019 17:04:21 +0800 Subject: [PATCH 09/17] Modify a wrong commit --- src/sdb/src/sdbEngine.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c index 7eec500199..c115f3c3c0 100644 --- a/src/sdb/src/sdbEngine.c +++ b/src/sdb/src/sdbEngine.c @@ -364,7 +364,7 @@ int64_t sdbInsertRow(void *handle, void *row, int rowSize) { return -1; } - if ((pTable->keyType != SDB_KEYTYPE_AUTO) || *((int32_t *)row)) + if ((pTable->keyType != SDB_KEYTYPE_AUTO) || *((int64_t *)row)) if (sdbGetRow(handle, row)) { if (strcmp(pTable->name, "mnode") == 0) { /* From 2ed4d2b170cb1bc2e4f81800970d1b5dee8a2d21 Mon Sep 17 00:00:00 2001 From: fang Date: Sat, 7 Dec 2019 17:18:18 +0800 Subject: [PATCH 10/17] english version for jdbc driver --- .../webdocs/markdowndocs/Connector.md | 314 +++++++++++++++--- 1 file changed, 275 insertions(+), 39 deletions(-) diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md index 014e88de42..23efb52de1 100644 --- a/documentation/webdocs/markdowndocs/Connector.md +++ b/documentation/webdocs/markdowndocs/Connector.md @@ -198,56 +198,104 @@ For the time being, TDengine supports subscription on one table. It is implement ## Java Connector -### JDBC Interface +To Java delevopers, TDengine provides `taos-jdbcdriver` according to the JDBC(3.0) API. Users can find and download it through [Sonatype Repository][1]. -TDengine provides a JDBC driver `taos-jdbcdriver-x.x.x.jar` for Enterprise Java developers. TDengine's JDBC Driver is implemented as a subset of the standard JDBC 3.0 Specification and supports the most common Java development frameworks. The driver have been published to dependency repositories such as Sonatype Maven Repository, and users could refer to the following `pom.xml` configuration file. 
+Since the native language of TDengine is C, the necessary TDengine library should be checked before using the taos-jdbcdriver: + +* libtaos.so (Linux) + After TDengine is installed successfully, the library `libtaos.so` will be automatically copied to the `/usr/lib/`, which is the system's default search path. + +* taos.dll (Windows) + After TDengine client is installed, the library `taos.dll` will be automatically copied to the `C:/Windows/System32`, which is the system's default search path. + +> Note: Please make sure that TDengine Windows client has been installed if developing on Windows. + +Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver: +* TDengine doesn't allow to delete/modify single record, and thus JDBC driver also has no such method. +* No support for transaction +* No support for union between tables +* No support for nested query),`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`. + +## Version list of TAOS-JDBCDriver and required TDengine and JDK + +| taos-jdbcdriver | TDengine | JDK | +| --- | --- | --- | +| 1.0.3 | 1.6.1.x or higher | 1.8.x | +| 1.0.2 | 1.6.1.x or higher | 1.8.x | +| 1.0.1 | 1.6.1.x or higher | 1.8.x | + +## DataType in TDengine and Java + +The datatypes in TDengine include timestamp, number, string and boolean, which are converted as follows in Java: + +| TDengine | Java | +| --- | --- | +| TIMESTAMP | java.sql.Timestamp | +| INT | java.lang.Integer | +| BIGINT | java.lang.Long | +| FLOAT | java.lang.Float | +| DOUBLE | java.lang.Double | +| SMALLINT, TINYINT |java.lang.Short | +| BOOL | java.lang.Boolean | +| BINARY, NCHAR | java.lang.String | + +## How to get TAOS-JDBC Driver + +### maven repository + +taos-jdbcdriver has been published to [Sonatype Repository][1]: +* [sonatype][8] +* [mvnrepository][9] +* [maven.aliyun][10] + +Using the following pom.xml for maven projects ```xml - - - oss-sonatype - oss-sonatype - https://oss.sonatype.org/content/groups/public - - - com.taosdata.jdbc taos-jdbcdriver - 1.0.1 + 1.0.3 ``` -Please note the JDBC driver itself relies on a native library written in C. On a Linux OS, the driver relies on a `libtaos.so` native library, where .so stands for "Shared Object". After the successful installation of TDengine on Linux, `libtaos.so` should be automatically copied to `/usr/local/lib/taos` and added to the system's default search path. On a Windows OS, the driver relies on a `taos.dll` native library, where .dll stands for "Dynamic Link Library". After the successful installation of the TDengine client on Windows, the `taos-jdbcdriver.jar` file can be found in `C:/TDengine/driver/JDBC`; the `taos.dll` file can be found in `C:/TDengine/driver/C` and should have been automatically copied to the system's searching path `C:/Windows/System32`. +### JAR file from the source code -Developers can refer to the Oracle's official JDBC API documentation for detailed usage on classes and methods. However, there are some differences of connection configurations and supported methods in the driver implementation between TDengine and traditional relational databases. +After downloading the [TDengine][3] source code, execute `mvn clean package` in the directory `src/connector/jdbc` and then the corresponding jar file is generated. -For database connections, TDengine's JDBC driver has the following configurable parameters in the JDBC URL. 
The standard format of a TDengine JDBC URL is:
-`jdbc:TSDB://{host_ip}:{port}/{database_name}?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
-
-where `{}` marks the required parameters and `[]` marks the optional. The usage of each parameter is pretty straightforward:
-
-* user - login user name for TDengine; by default, it's `root`
-* password - login password; by default, it's `taosdata`
-* charset - the client-side charset; by default, it's the operation system's charset
-* cfgdir - the directory of TDengine client configuration file; by default it's `/etc/taos` on Linux and `C:\TDengine/cfg` on Windows
-* locale - the language environment of TDengine client; by default, it's the operation system's locale
-* timezone - the timezone of the TDengine client; by default, it's the operation system's timezone
-
-All parameters can be configured at the time when creating a connection using the java.sql.DriverManager class, for example:
+### get the connection
 
 ```java
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.util.Properties;
-import com.taosdata.jdbc.TSDBDriver;
+Class.forName("com.taosdata.jdbc.TSDBDriver");
+String jdbcUrl = "jdbc:TAOS://127.0.0.1:6030/log?user=root&password=taosdata";
+Connection conn = DriverManager.getConnection(jdbcUrl);
+```
+> `6030` is the default port and `log` is the default database for system monitor.
+A normal JDBC URL looks as follows:
+`jdbc:TSDB://{host_ip}:{port}/[database_name]?[user={user}|&password={password}|&charset={charset}|&cfgdir={config_dir}|&locale={locale}|&timezone={timezone}]`
+
+values in `{}` are required while values in `[]` are optional. Each option in the above URL denotes:
+
+* user: user name for login, root by default.
+* password: password for login, taosdata by default.
+* charset: charset for the client, the system charset by default.
+* cfgdir: configuration directory for the client, _/etc/taos/_ on Linux and _C:/TDengine/cfg_ on Windows by default.
+* locale: language for the client, the system locale by default.
+* timezone: timezone for the client, the system timezone by default.
+
+The options above can be configured in the following ways (ordered by priority):
+1. JDBC URL
+
+   As explained above.
+2. java.sql.DriverManager.getConnection(String jdbcUrl, Properties connProps)
+```java
 public Connection getConn() throws Exception{
-  Class.forName("com.taosdata.jdbc.TSDBDriver");
-  String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/db?user=root&password=taosdata";
+  Class.forName("com.taosdata.jdbc.TSDBDriver");
+  String jdbcUrl = "jdbc:TAOS://127.0.0.1:0/log?user=root&password=taosdata";
   Properties connProps = new Properties();
   connProps.setProperty(TSDBDriver.PROPERTY_KEY_USER, "root");
   connProps.setProperty(TSDBDriver.PROPERTY_KEY_PASSWORD, "taosdata");
@@ -260,16 +308,204 @@ public Connection getConn() throws Exception{
 }
 ```
-Except `cfgdir`, all the parameters listed above can also be configured in the configuration file. The properties specified when calling DriverManager.getConnection() has the highest priority among all configuration methods. The JDBC URL has the second-highest priority, and the configuration file has the lowest priority. The explicitly configured parameters in a method with higher priorities always overwrite that same parameter configured in methods with lower priorities. For example, if `charset` is explicitly configured as "UTF-8" in the JDBC URL and "GKB" in the `taos.cfg` file, then "UTF-8" will be used.
+3.
Configuration file (taos.cfg)
+
+   The default configuration file is _/var/lib/taos/taos.cfg_ on Linux and _C:\TDengine\cfg\taos.cfg_ on Windows.
+```properties
+# client default username
+# defaultUser root
+
+# client default password
+# defaultPass taosdata
+
+# default system charset
+# charset UTF-8
+
+# system locale
+# locale en_US.UTF-8
+```
+> More options can be found in [client configuration][13]
+
+### Create databases and tables
+
+```java
+Statement stmt = conn.createStatement();
+
+// create database
+stmt.executeUpdate("create database if not exists db");
+
+// use database
+stmt.executeUpdate("use db");
+
+// create table
+stmt.executeUpdate("create table if not exists tb (ts timestamp, temperature int, humidity float)");
+```
+> Note: if there is no `use db` statement, the database name must be added as a prefix, like _db.tb_, when operating on tables.
+
+### Insert data
+
+```java
+// insert data
+int affectedRows = stmt.executeUpdate("insert into tb values(now, 23, 10.3) (now + 1s, 20, 9.3)");
+
+System.out.println("insert " + affectedRows + " rows.");
+```
+> _now_ is the server time.
+> _now+1s_ is 1 second later than the current server time. The time unit includes: _a_(millisecond), _s_(second), _m_(minute), _h_(hour), _d_(day), _w_(week), _n_(month), _y_(year).
+
+### Query database
+
+```java
+// query data
+ResultSet resultSet = stmt.executeQuery("select * from tb");
+
+Timestamp ts = null;
+int temperature = 0;
+float humidity = 0;
+while(resultSet.next()){
+
+    ts = resultSet.getTimestamp(1);
+    temperature = resultSet.getInt(2);
+    humidity = resultSet.getFloat("humidity");
+
+    System.out.printf("%s, %d, %s\n", ts, temperature, humidity);
+}
+```
+> Querying is consistent with relational databases. The subscript starts with 1 when retrieving returned results. It is recommended to use column names to retrieve results.
+ +### Close all + +```java +resultSet.close(); +stmt.close(); +conn.close(); +``` +> `please make sure the connection is closed to avoid the error like connection leakage` + +## Using connection pool + +**HikariCP** + +* dependence in pom.xml: +```xml + + com.zaxxer + HikariCP + 3.4.1 + +``` + +* Examples: +```java + public static void main(String[] args) throws SQLException { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl("jdbc:TAOS://127.0.0.1:6030/log"); + config.setUsername("root"); + config.setPassword("taosdata"); + + config.setMinimumIdle(3); //minimum number of idle connection + config.setMaximumPoolSize(10); //maximum number of connection in the pool + config.setConnectionTimeout(10000); //maximum wait milliseconds for get connection from pool + config.setIdleTimeout(60000); // max idle time for recycle idle connection + config.setConnectionTestQuery("describe log.dn"); //validation query + config.setValidationTimeout(3000); //validation query timeout + + HikariDataSource ds = new HikariDataSource(config); //create datasource + + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> The close() method will not close the connection from HikariDataSource.getConnection(). Instead, the connection is put back to the connection pool. +> More instructions can refer to [User Guide][5] + +**Druid** + +* dependency in pom.xml: + +```xml + + com.alibaba + druid + 1.1.20 + +``` + +* Examples: +```java +public static void main(String[] args) throws Exception { + Properties properties = new Properties(); + properties.put("driverClassName","com.taosdata.jdbc.TSDBDriver"); + properties.put("url","jdbc:TAOS://127.0.0.1:6030/log"); + properties.put("username","root"); + properties.put("password","taosdata"); + + properties.put("maxActive","10"); //maximum number of connection in the pool + properties.put("initialSize","3");//initial number of connection + properties.put("maxWait","10000");//maximum wait milliseconds for get connection from pool + properties.put("minIdle","3");//minimum number of connection in the pool + + properties.put("timeBetweenEvictionRunsMillis","3000");// the interval milliseconds to test connection + + properties.put("minEvictableIdleTimeMillis","60000");//the minimum milliseconds to keep idle + properties.put("maxEvictableIdleTimeMillis","90000");//the maximum milliseconds to keep idle + + properties.put("validationQuery","describe log.dn"); //validation query + properties.put("testWhileIdle","true"); // test connection while idle + properties.put("testOnBorrow","false"); // don't need while testWhileIdle is true + properties.put("testOnReturn","false"); // don't need while testWhileIdle is true + + //create druid datasource + DataSource ds = DruidDataSourceFactory.createDataSource(properties); + Connection connection = ds.getConnection(); // get connection + Statement statement = connection.createStatement(); // get statement + + //query or insert + // ... + + connection.close(); // put back to conneciton pool +} +``` +> More instructions can refer to [User Guide][6] + +**Notice** +* TDengine `v1.6.4.1` provides a function `select server_status()` to check heartbeat. It is highly recommended to use this function for `Validation Query`. 
+ +As follows,`1` will be returned if `select server_status()` is successfully executed。 +```shell +taos> select server_status(); +server_status()| +================ +1 | +Query OK, 1 row(s) in set (0.000141s) +``` + +## Integrated with framework + +* Please refer to [SpringJdbcTemplate][11] if using taos-jdbcdriver in Spring JdbcTemplate +* Please refer to [springbootdemo][12] if using taos-jdbcdriver in Spring JdbcTemplate + +## FAQ + +* java.lang.UnsatisfiedLinkError: no taos in java.library.path + + **Cause**:The application program cannot find Library function _taos_ + + **Answer**:Copy `C:\TDengine\driver\taos.dll` to `C:\Windows\System32\` on Windows and make a soft link through ` ln -s /usr/local/taos/driver/libtaos.so.x.x.x.x /usr/lib/libtaos.so` on Linux. + +* java.lang.UnsatisfiedLinkError: taos.dll Can't load AMD 64 bit on a IA 32-bit platform + + **Cause**:Currently TDengine only support 64bit JDK + + **Answer**:re-install 64bit JDK. + +* For other questions, please refer to [Issues][7] -All the error codes and error messages can be found in `TSDBError.java` . For a more detailed coding example, please refer to the demo project `JDBCDemo` in TDengine's code examples. ## Python Connector From c246456c1f4d5d0a6bbfa71c3ca80b4524ccf110 Mon Sep 17 00:00:00 2001 From: slguan Date: Sat, 7 Dec 2019 17:39:46 +0800 Subject: [PATCH 11/17] Remove compiler warning options --- CMakeLists.txt | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 71cb0bfd43..9c446b19a5 100755 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -141,12 +141,12 @@ IF (NOT DEFINED TD_CLUSTER) SET(RELEASE_FLAGS "-O0") IF (NOT TD_ARM) IF (${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ELSE () - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -malign-stringops -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () ELSE () - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -fpack-struct=8 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ENDIF () ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) @@ -156,7 +156,7 @@ IF (NOT DEFINED TD_CLUSTER) ENDIF () SET(DEBUG_FLAGS "-O0 -DDEBUG") SET(RELEASE_FLAGS "-O0") - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -Wno-char-subscripts -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") + SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -g -fsigned-char -munaligned-access -fpack-struct=8 -latomic -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") ADD_DEFINITIONS(-DLINUX) ADD_DEFINITIONS(-D_REENTRANT -D__USE_POSIX -D_LIBC_REENTRANT) ADD_DEFINITIONS(-DUSE_LIBICONV) @@ -171,7 +171,7 @@ IF (NOT DEFINED TD_CLUSTER) ADD_DEFINITIONS(-DPTW32_BUILD) ADD_DEFINITIONS(-D_MBCS -D_CRT_SECURE_NO_DEPRECATE -D_CRT_NONSTDC_NO_DEPRECATE) ELSEIF (TD_DARWIN_64) - SET(COMMON_FLAGS "-std=gnu99 -Wall -fPIC -malign-double -g -Wno-char-subscripts -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE -Wno-unused-variable -Wno-bitfield-constant-conversion") + SET(COMMON_FLAGS 
"-std=gnu99 -Wall -fPIC -malign-double -g -msse4.2 -D_FILE_OFFSET_BITS=64 -D_LARGE_FILE") SET(DEBUG_FLAGS "-O0 -DDEBUG") SET(RELEASE_FLAGS "-O0") ADD_DEFINITIONS(-DDARWIN) From 964b643bc87b76782a6fe2cce4b89102939de186 Mon Sep 17 00:00:00 2001 From: slguan Date: Sat, 7 Dec 2019 18:04:36 +0800 Subject: [PATCH 12/17] [TBASE-1296] --- src/inc/sdb.h | 2 +- src/inc/taosmsg.h | 2 +- src/sdb/inc/sdbint.h | 2 +- src/sdb/src/sdbEngine.c | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/inc/sdb.h b/src/inc/sdb.h index 389aecfb7b..a0e0a1b2f2 100644 --- a/src/inc/sdb.h +++ b/src/inc/sdb.h @@ -105,7 +105,7 @@ extern SSdbPeer *sdbPeer[]; #endif -void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, uint8_t keyType, char *directory, void *(*appTool)(char, void *, char *, int, int *)); void *sdbGetRow(void *handle, void *key); diff --git a/src/inc/taosmsg.h b/src/inc/taosmsg.h index c1820a5b9c..d3634219aa 100644 --- a/src/inc/taosmsg.h +++ b/src/inc/taosmsg.h @@ -664,7 +664,7 @@ typedef struct { uint32_t destId; char meterId[TSDB_UNI_LEN]; char empty[3]; - char msgType; + uint8_t msgType; int32_t msgLen; uint8_t content[0]; } SIntMsg; diff --git a/src/sdb/inc/sdbint.h b/src/sdb/inc/sdbint.h index 3327c1f731..c5b4f4e4ae 100644 --- a/src/sdb/inc/sdbint.h +++ b/src/sdb/inc/sdbint.h @@ -127,7 +127,7 @@ typedef struct { } SMnodeStatus; typedef struct { - char dbId; + uint8_t dbId; char type; uint64_t version; short dataLen; diff --git a/src/sdb/src/sdbEngine.c b/src/sdb/src/sdbEngine.c index c115f3c3c0..0efa81866f 100644 --- a/src/sdb/src/sdbEngine.c +++ b/src/sdb/src/sdbEngine.c @@ -287,7 +287,7 @@ sdb_exit1: return -1; } -void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, char keyType, char *directory, +void *sdbOpenTable(int maxRows, int32_t maxRowSize, char *name, uint8_t keyType, char *directory, void *(*appTool)(char, void *, char *, int, int *)) { SSdbTable *pTable = (SSdbTable *)malloc(sizeof(SSdbTable)); if (pTable == NULL) return NULL; From b6fabc595ceaab546abbd3d5211aaac371f222fa Mon Sep 17 00:00:00 2001 From: slguan Date: Sat, 7 Dec 2019 18:14:26 +0800 Subject: [PATCH 13/17] [TBASE-1315] --- src/rpc/src/trpc.c | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/src/rpc/src/trpc.c b/src/rpc/src/trpc.c index 9f006ab05a..4aa3e12cf1 100755 --- a/src/rpc/src/trpc.c +++ b/src/rpc/src/trpc.c @@ -31,8 +31,6 @@ #include "tutil.h" #include "lz4.h" -#pragma GCC diagnostic ignored "-Wpointer-to-int-cast" - typedef struct _msg_node { struct _msg_node *next; void * ahandle; @@ -58,7 +56,7 @@ typedef struct { uint16_t tranId; // outgoing transcation ID, for build message uint16_t outTranId; // outgoing transcation ID uint16_t inTranId; - char outType; + uint8_t outType; char inType; char closing; char rspReceived; @@ -203,7 +201,7 @@ static STaosHeader* taosDecompressRpcMsg(STaosHeader* pHeader, SSchedMsg* pSched //tDump(pHeader->content, msgLen); if (buf) { - int32_t originalLen = LZ4_decompress_safe(pHeader->content + overhead, buf + sizeof(STaosHeader), + int32_t originalLen = LZ4_decompress_safe((const char*)(pHeader->content + overhead), buf + sizeof(STaosHeader), msgLen - overhead, contLen); memcpy(buf, pHeader, sizeof(STaosHeader)); @@ -220,6 +218,8 @@ static STaosHeader* taosDecompressRpcMsg(STaosHeader* pHeader, SSchedMsg* pSched tError("failed to allocate memory to decompress msg, contLen:%d, reason:%s", contLen, strerror(errno)); 
pSchedMsg->msg = NULL; } + + return NULL; } char *taosBuildReqHeader(void *param, char type, char *msg) { @@ -245,7 +245,10 @@ char *taosBuildReqHeader(void *param, char type, char *msg) { pHeader->sourceId = pConn->ownId; pHeader->destId = pConn->peerId; pHeader->port = 0; + +#pragma GCC diagnostic ignored "-Wpointer-to-int-cast" pHeader->uid = (uint32_t)pConn + (uint32_t)getpid(); +#pragma GCC diagnostic warning "-Wpointer-to-int-cast" memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); @@ -276,7 +279,11 @@ char *taosBuildReqMsgWithSize(void *param, char type, int size) { pHeader->sourceId = pConn->ownId; pHeader->destId = pConn->peerId; + +#pragma GCC diagnostic ignored "-Wpointer-to-int-cast" pHeader->uid = (uint32_t)pConn + (uint32_t)getpid(); +#pragma GCC diagnostic warning "-Wpointer-to-int-cast" + memcpy(pHeader->meterId, pConn->meterId, tListLen(pHeader->meterId)); return (char *)pHeader->content; From 80cd0eec84f4dddb814b4d84e1ded3aa9db9bc06 Mon Sep 17 00:00:00 2001 From: fang Date: Sat, 7 Dec 2019 18:20:25 +0800 Subject: [PATCH 14/17] typo correction in english version of jdbc driver --- documentation/webdocs/markdowndocs/Connector.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md index 23efb52de1..a642869b60 100644 --- a/documentation/webdocs/markdowndocs/Connector.md +++ b/documentation/webdocs/markdowndocs/Connector.md @@ -869,3 +869,17 @@ An example of using the NodeJS connector to create a table with weather data and An example of using the NodeJS connector to achieve the same things but without all the object wrappers that wrap around the data returned to achieve higher functionality can be found [here](https://github.com/taosdata/TDengine/tree/master/tests/examples/nodejs/node-example-raw.js) +[1]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[2]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[3]: https://github.com/taosdata/TDengine +[4]: https://www.taosdata.com/blog/2019/12/03/jdbcdriver%e6%89%be%e4%b8%8d%e5%88%b0%e5%8a%a8%e6%80%81%e9%93%be%e6%8e%a5%e5%ba%93/ +[5]: https://github.com/brettwooldridge/HikariCP +[6]: https://github.com/alibaba/druid +[7]: https://github.com/taosdata/TDengine/issues +[8]: https://search.maven.org/artifact/com.taosdata.jdbc/taos-jdbcdriver +[9]: https://mvnrepository.com/artifact/com.taosdata.jdbc/taos-jdbcdriver +[10]: https://maven.aliyun.com/mvn/search +[11]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/SpringJdbcTemplate +[12]: https://github.com/taosdata/TDengine/tree/develop/tests/examples/JDBC/springbootdemo +[13]: https://www.taosdata.com/cn/documentation/administrator/#%E5%AE%A2%E6%88%B7%E7%AB%AF%E9%85%8D%E7%BD%AE + From 3ff4039b1728d2ef8332102bad63b37383f5c4df Mon Sep 17 00:00:00 2001 From: fang Date: Sat, 7 Dec 2019 18:23:34 +0800 Subject: [PATCH 15/17] typo correction in english version of jdbc driver --- documentation/webdocs/markdowndocs/Connector.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/documentation/webdocs/markdowndocs/Connector.md b/documentation/webdocs/markdowndocs/Connector.md index a642869b60..c4a08ca88e 100644 --- a/documentation/webdocs/markdowndocs/Connector.md +++ b/documentation/webdocs/markdowndocs/Connector.md @@ -211,10 +211,10 @@ Since the native language of TDengine is C, the necessary TDengine library shoul > Note: Please make sure that TDengine Windows client has been 
installed if developing on Windows. Since TDengine is time-series database, there are still some differences compared with traditional databases in using TDengine JDBC driver: -* TDengine doesn't allow to delete/modify single record, and thus JDBC driver also has no such method. +* TDengine doesn't allow to delete/modify a single record, and thus JDBC driver also has no such method. * No support for transaction * No support for union between tables -* No support for nested query),`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`. +* No support for nested query,`There is at most one open ResultSet for each Connection. Thus, TSDB JDBC Driver will close current ResultSet if it is not closed and a new query begins`. ## Version list of TAOS-JDBCDriver and required TDengine and JDK From df80c3df689b26edb3971f03085d6deafc084268 Mon Sep 17 00:00:00 2001 From: lihui Date: Sat, 7 Dec 2019 18:32:13 +0800 Subject: [PATCH 16/17] [compile error] --- src/client/src/TSDBJNIConnector.c | 10 +++++----- src/client/src/tscAst.c | 6 +++--- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/client/src/TSDBJNIConnector.c b/src/client/src/TSDBJNIConnector.c index e27313a968..2f1bcc522a 100644 --- a/src/client/src/TSDBJNIConnector.c +++ b/src/client/src/TSDBJNIConnector.c @@ -261,11 +261,11 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J //todo handle error } - jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, sql); + jniTrace("jobj:%p, conn:%p, sql:%s", jobj, tscon, dst); int code = taos_query(tscon, dst); if (code != 0) { - jniError("jobj:%p, conn:%p, code:%d, msg:%s, sql:%s", jobj, tscon, code, taos_errstr(tscon), dst); + jniError("jobj:%p, conn:%p, code:%d, msg:%s", jobj, tscon, code, taos_errstr(tscon)); free(dst); return JNI_TDENGINE_ERROR; } else { @@ -274,9 +274,9 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_executeQueryImp(J if (pSql->cmd.command == TSDB_SQL_INSERT) { affectRows = taos_affected_rows(tscon); - jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows, dst); + jniTrace("jobj:%p, conn:%p, code:%d, affect rows:%d", jobj, tscon, code, affectRows); } else { - jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code, dst); + jniTrace("jobj:%p, conn:%p, code:%d", jobj, tscon, code); } free(dst); @@ -678,4 +678,4 @@ JNIEXPORT jint JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_validateCreateTab JNIEXPORT jstring JNICALL Java_com_taosdata_jdbc_TSDBJNIConnector_getTsCharset(JNIEnv *env, jobject jobj) { return (*env)->NewStringUTF(env, (const char *)tsCharset); -} \ No newline at end of file +} diff --git a/src/client/src/tscAst.c b/src/client/src/tscAst.c index 6e72fa8438..1e0fac4dd2 100644 --- a/src/client/src/tscAst.c +++ b/src/client/src/tscAst.c @@ -112,8 +112,8 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, tSQLSyntaxNode *pNode = NULL; if (pToken->type == TK_ID || pToken->type == TK_TBNAME) { + int32_t i = 0; if (pToken->type == TK_ID) { - int32_t i = 0; do { size_t len = strlen(pSchema[i].name); if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break; @@ -326,8 +326,8 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha uint8_t localOptr = getBinaryExprOptr(&t0); if (localOptr == 0) { pError("not support binary operator:%d", t0.type); + free(pBinExpr); return NULL; - free(pBinExpr) } return 
parseRemainStr(str, pBinExpr, pSchema, localOptr, numOfCols, i); @@ -936,4 +936,4 @@ void tQueryResultClean(tQueryResultset *pRes) { tfree(pRes->pRes); pRes->num = 0; -} \ No newline at end of file +} From 2f9d055696e9c5fd92d8175ccb14e563e038093b Mon Sep 17 00:00:00 2001 From: hjxilinx Date: Sat, 7 Dec 2019 18:40:14 +0800 Subject: [PATCH 17/17] [tbase-1282] --- src/client/inc/tscUtil.h | 2 + src/client/inc/tsclient.h | 2 + src/client/src/tscAst.c | 8 +- src/client/src/tscJoinProcess.c | 251 +++++++++++++++++--------------- src/client/src/tscSQLParser.c | 2 + src/client/src/tscSql.c | 169 +++++++++++---------- src/client/src/tscUtil.c | 30 +++- 7 files changed, 248 insertions(+), 216 deletions(-) diff --git a/src/client/inc/tscUtil.h b/src/client/inc/tscUtil.h index 9ea6ba7c3f..3deb4c463f 100644 --- a/src/client/inc/tscUtil.h +++ b/src/client/inc/tscUtil.h @@ -95,6 +95,8 @@ SMeterSidExtInfo* tscGetMeterSidInfo(SVnodeSidList* pSidList, int32_t idx); bool tscIsPointInterpQuery(SSqlCmd* pCmd); bool tscIsTWAQuery(SSqlCmd* pCmd); bool tscProjectionQueryOnMetric(SSqlCmd* pCmd); +bool tscProjectionQueryOnTable(SSqlCmd* pCmd); + bool tscIsTwoStageMergeMetricQuery(SSqlCmd* pCmd); bool tscQueryOnMetric(SSqlCmd* pCmd); bool tscQueryMetricTags(SSqlCmd* pCmd); diff --git a/src/client/inc/tsclient.h b/src/client/inc/tsclient.h index 4101cbfc9e..b36d2362da 100644 --- a/src/client/inc/tsclient.h +++ b/src/client/inc/tsclient.h @@ -477,6 +477,8 @@ void tscProcessMultiVnodesInsertForFile(SSqlObj *pSql); void tscKillMetricQuery(SSqlObj *pSql); void tscInitResObjForLocalQuery(SSqlObj *pObj, int32_t numOfRes, int32_t rowLen); bool tscIsUpdateQuery(STscObj *pObj); +bool tscHasReachLimitation(SSqlObj* pSql); + int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *sql); // transfer SSqlInfo to SqlCmd struct diff --git a/src/client/src/tscAst.c b/src/client/src/tscAst.c index 6e72fa8438..003a3aaf7a 100644 --- a/src/client/src/tscAst.c +++ b/src/client/src/tscAst.c @@ -112,8 +112,9 @@ static tSQLSyntaxNode *tSQLSyntaxNodeCreate(SSchema *pSchema, int32_t numOfCols, tSQLSyntaxNode *pNode = NULL; if (pToken->type == TK_ID || pToken->type == TK_TBNAME) { + int32_t i = 0; + if (pToken->type == TK_ID) { - int32_t i = 0; do { size_t len = strlen(pSchema[i].name); if (strncmp(pToken->z, pSchema[i].name, pToken->n) == 0 && pToken->n == len) break; @@ -326,8 +327,8 @@ static tSQLSyntaxNode *createSyntaxTree(SSchema *pSchema, int32_t numOfCols, cha uint8_t localOptr = getBinaryExprOptr(&t0); if (localOptr == 0) { pError("not support binary operator:%d", t0.type); + free(pBinExpr); return NULL; - free(pBinExpr) } return parseRemainStr(str, pBinExpr, pSchema, localOptr, numOfCols, i); @@ -652,8 +653,7 @@ void tSQLListTraverseOnResult(struct tSQLBinaryExpr *pExpr, bool (*fp)(tSkipList // brutal force search int64_t num = pResult->num; for (int32_t i = 0, j = 0; i < pResult->num; ++i) { - //if (fp == NULL || (fp != NULL && fp(pResult->pRes[i], pExpr->info) == true)) { - if (fp == NULL || (fp(pResult->pRes[i], pExpr->info) == true)) { + if (fp == NULL || (fp(pResult->pRes[i], pExpr->info) == true)) { pResult->pRes[j++] = pResult->pRes[i]; } else { num--; diff --git a/src/client/src/tscJoinProcess.c b/src/client/src/tscJoinProcess.c index 3126c3a867..1e7355d1b1 100644 --- a/src/client/src/tscJoinProcess.c +++ b/src/client/src/tscJoinProcess.c @@ -13,9 +13,9 @@ * along with this program. If not, see . 
*/ +#include "tscJoinProcess.h" #include "os.h" #include "tcache.h" -#include "tscJoinProcess.h" #include "tscUtil.h" #include "tsclient.h" #include "tscompression.h" @@ -45,8 +45,8 @@ static bool doCompare(int32_t order, int64_t left, int64_t right) { } } -static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, SJoinSubquerySupporter* pSupporter2, - TSKEY* st, TSKEY* et) { +static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSupporter1, + SJoinSubquerySupporter* pSupporter2, TSKEY* st, TSKEY* et) { STSBuf* output1 = tsBufCreate(true); STSBuf* output2 = tsBufCreate(true); @@ -150,14 +150,15 @@ static int64_t doTSBlockIntersect(SSqlObj* pSql, SJoinSubquerySupporter* pSuppor tsBufDestory(pSupporter1->pTSBuf); tsBufDestory(pSupporter2->pTSBuf); - tscTrace("%p input1:%lld, input2:%lld, final:%lld for secondary query after ts blocks intersecting", - pSql, numOfInput1, numOfInput2, output1->numOfTotal); + tscTrace("%p input1:%lld, input2:%lld, final:%lld for secondary query after ts blocks intersecting", pSql, + numOfInput1, numOfInput2, output1->numOfTotal); return output1->numOfTotal; } -//todo handle failed to create sub query -SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) { +// todo handle failed to create sub query +SJoinSubquerySupporter* tscCreateJoinSupporter(SSqlObj* pSql, SSubqueryState* pState, + /*int32_t* numOfComplete, int32_t* gc,*/ int32_t index) { SJoinSubquerySupporter* pSupporter = calloc(1, sizeof(SJoinSubquerySupporter)); if (pSupporter == NULL) { return NULL; @@ -241,8 +242,10 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { } // scan all subquery, if one sub query has only ts, ignore it - tscTrace("%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in " - "select clause", pSql, pSql->numOfSubs, numOfSub); + tscTrace( + "%p start to launch secondary subqueries, total:%d, only:%d needs to query, others are not retrieve in " + "select clause", + pSql, pSql->numOfSubs, numOfSub); int32_t j = 0; for (int32_t i = 0; i < pSql->numOfSubs; ++i) { @@ -258,7 +261,7 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { SSqlObj* pNew = createSubqueryObj(pSql, (int16_t)i, tscJoinQueryCallback, pSupporter, NULL); if (pNew == NULL) { - pSql->numOfSubs = i; //revise the number of subquery + pSql->numOfSubs = i; // revise the number of subquery pSupporter->pState->numOfTotal = i; pSupporter->pState->code = TSDB_CODE_CLI_OUT_OF_MEMORY; @@ -296,14 +299,14 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { tscFieldInfoCalOffset(&pNew->cmd); SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pNew->cmd, 0); - + /* * When handling the projection query, the offset value will be modified for table-table join, which is changed * during the timestamp intersection. 
*/ pSupporter->limit = pSql->cmd.limit; pNew->cmd.limit = pSupporter->limit; - + // fetch the join tag column if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { SSqlExpr* pExpr = tscSqlExprGet(&pNew->cmd, 0); @@ -317,7 +320,7 @@ int32_t tscLaunchSecondSubquery(SSqlObj* pSql) { #ifdef _DEBUG_VIEW tscPrintSelectClause(&pNew->cmd); #endif - + tscProcessSql(pNew); } @@ -388,10 +391,10 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { return; } - if (numOfRows > 0) { // write the data into disk + if (numOfRows > 0) { // write the data into disk fwrite(pSql->res.data, pSql->res.numOfRows, 1, pSupporter->f); fclose(pSupporter->f); - + STSBuf* pBuf = tsBufCreateFromFile(pSupporter->path, true); if (pBuf == NULL) { tscError("%p invalid ts comp file from vnode, abort sub query, file size:%d", pSql, numOfRows); @@ -405,9 +408,9 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { tscTrace("%p create tmp file for ts block:%s", pSql, pBuf->path); pSupporter->pTSBuf = pBuf; } else { - assert(pSql->cmd.numOfTables == 1); // for subquery, only one metermetaInfo + assert(pSql->cmd.numOfTables == 1); // for subquery, only one metermetaInfo SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - + tsBufMerge(pSupporter->pTSBuf, pBuf, pMeterMetaInfo->vnodeIndex); tsBufDestory(pBuf); } @@ -418,26 +421,25 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { pSql->res.row = pSql->res.numOfRows; taos_fetch_rows_a(tres, joinRetrieveCallback, param); - } else if (numOfRows == 0) { // no data from this vnode anymore + } else if (numOfRows == 0) { // no data from this vnode anymore if (tscProjectionQueryOnMetric(&pParentSql->cmd)) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); assert(pSql->cmd.numOfTables == 1); - + // for projection query, need to try next vnode if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { pSql->cmd.command = TSDB_SQL_SELECT; pSql->fp = tscJoinQueryCallback; tscProcessSql(pSql); - + return; } } - - if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { + if (atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { tscTrace("%p sub:%p, numOfSub:%d, quit from further procedure due to other queries failure", pParentSql, tres, - pSupporter->subqueryIndex); + pSupporter->subqueryIndex); doQuitSubquery(pParentSql); return; } @@ -471,31 +473,34 @@ static void joinRetrieveCallback(void* param, TAOS_RES* tres, int numOfRows) { pSupporter->pState->code = numOfRows; tscError("%p retrieve failed, code:%d, index:%d", pSql, numOfRows, pSupporter->subqueryIndex); } - + + if (numOfRows >= 0) { + pSql->res.numOfTotal += pSql->res.numOfRows; + } + if (tscProjectionQueryOnMetric(&pSql->cmd) && numOfRows == 0) { - SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); assert(pSql->cmd.numOfTables == 1); - + // for projection query, need to try next vnode if current vnode is exhausted if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { - pSupporter->pState->numOfCompleted = 0; pSupporter->pState->numOfTotal = 1; - + pSql->cmd.command = TSDB_SQL_SELECT; pSql->fp = tscJoinQueryCallback; tscProcessSql(pSql); - + return; } } - + if 
(atomic_add_fetch_32(&pSupporter->pState->numOfCompleted, 1) >= pSupporter->pState->numOfTotal) { assert(pSupporter->pState->numOfCompleted == pSupporter->pState->numOfTotal); - + tscTrace("%p all %d secondary retrieves are completed, global code:%d", tres, pSupporter->pState->numOfTotal, - pParentSql->res.code); - + pParentSql->res.code); + if (pSupporter->pState->code != TSDB_CODE_SUCCESS) { pParentSql->res.code = abs(pSupporter->pState->code); freeSubqueryObj(pParentSql); @@ -510,62 +515,68 @@ void tscFetchDatablockFromSubquery(SSqlObj* pSql) { int32_t numOfFetch = 0; assert(pSql->numOfSubs >= 1); - + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlRes* pRes = &pSql->pSubs[i]->res; - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->pSubs[i]->cmd, 0); - - if (UTIL_METER_IS_METRIC(pMeterMetaInfo)) { - if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes) { + SSqlRes *pRes = &pSql->pSubs[i]->res; + SSqlCmd *pCmd = &pSql->pSubs[i]->cmd; + + SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); + + if (tscProjectionQueryOnMetric(pCmd)) { + if (pRes->row >= pRes->numOfRows && pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes && + (!tscHasReachLimitation(pSql->pSubs[i]))) { numOfFetch++; } } else { - if (pRes->row >= pRes->numOfRows) { + if ((pRes->row >= pRes->numOfRows && (!tscHasReachLimitation(pSql->pSubs[i])) && tscProjectionQueryOnTable(pSql)) + || (pRes->numOfRows == 0)) { numOfFetch++; } } } - if (numOfFetch > 0) { - tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch); - - SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param; - pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed - pSupporter->pState->numOfCompleted = 0; - - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlObj* pSql1 = pSql->pSubs[i]; - - SSqlRes* pRes1 = &pSql1->res; - SSqlCmd* pCmd1 = &pSql1->cmd; - - pSupporter = (SJoinSubquerySupporter*)pSql1->param; - - // wait for all subqueries completed - pSupporter->pState->numOfTotal = numOfFetch; - - assert(pRes1->numOfRows >= 0 && pCmd1->numOfTables == 1); - - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); - if (pRes1->row >= pRes1->numOfRows) { - tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1, - pSupporter->subqueryIndex, pMeterMetaInfo->vnodeIndex); - - tscResetForNextRetrieve(pRes1); - - pSql1->fp = joinRetrieveCallback; - - if (pCmd1->command < TSDB_SQL_LOCAL) { - pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? 
TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; - } - - tscProcessSql(pSql1); - } - } - - // wait for all subquery completed - tsem_wait(&pSql->rspSem); + if (numOfFetch <= 0) { + return ; } + + // TODO multi-vnode retrieve for projection query with limitation has bugs, since the global limiation is not handled + tscTrace("%p retrieve data from %d subqueries", pSql, numOfFetch); + + SJoinSubquerySupporter* pSupporter = (SJoinSubquerySupporter*)pSql->pSubs[0]->param; + pSupporter->pState->numOfTotal = numOfFetch; // wait for all subqueries completed + pSupporter->pState->numOfCompleted = 0; + + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlObj* pSql1 = pSql->pSubs[i]; + + SSqlRes* pRes1 = &pSql1->res; + SSqlCmd* pCmd1 = &pSql1->cmd; + + pSupporter = (SJoinSubquerySupporter*)pSql1->param; + + // wait for all subqueries completed + pSupporter->pState->numOfTotal = numOfFetch; + assert(pRes1->numOfRows >= 0 && pCmd1->numOfTables == 1); + + SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); + + if (pRes1->row >= pRes1->numOfRows) { + tscTrace("%p subquery:%p retrieve data from vnode, subquery:%d, vnodeIndex:%d", pSql, pSql1, + pSupporter->subqueryIndex, pMeterMetaInfo->vnodeIndex); + + tscResetForNextRetrieve(pRes1); + pSql1->fp = joinRetrieveCallback; + + if (pCmd1->command < TSDB_SQL_LOCAL) { + pCmd1->command = (pCmd1->command > TSDB_SQL_MGMT) ? TSDB_SQL_RETRIEVE : TSDB_SQL_FETCH; + } + + tscProcessSql(pSql1); + } + } + + // wait for all subquery completed + tsem_wait(&pSql->rspSem); } // all subqueries return, set the result output index @@ -574,11 +585,11 @@ void tscSetupOutputColumnIndex(SSqlObj* pSql) { SSqlRes* pRes = &pSql->res; tscTrace("%p all subquery response, retrieve data", pSql); - + if (pRes->pColumnIndex != NULL) { return; // the column transfer support struct has been built } - + pRes->pColumnIndex = calloc(1, sizeof(SColumnIndex) * pCmd->fieldsInfo.numOfOutputCols); for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { @@ -669,22 +680,23 @@ void tscJoinQueryCallback(void* param, TAOS_RES* tres, int code) { tscSetupOutputColumnIndex(pParentSql); SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->cmd, 0); - + /** - * if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of data instead of returning to its invoker + * if the query is a continue query (vnodeIndex > 0 for projection query) for next vnode, do the retrieval of + * data instead of returning to its invoker */ if (pMeterMetaInfo->vnodeIndex > 0 && tscProjectionQueryOnMetric(&pSql->cmd)) { assert(pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes); pSupporter->pState->numOfCompleted = 0; // reset the record value - + pSql->fp = joinRetrieveCallback; // continue retrieve data pSql->cmd.command = TSDB_SQL_FETCH; tscProcessSql(pSql); - } else { // first retrieve from vnode during the secondary stage sub-query + } else { // first retrieve from vnode during the secondary stage sub-query if (pParentSql->fp == NULL) { tsem_wait(&pParentSql->emptyRspSem); tsem_wait(&pParentSql->emptyRspSem); - + tsem_post(&pParentSql->rspSem); } else { // set the command flag must be after the semaphore been correctly set. 
@@ -848,7 +860,7 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { size_t infoSize = sizeof(STSVnodeBlockInfo) * pTSBuf->numOfVnodes; STSVnodeBlockInfo* buf = (STSVnodeBlockInfo*)calloc(1, infoSize); - int64_t pos = ftell(pTSBuf->f); + int64_t pos = ftell(pTSBuf->f); fread(buf, infoSize, 1, pTSBuf->f); // the length value for each vnode is not kept in file, so does not set the length value @@ -864,17 +876,17 @@ STSBuf* tsBufCreateFromFile(const char* path, bool autoDelete) { struct stat fileStat; fstat(fileno(pTSBuf->f), &fileStat); - pTSBuf->fileSize = (uint32_t) fileStat.st_size; + pTSBuf->fileSize = (uint32_t)fileStat.st_size; tsBufResetPos(pTSBuf); // ascending by default pTSBuf->cur.order = TSQL_SO_ASC; pTSBuf->autoDelete = autoDelete; - + tscTrace("create tsBuf from file:%s, fd:%d, size:%d, numOfVnode:%d, autoDelete:%d", pTSBuf->path, fileno(pTSBuf->f), - pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete); - + pTSBuf->fileSize, pTSBuf->numOfVnodes, pTSBuf->autoDelete); + return pTSBuf; } @@ -899,12 +911,11 @@ void tsBufDestory(STSBuf* pTSBuf) { } free(pTSBuf); - } static STSVnodeBlockInfoEx* tsBufGetLastVnodeInfo(STSBuf* pTSBuf) { int32_t last = pTSBuf->numOfVnodes - 1; - + assert(last >= 0); return &pTSBuf->pData[last]; } @@ -944,9 +955,9 @@ static STSVnodeBlockInfoEx* addOneVnodeInfo(STSBuf* pTSBuf, int32_t vnodeId) { pTSBuf->numOfVnodes += 1; // update the header info - STSBufFileHeader header = - {.magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; - + STSBufFileHeader header = { + .magic = TS_COMP_FILE_MAGIC, .numOfVnode = pTSBuf->numOfVnodes, .tsOrder = pTSBuf->tsOrder}; + STSBufUpdateHeader(pTSBuf, &header); return tsBufGetLastVnodeInfo(pTSBuf); } @@ -994,9 +1005,9 @@ static void writeDataToDisk(STSBuf* pTSBuf) { pTSBuf->fileSize += blockSize; pTSBuf->tsData.len = 0; - + STSVnodeBlockInfoEx* pVnodeBlockInfoEx = tsBufGetLastVnodeInfo(pTSBuf); - + pVnodeBlockInfoEx->info.compLen += blockSize; pVnodeBlockInfoEx->info.numOfBlocks += 1; @@ -1250,9 +1261,9 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex } STSBlock* pBlock = &pTSBuf->block; - + size_t s = pBlock->numOfElem * TSDB_KEYSIZE; - + /* * In order to accommodate all the qualified data, the actual buffer size for one block with identical tags value * may exceed the maximum allowed size during *tsBufAppend* function by invoking expandBuffer function @@ -1260,7 +1271,7 @@ static void tsBufGetBlock(STSBuf* pTSBuf, int32_t vnodeIndex, int32_t blockIndex if (s > pTSBuf->tsData.allocSize) { expandBuffer(&pTSBuf->tsData, s); } - + pTSBuf->tsData.len = tsDecompressTimestamp(pBlock->payload, pBlock->compLen, pBlock->numOfElem, pTSBuf->tsData.rawBuf, pTSBuf->tsData.allocSize, TWO_STAGE_COMP, pTSBuf->assistBuf, pTSBuf->bufSize); @@ -1307,20 +1318,20 @@ bool tsBufNextPos(STSBuf* pTSBuf) { if (pCur->vnodeIndex == -1) { if (pCur->order == TSQL_SO_ASC) { tsBufGetBlock(pTSBuf, 0, 0); - - if (pTSBuf->block.numOfElem == 0) { // the whole list is empty, return + + if (pTSBuf->block.numOfElem == 0) { // the whole list is empty, return tsBufResetPos(pTSBuf); return false; } else { return true; } - - } else { // get the last timestamp record in the last block of the last vnode + + } else { // get the last timestamp record in the last block of the last vnode assert(pTSBuf->numOfVnodes > 0); - + int32_t vnodeIndex = pTSBuf->numOfVnodes - 1; pCur->vnodeIndex = vnodeIndex; - + int32_t vnodeId = pTSBuf->pData[pCur->vnodeIndex].info.vnode; 
STSVnodeBlockInfo* pBlockInfo = tsBufGetVnodeBlockInfo(pTSBuf, vnodeId); int32_t blockIndex = pBlockInfo->numOfBlocks - 1; @@ -1397,8 +1408,6 @@ STSElem tsBufGetElem(STSBuf* pTSBuf) { return elem1; } - - /** * current only support ts comp data from two vnode merge * @param pDestBuf @@ -1452,7 +1461,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { pDestBuf->numOfVnodes = newSize; } else { STSVnodeBlockInfoEx* pBlockInfoEx = tsBufGetLastVnodeInfo(pDestBuf); - + pBlockInfoEx->len += pSrcBuf->pData[0].len; pBlockInfoEx->info.numOfBlocks += pSrcBuf->pData[0].info.numOfBlocks; pBlockInfoEx->info.compLen += pSrcBuf->pData[0].info.compLen; @@ -1470,7 +1479,7 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { #else ssize_t rc = fsendfile(pDestBuf->f, pSrcBuf->f, &offset, size); #endif - + if (rc == -1) { tscError("failed to merge tsBuf from:%s to %s, reason:%s\n", pSrcBuf->path, pDestBuf->path, strerror(errno)); return -1; @@ -1482,18 +1491,18 @@ int32_t tsBufMerge(STSBuf* pDestBuf, const STSBuf* pSrcBuf, int32_t vnodeId) { } pDestBuf->numOfTotal += pSrcBuf->numOfTotal; - + int32_t oldSize = pDestBuf->fileSize; - + struct stat fileStat; fstat(fileno(pDestBuf->f), &fileStat); - pDestBuf->fileSize = (uint32_t) fileStat.st_size; - + pDestBuf->fileSize = (uint32_t)fileStat.st_size; + assert(pDestBuf->fileSize == oldSize + size); - - tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf, pDestBuf->path, - fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete); - + + tscTrace("tsBuf merge success, %p, path:%s, fd:%d, file size:%d, numOfVnode:%d, autoDelete:%d", pDestBuf, + pDestBuf->path, fileno(pDestBuf->f), pDestBuf->fileSize, pDestBuf->numOfVnodes, pDestBuf->autoDelete); + return 0; } @@ -1510,7 +1519,7 @@ STSBuf* tsBufCreateFromCompBlocks(const char* pData, int32_t numOfBlocks, int32_ TSBufUpdateVnodeInfo(pTSBuf, pTSBuf->numOfVnodes - 1, pBlockInfo); fseek(pTSBuf->f, pBlockInfo->offset, SEEK_SET); - fwrite((void*) pData, 1, len, pTSBuf->f); + fwrite((void*)pData, 1, len, pTSBuf->f); pTSBuf->fileSize += len; pTSBuf->tsOrder = order; diff --git a/src/client/src/tscSQLParser.c b/src/client/src/tscSQLParser.c index 128cc7f3a1..752c5d123f 100644 --- a/src/client/src/tscSQLParser.c +++ b/src/client/src/tscSQLParser.c @@ -5008,6 +5008,8 @@ int32_t parseLimitClause(SSqlObj* pSql, SQuerySQL* pQuerySql) { // handle the limit offset value, validate the limit pCmd->limit = pQuerySql->limit; + pCmd->globalLimit = pCmd->limit.limit; + pCmd->slimit = pQuerySql->slimit; if (pCmd->slimit.offset < 0 || pCmd->limit.offset < 0) { diff --git a/src/client/src/tscSql.c b/src/client/src/tscSql.c index f3d3407582..04f9fc0aa6 100644 --- a/src/client/src/tscSql.c +++ b/src/client/src/tscSql.c @@ -16,21 +16,21 @@ #include "os.h" #include "tcache.h" #include "tlog.h" +#include "tnote.h" #include "trpc.h" #include "tscJoinProcess.h" #include "tscProfile.h" +#include "tscSQLParser.h" #include "tscSecondaryMerge.h" #include "tscUtil.h" #include "tsclient.h" #include "tscompression.h" #include "tsocket.h" -#include "tscSQLParser.h" #include "ttimer.h" #include "tutil.h" -#include "tnote.h" -TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, void (*fp)(void *, TAOS_RES *, int), - void *param, void **taos) { +TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const char *db, uint16_t port, + void (*fp)(void *, 
TAOS_RES *, int), void *param, void **taos) { STscObj *pObj; taos_init(); @@ -81,7 +81,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const globalCode = TSDB_CODE_CLI_OUT_OF_MEMORY; return NULL; } - + memset(pObj, 0, sizeof(STscObj)); pObj->signature = pObj; @@ -113,7 +113,7 @@ TAOS *taos_connect_imp(const char *ip, const char *user, const char *pass, const free(pObj); return NULL; } - + memset(pSql, 0, sizeof(SSqlObj)); pSql->pTscObj = pObj; pSql->signature = pSql; @@ -162,14 +162,14 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha void *taos = taos_connect_imp(ip, user, pass, db, port, NULL, NULL, NULL); if (taos != NULL) { - STscObj* pObj = (STscObj*) taos; + STscObj *pObj = (STscObj *)taos; // version compare only requires the first 3 segments of the version string int32_t comparedSegments = 3; - char client_version[64] = {0}; - char server_version[64] = {0}; - int clientVersionNumber[4] = {0}; - int serverVersionNumber[4] = {0}; + char client_version[64] = {0}; + char server_version[64] = {0}; + int clientVersionNumber[4] = {0}; + int serverVersionNumber[4] = {0}; strcpy(client_version, version); strcpy(server_version, taos_get_server_info(taos)); @@ -188,7 +188,7 @@ TAOS *taos_connect(const char *ip, const char *user, const char *pass, const cha return NULL; } - for(int32_t i = 0; i < comparedSegments; ++i) { + for (int32_t i = 0; i < comparedSegments; ++i) { if (clientVersionNumber[i] != serverVersionNumber[i]) { tscError("taos:%p, the %d-th number of server version:%s not matched with client version:%s, close connection", taos, i, server_version, version); @@ -225,7 +225,7 @@ void taos_close(TAOS *taos) { } } -int taos_query_imp(STscObj* pObj, SSqlObj* pSql) { +int taos_query_imp(STscObj *pObj, SSqlObj *pSql) { SSqlRes *pRes = &pSql->res; pRes->numOfRows = 1; @@ -251,7 +251,7 @@ int taos_query_imp(STscObj* pObj, SSqlObj* pSql) { } else { tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(pObj), pObj); } - + if (pRes->code != TSDB_CODE_SUCCESS) { tscFreeSqlObjPartial(pSql); } @@ -271,9 +271,10 @@ int taos_query(TAOS *taos, const char *sqlstr) { size_t sqlLen = strlen(sqlstr); if (sqlLen > tsMaxSQLStringLen) { - pRes->code = tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql + pRes->code = + tscInvalidSQLErrMsg(pSql->cmd.payload, "sql too long", NULL); // set the additional error msg for invalid sql tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); - + return pRes->code; } @@ -283,7 +284,7 @@ int taos_query(TAOS *taos, const char *sqlstr) { if (sql == NULL) { pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscError("%p failed to malloc sql string buffer, reason:%s", pSql, strerror(errno)); - + tscError("%p SQL result:%d, %s pObj:%p", pSql, pRes->code, taos_errstr(taos), pObj); return pRes->code; } @@ -451,59 +452,56 @@ static void **getOneRowFromBuf(SSqlObj *pSql) { return pRes->tsrow; } +static bool tscHashRemainDataInSubqueryResultSet(SSqlObj *pSql) { + bool hasData = true; + SSqlCmd *pCmd = &pSql->cmd; + + if (tscProjectionQueryOnMetric(pCmd)) { + bool allSubqueryExhausted = true; + + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlRes *pRes1 = &pSql->pSubs[i]->res; + SSqlCmd *pCmd1 = &pSql->pSubs[i]->cmd; + + SMeterMetaInfo *pMetaInfo = tscGetMeterMetaInfo(pCmd1, 0); + assert(pCmd1->numOfTables == 1); + + /* + * if the global limitation is not reached, and current result has not exhausted, or next 
more vnodes are + * available, go on + */ + if (pMetaInfo->vnodeIndex < pMetaInfo->pMetricMeta->numOfVnodes && pRes1->row < pRes1->numOfRows && + (!tscHasReachLimitation(pSql->pSubs[i]))) { + allSubqueryExhausted = false; + break; + } + } + + hasData = !allSubqueryExhausted; + } else { // otherwise, in case inner join, if any subquery exhausted, query completed. + for (int32_t i = 0; i < pSql->numOfSubs; ++i) { + SSqlRes *pRes1 = &pSql->pSubs[i]->res; + + if ((pRes1->row >= pRes1->numOfRows && tscHasReachLimitation(pSql->pSubs[i]) && + tscProjectionQueryOnTable(&pSql->pSubs[i]->cmd)) || + (pRes1->numOfRows == 0)) { + + hasData = false; + break; + } + } + } + + return hasData; +} + static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { SSqlCmd *pCmd = &pSql->cmd; SSqlRes *pRes = &pSql->res; while (1) { - bool hasData = true; - - if (tscProjectionQueryOnMetric(pCmd)) { - bool allSubqueryExhausted = true; - - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SMeterMetaInfo* pMeterMetaInfo = tscGetMeterMetaInfo(&pSql->pSubs[i]->cmd, 0); - if (pMeterMetaInfo->vnodeIndex < pMeterMetaInfo->pMetricMeta->numOfVnodes) { - allSubqueryExhausted = false; - break; - } - } - - hasData = !allSubqueryExhausted; - } else { //otherwise, in case inner join, if any subquery exhausted, query completed. - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlRes *pRes1 = &pSql->pSubs[i]->res; - if (pRes1->numOfRows == 0) { - hasData = false; - break; - } - } - } - - for (int32_t i = 0; i < pSql->numOfSubs; ++i) { - SSqlRes *pRes1 = &pSql->pSubs[i]->res; - SMeterMetaInfo* pMeterMeta = tscGetMeterMetaInfo(&pSql->pSubs[i]->cmd, 0); - - if (tscProjectionQueryOnMetric(pCmd)) { - //For multi-vnode projection query, the results may locate in following vnode, so we needs to go on - if (pMeterMeta->vnodeIndex < pMeterMeta->pMetricMeta->numOfVnodes) { - break; - } - } else { //otherwise, in case inner join, if any subquery exhausted, query completed. 
- if (pRes1->numOfRows == 0) { - hasData = false; - break; - } - } -// if (pRes1->numOfRows == 0 && !tscProjectionQueryOnMetric(pCmd) || -// (pMeterMeta->vnodeIndex >= pMeterMeta->pMetricMeta->numOfVnodes && )) { -// hasData = false; -// break; -// } - } - - if (!hasData) { // free all sub sqlobj - tscTrace("%p one subquery exhausted, free other %d subquery", pSql, pSql->numOfSubs - 1); + if (!tscHashRemainDataInSubqueryResultSet(pSql)) { // free all sub sqlobj + tscTrace("%p at least one subquery exhausted, free all other %d subqueries", pSql, pSql->numOfSubs - 1); SSubqueryState *pState = NULL; @@ -525,21 +523,20 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { } bool success = false; - if (pSql->numOfSubs >= 2) { // do merge result + if (pSql->numOfSubs >= 2) { // do merge result SSqlRes *pRes1 = &pSql->pSubs[0]->res; SSqlRes *pRes2 = &pSql->pSubs[1]->res; if (pRes1->row < pRes1->numOfRows && pRes2->row < pRes2->numOfRows) { doSetResultRowData(pSql->pSubs[0]); doSetResultRowData(pSql->pSubs[1]); -// TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; -// TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; -// printf("first:%lld, second:%lld\n", key1, key2); + // TSKEY key1 = *(TSKEY *)pRes1->tsrow[0]; + // TSKEY key2 = *(TSKEY *)pRes2->tsrow[0]; + // printf("first:%lld, second:%lld\n", key1, key2); success = true; pRes1->row++; pRes2->row++; } - } else { // only one subquery SSqlRes *pRes1 = &pSql->pSubs[0]->res; doSetResultRowData(pSql->pSubs[0]); @@ -547,7 +544,7 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { success = (pRes1->row++ < pRes1->numOfRows); } - if (success) { + if (success) { // current row of final output has been built, return to app for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { int32_t tableIndex = pRes->pColumnIndex[i].tableIndex; int32_t columnIndex = pRes->pColumnIndex[i].columnIndex; @@ -557,7 +554,7 @@ static void **tscJoinResultsetFromBuf(SSqlObj *pSql) { } break; - } else { + } else { // continue retrieve data from vnode tscFetchDatablockFromSubquery(pSql); if (pRes->code != TSDB_CODE_SUCCESS) { return NULL; @@ -579,7 +576,7 @@ TAOS_ROW taos_fetch_row_impl(TAOS_RES *res) { if (pCmd->command == TSDB_SQL_METRIC_JOIN_RETRIEVE) { tscFetchDatablockFromSubquery(pSql); - + if (pRes->code == TSDB_CODE_SUCCESS) { tscTrace("%p data from all subqueries have been retrieved to client", pSql); return tscJoinResultsetFromBuf(pSql); @@ -625,7 +622,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { SMeterMetaInfo *pMeterMetaInfo = tscGetMeterMetaInfo(pCmd, 0); // reach the maximum number of output rows, abort - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { + if (tscHasReachLimitation(pSql)) { return NULL; } @@ -645,7 +642,7 @@ TAOS_ROW taos_fetch_row(TAOS_RES *res) { * For super table join with projection query, if anyone of the subquery is exhausted, the query completed. 
*/ pSql->numOfSubs = 0; - + if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { pCmd->command = TSDB_SQL_SELECT; assert(pSql->fp == NULL); @@ -680,7 +677,7 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { nRows = taos_fetch_block_impl(res, rows); while (*rows == NULL && tscProjectionQueryOnMetric(pCmd)) { /* reach the maximum number of output rows, abort */ - if (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit) { + if (tscHasReachLimitation(pSql)) { return 0; } @@ -690,7 +687,6 @@ int taos_fetch_block(TAOS_RES *res, TAOS_ROW *rows) { pCmd->limit.limit = pSql->cmd.globalLimit - pRes->numOfTotal; pCmd->limit.offset = pRes->offset; - if ((++pMeterMetaInfo->vnodeIndex) < pMeterMetaInfo->pMetricMeta->numOfVnodes) { pSql->cmd.command = TSDB_SQL_SELECT; assert(pSql->fp == NULL); @@ -925,12 +921,11 @@ int taos_print_row(char *str, TAOS_ROW row, TAOS_FIELD *fields, int num_fields) size_t xlen = strlen(row[i]); size_t trueLen = MIN(xlen, fields[i].bytes); - memcpy(str + len, (char*) row[i], trueLen); + memcpy(str + len, (char *)row[i], trueLen); str[len + trueLen] = ' '; len += (trueLen + 1); - } - break; + } break; case TSDB_DATA_TYPE_TIMESTAMP: len += sprintf(str + len, "%lld ", *((int64_t *)row[i])); @@ -987,7 +982,7 @@ int taos_validate_sql(TAOS *taos, const char *sql) { return code; } -static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t tblListLen) { +static int tscParseTblNameList(SSqlObj *pSql, const char *tblNameList, int32_t tblListLen) { // must before clean the sqlcmd object tscRemoveAllMeterMetaInfo(&pSql->cmd, false); tscCleanSqlCmd(&pSql->cmd); @@ -998,11 +993,11 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t pCmd->count = 0; int code = TSDB_CODE_INVALID_METER_ID; - char *str = (char*) tblNameList; + char *str = (char *)tblNameList; SMeterMetaInfo *pMeterMetaInfo = tscAddEmptyMeterMetaInfo(pCmd); - if ((code = tscAllocPayload(pCmd, tblListLen+16)) != TSDB_CODE_SUCCESS) { + if ((code = tscAllocPayload(pCmd, tblListLen + 16)) != TSDB_CODE_SUCCESS) { return code; } @@ -1024,7 +1019,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t strtrim(tblName); len = (uint32_t)strlen(tblName); - + SSQLToken sToken = {.n = len, .type = TK_ID, .z = tblName}; tSQLGetToken(tblName, &sToken.type); @@ -1068,7 +1063,7 @@ static int tscParseTblNameList(SSqlObj *pSql, const char* tblNameList, int32_t t } int taos_load_table_info(TAOS *taos, const char *tableNameList) { - const int32_t MAX_TABLE_NAME_LENGTH = 12*1024*1024; // 12MB list + const int32_t MAX_TABLE_NAME_LENGTH = 12 * 1024 * 1024; // 12MB list STscObj *pObj = (STscObj *)taos; if (pObj == NULL || pObj->signature != pObj) { @@ -1092,7 +1087,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { return pRes->code; } - char* str = calloc(1, tblListLen + 1); + char *str = calloc(1, tblListLen + 1); if (str == NULL) { pRes->code = TSDB_CODE_CLI_OUT_OF_MEMORY; tscError("%p failed to malloc sql string buffer", pSql); @@ -1100,7 +1095,7 @@ int taos_load_table_info(TAOS *taos, const char *tableNameList) { } strtolower(str, tableNameList); - pRes->code = (uint8_t) tscParseTblNameList(pSql, str, tblListLen); + pRes->code = (uint8_t)tscParseTblNameList(pSql, str, tblListLen); /* * set the qhandle to 0 before return in order to erase the qhandle value assigned in the previous successful query. 
diff --git a/src/client/src/tscUtil.c b/src/client/src/tscUtil.c index 4521bcb156..d0da79651e 100644 --- a/src/client/src/tscUtil.c +++ b/src/client/src/tscUtil.c @@ -244,8 +244,7 @@ bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) { //for project query, only the following two function is allowed for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { - SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); - int32_t functionId = pExpr->functionId; + int32_t functionId = tscSqlExprGet(pCmd, i)->functionId; if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TAGPRJ && functionId != TSDB_FUNC_TAG && functionId != TSDB_FUNC_TS) { return false; @@ -255,6 +254,17 @@ bool tscProjectionQueryOnMetric(SSqlCmd* pCmd) { return true; } +bool tscProjectionQueryOnTable(SSqlCmd* pCmd) { + for (int32_t i = 0; i < pCmd->fieldsInfo.numOfOutputCols; ++i) { + int32_t functionId = tscSqlExprGet(pCmd, i)->functionId; + if (functionId != TSDB_FUNC_PRJ && functionId != TSDB_FUNC_TS) { + return false; + } + } + + return true; +} + bool tscIsPointInterpQuery(SSqlCmd* pCmd) { for (int32_t i = 0; i < pCmd->exprsInfo.numOfExprs; ++i) { SSqlExpr* pExpr = tscSqlExprGet(pCmd, i); @@ -1673,8 +1683,11 @@ SSqlObj* createSubqueryObj(SSqlObj* pSql, int16_t tableIndex, void (*fp)(), void char key[TSDB_MAX_TAGS_LEN + 1] = {0}; tscGetMetricMetaCacheKey(pCmd, key, pMetermetaInfo->pMeterMeta->uid); - printf("-----%s\n", key); - + +#ifdef _DEBUG_VIEW + printf("the metricmeta key is:%s\n", key); +#endif + char* name = pMeterMetaInfo->name; SMeterMetaInfo* pFinalInfo = NULL; @@ -1768,3 +1781,12 @@ int32_t tscInvalidSQLErrMsg(char *msg, const char *additionalInfo, const char *s return TSDB_CODE_INVALID_SQL; } +bool tscHasReachLimitation(SSqlObj* pSql) { + assert(pSql != NULL && pSql->cmd.globalLimit != 0); + + SSqlCmd* pCmd = &pSql->cmd; + SSqlRes* pRes = &pSql->res; + + return (pCmd->globalLimit > 0 && pRes->numOfTotal >= pCmd->globalLimit); +} +
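
Editorial note on the final patch ([tbase-1282]): the change threads a global LIMIT check through the multi-vnode projection retrieval path, so that scanning stops once pRes->numOfTotal reaches pCmd->globalLimit instead of blindly advancing vnodeIndex to the next vnode. The snippet below is only a minimal, self-contained C sketch of that control flow under simplified assumptions; the QueryState struct, hasReachedLimitation, fetch_from_vnode, and retrieveProjectionResult names are hypothetical stand-ins for illustration and are not the actual TDengine client API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, pared-down state mirroring pCmd->globalLimit / pRes->numOfTotal
 * and pMeterMetaInfo->vnodeIndex / numOfVnodes from the patch. */
typedef struct {
  long long globalLimit;  /* total rows requested by LIMIT; <= 0 means no limit */
  long long numOfTotal;   /* rows already returned to the application */
  int       vnodeIndex;   /* vnode currently being scanned */
  int       numOfVnodes;  /* number of vnodes holding data for the super table */
} QueryState;

/* Same predicate as tscHasReachLimitation in the patch, on the simplified state. */
static bool hasReachedLimitation(const QueryState *q) {
  return q->globalLimit > 0 && q->numOfTotal >= q->globalLimit;
}

/* Hypothetical helper standing in for one retrieval round-trip to a vnode. */
static long long fetch_from_vnode(int vnodeIndex) {
  (void)vnodeIndex;
  return 100;  /* pretend every vnode returns 100 rows */
}

/* Simplified retrieval loop: move on to the next vnode only while the global
 * limit has not been reached, which is the behaviour the patch enforces. */
static long long retrieveProjectionResult(QueryState *q) {
  while (q->vnodeIndex < q->numOfVnodes && !hasReachedLimitation(q)) {
    long long rows = fetch_from_vnode(q->vnodeIndex);
    if (q->globalLimit > 0 && q->numOfTotal + rows > q->globalLimit) {
      rows = q->globalLimit - q->numOfTotal;  /* trim the last batch to the limit */
    }
    q->numOfTotal += rows;
    q->vnodeIndex++;
  }
  return q->numOfTotal;
}

int main(void) {
  QueryState q = {.globalLimit = 250, .numOfTotal = 0, .vnodeIndex = 0, .numOfVnodes = 5};
  printf("rows returned: %lld\n", retrieveProjectionResult(&q));  /* prints 250, not 500 */
  return 0;
}

With these assumed numbers the loop stops after the third of five vnodes, which is the early-exit effect the globalLimit check added in tscUtil.c and taos_fetch_row is meant to provide.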