From 74d5f029697cac47d63eb43ec52765c50572f9de Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 5 Jun 2023 04:56:07 +0000 Subject: [PATCH 01/18] fix invalid free --- include/libs/function/function.h | 1 + include/libs/stream/tstream.h | 1 - source/libs/stream/inc/streamInc.h | 7 ++++-- source/libs/stream/src/streamBackendRocksdb.c | 18 +++++++++----- source/libs/stream/src/streamMeta.c | 3 +-- source/libs/stream/src/streamState.c | 24 +++++++++---------- 6 files changed, 31 insertions(+), 23 deletions(-) diff --git a/include/libs/function/function.h b/include/libs/function/function.h index e015f4182e..c92ce254a8 100644 --- a/include/libs/function/function.h +++ b/include/libs/function/function.h @@ -163,6 +163,7 @@ typedef struct { int64_t checkPointId; int32_t taskId; int64_t streamId; + int64_t streamBackendRid; } SStreamState; typedef struct SFunctionStateStore { diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 8316e6ef50..3222a125dd 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -344,7 +344,6 @@ typedef struct SStreamMeta { SRWLatch lock; int32_t walScanCounter; void* streamBackend; - int32_t streamBackendId; int64_t streamBackendRid; SHashObj* pTaskBackendUnique; } SStreamMeta; diff --git a/source/libs/stream/inc/streamInc.h b/source/libs/stream/inc/streamInc.h index 2c1956998a..c7ee308b61 100644 --- a/source/libs/stream/inc/streamInc.h +++ b/source/libs/stream/inc/streamInc.h @@ -36,8 +36,9 @@ static SStreamGlobalEnv streamEnv; int32_t streamDispatchStreamBlock(SStreamTask* pTask); SStreamDataBlock* createStreamDataFromDispatchMsg(const SStreamDispatchReq* pReq, int32_t blockType, int32_t srcVg); -SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, SArray* pRes); -void destroyStreamDataBlock(SStreamDataBlock* pBlock); +SStreamDataBlock* createStreamBlockFromResults(SStreamQueueItem* pItem, SStreamTask* pTask, int64_t resultSize, + SArray* pRes); +void destroyStreamDataBlock(SStreamDataBlock* pBlock); int32_t streamRetrieveReqToData(const SStreamRetrieveReq* pReq, SStreamDataBlock* pData); int32_t streamDispatchAllBlocks(SStreamTask* pTask, const SStreamDataBlock* data); @@ -53,6 +54,8 @@ int32_t streamDispatchOneRecoverFinishReq(SStreamTask* pTask, const SStreamRecov SStreamQueueItem* streamMergeQueueItem(SStreamQueueItem* dst, SStreamQueueItem* pElem); +extern int32_t streamBackendId; + #ifdef __cplusplus } #endif diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index b3995f020b..d190f4b43e 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -16,7 +16,9 @@ #include "streamBackendRocksdb.h" #include "executor.h" #include "query.h" +#include "streamInc.h" #include "tcommon.h" +#include "tref.h" typedef struct SCompactFilteFactory { void* status; @@ -79,7 +81,7 @@ const char* compareParKeyName(void* name); const char* comparePartagKeyName(void* name); void* streamBackendInit(const char* path) { - qDebug("init stream backend"); + qDebug("start to init stream backend at %s", path); SBackendHandle* pHandle = calloc(1, sizeof(SBackendHandle)); pHandle->list = tdListNew(sizeof(SCfComparator)); taosThreadMutexInit(&pHandle->mutex, NULL); @@ -129,6 +131,7 @@ void* streamBackendInit(const char* path) { if (cfs != NULL) { rocksdb_list_column_families_destroy(cfs, nCf); } + qDebug("succ to init stream backend at %s, backend:%p", path, pHandle); return 
(void*)pHandle; _EXIT: @@ -141,6 +144,7 @@ _EXIT: rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory); tdListFree(pHandle->list); free(pHandle); + qDebug("failed to init stream backend at %s", path); return NULL; } void streamBackendCleanup(void* arg) { @@ -180,7 +184,7 @@ void streamBackendCleanup(void* arg) { taosThreadMutexDestroy(&pHandle->cfMutex); taosMemoryFree(pHandle); - + qDebug("destroy stream backend backend:%p", pHandle); return; } SListNode* streamBackendAddCompare(void* backend, void* arg) { @@ -803,7 +807,8 @@ int32_t streamStateOpenBackendCf(void* backend, char* name, char** cfs, int32_t return 0; } int streamStateOpenBackend(void* backend, SStreamState* pState) { - qInfo("start to open backend, %p 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId); + qInfo("start to open state %p on backend %p 0x%" PRIx64 "-%d", pState, backend, pState->streamId, pState->taskId); + taosAcquireRef(streamBackendId, pState->streamBackendRid); SBackendHandle* handle = backend; sprintf(pState->pTdbState->idstr, "0x%" PRIx64 "-%d", pState->streamId, pState->taskId); @@ -866,7 +871,7 @@ int streamStateOpenBackend(void* backend, SStreamState* pState) { SCfComparator compare = {.comp = pCompare, .numOfComp = cfLen}; pState->pTdbState->pComparNode = streamBackendAddCompare(handle, &compare); // rocksdb_writeoptions_disable_WAL(pState->pTdbState->writeOpts, 1); - qInfo("succ to open backend, %p, 0x%" PRIx64 "-%d", pState, pState->streamId, pState->taskId); + qInfo("succ to open state %p on backend, %p, 0x%" PRIx64 "-%d", pState, handle, pState->streamId, pState->taskId); return 0; } @@ -882,8 +887,8 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) { taosThreadMutexUnlock(&pHandle->cfMutex); char* status[] = {"close", "drop"}; - qInfo("start to %s backend, %p, 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pState->streamId, - pState->taskId); + qInfo("start to %s state %p on backend %p 0x%" PRIx64 "-%d", status[remove == false ? 
0 : 1], pState, pHandle, + pState->streamId, pState->taskId); if (pState->pTdbState->rocksdb == NULL) { return; } @@ -938,6 +943,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) { taosThreadRwlockDestroy(&pState->pTdbState->rwLock); pState->pTdbState->rocksdb = NULL; + taosReleaseRef(streamBackendId, pState->streamBackendRid); } void streamStateDestroyCompar(void* arg) { SCfComparator* comp = (SCfComparator*)arg; diff --git a/source/libs/stream/src/streamMeta.c b/source/libs/stream/src/streamMeta.c index 8c26052fdb..ed9f99cf78 100644 --- a/source/libs/stream/src/streamMeta.c +++ b/source/libs/stream/src/streamMeta.c @@ -20,7 +20,7 @@ #include "ttimer.h" static TdThreadOnce streamMetaModuleInit = PTHREAD_ONCE_INIT; -static int32_t streamBackendId = 0; +int32_t streamBackendId = 0; static void streamMetaEnvInit() { streamBackendId = taosOpenRef(20, streamBackendCleanup); } void streamMetaInit() { taosThreadOnce(&streamMetaModuleInit, streamMetaEnvInit); } @@ -79,7 +79,6 @@ SStreamMeta* streamMetaOpen(const char* path, void* ahandle, FTaskExpand expandF pMeta->vgId = vgId; pMeta->ahandle = ahandle; pMeta->expandFunc = expandFunc; - pMeta->streamBackendId = streamBackendId; memset(streamPath, 0, len); sprintf(streamPath, "%s/%s", pMeta->path, "state"); diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 71a21ac150..0b91936f53 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -106,7 +106,7 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz } SStreamTask* pStreamTask = pTask; - char statePath[1024]; + char statePath[1024]; if (!specPath) { sprintf(statePath, "%s/%d", path, pStreamTask->id.taskId); } else { @@ -119,10 +119,10 @@ SStreamState* streamStateOpen(char* path, void* pTask, bool specPath, int32_t sz #ifdef USE_ROCKSDB SStreamMeta* pMeta = pStreamTask->pMeta; - taosAcquireRef(pMeta->streamBackendId, pMeta->streamBackendRid); + pState->streamBackendRid = pMeta->streamBackendRid; int code = streamStateOpenBackend(pMeta->streamBackend, pState); if (code == -1) { - taosReleaseRef(pMeta->streamBackendId, pMeta->streamBackendRid); + taosReleaseRef(streamBackendId, pMeta->streamBackendRid); taosMemoryFree(pState); pState = NULL; } @@ -224,7 +224,7 @@ void streamStateClose(SStreamState* pState, bool remove) { #ifdef USE_ROCKSDB // streamStateCloseBackend(pState); streamStateDestroy(pState, remove); - taosReleaseRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid); + //taosReleaseRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid); #else tdbCommit(pState->pTdbState->db, pState->pTdbState->txn); tdbPostCommit(pState->pTdbState->db, pState->pTdbState->txn); @@ -278,10 +278,10 @@ int32_t streamStateCommit(SStreamState* pState) { int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* value, int32_t vLen) { #ifdef USE_ROCKSDB - void* pVal = NULL; - int32_t len = 0; - int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len); - char* buf = ((SRowBuffPos*)pVal)->pRowBuff; + void* pVal = NULL; + int32_t len = 0; + int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), &pVal, &len); + char* buf = ((SRowBuffPos*)pVal)->pRowBuff; uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState); memcpy(buf + len - rowSize, value, vLen); return code; @@ -291,10 +291,10 @@ int32_t streamStateFuncPut(SStreamState* pState, const SWinKey* key, const void* } int32_t 
streamStateFuncGet(SStreamState* pState, const SWinKey* key, void** ppVal, int32_t* pVLen) { #ifdef USE_ROCKSDB - void* pVal = NULL; - int32_t len = 0; - int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len); - char* buf = ((SRowBuffPos*)pVal)->pRowBuff; + void* pVal = NULL; + int32_t len = 0; + int32_t code = getRowBuff(pState->pFileState, (void*)key, sizeof(SWinKey), (void**)(&pVal), &len); + char* buf = ((SRowBuffPos*)pVal)->pRowBuff; uint32_t rowSize = streamFileStateGeSelectRowSize(pState->pFileState); *ppVal = buf + len - rowSize; return code; From 3108ecf84be54b1eea9718591072230c5f3f03ca Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 5 Jun 2023 06:38:17 +0000 Subject: [PATCH 02/18] fix invalid free --- source/libs/stream/src/streamBackendRocksdb.c | 5 +++-- source/libs/stream/src/streamState.c | 2 -- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index d190f4b43e..9f1cab56e7 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -82,7 +82,7 @@ const char* comparePartagKeyName(void* name); void* streamBackendInit(const char* path) { qDebug("start to init stream backend at %s", path); - SBackendHandle* pHandle = calloc(1, sizeof(SBackendHandle)); + SBackendHandle* pHandle = taosMemoryCalloc(1, sizeof(SBackendHandle)); pHandle->list = tdListNew(sizeof(SCfComparator)); taosThreadMutexInit(&pHandle->mutex, NULL); taosThreadMutexInit(&pHandle->cfMutex, NULL); @@ -121,6 +121,7 @@ void* streamBackendInit(const char* path) { if (err != NULL) { qError("failed to open rocksdb, path:%s, reason:%s", path, err); taosMemoryFreeClear(err); + goto _EXIT; } } else { /* @@ -887,7 +888,7 @@ void streamStateCloseBackend(SStreamState* pState, bool remove) { taosThreadMutexUnlock(&pHandle->cfMutex); char* status[] = {"close", "drop"}; - qInfo("start to %s state %p on backend %p 0x%" PRIx64 "-%d", status[remove == false ? 0 : 1], pState, pHandle, + qInfo("start to close %s state %p on backend %p 0x%" PRIx64 "-%d", status[remove == false ? 
0 : 1], pState, pHandle, pState->streamId, pState->taskId); if (pState->pTdbState->rocksdb == NULL) { return; diff --git a/source/libs/stream/src/streamState.c b/source/libs/stream/src/streamState.c index 0b91936f53..967c7733c9 100644 --- a/source/libs/stream/src/streamState.c +++ b/source/libs/stream/src/streamState.c @@ -222,9 +222,7 @@ _err: void streamStateClose(SStreamState* pState, bool remove) { SStreamTask* pTask = pState->pTdbState->pOwner; #ifdef USE_ROCKSDB - // streamStateCloseBackend(pState); streamStateDestroy(pState, remove); - //taosReleaseRef(pTask->pMeta->streamBackendId, pTask->pMeta->streamBackendRid); #else tdbCommit(pState->pTdbState->db, pState->pTdbState->txn); tdbPostCommit(pState->pTdbState->db, pState->pTdbState->txn); From be34546edd8935917f05e663dede14a0b919e5af Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 5 Jun 2023 15:19:06 +0800 Subject: [PATCH 03/18] enh: enable interp fill value support scarlar expression --- source/libs/parser/src/parTranslater.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index c10ee5d988..57f708c5cd 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -707,6 +707,10 @@ static bool isWindowPseudoColumnFunc(const SNode* pNode) { return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsWindowPseudoColumnFunc(((SFunctionNode*)pNode)->funcId)); } +static bool isInterpFunc(const SNode* pNode) { + return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpFunc(((SFunctionNode*)pNode)->funcId)); +} + static bool isInterpPseudoColumnFunc(const SNode* pNode) { return (QUERY_NODE_FUNCTION == nodeType(pNode) && fmIsInterpPseudoColumnFunc(((SFunctionNode*)pNode)->funcId)); } @@ -3006,7 +3010,7 @@ static int32_t translateOrderBy(STranslateContext* pCxt, SSelectStmt* pSelect) { } static EDealRes needFillImpl(SNode* pNode, void* pContext) { - if (isAggFunc(pNode) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) { + if ((isAggFunc(pNode) || isInterpFunc(pNode)) && FUNCTION_TYPE_GROUP_KEY != ((SFunctionNode*)pNode)->funcType) { *(bool*)pContext = true; return DEAL_RES_END; } @@ -3517,6 +3521,9 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect if (TSDB_CODE_SUCCESS == code) { code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true); } + if (TSDB_CODE_SUCCESS == code) { + code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList); + } return code; } From c77f7f65d2eaa9b7442362fda71658289dbb89e3 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 5 Jun 2023 07:32:28 +0000 Subject: [PATCH 04/18] fix invalid free --- source/libs/stream/src/streamBackendRocksdb.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 9f1cab56e7..3335881b37 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -173,15 +173,16 @@ void streamBackendCleanup(void* arg) { rocksdb_env_destroy(pHandle->env); rocksdb_cache_destroy(pHandle->cache); - taosThreadMutexDestroy(&pHandle->mutex); SListNode* head = tdListPopHead(pHandle->list); while (head != NULL) { streamStateDestroyCompar(head->data); taosMemoryFree(head); head = tdListPopHead(pHandle->list); } - // rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory); + 
tdListFree(pHandle->list); + taosThreadMutexDestroy(&pHandle->mutex); + taosThreadMutexDestroy(&pHandle->cfMutex); taosMemoryFree(pHandle); From 6b670b7f0801b490e7e5e66e25428be233529eb1 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 5 Jun 2023 15:48:15 +0800 Subject: [PATCH 05/18] fix desc --- source/libs/parser/src/parTranslater.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index 57f708c5cd..be3c41e2ee 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3034,7 +3034,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList code = scalarCalculateConstants(pCaseFunc, &pCell->pNode); } if (TSDB_CODE_SUCCESS == code && QUERY_NODE_VALUE != nodeType(pCell->pNode)) { - code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value is just a constant"); + code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Fill value can only accept constant"); } else if (TSDB_CODE_SUCCESS != code) { code = generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled data type mismatch"); } From 005182c3c925169d45392feb0755beaf50efca2a Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 5 Jun 2023 16:40:48 +0800 Subject: [PATCH 06/18] fix multiple interp issue --- source/libs/parser/src/parTranslater.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index be3c41e2ee..f049af6747 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3041,7 +3041,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList return code; } -static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) { +static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList, bool isInterpFill) { if (FILL_MODE_VALUE != pFill->mode && FILL_MODE_VALUE_F != pFill->mode) { return TSDB_CODE_SUCCESS; } @@ -3058,10 +3058,13 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL if (TSDB_CODE_SUCCESS != code) { return code; } - ++fillNo; + + if (!isInterpFill) { + ++fillNo; + } } } - if (fillNo != LIST_LENGTH(pFillValues->pNodeList)) { + if (!isInterpFill && fillNo != LIST_LENGTH(pFillValues->pNodeList)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch"); } return TSDB_CODE_SUCCESS; @@ -3072,7 +3075,7 @@ static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) { return TSDB_CODE_SUCCESS; } - return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList); + return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList, false); } static int32_t rewriteProjectAlias(SNodeList* pProjectionList) { @@ -3522,7 +3525,7 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true); } if (TSDB_CODE_SUCCESS == code) { - code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList); + code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, 
pSelect->pProjectionList, true); } return code; From 384aa4d70bc1751037ec9af1be813eba490af5ad Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Mon, 5 Jun 2023 16:41:09 +0800 Subject: [PATCH 07/18] add test cases --- tests/system-test/2-query/interp.py | 128 ++++++++++++++++++++++++++++ 1 file changed, 128 insertions(+) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 121d4dcff6..019b449dfd 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -219,6 +219,56 @@ class TDTestCase: tdSql.checkData(2, 0, 12) tdSql.checkData(3, 0, 12) + ## test fill value with scalar expression + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1.0 + 2.0)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + 2.5)") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + '2')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, 1 + '2.0')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '3' + 'abc')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + + tdSql.query(f"select interp(c0) from {dbname}.{tbname} range('2020-02-01 00:00:16', '2020-02-01 00:00:19') every(1s) fill(value, '2' + '1abc')") + tdSql.checkRows(4) + tdSql.checkData(0, 0, 3) + tdSql.checkData(1, 0, 3) + tdSql.checkData(2, 0, 3) + tdSql.checkData(3, 0, 3) + tdLog.printNoPrefix("==========step5:fill prev") ## {. . 
.} @@ -1837,6 +1887,84 @@ class TDTestCase: tdSql.checkData(59, 1, 123) tdSql.checkData(60, 1, 60) # + tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123 + 123)") + tdSql.checkRows(61) + tdSql.checkCols(2) + tdSql.checkData(0, 0, 0) # + tdSql.checkData(1, 0, 246) + tdSql.checkData(4, 0, 246) + tdSql.checkData(5, 0, None) # + tdSql.checkData(6, 0, 246) + tdSql.checkData(9, 0, 246) + tdSql.checkData(10, 0, 10) # + tdSql.checkData(11, 0, 246) + tdSql.checkData(14, 0, 246) + tdSql.checkData(15, 0, None) # + tdSql.checkData(16, 0, 246) + tdSql.checkData(19, 0, 246) + tdSql.checkData(20, 0, 20) # + tdSql.checkData(21, 0, 246) + tdSql.checkData(24, 0, 246) + tdSql.checkData(25, 0, None) # + tdSql.checkData(26, 0, 246) + tdSql.checkData(29, 0, 246) + tdSql.checkData(30, 0, 30) # + tdSql.checkData(31, 0, 246) + tdSql.checkData(34, 0, 246) + tdSql.checkData(35, 0, 35) # + tdSql.checkData(36, 0, 246) + tdSql.checkData(39, 0, 246) + tdSql.checkData(40, 0, 40) # + tdSql.checkData(41, 0, 246) + tdSql.checkData(44, 0, 246) + tdSql.checkData(45, 0, None) # + tdSql.checkData(46, 0, 246) + tdSql.checkData(49, 0, 246) + tdSql.checkData(50, 0, 50) # + tdSql.checkData(51, 0, 246) + tdSql.checkData(54, 0, 246) + tdSql.checkData(55, 0, None) # + tdSql.checkData(59, 0, 246) + tdSql.checkData(60, 0, 55) # + + tdSql.checkData(0, 1, None) # + tdSql.checkData(1, 1, 246) + tdSql.checkData(4, 1, 246) + tdSql.checkData(5, 1, None) # + tdSql.checkData(6, 1, 246) + tdSql.checkData(9, 1, 246) + tdSql.checkData(10, 1, 10) # + tdSql.checkData(11, 1, 246) + tdSql.checkData(14, 1, 246) + tdSql.checkData(15, 1, None) # + tdSql.checkData(16, 1, 246) + tdSql.checkData(19, 1, 246) + tdSql.checkData(20, 1, None) # + tdSql.checkData(21, 1, 246) + tdSql.checkData(24, 1, 246) + tdSql.checkData(25, 1, None) # + tdSql.checkData(26, 1, 246) + tdSql.checkData(29, 1, 246) + tdSql.checkData(30, 1, 30) # + tdSql.checkData(31, 1, 246) + tdSql.checkData(34, 1, 246) + tdSql.checkData(35, 1, None) # + tdSql.checkData(36, 1, 246) + tdSql.checkData(39, 1, 246) + tdSql.checkData(40, 1, 40) # + tdSql.checkData(41, 1, 246) + tdSql.checkData(44, 1, 246) + tdSql.checkData(45, 1, 45) # + tdSql.checkData(46, 1, 246) + tdSql.checkData(49, 1, 246) + tdSql.checkData(50, 1, None) # + tdSql.checkData(51, 1, 246) + tdSql.checkData(54, 1, 246) + tdSql.checkData(55, 1, None) # + tdSql.checkData(56, 1, 246) + tdSql.checkData(59, 1, 246) + tdSql.checkData(60, 1, 60) # + # test fill prev tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(prev)") tdSql.checkRows(61) From 63c4929089a80190670b2f5d3b15bd251bf36680 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 5 Jun 2023 08:42:00 +0000 Subject: [PATCH 08/18] fix invalid free --- source/libs/stream/src/streamBackendRocksdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/source/libs/stream/src/streamBackendRocksdb.c b/source/libs/stream/src/streamBackendRocksdb.c index 3335881b37..df045eef20 100644 --- a/source/libs/stream/src/streamBackendRocksdb.c +++ b/source/libs/stream/src/streamBackendRocksdb.c @@ -144,7 +144,7 @@ _EXIT: taosHashCleanup(pHandle->cfInst); rocksdb_compactionfilterfactory_destroy(pHandle->filterFactory); tdListFree(pHandle->list); - free(pHandle); + taosMemoryFree(pHandle); qDebug("failed to init stream backend at %s", path); return NULL; } From 4f2f441816863fdad92a7e37b6ff5d77c46dde4a Mon Sep 17 
00:00:00 2001 From: yihaoDeng Date: Mon, 5 Jun 2023 09:06:50 +0000 Subject: [PATCH 09/18] fix invalid free --- include/os/osMemory.h | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/include/os/osMemory.h b/include/os/osMemory.h index 18cd0d9cc6..683d10e926 100644 --- a/include/os/osMemory.h +++ b/include/os/osMemory.h @@ -22,21 +22,20 @@ extern "C" { // If the error is in a third-party library, place this header file under the third-party library header file. // When you want to use this feature, you should find or add the same function in the following sectio -// #if !defined(WINDOWS) +#if !defined(WINDOWS) -// #ifndef ALLOW_FORBID_FUNC -// #define malloc MALLOC_FUNC_TAOS_FORBID -// #define calloc CALLOC_FUNC_TAOS_FORBID -// #define realloc REALLOC_FUNC_TAOS_FORBID -// #define free FREE_FUNC_TAOS_FORBID -// #ifdef strdup -// #undef strdup -// #define strdup STRDUP_FUNC_TAOS_FORBID -// #endif -// #endif // ifndef ALLOW_FORBID_FUNC -// #endif // if !defined(WINDOWS) +#ifndef ALLOW_FORBID_FUNC +#define malloc MALLOC_FUNC_TAOS_FORBID +#define calloc CALLOC_FUNC_TAOS_FORBID +#define realloc REALLOC_FUNC_TAOS_FORBID +#define free FREE_FUNC_TAOS_FORBID +#ifdef strdup +#undef strdup +#define strdup STRDUP_FUNC_TAOS_FORBID +#endif +#endif // ifndef ALLOW_FORBID_FUNC +#endif // if !defined(WINDOWS) -// // #define taosMemoryFree malloc // #define taosMemoryMalloc malloc // #define taosMemoryCalloc calloc // #define taosMemoryRealloc realloc From cb024ca069bc0f8852171ac950f9d41b45fd73c0 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Mon, 5 Jun 2023 10:52:07 +0000 Subject: [PATCH 10/18] fix invalid free --- source/libs/stream/src/tstreamFileState.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/stream/src/tstreamFileState.c b/source/libs/stream/src/tstreamFileState.c index bc84509728..bfaeca89f6 100644 --- a/source/libs/stream/src/tstreamFileState.c +++ b/source/libs/stream/src/tstreamFileState.c @@ -419,7 +419,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { if (code != 0 || len == 0 || val == NULL) { return TSDB_CODE_FAILED; } - memcpy(val, buf, len); + memcpy(buf, val, len); buf[len] = 0; maxCheckPointId = atol((char*)buf); taosMemoryFree(val); @@ -433,7 +433,7 @@ int32_t deleteExpiredCheckPoint(SStreamFileState* pFileState, TSKEY mark) { if (code != 0) { return TSDB_CODE_FAILED; } - memcpy(val, buf, len); + memcpy(buf, val, len); buf[len] = 0; taosMemoryFree(val); From 3cb64f3775e23c72fe29926dc4302c8c8cc37362 Mon Sep 17 00:00:00 2001 From: wangjiaming0909 <604227650@qq.com> Date: Tue, 6 Jun 2023 19:28:55 +0800 Subject: [PATCH 11/18] fix: prevent projectoperator scanning all rows when limit with no group --- source/libs/executor/src/groupoperator.c | 2 ++ source/libs/executor/src/joinoperator.c | 5 +++++ source/libs/executor/src/projectoperator.c | 2 ++ 3 files changed, 9 insertions(+) diff --git a/source/libs/executor/src/groupoperator.c b/source/libs/executor/src/groupoperator.c index 7aac639027..c448ea0160 100644 --- a/source/libs/executor/src/groupoperator.c +++ b/source/libs/executor/src/groupoperator.c @@ -647,6 +647,8 @@ uint64_t calcGroupId(char* pData, int32_t len) { // NOTE: only extract the initial 8 bytes of the final MD5 digest uint64_t id = 0; memcpy(&id, context.digest, sizeof(uint64_t)); + if (0 == id) + memcpy(&id, context.digest + 8, sizeof(uint64_t)); return id; } diff --git a/source/libs/executor/src/joinoperator.c b/source/libs/executor/src/joinoperator.c index 
754b5f4737..744e513b96 100644 --- a/source/libs/executor/src/joinoperator.c +++ b/source/libs/executor/src/joinoperator.c @@ -183,6 +183,11 @@ void destroyMergeJoinOperator(void* param) { SJoinOperatorInfo* pJoinOperator = (SJoinOperatorInfo*)param; nodesDestroyNode(pJoinOperator->pCondAfterMerge); + taosArrayDestroy(pJoinOperator->rowCtx.leftCreatedBlocks); + taosArrayDestroy(pJoinOperator->rowCtx.rightCreatedBlocks); + taosArrayDestroy(pJoinOperator->rowCtx.leftRowLocations); + taosArrayDestroy(pJoinOperator->rowCtx.rightRowLocations); + pJoinOperator->pRes = blockDataDestroy(pJoinOperator->pRes); taosMemoryFreeClear(param); } diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c index dde6f7c0e8..e7de826d4b 100644 --- a/source/libs/executor/src/projectoperator.c +++ b/source/libs/executor/src/projectoperator.c @@ -213,6 +213,8 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS } else { if (limitReached && (pLimitInfo->slimit.limit >= 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) { setOperatorCompleted(pOperator); + } else if (limitReached && groupId == 0) { + setOperatorCompleted(pOperator); } } From 1e9c4d5facfaf7dbae9c2d6c30636af6193e563c Mon Sep 17 00:00:00 2001 From: liuyao <54liuyao@163.com> Date: Wed, 7 Jun 2023 10:45:27 +0800 Subject: [PATCH 12/18] opt stream block dispatch --- include/common/tdatablock.h | 1 + include/libs/stream/tstream.h | 1 + source/common/src/tdatablock.c | 24 +++++++--- source/dnode/vnode/src/tq/tqSink.c | 6 +-- source/libs/stream/src/streamDispatch.c | 63 ++++++++++++++++++------- source/libs/stream/src/streamTask.c | 4 ++ 6 files changed, 71 insertions(+), 28 deletions(-) diff --git a/include/common/tdatablock.h b/include/common/tdatablock.h index 53fc07c3f3..6cb7d88523 100644 --- a/include/common/tdatablock.h +++ b/include/common/tdatablock.h @@ -248,6 +248,7 @@ int32_t buildSubmitReqFromDataBlock(SSubmitReq2** pReq, const SSDataBlock* pData tb_uid_t suid); char* buildCtbNameByGroupId(const char* stbName, uint64_t groupId); +int32_t buildCtbNameByGroupIdImpl(const char* stbName, uint64_t groupId, char* pBuf); static FORCE_INLINE int32_t blockGetEncodeSize(const SSDataBlock* pBlock) { return blockDataGetSerialMetaSize(taosArrayGetSize(pBlock->pDataBlock)) + blockDataGetSize(pBlock); diff --git a/include/libs/stream/tstream.h b/include/libs/stream/tstream.h index 8316e6ef50..51f2de481d 100644 --- a/include/libs/stream/tstream.h +++ b/include/libs/stream/tstream.h @@ -327,6 +327,7 @@ struct SStreamTask { int64_t checkpointingId; int32_t checkpointAlignCnt; struct SStreamMeta* pMeta; + SSHashObj* pNameMap; }; // meta diff --git a/source/common/src/tdatablock.c b/source/common/src/tdatablock.c index 24e978b0ea..033fbb0ef1 100644 --- a/source/common/src/tdatablock.c +++ b/source/common/src/tdatablock.c @@ -2465,19 +2465,31 @@ _end: } char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) { - if (stbFullName[0] == 0) { + char* pBuf = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1); + if (!pBuf) { return NULL; } + int32_t code = buildCtbNameByGroupIdImpl(stbFullName, groupId, pBuf); + if (code != TSDB_CODE_SUCCESS) { + taosMemoryFree(pBuf); + return NULL; + } + return pBuf; +} + +int32_t buildCtbNameByGroupIdImpl(const char* stbFullName, uint64_t groupId, char* cname) { + if (stbFullName[0] == 0) { + return TSDB_CODE_FAILED; + } SArray* tags = taosArrayInit(0, sizeof(SSmlKv)); if (tags == NULL) { - return NULL; + return TSDB_CODE_FAILED; } - void* 
cname = taosMemoryCalloc(1, TSDB_TABLE_NAME_LEN + 1); if (cname == NULL) { taosArrayDestroy(tags); - return NULL; + return TSDB_CODE_FAILED; } SSmlKv pTag = {.key = "group_id", @@ -2499,9 +2511,9 @@ char* buildCtbNameByGroupId(const char* stbFullName, uint64_t groupId) { taosArrayDestroy(tags); if ((rname.ctbShortName && rname.ctbShortName[0]) == 0) { - return NULL; + return TSDB_CODE_FAILED; } - return rname.ctbShortName; + return TSDB_CODE_SUCCESS; } int32_t blockEncode(const SSDataBlock* pBlock, char* data, int32_t numOfCols) { diff --git a/source/dnode/vnode/src/tq/tqSink.c b/source/dnode/vnode/src/tq/tqSink.c index db1b5ed902..9349c6eb0d 100644 --- a/source/dnode/vnode/src/tq/tqSink.c +++ b/source/dnode/vnode/src/tq/tqSink.c @@ -298,10 +298,8 @@ void tqSinkToTablePipeline(SStreamTask* pTask, void* vnode, int64_t ver, void* d if (res == TSDB_CODE_SUCCESS) { memcpy(ctbName, pTableSinkInfo->tbName, strlen(pTableSinkInfo->tbName)); } else { - char* tmp = buildCtbNameByGroupId(stbFullName, pDataBlock->info.id.groupId); - memcpy(ctbName, tmp, strlen(tmp)); - memcpy(pTableSinkInfo->tbName, tmp, strlen(tmp)); - taosMemoryFree(tmp); + buildCtbNameByGroupIdImpl(stbFullName, pDataBlock->info.id.groupId, ctbName); + memcpy(pTableSinkInfo->tbName, ctbName, strlen(ctbName)); tqDebug("vgId:%d, gropuId:%" PRIu64 " datablock table name is null", TD_VID(pVnode), pDataBlock->info.id.groupId); } diff --git a/source/libs/stream/src/streamDispatch.c b/source/libs/stream/src/streamDispatch.c index 9cb0a56644..922a1f5345 100644 --- a/source/libs/stream/src/streamDispatch.c +++ b/source/libs/stream/src/streamDispatch.c @@ -15,6 +15,13 @@ #include "streamInc.h" +#define MAX_BLOCK_NAME_NUM 1024 + +typedef struct SBlockName { + uint32_t hashValue; + char parTbName[TSDB_TABLE_NAME_LEN]; +} SBlockName; + int32_t tEncodeStreamDispatchReq(SEncoder* pEncoder, const SStreamDispatchReq* pReq) { if (tStartEncode(pEncoder) < 0) return -1; if (tEncodeI64(pEncoder, pReq->streamId) < 0) return -1; @@ -331,26 +338,46 @@ FAIL: int32_t streamSearchAndAddBlock(SStreamTask* pTask, SStreamDispatchReq* pReqs, SSDataBlock* pDataBlock, int32_t vgSz, int64_t groupId) { - char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN); - if (ctbName == NULL) { - return -1; - } - - if (pDataBlock->info.parTbName[0]) { - snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName); - } else { - char* ctbShortName = buildCtbNameByGroupId(pTask->shuffleDispatcher.stbFullName, groupId); - snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, ctbShortName); - taosMemoryFree(ctbShortName); - } - + uint32_t hashValue = 0; SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; + if (pTask->pNameMap == NULL) { + pTask->pNameMap = tSimpleHashInit(1024, taosGetDefaultHashFunction(TSDB_DATA_TYPE_BIGINT)); + } - /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/ - SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo; - uint32_t hashValue = - taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix); - taosMemoryFree(ctbName); + void* pVal = tSimpleHashGet(pTask->pNameMap, &groupId, sizeof(int64_t)); + if (pVal) { + SBlockName* pBln = (SBlockName*)pVal; + hashValue = pBln->hashValue; + if (!pDataBlock->info.parTbName[0]) { + memcpy(pDataBlock->info.parTbName, pBln->parTbName, strlen(pBln->parTbName)); + } + } else { + char* ctbName = taosMemoryCalloc(1, TSDB_TABLE_FNAME_LEN); + if (ctbName == NULL) { + 
return -1; + } + + if (pDataBlock->info.parTbName[0]) { + snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName); + } else { + buildCtbNameByGroupIdImpl(pTask->shuffleDispatcher.stbFullName, groupId, pDataBlock->info.parTbName); + snprintf(ctbName, TSDB_TABLE_NAME_LEN, "%s.%s", pTask->shuffleDispatcher.dbInfo.db, pDataBlock->info.parTbName); + } + + SArray* vgInfo = pTask->shuffleDispatcher.dbInfo.pVgroupInfos; + + /*uint32_t hashValue = MurmurHash3_32(ctbName, strlen(ctbName));*/ + SUseDbRsp* pDbInfo = &pTask->shuffleDispatcher.dbInfo; + hashValue = + taosGetTbHashVal(ctbName, strlen(ctbName), pDbInfo->hashMethod, pDbInfo->hashPrefix, pDbInfo->hashSuffix); + taosMemoryFree(ctbName); + SBlockName bln = {0}; + bln.hashValue = hashValue; + memcpy(bln.parTbName, pDataBlock->info.parTbName, strlen(pDataBlock->info.parTbName)); + if (tSimpleHashGetSize(pTask->pNameMap) < MAX_BLOCK_NAME_NUM) { + tSimpleHashPut(pTask->pNameMap, &groupId, sizeof(int64_t), &bln, sizeof(SBlockName)); + } + } bool found = false; // TODO: optimize search diff --git a/source/libs/stream/src/streamTask.c b/source/libs/stream/src/streamTask.c index a0caffd41f..284d1ecab6 100644 --- a/source/libs/stream/src/streamTask.c +++ b/source/libs/stream/src/streamTask.c @@ -224,5 +224,9 @@ void tFreeStreamTask(SStreamTask* pTask) { taosMemoryFree((void*)pTask->id.idStr); } + if (pTask->pNameMap) { + tSimpleHashCleanup(pTask->pNameMap); + } + taosMemoryFree(pTask); } From 3f26d67cae7442595c6406c1ce5c6b9b1d391d29 Mon Sep 17 00:00:00 2001 From: huolibo Date: Wed, 7 Jun 2023 13:50:32 +0800 Subject: [PATCH 13/18] fix: change kafka doc, delete confluent related content --- docs/en/20-third-party/11-kafka.md | 290 +++++++++++------------------ docs/zh/20-third-party/11-kafka.md | 289 +++++++++++----------------- 2 files changed, 212 insertions(+), 367 deletions(-) diff --git a/docs/en/20-third-party/11-kafka.md b/docs/en/20-third-party/11-kafka.md index f09ebb274c..1fc2b57a13 100644 --- a/docs/en/20-third-party/11-kafka.md +++ b/docs/en/20-third-party/11-kafka.md @@ -16,165 +16,79 @@ TDengine Source Connector is used to read data from TDengine in real-time and se ![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) -## What is Confluent? - -[Confluent](https://www.confluent.io/) adds many extensions to Kafka. include: - -1. Schema Registry -2. REST Proxy -3. Non-Java Clients -4. Many packaged Kafka Connect plugins -5. GUI for managing and monitoring Kafka - Confluent Control Center - -Some of these extensions are available in the community version of Confluent. Some are only available in the enterprise version. -![TDengine Database Kafka Connector -- Confluent platform](kafka/confluentPlatform.webp) - -Confluent Enterprise Edition provides the `confluent` command-line tool to manage various components. - ## Prerequisites 1. Linux operating system 2. Java 8 and Maven installed -3. Git is installed +3. Git/curl/vi is installed 4. TDengine is installed and started. If not, please refer to [Installation and Uninstallation](/operation/pkg-install) -## Install Confluent - -Confluent provides two installation methods: Docker and binary packages. This article only introduces binary package installation. 
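Before running the download commands that follow, it can be worth confirming the prerequisites listed above, since both Kafka itself and the connector build depend on them. A minimal sketch, assuming the tools are already on the PATH (version output will vary by system):

```shell
# Kafka 3.4 and the connector build require a JDK; this guide assumes Java 8+.
java -version
# Maven is needed to build kafka-connect-tdengine from source.
mvn -version
# git fetches the connector source; curl drives the Kafka Connect REST API later on.
git --version && curl --version | head -n 1
```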
+## Install Kafka Execute in any directory: ```` -curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz -tar xzf confluent-7.1.1.tar.gz -C /opt/ +curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz +tar xzf kafka_2.13-3.4.0.tgz -C /opt/ +ln -s /opt/kafka_2.13-3.4.0 /opt/kafka ```` -Then you need to add the `$CONFLUENT_HOME/bin` directory to the PATH. +Then you need to add the `$KAFKA_HOME/bin` directory to the PATH. ```title=".profile" -export CONFLUENT_HOME=/opt/confluent-7.1.1 -export PATH=$CONFLUENT_HOME/bin:$PATH +export KAFKA_HOME=/opt/kafka +export PATH=$PATH:$KAFKA_HOME/bin ``` Users can append the above script to the current user's profile file (~/.profile or ~/.bash_profile) -After the installation is complete, you can enter `confluent version` for simple verification: - -``` -# confluent version -confluent - Confluent CLI - -Version: v2.6.1 -Git Ref: 6d920590 -Build Date: 2022-02-18T06:14:21Z -Go Version: go1.17.6 (linux/amd64) -Development: false -``` - ## Install TDengine Connector plugin ### Install from source code -``` +```shell git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git cd kafka-connect-tdengine -mvn clean package -unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip +mvn clean package -Dmaven.test.skip=true +unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip ``` -The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$CONFLUENT_HOME/share/java/` above because it's a build in plugin path. +The above script first clones the project source code and then compiles and packages it with Maven. After the package is complete, the zip package of the plugin is generated in the `target/components/packages/` directory. Unzip this zip package to plugin path. We used `$KAFKA_HOME/components/` above because it's a build in plugin path. -### Install with confluent-hub +### Add configuration file -[Confluent Hub](https://www.confluent.io/hub) provides a service to download Kafka Connect plugins. After TDengine Kafka Connector is published to Confluent Hub, it can be installed using the command tool `confluent-hub`. -**TDengine Kafka Connector is currently not officially released and cannot be installed in this way**. +add kafka-connect-tdengine plugin path to `plugin.path` in `$KAFKA_HOME/config/connect-distributed.properties`. -## Start Confluent - -``` -confluent local services start +```properties +plugin.path=/usr/share/java,/opt/kafka/components ``` -:::note -Be sure to install the plugin before starting Confluent. Otherwise, Kafka Connect will fail to discover the plugins. -::: +## Start Kafka Services -:::tip -If a component fails to start, try clearing the data and restarting. 
The data directory will be printed to the console at startup, e.g.: +Use command bellow to start all services: -```title="Console output log" {1} -Using CONFLUENT_CURRENT: /tmp/confluent.106668 -Starting ZooKeeper -ZooKeeper is [UP] -Starting Kafka -Kafka is [UP] -Starting Schema Registry -Schema Registry is [UP] -Starting Kafka REST -Kafka REST is [UP] -Starting Connect -Connect is [UP] -Starting ksqlDB Server -ksqlDB Server is [UP] -Starting Control Center -Control Center is [UP] -``` +```shell +zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties -To clear data, execute `rm -rf /tmp/confluent.106668`. -::: +kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties -### Check Confluent Services Status +connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties -Use command bellow to check the status of all service: - -``` -confluent local services status -``` - -The expected output is: -``` -Connect is [UP] -Control Center is [UP] -Kafka is [UP] -Kafka REST is [UP] -ksqlDB Server is [UP] -Schema Registry is [UP] -ZooKeeper is [UP] ``` ### Check Successfully Loaded Plugin After Kafka Connect was completely started, you can use bellow command to check if our plugins are installed successfully: -``` -confluent local services connect plugin list + +```shell +curl http://localhost:8083/connectors ``` -The output should contains `TDengineSinkConnector` and `TDengineSourceConnector` as bellow: +The output as bellow: +```txt +[] ``` -Available Connect Plugins: -[ - { - "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", - "type": "sink", - "version": "1.0.0" - }, - { - "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", - "type": "source", - "version": "1.0.0" - }, -...... -``` - -If not, please check the log file of Kafka Connect. To view the log file path, please execute: - -``` -echo `cat /tmp/confluent.current`/connect/connect.stdout -``` -It should produce a path like:`/tmp/confluent.104086/connect/connect.stdout` - -Besides log file `connect.stdout` there is a file named `connect.properties`. At the end of this file you can see the effective `plugin.path` which is a series of paths joined by comma. If Kafka Connect not found our plugins, it's probably because the installed path is not included in `plugin.path`. ## The use of TDengine Sink Connector @@ -184,40 +98,47 @@ TDengine Sink Connector internally uses TDengine [modeless write interface](/ref The following example synchronizes the data of the topic meters to the target database power. The data format is the InfluxDB Line protocol format. 
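Before adding the sink configuration, it can also help to confirm that Connect discovered the plugin classes themselves; the `/connectors` call above lists only running instances, not installed plugins. A short sketch against the stock Kafka Connect REST API:

```shell
# /connector-plugins lists every connector class found on plugin.path;
# both TDengine classes should appear before any instance is created.
curl -s http://localhost:8083/connector-plugins | grep -o 'com\.taosdata[^"]*'
```

If the class names `com.taosdata.kafka.connect.sink.TDengineSinkConnector` and `com.taosdata.kafka.connect.source.TDengineSourceConnector` are missing, re-check the `plugin.path` setting in `connect-distributed.properties`.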
-### Add configuration file +### Add Sink Connector configuration file -``` +```shell mkdir ~/test cd ~/test -vi sink-demo.properties +vi sink-demo.json ``` -sink-demo.properties' content is following: +sink-demo.json' content is following: -```ini title="sink-demo.properties" -name=TDengineSinkConnector -connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector -tasks.max=1 -topics=meters -connection.url=jdbc:TAOS://127.0.0.1:6030 -connection.user=root -connection.password=taosdata -connection.database=power -db.schemaless=line -data.precision=ns -key.converter=org.apache.kafka.connect.storage.StringConverter -value.converter=org.apache.kafka.connect.storage.StringConverter +```json title="sink-demo.json" +{ + "name": "TDengineSinkConnector", + "config": { + "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "tasks.max": "1", + "topics": "meters", + "connection.url": "jdbc:TAOS://127.0.0.1:6030", + "connection.user": "root", + "connection.password": "taosdata", + "connection.database": "power", + "db.schemaless": "line", + "data.precision": "ns", + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "value.converter": "org.apache.kafka.connect.storage.StringConverter", + "errors.tolerance": "all", + "errors.deadletterqueue.topic.name": "dead_letter_topic", + "errors.deadletterqueue.topic.replication.factor": 1 + } +} ``` Key configuration instructions: -1. `topics=meters` and `connection.database=power` means to subscribe to the data of the topic meters and write to the database power. -2. `db.schemaless=line` means the data in the InfluxDB Line protocol format. +1. `"topics": "meters"` and `"connection.database": "power"` means to subscribe to the data of the topic meters and write to the database power. +2. `"db.schemaless": "line"` means the data in the InfluxDB Line protocol format. -### Create Connector instance +### Create Sink Connector instance -```` -confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties +````shell +curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json" ```` If the above command is executed successfully, the output is as follows: @@ -237,7 +158,10 @@ If the above command is executed successfully, the output is as follows: "tasks.max": "1", "topics": "meters", "value.converter": "org.apache.kafka.connect.storage.StringConverter", - "name": "TDengineSinkConnector" + "name": "TDengineSinkConnector", + "errors.tolerance": "all", + "errors.deadletterqueue.topic.name": "dead_letter_topic", + "errors.deadletterqueue.topic.replication.factor": "1", }, "tasks": [], "type": "sink" @@ -258,7 +182,7 @@ meters,location=California.LoSangeles,groupid=3 current=11.3,voltage=221,phase=0 Use kafka-console-producer to write test data to the topic `meters`. ``` -cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters +cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters ``` :::note @@ -269,12 +193,12 @@ TDengine Sink Connector will automatically create the database if the target dat Use the TDengine CLI to verify that the sync was successful. -``` +```sql taos> use power; Database changed. 
taos> select * from meters; - ts | current | voltage | phase | groupid | location | + _ts | current | voltage | phase | groupid | location | =============================================================================================================================================================== 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | @@ -293,29 +217,34 @@ TDengine Source Connector will convert the data in TDengine data table into [Inf The following sample program synchronizes the data in the database test to the topic tdengine-source-test. -### Add configuration file +### Add Source Connector configuration file -``` -vi source-demo.properties +```shell +vi source-demo.json ``` Input following content: -```ini title="source-demo.properties" -name=TDengineSourceConnector -connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector -tasks.max=1 -connection.url=jdbc:TAOS://127.0.0.1:6030 -connection.username=root -connection.password=taosdata -connection.database=test -connection.attempts=3 -connection.backoff.ms=5000 -topic.prefix=tdengine-source- -poll.interval.ms=1000 -fetch.max.rows=100 -key.converter=org.apache.kafka.connect.storage.StringConverter -value.converter=org.apache.kafka.connect.storage.StringConverter +```json title="source-demo.json" +{ + "name":"TDengineSourceConnector", + "config":{ + "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", + "tasks.max": 1, + "connection.url": "jdbc:TAOS://127.0.0.1:6030", + "connection.username": "root", + "connection.password": "taosdata", + "connection.database": "test", + "connection.attempts": 3, + "connection.backoff.ms": 5000, + "topic.prefix": "tdengine-source", + "poll.interval.ms": 1000, + "fetch.max.rows": 100, + "topic.per.stable": true, + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "value.converter": "org.apache.kafka.connect.storage.StringConverter" + } +} ``` ### Prepare test data @@ -340,40 +269,40 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1 Use TDengine CLI to execute SQL script -``` +```shell taos -f prepare-source-data.sql ``` ### Create Connector instance -```` -confluent local services connect connector load TDengineSourceConnector --config source-demo.properties -```` +```shell +curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json" +``` ### View topic data Use the kafka-console-consumer command-line tool to monitor data in the topic tdengine-source-test. In the beginning, all historical data will be output. After inserting two new data into TDengine, kafka-console-consumer immediately outputs the two new data. The output is in InfluxDB line protocol format. -```` -kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test +````shell +kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters ```` output: -```` +```txt ...... meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000 meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000 ...... -```` +``` All historical data is displayed. 
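If the consumer prints nothing, the source connector's task state is the first thing to inspect before digging further. A minimal status query against the standard Connect REST API, using the connector name defined in source-demo.json:

```shell
# Both the connector and its task should report RUNNING;
# a FAILED task carries the error stack trace in its "trace" field.
curl -s http://localhost:8083/connectors/TDengineSourceConnector/status
```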
Switch to the TDengine CLI and insert two new pieces of data: -```` +```sql USE test; INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38); INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22); -```` +``` Switch back to kafka-console-consumer, and the command line window has printed out the two pieces of data just inserted. @@ -383,16 +312,16 @@ After testing, use the unload command to stop the loaded connector. View currently active connectors: -```` -confluent local services connect connector status -```` +```shell +curl http://localhost:8083/connectors +``` You should now have two active connectors if you followed the previous steps. Use the following command to unload: -```` -confluent local services connect connector unload TDengineSinkConnector -confluent local services connect connector unload TDengineSourceConnector -```` +```shell +curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector +curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector +``` ## Configuration reference @@ -430,19 +359,14 @@ The following configuration items apply to TDengine Sink Connector and TDengine 6. `query.interval.ms`: The time range of reading data from TDengine each time, its unit is millisecond. It should be adjusted according to the data flow in rate, the default value is 1000. 7. `topic.per.stable`: If it's set to true, it means one super table in TDengine corresponds to a topic in Kafka, the topic naming rule is `--`; if it's set to false, it means the whole DB corresponds to a topic in Kafka, the topic naming rule is `-`. - - ## Other notes -1. To install plugin to a customized location, refer to https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually. -2. To use Kafka Connect without confluent, refer to https://kafka.apache.org/documentation/#connect. +1. To use Kafka Connect, refer to . ## Feedback -https://github.com/taosdata/kafka-connect-tdengine/issues + ## Reference -1. https://www.confluent.io/what-is-apache-kafka -2. https://developer.confluent.io/learn-kafka/kafka-connect/intro -3. https://docs.confluent.io/platform/current/platform.html +1. For more information, see diff --git a/docs/zh/20-third-party/11-kafka.md b/docs/zh/20-third-party/11-kafka.md index 97e78c2fde..641e2d5174 100644 --- a/docs/zh/20-third-party/11-kafka.md +++ b/docs/zh/20-third-party/11-kafka.md @@ -16,169 +16,78 @@ TDengine Source Connector 用于把数据实时地从 TDengine 读出来发送 ![TDengine Database Kafka Connector -- streaming integration with kafka connect](kafka/streaming-integration-with-kafka-connect.webp) -## 什么是 Confluent? - -[Confluent](https://www.confluent.io/) 在 Kafka 的基础上增加很多扩展功能。包括: - -1. Schema Registry -2. REST 代理 -3. 非 Java 客户端 -4. 很多打包好的 Kafka Connect 插件 -5. 管理和监控 Kafka 的 GUI —— Confluent 控制中心 - -这些扩展功能有的包含在社区版本的 Confluent 中,有的只有企业版能用。 -![TDengine Database Kafka Connector -- Confluent introduction](kafka/confluentPlatform.webp) - -Confluent 企业版提供了 `confluent` 命令行工具管理各个组件。 - ## 前置条件 运行本教程中示例的前提条件。 1. Linux 操作系统 2. 已安装 Java 8 和 Maven -3. 已安装 Git +3. 已安装 Git、curl、vi 4. 
已安装并启动 TDengine。如果还没有可参考[安装和卸载](/operation/pkg-install) -## 安装 Confluent - -Confluent 提供了 Docker 和二进制包两种安装方式。本文仅介绍二进制包方式安装。 +## 安装 Kafka 在任意目录下执行: -``` -curl -O http://packages.confluent.io/archive/7.1/confluent-7.1.1.tar.gz -tar xzf confluent-7.1.1.tar.gz -C /opt/ +```shell +curl -O https://downloads.apache.org/kafka/3.4.0/kafka_2.13-3.4.0.tgz +tar xzf kafka_2.13-3.4.0.tgz -C /opt/ +ln -s /opt/kafka_2.13-3.4.0 /opt/kafka ``` -然后需要把 `$CONFLUENT_HOME/bin` 目录加入 PATH。 +然后需要把 `$KAFKA_HOME/bin` 目录加入 PATH。 ```title=".profile" -export CONFLUENT_HOME=/opt/confluent-7.1.1 -export PATH=$CONFLUENT_HOME/bin:$PATH +export KAFKA_HOME=/opt/kafka +export PATH=$PATH:$KAFKA_HOME/bin ``` 以上脚本可以追加到当前用户的 profile 文件(~/.profile 或 ~/.bash_profile) -安装完成之后,可以输入`confluent version`做简单验证: - -``` -# confluent version -confluent - Confluent CLI - -Version: v2.6.1 -Git Ref: 6d920590 -Build Date: 2022-02-18T06:14:21Z -Go Version: go1.17.6 (linux/amd64) -Development: false -``` - ## 安装 TDengine Connector 插件 -### 从源码安装 +### 编译插件 -``` +```shell git clone --branch 3.0 https://github.com/taosdata/kafka-connect-tdengine.git cd kafka-connect-tdengine -mvn clean package -unzip -d $CONFLUENT_HOME/share/java/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip +mvn clean package -Dmaven.test.skip=true +unzip -d $KAFKA_HOME/components/ target/components/packages/taosdata-kafka-connect-tdengine-*.zip ``` -以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$CONFLUENT_HOME/share/java/`。 +以上脚本先 clone 项目源码,然后用 Maven 编译打包。打包完成后在 `target/components/packages/` 目录生成了插件的 zip 包。把这个 zip 包解压到安装插件的路径即可。上面的示例中使用了内置的插件安装路径: `$KAFKA_HOME/components/`。 -### 用 confluent-hub 安装 +### 配置插件 -[Confluent Hub](https://www.confluent.io/hub) 提供下载 Kafka Connect 插件的服务。在 TDengine Kafka Connector 发布到 Confluent Hub 后可以使用命令工具 `confluent-hub` 安装。 -**TDengine Kafka Connector 目前没有正式发布,不能用这种方式安装**。 +将 kafka-connect-tdengine 插件加入 `$KAFKA_HOME/config/connect-distributed.properties` 配置文件 plugin.path 中 -## 启动 Confluent - -``` -confluent local services start +```properties +plugin.path=/usr/share/java,/opt/kafka/components ``` -:::note -一定要先安装插件再启动 Confluent, 否则加载插件会失败。 -::: +## 启动 Kafka -:::tip -若某组件启动失败,可尝试清空数据,重新启动。数据目录在启动时将被打印到控制台,比如 : +```shell +zookeeper-server-start.sh -daemon $KAFKA_HOME/config/zookeeper.properties -```title="控制台输出日志" {1} -Using CONFLUENT_CURRENT: /tmp/confluent.106668 -Starting ZooKeeper -ZooKeeper is [UP] -Starting Kafka -Kafka is [UP] -Starting Schema Registry -Schema Registry is [UP] -Starting Kafka REST -Kafka REST is [UP] -Starting Connect -Connect is [UP] -Starting ksqlDB Server -ksqlDB Server is [UP] -Starting Control Center -Control Center is [UP] +kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties + +connect-distributed.sh -daemon $KAFKA_HOME/config/connect-distributed.properties ``` -清空数据可执行 `rm -rf /tmp/confluent.106668`。 -::: - -### 验证各个组件是否启动成功 +### 验证 kafka Connect 是否启动成功 输入命令: -``` -confluent local services status +```shell +curl http://localhost:8083/connectors ``` 如果各组件都启动成功,会得到如下输出: +```txt +[] ``` -Connect is [UP] -Control Center is [UP] -Kafka is [UP] -Kafka REST is [UP] -ksqlDB Server is [UP] -Schema Registry is [UP] -ZooKeeper is [UP] -``` - -### 验证插件是否安装成功 - -在 Kafka Connect 组件完全启动后,可用以下命令列出成功加载的插件: - -``` -confluent local services connect plugin list -``` - -如果成功安装,会输出如下: - -```txt {4,9} -Available Connect Plugins: -[ - { - "class": "com.taosdata.kafka.connect.sink.TDengineSinkConnector", - "type": "sink", - "version": 
"1.0.0" - }, - { - "class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", - "type": "source", - "version": "1.0.0" - }, -...... -``` - -如果插件安装失败,请检查 Kafka Connect 的启动日志是否有异常信息,用以下命令输出日志路径: -``` -echo `cat /tmp/confluent.current`/connect/connect.stdout -``` -该命令的输出类似: `/tmp/confluent.104086/connect/connect.stdout`。 - -与日志文件 `connect.stdout` 同一目录,还有一个文件名为: `connect.properties`。在这个文件的末尾,可以看到最终生效的 `plugin.path`, 它是一系列用逗号分割的路径。如果插件安装失败,很可能是因为实际的安装路径不包含在 `plugin.path` 中。 - ## TDengine Sink Connector 的使用 @@ -188,40 +97,47 @@ TDengine Sink Connector 内部使用 TDengine [无模式写入接口](../../conn 下面的示例将主题 meters 的数据,同步到目标数据库 power。数据格式为 InfluxDB Line 协议格式。 -### 添加配置文件 +### 添加 Sink Connector 配置文件 -``` +```shell mkdir ~/test cd ~/test -vi sink-demo.properties +vi sink-demo.json ``` -sink-demo.properties 内容如下: +sink-demo.json 内容如下: -```ini title="sink-demo.properties" -name=TDengineSinkConnector -connector.class=com.taosdata.kafka.connect.sink.TDengineSinkConnector -tasks.max=1 -topics=meters -connection.url=jdbc:TAOS://127.0.0.1:6030 -connection.user=root -connection.password=taosdata -connection.database=power -db.schemaless=line -data.precision=ns -key.converter=org.apache.kafka.connect.storage.StringConverter -value.converter=org.apache.kafka.connect.storage.StringConverter +```json title="sink-demo.json" +{ + "name": "TDengineSinkConnector", + "config": { + "connector.class":"com.taosdata.kafka.connect.sink.TDengineSinkConnector", + "tasks.max": "1", + "topics": "meters", + "connection.url": "jdbc:TAOS://127.0.0.1:6030", + "connection.user": "root", + "connection.password": "taosdata", + "connection.database": "power", + "db.schemaless": "line", + "data.precision": "ns", + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "value.converter": "org.apache.kafka.connect.storage.StringConverter", + "errors.tolerance": "all", + "errors.deadletterqueue.topic.name": "dead_letter_topic", + "errors.deadletterqueue.topic.replication.factor": 1 + } +} ``` 关键配置说明: -1. `topics=meters` 和 `connection.database=power`, 表示订阅主题 meters 的数据,并写入数据库 power。 -2. `db.schemaless=line`, 表示使用 InfluxDB Line 协议格式的数据。 +1. `"topics": "meters"` 和 `"connection.database": "power"`, 表示订阅主题 meters 的数据,并写入数据库 power。 +2. `"db.schemaless": "line"`, 表示使用 InfluxDB Line 协议格式的数据。 -### 创建 Connector 实例 +### 创建 Sink Connector 实例 -``` -confluent local services connect connector load TDengineSinkConnector --config ./sink-demo.properties +```shell +curl -X POST -d @sink-demo.json http://localhost:8083/connectors -H "Content-Type: application/json" ``` 若以上命令执行成功,则有如下输出: @@ -241,7 +157,10 @@ confluent local services connect connector load TDengineSinkConnector --config . 
"tasks.max": "1", "topics": "meters", "value.converter": "org.apache.kafka.connect.storage.StringConverter", - "name": "TDengineSinkConnector" + "name": "TDengineSinkConnector", + "errors.tolerance": "all", + "errors.deadletterqueue.topic.name": "dead_letter_topic", + "errors.deadletterqueue.topic.replication.factor": "1", }, "tasks": [], "type": "sink" @@ -261,8 +180,8 @@ meters,location=California.LosAngeles,groupid=3 current=11.3,voltage=221,phase=0 使用 kafka-console-producer 向主题 meters 添加测试数据。 -``` -cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic meters +```shell +cat test-data.txt | kafka-console-producer.sh --broker-list localhost:9092 --topic meters ``` :::note @@ -273,12 +192,12 @@ cat test-data.txt | kafka-console-producer --broker-list localhost:9092 --topic 使用 TDengine CLI 验证同步是否成功。 -``` +```sql taos> use power; Database changed. taos> select * from meters; - ts | current | voltage | phase | groupid | location | + _ts | current | voltage | phase | groupid | location | =============================================================================================================================================================== 2022-03-28 09:56:51.249000000 | 11.800000000 | 221.000000000 | 0.280000000 | 2 | California.LosAngeles | 2022-03-28 09:56:51.250000000 | 13.400000000 | 223.000000000 | 0.290000000 | 2 | California.LosAngeles | @@ -297,29 +216,34 @@ TDengine Source Connector 会将 TDengine 数据表中的数据转换成 [Influx 下面的示例程序同步数据库 test 中的数据到主题 tdengine-source-test。 -### 添加配置文件 +### 添加 Source Connector 配置文件 -``` -vi source-demo.properties +```shell +vi source-demo.json ``` 输入以下内容: -```ini title="source-demo.properties" -name=TDengineSourceConnector -connector.class=com.taosdata.kafka.connect.source.TDengineSourceConnector -tasks.max=1 -connection.url=jdbc:TAOS://127.0.0.1:6030 -connection.username=root -connection.password=taosdata -connection.database=test -connection.attempts=3 -connection.backoff.ms=5000 -topic.prefix=tdengine-source- -poll.interval.ms=1000 -fetch.max.rows=100 -key.converter=org.apache.kafka.connect.storage.StringConverter -value.converter=org.apache.kafka.connect.storage.StringConverter +```json title="source-demo.json" +{ + "name":"TDengineSourceConnector", + "config":{ + "connector.class": "com.taosdata.kafka.connect.source.TDengineSourceConnector", + "tasks.max": 1, + "connection.url": "jdbc:TAOS://127.0.0.1:6030", + "connection.username": "root", + "connection.password": "taosdata", + "connection.database": "test", + "connection.attempts": 3, + "connection.backoff.ms": 5000, + "topic.prefix": "tdengine-source", + "poll.interval.ms": 1000, + "fetch.max.rows": 100, + "topic.per.stable": true, + "key.converter": "org.apache.kafka.connect.storage.StringConverter", + "value.converter": "org.apache.kafka.connect.storage.StringConverter" + } +} ``` ### 准备测试数据 @@ -344,27 +268,27 @@ INSERT INTO d1001 USING meters TAGS('California.SanFrancisco', 2) VALUES('2018-1 使用 TDengine CLI, 执行 SQL 文件。 -``` +```shell taos -f prepare-source-data.sql ``` -### 创建 Connector 实例 +### 创建 Source Connector 实例 -``` -confluent local services connect connector load TDengineSourceConnector --config source-demo.properties +```shell +curl -X POST -d @source-demo.json http://localhost:8083/connectors -H "Content-Type: application/json" ``` ### 查看 topic 数据 使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。 输出数据 InfluxDB line protocol 的格式。 -``` -kafka-console-consumer --bootstrap-server 
### 查看 topic 数据

使用 kafka-console-consumer 命令行工具监控主题 tdengine-source-test-meters 中的数据。一开始会输出所有历史数据, 往 TDengine 插入两条新的数据之后,kafka-console-consumer 也立即输出了新增的两条数据。
输出数据为 InfluxDB line protocol 格式。

-```
-kafka-console-consumer --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test
+```shell
+kafka-console-consumer.sh --bootstrap-server localhost:9092 --from-beginning --topic tdengine-source-test-meters
 ```

输出:

-```
+```txt
......
meters,location="California.SanFrancisco",groupid=2i32 current=10.3f32,voltage=219i32,phase=0.31f32 1538548685000000000
meters,location="California.SanFrancisco",groupid=2i32 current=12.6f32,voltage=218i32,phase=0.33f32 1538548695000000000
......

此时会显示所有历史数据。切换到 TDengine CLI, 插入两条新的数据:

-```
+```sql
USE test;
INSERT INTO d1001 VALUES (now, 13.3, 229, 0.38);
INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);
```

@@ -387,15 +311,15 @@ INSERT INTO d1002 VALUES (now, 16.3, 233, 0.22);

查看当前活跃的 connector:

-```
-confluent local services connect connector status
+```shell
+curl http://localhost:8083/connectors
 ```

如果按照前述操作,此时应有两个活跃的 connector。使用下面的命令 unload:

-```
-confluent local services connect connector unload TDengineSinkConnector
-confluent local services connect connector unload TDengineSourceConnector
+```shell
+curl -X DELETE http://localhost:8083/connectors/TDengineSinkConnector
+curl -X DELETE http://localhost:8083/connectors/TDengineSourceConnector
 ```

## 配置参考

@@ -442,15 +366,12 @@ confluent local services connect connector unload TDengineSourceConnector

## 其他说明

-1. 插件的安装位置可以自定义,请参考官方文档:https://docs.confluent.io/home/connect/self-managed/install.html#install-connector-manually。
-2. 本教程的示例程序使用了 Confluent 平台,但是 TDengine Kafka Connector 本身同样适用于独立安装的 Kafka, 且配置方法相同。关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:<https://kafka.apache.org/documentation/#connect>。
+1. 关于如何在独立安装的 Kafka 环境使用 Kafka Connect 插件, 请参考官方文档:<https://kafka.apache.org/documentation/#connect>。

## 问题反馈

-无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈: https://github.com/taosdata/kafka-connect-tdengine/issues。
+无论遇到任何问题,都欢迎在本项目的 Github 仓库反馈:<https://github.com/taosdata/kafka-connect-tdengine/issues>。

## 参考

-1. https://www.confluent.io/what-is-apache-kafka
-2. https://developer.confluent.io/learn-kafka/kafka-connect/intro
-3. https://docs.confluent.io/platform/current/platform.html
+1. <https://kafka.apache.org/documentation/>
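The next two patches (and the test updates after them) make INTERP's FILL(VALUE, ...) handling consistent with window fill: each non-pseudo output column must be given exactly one fill value, and the executor consumes those values through a dedicated per-column index (`fillColIndex`) instead of the expression slot `j`. The sketch below illustrates only the counting rule; the struct and function names are hypothetical stand-ins for the real parser nodes handled by `checkFillValues()` in parTranslater.c.

```c
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Hypothetical stand-in for a projection expression: pseudo columns
 * such as _irowts and _isfilled never consume a fill value. */
typedef struct {
  bool isPseudoColumn;
} SProjExpr;

/* Returns true when FILL(VALUE, ...) supplies exactly one value per
 * non-pseudo output column; otherwise the parser raises
 * "Filled values number mismatch". */
static bool fillValuesMatch(const SProjExpr *pProj, int32_t numOfProj, int32_t numOfFillValues) {
  int32_t fillNo = 0;
  for (int32_t i = 0; i < numOfProj; ++i) {
    if (!pProj[i].isPseudoColumn) {
      ++fillNo; /* this column will consume the fillNo-th fill value */
    }
  }
  return fillNo == numOfFillValues;
}

int main(void) {
  /* SELECT _irowts, interp(c0), interp(c1) ... FILL(VALUE, 123, 456) */
  SProjExpr proj[] = {{true}, {false}, {false}};
  printf("%s\n", fillValuesMatch(proj, 3, 2) ? "ok" : "mismatch");
  return 0;
}
```

This mirrors the updated expectations in interp.py: `fill(value, 123, 456)` pairs 123 with `interp(c0)` and 456 with `interp(c1)` regardless of interleaved `_irowts`/`_isfilled` columns, while a count mismatch such as `fill(value, 1)` over two interp columns is rejected.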
From 150e9ae2960da9c8cc146034c0688f3217a9966a Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 7 Jun 2023 16:55:03 +0800 Subject: [PATCH 14/18] make interp fill multiple col logic same as window fill --- source/libs/parser/src/parTranslater.c | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/source/libs/parser/src/parTranslater.c b/source/libs/parser/src/parTranslater.c index f049af6747..5c841deda0 100644 --- a/source/libs/parser/src/parTranslater.c +++ b/source/libs/parser/src/parTranslater.c @@ -3041,7 +3041,7 @@ static int32_t convertFillValue(STranslateContext* pCxt, SDataType dt, SNodeList return code; } -static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList, bool isInterpFill) { +static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeList* pProjectionList) { if (FILL_MODE_VALUE != pFill->mode && FILL_MODE_VALUE_F != pFill->mode) { return TSDB_CODE_SUCCESS; } @@ -3059,12 +3059,10 @@ static int32_t checkFillValues(STranslateContext* pCxt, SFillNode* pFill, SNodeL return code; } - if (!isInterpFill) { - ++fillNo; - } + ++fillNo; } } - if (!isInterpFill && fillNo != LIST_LENGTH(pFillValues->pNodeList)) { + if (fillNo != LIST_LENGTH(pFillValues->pNodeList)) { return generateSyntaxErrMsgExt(&pCxt->msgBuf, TSDB_CODE_PAR_WRONG_VALUE_TYPE, "Filled values number mismatch"); } return TSDB_CODE_SUCCESS; @@ -3075,7 +3073,7 @@ static int32_t translateFillValues(STranslateContext* pCxt, SSelectStmt* pSelect NULL == ((SIntervalWindowNode*)pSelect->pWindow)->pFill) { return TSDB_CODE_SUCCESS; } - return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList, false); + return checkFillValues(pCxt, (SFillNode*)((SIntervalWindowNode*)pSelect->pWindow)->pFill, pSelect->pProjectionList); } static int32_t rewriteProjectAlias(SNodeList* pProjectionList) { @@ -3525,7 +3523,7 @@ static int32_t translateInterpFill(STranslateContext* pCxt, SSelectStmt* pSelect code = checkFill(pCxt, (SFillNode*)pSelect->pFill, (SValueNode*)pSelect->pEvery, true); } if (TSDB_CODE_SUCCESS == code) { - code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList, true); + code = checkFillValues(pCxt, (SFillNode*)pSelect->pFill, pSelect->pProjectionList); } return code; From c612d945ec73114f93623f0116c190608605fc45 Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 7 Jun 2023 18:27:29 +0800 Subject: [PATCH 15/18] fix fill value bug --- source/libs/executor/src/timesliceoperator.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/source/libs/executor/src/timesliceoperator.c b/source/libs/executor/src/timesliceoperator.c index 3e4055876d..2421343bd7 100644 --- a/source/libs/executor/src/timesliceoperator.c +++ b/source/libs/executor/src/timesliceoperator.c @@ -257,7 +257,8 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp // output the result - bool hasInterp = true; + int32_t fillColIndex = 0; + bool hasInterp = true; for (int32_t j = 0; j < pExprSup->numOfExprs; ++j) { SExprInfo* pExprInfo = &pExprSup->pExprInfo[j]; @@ -307,7 +308,7 @@ static bool genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp case TSDB_FILL_SET_VALUE: case TSDB_FILL_SET_VALUE_F: { - SVariant* pVar = &pSliceInfo->pFillColInfo[j].fillVal; + SVariant* pVar = &pSliceInfo->pFillColInfo[fillColIndex].fillVal; if (pDst->info.type == TSDB_DATA_TYPE_FLOAT) { float v = 0; @@ -342,6 +343,8 @@ static bool 
genInterpolationResult(STimeSliceOperatorInfo* pSliceInfo, SExprSupp } colDataSetVal(pDst, rows, (char*)&v, false); } + + ++fillColIndex; break; } From 46baeefea73e2c6edf038482a96e373335d7edef Mon Sep 17 00:00:00 2001 From: Ganlin Zhao Date: Wed, 7 Jun 2023 18:39:33 +0800 Subject: [PATCH 16/18] fix test cases --- tests/system-test/2-query/interp.py | 190 ++++++++++++++-------------- 1 file changed, 97 insertions(+), 93 deletions(-) diff --git a/tests/system-test/2-query/interp.py b/tests/system-test/2-query/interp.py index 019b449dfd..eb6aeec95a 100644 --- a/tests/system-test/2-query/interp.py +++ b/tests/system-test/2-query/interp.py @@ -1809,47 +1809,10 @@ class TDTestCase: tdSql.checkData(60, 1, 60) # # test fill value - tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123)") + tdSql.query(f"select _irowts, interp(c0), _irowts, interp(c1), _irowts from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123, 456)") tdSql.checkRows(61) - tdSql.checkCols(2) - tdSql.checkData(0, 0, 0) # - tdSql.checkData(1, 0, 123) - tdSql.checkData(4, 0, 123) - tdSql.checkData(5, 0, None) # - tdSql.checkData(6, 0, 123) - tdSql.checkData(9, 0, 123) - tdSql.checkData(10, 0, 10) # - tdSql.checkData(11, 0, 123) - tdSql.checkData(14, 0, 123) - tdSql.checkData(15, 0, None) # - tdSql.checkData(16, 0, 123) - tdSql.checkData(19, 0, 123) - tdSql.checkData(20, 0, 20) # - tdSql.checkData(21, 0, 123) - tdSql.checkData(24, 0, 123) - tdSql.checkData(25, 0, None) # - tdSql.checkData(26, 0, 123) - tdSql.checkData(29, 0, 123) - tdSql.checkData(30, 0, 30) # - tdSql.checkData(31, 0, 123) - tdSql.checkData(34, 0, 123) - tdSql.checkData(35, 0, 35) # - tdSql.checkData(36, 0, 123) - tdSql.checkData(39, 0, 123) - tdSql.checkData(40, 0, 40) # - tdSql.checkData(41, 0, 123) - tdSql.checkData(44, 0, 123) - tdSql.checkData(45, 0, None) # - tdSql.checkData(46, 0, 123) - tdSql.checkData(49, 0, 123) - tdSql.checkData(50, 0, 50) # - tdSql.checkData(51, 0, 123) - tdSql.checkData(54, 0, 123) - tdSql.checkData(55, 0, None) # - tdSql.checkData(59, 0, 123) - tdSql.checkData(60, 0, 55) # - - tdSql.checkData(0, 1, None) # + tdSql.checkCols(5) + tdSql.checkData(0, 1, 0) # tdSql.checkData(1, 1, 123) tdSql.checkData(4, 1, 123) tdSql.checkData(5, 1, None) # @@ -1861,7 +1824,7 @@ class TDTestCase: tdSql.checkData(15, 1, None) # tdSql.checkData(16, 1, 123) tdSql.checkData(19, 1, 123) - tdSql.checkData(20, 1, None) # + tdSql.checkData(20, 1, 20) # tdSql.checkData(21, 1, 123) tdSql.checkData(24, 1, 123) tdSql.checkData(25, 1, None) # @@ -1870,64 +1833,64 @@ class TDTestCase: tdSql.checkData(30, 1, 30) # tdSql.checkData(31, 1, 123) tdSql.checkData(34, 1, 123) - tdSql.checkData(35, 1, None) # + tdSql.checkData(35, 1, 35) # tdSql.checkData(36, 1, 123) tdSql.checkData(39, 1, 123) tdSql.checkData(40, 1, 40) # tdSql.checkData(41, 1, 123) tdSql.checkData(44, 1, 123) - tdSql.checkData(45, 1, 45) # + tdSql.checkData(45, 1, None) # tdSql.checkData(46, 1, 123) tdSql.checkData(49, 1, 123) - tdSql.checkData(50, 1, None) # + tdSql.checkData(50, 1, 50) # tdSql.checkData(51, 1, 123) tdSql.checkData(54, 1, 123) tdSql.checkData(55, 1, None) # - tdSql.checkData(56, 1, 123) tdSql.checkData(59, 1, 123) - tdSql.checkData(60, 1, 60) # + tdSql.checkData(60, 1, 55) # - tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123 + 123)") + tdSql.checkData(0, 3, 
None) # + tdSql.checkData(1, 3, 456) + tdSql.checkData(4, 3, 456) + tdSql.checkData(5, 3, None) # + tdSql.checkData(6, 3, 456) + tdSql.checkData(9, 3, 456) + tdSql.checkData(10, 3, 10) # + tdSql.checkData(11, 3, 456) + tdSql.checkData(14, 3, 456) + tdSql.checkData(15, 3, None) # + tdSql.checkData(16, 3, 456) + tdSql.checkData(19, 3, 456) + tdSql.checkData(20, 3, None) # + tdSql.checkData(21, 3, 456) + tdSql.checkData(24, 3, 456) + tdSql.checkData(25, 3, None) # + tdSql.checkData(26, 3, 456) + tdSql.checkData(29, 3, 456) + tdSql.checkData(30, 3, 30) # + tdSql.checkData(31, 3, 456) + tdSql.checkData(34, 3, 456) + tdSql.checkData(35, 3, None) # + tdSql.checkData(36, 3, 456) + tdSql.checkData(39, 3, 456) + tdSql.checkData(40, 3, 40) # + tdSql.checkData(41, 3, 456) + tdSql.checkData(44, 3, 456) + tdSql.checkData(45, 3, 45) # + tdSql.checkData(46, 3, 456) + tdSql.checkData(49, 3, 456) + tdSql.checkData(50, 3, None) # + tdSql.checkData(51, 3, 456) + tdSql.checkData(54, 3, 456) + tdSql.checkData(55, 3, None) # + tdSql.checkData(56, 3, 456) + tdSql.checkData(59, 3, 456) + tdSql.checkData(60, 3, 60) # + + tdSql.query(f"select _isfilled, interp(c0), _isfilled, interp(c1), _isfilled from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(value, 123 + 123, 234 + 234)") tdSql.checkRows(61) - tdSql.checkCols(2) - tdSql.checkData(0, 0, 0) # - tdSql.checkData(1, 0, 246) - tdSql.checkData(4, 0, 246) - tdSql.checkData(5, 0, None) # - tdSql.checkData(6, 0, 246) - tdSql.checkData(9, 0, 246) - tdSql.checkData(10, 0, 10) # - tdSql.checkData(11, 0, 246) - tdSql.checkData(14, 0, 246) - tdSql.checkData(15, 0, None) # - tdSql.checkData(16, 0, 246) - tdSql.checkData(19, 0, 246) - tdSql.checkData(20, 0, 20) # - tdSql.checkData(21, 0, 246) - tdSql.checkData(24, 0, 246) - tdSql.checkData(25, 0, None) # - tdSql.checkData(26, 0, 246) - tdSql.checkData(29, 0, 246) - tdSql.checkData(30, 0, 30) # - tdSql.checkData(31, 0, 246) - tdSql.checkData(34, 0, 246) - tdSql.checkData(35, 0, 35) # - tdSql.checkData(36, 0, 246) - tdSql.checkData(39, 0, 246) - tdSql.checkData(40, 0, 40) # - tdSql.checkData(41, 0, 246) - tdSql.checkData(44, 0, 246) - tdSql.checkData(45, 0, None) # - tdSql.checkData(46, 0, 246) - tdSql.checkData(49, 0, 246) - tdSql.checkData(50, 0, 50) # - tdSql.checkData(51, 0, 246) - tdSql.checkData(54, 0, 246) - tdSql.checkData(55, 0, None) # - tdSql.checkData(59, 0, 246) - tdSql.checkData(60, 0, 55) # - - tdSql.checkData(0, 1, None) # + tdSql.checkCols(5) + tdSql.checkData(0, 1, 0) # tdSql.checkData(1, 1, 246) tdSql.checkData(4, 1, 246) tdSql.checkData(5, 1, None) # @@ -1939,7 +1902,7 @@ class TDTestCase: tdSql.checkData(15, 1, None) # tdSql.checkData(16, 1, 246) tdSql.checkData(19, 1, 246) - tdSql.checkData(20, 1, None) # + tdSql.checkData(20, 1, 20) # tdSql.checkData(21, 1, 246) tdSql.checkData(24, 1, 246) tdSql.checkData(25, 1, None) # @@ -1948,22 +1911,59 @@ class TDTestCase: tdSql.checkData(30, 1, 30) # tdSql.checkData(31, 1, 246) tdSql.checkData(34, 1, 246) - tdSql.checkData(35, 1, None) # + tdSql.checkData(35, 1, 35) # tdSql.checkData(36, 1, 246) tdSql.checkData(39, 1, 246) tdSql.checkData(40, 1, 40) # tdSql.checkData(41, 1, 246) tdSql.checkData(44, 1, 246) - tdSql.checkData(45, 1, 45) # + tdSql.checkData(45, 1, None) # tdSql.checkData(46, 1, 246) tdSql.checkData(49, 1, 246) - tdSql.checkData(50, 1, None) # + tdSql.checkData(50, 1, 50) # tdSql.checkData(51, 1, 246) tdSql.checkData(54, 1, 246) tdSql.checkData(55, 1, None) # - tdSql.checkData(56, 1, 246) tdSql.checkData(59, 1, 
246) - tdSql.checkData(60, 1, 60) # + tdSql.checkData(60, 1, 55) # + + tdSql.checkData(0, 3, None) # + tdSql.checkData(1, 3, 468) + tdSql.checkData(4, 3, 468) + tdSql.checkData(5, 3, None) # + tdSql.checkData(6, 3, 468) + tdSql.checkData(9, 3, 468) + tdSql.checkData(10, 3, 10) # + tdSql.checkData(11, 3, 468) + tdSql.checkData(14, 3, 468) + tdSql.checkData(15, 3, None) # + tdSql.checkData(16, 3, 468) + tdSql.checkData(19, 3, 468) + tdSql.checkData(20, 3, None) # + tdSql.checkData(21, 3, 468) + tdSql.checkData(24, 3, 468) + tdSql.checkData(25, 3, None) # + tdSql.checkData(26, 3, 468) + tdSql.checkData(29, 3, 468) + tdSql.checkData(30, 3, 30) # + tdSql.checkData(31, 3, 468) + tdSql.checkData(34, 3, 468) + tdSql.checkData(35, 3, None) # + tdSql.checkData(36, 3, 468) + tdSql.checkData(39, 3, 468) + tdSql.checkData(40, 3, 40) # + tdSql.checkData(41, 3, 468) + tdSql.checkData(44, 3, 468) + tdSql.checkData(45, 3, 45) # + tdSql.checkData(46, 3, 468) + tdSql.checkData(49, 3, 468) + tdSql.checkData(50, 3, None) # + tdSql.checkData(51, 3, 468) + tdSql.checkData(54, 3, 468) + tdSql.checkData(55, 3, None) # + tdSql.checkData(56, 3, 468) + tdSql.checkData(59, 3, 468) + tdSql.checkData(60, 3, 60) # # test fill prev tdSql.query(f"select interp(c0),interp(c1) from {dbname}.{tbname1} range('2020-02-02 00:00:00', '2020-02-02 00:01:00') every(1s) fill(prev)") @@ -2138,7 +2138,7 @@ class TDTestCase: tdSql.checkData(3, i, None) tdSql.checkData(4, i, None) - tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1)") + tdSql.query(f"select interp(c0),interp(c1),interp(c2),interp(c3) from {dbname}.{tbname} range('2020-02-09 00:00:05', '2020-02-13 00:00:05') every(1d) fill(value, 1, 1, 1, 1)") tdSql.checkRows(5) tdSql.checkCols(4) @@ -2564,6 +2564,10 @@ class TDTestCase: tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _isfilled = true range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") tdSql.error(f"select interp(c0) from {dbname}.{tbname} where _irowts > 0 range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(null)") + # fill value number mismatch + tdSql.error(f"select interp(c0) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(value, 1, 2)") + tdSql.error(f"select interp(c0), interp(c1) from {dbname}.{tbname} range('2020-02-10 00:00:05', '2020-02-15 00:00:05') every(1d) fill(value, 1)") + From 9b749b8faa0c2abeb622757161c70ec9112d1989 Mon Sep 17 00:00:00 2001 From: yihaoDeng Date: Thu, 8 Jun 2023 02:43:44 +0000 Subject: [PATCH 17/18] change link op t --- contrib/CMakeLists.txt | 2 +- examples/c/CMakeLists.txt | 12 ++++++------ source/dnode/vnode/CMakeLists.txt | 2 +- source/libs/stream/CMakeLists.txt | 2 +- utils/test/c/CMakeLists.txt | 16 ++++++++-------- 5 files changed, 17 insertions(+), 17 deletions(-) diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt index 59986a3b3c..fdb9f102f0 100644 --- a/contrib/CMakeLists.txt +++ b/contrib/CMakeLists.txt @@ -274,7 +274,7 @@ if(${BUILD_WITH_ROCKSDB}) option(WITH_TOOLS "" OFF) option(WITH_LIBURING "" OFF) IF (TD_LINUX) - option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" ON) + option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) ELSE() option(ROCKSDB_BUILD_SHARED "Build shared versions of the RocksDB libraries" OFF) ENDIF() diff --git a/examples/c/CMakeLists.txt b/examples/c/CMakeLists.txt index 
e14c4e60d9..07fc2fd71b 100644 --- a/examples/c/CMakeLists.txt +++ b/examples/c/CMakeLists.txt @@ -42,27 +42,27 @@ IF (TD_LINUX) ) target_link_libraries(tmq - taos_static + taos ) target_link_libraries(stream_demo - taos_static + taos ) target_link_libraries(schemaless - taos_static + taos ) target_link_libraries(prepare - taos_static + taos ) target_link_libraries(demo - taos_static + taos ) target_link_libraries(asyncdemo - taos_static + taos ) SET_TARGET_PROPERTIES(tmq PROPERTIES OUTPUT_NAME tmq) diff --git a/source/dnode/vnode/CMakeLists.txt b/source/dnode/vnode/CMakeLists.txt index b18cb8e282..b7bfc57cd5 100644 --- a/source/dnode/vnode/CMakeLists.txt +++ b/source/dnode/vnode/CMakeLists.txt @@ -103,7 +103,7 @@ target_link_libraries( # PUBLIC bdb # PUBLIC scalar - PUBLIC rocksdb-shared + PUBLIC rocksdb PUBLIC transport PUBLIC stream PUBLIC index diff --git a/source/libs/stream/CMakeLists.txt b/source/libs/stream/CMakeLists.txt index fa6c709c8f..d1ef7fe3c1 100644 --- a/source/libs/stream/CMakeLists.txt +++ b/source/libs/stream/CMakeLists.txt @@ -11,7 +11,7 @@ if(${BUILD_WITH_ROCKSDB}) IF (TD_LINUX) target_link_libraries( stream - PUBLIC rocksdb-shared tdb + PUBLIC rocksdb tdb PRIVATE os util transport qcom executor wal index ) ELSE() diff --git a/utils/test/c/CMakeLists.txt b/utils/test/c/CMakeLists.txt index 87b0d11d1c..71dfd710a5 100644 --- a/utils/test/c/CMakeLists.txt +++ b/utils/test/c/CMakeLists.txt @@ -9,35 +9,35 @@ add_executable(get_db_name_test get_db_name_test.c) add_executable(tmq_offset tmqOffset.c) target_link_libraries( tmq_offset - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os ) target_link_libraries( create_table - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os ) target_link_libraries( tmq_demo - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os ) target_link_libraries( tmq_sim - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os ) target_link_libraries( tmq_taosx_ci - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os @@ -45,7 +45,7 @@ target_link_libraries( target_link_libraries( write_raw_block_test - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os @@ -53,7 +53,7 @@ target_link_libraries( target_link_libraries( sml_test - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os @@ -61,7 +61,7 @@ target_link_libraries( target_link_libraries( get_db_name_test - PUBLIC taos_static + PUBLIC taos PUBLIC util PUBLIC common PUBLIC os From 4d574ca611f8ced2c37ba4be33bd87f6f6ea04de Mon Sep 17 00:00:00 2001 From: xleili Date: Wed, 7 Jun 2023 16:57:30 +0800 Subject: [PATCH 18/18] fix: exclude install and remove lbrocksdb.so --- packaging/deb/DEBIAN/preinst | 1 - packaging/deb/DEBIAN/prerm | 1 - packaging/deb/makedeb.sh | 2 -- packaging/rpm/tdengine.spec | 4 ---- packaging/tools/install.sh | 12 ------------ packaging/tools/makepkg.sh | 3 --- packaging/tools/post.sh | 10 ---------- packaging/tools/remove.sh | 2 -- 8 files changed, 35 deletions(-) diff --git a/packaging/deb/DEBIAN/preinst b/packaging/deb/DEBIAN/preinst index d6558d5b3b..904a946e20 100644 --- a/packaging/deb/DEBIAN/preinst +++ b/packaging/deb/DEBIAN/preinst @@ -80,5 +80,4 @@ fi # there can not libtaos.so*, otherwise ln -s error ${csudo}rm -f ${install_main_dir}/driver/libtaos.* || : -[ -f ${install_main_dir}/driver/librocksdb.* ] && ${csudo}rm -f ${install_main_dir}/driver/librocksdb.* || : [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}rm -f 
${install_main_dir}/driver/libtaosws.so || : diff --git a/packaging/deb/DEBIAN/prerm b/packaging/deb/DEBIAN/prerm index 8f8d472867..0d63115a04 100644 --- a/packaging/deb/DEBIAN/prerm +++ b/packaging/deb/DEBIAN/prerm @@ -40,7 +40,6 @@ else ${csudo}rm -f ${inc_link_dir}/taosudf.h || : [ -f ${inc_link_dir}/taosws.h ] && ${csudo}rm -f ${inc_link_dir}/taosws.h || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - [ -f ${lib_link_dir}/librocksdb.* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb.* || : [ -f ${lib_link_dir}/libtaosws.so ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.so || : ${csudo}rm -f ${log_link_dir} || : diff --git a/packaging/deb/makedeb.sh b/packaging/deb/makedeb.sh index 024c69deb1..9f49cf345a 100755 --- a/packaging/deb/makedeb.sh +++ b/packaging/deb/makedeb.sh @@ -31,7 +31,6 @@ cd ${pkg_dir} libfile="libtaos.so.${tdengine_ver}" wslibfile="libtaosws.so" -rocksdblib="librocksdb.so.8" # create install dir install_home_path="/usr/local/taos" @@ -95,7 +94,6 @@ fi cp ${compile_dir}/build/bin/taos ${pkg_dir}${install_home_path}/bin cp ${compile_dir}/build/lib/${libfile} ${pkg_dir}${install_home_path}/driver -[ -f ${compile_dir}/build/lib/${rocksdblib} ] && cp ${compile_dir}/build/lib/${rocksdblib} ${pkg_dir}${install_home_path}/driver ||: [ -f ${compile_dir}/build/lib/${wslibfile} ] && cp ${compile_dir}/build/lib/${wslibfile} ${pkg_dir}${install_home_path}/driver ||: cp ${compile_dir}/../include/client/taos.h ${pkg_dir}${install_home_path}/include cp ${compile_dir}/../include/common/taosdef.h ${pkg_dir}${install_home_path}/include diff --git a/packaging/rpm/tdengine.spec b/packaging/rpm/tdengine.spec index 2b056c376a..52d5335003 100644 --- a/packaging/rpm/tdengine.spec +++ b/packaging/rpm/tdengine.spec @@ -45,7 +45,6 @@ echo buildroot: %{buildroot} libfile="libtaos.so.%{_version}" wslibfile="libtaosws.so" -rocksdblib="librocksdb.so.8" # create install path, and cp file mkdir -p %{buildroot}%{homepath}/bin @@ -93,7 +92,6 @@ if [ -f %{_compiledir}/build/bin/taosadapter ]; then fi cp %{_compiledir}/build/lib/${libfile} %{buildroot}%{homepath}/driver [ -f %{_compiledir}/build/lib/${wslibfile} ] && cp %{_compiledir}/build/lib/${wslibfile} %{buildroot}%{homepath}/driver ||: -[ -f %{_compiledir}/build/lib/${rocksdblib} ] && cp %{_compiledir}/build/lib/${rocksdblib} %{buildroot}%{homepath}/driver ||: cp %{_compiledir}/../include/client/taos.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/common/taosdef.h %{buildroot}%{homepath}/include cp %{_compiledir}/../include/util/taoserror.h %{buildroot}%{homepath}/include @@ -176,7 +174,6 @@ fi # there can not libtaos.so*, otherwise ln -s error ${csudo}rm -f %{homepath}/driver/libtaos* || : -${csudo}rm -f %{homepath}/driver/librocksdb* || : #Scripts executed after installation %post @@ -222,7 +219,6 @@ if [ $1 -eq 0 ];then ${csudo}rm -f ${inc_link_dir}/taoserror.h || : ${csudo}rm -f ${inc_link_dir}/taosudf.h || : ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib_link_dir}/librocksdb.* || : ${csudo}rm -f ${log_link_dir} || : ${csudo}rm -f ${data_link_dir} || : diff --git a/packaging/tools/install.sh b/packaging/tools/install.sh index 9aa019f218..1b47b10520 100755 --- a/packaging/tools/install.sh +++ b/packaging/tools/install.sh @@ -250,30 +250,18 @@ function install_lib() { # Remove links ${csudo}rm -f ${lib_link_dir}/libtaos.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib_link_dir}/librocksdb.* || : - ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || : #${csudo}rm -rf ${v15_java_app_dir} || : 
${csudo}cp -rf ${script_dir}/driver/* ${install_main_dir}/driver && ${csudo}chmod 777 ${install_main_dir}/driver/* ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib_link_dir}/libtaos.so.1 ${csudo}ln -sf ${lib_link_dir}/libtaos.so.1 ${lib_link_dir}/libtaos.so - ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8 - ${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so - - ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib_link_dir}/librocksdb.so.8 - ${csudo}ln -sf ${lib_link_dir}/librocksdb.so.8 ${lib_link_dir}/librocksdb.so - - [ -f ${install_main_dir}/driver/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/driver/libtaosws.so ${lib_link_dir}/libtaosws.so || : if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.so ]]; then ${csudo}ln -sf ${install_main_dir}/driver/libtaos.* ${lib64_link_dir}/libtaos.so.1 || : ${csudo}ln -sf ${lib64_link_dir}/libtaos.so.1 ${lib64_link_dir}/libtaos.so || : - ${csudo}ln -sf ${install_main_dir}/driver/librocksdb.* ${lib64_link_dir}/librocksdb.so.8 || : - ${csudo}ln -sf ${lib64_link_dir}/librocksdb.so.8 ${lib64_link_dir}/librocksdb.so || : - [ -f ${install_main_dir}/libtaosws.so ] && ${csudo}ln -sf ${install_main_dir}/libtaosws.so ${lib64_link_dir}/libtaosws.so || : fi diff --git a/packaging/tools/makepkg.sh b/packaging/tools/makepkg.sh index ab45c684c4..b0537e8bcf 100755 --- a/packaging/tools/makepkg.sh +++ b/packaging/tools/makepkg.sh @@ -111,11 +111,9 @@ fi if [ "$osType" == "Darwin" ]; then lib_files="${build_dir}/lib/libtaos.${version}.dylib" wslib_files="${build_dir}/lib/libtaosws.dylib" - rocksdb_lib_files="${build_dir}/lib/librocksdb.dylib.8.1.1" else lib_files="${build_dir}/lib/libtaos.so.${version}" wslib_files="${build_dir}/lib/libtaosws.so" - rocksdb_lib_files="${build_dir}/lib/librocksdb.so.8.1.1" fi header_files="${code_dir}/include/client/taos.h ${code_dir}/include/common/taosdef.h ${code_dir}/include/util/taoserror.h ${code_dir}/include/libs/function/taosudf.h" @@ -338,7 +336,6 @@ fi # Copy driver mkdir -p ${install_dir}/driver && cp ${lib_files} ${install_dir}/driver && echo "${versionComp}" >${install_dir}/driver/vercomp.txt [ -f ${wslib_files} ] && cp ${wslib_files} ${install_dir}/driver || : -[ -f ${rocksdb_lib_files} ] && cp ${rocksdb_lib_files} ${install_dir}/driver || : # Copy connector if [ "$verMode" == "cluster" ]; then diff --git a/packaging/tools/post.sh b/packaging/tools/post.sh index 10de87966f..fc392c9684 100755 --- a/packaging/tools/post.sh +++ b/packaging/tools/post.sh @@ -202,19 +202,10 @@ function install_lib() { log_print "start install lib from ${lib_dir} to ${lib_link_dir}" ${csudo}rm -f ${lib_link_dir}/libtaos* || : ${csudo}rm -f ${lib64_link_dir}/libtaos* || : - - #rocksdb - [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || : - [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || : - - #rocksdb - [ -f ${lib_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib_link_dir}/librocksdb* || : - [ -f ${lib64_link_dir}/librocksdb* ] && ${csudo}rm -f ${lib64_link_dir}/librocksdb* || : [ -f ${lib_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.${lib_file_ext} || : [ -f ${lib64_link_dir}/libtaosws.${lib_file_ext} ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.${lib_file_ext} || : - ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib_dir}/libtaos.* 
${lib_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib_link_dir}/libtaos.${lib_file_ext_1} ${lib_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 @@ -223,7 +214,6 @@ function install_lib() { if [[ -d ${lib64_link_dir} && ! -e ${lib64_link_dir}/libtaos.${lib_file_ext} ]]; then ${csudo}ln -s ${lib_dir}/libtaos.* ${lib64_link_dir}/libtaos.${lib_file_ext_1} 2>>${install_log_path} || return 1 ${csudo}ln -s ${lib64_link_dir}/libtaos.${lib_file_ext_1} ${lib64_link_dir}/libtaos.${lib_file_ext} 2>>${install_log_path} || return 1 - ${csudo}ln -s ${lib_dir}/librocksdb.* ${lib64_link_dir}/librocksdb.${lib_file_ext_1} 2>>${install_log_path} || return 1 [ -f ${lib_dir}/libtaosws.${lib_file_ext} ] && ${csudo}ln -sf ${lib_dir}/libtaosws.${lib_file_ext} ${lib64_link_dir}/libtaosws.${lib_file_ext} 2>>${install_log_path} fi diff --git a/packaging/tools/remove.sh b/packaging/tools/remove.sh index a17b29983c..be2c26c309 100755 --- a/packaging/tools/remove.sh +++ b/packaging/tools/remove.sh @@ -142,11 +142,9 @@ function clean_local_bin() { function clean_lib() { # Remove link ${csudo}rm -f ${lib_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib_link_dir}/librocksdb.* || : [ -f ${lib_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib_link_dir}/libtaosws.* || : ${csudo}rm -f ${lib64_link_dir}/libtaos.* || : - ${csudo}rm -f ${lib64_link_dir}/librocksdb.* || : [ -f ${lib64_link_dir}/libtaosws.* ] && ${csudo}rm -f ${lib64_link_dir}/libtaosws.* || : #${csudo}rm -rf ${v15_java_app_dir} || :
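The last two patches switch the example and test binaries from `taos_static` to the shared `taos` library and stop building or packaging `librocksdb` as a separate shared object (RocksDB is linked in statically instead). A quick way to confirm the shared client still resolves after these packaging changes is a small smoke test. This is a sketch under stated assumptions, not part of the patch set: it assumes a locally running taosd with the default `root`/`taosdata` credentials, and a build such as `gcc smoke.c -ltaos`.

```c
#include <stdio.h>
#include <taos.h>

int main(void) {
  /* Connect with the default credentials used throughout this patch set. */
  TAOS *conn = taos_connect("localhost", "root", "taosdata", NULL, 6030);
  if (conn == NULL) {
    fprintf(stderr, "connect failed: %s\n", taos_errstr(NULL));
    return 1;
  }

  /* Any trivial query exercises the full client/server round trip. */
  TAOS_RES *res = taos_query(conn, "SELECT SERVER_VERSION()");
  if (taos_errno(res) != 0) {
    fprintf(stderr, "query failed: %s\n", taos_errstr(res));
  } else {
    printf("server is up\n");
  }

  taos_free_result(res);
  taos_close(conn);
  taos_cleanup();
  return 0;
}
```

Running `ldd` on the resulting binary should list `libtaos.so` but no `librocksdb.so`, which is exactly the dependency the removed packaging symlinks used to provide.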