From b5dc851767427fdb901636f0907f70c3066bc642 Mon Sep 17 00:00:00 2001
From: Yu Chen <74105241+yu285@users.noreply.github.com>
Date: Thu, 21 Nov 2024 10:13:47 +0800
Subject: [PATCH 01/13] docs/Update 01-faq.md

---
 docs/zh/27-train-faq/01-faq.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/docs/zh/27-train-faq/01-faq.md b/docs/zh/27-train-faq/01-faq.md
index 5bc6a4df27..af8468411c 100644
--- a/docs/zh/27-train-faq/01-faq.md
+++ b/docs/zh/27-train-faq/01-faq.md
@@ -273,3 +273,11 @@ The data displayed in the TDinsight plugin is collected through the taosKeeper and taosAdapter services
 ### 29 What should I do about the error "some vnode/qnode/mnode(s) out of service"?
 The client has not configured FQDN resolution for all server nodes. For example, if the server has 3 nodes, the client may have configured FQDN resolution for only 1 of them. For FQDN configuration, see: [One Article to Clarify TDengine's FQDN](https://www.taosdata.com/blog/2020/09/11/1824.html)
+
+### 30 Why does the main process of the open-source edition of TDengine establish a connection to the public network?
+This connection reports only the most basic information, involving no user data. It is used by the vendor to understand how the product is distributed worldwide so that the product and user experience can be improved. The items collected are the cluster name, operating system version, CPU information, and so on.
+This feature is an optional configuration item and is enabled by default in the open-source edition. The parameter is telemetryReporting, and it is described in the official documentation at the following link:
+https://docs.taosdata.com/reference/components/taosd/#%E7%9B%91%E6%8E%A7%E7%9B%B8%E5%85%B3
+You can disable this parameter at any time by setting telemetryReporting to 0 in taos.cfg and restarting the database service.
+The code is located at: https://github.com/taosdata/TDengine/blob/62e609c558deb764a37d1a01ba84bc35115a85a4/source/dnode/mnode/impl/src/mndTelem.c
+In addition, this parameter has no effect in TDengine Enterprise, which is designed for deployments with stringent security requirements.

From 39564a41ae1f5ddf731686200848a8a654367432 Mon Sep 17 00:00:00 2001
From: 54liuyao <54liuyao@163.com>
Date: Mon, 25 Nov 2024 13:54:31 +0800
Subject: [PATCH 02/13] fix(stream): build create table request for
 force_window_close

---
 source/libs/executor/inc/executorInt.h        |   1 +
 source/libs/executor/src/scanoperator.c       |  15 ++
 .../executor/src/streamtimesliceoperator.c    |  12 ++
 tests/parallel_test/cases.task                |   1 +
 .../script/tsim/stream/streamTwaInterpFwc.sim | 195 ++++++++++++++++++
 5 files changed, 224 insertions(+)

diff --git a/source/libs/executor/inc/executorInt.h b/source/libs/executor/inc/executorInt.h
index 039c0fa68b..271e9c91a1 100644
--- a/source/libs/executor/inc/executorInt.h
+++ b/source/libs/executor/inc/executorInt.h
@@ -538,6 +538,7 @@ typedef struct SStreamScanInfo {
   int32_t      pkColLen;
   bool         useGetResultRange;
   STimeWindow  lastScanRange;
+  SSDataBlock* pRangeScanRes;  // update SSDataBlock
 } SStreamScanInfo;
 
 typedef struct {
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 095d7e1a2b..07b7d052a2 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -3839,6 +3839,11 @@ FETCH_NEXT_BLOCK:
       } break;
     case STREAM_SCAN_FROM_DATAREADER_RANGE:
     case STREAM_SCAN_FROM_DATAREADER_RETRIEVE: {
+      if (pInfo->pRangeScanRes != NULL) {
+        (*ppRes) = pInfo->pRangeScanRes;
+        pInfo->pRangeScanRes = NULL;
+        return code;
+      }
       SSDataBlock* pSDB = NULL;
       code = doRangeScan(pInfo, pInfo->pUpdateRes, pInfo->primaryTsIndex, &pInfo->updateResIndex, &pSDB);
       QUERY_CHECK_CODE(code, lino, _end);
@@ -3852,6 +3857,15 @@ FETCH_NEXT_BLOCK:
         printSpecDataBlock(pSDB, getStreamOpName(pOperator->operatorType), "update", GET_TASKID(pTaskInfo));
         code = calBlockTbName(pInfo, pSDB, 0);
         QUERY_CHECK_CODE(code, lino, _end);
+
+        if (pInfo->pCreateTbRes->info.rows > 0) {
+          printSpecDataBlock(pInfo->pCreateTbRes, getStreamOpName(pOperator->operatorType), "update",
+                             GET_TASKID(pTaskInfo));
+          (*ppRes) = pInfo->pCreateTbRes;
+          pInfo->pRangeScanRes = pSDB;
+          return code;
+        }
+
         (*ppRes) = pSDB;
         return code;
       }
@@ -4629,6 +4643,7 @@ int32_t createStreamScanOperatorInfo(SReadHandle* pHandle, STableScanPhysiNode*
   pInfo->readerFn = pTaskInfo->storageAPI.tqReaderFn;
   pInfo->pFillSup = NULL;
pInfo->useGetResultRange = false; + pInfo->pRangeScanRes = NULL; code = createSpecialDataBlock(STREAM_CHECKPOINT, &pInfo->pCheckpointRes); QUERY_CHECK_CODE(code, lino, _error); diff --git a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c index b120bb6374..efc5dd6d6a 100644 --- a/source/libs/executor/src/streamtimesliceoperator.c +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -1481,6 +1481,18 @@ void doBuildTimeSlicePointResult(SStreamAggSupporter* pAggSup, STimeWindowAggSup pBlock->info.id.groupId = pKey->groupId; } } + void* tbname = NULL; + int32_t winCode = TSDB_CODE_SUCCESS; + code = + pAggSup->stateStore.streamStateGetParName(pAggSup->pState, pBlock->info.id.groupId, &tbname, false, &winCode); + QUERY_CHECK_CODE(code, lino, _end); + if (winCode != TSDB_CODE_SUCCESS) { + pBlock->info.parTbName[0] = 0; + } else { + memcpy(pBlock->info.parTbName, tbname, TSDB_TABLE_NAME_LEN); + } + pAggSup->stateStore.streamStateFreeVal(tbname); + SSlicePoint curPoint = {.key.ts = pKey->ts, .key.groupId = pKey->groupId}; SSlicePoint prevPoint = {0}; SSlicePoint nextPoint = {0}; diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 492dd11177..0378a932b6 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1417,6 +1417,7 @@ ,,y,script,./test.sh -f tsim/stream/streamTwaFwcFill.sim ,,y,script,./test.sh -f tsim/stream/streamTwaFwcFillPrimaryKey.sim ,,y,script,./test.sh -f tsim/stream/streamTwaFwcIntervalPrimaryKey.sim +,,y,script,./test.sh -f tsim/stream/streamTwaInterpFwc.sim ,,y,script,./test.sh -f tsim/stream/triggerInterval0.sim ,,y,script,./test.sh -f tsim/stream/triggerSession0.sim ,,y,script,./test.sh -f tsim/stream/udTableAndCol0.sim diff --git a/tests/script/tsim/stream/streamTwaInterpFwc.sim b/tests/script/tsim/stream/streamTwaInterpFwc.sim index 2073378e92..ce76387c91 100644 --- a/tests/script/tsim/stream/streamTwaInterpFwc.sim +++ b/tests/script/tsim/stream/streamTwaInterpFwc.sim @@ -109,6 +109,201 @@ if $data01 != $query1_data01 then return -1 endi +print step2 +print =============== create database +sql create database test4 vgroups 4; +sql use test4; + +sql create stable st(ts timestamp,a int,b int,c int) tags(ta int,tb int,tc int); +sql create table t1 using st tags(1,1,1); +sql create table t2 using st tags(2,2,2); + + +sql create stream streams6 trigger FORCE_WINDOW_CLOSE IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt6 TAGS(cc int) SUBTABLE(concat(concat("tbn-", tbname), "_1")) as select _irowts, interp(a), _isfilled as a1 from st partition by tbname, b as cc every(2s) fill(prev); +sql create stream streams7 trigger FORCE_WINDOW_CLOSE IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt7 TAGS(cc int) SUBTABLE(concat(concat("tbn-", tbname), "_2")) as select _wstart, twa(a) from st partition by tbname, b as cc interval(2s) fill(NULL); +sql create stream streams8 trigger FORCE_WINDOW_CLOSE IGNORE EXPIRED 1 IGNORE UPDATE 1 into streamt8 TAGS(cc int) SUBTABLE(concat(concat("tbn-", tbname), "_3")) as select _wstart, count(a) from st partition by tbname, b as cc interval(2s); + +run tsim/stream/checkTaskStatus.sim + +sql insert into t1 values(now + 3s,1,1,1); + +$loop_count = 0 +loop6: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select cc,* from streamt6; +sql select cc,* from streamt6; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 
$data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop6 +endi + +if $data00 != 1 then + return -1 +endi + +print 3 sql select * from information_schema.ins_tables where stable_name = "streamt6"; +sql select * from information_schema.ins_tables where stable_name = "streamt6"; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 1 then + return -1 +endi + +print 4 sql select * from information_schema.ins_tables where stable_name = "streamt6" and table_name like "tbn-t1_1%"; +sql select * from information_schema.ins_tables where stable_name = "streamt6" and table_name like "tbn-t1_1%"; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 1 then + return -1 +endi + +$loop_count = 0 +loop7: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select cc,* from streamt7; +sql select cc,* from streamt7; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 2 then + print ======rows=$rows + goto loop7 +endi + +if $data00 != 1 then + return -1 +endi + +print 3 sql select * from information_schema.ins_tables where stable_name = "streamt7"; +sql select * from information_schema.ins_tables where stable_name = "streamt7"; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 1 then + return -1 +endi + +print 4 sql select * from information_schema.ins_tables where stable_name = "streamt7" and table_name like "tbn-t1_2%"; +sql select * from information_schema.ins_tables where stable_name = "streamt7" and table_name like "tbn-t1_2%"; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + +if $rows != 1 then + return -1 +endi + +$loop_count = 0 +loop8: + +sleep 2000 + +$loop_count = $loop_count + 1 +if $loop_count == 20 then + return -1 +endi + +print 2 sql select cc,* from streamt8; +sql select cc,* from streamt8; + +print $data00 $data01 $data02 $data03 $data04 +print $data10 $data11 $data12 $data13 $data14 +print $data20 $data21 $data22 $data23 $data24 +print $data30 $data31 $data32 $data33 $data34 +print $data40 $data41 $data42 $data43 $data44 +print $data50 $data51 $data52 $data53 $data54 + + +# row 0 +if $rows < 1 then + print ======rows=$rows + goto loop8 +endi + +if $data00 != 1 
then
  return -1
endi

print 3 sql select * from information_schema.ins_tables where stable_name = "streamt8";
sql select * from information_schema.ins_tables where stable_name = "streamt8";

print $data00 $data01 $data02 $data03 $data04
print $data10 $data11 $data12 $data13 $data14
print $data20 $data21 $data22 $data23 $data24
print $data30 $data31 $data32 $data33 $data34
print $data40 $data41 $data42 $data43 $data44
print $data50 $data51 $data52 $data53 $data54

if $rows != 1 then
  return -1
endi

print 4 sql select * from information_schema.ins_tables where stable_name = "streamt8" and table_name like "tbn-t1_3%";
sql select * from information_schema.ins_tables where stable_name = "streamt8" and table_name like "tbn-t1_3%";

print $data00 $data01 $data02 $data03 $data04
print $data10 $data11 $data12 $data13 $data14
print $data20 $data21 $data22 $data23 $data24
print $data30 $data31 $data32 $data33 $data34
print $data40 $data41 $data42 $data43 $data44
print $data50 $data51 $data52 $data53 $data54

if $rows != 1 then
  return -1
endi

print end
system sh/exec.sh -n dnode1 -s stop -x SIGINT

From ab7badb0c00f27d847ac0a85a6136d47ef0c666d Mon Sep 17 00:00:00 2001
From: Jinqing Kuang
Date: Mon, 25 Nov 2024 14:58:42 +0800
Subject: [PATCH 03/13] fix(query)[TD-33006]. resolve wild pointer release
 issue in tsdbCreateReader

Initialize pointer member variables in tsdbCreateReader to prevent random
memory errors. This addresses a bug in which uninitialized pointers were
freed during error cleanup.

---
 source/dnode/vnode/src/tsdb/tsdbRead2.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/source/dnode/vnode/src/tsdb/tsdbRead2.c b/source/dnode/vnode/src/tsdb/tsdbRead2.c
index ac8e8505e4..05ae4be74b 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead2.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead2.c
@@ -210,7 +210,7 @@ static int32_t setColumnIdSlotList(SBlockLoadSuppInfo* pSupInfo, SColumnInfo* pC
   pSupInfo->smaValid = true;
   pSupInfo->numOfCols = numOfCols;
 
-  pSupInfo->colId = taosMemoryMalloc(numOfCols * (sizeof(int16_t) * 2 + POINTER_BYTES));
+  pSupInfo->colId = taosMemoryCalloc(numOfCols, sizeof(int16_t) * 2 + POINTER_BYTES);
   TSDB_CHECK_NULL(pSupInfo->colId, code, lino, _end, terrno);
 
   pSupInfo->slotId = (int16_t*)((char*)pSupInfo->colId + (sizeof(int16_t) * numOfCols));

From 7bbda0c92aa0a54634eb9c386cdc144dba05dc9a Mon Sep 17 00:00:00 2001
From: Jinqing Kuang
Date: Mon, 25 Nov 2024 15:09:11 +0800
Subject: [PATCH 04/13] fix(query)[TD-33008]. fix error handling in
 tsdbCacheRead

Fix an issue in which a pointer was dereferenced after it had been freed
during error handling in tsdbCacheRead, which would cause a crash.
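The failure mode in this patch and the previous one is the same: cleanup code touching memory that was never initialized or has already been released. The sketch below is a simplified, hypothetical illustration of the safe pattern (zero-initialized allocation plus capture-before-free); the type and function names are placeholders, not the actual TDengine code.

```c
#include <stdlib.h>

typedef struct {
  void *pData; /* secondary buffer owned by the reader */
} SRowKey;

typedef struct {
  SRowKey rowKey;
} SReader;

/* Open a reader, releasing partial state safely if allocation fails. */
static int openReader(SReader **ppReader) {
  SReader *p = calloc(1, sizeof(*p)); /* zero-init: cleanup never sees wild pointers */
  if (p == NULL) return -1;

  p->rowKey.pData = calloc(1, 16);
  if (p->rowKey.pData == NULL) {
    int code = -1; /* capture the error code before freeing p ... */
    free(p);       /* ... because p must not be dereferenced afterwards */
    return code;
  }

  *ppReader = p;
  return 0;
}
```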
--- source/dnode/vnode/src/tsdb/tsdbCacheRead.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c index 0f524e22d7..f5aeb609d5 100644 --- a/source/dnode/vnode/src/tsdb/tsdbCacheRead.c +++ b/source/dnode/vnode/src/tsdb/tsdbCacheRead.c @@ -346,7 +346,8 @@ int32_t tsdbCacherowsReaderOpen(void* pVnode, int32_t type, void* pTableIdList, p->rowKey.pks[0].pData = taosMemoryCalloc(1, pPkCol->bytes); if (p->rowKey.pks[0].pData == NULL) { taosMemoryFreeClear(p); - TSDB_CHECK_NULL(p->rowKey.pks[0].pData, code, lino, _end, terrno); + code = terrno; + TSDB_CHECK_CODE(code, lino, _end); } } From 41526e08d4f7423368a62bbcb154a4120f048688 Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Mon, 25 Nov 2024 16:07:41 +0800 Subject: [PATCH 05/13] docs: move sql manual assets for compatibility with cloud --- .../en/14-reference/03-taos-sql/12-distinguished.md | 10 +++++----- .../assets/time-series-extensions-01.png | Bin .../assets/time-series-extensions-02.png | Bin .../assets/time-series-extensions-03.png | Bin .../assets/time-series-extensions-04.png | Bin .../assets/time-series-extensions-05.png | Bin 6 files changed, 5 insertions(+), 5 deletions(-) rename docs/en/{ => 14-reference/03-taos-sql}/assets/time-series-extensions-01.png (100%) rename docs/en/{ => 14-reference/03-taos-sql}/assets/time-series-extensions-02.png (100%) rename docs/en/{ => 14-reference/03-taos-sql}/assets/time-series-extensions-03.png (100%) rename docs/en/{ => 14-reference/03-taos-sql}/assets/time-series-extensions-04.png (100%) rename docs/en/{ => 14-reference/03-taos-sql}/assets/time-series-extensions-05.png (100%) diff --git a/docs/en/14-reference/03-taos-sql/12-distinguished.md b/docs/en/14-reference/03-taos-sql/12-distinguished.md index 23961ecfd0..009b59cc5d 100644 --- a/docs/en/14-reference/03-taos-sql/12-distinguished.md +++ b/docs/en/14-reference/03-taos-sql/12-distinguished.md @@ -5,11 +5,11 @@ slug: /tdengine-reference/sql-manual/time-series-extensions --- import Image from '@theme/IdealImage'; -import imgStep01 from '../../assets/time-series-extensions-01.png'; -import imgStep02 from '../../assets/time-series-extensions-02.png'; -import imgStep03 from '../../assets/time-series-extensions-03.png'; -import imgStep04 from '../../assets/time-series-extensions-04.png'; -import imgStep05 from '../../assets/time-series-extensions-05.png'; +import imgStep01 from './assets/time-series-extensions-01.png'; +import imgStep02 from './assets/time-series-extensions-02.png'; +import imgStep03 from './assets/time-series-extensions-03.png'; +import imgStep04 from './assets/time-series-extensions-04.png'; +import imgStep05 from './assets/time-series-extensions-05.png'; On top of supporting standard SQL, TDengine also offers a series of unique query syntaxes that meet the needs of time series business scenarios, greatly facilitating the development of applications in these contexts. 
diff --git a/docs/en/assets/time-series-extensions-01.png b/docs/en/14-reference/03-taos-sql/assets/time-series-extensions-01.png similarity index 100% rename from docs/en/assets/time-series-extensions-01.png rename to docs/en/14-reference/03-taos-sql/assets/time-series-extensions-01.png diff --git a/docs/en/assets/time-series-extensions-02.png b/docs/en/14-reference/03-taos-sql/assets/time-series-extensions-02.png similarity index 100% rename from docs/en/assets/time-series-extensions-02.png rename to docs/en/14-reference/03-taos-sql/assets/time-series-extensions-02.png diff --git a/docs/en/assets/time-series-extensions-03.png b/docs/en/14-reference/03-taos-sql/assets/time-series-extensions-03.png similarity index 100% rename from docs/en/assets/time-series-extensions-03.png rename to docs/en/14-reference/03-taos-sql/assets/time-series-extensions-03.png diff --git a/docs/en/assets/time-series-extensions-04.png b/docs/en/14-reference/03-taos-sql/assets/time-series-extensions-04.png similarity index 100% rename from docs/en/assets/time-series-extensions-04.png rename to docs/en/14-reference/03-taos-sql/assets/time-series-extensions-04.png diff --git a/docs/en/assets/time-series-extensions-05.png b/docs/en/14-reference/03-taos-sql/assets/time-series-extensions-05.png similarity index 100% rename from docs/en/assets/time-series-extensions-05.png rename to docs/en/14-reference/03-taos-sql/assets/time-series-extensions-05.png From 866f9da77bb6a6f71b6e87f3d40bbcfa80feb814 Mon Sep 17 00:00:00 2001 From: factosea <285808407@qq.com> Date: Mon, 25 Nov 2024 18:46:30 +0800 Subject: [PATCH 06/13] fix: add not case --- tests/parallel_test/cases.task | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index 492dd11177..6081b177cf 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -600,6 +600,8 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/normal.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/normal.py -R +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/not.py +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/not.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mode.py ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mode.py -R ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/Now.py @@ -780,6 +782,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/normal.py -Q 2 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/not.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mode.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -Q 2 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -Q 2 @@ -879,6 +882,7 @@ ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/max.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/normal.py -Q 3 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/not.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mode.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -Q 3 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -Q 3 @@ -977,6 +981,7 @@ ,,y,system-test,./pytest.sh 
python3 ./test.py -f 2-query/max.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/min.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/normal.py -Q 4 +,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/not.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/mode.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/count.py -Q 4 ,,y,system-test,./pytest.sh python3 ./test.py -f 2-query/countAlwaysReturnValue.py -Q 4 From 25b8d45fd9cd34b0b13191d9ed9a4780ac3c468f Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Mon, 25 Nov 2024 21:57:10 +0800 Subject: [PATCH 07/13] docs: update active-active doc --- docs/en/08-operation/18-dual.md | 219 ++++++++++++++------------------ 1 file changed, 98 insertions(+), 121 deletions(-) diff --git a/docs/en/08-operation/18-dual.md b/docs/en/08-operation/18-dual.md index 83c4f55b4a..8e279f0c05 100644 --- a/docs/en/08-operation/18-dual.md +++ b/docs/en/08-operation/18-dual.md @@ -1,37 +1,66 @@ --- -title: Active-Standby Deployment +title: Active-Active Deployment slug: /operations-and-maintenance/active-standby-deployment --- import Image from '@theme/IdealImage'; import imgDual from '../assets/active-standby-deployment-01.png'; -This section introduces the configuration and usage of the TDengine Active-Active System. +:::info[Version Note] -1. Some users can only deploy two servers due to the uniqueness of their deployment environment, while also hoping to achieve a certain level of service high availability and data high reliability. This article primarily describes the product behavior of the TDengine Active-Active System based on two key technologies: data replication and client failover. This includes the architecture, configuration, and operation and maintenance of the Active-Active System. The TDengine Active-Active feature can be used in resource-constrained environments, as previously mentioned, as well as in disaster recovery scenarios between two TDengine clusters (regardless of resources). The Active-Active feature is unique to TDengine Enterprise and was first released in version 3.3.0.0. It is recommended to use the latest version. +This feature is available only in TDengine Enterprise 3.3.0.0 and later. -2. The definition of an Active-Active system is: there are only two servers in the business system, each deploying a set of services. From the business layer's perspective, these two machines and two sets of services constitute a complete system, with the details of the underlying system not needing to be perceived by the business layer. The two nodes in the Active-Active system are usually referred to as Master-Slave, meaning "primary-secondary" or "primary-backup," and this document may mix these terms. +::: -3. The deployment architecture diagram of the TDengine Active-Active System is as follows, involving three key points: - 1. Failover of the dual system is implemented by the Client Driver, meaning the switch between primary and secondary nodes when the primary node goes down. - 2. Data replication is achieved from the (current) primary node to the secondary node via taosX. - 3. The write interface of data subscriptions adds a special mark in the WAL when writing replicated data, while the read interface of data subscriptions automatically filters out the data with that special mark during reads to avoid infinite loops caused by repeated replication. 
+You can deploy TDengine in active-active mode to achieve high availability and reliability with limited resources. Active-active mode is also used in disaster recovery strategies to maintain offsite replicas of the database. -Note: The diagram below uses a single TDengine instance as an example, but in actual deployment, one host in the diagram can be replaced by any number of TDengine clusters. +In active-active mode, you create two separate TDengine deployments, one acting as the primary node and the other as the secondary node. Data is replicated in real time between the primary and secondary nodes via TDengine's built-in data subscription component. Note that each node in an active-active deployment can be a single TDengine instance or a cluster. + +In the event that the primary node cannot provide service, the client driver fails over to the secondary node. This failover is automatic and transparent to the business layer. + +Replicated data is specially marked to avoid infinite loops. The architecture of an active-active deployment is described in the following figure.
+
Figure 1. TDengine in active-active mode
-## Configuration +## Limitations -### Cluster Configuration +The following limitations apply to active-active deployments: -The Active-Active feature imposes no specific requirements on the configuration of the TDengine cluster itself, but there is a certain requirement regarding the WAL retention period for databases to be synchronized between the Active-Active systems. A longer WAL retention period increases the fault tolerance of the Active-Active system; if the backup node is down for a period exceeding the WAL retention period on the primary node, data loss on the backup node is inevitable. Even if the downtime of the backup node does not exceed the WAL retention period on the primary node, there is still a certain probability of data loss, depending on the proximity and speed of data synchronization. +1. You cannot use the data subscription APIs when active-active mode is enabled. +2. You cannot use the parameter binding interface while active-active mode is enabled. +3. The primary and secondary nodes must be identical. Database names, all configuration parameters, usernames, passwords, and permission settings must be exactly the same. +4. You can connect to an active-active deployment only through the Java client library in WebSocket mode. +5. Do not use the `USE ` statement to set a context. Instead, specify the database in the connection parameters. -### Client Configuration +## Cluster Configuration -Currently, only the Java connector supports Active-Active in WebSocket connection mode. The configuration example is as follows: +It is not necessary to configure your cluster specifically for active-active mode. However, note that the WAL retention period affects the fault tolerance of an active-active deployment. This is because data loss will occur If the secondary node is unreachable for a period of time exceeding the configured WAL retention period. Data lost in this manner can only be recovered manually. + +## Enable Active-Active Mode + +1. Create two identical TDengine deployments. For more information, see [Get Started](../../get-started/). +2. Ensure that the taosd and taosx service are running on both deployments. +3. On the deployment that you have designated as the primary node, run the following command to start the replication service: + + ```shell + taosx replica start -f -t [database] + ``` + + - The source endpoint is the FQDN of TDengine on the primary node. + - The sink endpoint is the FQDN of TDengine on the secondary node. + - You can use the native connection (port 6030) or WebSocket connection (port 6041). + - You can specify one or more databases to replicate only the data contained in those databases. If you do not specify a database, all databases on the node are replicated except for `information_schema`, `performance_schema`, `log`, and `audit`. + + When the command is successful, the replica ID is displayed. You can use this ID to add other databases to the replication task if necessary. + +4. Run the same command on the secondary node, specifying the FQDN of TDengine on the secondary node as the source endpoint and the FQDN of TDengine on the primary node as the sink endpoint. + +## Client Configuration + +Active-active mode is supported in the Java client library in WebSocket connection mode. 
The following is an example configuration: ```java url = "jdbc:TAOS-RS://" + host + ":6041/?user=root&password=taosdata"; @@ -45,136 +74,84 @@ properties.setProperty(TSDBDriver.PROPERTY_KEY_RECONNECT_RETRY_COUNT, "3"); connection = DriverManager.getConnection(url, properties); ``` -The configuration properties and their meanings are as follows: +These parameters are described as follows: | Property Name | Meaning | | ---------------------------------- | ------------------------------------------------------------ | -| PROPERTY_KEY_SLAVE_CLUSTER_HOST | Hostname or IP of the second node; defaults to empty | -| PROPERTY_KEY_SLAVE_CLUSTER_PORT | Port number of the second node; defaults to empty | -| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | Whether to enable automatic reconnection; effective only in WebSocket mode. true: enable, false: disable; default is false. In Active-Active scenarios, please set to true. | -| PROPERTY_KEY_RECONNECT_INTERVAL_MS | Interval for reconnection in milliseconds; default is 2000 milliseconds (2 seconds); minimum is 0 (immediate retry); no maximum limit. | -| PROPERTY_KEY_RECONNECT_RETRY_COUNT | Maximum number of retries per node; default is 3; minimum is 0 (no retries); no maximum limit. | +| PROPERTY_KEY_SLAVE_CLUSTER_HOST | Enter the hostname or IP address of the secondary node. | +| PROPERTY_KEY_SLAVE_CLUSTER_PORT | Enter the port number of the secondary node. | +| PROPERTY_KEY_ENABLE_AUTO_RECONNECT | Specify whether to enable automatic reconnection. For active-active mode, set the value of this parameter to true. | +| PROPERTY_KEY_RECONNECT_INTERVAL_MS | Enter the interval in milliseconds at which reconnection is attempted. The default value is 2000. You can enter 0 to attempt to reconnect immediately. There is no maximum limit. | +| PROPERTY_KEY_RECONNECT_RETRY_COUNT | Enter the maximum number of retries per node. The default value is 3. There is no maximum limit. | -### Constraints +## Command Reference -1. Applications cannot use the subscription interface; if Active-Active parameters are configured, it will cause the creation of consumers to fail. -2. It is not recommended for applications to use parameter binding for writes and queries; if used, the application must address the issue of invalidated related objects after a connection switch. -3. In Active-Active scenarios, it is not recommended for user applications to explicitly call `use database`; the database should be specified in the connection parameters. -4. The clusters at both ends of the Active-Active configuration must be homogeneous (i.e., the naming of databases, all configuration parameters, usernames, passwords, and permission settings must be exactly the same). -5. Only WebSocket connection mode is supported. +You can manage your active-active deployment with the following commands: -## Operation and Maintenance Commands +1. Use an existing replica ID to add databases to an existing replication task: -The TDengine Active-Active System provides several operation and maintenance tools that can automate the configuration of taosX, and allow one-click starting, restarting, and stopping (on single-node environments) of all Active-Active components. + ```shell + taosx replica start -i [database...] + ``` -### Starting the Active-Active Task + :::note + - This command cannot create duplicate tasks. It only adds the specified databases to the specified task. + - The replica ID is globally unique within a taosX instance and is independent of the source/sink combination. 
-```shell -taosx replica start -``` + ::: -This command is used to start the data replication task in the Active-Active system, where both the taosd and taosX on the specified two hosts are in an online state. +2. Check the status of a task: -1. Method One + ```shell + taosx replica status [id...] + ``` -```shell - - taosx replica start -f source_endpoint -t sink_endpoint [database...] -``` + This command returns the list and status of active-active synchronization tasks created on the current machine. You can specify one or more replica IDs to obtain their task lists and status. An example output is as follows: -Establish a synchronization task from `source_endpoint` to `sink_endpoint` in the taosx service on the current machine. After successfully running this command, the replica ID will be printed to the console (referred to as `id` later). -The input parameters `source_endpoint` and `sink_endpoint` are mandatory, formatted as `td2:6030`. For example: + ```shell + +---------+----------+----------+----------+------+-------------+----------------+ + | replica | task | source | sink | database | status | note | + +---------+----------+----------+----------+------+-------------+----------------+ + | a | 2 | td1:6030 | td2:6030 | opc | running | | + | a | 3 | td2:6030 | td2:6030 | test | interrupted | Error reason | + ``` -```shell -taosx replica start -f td1:6030 -t td2:6030 -``` +3. Stop a replication task: -This example command will automatically create a synchronization task for all databases except `information_schema`, `performance_schema`, `log`, and `audit`. You can specify the endpoint using `http://td2:6041` to use the WebSocket interface (default is the native interface). You can also specify database synchronization: `taosx replica start -f td1:6030 -t td2:6030 db1` will create synchronization tasks only for the specified database. + ```shell + taosx replica stop [id [db...]] + ``` -2. Method Two + If you specify a database, replication for that database is stopped. If you do not specify a database, all replication tasks on the ID are stopped. If you do not specify an ID, all replication tasks on the instance are stopped. -```shell -taosx replica start -i id [database...] -``` +4. Restart a replication task: -Use the already created Replica ID (`id`) to add other databases to that synchronization task. + ```shell + taosx replica restart [id [db...]] + ``` -:::note + If you specify a database, replication for that database is restarted. If you do not specify a database, all replication tasks in the instance are restarted. If you do not specify an ID, all replication tasks on the instance are restarted. -- Repeated use of this command will not create duplicate tasks; it will only add the specified databases to the corresponding task. -- The replica ID is globally unique within a taosX instance and is independent of the `source/sink` combination. -- For ease of memory, the replica ID is a randomly chosen common word, and the system automatically maps the `source/sink` combination to a word list to obtain a unique available word. +5. Check the progress of a replication task: -::: + ```shell + taosx replica diff [id [db....]] + ``` -### Checking Task Status + This command outputs the difference between the subscribed offset in the current dual-replica synchronization task and the latest WAL (not representing row counts), for example: -```shell -taosx replica status [id...] 
-``` + ```shell + +---------+----------+----------+----------+-----------+---------+---------+------+ + | replica | database | source | sink | vgroup_id | current | latest | diff | + +---------+----------+----------+----------+-----------+---------+---------+------+ + | a | opc | td1:6030 | td2:6030 | 2 | 17600 | 17600 | 0 | + | ad | opc | td2:6030 | td2:6030 | 3 | 17600 | 17600 | 0 | + ``` -This returns the list and status of Active-Active synchronization tasks created on the current machine. You can specify one or more replica IDs to obtain their task lists and status. An example output is as follows: +6. Delete a replication task. -```shell -+---------+----------+----------+----------+------+-------------+----------------+ -| replica | task | source | sink | database | status | note | -+---------+----------+----------+----------+------+-------------+----------------+ -| a | 2 | td1:6030 | td2:6030 | opc | running | | -| a | 3 | td2:6030 | td2:6030 | test | interrupted | Error reason | -``` + ```shell + taosx replica remove [id] [--force] + ``` -### Stopping Active-Active Tasks - -```shell -taosx replica stop id [db...] -``` - -This command has the following effects: - -- Stops all or specified database synchronization tasks under the specified Replica ID. -- Using `taosx replica stop id1 db1` indicates stopping the synchronization task for `db1` under the `id1` replica. - -### Restarting Active-Active Tasks - -```shell -taosx replica restart id [db...] -``` - -This command has the following effects: - -- Restarts all or specified database synchronization tasks under the specified Replica ID. -- Using `taosx replica start id1 db1` only restarts the synchronization task for the specified database `db1`. - -### Checking Synchronization Progress - -```shell -taosx replica diff id [db....] -``` - -This command outputs the difference between the subscribed offset in the current dual-replica synchronization task and the latest WAL (not representing row counts), for example: - -```shell -+---------+----------+----------+----------+-----------+---------+---------+------+ -| replica | database | source | sink | vgroup_id | current | latest | diff | -+---------+----------+----------+----------+-----------+---------+---------+------+ -| a | opc | td1:6030 | td2:6030 | 2 | 17600 | 17600 | 0 | -| ad | opc | td2:6030 | td2:6030 | 3 | 17600 | 17600 | 0 | -``` - -### Deleting Active-Active Tasks - -```shell -taosx replica remove id [--force] -``` - -This deletes all current Active-Active synchronization tasks. Under normal circumstances, to delete a synchronization task, you need to first stop that task; however, when `--force` is enabled, it will forcibly stop and clear the task. - -### Recommended Usage Steps - -1. Assuming running on machine A, you need to first use `taosx replica start` to configure taosX, with input parameters being the addresses of the source and target servers to synchronize. After configuration, the synchronization service and tasks will automatically start. It is assumed that the taosx service uses the standard port and the synchronization task uses the native connection. -2. The steps on machine B are the same. -3. After starting services on both machines, the Active-Active system can provide services. -4. After the configuration is completed, if you want to restart the Active-Active system, please use the restart subcommand. - -## Exception Cases - -If the downtime recovery time exceeds the WAL retention duration, data loss may occur. 
In this case, the automatic data synchronization of the taosX service in the Active-Active system cannot handle the situation. Manual judgment is required to identify which data is lost, followed by starting additional taosX tasks to replicate the missing data. + This command deletes all stopped replication tasks on the specified ID. If you do not specify an ID, all stopped replication tasks on the instance are deleted. You can include the `--force` argument to delete all tasks without stopping them first. From 51c445a818fa4b9f9de36a043e919a4b03001428 Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Mon, 25 Nov 2024 22:02:48 +0800 Subject: [PATCH 08/13] docs: correct slug --- docs/en/08-operation/18-dual.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/08-operation/18-dual.md b/docs/en/08-operation/18-dual.md index 8e279f0c05..21131226d5 100644 --- a/docs/en/08-operation/18-dual.md +++ b/docs/en/08-operation/18-dual.md @@ -1,6 +1,6 @@ --- title: Active-Active Deployment -slug: /operations-and-maintenance/active-standby-deployment +slug: /operations-and-maintenance/active-active-deployment --- import Image from '@theme/IdealImage'; From c30883b59c3aab77088d0c5516108cabbe094740 Mon Sep 17 00:00:00 2001 From: danielclow <106956386+danielclow@users.noreply.github.com> Date: Mon, 25 Nov 2024 22:55:12 +0800 Subject: [PATCH 09/13] docs: correct term --- docs/en/08-operation/18-dual.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/en/08-operation/18-dual.md b/docs/en/08-operation/18-dual.md index 21131226d5..681e53429f 100644 --- a/docs/en/08-operation/18-dual.md +++ b/docs/en/08-operation/18-dual.md @@ -138,7 +138,7 @@ You can manage your active-active deployment with the following commands: taosx replica diff [id [db....]] ``` - This command outputs the difference between the subscribed offset in the current dual-replica synchronization task and the latest WAL (not representing row counts), for example: + This command outputs the difference between the subscribed offset in the current active-active replication task and the latest WAL (not representing row counts), for example: ```shell +---------+----------+----------+----------+-----------+---------+---------+------+ From 721170dcb30b63d698b2b90ca4d1ac3e8fdabdb2 Mon Sep 17 00:00:00 2001 From: 54liuyao <54liuyao@163.com> Date: Tue, 26 Nov 2024 08:45:44 +0800 Subject: [PATCH 10/13] opt stream time slice --- source/libs/executor/src/streamtimesliceoperator.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/source/libs/executor/src/streamtimesliceoperator.c b/source/libs/executor/src/streamtimesliceoperator.c index efc5dd6d6a..d307c651fc 100644 --- a/source/libs/executor/src/streamtimesliceoperator.c +++ b/source/libs/executor/src/streamtimesliceoperator.c @@ -748,7 +748,7 @@ _end: static int32_t getResultInfoFromState(SStreamAggSupporter* pAggSup, SStreamFillSupporter* pFillSup, TSKEY ts, int64_t groupId, SSlicePoint* pCurPoint, SSlicePoint* pPrevPoint, - SSlicePoint* pNextPoint) { + SSlicePoint* pNextPoint, bool isFwc) { int32_t code = TSDB_CODE_SUCCESS; int32_t lino = 0; int32_t tmpRes = TSDB_CODE_SUCCESS; @@ -769,6 +769,10 @@ static int32_t getResultInfoFromState(SStreamAggSupporter* pAggSup, SStreamFillS setPointBuff(pCurPoint, pFillSup); pFillSup->cur.key = pCurPoint->pRightRow->key; pFillSup->cur.pRowVal = (SResultCellData*)pCurPoint->pRightRow->pRowVal; + if (isFwc) { + qDebug("===stream=== only get current 
point state");
+      goto _end;
+    }
   } else {
     pFillSup->cur.key = pCurPoint->key.ts + 1;
   }
@@ -1466,6 +1470,7 @@ void doBuildTimeSlicePointResult(SStreamAggSupporter* pAggSup, STimeWindowAggSup
     return;
   }
 
+  bool isFwc = (pTwSup->calTrigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE);
   // clear the existed group id
   pBlock->info.id.groupId = 0;
   int32_t numOfRows = getNumOfTotalRes(pGroupResInfo);
@@ -1497,7 +1502,7 @@ void doBuildTimeSlicePointResult(SStreamAggSupporter* pAggSup, STimeWindowAggSup
     SSlicePoint prevPoint = {0};
     SSlicePoint nextPoint = {0};
     if (pFillSup->type != TSDB_FILL_LINEAR) {
-      code = getResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint);
+      code = getResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint, isFwc);
     } else {
       code = getLinearResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint,
                                           &nextPoint);
@@ -1516,7 +1521,7 @@ void doBuildTimeSlicePointResult(SStreamAggSupporter* pAggSup, STimeWindowAggSup
     }
   }
 
-  if (pTwSup->calTrigger == STREAM_TRIGGER_FORCE_WINDOW_CLOSE) {
+  if (isFwc) {
     setForceWindowCloseFillRule(pFillSup, pFillInfo, pKey->ts);
   } else {
     setTimeSliceFillRule(pFillSup, pFillInfo, pKey->ts);
@@ -1558,7 +1563,7 @@ static void doBuildTimeSliceDeleteResult(SStreamAggSupporter* pAggSup, SStreamFi
     SSlicePoint nextPoint = {0};
     STimeWindow tw = {0};
     if (pFillSup->type != TSDB_FILL_LINEAR) {
-      code = getResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint);
+      code = getResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint, &nextPoint, false);
     } else {
       code = getLinearResultInfoFromState(pAggSup, pFillSup, pKey->ts, pKey->groupId, &curPoint, &prevPoint,
                                           &nextPoint);

From 9525ec823dbc030ee9a8a40b8824975dc9e39dbc Mon Sep 17 00:00:00 2001
From: sheyanjie-qq <249478495@qq.com>
Date: Mon, 25 Nov 2024 19:07:11 +0800
Subject: [PATCH 11/13] update jdbc demo, and version history

---
 docs/en/07-develop/01-connect.md              |   2 +-
 docs/en/14-reference/05-connector/14-java.mdx |   2 +
 docs/examples/JDBC/mybatisplus-demo/pom.xml   |   2 +-
 .../mybatisplusdemo/domain/Meters.java        |  16 +++
 .../mybatisplusdemo/mapper/MetersMapper.java  |  31 +++++
 .../src/main/resources/application.yml        |   4 +-
 .../mapper/MetersMapperTest.java              | 112 ++++++++++++++++++
 docs/examples/JDBC/springbootdemo/pom.xml     |   2 +-
 docs/zh/07-develop/01-connect/index.md        |   2 +-
 docs/zh/14-reference/05-connector/14-java.mdx |   1 +
 10 files changed, 168 insertions(+), 6 deletions(-)
 create mode 100644 docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Meters.java
 create mode 100644 docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java
 create mode 100644 docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java

diff --git a/docs/en/07-develop/01-connect.md b/docs/en/07-develop/01-connect.md
index a1fd6136fe..4086f15193 100644
--- a/docs/en/07-develop/01-connect.md
+++ b/docs/en/07-develop/01-connect.md
@@ -94,7 +94,7 @@ If you are using Maven to manage the project, simply add the following dependenc
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.3</version>
+  <version>3.4.0</version>
 </dependency>
 
diff --git a/docs/en/14-reference/05-connector/14-java.mdx b/docs/en/14-reference/05-connector/14-java.mdx
index 7302348f2e..29f919b5cf 100644
--- a/docs/en/14-reference/05-connector/14-java.mdx
+++ b/docs/en/14-reference/05-connector/14-java.mdx
@@ -35,6 +35,8 @@ REST connections are supported on all platforms that can run Java.
 
 | taos-jdbcdriver Version | Major Changes | TDengine Version |
 | :----------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------: |
+| 3.4.0 | 1. Replaced the fastjson library with the Jackson library; 2. The WebSocket connection now uses an independent protocol identifier; 3. Optimized use of the backend pull thread to avoid timeouts caused by misuse. | - |
+| 3.3.4 | 1. Fixed an error in getInt when the data type is FLOAT. | - |
 | 3.3.3 | 1. Fixed memory leak caused by closing WebSocket statement | - |
 | 3.3.2 | 1. Optimized parameter binding performance under WebSocket connections; 2. Improved support for MyBatis | - |
 | 3.3.0 | 1. Optimized data transmission performance under WebSocket connections; 2. Supported skipping SSL verification, turned off by default | 3.3.2.0 and above |
diff --git a/docs/examples/JDBC/mybatisplus-demo/pom.xml b/docs/examples/JDBC/mybatisplus-demo/pom.xml
index f792946c96..2077e31d8d 100644
--- a/docs/examples/JDBC/mybatisplus-demo/pom.xml
+++ b/docs/examples/JDBC/mybatisplus-demo/pom.xml
@@ -47,7 +47,7 @@
         <dependency>
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
-           <version>3.2.4</version>
+           <version>3.4.0</version>
         </dependency>
 
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Meters.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Meters.java
new file mode 100644
index 0000000000..e886e56269
--- /dev/null
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/domain/Meters.java
@@ -0,0 +1,16 @@
+package com.taosdata.example.mybatisplusdemo.domain;
+
+import lombok.Data;
+
+import java.sql.Timestamp;
+
+@Data
+public class Meters {
+    private String tbname;
+    private Timestamp ts;
+    private float current;
+    private int voltage;
+    private float phase;
+    private int groupid;
+    private byte[] location;
+}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java
new file mode 100644
index 0000000000..441c340886
--- /dev/null
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java
@@ -0,0 +1,31 @@
+package com.taosdata.example.mybatisplusdemo.mapper;
+
+import com.baomidou.mybatisplus.core.mapper.BaseMapper;
+import com.taosdata.example.mybatisplusdemo.domain.Meters;
+import org.apache.ibatis.annotations.Insert;
+import org.apache.ibatis.annotations.Param;
+import org.apache.ibatis.annotations.Update;
+
+import java.util.List;
+
+public interface MetersMapper extends BaseMapper<Meters> {
+
+    @Update("CREATE STABLE IF NOT EXISTS meters (ts TIMESTAMP, current FLOAT, voltage INT, phase FLOAT) TAGS (groupId INT, location BINARY(24))")
+    int createTable();
+
+    @Insert("insert into meters (tbname, ts, groupid, location, current, voltage, phase) values(#{tbname}, #{ts}, #{groupid}, #{location}, #{current}, #{voltage}, #{phase})")
+    int insertOne(Meters one);
+
+    @Insert({
+            "<script>insert into meters (tbname, ts, groupid, location, current, voltage, phase) values " +
+            "<foreach collection='list' item='item' separator=','>" +
+            "(#{item.tbname}, #{item.ts}, #{item.groupid}, #{item.location}, #{item.current}, #{item.voltage}, #{item.phase})" +
+            "</foreach></script>"
+    })
+    int insertBatch(@Param("list") List<Meters> metersList);
+
+    @Update("drop stable if exists meters")
+    void dropTable();
+}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml b/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml
index 985ed1675e..e9855bf011 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/resources/application.yml
@@ -1,7 +1,7 @@
 spring:
   datasource:
-    driver-class-name: com.taosdata.jdbc.TSDBDriver
-    url: jdbc:TAOS://localhost:6030/mp_test?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8
+    driver-class-name: com.taosdata.jdbc.ws.WebSocketDriver
+    url: jdbc:TAOS-WS://localhost:6041/mp_test?charset=UTF-8&locale=en_US.UTF-8&timezone=UTC-8
     username: root
     password: taosdata
 
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java
new file mode 100644
index 0000000000..2d8458e9d9
--- /dev/null
+++ b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java
@@ -0,0 +1,112 @@
+package com.taosdata.example.mybatisplusdemo.mapper;
+
+import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
+import com.baomidou.mybatisplus.core.metadata.IPage;
+import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
+import com.taosdata.example.mybatisplusdemo.domain.Meters;
+import com.taosdata.example.mybatisplusdemo.domain.Weather;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.boot.test.context.SpringBootTest;
+import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+
+import java.sql.Timestamp;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Random;
+
+@RunWith(SpringJUnit4ClassRunner.class)
+@SpringBootTest
+public class MetersMapperTest {
+
+    private static Random random = new Random(System.currentTimeMillis());
+
+    @Autowired
+    private MetersMapper mapper;
+
+    @Before
+    public void createTable(){
+        mapper.dropTable();
+        mapper.createTable();
+        Meters one = new Meters();
+        one.setTbname("test_10001");
+        one.setGroupid(10001);
+        one.setCurrent(random.nextFloat());
+        one.setPhase(random.nextFloat());
+        one.setCurrent(12345);
+        one.setTs(new Timestamp(1605024000000l));
+        one.setLocation("望京".getBytes());
+        mapper.insertOne(one);
+    }
+
+    @Test
+    public void testSelectList() {
+        List<Meters> meters = mapper.selectList(null);
+        meters.forEach(System.out::println);
+    }
+
+    @Test
+    public void testInsertBatch() {
+        List<Meters> metersList = new LinkedList<>();
+        for (int i = 0; i < 100; i++){
+            Meters one = new Meters();
+            one.setTbname("tb_" + i);
+            one.setGroupid(i);
+            one.setCurrent(random.nextFloat());
+            one.setPhase(random.nextFloat());
+            one.setCurrent(random.nextInt());
+            one.setTs(new Timestamp(1605024000000l + i));
+            one.setLocation(("望京" + i).getBytes());
+            metersList.add(one);
+
+        }
+        int affectRows = mapper.insertBatch(metersList);
+        Assert.assertEquals(100, affectRows);
+    }
+
+    @Test
+    public void testSelectOne() {
+        QueryWrapper<Meters> wrapper = new QueryWrapper<>();
+        wrapper.eq("location", "望京".getBytes());
+        Meters one = mapper.selectOne(wrapper);
+        System.out.println(one);
+        Assert.assertEquals(12345, one.getCurrent(), 0.00f);
+        Assert.assertArrayEquals("望京".getBytes(), one.getLocation());
+    }
+
+    // @Test
+    // public void testSelectByMap() {
+    //     Map<String, Object> map = new HashMap<>();
+    //     map.put("location", "beijing");
+    //     List<Meters> weathers = mapper.selectByMap(map);
+    //     Assert.assertEquals(1, weathers.size());
+    // }
+
+    @Test
+    public void testSelectObjs() {
+        List<Object> ts = mapper.selectObjs(null);
+        System.out.println(ts);
+    }
+
+    @Test
+    public void testSelectCount() {
+        int count = mapper.selectCount(null);
+//        Assert.assertEquals(5, count);
+        System.out.println(count);
+    }
+
+    @Test
+    public void testSelectPage() {
+        IPage<Meters> page = new Page<>(1, 2);
+        IPage<Meters> metersIPage = mapper.selectPage(page, null);
+        System.out.println("total : " + metersIPage.getTotal());
+        System.out.println("pages : " + metersIPage.getPages());
+        for (Meters meters : metersIPage.getRecords()) {
+            System.out.println(meters);
+        }
+    }
+
+}
\ No newline at end of file
diff --git a/docs/examples/JDBC/springbootdemo/pom.xml b/docs/examples/JDBC/springbootdemo/pom.xml
index ee15f6013e..b3ead6cd11 100644
--- a/docs/examples/JDBC/springbootdemo/pom.xml
+++ b/docs/examples/JDBC/springbootdemo/pom.xml
@@ -68,7 +68,7 @@
         <dependency>
            <groupId>com.taosdata.jdbc</groupId>
            <artifactId>taos-jdbcdriver</artifactId>
-           <version>3.0.0</version>
+           <version>3.4.0</version>
         </dependency>
 
diff --git a/docs/zh/07-develop/01-connect/index.md b/docs/zh/07-develop/01-connect/index.md
index bd26bea46d..94f55967ec 100644
--- a/docs/zh/07-develop/01-connect/index.md
+++ b/docs/zh/07-develop/01-connect/index.md
@@ -89,7 +89,7 @@ TDengine provides a rich set of application development interfaces; to help users quickly
 <dependency>
   <groupId>com.taosdata.jdbc</groupId>
   <artifactId>taos-jdbcdriver</artifactId>
-  <version>3.3.3</version>
+  <version>3.4.0</version>
 </dependency>
 
diff --git a/docs/zh/14-reference/05-connector/14-java.mdx b/docs/zh/14-reference/05-connector/14-java.mdx
index 0a167dd5ee..e8554ae668 100644
--- a/docs/zh/14-reference/05-connector/14-java.mdx
+++ b/docs/zh/14-reference/05-connector/14-java.mdx
@@ -34,6 +34,7 @@ REST connections are supported on all platforms that can run Java.
 | taos-jdbcdriver Version | Major Changes | TDengine Version |
 | :------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------: |
 | 3.4.0 | 1. Replaced the fastjson library with the Jackson library; 2. The WebSocket connection now uses an independent protocol identifier; 3. Optimized use of the backend pull thread to avoid timeouts caused by misuse. | - |
+| 3.3.4 | 1. Fixed an error in getInt when the data type is FLOAT. | - |
 | 3.3.3 | 1. Fixed the memory leak caused by closing a WebSocket statement | - |
 | 3.3.2 | 1. Optimized parameter binding performance over WebSocket connections; 2. Improved support for MyBatis | - |
 | 3.3.0 | 1. Optimized data transmission performance over WebSocket connections; 2. Supported skipping SSL verification, disabled by default | 3.3.2.0 and later |

From 47634f4f1876635086ddfd277dab8dbfbb69f8ce Mon Sep 17 00:00:00 2001
From: Haojun Liao
Date: Tue, 26 Nov 2024 22:48:30 +0800
Subject: [PATCH 12/13] test: add a new test case for TDgpt.
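The new case exercises the TDgpt anomaly-detection entry points end to end: registering an anode, running an ANOMALY_WINDOW query, and dropping the anode. A rough sketch of the query shape being validated is shown below; the endpoint and algorithm parameter mirror the test's own values and are illustrative, not a deployment recommendation.

```sql
-- Register an analysis node (anode); the test assumes TDgpt listens on 127.0.0.1:6090.
CREATE ANODE '127.0.0.1:6090';

-- Count the rows inside each anomaly window that the IQR algorithm detects on column c1.
SELECT COUNT(*) FROM ct1 ANOMALY_WINDOW(c1, 'algo=iqr');

-- Deregister the anode when finished.
DROP ANODE 1;
```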
--- tests/script/sh/stop_dnodes.sh | 2 +- tests/script/tsim/analytics/basic0.sim | 82 ++++++++++++++++++++++++++ 2 files changed, 83 insertions(+), 1 deletion(-) create mode 100644 tests/script/tsim/analytics/basic0.sim diff --git a/tests/script/sh/stop_dnodes.sh b/tests/script/sh/stop_dnodes.sh index c462442fa2..8923804547 100755 --- a/tests/script/sh/stop_dnodes.sh +++ b/tests/script/sh/stop_dnodes.sh @@ -27,7 +27,7 @@ while [ -n "$PID" ]; do PID=`ps -ef|grep -w taosd | grep -v grep | awk '{print $2}'` done -PID=`ps -ef|grep -w taos | grep -v grep | awk '{print $2}'` +PID=`ps -ef|grep -w taos | grep -v grep | grep -v taosanode|awk '{print $2}'` while [ -n "$PID" ]; do echo kill -9 $PID #pkill -9 taos diff --git a/tests/script/tsim/analytics/basic0.sim b/tests/script/tsim/analytics/basic0.sim new file mode 100644 index 0000000000..a4fe6354ae --- /dev/null +++ b/tests/script/tsim/analytics/basic0.sim @@ -0,0 +1,82 @@ +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/exec.sh -n dnode1 -s start +sql connect + +print =============== create anode +sql create anode '127.0.0.1:6090' + +sql show anodes +if $rows != 1 then + return -1 +endi + +print =============== show info +sql show anodes full +if $rows != 8 then + print expect 8 , actual $rows + return -1 +endi + +print =============== create database +sql create database d0 vgroups 1 +sql select * from information_schema.ins_databases +if $rows != 3 then + return -1 +endi + +print $data00 $data01 $data02 + +sql use d0 + +print =============== create super table, include column type for count/sum/min/max/first +sql create table if not exists stb (ts timestamp, c1 int, c2 float, c3 double) tags (t1 int unsigned) + +sql show stables +if $rows != 1 then + return -1 +endi + +print =============== create child table +sql create table ct1 using stb tags(1000) + +print ==================== insert data +# input_list = [5, 14, 15, 15, 14, 19, 17, 16, 20, 22, 8, 21, 28, 11, 9, 29, 40] +sql insert into ct1(ts, c1) values(now-1a, 5)(now+1a, 14)(now+2a, 15)(now+3a, 15)(now+4a, 14) +sql insert into ct1(ts, c1) values(now+5a, 19)(now+6a, 17)(now+7a, 16)(now+8a, 20)(now+9a, 22) +sql insert into ct1(ts, c1) values(now+10a, 8)(now+11a, 21)(now+12a, 28)(now+13a, 11)(now+14a, 9) +sql insert into ct1(ts, c1) values(now+15a, 29)(now+16a, 40) + +sql select count(*) from ct1 +if $data00 != 17 then + print expect 17 , actual $data00 + return -1 +endi + +sql select count(*) from ct1 anomaly_window(c1, 'algo=iqr') +if $data00 != 1 then + return -1 +endi + + +sql drop anode 1 +sql show anodes + +if $rows != 0 then + return -1 +endi + +_OVER: +system sh/exec.sh -n dnode1 -s stop -x SIGINT +print =============== check +$null= + +system_content sh/checkValgrind.sh -n dnode1 +print cmd return result ----> [ $system_content ] +if $system_content > 0 then + return -1 +endi + +if $system_content == $null then + return -1 +endi From 73671bed91f7d70f558d73fc5cbde1bd6fdb84ba Mon Sep 17 00:00:00 2001 From: WANG Xu Date: Wed, 27 Nov 2024 11:25:10 +0800 Subject: [PATCH 13/13] Update pull_request_template.md --- .github/pull_request_template.md | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 7d877987ac..ea70f31ff1 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,4 +1,10 @@ -# Pull Request Checklist +# Description + +Please briefly describe the code changes in this pull request. 
+ +# Checklist + +Please check the items in the checklist if applicable. - [ ] Is the user manual updated? - [ ] Are the test cases passed and automated?