From c39042b643d3fa780cf5e5a22be3a83dcd6ad24a Mon Sep 17 00:00:00 2001 From: the5fire Date: Wed, 16 Oct 2024 11:11:12 +0800 Subject: [PATCH 1/7] fix typo: 365 -> 3650 --- docs/zh/05-basic/01-model.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/zh/05-basic/01-model.md b/docs/zh/05-basic/01-model.md index bcd931df3a..f49db17892 100644 --- a/docs/zh/05-basic/01-model.md +++ b/docs/zh/05-basic/01-model.md @@ -106,7 +106,7 @@ CREATE DATABASE power PRECISION 'ms' KEEP 3650 DURATION 10 BUFFER 16; ``` 该 SQL 将创建一个名为 `power` 的数据库,各参数说明如下: - `PRECISION 'ms'` :这个数据库的时序数据使用毫秒(ms)精度的时间戳 -- `KEEP 365`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除 +- `KEEP 3650`:这个库的数据将保留 3650 天,超过 3650 天的数据将被自动删除 - `DURATION 10` :每 10 天的数据放在一个数据文件中 - `BUFFER 16` :写入使用大小为 16MB 的内存池。 @@ -214,4 +214,4 @@ TDengine 支持灵活的数据模型设计,包括多列模型和单列模型 尽管 TDengine 推荐使用多列模型,因为这种模型在写入效率和存储效率方面通常更优,但在某些特定场景下,单列模型可能更为适用。例如,当一个数据采集点的采集量种类经常发生变化时,如果采用多列模型,就需要频繁修改超级表的结构定义,这会增加应用程序的复杂性。在这种情况下,采用单列模型可以简化应用程序的设计和管理,因为它允许独立地管理和扩展每个物理量的超级表。 -总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。 \ No newline at end of file +总之,TDengine 提供了灵活的数据模型选项,用户可以根据实际需求和场景选择最适合的模型,以优化性能和管理复杂性。 From be82bbf90ca87b1cb6a80cb78810994e354baac0 Mon Sep 17 00:00:00 2001 From: yanyuxing Date: Thu, 31 Oct 2024 10:46:39 +0800 Subject: [PATCH 2/7] feat(packaging): add client install summary information --- packaging/tools/mac_install_summary_client.txt | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 packaging/tools/mac_install_summary_client.txt diff --git a/packaging/tools/mac_install_summary_client.txt b/packaging/tools/mac_install_summary_client.txt new file mode 100644 index 0000000000..f49703c555 --- /dev/null +++ b/packaging/tools/mac_install_summary_client.txt @@ -0,0 +1,4 @@ +TDengine client is installed successfully. Please open a terminal and execute the commands below: + +To configure TDengine client, sudo vi /etc/taos/taos.cfg +To access TDengine command line interface, taos -h YouServerName \ No newline at end of file From f0760a3bb03404253161bd559c60e7365da33230 Mon Sep 17 00:00:00 2001 From: dapan1121 Date: Fri, 1 Nov 2024 16:17:10 +0800 Subject: [PATCH 3/7] fix: msg preprocess fail issue --- source/libs/qworker/src/qwMsg.c | 2 +- source/libs/qworker/src/qworker.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/source/libs/qworker/src/qwMsg.c b/source/libs/qworker/src/qwMsg.c index 69014d5b1c..20b81bfc14 100644 --- a/source/libs/qworker/src/qwMsg.c +++ b/source/libs/qworker/src/qwMsg.c @@ -429,7 +429,7 @@ int32_t qWorkerPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg, bool chkGran tFreeSSubQueryMsg(&msg); - return TSDB_CODE_SUCCESS; + return code; } int32_t qWorkerAbortPreprocessQueryMsg(void *qWorkerMgmt, SRpcMsg *pMsg) { diff --git a/source/libs/qworker/src/qworker.c b/source/libs/qworker/src/qworker.c index ddc4812b55..9b96c1e519 100644 --- a/source/libs/qworker/src/qworker.c +++ b/source/libs/qworker/src/qworker.c @@ -751,7 +751,7 @@ _return: qwReleaseTaskCtx(mgmt, ctx); } - return TSDB_CODE_SUCCESS; + return code; } int32_t qwProcessQuery(QW_FPARAMS_DEF, SQWMsg *qwMsg, char *sql) { From d75d22eb3c707d26a7c30f6cef602144d8d2ade7 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Nov 2024 19:38:18 +0800 Subject: [PATCH 4/7] fix(stream): check the right return code for concurrent checkpoint trans. 
--- source/common/src/tglobal.c | 3 + source/dnode/mnode/impl/src/mndStream.c | 11 ++- .../script/tsim/stream/concurrentcheckpt.sim | 79 +++++++++++++++++++ 3 files changed, 89 insertions(+), 4 deletions(-) create mode 100644 tests/script/tsim/stream/concurrentcheckpt.sim diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c index 00052cc810..2104562c0b 100644 --- a/source/common/src/tglobal.c +++ b/source/common/src/tglobal.c @@ -1646,6 +1646,9 @@ static int32_t taosSetServerCfg(SConfig *pCfg) { TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "checkpointInterval"); tsStreamCheckpointInterval = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "concurrentCheckpoint"); + tsMaxConcurrentCheckpoint = pItem->i32; + TAOS_CHECK_GET_CFG_ITEM(pCfg, pItem, "streamSinkDataRate"); tsSinkDataRate = pItem->fval; diff --git a/source/dnode/mnode/impl/src/mndStream.c b/source/dnode/mnode/impl/src/mndStream.c index a4327b777f..88b43d497f 100644 --- a/source/dnode/mnode/impl/src/mndStream.c +++ b/source/dnode/mnode/impl/src/mndStream.c @@ -1284,9 +1284,10 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { void* p = taosArrayPush(pList, &in); if (p) { int32_t currentSize = taosArrayGetSize(pList); - mDebug("stream:%s (uid:0x%" PRIx64 ") checkpoint interval beyond threshold: %ds(%" PRId64 - "s) beyond concurrently launch threshold:%d", - pStream->name, pStream->uid, tsStreamCheckpointInterval, duration / 1000, currentSize); + mDebug("stream:%s (uid:0x%" PRIx64 ") total %d stream(s) beyond chpt interval threshold: %ds(%" PRId64 + "s), concurrently launch threshold:%d", + pStream->name, pStream->uid, currentSize, tsStreamCheckpointInterval, duration / 1000, + tsMaxConcurrentCheckpoint); } else { mError("failed to record the checkpoint interval info, stream:0x%" PRIx64, pStream->uid); } @@ -1338,7 +1339,7 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { code = mndProcessStreamCheckpointTrans(pMnode, p, checkpointId, 1, true); sdbRelease(pSdb, p); - if (code != 0 && code != TSDB_CODE_ACTION_IN_PROGRESS) { + if (code == 0 || code == TSDB_CODE_ACTION_IN_PROGRESS) { started += 1; if (started >= capacity) { @@ -1346,6 +1347,8 @@ static int32_t mndProcessStreamCheckpoint(SRpcMsg *pReq) { (started + numOfCheckpointTrans)); break; } + } else { + mError("failed to start checkpoint trans, code:%s", tstrerror(code)); } } } diff --git a/tests/script/tsim/stream/concurrentcheckpt.sim b/tests/script/tsim/stream/concurrentcheckpt.sim new file mode 100644 index 0000000000..4162617deb --- /dev/null +++ b/tests/script/tsim/stream/concurrentcheckpt.sim @@ -0,0 +1,79 @@ +system sh/stop_dnodes.sh + +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c supportVnodes -v 1 + +print ========== step1 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select task_id, checkpoint_id from information_schema.ins_stream_tasks order by checkpoint_id; + +print $data01 $data11 $data21 +if $data01 == 
$data11 then + print not allowed 2 checkpoint start completed + return -1 +endi + +if $data11 == $data21 then + print not allowed 2 checkpoints start concurrently + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT + +print ========== concurrent checkpoint is set 2 + +system sh/stop_dnodes.sh +system sh/deploy.sh -n dnode1 -i 1 +system sh/cfg.sh -n dnode1 -c concurrentCheckpoint -v 2 + +system sh/exec.sh -n dnode1 -s start + +print ========== step2 +system sh/exec.sh -n dnode1 -s start +sql connect + +sql create database abc1 vgroups 1; +sql use abc1; +sql create table st1(ts timestamp, k int) tags(a int); +sql create table t1 using st1 tags(1); +sql create table t2 using st1 tags(2); +sql insert into t1 values(now, 1); + +sql create stream str1 trigger at_once into str_dst1 as select count(*) from st1 interval(30s); +sql create stream str2 trigger at_once into str_dst2 as select count(*) from st1 interval(30s); +sql create stream str3 trigger at_once into str_dst3 as select count(*) from st1 interval(30s); + +print ============== create 3 streams, check the concurrently checkpoint +sleep 180000 + +sql select count(*) a, checkpoint_id from information_schema.ins_stream_tasks group by checkpoint_id order by a; +print $data00 $data01 +print $data10 $data11 + +if $data00 != 1 then + print expect 1, actual $data00 + return -1 +endi + +if $data10 != 2 then + print expect 2, actual $data10 + return -1 +endi + +system sh/exec.sh -n dnode1 -s stop -x SIGINT From 26334df0905d61e1950527e081964ffbc9411008 Mon Sep 17 00:00:00 2001 From: Haojun Liao Date: Mon, 4 Nov 2024 20:13:49 +0800 Subject: [PATCH 5/7] test: add a new test case. --- tests/parallel_test/cases.task | 1 + tests/script/tsim/testsuit.sim | 1 + 2 files changed, 2 insertions(+) diff --git a/tests/parallel_test/cases.task b/tests/parallel_test/cases.task index d389c27929..4769c5b67a 100644 --- a/tests/parallel_test/cases.task +++ b/tests/parallel_test/cases.task @@ -1319,6 +1319,7 @@ ,,y,script,./test.sh -f tsim/stream/basic2.sim ,,y,script,./test.sh -f tsim/stream/basic3.sim ,,y,script,./test.sh -f tsim/stream/basic4.sim +,,y,script,./test.sh -f tsim/stream/concurrentcheckpt.sim ,,y,script,./test.sh -f tsim/stream/checkpointInterval0.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable1.sim ,,y,script,./test.sh -f tsim/stream/checkStreamSTable.sim diff --git a/tests/script/tsim/testsuit.sim b/tests/script/tsim/testsuit.sim index c208a07488..fbc89b196b 100644 --- a/tests/script/tsim/testsuit.sim +++ b/tests/script/tsim/testsuit.sim @@ -110,6 +110,7 @@ run tsim/stream/distributeInterval0.sim run tsim/stream/distributeSession0.sim run tsim/stream/state0.sim run tsim/stream/basic2.sim +run tsim/stream/concurrentcheckpt.sim run tsim/insert/basic1.sim run tsim/insert/commit-merge0.sim run tsim/insert/basic0.sim From 1a20ae8013ad351b485c50dafd8dc1c863869611 Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Mon, 4 Nov 2024 17:47:33 +0800 Subject: [PATCH 6/7] docs: update port information --- docs/zh/04-get-started/01-docker.md | 7 ++++--- docs/zh/08-operation/02-planning.md | 24 +++++++++++++++--------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/docs/zh/04-get-started/01-docker.md b/docs/zh/04-get-started/01-docker.md index 848a7fd499..e58e7bff71 100644 --- a/docs/zh/04-get-started/01-docker.md +++ b/docs/zh/04-get-started/01-docker.md @@ -23,17 +23,18 @@ docker pull tdengine/tdengine:3.3.3.0 然后只需执行下面的命令: ```shell -docker run -d -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 
6043-6060:6043-6060/udp tdengine/tdengine +docker run -d -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine ``` -注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043-6049 为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +注意:TDengine 3.0 服务端仅使用 6030 TCP 端口。6041 为 taosAdapter 所使用提供 REST 服务端口。6043 为 taosKeeper 使用端口。6044-6049 TCP 端口为 taosAdapter 提供第三方应用接入所使用端口,可根据需要选择是否打开。 +6044 和 6045 UDP 端口为 statsd 和 collectd 格式写入接口,可根据需要选择是否打开。6060 为 taosExplorer 使用端口。具体端口使用情况请参考[网络端口要求](../../operation/planning#网络端口要求)。 如果需要将数据持久化到本机的某一个文件夹,则执行下边的命令: ```shell docker run -d -v ~/data/taos/dnode/data:/var/lib/taos \ -v ~/data/taos/dnode/log:/var/log/taos \ - -p 6030:6030 -p 6041:6041 -p 6043-6060:6043-6060 -p 6043-6060:6043-6060/udp tdengine/tdengine + -p 6030:6030 -p 6041:6041 -p 6043:6043 -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp -p 6060:6060 tdengine/tdengine ``` :::note diff --git a/docs/zh/08-operation/02-planning.md b/docs/zh/08-operation/02-planning.md index 66da1df8bf..04957ed26c 100644 --- a/docs/zh/08-operation/02-planning.md +++ b/docs/zh/08-operation/02-planning.md @@ -53,7 +53,7 @@ M = (T × S × 3 + (N / 4096) + 100) 与 WebSocket 连接方式相比,RESTful 连接方式在内存占用上更大,除了缓冲区所需的内存以外,还需要考虑每个连接响应结果的内存开销。这种内存开销与响应结果的JSON 数据大小密切相关,特别是在查询数据量很大时,会占用大量内存。 -由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议打开batchfetch=true 选项,以启用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险 +由于 RESTful 连接方式不支持分批获取查询数据,这就导致在查询获取超大结果集时,可能会占用特别大的内存,从而导致内存溢出,因此,在大型项目中,建议使用 WebSocket 连接方式,实现流式结果集返回,从而避免内存溢出的风险 **注意** - 建议采用 RESTful/WebSocket 连接方式来访问 TDengine 集群,而不采用taosc 原生连接方式。 @@ -146,11 +146,17 @@ TDengine 的多级存储功能在使用上还具备以下优点。 下表列出了 TDengine 的一些接口或组件的常用端口,这些端口均可以通过配置文件中的参数进行修改。 -|接口或组件 | 端口 | -|:---------------------------:|:---------:| -|原生接口(taosc) | 6030 | -|RESTful 接口 | 6041 | -|WebSocket 接口 |6041 | -|taosKeeper | 6043 | -|taosX | 6050, 6055 | -|taosExplorer | 6060 | \ No newline at end of file +| 接口或组件名称 | 端口 | 协议 | +|:------------------------------------:|:----------:|:-------:| +| 原生接口(taosc) | 6030 | TCP | +| RESTful 接口 | 6041 | TCP | +| WebSocket 接口 | 6041 | TCP | +| statsd 格式写入接口 | 6044 | TCP/UDP | +| collectd 格式写入接口 | 6045 | TCP/UDP | +| openTSDB Telnet 格式写入接口 | 6046 | TCP | +| collectd 使用 openTSDB Telnet 格式写入接口 | 6047 | TCP | +| icinga2 使用 openTSDB Telnet 格式写入接口 | 6048 | TCP | +| tcollector 使用 openTSDB Telnet 格式写入接口 | 6049 | TCP | +| taosKeeper | 6043 | TCP | +| taosX | 6050, 6055 | TCP | +| taosExplorer | 6060 | TCP | From 17300c8b792861c7142fe433e11e211155b402ce Mon Sep 17 00:00:00 2001 From: t_max <1172915550@qq.com> Date: Mon, 4 Nov 2024 18:01:25 +0800 Subject: [PATCH 7/7] docs: update port information --- docs/zh/08-operation/02-planning.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/zh/08-operation/02-planning.md b/docs/zh/08-operation/02-planning.md index 04957ed26c..83f98af0bb 100644 --- a/docs/zh/08-operation/02-planning.md +++ b/docs/zh/08-operation/02-planning.md @@ -151,12 +151,12 @@ TDengine 的多级存储功能在使用上还具备以下优点。 | 原生接口(taosc) | 6030 | TCP | | RESTful 接口 | 6041 | TCP | | WebSocket 接口 | 6041 | TCP | +| taosKeeper | 6043 | TCP | | statsd 格式写入接口 | 6044 | TCP/UDP | | collectd 格式写入接口 | 6045 | TCP/UDP | | openTSDB Telnet 格式写入接口 | 6046 | TCP | | collectd 使用 openTSDB Telnet 格式写入接口 | 6047 | TCP | | icinga2 使用 openTSDB Telnet 格式写入接口 | 6048 | TCP | | tcollector 使用 openTSDB Telnet 格式写入接口 | 6049 | TCP | -| taosKeeper | 6043 | TCP | | taosX | 6050, 6055 | TCP | | taosExplorer | 6060 | TCP |
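
As a quick sanity check of the port documentation updated in patches 6 and 7, the published ports of a running container can be compared against the table above. This is a minimal sketch, not part of the patch series itself; the container name `tdengine` is an assumption, adjust it to your deployment:

```shell
# Start TDengine with the port mappings documented in 01-docker.md
# (container name "tdengine" is assumed here for illustration)
docker run -d --name tdengine \
  -p 6030:6030 -p 6041:6041 -p 6043:6043 \
  -p 6044-6049:6044-6049 -p 6044-6045:6044-6045/udp \
  -p 6060:6060 tdengine/tdengine

# List the published ports and compare them with the table in 02-planning.md
docker port tdengine
```

The UDP mappings for 6044-6045 correspond to the statsd and collectd format write interfaces listed in the table; the remaining TCP ports cover the native interface (6030), REST/WebSocket (6041), taosKeeper (6043), the taosAdapter third-party ingestion endpoints (6044-6049), and taosExplorer (6060).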