diff --git a/Jenkinsfile2 b/Jenkinsfile2
index bc309ff66c..d7df07f06a 100644
--- a/Jenkinsfile2
+++ b/Jenkinsfile2
@@ -53,6 +53,7 @@ def check_docs() {
     }
     sh '''
     cd ${WKC}
+    git remote prune origin
     git pull >/dev/null
     git fetch origin +refs/pull/${CHANGE_ID}/merge
     git checkout -qf FETCH_HEAD
diff --git a/cmake/cmake.version b/cmake/cmake.version
index c8afc1a291..db29644b38 100644
--- a/cmake/cmake.version
+++ b/cmake/cmake.version
@@ -2,7 +2,7 @@
 IF (DEFINED VERNUMBER)
   SET(TD_VER_NUMBER ${VERNUMBER})
 ELSE ()
-  SET(TD_VER_NUMBER "3.0.0.0")
+  SET(TD_VER_NUMBER "3.0.0.1")
 ENDIF ()
 IF (DEFINED VERCOMPATIBLE)
diff --git a/docs/en/01-index.md b/docs/en/01-index.md
index 363fa1101c..22e62bc5e0 100644
--- a/docs/en/01-index.md
+++ b/docs/en/01-index.md
@@ -5,7 +5,7 @@ slug: /
 ---
-TDengine is an open source, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
+TDengine is an [open source](https://tdengine.com/tdengine/open-source-time-series-database/), [cloud native](https://tdengine.com/tdengine/cloud-native-time-series-database/) time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. It enables efficient, real-time data ingestion, processing, and monitoring of TB and even PB scale data per day, generated by billions of sensors and data collectors. This document is the TDengine user manual. It introduces the basic, as well as novel concepts, in TDengine, and also talks in detail about installation, features, SQL, APIs, operation, maintenance, kernel design and other topics. It’s written mainly for architects, developers and system administrators.
 To get an overview of TDengine, such as a feature list, benchmarks, and competitive advantages, please browse through the [Introduction](./intro) section.
diff --git a/docs/en/02-intro/index.md b/docs/en/02-intro/index.md
index db28cdd939..b4636e54a6 100644
--- a/docs/en/02-intro/index.md
+++ b/docs/en/02-intro/index.md
@@ -3,7 +3,7 @@ title: Introduction
 toc_max_heading_level: 2
 ---
-TDengine is an open source, high-performance, cloud native time-series database optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
+TDengine is an open source, high-performance, cloud native [time-series database](https://tdengine.com/tsdb/) optimized for Internet of Things (IoT), Connected Cars, and Industrial IoT. Its code, including its cluster feature is open source under GNU AGPL v3.0. Besides the database engine, it provides [caching](../develop/cache), [stream processing](../develop/stream), [data subscription](../develop/tmq) and other functionalities to reduce the system complexity and cost of development and operation.
 This section introduces the major features, competitive advantages, typical use-cases and benchmarks to help you get a high level overview of TDengine.
@@ -33,17 +33,18 @@ For more details on features, please read through the entire documentation.
 By making full use of [characteristics of time series data](https://tdengine.com/tsdb/characteristics-of-time-series-data/), TDengine differentiates itself from other time series databases, with the following advantages.
-- **High-Performance**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
+- **[High-Performance](https://tdengine.com/tdengine/high-performance-time-series-database/)**: TDengine is the only time-series database to solve the high cardinality issue to support billions of data collection points while out performing other time-series databases for data ingestion, querying and data compression.
-- **Simplified Solution**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
+- **[Simplified Solution](https://tdengine.com/tdengine/simplified-time-series-data-solution/)**: Through built-in caching, stream processing and data subscription features, TDengine provides a simplified solution for time-series data processing. It reduces system design complexity and operation costs significantly.
-- **Cloud Native**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
+- **[Cloud Native](https://tdengine.com/tdengine/cloud-native-time-series-database/)**: Through native distributed design, sharding and partitioning, separation of compute and storage, RAFT, support for kubernetes deployment and full observability, TDengine is a cloud native Time-Series Database and can be deployed on public, private or hybrid clouds.
-- **Ease of Use**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
+- **[Ease of Use](https://tdengine.com/tdengine/easy-time-series-data-platform/)**: For administrators, TDengine significantly reduces the effort to deploy and maintain. For developers, it provides a simple interface, simplified solution and seamless integrations for third party tools. For data users, it gives easy data access.
-- **Easy Data Analytics**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
+- **[Easy Data Analytics](https://tdengine.com/tdengine/time-series-data-analytics-made-easy/)**: Through super tables, storage and compute separation, data partitioning by time interval, pre-computation and other means, TDengine makes it easy to explore, format, and get access to data in a highly efficient way.
-- **Open Source**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
+- **[Open Source](https://tdengine.com/tdengine/open-source-time-series-database/)**: TDengine’s core modules, including cluster feature, are all available under open source licenses. It has gathered over 19k stars on GitHub. There is an active developer community, and over 140k running instances worldwide.
 With TDengine, the total cost of ownership of your time-series data platform can be greatly reduced. 1: With its superior performance, the computing and storage resources are reduced significantly;2: With SQL support, it can be seamlessly integrated with many third party tools, and learning costs/migration costs are reduced significantly;3: With its simplified solution and nearly zero management, the operation and maintenance costs are reduced significantly.
@@ -96,14 +97,13 @@ As a high-performance, scalable and SQL supported time-series database, TDengine
 | **System Maintenance Requirements** | **Not Applicable** | **Might Be Applicable** | **Very Applicable** | **Description** |
 | ------------------------------------------------- | ------------------ | ----------------------- | ------------------- | ------------------------------------------------------------ |
 | Native high-reliability | | | √ | TDengine has a very robust, reliable and easily configurable system architecture to simplify routine operation. Human errors and accidents are eliminated to the greatest extent, with a streamlined experience for operators. |
-| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the Taos shell for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
+| Minimize learning and maintenance costs | | | √ | In addition to being easily configurable, standard SQL support and the TDengine CLI for ad hoc queries makes maintenance simpler, allows reuse and reduces learning costs.|
 | Abundant talent supply | √ | | | Given the above, and given the extensive training and professional services provided by TDengine, it is easy to migrate from existing solutions or create a new and lasting solution based on TDengine.|
 ## Comparison with other databases
 - [Writing Performance Comparison of TDengine and InfluxDB ](https://tdengine.com/2022/02/23/4975.html)
 - [Query Performance Comparison of TDengine and InfluxDB](https://tdengine.com/2022/02/24/5120.html)
-- [TDengine vs InfluxDB、OpenTSDB、Cassandra、MySQL、ClickHouse](https://www.tdengine.com/downloads/TDengine_Testing_Report_en.pdf)
 - [TDengine vs OpenTSDB](https://tdengine.com/2019/09/12/710.html)
 - [TDengine vs Cassandra](https://tdengine.com/2019/09/12/708.html)
 - [TDengine vs InfluxDB](https://tdengine.com/2019/09/12/706.html)
diff --git a/docs/en/20-third-party/emqx/client-num.webp b/docs/en/20-third-party/emqx/client-num.webp
index a151b18484..a8ac6fb4c0 100644
Binary files a/docs/en/20-third-party/emqx/client-num.webp and b/docs/en/20-third-party/emqx/client-num.webp differ
diff --git a/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp b/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp
index 147a65b17b..a8b52f3b2d 100644
Binary files a/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp and b/docs/en/25-application/IT-DevOps-Solutions-Collectd-StatsD.webp differ
diff --git a/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp b/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp
index fd5461ec9b..fac96f4eb6 100644
Binary files a/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp and b/docs/en/25-application/IT-DevOps-Solutions-Telegraf.webp differ
diff --git a/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp b/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp
index 105afcdb83..fac96f4eb6 100644
Binary files a/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp and b/docs/en/25-application/IT-DevOps-Solutions-telegraf-dashboard.webp differ
diff --git a/docs/zh/05-get-started/03-package.md b/docs/zh/05-get-started/03-package.md
index d3247350ff..85005b9551 100644
--- a/docs/zh/05-get-started/03-package.md
+++ b/docs/zh/05-get-started/03-package.md
@@ -108,7 +108,7 @@ apt-get 方式只适用于 Debian 或 Ubuntu 系统
 :::info
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases)
+下载其他组件、最新 Beta 版及之前版本的安装包,请点击[发布历史页面](../../releases/tdengine)
 :::
 :::note
diff --git a/docs/zh/05-get-started/_pkg_install.mdx b/docs/zh/05-get-started/_pkg_install.mdx
deleted file mode 100644
index 36730b4429..0000000000
--- a/docs/zh/05-get-started/_pkg_install.mdx
+++ /dev/null
@@ -1,17 +0,0 @@
----
-import PkgList from "/components/PkgList";
-
-TDengine 的安装非常简单,从下载到安装成功仅仅只要几秒钟。
-
-标准的服务端安装包包含了 taos、taosd、taosAdapter、taosdump、taosBenchmark、TDinsight 安装脚本和示例代码;如果您只需要用到服务端程序和客户端连接的 C/C++ 语言支持,也可以仅下载 lite 版本的安装包。
-
-在安装包格式上,我们提供 tar.gz, rpm 和 deb 格式,为企业客户提供 tar.gz 格式安装包,以方便在特定操作系统上使用。需要注意的是,rpm 和 deb 包不含 taosdump、taosBenchmark 和 TDinsight 安装脚本,这些工具需要通过安装 taosTool 包获得。
-
-发布版本包括稳定版和 Beta 版,Beta 版含有更多新功能。正式上线或测试建议安装稳定版。您可以根据需要选择下载:
-
-
-具体的安装方法,请参见[安装包的安装和卸载](/operation/pkg-install)。
-
-下载其他组件、最新 Beta 版及之前版本的安装包,请点击[这里](https://www.taosdata.com/all-downloads)
-
-查看 Release Notes, 请点击[这里](https://github.com/taosdata/TDengine/releases)
diff --git a/docs/zh/08-connector/_linux_install.mdx b/docs/zh/08-connector/_linux_install.mdx
index c3ddff53cd..0b1f415f54 100644
--- a/docs/zh/08-connector/_linux_install.mdx
+++ b/docs/zh/08-connector/_linux_install.mdx
@@ -4,7 +4,7 @@ import PkgListV3 from "/components/PkgListV3";
-  [所有下载](../../releases)
+  [所有下载](../../releases/tdengine)
 2. 解压缩软件包
diff --git a/docs/zh/08-connector/_windows_install.mdx b/docs/zh/08-connector/_windows_install.mdx
index 9fdefa04c0..3cd688e615 100644
--- a/docs/zh/08-connector/_windows_install.mdx
+++ b/docs/zh/08-connector/_windows_install.mdx
@@ -4,7 +4,8 @@ import PkgListV3 from "/components/PkgListV3";
-  [所有下载](../../releases)
+  [所有下载](../../releases/tdengine)
+
 2. 执行安装程序,按提示选择默认值,完成安装
 3. 安装路径
diff --git a/docs/zh/28-releases.md b/docs/zh/28-releases.md
deleted file mode 100644
index 311d69ac1b..0000000000
--- a/docs/zh/28-releases.md
+++ /dev/null
@@ -1,9 +0,0 @@
----
-sidebar_label: 发布历史
-title: 发布历史
----
-
-import Release from "/components/ReleaseV3";
-
-
-
diff --git a/docs/zh/28-releases/01-tdengine.md b/docs/zh/28-releases/01-tdengine.md
new file mode 100644
index 0000000000..a64798caa0
--- /dev/null
+++ b/docs/zh/28-releases/01-tdengine.md
@@ -0,0 +1,15 @@
+---
+sidebar_label: TDengine 发布历史
+title: TDengine 发布历史
+---
+
+import Release from "/components/ReleaseV3";
+
+## 3.0.0.1
+
+
+
+## 3.0.0.0
+
+
+
diff --git a/docs/zh/28-releases/02-tools.md b/docs/zh/28-releases/02-tools.md
new file mode 100644
index 0000000000..7513333040
--- /dev/null
+++ b/docs/zh/28-releases/02-tools.md
@@ -0,0 +1,10 @@
+---
+sidebar_label: taosTools 发布历史
+title: taosTools 发布历史
+---
+
+import Release from "/components/ReleaseV3";
+
+## 2.1.2
+
+
\ No newline at end of file
diff --git a/docs/zh/28-releases/_category_.yml b/docs/zh/28-releases/_category_.yml
new file mode 100644
index 0000000000..dcd57247d7
--- /dev/null
+++ b/docs/zh/28-releases/_category_.yml
@@ -0,0 +1 @@
+label: 发布历史
\ No newline at end of file
diff --git a/include/common/tglobal.h b/include/common/tglobal.h
index cd74ffd477..f872d1dbc2 100644
--- a/include/common/tglobal.h
+++ b/include/common/tglobal.h
@@ -130,6 +130,7 @@ extern int32_t tsMqRebalanceInterval;
 extern int32_t tsTtlUnit;
 extern int32_t tsTtlPushInterval;
 extern int32_t tsGrantHBInterval;
+extern int32_t tsUptimeInterval;
 #define NEEDTO_COMPRESSS_MSG(size) (tsCompressMsgSize != -1 && (size) > tsCompressMsgSize)
diff --git a/include/common/tmsgdef.h b/include/common/tmsgdef.h
index e2bb3e2ae1..006ba7f21b 100644
--- a/include/common/tmsgdef.h
+++ b/include/common/tmsgdef.h
@@ -170,6 +170,7 @@ enum {
   TD_DEF_MSG_TYPE(TDMT_MND_SPLIT_VGROUP, "split-vgroup", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_SHOW_VARIABLES, "show-variables", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_SERVER_VERSION, "server-version", NULL, NULL)
+  TD_DEF_MSG_TYPE(TDMT_MND_UPTIME_TIMER, "uptime-timer", NULL, NULL)
   TD_DEF_MSG_TYPE(TDMT_MND_MAX_MSG, "mnd-max", NULL, NULL)
   TD_NEW_MSG_SEG(TDMT_VND_MSG)
diff --git a/source/common/src/systable.c b/source/common/src/systable.c
index 65041e1f12..68a77a9f33 100644
--- a/source/common/src/systable.c
+++ b/source/common/src/systable.c
@@ -66,8 +66,9 @@ static const SSysDbTableSchema bnodesSchema[] = {
 };
 static const SSysDbTableSchema clusterSchema[] = {
-    {.name = "id", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
+    {.name = "id", .bytes = 8, .type = TSDB_DATA_TYPE_BIGINT},
     {.name = "name", .bytes = TSDB_CLUSTER_ID_LEN + VARSTR_HEADER_SIZE, .type = TSDB_DATA_TYPE_VARCHAR},
+    {.name = "uptime", .bytes = 4, .type = TSDB_DATA_TYPE_INT},
     {.name = "create_time", .bytes = 8, .type = TSDB_DATA_TYPE_TIMESTAMP},
 };
diff --git a/source/common/src/tglobal.c b/source/common/src/tglobal.c
index adc5af1a17..a628f551f4 100644
--- a/source/common/src/tglobal.c
+++ b/source/common/src/tglobal.c
@@ -164,6 +164,7 @@ int32_t tsMqRebalanceInterval = 2;
 int32_t tsTtlUnit = 86400;
 int32_t tsTtlPushInterval = 86400;
 int32_t tsGrantHBInterval = 60;
+int32_t tsUptimeInterval = 300;  // seconds
 #ifndef _STORAGE
 int32_t taosSetTfsCfg(SConfig *pCfg) {
diff --git a/source/dnode/mnode/impl/inc/mndCluster.h b/source/dnode/mnode/impl/inc/mndCluster.h
index 0de253fb6a..2cb41edd7c 100644
--- a/source/dnode/mnode/impl/inc/mndCluster.h
+++ b/source/dnode/mnode/impl/inc/mndCluster.h
@@ -27,6 +27,7 @@
 void mndCleanupCluster(SMnode *pMnode);
 int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len);
 int64_t mndGetClusterId(SMnode *pMnode);
 int64_t mndGetClusterCreateTime(SMnode *pMnode);
+float mndGetClusterUpTime(SMnode *pMnode);
 #ifdef __cplusplus
 }
diff --git a/source/dnode/mnode/impl/inc/mndDef.h b/source/dnode/mnode/impl/inc/mndDef.h
index 8cff7fe48e..ea05215fe9 100644
--- a/source/dnode/mnode/impl/inc/mndDef.h
+++ b/source/dnode/mnode/impl/inc/mndDef.h
@@ -179,6 +179,7 @@ typedef struct {
   char name[TSDB_CLUSTER_ID_LEN];
   int64_t createdTime;
   int64_t updateTime;
+  int32_t upTime;
 } SClusterObj;
 typedef struct {
diff --git a/source/dnode/mnode/impl/src/mndCluster.c b/source/dnode/mnode/impl/src/mndCluster.c
index a82bf739f5..7d633f90bd 100644
--- a/source/dnode/mnode/impl/src/mndCluster.c
+++ b/source/dnode/mnode/impl/src/mndCluster.c
@@ -19,7 +19,7 @@
 #include "mndTrans.h"
 #define CLUSTER_VER_NUMBE 1
-#define CLUSTER_RESERVE_SIZE 64
+#define CLUSTER_RESERVE_SIZE 60
 static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster);
 static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw);
@@ -29,6 +29,7 @@ static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOldCluster, SCl
 static int32_t mndCreateDefaultCluster(SMnode *pMnode);
 static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *pBlock, int32_t rows);
 static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter);
+static int32_t mndProcessUptimeTimer(SRpcMsg *pReq);
 int32_t mndInitCluster(SMnode *pMnode) {
   SSdbTable table = {
@@ -42,8 +43,10 @@ int32_t mndInitCluster(SMnode *pMnode) {
       .deleteFp = (SdbDeleteFp)mndClusterActionDelete,
   };
+  mndSetMsgHandle(pMnode, TDMT_MND_UPTIME_TIMER, mndProcessUptimeTimer);
   mndAddShowRetrieveHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndRetrieveClusters);
   mndAddShowFreeIterHandle(pMnode, TSDB_MGMT_TABLE_CLUSTER, mndCancelGetNextCluster);
+
   return sdbSetTable(pMnode->pSdb, table);
 }
@@ -62,40 +65,69 @@ int32_t mndGetClusterName(SMnode *pMnode, char *clusterName, int32_t len) {
   return 0;
 }
-int64_t mndGetClusterId(SMnode *pMnode) {
-  SSdb *pSdb = pMnode->pSdb;
-  void *pIter = NULL;
-  int64_t clusterId = -1;
+static SClusterObj *mndAcquireCluster(SMnode *pMnode) {
+  SSdb *pSdb = pMnode->pSdb;
+  void *pIter = NULL;
   while (1) {
     SClusterObj *pCluster = NULL;
     pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
     if (pIter == NULL) break;
+    return pCluster;
+  }
+
+  return NULL;
+}
+
+static void mndReleaseCluster(SMnode *pMnode, SClusterObj *pCluster) {
+  SSdb *pSdb = pMnode->pSdb;
+  sdbRelease(pSdb, pCluster);
+}
+
+int64_t mndGetClusterId(SMnode *pMnode) {
+  int64_t clusterId = 0;
+  SClusterObj *pCluster = mndAcquireCluster(pMnode);
+  if (pCluster != NULL) {
     clusterId = pCluster->id;
-    sdbRelease(pSdb, pCluster);
+    mndReleaseCluster(pMnode, pCluster);
   }
   return clusterId;
 }
 int64_t mndGetClusterCreateTime(SMnode *pMnode) {
-  SSdb *pSdb = pMnode->pSdb;
-  void *pIter = NULL;
-  int64_t createTime = INT64_MAX;
-
-  while (1) {
-    SClusterObj *pCluster = NULL;
-    pIter = sdbFetch(pSdb, SDB_CLUSTER, pIter, (void **)&pCluster);
-    if (pIter == NULL) break;
-
+  int64_t createTime = 0;
+  SClusterObj *pCluster = mndAcquireCluster(pMnode);
+  if (pCluster != NULL) {
     createTime = pCluster->createdTime;
-    sdbRelease(pSdb, pCluster);
+    mndReleaseCluster(pMnode, pCluster);
   }
   return createTime;
 }
+static int32_t mndGetClusterUpTimeImp(SClusterObj *pCluster) {
+#if 0
+  int32_t upTime = taosGetTimestampSec() - pCluster->updateTime / 1000;
+  upTime = upTime + pCluster->upTime;
+  return upTime;
+#else
+  return pCluster->upTime;
+#endif
+}
+
+float mndGetClusterUpTime(SMnode *pMnode) {
+  int64_t upTime = 0;
+  SClusterObj *pCluster = mndAcquireCluster(pMnode);
+  if (pCluster != NULL) {
+    upTime = mndGetClusterUpTimeImp(pCluster);
+    mndReleaseCluster(pMnode, pCluster);
+  }
+
+  return upTime / 86400.0f;
+}
+
 static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {
   terrno = TSDB_CODE_OUT_OF_MEMORY;
@@ -107,6 +139,7 @@ static SSdbRaw *mndClusterActionEncode(SClusterObj *pCluster) {
   SDB_SET_INT64(pRaw, dataPos, pCluster->createdTime, _OVER)
   SDB_SET_INT64(pRaw, dataPos, pCluster->updateTime, _OVER)
   SDB_SET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
+  SDB_SET_INT32(pRaw, dataPos, pCluster->upTime, _OVER)
   SDB_SET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)
   terrno = 0;
@@ -144,6 +177,7 @@ static SSdbRow *mndClusterActionDecode(SSdbRaw *pRaw) {
   SDB_GET_INT64(pRaw, dataPos, &pCluster->createdTime, _OVER)
   SDB_GET_INT64(pRaw, dataPos, &pCluster->updateTime, _OVER)
   SDB_GET_BINARY(pRaw, dataPos, pCluster->name, TSDB_CLUSTER_ID_LEN, _OVER)
+  SDB_GET_INT32(pRaw, dataPos, &pCluster->upTime, _OVER)
   SDB_GET_RESERVE(pRaw, dataPos, CLUSTER_RESERVE_SIZE, _OVER)
   terrno = 0;
@@ -162,6 +196,7 @@ _OVER:
 static int32_t mndClusterActionInsert(SSdb *pSdb, SClusterObj *pCluster) {
   mTrace("cluster:%" PRId64 ", perform insert action, row:%p", pCluster->id, pCluster);
   pSdb->pMnode->clusterId = pCluster->id;
+  pCluster->updateTime = taosGetTimestampMs();
   return 0;
 }
@@ -171,7 +206,10 @@ static int32_t mndClusterActionDelete(SSdb *pSdb, SClusterObj *pCluster) {
 }
 static int32_t mndClusterActionUpdate(SSdb *pSdb, SClusterObj *pOld, SClusterObj *pNew) {
-  mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p", pOld->id, pOld, pNew);
+  mTrace("cluster:%" PRId64 ", perform update action, old row:%p new row:%p, uptime from %d to %d", pOld->id, pOld,
+         pNew, pOld->upTime, pNew->upTime);
+  pOld->upTime = pNew->upTime;
+  pOld->updateTime = taosGetTimestampMs();
   return 0;
 }
@@ -242,6 +280,10 @@ static int32_t mndRetrieveClusters(SRpcMsg *pMsg, SShowObj *pShow, SSDataBlock *
     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataAppend(pColInfo, numOfRows, buf, false);
+    int32_t upTime = mndGetClusterUpTimeImp(pCluster);
+    pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
+    colDataAppend(pColInfo, numOfRows, (const char *)&upTime, false);
+
     pColInfo = taosArrayGet(pBlock->pDataBlock, cols++);
     colDataAppend(pColInfo, numOfRows, (const char *)&pCluster->createdTime, false);
@@ -257,3 +299,40 @@ static void mndCancelGetNextCluster(SMnode *pMnode, void *pIter) {
   SSdb *pSdb = pMnode->pSdb;
   sdbCancelFetch(pSdb, pIter);
 }
+
+static int32_t mndProcessUptimeTimer(SRpcMsg *pReq) {
+  SMnode *pMnode = pReq->info.node;
+  SClusterObj clusterObj = {0};
+  SClusterObj *pCluster = mndAcquireCluster(pMnode);
+  if (pCluster != NULL) {
+    memcpy(&clusterObj, pCluster, sizeof(SClusterObj));
+    clusterObj.upTime += tsUptimeInterval;
+    mndReleaseCluster(pMnode, pCluster);
+  }
+
+  if (clusterObj.id <= 0) {
+    mError("can't get cluster info while update uptime");
+    return 0;
+  }
+
+  mTrace("update cluster uptime to %" PRId64, clusterObj.upTime);
+  STrans *pTrans = mndTransCreate(pMnode, TRN_POLICY_ROLLBACK, TRN_CONFLICT_NOTHING, pReq);
+  if (pTrans == NULL) return -1;
+
+  SSdbRaw *pCommitRaw = mndClusterActionEncode(&clusterObj);
+  if (pCommitRaw == NULL || mndTransAppendCommitlog(pTrans, pCommitRaw) != 0) {
+    mError("trans:%d, failed to append commit log since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+  sdbSetRawStatus(pCommitRaw, SDB_STATUS_READY);
+
+  if (mndTransPrepare(pMnode, pTrans) != 0) {
+    mError("trans:%d, failed to prepare since %s", pTrans->id, terrstr());
+    mndTransDrop(pTrans);
+    return -1;
+  }
+
+  mndTransDrop(pTrans);
+  return 0;
+}
diff --git a/source/dnode/mnode/impl/src/mndMain.c b/source/dnode/mnode/impl/src/mndMain.c
index c3296ac5c1..65a539bc90 100644
--- a/source/dnode/mnode/impl/src/mndMain.c
+++ b/source/dnode/mnode/impl/src/mndMain.c
@@ -100,6 +100,16 @@ static void mndGrantHeartBeat(SMnode *pMnode) {
   }
 }
+static void mndIncreaseUpTime(SMnode *pMnode) {
+  int32_t contLen = 0;
+  void *pReq = mndBuildTimerMsg(&contLen);
+  if (pReq != NULL) {
+    SRpcMsg rpcMsg = {
+        .msgType = TDMT_MND_UPTIME_TIMER, .pCont = pReq, .contLen = contLen, .info.ahandle = (void *)0x9528};
+    tmsgPutToQueue(&pMnode->msgCb, WRITE_QUEUE, &rpcMsg);
+  }
+}
+
 static void *mndThreadFp(void *param) {
   SMnode *pMnode = param;
   int64_t lastTime = 0;
@@ -122,13 +132,17 @@ static void *mndThreadFp(void *param) {
       mndCalMqRebalance(pMnode);
     }
-    if (lastTime % (tsTelemInterval * 10) == 0) {
+    if (lastTime % (tsTelemInterval * 10) == 1) {
       mndPullupTelem(pMnode);
     }
     if (lastTime % (tsGrantHBInterval * 10) == 0) {
       mndGrantHeartBeat(pMnode);
     }
+
+    if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) {
+      mndIncreaseUpTime(pMnode);
+    }
   }
   return NULL;
@@ -556,7 +570,8 @@ static int32_t mndCheckMnodeState(SRpcMsg *pMsg) {
   }
   if (mndAcquireRpcRef(pMsg->info.node) == 0) return 0;
   if (pMsg->msgType == TDMT_MND_MQ_TIMER || pMsg->msgType == TDMT_MND_TELEM_TIMER ||
-      pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER) {
+      pMsg->msgType == TDMT_MND_TRANS_TIMER || pMsg->msgType == TDMT_MND_TTL_TIMER ||
+      pMsg->msgType == TDMT_MND_UPTIME_TIMER) {
     return -1;
   }
@@ -705,7 +720,8 @@ int32_t mndGetMonitorInfo(SMnode *pMnode, SMonClusterInfo *pClusterInfo, SMonVgr
     if (pObj->id == pMnode->selfDnodeId) {
       pClusterInfo->first_ep_dnode_id = pObj->id;
       tstrncpy(pClusterInfo->first_ep, pObj->pDnode->ep, sizeof(pClusterInfo->first_ep));
-      pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
+      pClusterInfo->master_uptime = mndGetClusterUpTime(pMnode);
+      // pClusterInfo->master_uptime = (ms - pObj->stateStartTime) / (86400000.0f);
       tstrncpy(desc.role, syncStr(TAOS_SYNC_STATE_LEADER), sizeof(desc.role));
     } else {
       tstrncpy(desc.role, syncStr(pObj->state), sizeof(desc.role));
diff --git a/source/dnode/mnode/impl/src/mndSync.c b/source/dnode/mnode/impl/src/mndSync.c
index 8e8cff853c..b7129cf56e 100644
--- a/source/dnode/mnode/impl/src/mndSync.c
+++ b/source/dnode/mnode/impl/src/mndSync.c
@@ -68,7 +68,7 @@ void mndSyncCommitMsg(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SFsmCbMeta cbM
   if (pMgmt->errCode != 0) {
     mError("trans:%d, failed to propose since %s, post sem", transId, tstrerror(pMgmt->errCode));
   } else {
-    mInfo("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
+    mDebug("trans:%d, is proposed and post sem", transId, tstrerror(pMgmt->errCode));
   }
   pMgmt->transId = 0;
   taosWUnLockLatch(&pMgmt->lock);
@@ -118,7 +118,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
   SSyncMgmt *pMgmt = &pMnode->syncMgmt;
   pMgmt->errCode = cbMeta.code;
-  mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
+  mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64, pMgmt->transId,
         cbMeta.code, cbMeta.index, cbMeta.term);
   taosWLockLatch(&pMgmt->lock);
@@ -126,7 +126,7 @@ void mndReConfig(struct SSyncFSM *pFsm, const SRpcMsg *pMsg, SReConfigCbMeta cbM
   if (pMgmt->errCode != 0) {
     mError("trans:-1, failed to propose sync reconfig since %s, post sem", tstrerror(pMgmt->errCode));
   } else {
-    mInfo("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
+    mDebug("trans:-1, sync reconfig is proposed, saved:%d code:0x%x, index:%" PRId64 " term:%" PRId64 " post sem",
           pMgmt->transId, cbMeta.code, cbMeta.index, cbMeta.term);
   }
   pMgmt->transId = 0;
@@ -228,7 +228,7 @@ int32_t mndInitSync(SMnode *pMnode) {
   syncInfo.isStandBy = pMgmt->standby;
   syncInfo.snapshotStrategy = SYNC_STRATEGY_STANDARD_SNAPSHOT;
-  mInfo("start to open mnode sync, standby:%d", pMgmt->standby);
+  mDebug("start to open mnode sync, standby:%d", pMgmt->standby);
   if (pMgmt->standby || pMgmt->replica.id > 0) {
     SSyncCfg *pCfg = &syncInfo.syncCfg;
     pCfg->replicaNum = 1;
@@ -236,7 +236,7 @@ int32_t mndInitSync(SMnode *pMnode) {
     SNodeInfo *pNode = &pCfg->nodeInfo[0];
     tstrncpy(pNode->nodeFqdn, pMgmt->replica.fqdn, sizeof(pNode->nodeFqdn));
     pNode->nodePort = pMgmt->replica.port;
-    mInfo("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
+    mDebug("mnode ep:%s:%u", pNode->nodeFqdn, pNode->nodePort);
   }
   tsem_init(&pMgmt->syncSem, 0, 0);
diff --git a/source/dnode/vnode/src/tsdb/tsdbRead.c b/source/dnode/vnode/src/tsdb/tsdbRead.c
index a4738781f5..bbef249079 100644
--- a/source/dnode/vnode/src/tsdb/tsdbRead.c
+++ b/source/dnode/vnode/src/tsdb/tsdbRead.c
@@ -178,7 +178,7 @@ static int32_t doAppendRowFromFileBlock(SSDataBlock* pResBlock, STsdbReader* pR
 static void setComposedBlockFlag(STsdbReader* pReader, bool composed);
 static bool hasBeenDropped(const SArray* pDelList, int32_t* index, TSDBKEY* pKey, int32_t order);
-static void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
+static void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
                             STsdbReader* pReader, bool* freeTSRow);
 static void doMergeMemIMemRows(TSDBROW* pRow, TSDBROW* piRow, STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader, STSRow** pTSRow);
@@ -1510,6 +1510,7 @@ static int32_t doMergeBufAndFileRows_Rv(STsdbReader* pReader, STableBlockScanInf
   return TSDB_CODE_SUCCESS;
 }
+#if 0
 static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, TSDBROW* pRow, SIterInfo* pIter, int64_t key, SLastBlockReader* pLastBlockReader) {
   SRowMerger merge = {0};
@@ -1536,7 +1537,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
       freeTSRow = true;
     }
   } else if (k.ts < key) {  // k.ts < key
-    doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
+    doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
   } else {  // k.ts == key, ascending order: file block ----> imem rows -----> mem rows
     tRowMergerInit(&merge, &fRow, pReader->pSchema);
     doMergeRowsInFileBlocks(pBlockData, pBlockScanInfo, pReader, &merge);
@@ -1549,7 +1550,7 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
     }
   } else {  // descending order scan
     if (key < k.ts) {
-      doMergeMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
+      doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, pIter, pDelList, &pTSRow, pReader, &freeTSRow);
     } else if (k.ts < key) {
       if (tryCopyDistinctRowFromFileBlock(pReader, pBlockData, key, pDumpInfo)) {
         return TSDB_CODE_SUCCESS;
@@ -1583,6 +1584,8 @@ static int32_t doMergeBufAndFileRows(STsdbReader* pReader, STableBlockScanInfo*
   return TSDB_CODE_SUCCESS;
 }
+#endif
+
 static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData, SLastBlockReader* pLastBlockReader) {
   SRowMerger merge = {0};
   STSRow* pTSRow = NULL;
@@ -1734,6 +1737,7 @@ static int32_t doMergeMultiLevelRowsRv(STsdbReader* pReader, STableBlockScanInfo
   return TSDB_CODE_SUCCESS;
 }
+#if 0
 static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo* pBlockScanInfo, SBlockData* pBlockData) {
   SRowMerger merge = {0};
   STSRow* pTSRow = NULL;
@@ -1779,7 +1783,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
     // [3] ik.ts < key <= k.ts
     // [4] ik.ts < k.ts <= key
     if (ik.ts < k.ts) {
-      doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
+      doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
       doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
       if (freeTSRow) {
         taosMemoryFree(pTSRow);
@@ -1790,7 +1794,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
     // [5] k.ts < key <= ik.ts
     // [6] k.ts < ik.ts <= key
     if (k.ts < ik.ts) {
-      doMergeMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
+      doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, &pTSRow, pReader, &freeTSRow);
       doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
       if (freeTSRow) {
         taosMemoryFree(pTSRow);
@@ -1836,7 +1840,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
     // [3] ik.ts > k.ts >= Key
     // [4] ik.ts > key >= k.ts
     if (ik.ts > key) {
-      doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
+      doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
       doAppendRowFromTSRow(pReader->pResBlock, pReader, pTSRow, uid);
       if (freeTSRow) {
         taosMemoryFree(pTSRow);
@@ -1859,7 +1863,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
     //[7] key = ik.ts > k.ts
     if (key == ik.ts) {
-      doMergeMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
+      doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, &pTSRow, pReader, &freeTSRow);
       TSDBROW fRow = tsdbRowFromBlockData(pBlockData, pDumpInfo->rowIndex);
       tRowMerge(&merge, &fRow);
@@ -1876,6 +1880,7 @@ static int32_t doMergeThreeLevelRows(STsdbReader* pReader, STableBlockScanInfo*
   ASSERT(0);
   return -1;
 }
+#endif
 static bool isValidFileBlockRow(SBlockData* pBlockData, SFileBlockDumpInfo* pDumpInfo,
                                 STableBlockScanInfo* pBlockScanInfo, STsdbReader* pReader) {
@@ -3115,7 +3120,7 @@ int32_t doMergeRowsInLastBlock(SLastBlockReader* pLastBlockReader, STableBlockSc
   return TSDB_CODE_SUCCESS;
 }
-void doMergeMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
+void doMergeMemTableMultiRows(TSDBROW* pRow, uint64_t uid, SIterInfo* pIter, SArray* pDelList, STSRow** pTSRow,
                STsdbReader* pReader, bool* freeTSRow) {
   TSDBROW* pNextRow = NULL;
   TSDBROW current = *pRow;
@@ -3197,6 +3202,7 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
   TSDBROW* pRow = getValidRow(&pBlockScanInfo->iter, pBlockScanInfo->delSkyline, pReader);
   TSDBROW* piRow = getValidRow(&pBlockScanInfo->iiter, pBlockScanInfo->delSkyline, pReader);
   SArray* pDelList = pBlockScanInfo->delSkyline;
+  uint64_t uid = pBlockScanInfo->uid;
   // todo refactor
   bool asc = ASCENDING_TRAVERSE(pReader->order);
@@ -3219,9 +3225,9 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
     TSDBKEY ik = TSDBROW_KEY(piRow);
     if (ik.ts < k.ts) {  // ik.ts < k.ts
-      doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
+      doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
     } else if (k.ts < ik.ts) {
-      doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+      doMergeMemTableMultiRows(pRow, uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
     } else {  // ik.ts == k.ts
       doMergeMemIMemRows(pRow, piRow, pBlockScanInfo, pReader, pTSRow);
       *freeTSRow = true;
@@ -3231,12 +3237,12 @@ int32_t tsdbGetNextRowInMem(STableBlockScanInfo* pBlockScanInfo, STsdbReader* pR
   }
   if (pBlockScanInfo->iter.hasVal && pRow != NULL) {
-    doMergeMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
+    doMergeMemTableMultiRows(pRow, pBlockScanInfo->uid, &pBlockScanInfo->iter, pDelList, pTSRow, pReader, freeTSRow);
     return TSDB_CODE_SUCCESS;
   }
   if (pBlockScanInfo->iiter.hasVal && piRow != NULL) {
-    doMergeMultiRows(piRow, pBlockScanInfo->uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
+    doMergeMemTableMultiRows(piRow, uid, &pBlockScanInfo->iiter, pDelList, pTSRow, pReader, freeTSRow);
     return TSDB_CODE_SUCCESS;
   }
diff --git a/source/libs/executor/src/executil.c b/source/libs/executor/src/executil.c
index 2781fee956..118c061c3a 100644
--- a/source/libs/executor/src/executil.c
+++ b/source/libs/executor/src/executil.c
@@ -782,6 +782,7 @@ int32_t getTableList(void* metaHandle, void* pVnode, SScanPhysiNode* pScanNode,
       colDataDestroy(pColInfoData);
       taosMemoryFreeClear(pColInfoData);
       taosArrayDestroy(res);
+      qError("failed to getColInfoResult, code: %s", tstrerror(terrno));
       return terrno;
     }
diff --git a/source/libs/executor/src/executor.c b/source/libs/executor/src/executor.c
index d8f63cb008..fe1f4911ca 100644
--- a/source/libs/executor/src/executor.c
+++ b/source/libs/executor/src/executor.c
@@ -352,12 +352,14 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
   int32_t code = createExecTaskInfoImpl(pSubplan, pTask, readHandle, taskId, sql, model);
   if (code != TSDB_CODE_SUCCESS) {
+    qError("failed to createExecTaskInfoImpl, code: %s", tstrerror(code));
     goto _error;
   }
   SDataSinkMgtCfg cfg = {.maxDataBlockNum = 10000, .maxDataBlockNumPerQuery = 5000};
   code = dsDataSinkMgtInit(&cfg);
   if (code != TSDB_CODE_SUCCESS) {
+    qError("failed to dsDataSinkMgtInit, code: %s", tstrerror(code));
     goto _error;
   }
@@ -365,6 +367,7 @@ int32_t qCreateExecTask(SReadHandle* readHandle, int32_t vgId, uint64_t taskId,
   void* pSinkParam = NULL;
   code = createDataSinkParam(pSubplan->pDataSink, &pSinkParam, pTaskInfo, readHandle);
   if (code != TSDB_CODE_SUCCESS) {
+    qError("failed to createDataSinkParam, code: %s", tstrerror(code));
     goto _error;
   }
diff --git a/source/libs/executor/src/executorimpl.c b/source/libs/executor/src/executorimpl.c
index 2d72bc813f..46ef5700fc 100644
--- a/source/libs/executor/src/executorimpl.c
+++ b/source/libs/executor/src/executorimpl.c
@@ -3492,6 +3492,7 @@ int32_t doInitAggInfoSup(SAggSupporter* pAggSup, SqlFunctionCtx* pCtx, int32_t n
     qError("Init stream agg supporter failed since %s", terrstr(terrno));
     return terrno;
   }
+
   int32_t code = createDiskbasedBuf(&pAggSup->pResultBuf, defaultPgsz, defaultBufsz, pKey, tsTempDir);
   if (code != TSDB_CODE_SUCCESS) {
     qError("Create agg result buf failed since %s", tstrerror(code));
@@ -4049,6 +4050,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
                                        pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
       if (code) {
         pTaskInfo->code = code;
+        qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
         return NULL;
       }
@@ -4068,6 +4070,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
                                        pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
       if (code) {
         pTaskInfo->code = code;
+        qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
         return NULL;
       }
@@ -4091,6 +4094,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
                                        pHandle, pTableListInfo, pTagCond, pTagIndexCond, GET_TASKID(pTaskInfo));
       if (code) {
         pTaskInfo->code = code;
+        qError("failed to createScanTableListInfo, code: %s", tstrerror(code));
         return NULL;
       }
@@ -4113,6 +4117,7 @@ SOperatorInfo* createOperatorTree(SPhysiNode* pPhyNode, SExecTaskInfo* pTaskInfo
       int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanPhyNode, pTagCond, pTagIndexCond, pTableListInfo);
       if (code != TSDB_CODE_SUCCESS) {
         pTaskInfo->code = terrno;
+        qError("failed to getTableList, code: %s", tstrerror(code));
         return NULL;
       }
diff --git a/source/libs/executor/src/projectoperator.c b/source/libs/executor/src/projectoperator.c
index f2b79bf703..94da3e23e1 100644
--- a/source/libs/executor/src/projectoperator.c
+++ b/source/libs/executor/src/projectoperator.c
@@ -50,9 +50,11 @@ static void destroyIndefinitOperatorInfo(void* param, int32_t numOfOutput) {
 SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhysiNode* pProjPhyNode, SExecTaskInfo* pTaskInfo) {
+  int32_t code = TSDB_CODE_SUCCESS;
   SProjectOperatorInfo* pInfo = taosMemoryCalloc(1, sizeof(SProjectOperatorInfo));
   SOperatorInfo* pOperator = taosMemoryCalloc(1, sizeof(SOperatorInfo));
   if (pInfo == NULL || pOperator == NULL) {
+    code = TSDB_CODE_OUT_OF_MEMORY;
     goto _error;
   }
@@ -67,12 +69,11 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
   pInfo->binfo.pRes = pResBlock;
   pInfo->pFinalRes = createOneDataBlock(pResBlock, false);
   pInfo->pFilterNode = pProjPhyNode->node.pConditions;
-  pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;
-
-  // todo remove it soon
   if (pTaskInfo->execModel == OPTR_EXEC_MODEL_STREAM) {
     pInfo->mergeDataBlocks = false;
+  } else {
+    pInfo->mergeDataBlocks = pProjPhyNode->mergeDataBlock;
   }
   int32_t numOfRows = 4096;
@@ -83,9 +84,13 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
   if (numOfRows * pResBlock->info.rowSize > TWOMB) {
     numOfRows = TWOMB / pResBlock->info.rowSize;
   }
-  initResultSizeInfo(&pOperator->resultInfo, numOfRows);
-  initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+  initResultSizeInfo(&pOperator->resultInfo, numOfRows);
+  code = initAggInfo(&pOperator->exprSupp, &pInfo->aggSup, pExprInfo, numOfCols, keyBufSize, pTaskInfo->id.str);
+  if (code != TSDB_CODE_SUCCESS) {
+    goto _error;
+  }
+
   initBasicInfo(&pInfo->binfo, pResBlock);
   setFunctionResultOutput(pOperator, &pInfo->binfo, &pInfo->aggSup, MAIN_SCAN, numOfCols);
@@ -99,7 +104,7 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
   pOperator->fpSet = createOperatorFpSet(operatorDummyOpenFn, doProjectOperation, NULL, NULL, destroyProjectOperatorInfo, NULL, NULL, NULL);
-  int32_t code = appendDownstream(pOperator, &downstream, 1);
+  code = appendDownstream(pOperator, &downstream, 1);
   if (code != TSDB_CODE_SUCCESS) {
     goto _error;
   }
@@ -107,7 +112,9 @@ SOperatorInfo* createProjectOperatorInfo(SOperatorInfo* downstream, SProjectPhys
   return pOperator;
 _error:
-  pTaskInfo->code = TSDB_CODE_OUT_OF_MEMORY;
+  destroyProjectOperatorInfo(pInfo, numOfCols);
+  taosMemoryFree(pOperator);
+  pTaskInfo->code = code;
   return NULL;
 }
@@ -175,7 +182,8 @@ static int32_t doIngroupLimitOffset(SLimitInfo* pLimitInfo, uint64_t groupId, SS
     int32_t keepRows = (int32_t)(pLimitInfo->limit.limit - pLimitInfo->numOfOutputRows);
     blockDataKeepFirstNRows(pBlock, keepRows);
     //TODO: optimize it later when partition by + limit
-    if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) || pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups) {
+    if ((pLimitInfo->slimit.limit == -1 && pLimitInfo->currentGroupId == 0) ||
+        (pLimitInfo->slimit.limit > 0 && pLimitInfo->slimit.limit <= pLimitInfo->numOfOutputGroups)) {
       doSetOperatorCompleted(pOperator);
     }
   }
diff --git a/source/libs/executor/src/scanoperator.c b/source/libs/executor/src/scanoperator.c
index 1377b42b72..599f86f4fa 100644
--- a/source/libs/executor/src/scanoperator.c
+++ b/source/libs/executor/src/scanoperator.c
@@ -2649,6 +2649,7 @@ int32_t createScanTableListInfo(SScanPhysiNode* pScanNode, SNodeList* pGroupTags
   int32_t code = getTableList(pHandle->meta, pHandle->vnode, pScanNode, pTagCond, pTagIndexCond, pTableListInfo);
   if (code != TSDB_CODE_SUCCESS) {
+    qError("failed to getTableList, code: %s", tstrerror(code));
     return code;
   }
diff --git a/source/libs/parser/src/parInsert.c b/source/libs/parser/src/parInsert.c
index de9f815618..4e32672697 100644
--- a/source/libs/parser/src/parInsert.c
+++ b/source/libs/parser/src/parInsert.c
@@ -125,6 +125,37 @@ static int32_t skipInsertInto(char** pSql, SMsgBuf* pMsg) {
   return TSDB_CODE_SUCCESS;
 }
+static char* tableNameGetPosition(SToken* pToken, char target) {
+  bool inEscape = false;
+  bool inQuote = false;
+  char quotaStr = 0;
+
+  for (uint32_t i = 0; i < pToken->n; ++i) {
+    if (*(pToken->z + i) == target && (!inEscape) && (!inQuote)) {
+      return pToken->z + i;
+    }
+
+    if (*(pToken->z + i) == TS_ESCAPE_CHAR) {
+      if (!inQuote) {
+        inEscape = !inEscape;
+      }
+    }
+
+    if (*(pToken->z + i) == '\'' || *(pToken->z + i) == '"') {
+      if (!inEscape) {
+        if (!inQuote) {
+          quotaStr = *(pToken->z + i);
+          inQuote = !inQuote;
+        } else if (quotaStr == *(pToken->z + i)) {
+          inQuote = !inQuote;
+        }
+      }
+    }
+  }
+
+  return NULL;
+}
+
 static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, const char* dbName, SMsgBuf* pMsgBuf) {
   const char* msg1 = "name too long";
   const char* msg2 = "invalid database name";
@@ -132,7 +163,7 @@ static int32_t createSName(SName* pName, SToken* pTableName, int32_t acctId, con
   const char* msg4 = "invalid table name";
   int32_t code = TSDB_CODE_SUCCESS;
-  char* p = strnchr(pTableName->z, TS_PATH_DELIMITER[0], pTableName->n, true);
+  char* p = tableNameGetPosition(pTableName, TS_PATH_DELIMITER[0]);
   if (p != NULL) {  // db has been specified in sql string so we ignore current db path
     assert(*p == TS_PATH_DELIMITER[0]);
@@ -1691,9 +1722,17 @@ static int32_t collectTableMetaKey(SInsertParseSyntaxCxt* pCxt, bool isStable, i
   return TSDB_CODE_SUCCESS;
 }
+static int32_t checkTableName(const char* pTableName, SMsgBuf* pMsgBuf) {
+  if (NULL != strchr(pTableName, '.')) {
+    return generateSyntaxErrMsgExt(pMsgBuf, TSDB_CODE_PAR_INVALID_IDENTIFIER_NAME, "The table name cannot contain '.'");
+  }
+  return TSDB_CODE_SUCCESS;
+}
+
 static int32_t collectAutoCreateTableMetaKey(SInsertParseSyntaxCxt* pCxt, int32_t tableNo, SToken* pTbToken) {
   SName name;
   CHECK_CODE(createSName(&name, pTbToken, pCxt->pComCxt->acctId, pCxt->pComCxt->db, &pCxt->msg));
+  CHECK_CODE(checkTableName(name.tname, &pCxt->msg));
   CHECK_CODE(reserveTableMetaInCacheForInsert(&name, CATALOG_REQ_TYPE_VGROUP, tableNo, pCxt->pMetaCache));
   return TSDB_CODE_SUCCESS;
 }
diff --git a/source/libs/transport/src/thttp.c b/source/libs/transport/src/thttp.c
index 7cfb188ac9..935f536a90 100644
--- a/source/libs/transport/src/thttp.c
+++ b/source/libs/transport/src/thttp.c
@@ -145,7 +145,7 @@ static void clientRecvCb(uv_stream_t* handle, ssize_t nread, const uv_buf_t *buf
   if (nread < 0) {
     uError("http-report read error:%s", uv_err_name(nread));
   } else {
-    uInfo("http-report succ to read %d bytes, just ignore it", nread);
+    uTrace("http-report succ to read %d bytes, just ignore it", nread);
   }
   uv_close((uv_handle_t*)&cli->tcp, clientCloseCb);
 }
@@ -155,7 +155,7 @@ static void clientSentCb(uv_write_t* req, int32_t status) {
     terrno = TAOS_SYSTEM_ERROR(status);
     uError("http-report failed to send data %s", uv_strerror(status));
   } else {
-    uInfo("http-report succ to send data");
+    uTrace("http-report succ to send data");
   }
   uv_read_start((uv_stream_t *)&cli->tcp, clientAllocBuffCb, clientRecvCb);
 }
diff --git a/source/util/src/tcache.c b/source/util/src/tcache.c
index dd61f7d225..f9f42aa103 100644
--- a/source/util/src/tcache.c
+++ b/source/util/src/tcache.c
@@ -702,7 +702,7 @@ void taosCacheCleanup(SCacheObj *pCacheObj) {
     taosMsleep(50);
   }
-  uInfo("cache:%s will be cleaned up", pCacheObj->name);
+  uTrace("cache:%s will be cleaned up", pCacheObj->name);
   doCleanupDataCache(pCacheObj);
 }
diff --git a/source/util/src/tcompression.c b/source/util/src/tcompression.c
index e8f1f06ef1..ba877915b1 100644
--- a/source/util/src/tcompression.c
+++ b/source/util/src/tcompression.c
@@ -83,8 +83,8 @@ int32_t tsCompressInit() {
   if (lossyFloat == false && lossyDouble == false) return 0;
   tdszInit(fPrecision, dPrecision, maxRange, curRange, Compressor);
-  if (lossyFloat) uInfo("lossy compression float is opened. ");
-  if (lossyDouble) uInfo("lossy compression double is opened. ");
+  if (lossyFloat) uTrace("lossy compression float is opened. ");
+  if (lossyDouble) uTrace("lossy compression double is opened. ");
   return 1;
 }
 // exit call
diff --git a/tests/script/tmp/monitor.sim b/tests/script/tmp/monitor.sim
index 8eb787e950..c0c1da567c 100644
--- a/tests/script/tmp/monitor.sim
+++ b/tests/script/tmp/monitor.sim
@@ -21,6 +21,6 @@ sql create table db.stb (ts timestamp, c1 int, c2 binary(4)) tags(t1 int, t2 bin
 print =============== create drop qnode 1
 sql create qnode on dnode 1
-sql create snode on dnode 1
-sql create bnode on dnode 1
+#sql create snode on dnode 1
+#sql create bnode on dnode 1
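Editor's illustration (not part of the patch): a minimal standalone C sketch of the uptime cadence this change introduces, assuming only what the hunks above show — the mnode timer thread increments lastTime roughly ten times per second, tsUptimeInterval defaults to 300 seconds, and mndProcessUptimeTimer persists upTime in whole seconds while mndGetClusterUpTime reports it in days.

/*
 * Simulates the check added to mndThreadFp():
 *   if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) mndIncreaseUpTime(pMnode);
 * With one tick per ~100 ms, the condition is true exactly once per tsUptimeInterval
 * seconds, which is when TDMT_MND_UPTIME_TIMER is queued and SClusterObj.upTime grows
 * by tsUptimeInterval. The new "uptime" column in clusterSchema exposes that raw value,
 * and monitoring (master_uptime) divides by 86400 to report days, as mndGetClusterUpTime() does.
 */
#include <stdint.h>
#include <stdio.h>

static const int32_t tsUptimeInterval = 300;  /* seconds, mirrors the default in tglobal.c */

int main(void) {
  int64_t lastTime = 0;  /* mirrors the 100 ms tick counter in mndThreadFp() */
  int64_t upTime = 0;    /* accumulated seconds, mirrors SClusterObj.upTime */
  int64_t firings = 0;

  /* simulate one hour of 100 ms ticks */
  for (int64_t tick = 0; tick < 3600 * 10; ++tick) {
    lastTime++;
    if ((lastTime % (tsUptimeInterval * 10)) == ((tsUptimeInterval - 1) * 10)) {
      upTime += tsUptimeInterval;  /* what mndProcessUptimeTimer() commits via the trans log */
      firings++;
    }
  }

  /* prints: timer fired 12 times, upTime=3600 s (0.0417 days) */
  printf("timer fired %lld times, upTime=%lld s (%.4f days)\n",
         (long long)firings, (long long)upTime, upTime / 86400.0);
  return 0;
}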