diff --git a/.github/workflows/taosd-ci-build.yml b/.github/workflows/taosd-ci-build.yml
index e424d0b8ab..372008b585 100644
--- a/.github/workflows/taosd-ci-build.yml
+++ b/.github/workflows/taosd-ci-build.yml
@@ -10,15 +10,25 @@ on:
- 'docs/**'
- 'packaging/**'
- 'tests/**'
-
+ - '*.md'
+
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
- runs-on: ubuntu-latest
- name: Build and test
+ name: Build and test on ${{ matrix.os }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+ os:
+ - ubuntu-20.04
+ - ubuntu-22.04
+ - ubuntu-24.04
+ - macos-13
+ - macos-14
+ - macos-15
steps:
- name: Checkout the repository
@@ -29,12 +39,19 @@ jobs:
with:
go-version: 1.18
- - name: Install system dependencies
+ - name: Install dependencies on Linux
+ if: runner.os == 'Linux'
run: |
sudo apt update -y
sudo apt install -y build-essential cmake \
libgeos-dev libjansson-dev libsnappy-dev liblzma-dev libz-dev \
- zlib1g pkg-config libssl-dev gawk
+ zlib1g-dev pkg-config libssl-dev gawk
+
+ - name: Install dependencies on macOS
+ if: runner.os == 'macOS'
+ run: |
+ brew update
+ brew install argp-standalone gflags pkg-config snappy zlib geos jansson gawk openssl
- name: Build and install TDengine
run: |
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md
index 76cb0e0f31..9a84c79596 100644
--- a/CODE_OF_CONDUCT.md
+++ b/CODE_OF_CONDUCT.md
@@ -75,4 +75,4 @@ available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.ht
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
-https://www.contributor-covenant.org/faq
+https://www.contributor-covenant.org/faq
diff --git a/README-CN.md b/README-CN.md
index 99bbf9aabd..ca046dbe13 100644
--- a/README-CN.md
+++ b/README-CN.md
@@ -10,7 +10,36 @@
简体中文 | [English](README.md) | [TDengine 云服务](https://cloud.taosdata.com/?utm_medium=cn&utm_source=github) | 很多职位正在热招中,请看[这里](https://www.taosdata.com/careers/)
-# TDengine 简介
+# 目录
+
+1. [TDengine 简介](#1-tdengine-简介)
+1. [文档](#2-文档)
+1. [必备工具](#3-必备工具)
+ - [3.1 Linux预备](#31-linux系统)
+ - [3.2 macOS预备](#32-macos系统)
+ - [3.3 Windows预备](#33-windows系统)
+ - [3.4 克隆仓库](#34-克隆仓库)
+1. [构建](#4-构建)
+ - [4.1 Linux系统上构建](#41-linux系统上构建)
+ - [4.2 macOS系统上构建](#42-macos系统上构建)
+ - [4.3 Windows系统上构建](#43-windows系统上构建)
+1. [打包](#5-打包)
+1. [安装](#6-安装)
+ - [6.1 Linux系统上安装](#61-linux系统上安装)
+ - [6.2 macOS系统上安装](#62-macos系统上安装)
+ - [6.3 Windows系统上安装](#63-windows系统上安装)
+1. [快速运行](#7-快速运行)
+ - [7.1 Linux系统上运行](#71-linux系统上运行)
+ - [7.2 macOS系统上运行](#72-macos系统上运行)
+ - [7.3 Windows系统上运行](#73-windows系统上运行)
+1. [测试](#8-测试)
+1. [版本发布](#9-版本发布)
+1. [工作流](#10-工作流)
+1. [覆盖率](#11-覆盖率)
+1. [成为社区贡献者](#12-成为社区贡献者)
+
+
+# 1. TDengine 简介
TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series Database, TSDB)。TDengine 能被广泛运用于物联网、工业互联网、车联网、IT 运维、金融等领域。除核心的时序数据库功能外,TDengine 还提供缓存、数据订阅、流式计算等功能,是一极简的时序数据处理平台,最大程度的减小系统设计的复杂度,降低研发和运营成本。与其他时序数据库相比,TDengine 的主要优势如下:
@@ -26,323 +55,335 @@ TDengine 是一款开源、高性能、云原生的时序数据库 (Time-Series
- **核心开源**:TDengine 的核心代码包括集群功能全部开源,截止到2022年8月1日,全球超过 135.9k 个运行实例,GitHub Star 18.7k,Fork 4.4k,社区活跃。
-# 文档
+了解TDengine高级功能的完整列表,请 [点击](https://tdengine.com/tdengine/)。体验TDengine最简单的方式是通过[TDengine云平台](https://cloud.tdengine.com)。
-关于完整的使用手册,系统架构和更多细节,请参考 [TDengine 文档](https://docs.taosdata.com) 或者 [TDengine Documentation](https://docs.tdengine.com)。
+# 2. 文档
-# 构建
+关于完整的使用手册,系统架构和更多细节,请参考 [TDengine](https://www.taosdata.com/) 或者 [TDengine 官方文档](https://docs.taosdata.com)。
+
+用户可根据需求选择通过[容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)、[Kubernetes](https://docs.taosdata.com/deployment/k8s/)来安装或直接使用无需安装部署的[云服务](https://cloud.taosdata.com/)。本快速指南是面向想自己编译、打包、测试的开发者的。
+
+如果想编译或测试TDengine连接器,请访问以下仓库: [JDBC连接器](https://github.com/taosdata/taos-connector-jdbc), [Go连接器](https://github.com/taosdata/driver-go), [Python连接器](https://github.com/taosdata/taos-connector-python), [Node.js连接器](https://github.com/taosdata/taos-connector-node), [C#连接器](https://github.com/taosdata/taos-connector-dotnet), [Rust连接器](https://github.com/taosdata/taos-connector-rust).
+
+# 3. 必备工具
TDengine 目前可以在 Linux、 Windows、macOS 等平台上安装和运行。任何 OS 的应用也可以选择 taosAdapter 的 RESTful 接口连接服务端 taosd。CPU 支持 X64/ARM64,后续会支持 MIPS64、Alpha64、ARM32、RISC-V 等 CPU 架构。目前不支持使用交叉编译器构建。
-用户可根据需求选择通过源码、[容器](https://docs.taosdata.com/get-started/docker/)、[安装包](https://docs.taosdata.com/get-started/package/)或[Kubernetes](https://docs.taosdata.com/deployment/k8s/)来安装。本快速指南仅适用于通过源码安装。
-
-TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。
+## 3.1 Linux系统
-为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。
+
-## 安装工具
+安装Linux必备工具
-### Ubuntu 18.04 及以上版本 & Debian:
+### Ubuntu 18.04、20.04、22.04
```bash
-sudo apt-get install -y gcc cmake build-essential git libssl-dev libgflags2.2 libgflags-dev
+sudo apt-get update
+sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
+ libsnappy-dev liblzma-dev zlib1g-dev pkg-config
```
-#### 为 taos-tools 安装编译需要的软件
-
-为了在 Ubuntu/Debian 系统上编译 [taos-tools](https://github.com/taosdata/taos-tools) 需要安装如下软件:
+### CentOS 8
```bash
-sudo apt install build-essential libjansson-dev libsnappy-dev liblzma-dev libz-dev zlib1g pkg-config
-```
-
-### CentOS 7.9
-
-```bash
-sudo yum install epel-release
sudo yum update
-sudo yum install -y gcc gcc-c++ make cmake3 gflags git openssl-devel
-sudo ln -sf /usr/bin/cmake3 /usr/bin/cmake
+sudo yum install -y epel-release gcc gcc-c++ make cmake git perl dnf-plugins-core
+sudo yum config-manager --set-enabled powertools
+sudo yum install -y zlib-static xz-devel snappy-devel jansson-devel pkgconfig libatomic-static libstdc++-static
```
-### CentOS 8/Fedora/Rocky Linux
+
+
+## 3.2 macOS系统
+
+
+
+安装macOS必备工具
+
+根据提示安装依赖工具 [brew](https://brew.sh/).
```bash
-sudo dnf install -y gcc gcc-c++ gflags make cmake epel-release git openssl-devel
-```
-
-#### 在 CentOS 上构建 taosTools 安装依赖软件
-
-
-#### CentOS 7.9
-
-
-```
-sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
-```
-
-#### CentOS 8/Fedora/Rocky Linux
-
-```
-sudo yum install -y epel-release
-sudo yum install -y dnf-plugins-core
-sudo yum config-manager --set-enabled powertools
-sudo yum install -y zlib-devel zlib-static xz-devel snappy-devel jansson jansson-devel pkgconfig libatomic libatomic-static libstdc++-static openssl-devel
-```
-
-注意:由于 snappy 缺乏 pkg-config 支持(参考 [链接](https://github.com/google/snappy/pull/86)),会导致 cmake 提示无法发现 libsnappy,实际上工作正常。
-
-若 powertools 安装失败,可以尝试改用:
-```
-sudo yum config-manager --set-enabled powertools
-```
-
-#### CentOS + devtoolset
-
-除上述编译依赖包,需要执行以下命令:
-
-```
-sudo yum install centos-release-scl
-sudo yum install devtoolset-9 devtoolset-9-libatomic-devel
-scl enable devtoolset-9 -- bash
-```
-
-### macOS
-
-```
brew install argp-standalone gflags pkgconfig
```
-### 设置 golang 开发环境
+
-TDengine 包含数个使用 Go 语言开发的组件,比如taosAdapter, 请参考 golang.org 官方文档设置 go 开发环境。
+## 3.3 Windows系统
-请使用 1.20 及以上版本。对于中国用户,我们建议使用代理来加速软件包下载。
+
-```
-go env -w GO111MODULE=on
-go env -w GOPROXY=https://goproxy.cn,direct
-```
+安装Windows必备工具
-缺省是不会构建 taosAdapter, 但您可以使用以下命令选择构建 taosAdapter 作为 RESTful 接口的服务。
+进行中。
-```
-cmake .. -DBUILD_HTTP=false
-```
+
-### 设置 rust 开发环境
+## 3.4 克隆仓库
-TDengine 包含数个使用 Rust 语言开发的组件. 请参考 rust-lang.org 官方文档设置 rust 开发环境。
-
-## 获取源码
-
-首先,你需要从 GitHub 克隆源码:
+通过如下命令将TDengine仓库克隆到指定计算机:
```bash
git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
-如果使用 https 协议下载比较慢,可以通过修改 ~/.gitconfig 文件添加以下两行设置使用 ssh 协议下载。需要首先上传 ssh 密钥到 GitHub,详细方法请参考 GitHub 官方文档。
-```
-[url "git@github.com:"]
- insteadOf = https://github.com/
-```
-## 特别说明
+# 4. 构建
-[JDBC 连接器](https://github.com/taosdata/taos-connector-jdbc), [Go 连接器](https://github.com/taosdata/driver-go),[Python 连接器](https://github.com/taosdata/taos-connector-python),[Node.js 连接器](https://github.com/taosdata/taos-connector-node),[C# 连接器](https://github.com/taosdata/taos-connector-dotnet) ,[Rust 连接器](https://github.com/taosdata/taos-connector-rust) 和 [Grafana 插件](https://github.com/taosdata/grafanaplugin)已移到独立仓库。
+TDengine 还提供一组辅助工具软件 taosTools,目前它包含 taosBenchmark(曾命名为 taosdemo)和 taosdump 两个软件。默认 TDengine 编译不包含 taosTools, 您可以在编译 TDengine 时使用`cmake .. -DBUILD_TOOLS=true` 来同时编译 taosTools。
+为了构建TDengine, 请使用 [CMake](https://cmake.org/) 3.13.0 或者更高版本。
-## 构建 TDengine
+## 4.1 Linux系统上构建
-### Linux 系统
+
-可以运行代码仓库中的 `build.sh` 脚本编译出 TDengine 和 taosTools(包含 taosBenchmark 和 taosdump)。
+Linux系统上构建步骤
+
+可以通过以下命令使用脚本 `build.sh` 编译TDengine和taosTools,包括taosBenchmark和taosdump:
```bash
./build.sh
```
-这个脚本等价于执行如下命令:
+也可以通过以下命令进行构建:
```bash
-mkdir debug
-cd debug
+mkdir debug && cd debug
cmake .. -DBUILD_TOOLS=true -DBUILD_CONTRIB=true
make
```
-您也可以选择使用 jemalloc 作为内存分配器,替代默认的 glibc:
+可以使用Jemalloc作为内存分配器,而不是使用glibc:
```bash
-apt install autoconf
cmake .. -DJEMALLOC_ENABLED=true
```
-
-在 X86-64、X86、arm64 平台上,TDengine 生成脚本可以自动检测机器架构。也可以手动配置 CPUTYPE 参数来指定 CPU 类型,如 aarch64 等。
-
-aarch64:
+TDengine构建脚本可以自动检测x86、x86-64、arm64平台上主机的体系结构。
+您也可以通过CPUTYPE选项手动指定架构:
```bash
cmake .. -DCPUTYPE=aarch64 && cmake --build .
```
-### Windows 系统
+
-如果你使用的是 Visual Studio 2013 版本:
+## 4.2 macOS系统上构建
-打开 cmd.exe,执行 vcvarsall.bat 时,为 64 位操作系统指定“x86_amd64”,为 32 位操作系统指定“x86”。
+
-```bash
+macOS系统上构建步骤
+
+请安装XCode命令行工具和cmake。使用XCode 11.4+在Catalina和Big Sur上完成验证。
+
+```shell
mkdir debug && cd debug
-"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < x86_amd64 | x86 >
+cmake .. && cmake --build .
+```
+
+
+
+## 4.3 Windows系统上构建
+
+
+
+Windows系统上构建步骤
+
+如果您使用的是Visual Studio 2013,请执行“cmd.exe”打开命令窗口执行如下命令。
+执行vcvarsall.bat时,64位的Windows请指定“amd64”,32位的Windows请指定“x86”。
+
+```cmd
+mkdir debug && cd debug
+"C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" < amd64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
-如果你使用的是 Visual Studio 2019 或 2017 版本:
+如果您使用Visual Studio 2019或2017:
-打开 cmd.exe,执行 vcvarsall.bat 时,为 64 位操作系统指定“x64”,为 32 位操作系统指定“x86”。
+请执行“cmd.exe”打开命令窗口执行如下命令。
+执行vcvarsall.bat时,64位的Windows请指定“x64”,32位的Windows请指定“x86”。
-```bash
+```cmd
mkdir debug && cd debug
"c:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" < x64 | x86 >
cmake .. -G "NMake Makefiles"
nmake
```
-你也可以从开始菜单中找到"Visual Studio < 2019 | 2017 >"菜单项,根据你的系统选择"x64 Native Tools Command Prompt for VS < 2019 | 2017 >"或"x86 Native Tools Command Prompt for VS < 2019 | 2017 >",打开命令行窗口,执行:
+或者,您可以通过点击Windows开始菜单打开命令窗口->“Visual Studio < 2019 | 2017 >”文件夹->“x64原生工具命令提示符VS < 2019 | 2017 >”或“x86原生工具命令提示符VS < 2019 | 2017 >”取决于你的Windows是什么架构,然后执行命令如下:
-```bash
+```cmd
mkdir debug && cd debug
cmake .. -G "NMake Makefiles"
nmake
```
+
-### macOS 系统
+# 5. 打包
-安装 XCode 命令行工具和 cmake. 在 Catalina 和 Big Sur 操作系统上,需要安装 XCode 11.4+ 版本。
+由于一些组件依赖关系,TDengine社区安装程序不能仅由该存储库创建。我们仍在努力改进。
+
+# 6. 安装
+
+
+## 6.1 Linux系统上安装
+
+
+
+Linux系统上安装详细步骤
+
+构建成功后,TDengine可以通过以下命令进行安装:
```bash
-mkdir debug && cd debug
-cmake .. && cmake --build .
+sudo make install
```
+从源代码安装还将为TDengine配置服务管理。用户也可以使用[TDengine安装包](https://docs.taosdata.com/get-started/package/)进行安装。
-# 安装
+
-## Linux 系统
+## 6.2 macOS系统上安装
-生成完成后,安装 TDengine:
+
+
+macOS系统上安装详细步骤
+
+构建成功后,TDengine可以通过以下命令进行安装:
```bash
sudo make install
```
-用户可以在[文件目录结构](https://docs.taosdata.com/reference/directory/)中了解更多在操作系统中生成的目录或文件。
+
-从源代码安装也会为 TDengine 配置服务管理 ,用户也可以选择[从安装包中安装](https://docs.taosdata.com/get-started/package/)。
+## 6.3 Windows系统上安装
-安装成功后,在终端中启动 TDengine 服务:
+
-```bash
-sudo systemctl start taosd
-```
+Windows系统上安装详细步骤
-用户可以使用 TDengine CLI 来连接 TDengine 服务,在终端中,输入:
-
-```bash
-taos
-```
-
-如果 TDengine CLI 连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印出错误消息。
-
-## Windows 系统
-
-生成完成后,安装 TDengine:
+构建成功后,TDengine可以通过以下命令进行安装:
```cmd
nmake install
```
-## macOS 系统
+
-生成完成后,安装 TDengine:
+# 7. 快速运行
+
+## 7.1 Linux系统上运行
+
+
+
+Linux系统上运行详细步骤
+
+在Linux系统上安装TDengine完成后,在终端运行如下命令启动服务:
```bash
-sudo make install
+sudo systemctl start taosd
```
-
-用户可以在[文件目录结构](https://docs.taosdata.com/reference/directory/)中了解更多在操作系统中生成的目录或文件。
-
-从源代码安装也会为 TDengine 配置服务管理 ,用户也可以选择[从安装包中安装](https://docs.taosdata.com/get-started/package/)。
-
-安装成功后,可以在应用程序中双击 TDengine 图标启动服务,或者在终端中启动 TDengine 服务:
-
-```bash
-sudo launchctl start com.tdengine.taosd
-```
-
-用户可以使用 TDengine CLI 来连接 TDengine 服务,在终端中,输入:
+然后用户可以通过如下命令使用TDengine命令行连接TDengine服务:
```bash
taos
```
-如果 TDengine CLI 连接服务成功,将会打印出欢迎消息和版本信息。如果失败,则会打印出错误消息。
+如果TDengine 命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示连接错误信息。
-## 快速运行
-
-如果不希望以服务方式运行 TDengine,也可以在终端中直接运行它。也即在生成完成后,执行以下命令(在 Windows 下,生成的可执行文件会带有 .exe 后缀,例如会名为 taosd.exe ):
+如果您不想将TDengine作为服务运行,您可以在当前终端中运行它。例如,要在构建完成后快速启动TDengine服务器,在终端中运行以下命令:(我们以Linux为例,Windows上的命令为 `taosd.exe`)
```bash
./build/bin/taosd -c test/cfg
```
-在另一个终端,使用 TDengine CLI 连接服务器:
+在另一个终端上,使用TDengine命令行连接服务器:
```bash
./build/bin/taos -c test/cfg
```
-"-c test/cfg"指定系统配置文件所在目录。
+选项 `-c test/cfg` 指定系统配置文件的目录。
-# 体验 TDengine
+
-在 TDengine 终端中,用户可以通过 SQL 命令来创建/删除数据库、表等,并进行插入查询操作。
+## 7.2 macOS系统上运行
-```sql
-CREATE DATABASE demo;
-USE demo;
-CREATE TABLE t (ts TIMESTAMP, speed INT);
-INSERT INTO t VALUES('2019-07-15 00:00:00', 10);
-INSERT INTO t VALUES('2019-07-15 01:00:00', 20);
-SELECT * FROM t;
- ts | speed |
-===================================
- 19-07-15 00:00:00.000| 10|
- 19-07-15 01:00:00.000| 20|
-Query OK, 2 row(s) in set (0.001700s)
+
+
+macOS系统上运行详细步骤
+
+在macOS上安装完成后启动服务,双击/applications/TDengine启动程序,或者在终端中执行如下命令:
+
+```bash
+sudo launchctl start com.tdengine.taosd
```
-# 应用开发
+然后在终端中使用如下命令通过TDengine命令行连接TDengine服务器:
-## 官方连接器
+```bash
+taos
+```
-TDengine 提供了丰富的应用程序开发接口,其中包括 C/C++、Java、Python、Go、Node.js、C# 、RESTful 等,便于用户快速开发应用:
+如果TDengine命令行连接服务器成功,系统将打印欢迎信息和版本信息。否则,将显示错误信息。
-- [Java](https://docs.taosdata.com/reference/connector/java/)
-- [C/C++](https://docs.taosdata.com/reference/connector/cpp/)
-- [Python](https://docs.taosdata.com/reference/connector/python/)
-- [Go](https://docs.taosdata.com/reference/connector/go/)
-- [Node.js](https://docs.taosdata.com/reference/connector/node/)
-- [Rust](https://docs.taosdata.com/reference/connector/rust/)
-- [C#](https://docs.taosdata.com/reference/connector/csharp/)
-- [RESTful API](https://docs.taosdata.com/reference/connector/rest-api/)
+
-# 成为社区贡献者
+
+## 7.3 Windows系统上运行
+
+
+
+Windows系统上运行详细步骤
+
+您可以使用以下命令在Windows平台上启动TDengine服务器:
+
+```cmd
+.\build\bin\taosd.exe -c test\cfg
+```
+
+在另一个终端上,使用TDengine命令行连接服务器:
+
+```cmd
+.\build\bin\taos.exe -c test\cfg
+```
+
+选项 `-c test\cfg` 指定系统配置文件的目录。
+
+
+
+# 8. 测试
+
+有关如何在TDengine上运行不同类型的测试,请参考 [TDengine测试](./tests/README-CN.md)
+
+# 9. 版本发布
+
+TDengine发布版本的完整列表,请参考 [版本列表](https://github.com/taosdata/TDengine/releases)
+
+# 10. 工作流
+
+TDengine构建检查工作流可以参考 [Github Action](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml), 更多的工作流正在创建中,将很快可用。
+
+# 11. 覆盖率
+
+最新的TDengine测试覆盖率报告可参考 [coveralls.io](https://coveralls.io/github/taosdata/TDengine)
+
+
+
+如何在本地运行测试覆盖率报告?
+
+在本地创建测试覆盖率报告(HTML格式),请运行以下命令:
+
+```bash
+cd tests
+bash setup-lcov.sh -v 1.16 && ./run_local_coverage.sh -b main -c task
+# on main branch and run cases in longtimeruning_cases.task
+# for more information about options please refer to ./run_local_coverage.sh -h
+```
+> **注意:**
+> 请注意,-b和-i选项将使用-DCOVER=true选项重新编译TDengine,这可能需要花费一些时间。
+
+
+
+# 12. 成为社区贡献者
点击 [这里](https://www.taosdata.com/contributor),了解如何成为 TDengine 的贡献者。
-
-# 加入技术交流群
-
-TDengine 官方社群「物联网大数据群」对外开放,欢迎您加入讨论。搜索微信号 "tdengine",加小 T 为好友,即可入群。
diff --git a/README.md b/README.md
index f540b1cc43..6cfd1980b1 100644
--- a/README.md
+++ b/README.md
@@ -10,10 +10,10 @@
[](https://github.com/taosdata/TDengine/actions/workflows/taosd-ci-build.yml)
[](https://coveralls.io/github/taosdata/TDengine?branch=3.0)
-
+[](https://github.com/feici02/TDengine/commits/main/)
-
-
+[](https://github.com/taosdata/TDengine/releases)
+[](https://github.com/taosdata/TDengine/blob/main/LICENSE)
[](https://bestpractices.coreinfrastructure.org/projects/4201)
[](https://twitter.com/tdenginedb)
@@ -74,8 +74,14 @@ For a full list of TDengine competitive advantages, please [check here](https://
For user manual, system design and architecture, please refer to [TDengine Documentation](https://docs.tdengine.com) ([TDengine 文档](https://docs.taosdata.com))
+You can choose to install TDengine via [container](https://docs.tdengine.com/get-started/deploy-in-docker/), [installation package](https://docs.tdengine.com/get-started/deploy-from-package/), [Kubernetes](https://docs.tdengine.com/operations-and-maintenance/deploy-your-cluster/#kubernetes-deployment) or try [fully managed service](https://cloud.tdengine.com/) without installation. This quick guide is for developers who want to contribute, build, release and test TDengine by themselves.
+
+For contributing/building/testing TDengine Connectors, please check the following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust).
+
# 3. Prerequisites
+At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment.
+
## 3.1 On Linux
@@ -85,7 +91,7 @@ For user manual, system design and architecture, please refer to [TDengine Docum
### For Ubuntu 18.04、20.04、22.04
```bash
-sudo apt-get udpate
+sudo apt-get update
sudo apt-get install -y gcc cmake build-essential git libjansson-dev \
libsnappy-dev liblzma-dev zlib1g-dev pkg-config
```
@@ -127,10 +133,6 @@ Work in Progress.
## 3.4 Clone the repo
-
-
-Clone the repo
-
Clone the repository to the target machine:
```bash
@@ -138,21 +140,13 @@ git clone https://github.com/taosdata/TDengine.git
cd TDengine
```
-
-> **NOTE:**
-> TDengine Connectors can be found in following repositories: [JDBC Connector](https://github.com/taosdata/taos-connector-jdbc), [Go Connector](https://github.com/taosdata/driver-go), [Python Connector](https://github.com/taosdata/taos-connector-python), [Node.js Connector](https://github.com/taosdata/taos-connector-node), [C# Connector](https://github.com/taosdata/taos-connector-dotnet), [Rust Connector](https://github.com/taosdata/taos-connector-rust).
-
# 4. Building
-At the moment, TDengine server supports running on Linux/Windows/MacOS systems. Any application can also choose the RESTful interface provided by taosAdapter to connect the taosd service. TDengine supports X64/ARM64 CPU, and it will support MIPS64, Alpha64, ARM32, RISC-V and other CPU architectures in the future. Right now we don't support build with cross-compiling environment.
-
-You can choose to install through source code, [container](https://docs.tdengine.com/get-started/deploy-in-docker/), [installation package](https://docs.tdengine.com/get-started/deploy-from-package/) or [Kubernetes](https://docs.tdengine.com/operations-and-maintenance/deploy-your-cluster/#kubernetes-deployment). This quick guide only applies to install from source.
-
TDengine provide a few useful tools such as taosBenchmark (was named taosdemo) and taosdump. They were part of TDengine. By default, TDengine compiling does not include taosTools. You can use `cmake .. -DBUILD_TOOLS=true` to make them be compiled with TDengine.
-To build TDengine, use [CMake](https://cmake.org/) 3.13.0 or higher versions in the project directory.
+TDengine requires [GCC](https://gcc.gnu.org/) 9.3.1 or higher and [CMake](https://cmake.org/) 3.13.0 or higher for building.
## 4.1 Build on Linux
diff --git a/cmake/cmake.options b/cmake/cmake.options
index e3b5782d85..3e655b1796 100644
--- a/cmake/cmake.options
+++ b/cmake/cmake.options
@@ -166,6 +166,10 @@ IF(${BUILD_WITH_ANALYSIS})
set(BUILD_WITH_S3 ON)
ENDIF()
+IF(${TD_LINUX})
+ set(BUILD_WITH_ANALYSIS ON)
+ENDIF()
+
IF(${BUILD_S3})
IF(${BUILD_WITH_S3})
@@ -205,13 +209,6 @@ option(
off
)
-
-option(
- BUILD_WITH_NURAFT
- "If build with NuRaft"
- OFF
-)
-
option(
BUILD_WITH_UV
"If build with libuv"
diff --git a/cmake/curl_CMakeLists.txt.in b/cmake/curl_CMakeLists.txt.in
index 2a14018810..93ee720de1 100644
--- a/cmake/curl_CMakeLists.txt.in
+++ b/cmake/curl_CMakeLists.txt.in
@@ -12,7 +12,7 @@ ExternalProject_Add(curl2
BUILD_IN_SOURCE TRUE
BUILD_ALWAYS 1
UPDATE_COMMAND ""
- CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl #--enable-debug
+ CONFIGURE_COMMAND ${CONTRIB_CONFIG_ENV} ./configure --prefix=$ENV{HOME}/.cos-local.2 --with-ssl=$ENV{HOME}/.cos-local.2 --enable-websockets --enable-shared=no --disable-ldap --disable-ldaps --without-brotli --without-zstd --without-libidn2 --without-nghttp2 --without-libpsl --without-librtmp #--enable-debug
BUILD_COMMAND make -j
INSTALL_COMMAND make install
TEST_COMMAND ""
diff --git a/cmake/taostools_CMakeLists.txt.in b/cmake/taostools_CMakeLists.txt.in
deleted file mode 100644
index 9a6a5329ae..0000000000
--- a/cmake/taostools_CMakeLists.txt.in
+++ /dev/null
@@ -1,13 +0,0 @@
-
-# taos-tools
-ExternalProject_Add(taos-tools
- GIT_REPOSITORY https://github.com/taosdata/taos-tools.git
- GIT_TAG 3.0
- SOURCE_DIR "${TD_SOURCE_DIR}/tools/taos-tools"
- BINARY_DIR ""
- #BUILD_IN_SOURCE TRUE
- CONFIGURE_COMMAND ""
- BUILD_COMMAND ""
- INSTALL_COMMAND ""
- TEST_COMMAND ""
-)
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
index b2729ed7a7..745dc01cf6 100644
--- a/contrib/CMakeLists.txt
+++ b/contrib/CMakeLists.txt
@@ -205,9 +205,18 @@ ENDIF()
# download dependencies
configure_file(${CONTRIB_TMP_FILE} "${TD_CONTRIB_DIR}/deps-download/CMakeLists.txt")
execute_process(COMMAND "${CMAKE_COMMAND}" -G "${CMAKE_GENERATOR}" .
- WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
+ WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download"
+ RESULT_VARIABLE result)
+IF(NOT result EQUAL "0")
+  message(FATAL_ERROR "CMake step for downloading dependencies failed: ${result}")
+ENDIF()
+
execute_process(COMMAND "${CMAKE_COMMAND}" --build .
- WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download")
+ WORKING_DIRECTORY "${TD_CONTRIB_DIR}/deps-download"
+ RESULT_VARIABLE result)
+IF(NOT result EQUAL "0")
+ message(FATAL_ERROR "CMake step for building dependencies failed: ${result}")
+ENDIF()
# ================================================================================================
# Build
diff --git a/contrib/test/CMakeLists.txt b/contrib/test/CMakeLists.txt
index 5d613dfed2..318d00b92c 100644
--- a/contrib/test/CMakeLists.txt
+++ b/contrib/test/CMakeLists.txt
@@ -20,14 +20,6 @@ if(${BUILD_WITH_SQLITE})
add_subdirectory(sqlite)
endif(${BUILD_WITH_SQLITE})
-if(${BUILD_WITH_CRAFT})
- add_subdirectory(craft)
-endif(${BUILD_WITH_CRAFT})
-
-if(${BUILD_WITH_TRAFT})
- # add_subdirectory(traft)
-endif(${BUILD_WITH_TRAFT})
-
if(${BUILD_S3})
add_subdirectory(azure)
endif()
diff --git a/docs/en/07-develop/07-tmq.md b/docs/en/07-develop/07-tmq.md
index e556dc5f37..6b92ace6a2 100644
--- a/docs/en/07-develop/07-tmq.md
+++ b/docs/en/07-develop/07-tmq.md
@@ -31,12 +31,12 @@ There are many parameters for creating consumers, which flexibly support various
| Parameter Name | Type | Description | Remarks |
| :-----------------------: | :-----: | ------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `td.connect.ip` | string | Server IP address | |
+| `td.connect.ip` | string | FQDN of Server | ip or host name |
| `td.connect.user` | string | Username | |
| `td.connect.pass` | string | Password | |
| `td.connect.port` | integer | Server port number | |
-| `group.id` | string | Consumer group ID, the same consumer group shares consumption progress | **Required**. Maximum length: 192. Each topic can have up to 100 consumer groups |
-| `client.id` | string | Client ID | Maximum length: 192 |
+| `group.id` | string | Consumer group ID, the same consumer group shares consumption progress | **Required**. Maximum length: 192,excess length will be cut off. Each topic can have up to 100 consumer groups |
+| `client.id` | string | Client ID | Maximum length: 255, excess length will be cut off. |
| `auto.offset.reset` | enum | Initial position of the consumer group subscription | `earliest`: default(version < 3.2.0.0); subscribe from the beginning; `latest`: default(version >= 3.2.0.0); only subscribe from the latest data; `none`: cannot subscribe without a committed offset |
| `enable.auto.commit` | boolean | Whether to enable automatic consumption point submission, true: automatic submission, client application does not need to commit; false: client application needs to commit manually | Default is true |
| `auto.commit.interval.ms` | integer | Time interval for automatically submitting consumption records, in milliseconds | Default is 5000 |
diff --git a/docs/en/08-operation/03-deployment.md b/docs/en/08-operation/03-deployment.md
deleted file mode 100644
index 5eca331a21..0000000000
--- a/docs/en/08-operation/03-deployment.md
+++ /dev/null
@@ -1,882 +0,0 @@
----
-title: Deploying Your Cluster
-slug: /operations-and-maintenance/deploy-your-cluster
----
-
-Since TDengine was designed with a distributed architecture from the beginning, it has powerful horizontal scaling capabilities to meet the growing data processing needs. Therefore, TDengine supports clustering and has open-sourced this core functionality. Users can choose from four deployment methods according to their actual environment and needs—manual deployment, Docker deployment, Kubernetes deployment, and Helm deployment.
-
-## Manual Deployment
-
-### Deploying taosd
-
-taosd is the most important service component in the TDengine cluster. This section describes the steps to manually deploy a taosd cluster.
-
-#### 1. Clear Data
-
-If the physical nodes for setting up the cluster contain previous test data or have had other versions of TDengine installed (such as 1.x/2.x), please delete them and clear all data first.
-
-#### 2. Check Environment
-
-Before deploying the TDengine cluster, it is crucial to thoroughly check the network settings of all dnodes and the physical nodes where the applications are located. Here are the steps to check:
-
-- Step 1: Execute the `hostname -f` command on each physical node to view and confirm that all node hostnames are unique. This step can be omitted for nodes where application drivers are located.
-- Step 2: Execute the `ping host` command on each physical node, where host is the hostname of other physical nodes. This step aims to detect the network connectivity between the current node and other physical nodes. If you cannot ping through, immediately check the network and DNS settings. For Linux operating systems, check the `/etc/hosts` file; for Windows operating systems, check the `C:\Windows\system32\drivers\etc\hosts` file. Network issues will prevent the formation of a cluster, so be sure to resolve this issue.
-- Step 3: Repeat the above network detection steps on the physical nodes where the application is running. If the network is found to be problematic, the application will not be able to connect to the taosd service. At this point, carefully check the DNS settings or hosts file of the physical node where the application is located to ensure it is configured correctly.
-- Step 4: Check ports to ensure that all hosts in the cluster can communicate over TCP on port 6030.
-
-By following these steps, you can ensure that all nodes communicate smoothly at the network level, laying a solid foundation for the successful deployment of the TDengine cluster.
-
-#### 3. Installation
-
-To ensure consistency and stability within the cluster, install the same version of TDengine on all physical nodes.
-
-#### 4. Modify Configuration
-
-Modify the configuration file of TDengine (the configuration files of all nodes need to be modified). Assuming the endpoint of the first dnode to be started is `h1.tdengine.com:6030`, the cluster-related parameters are as follows.
-
-```shell
-# firstEp is the first dnode that each dnode connects to after the initial startup
-firstEp h1.tdengine.com:6030
-# Must be configured to the FQDN of this dnode, if there is only one hostname on this machine, you can comment out or delete the following line
-fqdn h1.tdengine.com
-# Configure the port of this dnode, default is 6030
-serverPort 6030
-```
-
-The parameters that must be modified are firstEp and fqdn. For each dnode, the firstEp configuration should remain consistent, but fqdn must be set to the value of the dnode it is located on. Other parameters do not need to be modified unless you are clear on why they should be changed.
-
-For dnodes wishing to join the cluster, it is essential to ensure that the parameters related to the TDengine cluster listed in the table below are set identically. Any mismatch in parameters may prevent the dnode from successfully joining the cluster.
-
-| Parameter Name | Meaning |
-|:----------------:|:---------------------------------------------------------:|
-| statusInterval | Interval at which dnode reports status to mnode |
-| timezone | Time zone |
-| locale | System locale information and encoding format |
-| charset | Character set encoding |
-| ttlChangeOnWrite | Whether ttl expiration changes with table modification |
-
-#### 5. Start
-
-Start the first dnode, such as `h1.tdengine.com`, following the steps mentioned above. Then execute taos in the terminal to start TDengine's CLI program taos, and execute the `show dnodes` command within it to view all dnode information in the current cluster.
-
-```shell
-taos> show dnodes;
- id | endpoint | vnodes|support_vnodes|status| create_time | note |
-===================================================================================
- 1| h1.tdengine.com:6030 | 0| 1024| ready| 2022-07-16 10:50:42.673 | |
-```
-
-You can see that the endpoint of the dnode node that has just started is `h1.tdengine.com:6030`. This address is the first Ep of the new cluster.
-
-#### 6. Adding dnode
-
-Follow the steps mentioned earlier, start taosd on each physical node. Each dnode needs to configure the firstEp parameter in the taos.cfg file to the endpoint of the first node of the new cluster, which in this case is `h1.tdengine.com:6030`. On the machine where the first dnode is located, run taos in the terminal, open TDengine's CLI program taos, then log into the TDengine cluster, and execute the following SQL.
-
-```shell
-create dnode "h2.tdengine.com:6030"
-```
-
-Add the new dnode's endpoint to the cluster's endpoint list. You need to put `fqdn:port` in double quotes, otherwise, it will cause an error when running. Please note to replace the example h2.tdengine.com:6030 with the endpoint of this new dnode. Then execute the following SQL to see if the new node has successfully joined. If the dnode you want to join is currently offline, please refer to the "Common Issues" section later in this chapter for a solution.
-
-```shell
-show dnodes;
-```
-
-In the logs, please confirm that the fqdn and port of the output dnode are consistent with the endpoint you just tried to add. If they are not consistent, correct it to the correct endpoint. By following the steps above, you can continuously add new dnodes to the cluster one by one, thereby expanding the scale of the cluster and improving overall performance. Make sure to follow the correct process when adding new nodes, which helps maintain the stability and reliability of the cluster.
-
-**Tips**
-
-- Any dnode that has joined the cluster can serve as the firstEp for subsequent nodes to be added. The firstEp parameter only functions when that dnode first joins the cluster. After joining, the dnode will save the latest mnode's endpoint list, and subsequently, it no longer depends on this parameter. The firstEp parameter in the configuration file is mainly used for client connections, and if no parameters are set for TDengine's CLI, it will default to connecting to the node specified by firstEp.
-- Two dnodes that have not configured the firstEp parameter will run independently after starting. At this time, it is not possible to join one dnode to another to form a cluster.
-- TDengine does not allow merging two independent clusters into a new cluster.
-
-#### 7. Adding mnode
-
-When creating a TDengine cluster, the first dnode automatically becomes the mnode of the cluster, responsible for managing and coordinating the cluster. To achieve high availability of mnode, subsequent dnodes need to manually create mnode. Please note that a cluster can create up to 3 mnodes, and only one mnode can be created on each dnode. When the number of dnodes in the cluster reaches or exceeds 3, you can create mnode for the existing cluster. In the first dnode, first log into TDengine through the CLI program taos, then execute the following SQL.
-
-```shell
-create mnode on dnode
-```
-
-Please note to replace the dnodeId in the example above with the serial number of the newly created dnode (which can be obtained by executing the `show dnodes` command). Finally, execute the following `show mnodes` to see if the newly created mnode has successfully joined the cluster.
-
-**Tips**
-
-During the process of setting up a TDengine cluster, if a new node always shows as offline after executing the create dnode command to add a new node, please follow these steps for troubleshooting.
-
-- Step 1, check whether the taosd service on the new node has started normally. You can confirm this by checking the log files or using the ps command.
-- Step 2, if the taosd service has started, next check whether the new node's network connection is smooth and confirm whether the firewall has been turned off. Network issues or firewall settings may prevent the node from communicating with other nodes in the cluster.
-- Step 3, use the taos -h fqdn command to try to connect to the new node, then execute the show dnodes command. This will display the running status of the new node as an independent cluster. If the displayed list is inconsistent with that shown on the main node, it indicates that the new node may have formed a single-node cluster on its own. To resolve this issue, follow these steps. First, stop the taosd service on the new node. Second, clear all files in the dataDir directory specified in the taos.cfg configuration file on the new node. This will delete all data and configuration information related to that node. Finally, restart the taosd service on the new node. This will reset the new node to its initial state, ready to rejoin the main cluster.
-
-### Deploying taosAdapter
-
-This section discusses how to deploy taosAdapter, which provides RESTful and WebSocket access capabilities for the TDengine cluster, thus playing a very important role in the cluster.
-
-1. Installation
-
-After the installation of TDengine Enterprise is complete, taosAdapter can be used. If you want to deploy taosAdapter on different servers, TDengine Enterprise needs to be installed on these servers.
-
-2. Single Instance Deployment
-
-Deploying a single instance of taosAdapter is very simple. For specific commands and configuration parameters, please refer to the taosAdapter section in the manual.
-
-3. Multiple Instances Deployment
-
-The main purposes of deploying multiple instances of taosAdapter are as follows:
-
-- To increase the throughput of the cluster and prevent taosAdapter from becoming a system bottleneck.
-- To enhance the robustness and high availability of the cluster, allowing requests entering the business system to be automatically routed to other instances when one instance fails.
-
-When deploying multiple instances of taosAdapter, it is necessary to address load balancing issues to avoid overloading some nodes while others remain idle. During the deployment process, multiple single instances need to be deployed separately, and the deployment steps for each instance are exactly the same as those for deploying a single instance. The next critical part is configuring Nginx. Below is a verified best practice configuration; you only need to replace the endpoint with the correct address in the actual environment. For the meanings of each parameter, please refer to the official Nginx documentation.
-
-```json
-user root;
-worker_processes auto;
-error_log /var/log/nginx_error.log;
-
-
-events {
- use epoll;
- worker_connections 1024;
-}
-
-http {
-
- access_log off;
-
- map $http_upgrade $connection_upgrade {
- default upgrade;
- '' close;
- }
-
- server {
- listen 6041;
- location ~* {
- proxy_pass http://dbserver;
- proxy_read_timeout 600s;
- proxy_send_timeout 600s;
- proxy_connect_timeout 600s;
- proxy_next_upstream error http_502 non_idempotent;
- proxy_http_version 1.1;
- proxy_set_header Upgrade $http_upgrade;
- proxy_set_header Connection $http_connection;
- }
- }
- server {
- listen 6043;
- location ~* {
- proxy_pass http://keeper;
- proxy_read_timeout 60s;
- proxy_next_upstream error http_502 http_500 non_idempotent;
- }
- }
-
- server {
- listen 6060;
- location ~* {
- proxy_pass http://explorer;
- proxy_read_timeout 60s;
- proxy_next_upstream error http_502 http_500 non_idempotent;
- }
- }
- upstream dbserver {
- least_conn;
- server 172.16.214.201:6041 max_fails=0;
- server 172.16.214.202:6041 max_fails=0;
- server 172.16.214.203:6041 max_fails=0;
- }
- upstream keeper {
- ip_hash;
- server 172.16.214.201:6043 ;
- server 172.16.214.202:6043 ;
- server 172.16.214.203:6043 ;
- }
- upstream explorer{
- ip_hash;
- server 172.16.214.201:6060 ;
- server 172.16.214.202:6060 ;
- server 172.16.214.203:6060 ;
- }
-}
-```
-
-### Deploying taosKeeper
-
-To use the monitoring capabilities of TDengine, taosKeeper is an essential component. For monitoring, please refer to [TDinsight](../../tdengine-reference/components/tdinsight), and for details on deploying taosKeeper, please refer to the [taosKeeper Reference Manual](../../tdengine-reference/components/taoskeeper).
-
-### Deploying taosX
-
-To utilize the data ingestion capabilities of TDengine, it is necessary to deploy the taosX service. For detailed explanations and deployment, please refer to the enterprise edition reference manual.
-
-### Deploying taosX-Agent
-
-For some data sources such as Pi, OPC, etc., due to network conditions and data source access restrictions, taosX cannot directly access the data sources. In such cases, a proxy service, taosX-Agent, needs to be deployed. For detailed explanations and deployment, please refer to the enterprise edition reference manual.
-
-### Deploying taos-Explorer
-
-TDengine provides the capability to visually manage TDengine clusters. To use the graphical interface, the taos-Explorer service needs to be deployed. For detailed explanations and deployment, please refer to the [taos-Explorer Reference Manual](../../tdengine-reference/components/taosexplorer/)
-
-## Docker Deployment
-
-This section will explain how to start TDengine services in Docker containers and access them. You can use environment variables in the docker run command line or docker-compose file to control the behavior of services in the container.
-
-### Starting TDengine
-
-The TDengine image is launched with HTTP service activated by default. Use the following command to create a containerized TDengine environment with HTTP service.
-
-```shell
-docker run -d --name tdengine \
--v ~/data/taos/dnode/data:/var/lib/taos \
--v ~/data/taos/dnode/log:/var/log/taos \
--p 6041:6041 tdengine/tdengine
-```
-
-Detailed parameter explanations are as follows:
-
-- /var/lib/taos: Default data file directory for TDengine, can be modified through the configuration file.
-- /var/log/taos: Default log file directory for TDengine, can be modified through the configuration file.
-
-The above command starts a container named tdengine and maps the HTTP service's port 6041 to the host port 6041. The following command can verify if the HTTP service in the container is available.
-
-```shell
-curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
-```
-
-Run the following command to access TDengine within the container.
-
-```shell
-$ docker exec -it tdengine taos
-
-taos> show databases;
- name |
-=================================
- information_schema |
- performance_schema |
-Query OK, 2 rows in database (0.033802s)
-```
-
-Within the container, TDengine CLI or various connectors (such as JDBC-JNI) connect to the server via the container's hostname. Accessing TDengine inside the container from outside is more complex, and using RESTful/WebSocket connection methods is the simplest approach.
-
-### Starting TDengine in host network mode
-
-Run the following command to start TDengine in host network mode, which allows using the host's FQDN to establish connections, rather than using the container's hostname.
-
-```shell
-docker run -d --name tdengine --network host tdengine/tdengine
-```
-
-This method is similar to starting TDengine on the host using the systemctl command. If the TDengine client is already installed on the host, you can directly use the following command to access the TDengine service.
-
-```shell
-$ taos
-
-taos> show dnodes;
- id | endpoint | vnodes | support_vnodes | status | create_time | note |
-=================================================================================================================================================
- 1 | vm98:6030 | 0 | 32 | ready | 2022-08-19 14:50:05.337 | |
-Query OK, 1 rows in database (0.010654s)
-```
-
-### Start TDengine with a specified hostname and port
-
-Use the following command to establish a connection on a specified hostname using the TAOS_FQDN environment variable or the fqdn configuration item in taos.cfg. This method provides greater flexibility for deploying TDengine.
-
-```shell
-docker run -d \
- --name tdengine \
- -e TAOS_FQDN=tdengine \
- -p 6030:6030 \
- -p 6041-6049:6041-6049 \
- -p 6041-6049:6041-6049/udp \
- tdengine/tdengine
-```
-
-First, the above command starts a TDengine service in the container, listening on the hostname tdengine, and maps the container's port 6030 to the host's port 6030, and the container's port range [6041, 6049] to the host's port range [6041, 6049]. If the port range on the host is already in use, you can modify the command to specify a free port range on the host.
-
-Secondly, ensure that the hostname tdengine is resolvable in /etc/hosts. Use the following command to save the correct configuration information to the hosts file.
-
-```shell
-echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
-```
-
-Finally, you can access the TDengine service using the TDengine CLI with tdengine as the server address, as follows.
-
-```shell
-taos -h tdengine -P 6030
-```
-
-If TAOS_FQDN is set to the same as the hostname of the host, the effect is the same as "starting TDengine in host network mode".
-
-## Kubernetes Deployment
-
-As a time-series database designed for cloud-native architectures, TDengine inherently supports Kubernetes deployment. This section introduces how to step-by-step create a highly available TDengine cluster for production use using YAML files, with a focus on common operations of TDengine in a Kubernetes environment. This subsection requires readers to have a certain understanding of Kubernetes, be proficient in running common kubectl commands, and understand concepts such as statefulset, service, and pvc. Readers unfamiliar with these concepts can refer to the Kubernetes official website for learning.
-To meet the requirements of high availability, the cluster needs to meet the following requirements:
-
-- 3 or more dnodes: Multiple vnodes in the same vgroup of TDengine should not be distributed on the same dnode, so if creating a database with 3 replicas, the number of dnodes should be 3 or more.
-- 3 mnodes: mnodes are responsible for managing the entire cluster, with TDengine defaulting to one mnode. If the dnode hosting this mnode goes offline, the entire cluster becomes unavailable.
-- 3 replicas of the database: TDengine's replica configuration is at the database level, so 3 replicas can ensure that the cluster remains operational even if any one of the 3 dnodes goes offline. If 2 dnodes go offline, the cluster becomes unavailable because RAFT cannot complete the election. (Enterprise edition: In disaster recovery scenarios, if the data files of any node are damaged, recovery can be achieved by restarting the dnode.)
-
-### Prerequisites
-
-To deploy and manage a TDengine cluster using Kubernetes, the following preparations need to be made.
-
-- This article applies to Kubernetes v1.19 and above.
-- This article uses the kubectl tool for installation and deployment, please install the necessary software in advance.
-- Kubernetes has been installed and deployed and can normally access or update necessary container repositories or other services.
-
-### Configure Service
-
-Create a Service configuration file: taosd-service.yaml, the service name metadata.name (here "taosd") will be used in the next step. First, add the ports used by TDengine, then set the determined labels app (here "tdengine") in the selector.
-
-```yaml
----
-apiVersion: v1
-kind: Service
-metadata:
- name: "taosd"
- labels:
- app: "tdengine"
-spec:
- ports:
- - name: tcp6030
- protocol: "TCP"
- port: 6030
- - name: tcp6041
- protocol: "TCP"
- port: 6041
- selector:
- app: "tdengine"
-```
-
-### Stateful Services StatefulSet
-
-According to Kubernetes' descriptions of various deployment types, we will use StatefulSet as the deployment resource type for TDengine. Create the file tdengine.yaml, where replicas define the number of cluster nodes as 3. The node timezone is set to China (Asia/Shanghai), and each node is allocated 5G of standard storage, which you can modify according to actual conditions.
-
-Please pay special attention to the configuration of startupProbe. After a dnode's Pod goes offline for a period of time and then restarts, the newly online dnode will be temporarily unavailable. If the startupProbe configuration is too small, Kubernetes will consider the Pod to be in an abnormal state and attempt to restart the Pod. This dnode's Pod will frequently restart and never return to a normal state.
-
-```yaml
----
-apiVersion: apps/v1
-kind: StatefulSet
-metadata:
- name: "tdengine"
- labels:
- app: "tdengine"
-spec:
- serviceName: "taosd"
- replicas: 3
- updateStrategy:
- type: RollingUpdate
- selector:
- matchLabels:
- app: "tdengine"
- template:
- metadata:
- name: "tdengine"
- labels:
- app: "tdengine"
- spec:
- affinity:
- podAntiAffinity:
- preferredDuringSchedulingIgnoredDuringExecution:
- - weight: 100
- podAffinityTerm:
- labelSelector:
- matchExpressions:
- - key: app
- operator: In
- values:
- - tdengine
- topologyKey: kubernetes.io/hostname
- containers:
- - name: "tdengine"
- image: "tdengine/tdengine:3.2.3.0"
- imagePullPolicy: "IfNotPresent"
- ports:
- - name: tcp6030
- protocol: "TCP"
- containerPort: 6030
- - name: tcp6041
- protocol: "TCP"
- containerPort: 6041
- env:
- # POD_NAME for FQDN config
- - name: POD_NAME
- valueFrom:
- fieldRef:
- fieldPath: metadata.name
- # SERVICE_NAME and NAMESPACE for fqdn resolve
- - name: SERVICE_NAME
- value: "taosd"
- - name: STS_NAME
- value: "tdengine"
- - name: STS_NAMESPACE
- valueFrom:
- fieldRef:
- fieldPath: metadata.namespace
- # TZ for timezone settings, we recommend to always set it.
- - name: TZ
- value: "Asia/Shanghai"
- # Environment variables with prefix TAOS_ will be parsed and converted into corresponding parameter in taos.cfg. For example, serverPort in taos.cfg should be configured by TAOS_SERVER_PORT when using K8S to deploy
- - name: TAOS_SERVER_PORT
- value: "6030"
- # Must set if you want a cluster.
- - name: TAOS_FIRST_EP
- value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
- # TAOS_FQND should always be set in k8s env.
- - name: TAOS_FQDN
- value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
- volumeMounts:
- - name: taosdata
- mountPath: /var/lib/taos
- startupProbe:
- exec:
- command:
- - taos-check
- failureThreshold: 360
- periodSeconds: 10
- readinessProbe:
- exec:
- command:
- - taos-check
- initialDelaySeconds: 5
- timeoutSeconds: 5000
- livenessProbe:
- exec:
- command:
- - taos-check
- initialDelaySeconds: 15
- periodSeconds: 20
- volumeClaimTemplates:
- - metadata:
- name: taosdata
- spec:
- accessModes:
- - "ReadWriteOnce"
- storageClassName: "standard"
- resources:
- requests:
- storage: "5Gi"
-```
-
-### Deploying TDengine Cluster Using kubectl Command
-
-First, create the corresponding namespace `dengine-test`, as well as the PVC, ensuring that there is enough remaining space with `storageClassName` set to `standard`. Then execute the following commands in sequence:
-
-```shell
-kubectl apply -f taosd-service.yaml -n tdengine-test
-```
-
-The above configuration will create a three-node TDengine cluster, with `dnode` automatically configured. You can use the `show dnodes` command to view the current cluster nodes:
-
-```shell
-kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"
-kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show dnodes"
-kubectl exec -it tdengine-2 -n tdengine-test -- taos -s "show dnodes"
-```
-
-The output is as follows:
-
-```shell
-taos show dnodes
- id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
-=============================================================================================================================================================================================================================================
- 1 | tdengine-0.ta... | 0 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-19 17:54:18.469 | | | |
- 2 | tdengine-1.ta... | 0 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-19 17:54:38.698 | | | |
- 3 | tdengine-2.ta... | 0 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-19 17:55:02.039 | | | |
-Query OK, 3 row(s) in set (0.001853s)
-```
-
-View the current mnode:
-
-```shell
-kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"
-taos> show mnodes\G
-*************************** 1.row ***************************
- id: 1
- endpoint: tdengine-0.taosd.tdengine-test.svc.cluster.local:6030
- role: leader
- status: ready
-create_time: 2023-07-19 17:54:18.559
-reboot_time: 2023-07-19 17:54:19.520
-Query OK, 1 row(s) in set (0.001282s)
-```
-
-Create mnode
-
-```shell
-kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "create mnode on dnode 2"
-kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "create mnode on dnode 3"
-```
-
-View mnode
-
-```shell
-kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"
-
-taos> show mnodes\G
-*************************** 1.row ***************************
- id: 1
- endpoint: tdengine-0.taosd.tdengine-test.svc.cluster.local:6030
- role: leader
- status: ready
-create_time: 2023-07-19 17:54:18.559
-reboot_time: 2023-07-20 09:19:36.060
-*************************** 2.row ***************************
- id: 2
- endpoint: tdengine-1.taosd.tdengine-test.svc.cluster.local:6030
- role: follower
- status: ready
-create_time: 2023-07-20 09:22:05.600
-reboot_time: 2023-07-20 09:22:12.838
-*************************** 3.row ***************************
- id: 3
- endpoint: tdengine-2.taosd.tdengine-test.svc.cluster.local:6030
- role: follower
- status: ready
-create_time: 2023-07-20 09:22:20.042
-reboot_time: 2023-07-20 09:22:23.271
-Query OK, 3 row(s) in set (0.003108s)
-```
-
-### Port Forwarding
-
-Using kubectl port forwarding feature allows applications to access the TDengine cluster running in the Kubernetes environment.
-
-```shell
-kubectl port-forward -n tdengine-test tdengine-0 6041:6041 &
-```
-
-Use the curl command to verify the TDengine REST API using port 6041.
-
-```shell
-curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
-{"code":0,"column_meta":[["name","VARCHAR",64]],"data":[["information_schema"],["performance_schema"],["test"],["test1"]],"rows":4}
-```
-
-### Cluster Expansion
-
-TDengine supports cluster expansion:
-
-```shell
-kubectl scale statefulsets tdengine -n tdengine-test --replicas=4
-```
-
-The command line argument `--replica=4` indicates that the TDengine cluster is to be expanded to 4 nodes. After execution, first check the status of the POD:
-
-```shell
-kubectl get pod -l app=tdengine -n tdengine-test -o wide
-```
-
-Output as follows:
-
-```text
-NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
-tdengine-0 1/1 Running 4 (6h26m ago) 6h53m 10.244.2.75 node86
-tdengine-1 1/1 Running 1 (6h39m ago) 6h53m 10.244.0.59 node84
-tdengine-2 1/1 Running 0 5h16m 10.244.1.224 node85
-tdengine-3 1/1 Running 0 3m24s 10.244.2.76 node86
-```
-
-At this point, the Pod status is still Running. The dnode status in the TDengine cluster can be seen after the Pod status changes to ready:
-
-```shell
-kubectl exec -it tdengine-3 -n tdengine-test -- taos -s "show dnodes"
-```
-
-The dnode list of the four-node TDengine cluster after expansion:
-
-```text
-taos> show dnodes
- id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
-=============================================================================================================================================================================================================================================
- 1 | tdengine-0.ta... | 10 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-20 09:39:04.297 | | | |
- 2 | tdengine-1.ta... | 10 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-20 09:28:24.240 | | | |
- 3 | tdengine-2.ta... | 10 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-20 10:48:43.445 | | | |
- 4 | tdengine-3.ta... | 0 | 16 | ready | 2023-07-20 16:01:44.007 | 2023-07-20 16:01:44.889 | | | |
-Query OK, 4 row(s) in set (0.003628s)
-```
-
-### Cleaning up the Cluster
-
-**Warning**
-When deleting PVCs, pay attention to the PV persistentVolumeReclaimPolicy. It is recommended to set it to Delete, so that when the PVC is deleted, the PV will be automatically cleaned up, along with the underlying CSI storage resources. If the policy to automatically clean up PVs when deleting PVCs is not configured, after deleting the PVCs, manually cleaning up the PVs may not release the corresponding CSI storage resources.
-
-To completely remove the TDengine cluster, you need to clean up the statefulset, svc, pvc, and finally delete the namespace.
-
-```shell
-kubectl delete statefulset -l app=tdengine -n tdengine-test
-kubectl delete svc -l app=tdengine -n tdengine-test
-kubectl delete pvc -l app=tdengine -n tdengine-test
-kubectl delete namespace tdengine-test
-```
-
-### Cluster Disaster Recovery Capabilities
-
-For high availability and reliability of TDengine in a Kubernetes environment, in terms of hardware damage and disaster recovery, it is discussed on two levels:
-
-- The disaster recovery capabilities of the underlying distributed block storage, which includes multiple replicas of block storage. Popular distributed block storage like Ceph has multi-replica capabilities, extending storage replicas to different racks, cabinets, rooms, and data centers (or directly using block storage services provided by public cloud vendors).
-- TDengine's disaster recovery, in TDengine Enterprise, inherently supports the recovery of a dnode's work by launching a new blank dnode when an existing dnode permanently goes offline (due to physical disk damage and data loss).
-
-## Deploying TDengine Cluster with Helm
-
-Helm is the package manager for Kubernetes.
-The previous section on deploying the TDengine cluster with Kubernetes was simple enough, but Helm can provide even more powerful capabilities.
-
-### Installing Helm
-
-```shell
-curl -fsSL -o get_helm.sh \
- https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
-chmod +x get_helm.sh
-./get_helm.sh
-```
-
-Helm operates Kubernetes using kubectl and kubeconfig configurations, which can be set up following the Rancher installation configuration for Kubernetes.
-
-### Installing TDengine Chart
-
-The TDengine Chart has not yet been released to the Helm repository, it can currently be downloaded directly from GitHub:
-
-```shell
-wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-3.0.2.tgz
-```
-
-Retrieve the current Kubernetes storage class:
-
-```shell
-kubectl get storageclass
-```
-
-In minikube, the default is standard. Then, use the helm command to install:
-
-```shell
-helm install tdengine tdengine-3.0.2.tgz \
- --set storage.className= \
- --set image.tag=3.2.3.0
-
-```
-
-In a minikube environment, you can set a smaller capacity to avoid exceeding disk space:
-
-```shell
-helm install tdengine tdengine-3.0.2.tgz \
- --set storage.className=standard \
- --set storage.dataSize=2Gi \
- --set storage.logSize=10Mi \
- --set image.tag=3.2.3.0
-```
-
-After successful deployment, the TDengine Chart will output instructions for operating TDengine:
-
-```shell
-export POD_NAME=$(kubectl get pods --namespace default \
- -l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=tdengine" \
- -o jsonpath="{.items[0].metadata.name}")
-kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
-kubectl --namespace default exec -it $POD_NAME -- taos
-```
-
-You can create a table for testing:
-
-```shell
-kubectl --namespace default exec $POD_NAME -- \
- taos -s "create database test;
- use test;
- create table t1 (ts timestamp, n int);
- insert into t1 values(now, 1)(now + 1s, 2);
- select * from t1;"
-```
-
-### Configuring values
-
-TDengine supports customization through `values.yaml`.
-You can obtain the complete list of values supported by the TDengine Chart with helm show values:
-
-```shell
-helm show values tdengine-3.0.2.tgz
-```
-
-You can save the results as `values.yaml`, then modify various parameters in it, such as the number of replicas, storage class name, capacity size, TDengine configuration, etc., and then use the following command to install the TDengine cluster:
-
-```shell
-helm install tdengine tdengine-3.0.2.tgz -f values.yaml
-```
-
-All parameters are as follows:
-
-```yaml
-# Default values for tdengine.
-# This is a YAML-formatted file.
-# Declare variables to be passed into helm templates.
-
-replicaCount: 1
-
-image:
- prefix: tdengine/tdengine
- #pullPolicy: Always
- # Overrides the image tag whose default is the chart appVersion.
-# tag: "3.0.2.0"
-
-service:
- # ClusterIP is the default service type, use NodeIP only if you know what you are doing.
- type: ClusterIP
- ports:
- # TCP range required
- tcp: [6030, 6041, 6042, 6043, 6044, 6046, 6047, 6048, 6049, 6060]
- # UDP range
- udp: [6044, 6045]
-
-
-# Set timezone here, not in taoscfg
-timezone: "Asia/Shanghai"
-
-resources:
- # We usually recommend not to specify default resources and to leave this as a conscious
- # choice for the user. This also increases chances charts run on environments with little
- # resources, such as Minikube. If you do want to specify resources, uncomment the following
- # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
- # limits:
- # cpu: 100m
- # memory: 128Mi
- # requests:
- # cpu: 100m
- # memory: 128Mi
-
-storage:
- # Set storageClassName for pvc. K8s use default storage class if not set.
- #
- className: ""
- dataSize: "100Gi"
- logSize: "10Gi"
-
-nodeSelectors:
- taosd:
- # node selectors
-
-clusterDomainSuffix: ""
-# Config settings in taos.cfg file.
-#
-# The helm/k8s support will use environment variables for taos.cfg,
-# converting an upper-snake-cased variable like `TAOS_DEBUG_FLAG`,
-# to a camelCase taos config variable `debugFlag`.
-#
-# Note:
-# 1. firstEp/secondEp: should not be set here, it's auto generated at scale-up.
-# 2. serverPort: should not be set, we'll use the default 6030 in many places.
-# 3. fqdn: will be auto generated in kubernetes, user should not care about it.
-# 4. role: currently role is not supported - every node is able to be mnode and vnode.
-#
-# Btw, keep quotes "" around the value like below, even the value will be number or not.
-taoscfg:
- # Starts as cluster or not, must be 0 or 1.
- # 0: all pods will start as a separate TDengine server
- # 1: pods will start as TDengine server cluster. [default]
- CLUSTER: "1"
-
- # number of replications, for cluster only
- TAOS_REPLICA: "1"
-
-
- # TAOS_NUM_OF_RPC_THREADS: number of threads for RPC
- #TAOS_NUM_OF_RPC_THREADS: "2"
-
- #
- # TAOS_NUM_OF_COMMIT_THREADS: number of threads to commit cache data
- #TAOS_NUM_OF_COMMIT_THREADS: "4"
-
- # enable/disable installation / usage report
- #TAOS_TELEMETRY_REPORTING: "1"
-
- # time interval of system monitor, seconds
- #TAOS_MONITOR_INTERVAL: "30"
-
- # time interval of dnode status reporting to mnode, seconds, for cluster only
- #TAOS_STATUS_INTERVAL: "1"
-
- # time interval of heart beat from shell to dnode, seconds
- #TAOS_SHELL_ACTIVITY_TIMER: "3"
-
- # minimum sliding window time, milli-second
- #TAOS_MIN_SLIDING_TIME: "10"
-
- # minimum time window, milli-second
- #TAOS_MIN_INTERVAL_TIME: "1"
-
- # the compressed rpc message, option:
- # -1 (no compression)
- # 0 (all message compressed),
- # > 0 (rpc message body which larger than this value will be compressed)
- #TAOS_COMPRESS_MSG_SIZE: "-1"
-
- # max number of connections allowed in dnode
- #TAOS_MAX_SHELL_CONNS: "50000"
-
- # stop writing logs when the disk size of the log folder is less than this value
- #TAOS_MINIMAL_LOG_DIR_G_B: "0.1"
-
- # stop writing temporary files when the disk size of the tmp folder is less than this value
- #TAOS_MINIMAL_TMP_DIR_G_B: "0.1"
-
- # if disk free space is less than this value, taosd service exit directly within startup process
- #TAOS_MINIMAL_DATA_DIR_G_B: "0.1"
-
- # One mnode is equal to the number of vnode consumed
- #TAOS_MNODE_EQUAL_VNODE_NUM: "4"
-
- # enbale/disable http service
- #TAOS_HTTP: "1"
-
- # enable/disable system monitor
- #TAOS_MONITOR: "1"
-
- # enable/disable async log
- #TAOS_ASYNC_LOG: "1"
-
- #
- # time of keeping log files, days
- #TAOS_LOG_KEEP_DAYS: "0"
-
- # The following parameters are used for debug purpose only.
- # debugFlag 8 bits mask: FILE-SCREEN-UNUSED-HeartBeat-DUMP-TRACE_WARN-ERROR
- # 131: output warning and error
- # 135: output debug, warning and error
- # 143: output trace, debug, warning and error to log
- # 199: output debug, warning and error to both screen and file
- # 207: output trace, debug, warning and error to both screen and file
- #
- # debug flag for all log type, take effect when non-zero value\
- #TAOS_DEBUG_FLAG: "143"
-
- # generate core file when service crash
- #TAOS_ENABLE_CORE_FILE: "1"
-```
-
-### Expansion
-
-For expansion, refer to the explanation in the previous section, with some additional operations needed from the helm deployment.
-First, retrieve the name of the StatefulSet from the deployment.
-
-```shell
-export STS_NAME=$(kubectl get statefulset \
- -l "app.kubernetes.io/name=tdengine" \
- -o jsonpath="{.items[0].metadata.name}")
-```
-
-The expansion operation is extremely simple, just increase the replica. The following command expands TDengine to three nodes:
-
-```shell
-kubectl scale --replicas 3 statefulset/$STS_NAME
-```
-
-Use the commands `show dnodes` and `show mnodes` to check if the expansion was successful.
-
-### Cleaning up the Cluster
-
-Under Helm management, the cleanup operation also becomes simple:
-
-```shell
-helm uninstall tdengine
-```
-
-However, Helm will not automatically remove PVCs, you need to manually retrieve and then delete the PVCs.
diff --git a/docs/en/08-operation/03-deployment/01-manual.md b/docs/en/08-operation/03-deployment/01-manual.md
new file mode 100644
index 0000000000..3b07364f1d
--- /dev/null
+++ b/docs/en/08-operation/03-deployment/01-manual.md
@@ -0,0 +1,215 @@
+---
+title: Manual Deployment
+slug: /operations-and-maintenance/deploy-your-cluster/manual-deployment
+---
+
+You can deploy TDengine manually on a physical or virtual machine.
+
+## Deploying taosd
+
+taosd is the most important service component in the TDengine cluster. This section describes the steps to manually deploy a taosd cluster.
+
+### 1. Clear Data
+
+If the physical nodes for setting up the cluster contain previous test data or have had other versions of TDengine installed (such as 1.x/2.x), please delete them and clear all data first.
+
+### 2. Check Environment
+
+Before deploying the TDengine cluster, it is crucial to thoroughly check the network settings of all dnodes and the physical nodes where the applications are located. Here are the steps to check:
+
+- Step 1: Execute the `hostname -f` command on each physical node to view and confirm that all node hostnames are unique. This step can be omitted for nodes where application drivers are located.
+- Step 2: Execute the `ping host` command on each physical node, where host is the hostname of other physical nodes. This step aims to detect the network connectivity between the current node and other physical nodes. If you cannot ping through, immediately check the network and DNS settings. For Linux operating systems, check the `/etc/hosts` file; for Windows operating systems, check the `C:\Windows\system32\drivers\etc\hosts` file. Network issues will prevent the formation of a cluster, so be sure to resolve this issue.
+- Step 3: Repeat the above network detection steps on the physical nodes where the application is running. If the network is found to be problematic, the application will not be able to connect to the taosd service. At this point, carefully check the DNS settings or hosts file of the physical node where the application is located to ensure it is configured correctly.
+- Step 4: Check ports to ensure that all hosts in the cluster can communicate over TCP on port 6030.
+
+By following these steps, you can ensure that all nodes communicate smoothly at the network level, laying a solid foundation for the successful deployment of the TDengine cluster.
+
+### 3. Installation
+
+To ensure consistency and stability within the cluster, install the same version of TDengine on all physical nodes.
+
+### 4. Modify Configuration
+
+Modify the configuration file of TDengine (the configuration files of all nodes need to be modified). Assuming the endpoint of the first dnode to be started is `h1.tdengine.com:6030`, the cluster-related parameters are as follows.
+
+```shell
+# firstEp is the first dnode that each dnode connects to after the initial startup
+firstEp h1.tdengine.com:6030
+# Must be configured to the FQDN of this dnode, if there is only one hostname on this machine, you can comment out or delete the following line
+fqdn h1.tdengine.com
+# Configure the port of this dnode, default is 6030
+serverPort 6030
+```
+
+The parameters that must be modified are firstEp and fqdn. For each dnode, the firstEp configuration should remain consistent, but fqdn must be set to the value of the dnode it is located on. Other parameters do not need to be modified unless you are clear on why they should be changed.
+
+For dnodes wishing to join the cluster, it is essential to ensure that the parameters related to the TDengine cluster listed in the table below are set identically. Any mismatch in parameters may prevent the dnode from successfully joining the cluster.
+
+| Parameter Name | Meaning |
+|:----------------:|:---------------------------------------------------------:|
+| statusInterval | Interval at which dnode reports status to mnode |
+| timezone | Time zone |
+| locale | System locale information and encoding format |
+| charset | Character set encoding |
+| ttlChangeOnWrite | Whether ttl expiration changes with table modification |
+
+### 5. Start
+
+Start the first dnode, such as `h1.tdengine.com`, following the steps mentioned above. Then execute taos in the terminal to start TDengine's CLI program taos, and execute the `show dnodes` command within it to view all dnode information in the current cluster.
+
+```shell
+taos> show dnodes;
+ id | endpoint | vnodes|support_vnodes|status| create_time | note |
+===================================================================================
+ 1| h1.tdengine.com:6030 | 0| 1024| ready| 2022-07-16 10:50:42.673 | |
+```
+
+You can see that the endpoint of the dnode node that has just started is `h1.tdengine.com:6030`. This address is the first Ep of the new cluster.
+
+### 6. Adding dnode
+
+Follow the steps mentioned earlier, start taosd on each physical node. Each dnode needs to configure the firstEp parameter in the taos.cfg file to the endpoint of the first node of the new cluster, which in this case is `h1.tdengine.com:6030`. On the machine where the first dnode is located, run taos in the terminal, open TDengine's CLI program taos, then log into the TDengine cluster, and execute the following SQL.
+
+```shell
+create dnode "h2.tdengine.com:6030"
+```
+
+Add the new dnode's endpoint to the cluster's endpoint list. You need to put `fqdn:port` in double quotes, otherwise, it will cause an error when running. Please note to replace the example h2.tdengine.com:6030 with the endpoint of this new dnode. Then execute the following SQL to see if the new node has successfully joined. If the dnode you want to join is currently offline, please refer to the "Common Issues" section later in this chapter for a solution.
+
+```shell
+show dnodes;
+```
+
+In the logs, please confirm that the fqdn and port of the output dnode are consistent with the endpoint you just tried to add. If they are not consistent, correct it to the correct endpoint. By following the steps above, you can continuously add new dnodes to the cluster one by one, thereby expanding the scale of the cluster and improving overall performance. Make sure to follow the correct process when adding new nodes, which helps maintain the stability and reliability of the cluster.
+
+**Tips**
+
+- Any dnode that has joined the cluster can serve as the firstEp for subsequent nodes to be added. The firstEp parameter only functions when that dnode first joins the cluster. After joining, the dnode will save the latest mnode's endpoint list, and subsequently, it no longer depends on this parameter. The firstEp parameter in the configuration file is mainly used for client connections, and if no parameters are set for TDengine's CLI, it will default to connecting to the node specified by firstEp.
+- Two dnodes that have not configured the firstEp parameter will run independently after starting. At this time, it is not possible to join one dnode to another to form a cluster.
+- TDengine does not allow merging two independent clusters into a new cluster.
+
+### 7. Adding mnode
+
+When creating a TDengine cluster, the first dnode automatically becomes the mnode of the cluster, responsible for managing and coordinating the cluster. To achieve high availability of mnode, subsequent dnodes need to manually create mnode. Please note that a cluster can create up to 3 mnodes, and only one mnode can be created on each dnode. When the number of dnodes in the cluster reaches or exceeds 3, you can create mnode for the existing cluster. In the first dnode, first log into TDengine through the CLI program taos, then execute the following SQL.
+
+```shell
+create mnode on dnode <dnodeId>
+```
+
+Please note to replace the dnodeId in the example above with the serial number of the newly created dnode (which can be obtained by executing the `show dnodes` command). Finally, execute the following `show mnodes` to see if the newly created mnode has successfully joined the cluster.
+
+**Tips**
+
+During the process of setting up a TDengine cluster, if a new node always shows as offline after executing the create dnode command to add a new node, please follow these steps for troubleshooting.
+
+- Step 1, check whether the taosd service on the new node has started normally. You can confirm this by checking the log files or using the ps command.
+- Step 2, if the taosd service has started, next check whether the new node's network connection is smooth and confirm whether the firewall has been turned off. Network issues or firewall settings may prevent the node from communicating with other nodes in the cluster.
+- Step 3, use the taos -h fqdn command to try to connect to the new node, then execute the show dnodes command. This will display the running status of the new node as an independent cluster. If the displayed list is inconsistent with that shown on the main node, it indicates that the new node may have formed a single-node cluster on its own. To resolve this issue, follow these steps. First, stop the taosd service on the new node. Second, clear all files in the dataDir directory specified in the taos.cfg configuration file on the new node. This will delete all data and configuration information related to that node. Finally, restart the taosd service on the new node. This will reset the new node to its initial state, ready to rejoin the main cluster.
+
+## Deploying taosAdapter
+
+This section discusses how to deploy taosAdapter, which provides RESTful and WebSocket access capabilities for the TDengine cluster, thus playing a very important role in the cluster.
+
+1. Installation
+
+After the installation of TDengine Enterprise is complete, taosAdapter can be used. If you want to deploy taosAdapter on different servers, TDengine Enterprise needs to be installed on these servers.
+
+2. Single Instance Deployment
+
+Deploying a single instance of taosAdapter is very simple. For specific commands and configuration parameters, please refer to the taosAdapter section in the manual.
+
+3. Multiple Instances Deployment
+
+The main purposes of deploying multiple instances of taosAdapter are as follows:
+
+- To increase the throughput of the cluster and prevent taosAdapter from becoming a system bottleneck.
+- To enhance the robustness and high availability of the cluster, allowing requests entering the business system to be automatically routed to other instances when one instance fails.
+
+When deploying multiple instances of taosAdapter, it is necessary to address load balancing issues to avoid overloading some nodes while others remain idle. During the deployment process, multiple single instances need to be deployed separately, and the deployment steps for each instance are exactly the same as those for deploying a single instance. The next critical part is configuring Nginx. Below is a verified best practice configuration; you only need to replace the endpoint with the correct address in the actual environment. For the meanings of each parameter, please refer to the official Nginx documentation.
+
+```nginx
+user root;
+worker_processes auto;
+error_log /var/log/nginx_error.log;
+
+
+events {
+ use epoll;
+ worker_connections 1024;
+}
+
+http {
+
+ access_log off;
+
+ map $http_upgrade $connection_upgrade {
+ default upgrade;
+ '' close;
+ }
+
+ server {
+ listen 6041;
+ location ~* {
+ proxy_pass http://dbserver;
+ proxy_read_timeout 600s;
+ proxy_send_timeout 600s;
+ proxy_connect_timeout 600s;
+ proxy_next_upstream error http_502 non_idempotent;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection $http_connection;
+ }
+ }
+ server {
+ listen 6043;
+ location ~* {
+ proxy_pass http://keeper;
+ proxy_read_timeout 60s;
+ proxy_next_upstream error http_502 http_500 non_idempotent;
+ }
+ }
+
+ server {
+ listen 6060;
+ location ~* {
+ proxy_pass http://explorer;
+ proxy_read_timeout 60s;
+ proxy_next_upstream error http_502 http_500 non_idempotent;
+ }
+ }
+ upstream dbserver {
+ least_conn;
+ server 172.16.214.201:6041 max_fails=0;
+ server 172.16.214.202:6041 max_fails=0;
+ server 172.16.214.203:6041 max_fails=0;
+ }
+ upstream keeper {
+ ip_hash;
+ server 172.16.214.201:6043 ;
+ server 172.16.214.202:6043 ;
+ server 172.16.214.203:6043 ;
+ }
+    upstream explorer {
+ ip_hash;
+ server 172.16.214.201:6060 ;
+ server 172.16.214.202:6060 ;
+ server 172.16.214.203:6060 ;
+ }
+}
+```
+
+## Deploying taosKeeper
+
+To use the monitoring capabilities of TDengine, taosKeeper is an essential component. For monitoring, please refer to [TDinsight](../../../tdengine-reference/components/tdinsight), and for details on deploying taosKeeper, please refer to the [taosKeeper Reference Manual](../../../tdengine-reference/components/taoskeeper).
+
+## Deploying taosX
+
+To utilize the data ingestion capabilities of TDengine, it is necessary to deploy the taosX service. For detailed explanations and deployment, please refer to the enterprise edition reference manual.
+
+## Deploying taosX-Agent
+
+For some data sources such as Pi, OPC, etc., due to network conditions and data source access restrictions, taosX cannot directly access the data sources. In such cases, a proxy service, taosX-Agent, needs to be deployed. For detailed explanations and deployment, please refer to the enterprise edition reference manual.
+
+## Deploying taos-Explorer
+
+TDengine provides the capability to visually manage TDengine clusters. To use the graphical interface, the taos-Explorer service needs to be deployed. For detailed explanations and deployment, please refer to the [taos-Explorer Reference Manual](../../../tdengine-reference/components/taosexplorer/).
diff --git a/docs/en/08-operation/03-deployment/02-docker.md b/docs/en/08-operation/03-deployment/02-docker.md
new file mode 100644
index 0000000000..49c0b115f7
--- /dev/null
+++ b/docs/en/08-operation/03-deployment/02-docker.md
@@ -0,0 +1,93 @@
+---
+title: Docker Deployment
+slug: /operations-and-maintenance/deploy-your-cluster/docker-deployment
+---
+
+You can deploy TDengine services in Docker containers and use environment variables in the docker run command line or docker-compose file to control the behavior of services in the container.
+
+## Starting TDengine
+
+The TDengine image is launched with HTTP service activated by default. Use the following command to create a containerized TDengine environment with HTTP service.
+
+```shell
+docker run -d --name tdengine \
+-v ~/data/taos/dnode/data:/var/lib/taos \
+-v ~/data/taos/dnode/log:/var/log/taos \
+-p 6041:6041 tdengine/tdengine
+```
+
+Detailed parameter explanations are as follows:
+
+- /var/lib/taos: Default data file directory for TDengine, can be modified through the configuration file.
+- /var/log/taos: Default log file directory for TDengine, can be modified through the configuration file.
+
+The above command starts a container named tdengine and maps the HTTP service's port 6041 to the host port 6041. The following command can verify if the HTTP service in the container is available.
+
+```shell
+curl -u root:taosdata -d "show databases" localhost:6041/rest/sql
+```
+
+Run the following command to access TDengine within the container.
+
+```shell
+$ docker exec -it tdengine taos
+
+taos> show databases;
+ name |
+=================================
+ information_schema |
+ performance_schema |
+Query OK, 2 rows in database (0.033802s)
+```
+
+Within the container, TDengine CLI or various connectors (such as JDBC-JNI) connect to the server via the container's hostname. Accessing TDengine inside the container from outside is more complex, and using RESTful/WebSocket connection methods is the simplest approach.
+
+## Starting TDengine in host network mode
+
+Run the following command to start TDengine in host network mode, which allows using the host's FQDN to establish connections, rather than using the container's hostname.
+
+```shell
+docker run -d --name tdengine --network host tdengine/tdengine
+```
+
+This method is similar to starting TDengine on the host using the systemctl command. If the TDengine client is already installed on the host, you can directly use the following command to access the TDengine service.
+
+```shell
+$ taos
+
+taos> show dnodes;
+ id | endpoint | vnodes | support_vnodes | status | create_time | note |
+=================================================================================================================================================
+ 1 | vm98:6030 | 0 | 32 | ready | 2022-08-19 14:50:05.337 | |
+Query OK, 1 rows in database (0.010654s)
+```
+
+## Start TDengine with a specified hostname and port
+
+Use the following command to establish a connection on a specified hostname using the TAOS_FQDN environment variable or the fqdn configuration item in taos.cfg. This method provides greater flexibility for deploying TDengine.
+
+```shell
+docker run -d \
+ --name tdengine \
+ -e TAOS_FQDN=tdengine \
+ -p 6030:6030 \
+ -p 6041-6049:6041-6049 \
+ -p 6041-6049:6041-6049/udp \
+ tdengine/tdengine
+```
+
+First, the above command starts a TDengine service in the container, listening on the hostname tdengine, and maps the container's port 6030 to the host's port 6030, and the container's port range [6041, 6049] to the host's port range [6041, 6049]. If the port range on the host is already in use, you can modify the command to specify a free port range on the host.
+
+Secondly, ensure that the hostname tdengine is resolvable in /etc/hosts. Use the following command to save the correct configuration information to the hosts file.
+
+```shell
+echo 127.0.0.1 tdengine |sudo tee -a /etc/hosts
+```
+
+Finally, you can access the TDengine service using the TDengine CLI with tdengine as the server address, as follows.
+
+```shell
+taos -h tdengine -P 6030
+```
+
+If TAOS_FQDN is set to the same as the hostname of the host, the effect is the same as "starting TDengine in host network mode".
diff --git a/docs/en/08-operation/03-deployment/03-kubernetes.md b/docs/en/08-operation/03-deployment/03-kubernetes.md
new file mode 100644
index 0000000000..8a6edcead3
--- /dev/null
+++ b/docs/en/08-operation/03-deployment/03-kubernetes.md
@@ -0,0 +1,812 @@
+---
+title: Kubernetes Deployment
+slug: /operations-and-maintenance/deploy-your-cluster/kubernetes-deployment
+---
+
+You can use kubectl or Helm to deploy TDengine in Kubernetes.
+
+Note that Helm is only supported in TDengine Enterprise. To deploy TDengine OSS in Kubernetes, use kubectl.
+
+## Deploy TDengine with kubectl
+
+As a time-series database designed for cloud-native architectures, TDengine inherently supports Kubernetes deployment. This section introduces how to step-by-step create a highly available TDengine cluster for production use using YAML files, with a focus on common operations of TDengine in a Kubernetes environment. This subsection requires readers to have a certain understanding of Kubernetes, be proficient in running common kubectl commands, and understand concepts such as statefulset, service, and pvc. Readers unfamiliar with these concepts can refer to the Kubernetes official website for learning.
+To meet the requirements of high availability, the cluster needs to meet the following requirements:
+
+- 3 or more dnodes: Multiple vnodes in the same vgroup of TDengine should not be distributed on the same dnode, so if creating a database with 3 replicas, the number of dnodes should be 3 or more.
+- 3 mnodes: mnodes are responsible for managing the entire cluster, with TDengine defaulting to one mnode. If the dnode hosting this mnode goes offline, the entire cluster becomes unavailable.
+- 3 replicas of the database: TDengine's replica configuration is at the database level, so 3 replicas can ensure that the cluster remains operational even if any one of the 3 dnodes goes offline. If 2 dnodes go offline, the cluster becomes unavailable because RAFT cannot complete the election. (Enterprise edition: In disaster recovery scenarios, if the data files of any node are damaged, recovery can be achieved by restarting the dnode.)
+
+### Prerequisites
+
+To deploy and manage a TDengine cluster using Kubernetes, the following preparations need to be made.
+
+- This article applies to Kubernetes v1.19 and above.
+- This article uses the kubectl tool for installation and deployment, please install the necessary software in advance.
+- Kubernetes has been installed and deployed and can normally access or update necessary container repositories or other services.
+
+### Configure Service
+
+Create a Service configuration file: taosd-service.yaml, the service name metadata.name (here "taosd") will be used in the next step. First, add the ports used by TDengine, then set the determined labels app (here "tdengine") in the selector.
+
+```yaml
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: "taosd"
+ labels:
+ app: "tdengine"
+spec:
+ ports:
+ - name: tcp6030
+ protocol: "TCP"
+ port: 6030
+ - name: tcp6041
+ protocol: "TCP"
+ port: 6041
+ selector:
+ app: "tdengine"
+```
+
+### Stateful Services StatefulSet
+
+According to Kubernetes' descriptions of various deployment types, we will use StatefulSet as the deployment resource type for TDengine. Create the file tdengine.yaml, where replicas define the number of cluster nodes as 3. The node timezone is set to China (Asia/Shanghai), and each node is allocated 5G of standard storage, which you can modify according to actual conditions.
+
+Please pay special attention to the configuration of startupProbe. After a dnode's Pod goes offline for a period of time and then restarts, the newly online dnode will be temporarily unavailable. If the startupProbe configuration is too small, Kubernetes will consider the Pod to be in an abnormal state and attempt to restart the Pod. This dnode's Pod will frequently restart and never return to a normal state.
+
+```yaml
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: "tdengine"
+ labels:
+ app: "tdengine"
+spec:
+ serviceName: "taosd"
+ replicas: 3
+ updateStrategy:
+ type: RollingUpdate
+ selector:
+ matchLabels:
+ app: "tdengine"
+ template:
+ metadata:
+ name: "tdengine"
+ labels:
+ app: "tdengine"
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - weight: 100
+ podAffinityTerm:
+ labelSelector:
+ matchExpressions:
+ - key: app
+ operator: In
+ values:
+ - tdengine
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - name: "tdengine"
+ image: "tdengine/tdengine:3.2.3.0"
+ imagePullPolicy: "IfNotPresent"
+ ports:
+ - name: tcp6030
+ protocol: "TCP"
+ containerPort: 6030
+ - name: tcp6041
+ protocol: "TCP"
+ containerPort: 6041
+ env:
+ # POD_NAME for FQDN config
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ # SERVICE_NAME and NAMESPACE for fqdn resolve
+ - name: SERVICE_NAME
+ value: "taosd"
+ - name: STS_NAME
+ value: "tdengine"
+ - name: STS_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ # TZ for timezone settings, we recommend to always set it.
+ - name: TZ
+ value: "Asia/Shanghai"
+ # Environment variables with prefix TAOS_ will be parsed and converted into corresponding parameter in taos.cfg. For example, serverPort in taos.cfg should be configured by TAOS_SERVER_PORT when using K8S to deploy
+ - name: TAOS_SERVER_PORT
+ value: "6030"
+ # Must set if you want a cluster.
+ - name: TAOS_FIRST_EP
+ value: "$(STS_NAME)-0.$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local:$(TAOS_SERVER_PORT)"
+          # TAOS_FQDN should always be set in k8s env.
+ - name: TAOS_FQDN
+ value: "$(POD_NAME).$(SERVICE_NAME).$(STS_NAMESPACE).svc.cluster.local"
+ volumeMounts:
+ - name: taosdata
+ mountPath: /var/lib/taos
+ startupProbe:
+ exec:
+ command:
+ - taos-check
+ failureThreshold: 360
+ periodSeconds: 10
+ readinessProbe:
+ exec:
+ command:
+ - taos-check
+ initialDelaySeconds: 5
+ timeoutSeconds: 5000
+ livenessProbe:
+ exec:
+ command:
+ - taos-check
+ initialDelaySeconds: 15
+ periodSeconds: 20
+ volumeClaimTemplates:
+ - metadata:
+ name: taosdata
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ storageClassName: "standard"
+ resources:
+ requests:
+ storage: "5Gi"
+```
+
+### Deploying TDengine Cluster Using kubectl Command
+
+First, create the corresponding namespace `tdengine-test`, as well as the PVC, ensuring that there is enough remaining space with `storageClassName` set to `standard`. Then execute the following commands in sequence:
+
+```shell
+kubectl apply -f taosd-service.yaml -n tdengine-test
+kubectl apply -f tdengine.yaml -n tdengine-test
+```
+
+The above configuration will create a three-node TDengine cluster, with `dnode` automatically configured. You can use the `show dnodes` command to view the current cluster nodes:
+
+```shell
+kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "show dnodes"
+kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show dnodes"
+kubectl exec -it tdengine-2 -n tdengine-test -- taos -s "show dnodes"
+```
+
+The output is as follows:
+
+```shell
+taos> show dnodes
+ id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
+=============================================================================================================================================================================================================================================
+ 1 | tdengine-0.ta... | 0 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-19 17:54:18.469 | | | |
+ 2 | tdengine-1.ta... | 0 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-19 17:54:38.698 | | | |
+ 3 | tdengine-2.ta... | 0 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-19 17:55:02.039 | | | |
+Query OK, 3 row(s) in set (0.001853s)
+```
+
+View the current mnode:
+
+```shell
+kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"
+taos> show mnodes\G
+*************************** 1.row ***************************
+ id: 1
+ endpoint: tdengine-0.taosd.tdengine-test.svc.cluster.local:6030
+ role: leader
+ status: ready
+create_time: 2023-07-19 17:54:18.559
+reboot_time: 2023-07-19 17:54:19.520
+Query OK, 1 row(s) in set (0.001282s)
+```
+
+Create mnode
+
+```shell
+kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "create mnode on dnode 2"
+kubectl exec -it tdengine-0 -n tdengine-test -- taos -s "create mnode on dnode 3"
+```
+
+View mnode
+
+```shell
+kubectl exec -it tdengine-1 -n tdengine-test -- taos -s "show mnodes\G"
+
+taos> show mnodes\G
+*************************** 1.row ***************************
+ id: 1
+ endpoint: tdengine-0.taosd.tdengine-test.svc.cluster.local:6030
+ role: leader
+ status: ready
+create_time: 2023-07-19 17:54:18.559
+reboot_time: 2023-07-20 09:19:36.060
+*************************** 2.row ***************************
+ id: 2
+ endpoint: tdengine-1.taosd.tdengine-test.svc.cluster.local:6030
+ role: follower
+ status: ready
+create_time: 2023-07-20 09:22:05.600
+reboot_time: 2023-07-20 09:22:12.838
+*************************** 3.row ***************************
+ id: 3
+ endpoint: tdengine-2.taosd.tdengine-test.svc.cluster.local:6030
+ role: follower
+ status: ready
+create_time: 2023-07-20 09:22:20.042
+reboot_time: 2023-07-20 09:22:23.271
+Query OK, 3 row(s) in set (0.003108s)
+```
+
+### Port Forwarding
+
+Using kubectl port forwarding feature allows applications to access the TDengine cluster running in the Kubernetes environment.
+
+```shell
+kubectl port-forward -n tdengine-test tdengine-0 6041:6041 &
+```
+
+Use the curl command to verify the TDengine REST API using port 6041.
+
+```shell
+curl -u root:taosdata -d "show databases" 127.0.0.1:6041/rest/sql
+{"code":0,"column_meta":[["name","VARCHAR",64]],"data":[["information_schema"],["performance_schema"],["test"],["test1"]],"rows":4}
+```
+
+### Cluster Expansion
+
+TDengine supports cluster expansion:
+
+```shell
+kubectl scale statefulsets tdengine -n tdengine-test --replicas=4
+```
+
+The command line argument `--replicas=4` indicates that the TDengine cluster is to be expanded to 4 nodes. After execution, first check the status of the POD:
+
+```shell
+kubectl get pod -l app=tdengine -n tdengine-test -o wide
+```
+
+Output as follows:
+
+```text
+NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES
+tdengine-0 1/1 Running 4 (6h26m ago) 6h53m 10.244.2.75 node86
+tdengine-1 1/1 Running 1 (6h39m ago) 6h53m 10.244.0.59 node84
+tdengine-2 1/1 Running 0 5h16m 10.244.1.224 node85
+tdengine-3 1/1 Running 0 3m24s 10.244.2.76 node86
+```
+
+At this point, the Pod status is still Running. The dnode status in the TDengine cluster can be seen after the Pod status changes to ready:
+
+```shell
+kubectl exec -it tdengine-3 -n tdengine-test -- taos -s "show dnodes"
+```
+
+The dnode list of the four-node TDengine cluster after expansion:
+
+```text
+taos> show dnodes
+ id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | active_code | c_active_code |
+=============================================================================================================================================================================================================================================
+ 1 | tdengine-0.ta... | 10 | 16 | ready | 2023-07-19 17:54:18.552 | 2023-07-20 09:39:04.297 | | | |
+ 2 | tdengine-1.ta... | 10 | 16 | ready | 2023-07-19 17:54:37.828 | 2023-07-20 09:28:24.240 | | | |
+ 3 | tdengine-2.ta... | 10 | 16 | ready | 2023-07-19 17:55:01.141 | 2023-07-20 10:48:43.445 | | | |
+ 4 | tdengine-3.ta... | 0 | 16 | ready | 2023-07-20 16:01:44.007 | 2023-07-20 16:01:44.889 | | | |
+Query OK, 4 row(s) in set (0.003628s)
+```
+
+### Cleaning up the Cluster
+
+**Warning**
+When deleting PVCs, pay attention to the PV persistentVolumeReclaimPolicy. It is recommended to set it to Delete, so that when the PVC is deleted, the PV will be automatically cleaned up, along with the underlying CSI storage resources. If the policy to automatically clean up PVs when deleting PVCs is not configured, after deleting the PVCs, manually cleaning up the PVs may not release the corresponding CSI storage resources.
+
+To completely remove the TDengine cluster, you need to clean up the statefulset, svc, pvc, and finally delete the namespace.
+
+```shell
+kubectl delete statefulset -l app=tdengine -n tdengine-test
+kubectl delete svc -l app=tdengine -n tdengine-test
+kubectl delete pvc -l app=tdengine -n tdengine-test
+kubectl delete namespace tdengine-test
+```
+
+### Cluster Disaster Recovery Capabilities
+
+For high availability and reliability of TDengine in a Kubernetes environment, in terms of hardware damage and disaster recovery, it is discussed on two levels:
+
+- The disaster recovery capabilities of the underlying distributed block storage, which includes multiple replicas of block storage. Popular distributed block storage like Ceph has multi-replica capabilities, extending storage replicas to different racks, cabinets, rooms, and data centers (or directly using block storage services provided by public cloud vendors).
+- TDengine's disaster recovery, in TDengine Enterprise, inherently supports the recovery of a dnode's work by launching a new blank dnode when an existing dnode permanently goes offline (due to physical disk damage and data loss).
+
+## Deploy TDengine with Helm
+
+Helm is the package manager for Kubernetes.
+The previous section on deploying the TDengine cluster with Kubernetes was simple enough, but Helm can provide even more powerful capabilities.
+
+### Installing Helm
+
+```shell
+curl -fsSL -o get_helm.sh \
+ https://raw.githubusercontent.com/helm/helm/master/scripts/get-helm-3
+chmod +x get_helm.sh
+./get_helm.sh
+```
+
+Helm operates Kubernetes using kubectl and kubeconfig configurations, which can be set up following the Rancher installation configuration for Kubernetes.
+
+### Installing TDengine Chart
+
+The TDengine Chart has not yet been released to the Helm repository, it can currently be downloaded directly from GitHub:
+
+```shell
+wget https://github.com/taosdata/TDengine-Operator/raw/3.0/helm/tdengine-enterprise-3.5.0.tgz
+```
+
+Note that it's for the enterprise edition, and the community edition is not yet available.
+
+Follow the steps below to install the TDengine Chart:
+
+```shell
+# Edit the values.yaml file to set the topology of the cluster
+vim values.yaml
+helm install tdengine tdengine-enterprise-3.5.0.tgz -f values.yaml
+```
+
+#### Case 1: Simple 1-node Deployment
+
+The following is a simple example of deploying a single-node TDengine cluster using Helm.
+
+```yaml
+# This example is a simple deployment with one server replica.
+name: "tdengine"
+
+image:
+ repository: image.cloud.taosdata.com/ # Leave a trailing slash for the repository, or "" for no repository
+ server: taosx/integrated:3.3.5.1-b0a54bdd
+
+# Set timezone here, not in taoscfg
+timezone: "Asia/Shanghai"
+
+labels:
+ app: "tdengine"
+ # Add more labels as needed.
+
+services:
+ server:
+ type: ClusterIP
+ replica: 1
+ ports:
+ # TCP range required
+ tcp: [6041, 6030, 6060]
+ # UDP range, optional
+ udp:
+ volumes:
+ - name: data
+ mountPath: /var/lib/taos
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: log
+ mountPath: /var/log/taos/
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ files:
+ - name: cfg # must be lower case.
+ mountPath: /etc/taos/taos.cfg
+ content: |
+ dataDir /var/lib/taos/
+ logDir /var/log/taos/
+```
+
+Let's explain the above configuration:
+
+- name: The name of the deployment, here it is "tdengine".
+- image:
+ - repository: The image repository address, remember to leave a trailing slash for the repository, or set it to an empty string to use docker.io.
+ - server: The specific name and tag of the server image. You need to ask your business partner for the TDengine Enterprise image.
+- timezone: Set the timezone, here it is "Asia/Shanghai".
+- labels: Add labels to the deployment, here is an app label with the value "tdengine", more labels can be added as needed.
+- services:
+ - server: Configure the server service.
+ - type: The service type, here it is **ClusterIP**.
+ - replica: The number of replicas, here it is 1.
+ - ports: Configure the ports of the service.
+ - tcp: The required TCP port range, here it is [6041, 6030, 6060].
+ - udp: The optional UDP port range, which is not configured here.
+ - volumes: Configure the volumes.
+ - name: The name of the volume, here there are two volumes, data and log.
+ - mountPath: The mount path of the volume.
+ - spec: The specification of the volume.
+ - storageClassName: The storage class name, here it is **local-path**.
+ - accessModes: The access mode, here it is **ReadWriteOnce**.
+ - resources.requests.storage: The requested storage size, here it is **10Gi**.
+ - files: Configure the files to mount in TDengine server.
+ - name: The name of the file, here it is **cfg**.
+ - mountPath: The mount path of the file, which is **taos.cfg**.
+ - content: The content of the file, here the **dataDir** and **logDir** are configured.
+
+After configuring the values.yaml file, use the following command to install the TDengine Chart:
+
+```shell
+helm install simple tdengine-enterprise-3.5.0.tgz -f values.yaml
+```
+
+After installation, follow the displayed instructions to check the status of the TDengine cluster:
+
+```shell
+NAME: simple
+LAST DEPLOYED: Sun Feb 9 13:40:00 2025 default
+STATUS: deployed
+REVISION: 1
+TEST SUITE: None
+NOTES:
+1. Get first POD name:
+
+export POD_NAME=$(kubectl get pods --namespace default \
+ -l "app.kubernetes.io/name=tdengine,app.kubernetes.io/instance=simple" -o jsonpath="{.items[0].metadata.name}")
+
+2. Show dnodes/mnodes:
+
+kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
+
+3. Run into taos shell:
+
+kubectl --namespace default exec -it $POD_NAME -- taos
+```
+
+Follow the instructions to check the status of the TDengine cluster:
+
+```shell
+root@u1-58:/data1/projects/helm# kubectl --namespace default exec $POD_NAME -- taos -s "show dnodes; show mnodes"
+Welcome to the TDengine Command Line Interface, Client Version:3.3.5.1
+Copyright (c) 2023 by TDengine, all rights reserved.
+
+taos> show dnodes; show mnodes
+ id | endpoint | vnodes | support_vnodes | status | create_time | reboot_time | note | machine_id |
+==========================================================================================================================================================================================================
+ 1 | simple-tdengine-0.simple-td... | 0 | 85 | ready | 2025-02-07 21:17:34.903 | 2025-02-08 15:52:34.781 | | BWhWyPiEBrWZrQCSqTSc2a/H |
+Query OK, 1 row(s) in set (0.005133s)
+
+ id | endpoint | role | status | create_time | role_time |
+==================================================================================================================================
+ 1 | simple-tdengine-0.simple-td... | leader | ready | 2025-02-07 21:17:34.906 | 2025-02-08 15:52:34.878 |
+Query OK, 1 row(s) in set (0.004299s)
+```
+
+To clean up the TDengine cluster, use the following command:
+
+```shell
+helm uninstall simple
+kubectl delete pvc -l app.kubernetes.io/instance=simple
+```
+
+#### Case 2: Tiered-Storage Deployment
+
+The following is an example of deploying a TDengine cluster with tiered storage using Helm.
+
+```yaml
+# This is an example of a 3-tiered storage deployment with one server replica.
+name: "tdengine"
+
+image:
+ repository: image.cloud.taosdata.com/ # Leave a trailing slash for the repository, or "" for no repository
+ server: taosx/integrated:3.3.5.1-b0a54bdd
+
+# Set timezone here, not in taoscfg
+timezone: "Asia/Shanghai"
+
+labels:
+ # Add more labels as needed.
+
+services:
+ server:
+ type: ClusterIP
+ replica: 1
+ ports:
+ # TCP range required
+ tcp: [6041, 6030, 6060]
+ # UDP range, optional
+ udp:
+ volumes:
+ - name: tier0
+ mountPath: /data/taos0/
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: tier1
+ mountPath: /data/taos1/
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: tier2
+ mountPath: /data/taos2/
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: log
+ mountPath: /var/log/taos/
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ environment:
+ TAOS_DEBUG_FLAG: "131"
+ files:
+ - name: cfg # must be lower case.
+ mountPath: /etc/taos/taos.cfg
+ content: |
+ dataDir /data/taos0/ 0 1
+ dataDir /data/taos1/ 1 0
+ dataDir /data/taos2/ 2 0
+```
+
+You can see that the configuration is similar to the previous one, with the addition of the tiered storage configuration. The dataDir configuration in the taos.cfg file is also modified to support tiered storage.
+
+After configuring the values.yaml file, use the following command to install the TDengine Chart:
+
+```shell
+helm install tiered tdengine-enterprise-3.5.0.tgz -f values.yaml
+```
+
+#### Case 3: 2-replica Deployment
+
+TDengine supports 2-replica deployment with an arbitrator, which can be configured as follows:
+
+```yaml
+# This example shows how to deploy a 2-replica TDengine cluster with an arbitrator.
+name: "tdengine"
+
+image:
+ repository: image.cloud.taosdata.com/ # Leave a trailing slash for the repository, or "" for no repository
+ server: taosx/integrated:3.3.5.1-b0a54bdd
+
+# Set timezone here, not in taoscfg
+timezone: "Asia/Shanghai"
+
+labels:
+ my-app: "tdengine"
+ # Add more labels as needed.
+
+services:
+ arbitrator:
+ type: ClusterIP
+ volumes:
+ - name: arb-data
+ mountPath: /var/lib/taos
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: arb-log
+ mountPath: /var/log/taos/
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ server:
+ type: ClusterIP
+ replica: 2
+ ports:
+ # TCP range required
+ tcp: [6041, 6030, 6060]
+ # UDP range, optional
+ udp:
+ volumes:
+ - name: data
+ mountPath: /var/lib/taos
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: log
+ mountPath: /var/log/taos/
+ spec:
+ storageClassName: "local-path"
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: "10Gi"
+```
+
+You can see that the configuration is similar to the first one, with the addition of the arbitrator configuration. The arbitrator service is configured with the same storage as the server service, and the server service is configured with 2 replicas (the arbitrator is fixed at 1 replica and cannot be changed).
+
+#### Case 4: 3-replica Deployment with Single taosX
+
+```yaml
+# This example shows how to deploy a 3-replica TDengine cluster with separate taosx/explorer service.
+# Users should know that the explorer/taosx service is not cluster-ready, so it is recommended to deploy it separately.
+name: "tdengine"
+
+image:
+ repository: image.cloud.taosdata.com/ # Leave a trailing slash for the repository, or "" for no repository
+ server: taosx/integrated:3.3.5.1-b0a54bdd
+
+# Set timezone here, not in taoscfg
+timezone: "Asia/Shanghai"
+
+labels:
+ # Add more labels as needed.
+
+services:
+ server:
+ type: ClusterIP
+ replica: 3
+ ports:
+ # TCP range required
+ tcp: [6041, 6030]
+ # UDP range, optional
+ udp:
+ volumes:
+ - name: data
+ mountPath: /var/lib/taos
+ spec:
+ storageClassName: "local-path"
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: log
+ mountPath: /var/log/taos/
+ spec:
+ storageClassName: "local-path"
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: "10Gi"
+ environment:
+ ENABLE_TAOSX: "0" # Disable taosx in server replicas.
+ taosx:
+ type: ClusterIP
+ volumes:
+ - name: taosx-data
+ mountPath: /var/lib/taos
+ spec:
+ storageClassName: "local-path"
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: "10Gi"
+ - name: taosx-log
+ mountPath: /var/log/taos/
+ spec:
+ storageClassName: "local-path"
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: "10Gi"
+ files:
+ - name: taosx
+ mountPath: /etc/taos/taosx.toml
+ content: |-
+ # TAOSX configuration in TOML format.
+ [monitor]
+ # FQDN of taosKeeper service, no default value
+ fqdn = "localhost"
+ # How often to send metrics to taosKeeper, default every 10 seconds. Only value from 1 to 10 is valid.
+ interval = 10
+
+ # log configuration
+ [log]
+ # All log files are stored in this directory
+ #
+ #path = "/var/log/taos" # on linux/macOS
+
+ # log filter level
+ #
+ #level = "info"
+
+ # Compress archived log files or not
+ #
+ #compress = false
+
+ # The number of log files retained by the current explorer server instance in the `path` directory
+ #
+ #rotationCount = 30
+
+ # Rotate when the log file reaches this size
+ #
+ #rotationSize = "1GB"
+
+ # Log downgrade when the remaining disk space reaches this size, only logging `ERROR` level logs
+ #
+ #reservedDiskSize = "1GB"
+
+ # The number of days log files are retained
+ #
+ #keepDays = 30
+
+ # Watching the configuration file for log.loggers changes, default to true.
+ #
+ #watching = true
+
+ # Customize the log output level of modules, and changes will be applied after modifying the file when log.watching is enabled
+ #
+ # ## Examples:
+ #
+ # crate = "error"
+ # crate::mod1::mod2 = "info"
+ # crate::span[field=value] = "warn"
+ #
+ [log.loggers]
+ #"actix_server::accept" = "warn"
+ #"taos::query" = "warn"
+```
+
+You can see that the configuration is similar to the first one, with the addition of the taosx configuration. The taosx service is configured with similar storage configuration as the server service, and the server service is configured with 3 replicas. Since the taosx service is not cluster-ready, it is recommended to deploy it separately.
+
+After configuring the values.yaml file, use the following command to install the TDengine Chart:
+
+```shell
+helm install replica3 tdengine-enterprise-3.5.0.tgz -f values.yaml
+```
+
+You can use the following command to expose the explorer service to the outside world with ingress:
+
+```shell
+tee replica3-ingress.yaml <
diff --git a/docs/en/08-operation/04-maintenance.md b/docs/en/08-operation/04-maintenance.md
index 2f6afbf9df..5712a710a1 100644
--- a/docs/en/08-operation/04-maintenance.md
+++ b/docs/en/08-operation/04-maintenance.md
@@ -17,7 +17,9 @@ TDengine is designed for various writing scenarios, and many of these scenarios
```sql
COMPACT DATABASE db_name [start with 'XXXX'] [end with 'YYYY'];
-SHOW COMPACT [compact_id];
+COMPACT [db_name.]VGROUPS IN (vgroup_id1, vgroup_id2, ...) [start with 'XXXX'] [end with 'YYYY'];
+SHOW COMPACTS;
+SHOW COMPACT compact_id;
KILL COMPACT compact_id;
```
diff --git a/docs/en/08-operation/09-backup.md b/docs/en/08-operation/09-backup.md
index 6457719d88..ee0a56c497 100644
--- a/docs/en/08-operation/09-backup.md
+++ b/docs/en/08-operation/09-backup.md
@@ -1,14 +1,19 @@
---
title: Data Backup and Restoration
-slug: /operations-and-maintenance/back-up-and-restore-data
+slug: /operations-and-maintenance/data-backup-and-restoration
---
-To prevent data loss and accidental deletions, TDengine provides comprehensive features such as data backup, restoration, fault tolerance, and real-time synchronization of remote data to ensure the security of data storage. This section briefly explains the backup and restoration functions.
+import Image from '@theme/IdealImage';
+import imgBackup from '../assets/data-backup-01.png';
+
+You can back up the data in your TDengine cluster and restore it in the event that data is lost or damaged.
## Data Backup and Restoration Using taosdump
taosdump is an open-source tool that supports backing up data from a running TDengine cluster and restoring the backed-up data to the same or another running TDengine cluster. taosdump can back up the database as a logical data unit or back up data records within a specified time period in the database. When using taosdump, you can specify the directory path for data backup. If no directory path is specified, taosdump will default to backing up the data in the current directory.
+### Back Up Data with taosdump
+
Below is an example of using taosdump to perform data backup.
```shell
@@ -19,6 +24,8 @@ After executing the above command, taosdump will connect to the TDengine cluster
When using taosdump, if the specified storage path already contains data files, taosdump will prompt the user and exit immediately to avoid data overwriting. This means the same storage path can only be used for one backup. If you see related prompts, please operate carefully to avoid accidental data loss.
+### Restore Data with taosdump
+
To restore data files from a specified local file path to a running TDengine cluster, you can execute the taosdump command by specifying command-line parameters and the data file path. Below is an example code for taosdump performing data restoration.
```shell
@@ -27,25 +34,62 @@ taosdump -i /file/path -h localhost -P 6030
After executing the above command, taosdump will connect to the TDengine cluster at localhost:6030 and restore the data files from /file/path to the TDengine cluster.
-## Data Backup and Restoration Based on TDengine Enterprise
+## Data Backup and Restoration in TDengine Enterprise
-TDengine Enterprise provides an efficient incremental backup feature, with the following process.
+TDengine Enterprise implements incremental backup and recovery of data by using data subscription. The backup and recovery functions of TDengine Enterprise include the following concepts:
-Step 1, access the taosExplorer service through a browser, usually at the port 6060 of the IP address where the TDengine cluster is located, such as `http://localhost:6060`.
+1. Incremental data backup: Based on TDengine's data subscription function, all data changes of **the backup object** (including: addition, modification, deletion, metadata change, etc.) are recorded to generate a backup file.
+2. Data recovery: Use the backup file generated by incremental data backup to restore **the backup object** to a specified point in time.
+3. Backup object: The object that the user backs up can be a **database** or a **supertable**.
+4. Backup plan: The user creates a periodic backup task for the backup object. The backup plan starts at a specified time point and periodically executes the backup task at intervals of **the backup cycle**. Each backup task generates a **backup point**.
+5. Backup point: Each time a backup task is executed, a set of backup files is generated. They correspond to a time point, called **a backup point**. The first backup point is called **the initial backup point**.
+6. Restore task: The user selects a backup point in the backup plan and creates a restore task. The restore task starts from **the initial backup point** and plays back the data changes in **the backup file** one by one until the specified backup point ends.
-Step 2, in the "System Management - Backup" page of the taosExplorer service, add a new data backup task, fill in the database name and backup storage file path in the task configuration information, and start the data backup after completing the task creation. Three parameters can be configured on the data backup configuration page:
+### Incremental Backup Example
-- Backup cycle: Required, configure the time interval for each data backup execution, which can be selected from a dropdown menu to execute once every day, every 7 days, or every 30 days. After configuration, a data backup task will be initiated at 0:00 of the corresponding backup cycle;
-- Database: Required, configure the name of the database to be backed up (the database's wal_retention_period parameter must be greater than 0);
-- Directory: Required, configure the path in the running environment of taosX where the data will be backed up, such as `/root/data_backup`;
+
+
+Figure 1. Incremental backup process
+
-Step 3, after the data backup task is completed, find the created data backup task in the list of created tasks on the same page, and directly perform one-click restoration to restore the data to TDengine.
+1. The user creates a backup plan to execute the backup task every 1 day starting from 2024-08-27 00:00:00.
+2. The first backup task was executed at 2024-08-27 00:00:00, generating an initial backup point.
+3. After that, the backup task is executed every 1 day, and multiple backup points are generated.
+4. Users can select a backup point and create a restore task.
+5. The restore task starts from the initial backup point, applies the backup points one by one, and restores to the specified backup point.
-Compared to taosdump, if the same data is backed up multiple times in the specified storage path, since TDengine Enterprise not only has high backup efficiency but also implements incremental processing, each backup task will be completed quickly. As taosdump always performs full backups, TDengine Enterprise can significantly reduce system overhead in scenarios with large data volumes and is more convenient.
+### Back Up Data in TDengine Enterprise
-**Common Error Troubleshooting**
+1. In a web browser, open the taosExplorer interface for TDengine. This interface is located on port 6060 on the hostname or IP address running TDengine.
+2. In the main menu on the left, click **Management** and open the **Backup** tab.
+3. Under **Backup Plan**, click **Create New Backup** to define your backup plan.
+ 1. **Database:** Select the database that you want to backup.
+ 2. **Super Table:** (Optional) Select the supertable that you want to backup. If you do not select a supertable, all data in the database is backed up.
+ 3. **Next execution time:** Enter the date and time when you want to perform the initial backup for this backup plan. If you specify a date and time in the past, the initial backup is performed immediately.
+ 4. **Backup Cycle:** Specify how often you want to perform incremental backups. The value of this field must be less than the value of `WAL_RETENTION_PERIOD` for the specified database.
+ 5. **Retry times:** Enter how many times you want to retry a backup task that has failed, provided that the specific failure might be resolved by retrying.
+ 6. **Retry interval:** Enter the delay in seconds between retry attempts.
+ 7. **Directory:** Enter the full path of the directory in which you want to store backup files.
+ 8. **Backup file max size:** Enter the maximum size of a single backup file. If the total size of your backup exceeds this number, the backup is split into multiple files.
+ 9. **Compression level:** Select **fastest** for the fastest performance but lowest compression ratio, **best** for the highest compression ratio but slowest performance, or **balanced** for a combination of performance and compression.
-1. If the task fails to start and reports the following error:
+4. Click **Confirm** to create the backup plan.
+
+You can view your backup plans and modify, clone, or delete them using the buttons in the **Operation** columns. Click **Refresh** to update the status of your plans. Note that you must stop a backup plan before you can delete it. You can also click **View** in the **Backup File** column to view the backup record points and files created by each plan.
+
+### Restore Data in TDengine Enterprise
+
+1. Locate the backup plan containing data that you want to restore and click **View** in the **Backup File** column.
+2. Determine the backup record point to which you want to restore and click the Restore icon in the **Operation** column.
+3. Select the backup file timestamp and target database and click **Confirm**.
+
+
+
+## Troubleshooting
+
+### Port Access Exception
+
+A port access exception is indicated by the following error:
```text
Error: tmq to td task exec error
@@ -54,9 +98,11 @@ Caused by:
[0x000B] Unable to establish connection
```
-The cause is an abnormal connection to the data source port, check whether the data source FQDN is connected and whether port 6030 is accessible.
+If you encounter this error, check whether the data source FQDN is connected and whether port 6030 is listening and accessible.
-2. If using a WebSocket connection, the task fails to start and reports the following error:
+### Connection Issues
+
+A connection issue is indicated by the task failing to start and reporting the following error:
```text
Error: tmq to td task exec error
@@ -67,15 +113,16 @@ Caused by:
2: failed to lookup address information: Temporary failure in name resolution
```
-When using a WebSocket connection, you may encounter various types of errors, which can be seen after "Caused by". Here are some possible errors:
+The following are some possible errors for WebSocket connections:
+- "Temporary failure in name resolution": DNS resolution error. Check whether the specified IP address or FQDN can be accessed normally.
+- "IO error: Connection refused (os error 111)": Port access failed. Check whether the port is configured correctly and is enabled and accessible.
+- "IO error: received corrupt message": Message parsing failed. This may be because SSL was enabled using the WSS method, but the source port is not supported.
+- "HTTP error: *": Confirm that you are connecting to the correct taosAdapter port and that your LSB/Nginx/Proxy has been configured correctly.
+- "WebSocket protocol error: Handshake not finished": WebSocket connection error. This is typically caused by an incorrectly configured port.
-- "Temporary failure in name resolution": DNS resolution error, check if the IP or FQDN can be accessed normally.
-- "IO error: Connection refused (os error 111)": Port access failure, check if the port is configured correctly or if it is open and accessible.
-- "IO error: received corrupt message": Message parsing failed, possibly because SSL was enabled using wss, but the source port does not support it.
-- "HTTP error: *": Possibly connected to the wrong taosAdapter port or incorrect LSB/Nginx/Proxy configuration.
-- "WebSocket protocol error: Handshake not finished": WebSocket connection error, usually because the configured port is incorrect.
+### WAL Configuration
-3. If the task fails to start and reports the following error:
+A WAL configuration issue is indicated by the task failing to start and reporting the following error:
```text
Error: tmq to td task exec error
@@ -84,11 +131,8 @@ Caused by:
[0x038C] WAL retention period is zero
```
-This is due to incorrect WAL configuration in the source database, preventing subscription.
-
-Solution:
-Modify the data WAL configuration:
+To resolve this error, modify the WAL retention period for the affected database:
```sql
-alter database test wal_retention_period 3600;
+ALTER DATABASE test WAL_RETENTION_PERIOD 3600;
```
diff --git a/docs/en/14-reference/01-components/03-taosadapter.md b/docs/en/14-reference/01-components/03-taosadapter.md
index 0d454756dd..ce021e9c60 100644
--- a/docs/en/14-reference/01-components/03-taosadapter.md
+++ b/docs/en/14-reference/01-components/03-taosadapter.md
@@ -13,45 +13,108 @@ import Icinga2 from "../../assets/resources/_icinga2.mdx"
import TCollector from "../../assets/resources/_tcollector.mdx"
taosAdapter is a companion tool for TDengine, serving as a bridge and adapter between the TDengine cluster and applications. It provides an easy and efficient way to ingest data directly from data collection agents (such as Telegraf, StatsD, collectd, etc.). It also offers InfluxDB/OpenTSDB compatible data ingestion interfaces, allowing InfluxDB/OpenTSDB applications to be seamlessly ported to TDengine.
+The TDengine connectors for various languages communicate with TDengine through the WebSocket interface, so taosAdapter must be installed.
-taosAdapter offers the following features:
-
-- RESTful interface
-- Compatible with InfluxDB v1 write interface
-- Compatible with OpenTSDB JSON and telnet format writing
-- Seamless connection to Telegraf
-- Seamless connection to collectd
-- Seamless connection to StatsD
-- Supports Prometheus remote_read and remote_write
-- Retrieves the VGroup ID of the virtual node group (VGroup) where the table is located
-
-## taosAdapter Architecture Diagram
+The architecture diagram is as follows:
Figure 1. taosAdapter architecture
-## Deployment Methods for taosAdapter
+## Feature List
-### Installing taosAdapter
+The taosAdapter provides the following features:
+
+- WebSocket Interface:
+ Supports executing SQL, schemaless writing, parameter binding, and data subscription through the WebSocket protocol.
+- Compatible with InfluxDB v1 write interface:
+ [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
+- Compatible with OpenTSDB JSON and telnet format writing:
+ - [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
+ - [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
+- collectd data writing:
+ collectd is a system statistics collection daemon, visit [https://collectd.org/](https://collectd.org/) for more information.
+- StatsD data writing:
+ StatsD is a simple yet powerful daemon for gathering statistics. Visit [https://github.com/statsd/statsd](https://github.com/statsd/statsd) for more information.
+- icinga2 OpenTSDB writer data writing:
+ icinga2 is a software for collecting check results metrics and performance data. Visit [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) for more information.
+- TCollector data writing:
+ TCollector is a client process that collects data from local collectors and pushes it to OpenTSDB. Visit [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) for more information.
+- node_exporter data collection and writing:
+ node_exporter is an exporter of machine metrics. Visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
+- Supports Prometheus remote_read and remote_write:
+ remote_read and remote_write are Prometheus's data read-write separation cluster solutions. Visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
+- RESTful API:
+ [RESTful API](../../client-libraries/rest-api/)
+
+### WebSocket Interface
+
+Through the WebSocket interface of taosAdapter, connectors in various languages can achieve SQL execution, schemaless writing, parameter binding, and data subscription functionalities. Refer to the [Development Guide](../../../developer-guide/connecting-to-tdengine/#websocket-connection) for more details.
+
+### Compatible with InfluxDB v1 write interface
+
+You can use any client that supports the HTTP protocol to write data in InfluxDB compatible format to TDengine by accessing the Restful interface URL `http://:6041/influxdb/v1/write`.
+
+Supported InfluxDB parameters are as follows:
+
+- `db` specifies the database name used by TDengine
+- `precision` the time precision used by TDengine
+- `u` TDengine username
+- `p` TDengine password
+- `ttl` the lifespan of automatically created subtables, determined by the TTL parameter of the first data entry in the subtable, which cannot be updated. For more information, please refer to the TTL parameter in the [table creation document](../../sql-manual/manage-tables/).
+
+Note: Currently, InfluxDB's token authentication method is not supported, only Basic authentication and query parameter verification are supported.
+Example: `curl --request POST http://127.0.0.1:6041/influxdb/v1/write?db=test --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"`
+
+### Compatible with OpenTSDB JSON and telnet format writing
+
+You can use any client that supports the HTTP protocol to write data in OpenTSDB compatible format to TDengine by accessing the Restful interface URL `http://:6041/`. The available endpoints are as follows:
+
+```text
+/opentsdb/v1/put/json/
+/opentsdb/v1/put/telnet/
+```
+
+### collectd data writing
+
+
+
+### StatsD data writing
+
+
+
+### icinga2 OpenTSDB writer data writing
+
+
+
+### TCollector data writing
+
+
+
+### node_exporter data collection and writing
+
+An exporter used by Prometheus that exposes hardware and operating system metrics from \*NIX kernels
+
+- Enable configuration of taosAdapter node_exporter.enable
+- Set the relevant configuration for node_exporter
+- Restart taosAdapter
+
+### Supports Prometheus remote_read and remote_write
+
+
+
+### RESTful API
+
+You can use any client that supports the HTTP protocol to write data to TDengine or query data from TDengine by accessing the RESTful interface URL `http://:6041/rest/sql`. For details, please refer to the [REST API documentation](../../client-libraries/rest-api/).
+
+## Installation
taosAdapter is part of the TDengine server software. If you are using TDengine server, you do not need any additional steps to install taosAdapter. If you need to deploy taosAdapter separately from the TDengine server, you should install the complete TDengine on that server to install taosAdapter. If you need to compile taosAdapter from source code, you can refer to the [Build taosAdapter](https://github.com/taosdata/taosadapter/blob/3.0/BUILD.md) document.
-### Starting/Stopping taosAdapter
+After the installation is complete, you can start the taosAdapter service using the command `systemctl start taosadapter`.
-On Linux systems, the taosAdapter service is managed by default by systemd. Use the command `systemctl start taosadapter` to start the taosAdapter service. Use the command `systemctl stop taosadapter` to stop the taosAdapter service.
-
-### Removing taosAdapter
-
-Use the command rmtaos to remove the TDengine server software, including taosAdapter.
-
-### Upgrading taosAdapter
-
-taosAdapter and TDengine server need to use the same version. Please upgrade taosAdapter by upgrading the TDengine server.
-taosAdapter deployed separately from taosd must be upgraded by upgrading the TDengine server on its server.
-
-## taosAdapter Parameter List
+## Configuration
taosAdapter supports configuration through command-line parameters, environment variables, and configuration files. The default configuration file is `/etc/taos/taosadapter.toml`.
@@ -80,6 +143,7 @@ Usage of taosAdapter:
--instanceId int instance ID. Env "TAOS_ADAPTER_INSTANCE_ID" (default 32)
--log.compress whether to compress old log. Env "TAOS_ADAPTER_LOG_COMPRESS"
--log.enableRecordHttpSql whether to record http sql. Env "TAOS_ADAPTER_LOG_ENABLE_RECORD_HTTP_SQL"
+ --log.keepDays uint log retention days, must be a positive integer. Env "TAOS_ADAPTER_LOG_KEEP_DAYS" (default 30)
--log.level string log level (trace debug info warning error). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
--log.path string log path. Env "TAOS_ADAPTER_LOG_PATH" (default "/var/log/taos")
--log.reservedDiskSize string reserved disk size for log dir (KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_RESERVED_DISK_SIZE" (default "1GB")
@@ -90,6 +154,8 @@ Usage of taosAdapter:
--log.sqlRotationSize string record sql log rotation size(KB MB GB), must be a positive integer. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_SIZE" (default "1GB")
--log.sqlRotationTime duration record sql log rotation time. Env "TAOS_ADAPTER_LOG_SQL_ROTATION_TIME" (default 24h0m0s)
--logLevel string log level (trace debug info warning error). Env "TAOS_ADAPTER_LOG_LEVEL" (default "info")
+ --maxAsyncConcurrentLimit int The maximum number of concurrent calls allowed for the C asynchronous method. 0 means use CPU core count. Env "TAOS_ADAPTER_MAX_ASYNC_CONCURRENT_LIMIT"
+ --maxSyncConcurrentLimit int The maximum number of concurrent calls allowed for the C synchronized method. 0 means use CPU core count. Env "TAOS_ADAPTER_MAX_SYNC_CONCURRENT_LIMIT"
--monitor.collectDuration duration Set monitor duration. Env "TAOS_ADAPTER_MONITOR_COLLECT_DURATION" (default 3s)
--monitor.disable Whether to disable monitoring. Env "TAOS_ADAPTER_MONITOR_DISABLE" (default true)
--monitor.identity string The identity of the current instance, or 'hostname:port' if it is empty. Env "TAOS_ADAPTER_MONITOR_IDENTITY"
@@ -118,7 +184,7 @@ Usage of taosAdapter:
--opentsdb_telnet.flushInterval duration opentsdb_telnet flush interval (0s means not valid) . Env "TAOS_ADAPTER_OPENTSDB_TELNET_FLUSH_INTERVAL"
--opentsdb_telnet.maxTCPConnections int max tcp connections. Env "TAOS_ADAPTER_OPENTSDB_TELNET_MAX_TCP_CONNECTIONS" (default 250)
--opentsdb_telnet.password string opentsdb_telnet password. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PASSWORD" (default "taosdata")
- --opentsdb_telnet.ports ints opentsdb_telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
+ --opentsdb_telnet.ports ints opentsdb telnet tcp port. Env "TAOS_ADAPTER_OPENTSDB_TELNET_PORTS" (default [6046,6047,6048,6049])
--opentsdb_telnet.tcpKeepAlive enable tcp keep alive. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TCP_KEEP_ALIVE"
--opentsdb_telnet.ttl int opentsdb_telnet data ttl. Env "TAOS_ADAPTER_OPENTSDB_TELNET_TTL"
--opentsdb_telnet.user string opentsdb_telnet user. Env "TAOS_ADAPTER_OPENTSDB_TELNET_USER" (default "root")
@@ -131,6 +197,9 @@ Usage of taosAdapter:
--prometheus.enable enable prometheus. Env "TAOS_ADAPTER_PROMETHEUS_ENABLE" (default true)
--restfulRowLimit int restful returns the maximum number of rows (-1 means no limit). Env "TAOS_ADAPTER_RESTFUL_ROW_LIMIT" (default -1)
--smlAutoCreateDB Whether to automatically create db when writing with schemaless. Env "TAOS_ADAPTER_SML_AUTO_CREATE_DB"
+ --ssl.certFile string ssl cert file path. Env "TAOS_ADAPTER_SSL_CERT_FILE"
+ --ssl.enable enable ssl. Env "TAOS_ADAPTER_SSL_ENABLE"
+ --ssl.keyFile string ssl key file path. Env "TAOS_ADAPTER_SSL_KEY_FILE"
--statsd.allowPendingMessages int statsd allow pending messages. Env "TAOS_ADAPTER_STATSD_ALLOW_PENDING_MESSAGES" (default 50000)
--statsd.db string statsd db name. Env "TAOS_ADAPTER_STATSD_DB" (default "statsd")
--statsd.deleteCounters statsd delete counter cache after gather. Env "TAOS_ADAPTER_STATSD_DELETE_COUNTERS" (default true)
@@ -157,27 +226,44 @@ Usage of taosAdapter:
-V, --version Print the version and exit
```
-Note:
-When using a browser to make API calls, please set the following Cross-Origin Resource Sharing (CORS) parameters according to the actual situation:
+See the example configuration file at [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/3.0/example/config/taosadapter.toml).
-```text
-AllowAllOrigins
-AllowOrigins
-AllowHeaders
-ExposeHeaders
-AllowCredentials
-AllowWebSockets
-```
+### Cross-Origin Configuration
+When making API calls from the browser, please configure the following Cross-Origin Resource Sharing (CORS) parameters based on your actual situation:
+
+- **`cors.allowAllOrigins`**: Whether to allow all origins to access, default is true.
+- **`cors.allowOrigins`**: A comma-separated list of origins allowed to access. Multiple origins can be specified.
+- **`cors.allowHeaders`**: A comma-separated list of request headers allowed for cross-origin access. Multiple headers can be specified.
+- **`cors.exposeHeaders`**: A comma-separated list of response headers exposed for cross-origin access. Multiple headers can be specified.
+- **`cors.allowCredentials`**: Whether to allow cross-origin requests to include user credentials, such as cookies, HTTP authentication information, or client SSL certificates.
+- **`cors.allowWebSockets`**: Whether to allow WebSockets connections.
+
If you are not making API calls through a browser, you do not need to worry about these configurations.
+The above configurations take effect for the following interfaces:
+
+* RESTful API requests
+* WebSocket API requests
+* InfluxDB v1 write interface
+* OpenTSDB HTTP write interface
+
For details about the CORS protocol, please refer to: [https://www.w3.org/wiki/CORS_Enabled](https://www.w3.org/wiki/CORS_Enabled) or [https://developer.mozilla.org/docs/Web/HTTP/CORS](https://developer.mozilla.org/docs/Web/HTTP/CORS).
-See the example configuration file at [example/config/taosadapter.toml](https://github.com/taosdata/taosadapter/blob/3.0/example/config/taosadapter.toml).
+### Connection Pool Configuration
-### Connection Pool Parameters Description
+taosAdapter uses a connection pool to manage connections to TDengine, improving concurrency performance and resource utilization. The connection pool configuration applies to the following interfaces, and these interfaces share a single connection pool:
-When using the RESTful API, the system will manage TDengine connections through a connection pool. The connection pool can be configured with the following parameters:
+* RESTful API requests
+* InfluxDB v1 write interface
+* OpenTSDB JSON and telnet format writing
+* Telegraf data writing
+* collectd data writing
+* StatsD data writing
+* node_exporter data collection writing
+* Prometheus remote_read and remote_write
+
+The configuration parameters for the connection pool are as follows:
- **`pool.maxConnect`**: The maximum number of connections allowed in the pool, default is twice the number of CPU cores. It is recommended to keep the default setting.
- **`pool.maxIdle`**: The maximum number of idle connections in the pool, default is the same as `pool.maxConnect`. It is recommended to keep the default setting.
@@ -185,153 +271,136 @@ When using the RESTful API, the system will manage TDengine connections through
- **`pool.waitTimeout`**: Timeout for obtaining a connection from the pool, default is set to 60 seconds. If a connection is not obtained within the timeout period, HTTP status code 503 will be returned. This parameter is available starting from version 3.3.3.0.
- **`pool.maxWait`**: The maximum number of requests waiting to get a connection in the pool, default is 0, which means no limit. When the number of queued requests exceeds this value, new requests will return HTTP status code 503. This parameter is available starting from version 3.3.3.0.
-## Feature List
+### HTTP Response Code Configuration
-- RESTful API
- [RESTful API](../../client-libraries/rest-api/)
-- Compatible with InfluxDB v1 write interface
- [https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/](https://docs.influxdata.com/influxdb/v2.0/reference/api/influxdb-1x/write/)
-- Compatible with OpenTSDB JSON and telnet format writing
- - [http://opentsdb.net/docs/build/html/api_http/put.html](http://opentsdb.net/docs/build/html/api_http/put.html)
- - [http://opentsdb.net/docs/build/html/api_telnet/put.html](http://opentsdb.net/docs/build/html/api_telnet/put.html)
-- Seamless connection with collectd.
- collectd is a system statistics collection daemon, visit [https://collectd.org/](https://collectd.org/) for more information.
-- Seamless connection with StatsD.
- StatsD is a simple yet powerful daemon for gathering statistics. Visit [https://github.com/statsd/statsd](https://github.com/statsd/statsd) for more information.
-- Seamless connection with icinga2.
- icinga2 is a software for collecting check results metrics and performance data. Visit [https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer](https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer) for more information.
-- Seamless connection with tcollector.
- TCollector is a client process that collects data from local collectors and pushes it to OpenTSDB. Visit [http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html](http://opentsdb.net/docs/build/html/user_guide/utilities/tcollector.html) for more information.
-- Seamless connection with node_exporter.
- node_exporter is an exporter of machine metrics. Visit [https://github.com/prometheus/node_exporter](https://github.com/prometheus/node_exporter) for more information.
-- Supports Prometheus remote_read and remote_write.
- remote_read and remote_write are Prometheus's data read-write separation cluster solutions. Visit [https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis](https://prometheus.io/blog/2019/10/10/remote-read-meets-streaming/#remote-apis) for more information.
-- Get the VGroup ID of the virtual node group (VGroup) where the table is located.
+taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, it will return different HTTP status codes based on the error code returned by C. See [HTTP Response Codes](../../client-libraries/rest-api/) for details.
-## Interface
+This configuration only affects the **RESTful interface**.
-### TDengine RESTful Interface
+**Parameter Description**
-You can use any client that supports the HTTP protocol to write data to TDengine or query data from TDengine by accessing the RESTful interface URL `http://:6041/rest/sql`. For details, please refer to the [REST API documentation](../../client-libraries/rest-api/).
+- **`httpCodeServerError`**:
+ - **When set to `true`**: Map the error code returned by the C interface to the corresponding HTTP status code.
+ - **When set to `false`**: Regardless of the error returned by the C interface, always return the HTTP status code `200` (default value).
-### InfluxDB
+### Memory limit configuration
-You can use any client that supports the HTTP protocol to write data in InfluxDB compatible format to TDengine by accessing the Restful interface URL `http://:6041/influxdb/v1/write`.
+taosAdapter will monitor the memory usage during its operation and adjust it through two thresholds. The valid value range is an integer from 1 to 100, and the unit is the percentage of system physical memory.
-Supported InfluxDB parameters are as follows:
+This configuration only affects the following interfaces:
-- `db` specifies the database name used by TDengine
-- `precision` the time precision used by TDengine
-- `u` TDengine username
-- `p` TDengine password
-- `ttl` the lifespan of automatically created subtables, determined by the TTL parameter of the first data entry in the subtable, which cannot be updated. For more information, please refer to the TTL parameter in the [table creation document](../../sql-manual/manage-tables/).
+* RESTful interface request
+* InfluxDB v1 write interface
+* OpenTSDB HTTP write interface
+* Prometheus remote_read and remote_write interfaces
-Note: Currently, InfluxDB's token authentication method is not supported, only Basic authentication and query parameter verification are supported.
-Example: curl --request POST `http://127.0.0.1:6041/influxdb/v1/write?db=test` --user "root:taosdata" --data-binary "measurement,host=host1 field1=2i,field2=2.0 1577836800000000000"
+**Parameter Description**
-### OpenTSDB
+- **`pauseQueryMemoryThreshold`**:
+ - When memory usage exceeds this threshold, taosAdapter will stop processing query requests.
+ - Default value: `70` (i.e. 70% of system physical memory).
+- **`pauseAllMemoryThreshold`**:
+ - When memory usage exceeds this threshold, taosAdapter will stop processing all requests (including writes and queries).
+ - Default value: `80` (i.e. 80% of system physical memory).
-You can use any client that supports the HTTP protocol to write data in OpenTSDB compatible format to TDengine by accessing the Restful interface URL `http://:6041/`. EndPoint as follows:
+When memory usage falls below the threshold, taosAdapter will automatically resume the corresponding function.
-```text
-/opentsdb/v1/put/json/
-/opentsdb/v1/put/telnet/
-```
+**HTTP return content:**
-### collectd
+- **When `pauseQueryMemoryThreshold` is exceeded**:
+ - HTTP status code: `503`
+ - Return content: `"query memory exceeds threshold"`
-
+- **When `pauseAllMemoryThreshold` is exceeded**:
+ - HTTP status code: `503`
+ - Return content: `"memory exceeds threshold"`
-### StatsD
+**Status check interface:**
-
+The memory status of taosAdapter can be checked through the following interface:
+- **Normal status**: `http://:6041/-/ping` returns `code 200`.
+- **Memory exceeds threshold**:
+ - If the memory exceeds `pauseAllMemoryThreshold`, `code 503` is returned.
+ - If the memory exceeds `pauseQueryMemoryThreshold` and the request parameter contains `action=query`, `code 503` is returned.
-### icinga2 OpenTSDB writer
+**Related configuration parameters:**
-
+- **`monitor.collectDuration`**: memory monitoring interval, default value is `3s`, environment variable is `TAOS_ADAPTER_MONITOR_COLLECT_DURATION`.
+- **`monitor.incgroup`**: whether taosAdapter is running in a container (set to `true` when running in a container), default value is `false`, environment variable is `TAOS_ADAPTER_MONITOR_INCGROUP`.
+- **`monitor.pauseQueryMemoryThreshold`**: memory threshold (percentage) above which query requests are paused, default value is `70`, environment variable is `TAOS_ADAPTER_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD`.
+- **`monitor.pauseAllMemoryThreshold`**: memory threshold (percentage) above which both query and write requests are paused, default value is `80`, environment variable is `TAOS_ADAPTER_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD`.
-### TCollector
+You can make corresponding adjustments based on the specific project application scenario and operation strategy, and it is recommended to use operation monitoring software to monitor the system memory status in a timely manner. The load balancer can also check the operation status of taosAdapter through this interface.
-
+### Schemaless write create DB configuration
-### node_exporter
+Starting from **version 3.0.4.0**, taosAdapter provides the parameter `smlAutoCreateDB` to control whether to automatically create a database (DB) when writing to the schemaless protocol.
-An exporter used by Prometheus that exposes hardware and operating system metrics from \*NIX kernels
+The `smlAutoCreateDB` parameter only affects the following interfaces:
-- Enable configuration of taosAdapter node_exporter.enable
-- Set the relevant configuration for node_exporter
-- Restart taosAdapter
+- InfluxDB v1 write interface
+- OpenTSDB JSON and telnet format writing
+- Telegraf data writing
+- collectd data writing
+- StatsD data writing
+- node_exporter data writing
-### prometheus
+**Parameter Description**
-
+- **`smlAutoCreateDB`**:
+ - **When set to `true`**: When writing to the schemaless protocol, if the target database does not exist, taosAdapter will automatically create the database.
+ - **When set to `false`**: The user needs to manually create the database, otherwise the write will fail (default value).
-### Getting the VGroup ID of a table
+### Number of results returned configuration
-You can send a POST request to the HTTP interface `http://:/rest/sql//vgid` to get the VGroup ID of a table.
-The body should be a JSON array of multiple table names.
+taosAdapter provides the parameter `restfulRowLimit` to control the number of results returned by the HTTP interface.
-Example: Get the VGroup ID for the database power and tables d_bind_1 and d_bind_2.
+The `restfulRowLimit` parameter only affects the return results of the following interfaces:
+
+- RESTful interface
+- Prometheus remote_read interface
+
+**Parameter Description**
+
+- **`restfulRowLimit`**:
+ - **When set to a positive integer**: The number of results returned by the interface will not exceed this value.
+ - **When set to `-1`**: The number of results returned by the interface is unlimited (default value).
+
+### Log configuration
+
+1. You can set the taosAdapter log output detail level by setting the --log.level parameter or the environment variable TAOS_ADAPTER_LOG_LEVEL. Valid values include: panic, fatal, error, warn, warning, info, debug, and trace.
+2. Starting from version **3.3.5.0**, taosAdapter supports dynamically modifying the log level through an HTTP interface. Users can adjust the log level by sending an HTTP PUT request to the `/config` interface. This interface uses the same authentication method as the `/rest/sql` interface, and the configuration key-value pairs must be passed in JSON format in the request body.
+
+The following is an example of setting the log level to debug through the curl command:
```shell
-curl --location 'http://127.0.0.1:6041/rest/sql/power/vgid' \
---user 'root:taosdata' \
---data '["d_bind_1","d_bind_2"]'
+curl --location --request PUT 'http://127.0.0.1:6041/config' \
+-u root:taosdata \
+--data '{"log.level": "debug"}'
```
-response:
+## Service Management
-```json
-{"code":0,"vgIDs":[153,152]}
-```
+### Starting/Stopping taosAdapter
-## Memory Usage Optimization Methods
+On Linux systems, the taosAdapter service is managed by default by systemd. Use the command `systemctl start taosadapter` to start the taosAdapter service. Use the command `systemctl stop taosadapter` to stop the taosAdapter service.
-taosAdapter will monitor its memory usage during operation and adjust it through two thresholds. Valid values range from -1 to 100 as a percentage of system physical memory.
+### Upgrading taosAdapter
-- pauseQueryMemoryThreshold
-- pauseAllMemoryThreshold
+taosAdapter and TDengine server need to use the same version. Please upgrade taosAdapter by upgrading the TDengine server.
+taosAdapter deployed separately from taosd must be upgraded by upgrading the TDengine server on its server.
-When the pauseQueryMemoryThreshold threshold is exceeded, it stops processing query requests.
+### Removing taosAdapter
-HTTP return content:
+Use the command rmtaos to remove the TDengine server software, including taosAdapter.
-- code 503
-- body "query memory exceeds threshold"
+## Monitoring Metrics
-When the pauseAllMemoryThreshold threshold is exceeded, it stops processing all write and query requests.
+Currently, taosAdapter only collects monitoring indicators for RESTful/WebSocket related requests. There are no monitoring indicators for other interfaces.
-HTTP return content:
+taosAdapter reports monitoring indicators to taosKeeper, which will be written to the monitoring database by taosKeeper. The default is the `log` database, which can be modified in the taoskeeper configuration file. The following is a detailed introduction to these monitoring indicators.
-- code 503
-- body "memory exceeds threshold"
-
-When memory falls below the threshold, the corresponding functions are resumed.
-
-Status check interface `http://:6041/-/ping`
-
-- Normally returns `code 200`
-- No parameters If memory exceeds pauseAllMemoryThreshold, it will return `code 503`
-- Request parameter `action=query` If memory exceeds either pauseQueryMemoryThreshold or pauseAllMemoryThreshold, it will return `code 503`
-
-Corresponding configuration parameters
-
-```text
- monitor.collectDuration Monitoring interval Environment variable "TAOS_MONITOR_COLLECT_DURATION" (default value 3s)
- monitor.incgroup Whether it is running in cgroup (set to true in containers) Environment variable "TAOS_MONITOR_INCGROUP"
- monitor.pauseAllMemoryThreshold Memory threshold for stopping inserts and queries Environment variable "TAOS_MONITOR_PAUSE_ALL_MEMORY_THRESHOLD" (default value 80)
- monitor.pauseQueryMemoryThreshold Memory threshold for stopping queries Environment variable "TAOS_MONITOR_PAUSE_QUERY_MEMORY_THRESHOLD" (default value 70)
-```
-
-You can adjust according to the specific project application scenario and operational strategy, and it is recommended to use operational monitoring software to monitor the system memory status in real time. Load balancers can also check the running status of taosAdapter through this interface.
-
-## taosAdapter Monitoring Metrics
-
-taosAdapter collects monitoring metrics related to REST/WebSocket requests. These monitoring metrics are reported to taosKeeper, which writes them into the monitoring database, by default the `log` database, which can be modified in the taoskeeper configuration file. Below is a detailed introduction to these monitoring metrics.
-
-### adapter_requests table
-
-`adapter_requests` records taosadapter monitoring data.
+The `adapter_requests` table records taosAdapter monitoring data, and the fields are as follows:
| field | type | is_tag | comment |
| :--------------- | :----------- | :----- | :---------------------------------------- |
@@ -354,32 +423,10 @@ taosAdapter collects monitoring metrics related to REST/WebSocket requests. Thes
| endpoint | VARCHAR | | request endpoint |
| req_type | NCHAR | tag | request type: 0 for REST, 1 for WebSocket |
-## Result Return Limit
-taosAdapter controls the number of results returned through the parameter `restfulRowLimit`, -1 represents no limit, default is no limit.
+## Changes after upgrading httpd to taosAdapter
-This parameter controls the return of the following interfaces
-
-- `http://:6041/rest/sql`
-- `http://:6041/prometheus/v1/remote_read/:db`
-
-## Configure HTTP Return Codes
-
-taosAdapter uses the parameter `httpCodeServerError` to set whether to return a non-200 HTTP status code when the C interface returns an error. When set to true, it will return different HTTP status codes based on the error code returned by C. See [HTTP Response Codes](../../client-libraries/rest-api/) for details.
-
-## Configure Automatic DB Creation for Schemaless Writes
-
-Starting from version 3.0.4.0, taosAdapter provides the parameter `smlAutoCreateDB` to control whether to automatically create a DB when writing via the schemaless protocol. The default value is false, which does not automatically create a DB, and requires the user to manually create a DB before performing schemaless writes.
-
-## Troubleshooting
-
-You can check the running status of taosAdapter with the command `systemctl status taosadapter`.
-
-You can also adjust the detail level of taosAdapter log output by setting the --logLevel parameter or the environment variable TAOS_ADAPTER_LOG_LEVEL. Valid values include: panic, fatal, error, warn, warning, info, debug, and trace.
-
-## How to Migrate from Older Versions of TDengine to taosAdapter
-
-In TDengine server version 2.2.x.x or earlier, the taosd process included an embedded HTTP service. As mentioned earlier, taosAdapter is a standalone software managed by systemd, having its own process. Moreover, there are some differences in configuration parameters and behaviors between the two, as shown in the table below:
+In TDengine server version 2.2.x.x or earlier, the taosd process included an embedded HTTP service (httpd). As mentioned earlier, taosAdapter is a standalone software managed by systemd, having its own process. Moreover, there are some differences in configuration parameters and behaviors between the two, as shown in the table below:
| **#** | **embedded httpd** | **taosAdapter** | **comment** |
| ----- | ------------------- | ---------------------------------------------------------- | ------------------------------------------------------------ |
diff --git a/docs/en/14-reference/05-connector/14-java.md b/docs/en/14-reference/05-connector/14-java.md
index 0c7fbbdda4..43b219bf4e 100644
--- a/docs/en/14-reference/05-connector/14-java.md
+++ b/docs/en/14-reference/05-connector/14-java.md
@@ -33,6 +33,7 @@ The JDBC driver implementation for TDengine strives to be consistent with relati
| taos-jdbcdriver Version | Major Changes | TDengine Version |
| ----------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------ |
+| 3.5.3 | Support unsigned data types in WebSocket connections. | - |
| 3.5.2 | Fixed WebSocket result set free bug. | - |
| 3.5.1 | Fixed the getObject issue in data subscription. | - |
| 3.5.0 | 1. Optimized the performance of WebSocket connection parameter binding, supporting parameter binding queries using binary data. 2. Optimized the performance of small queries in WebSocket connection. 3. Added support for setting time zone and app info on WebSocket connection. | 3.3.5.0 and higher |
@@ -128,24 +129,27 @@ Please refer to the specific error codes:
TDengine currently supports timestamp, numeric, character, boolean types, and the corresponding Java type conversions are as follows:
-| TDengine DataType | JDBCType |
-| ----------------- | ------------------ |
-| TIMESTAMP | java.sql.Timestamp |
-| INT | java.lang.Integer |
-| BIGINT | java.lang.Long |
-| FLOAT | java.lang.Float |
-| DOUBLE | java.lang.Double |
-| SMALLINT | java.lang.Short |
-| TINYINT | java.lang.Byte |
-| BOOL | java.lang.Boolean |
-| BINARY | byte array |
-| NCHAR | java.lang.String |
-| JSON | java.lang.String |
-| VARBINARY | byte[] |
-| GEOMETRY | byte[] |
+| TDengine DataType | JDBCType | Remark |
+| ----------------- | -------------------- | --------------------------------------- |
+| TIMESTAMP | java.sql.Timestamp | |
+| BOOL | java.lang.Boolean | |
+| TINYINT | java.lang.Byte | |
+| TINYINT UNSIGNED | java.lang.Short | only supported in WebSocket connections |
+| SMALLINT | java.lang.Short | |
+| SMALLINT UNSIGNED | java.lang.Integer | only supported in WebSocket connections |
+| INT | java.lang.Integer | |
+| INT UNSIGNED | java.lang.Long | only supported in WebSocket connections |
+| BIGINT | java.lang.Long | |
+| BIGINT UNSIGNED | java.math.BigInteger | only supported in WebSocket connections |
+| FLOAT | java.lang.Float | |
+| DOUBLE | java.lang.Double | |
+| BINARY | byte array | |
+| NCHAR | java.lang.String | |
+| JSON | java.lang.String | only supported in tags |
+| VARBINARY | byte[] | |
+| GEOMETRY | byte[] | |
-**Note**: JSON type is only supported in tags.
-Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead.
+**Note**: Due to historical reasons, the BINARY type in TDengine is not truly binary data and is no longer recommended. Please use VARBINARY type instead.
GEOMETRY type is binary data in little endian byte order, complying with the WKB standard. For more details, please refer to [Data Types](../../sql-manual/data-types/)
For the WKB standard, please refer to [Well-Known Binary (WKB)](https://libgeos.org/specifications/wkb/)
For the Java connector, you can use the jts library to conveniently create GEOMETRY type objects, serialize them, and write to TDengine. Here is an example [Geometry Example](https://github.com/taosdata/TDengine/blob/main/docs/examples/java/src/main/java/com/taos/example/GeometryDemo.java)
diff --git a/docs/en/28-releases/03-notes/index.md b/docs/en/28-releases/03-notes/index.md
index 5ff7350e6c..8e89765865 100644
--- a/docs/en/28-releases/03-notes/index.md
+++ b/docs/en/28-releases/03-notes/index.md
@@ -3,13 +3,9 @@ title: Release Notes
slug: /release-history/release-notes
---
-[3.3.5.0](./3-3-5-0/)
+```mdx-code-block
+import DocCardList from '@theme/DocCardList';
+import {useCurrentSidebarCategory} from '@docusaurus/theme-common';
-[3.3.5.2](./3.3.5.2)
-[3.3.4.8](./3-3-4-8/)
-
-[3.3.4.3](./3-3-4-3/)
-
-[3.3.3.0](./3-3-3-0/)
-
-[3.3.2.0](./3-3-2-0/)
+
+```
\ No newline at end of file
diff --git a/docs/en/assets/data-backup-01.png b/docs/en/assets/data-backup-01.png
new file mode 100644
index 0000000000..b05c571f6c
Binary files /dev/null and b/docs/en/assets/data-backup-01.png differ
diff --git a/docs/en/assets/resources/_collectd.mdx b/docs/en/assets/resources/_collectd.mdx
index 5512f10f64..c39950e4f3 100644
--- a/docs/en/assets/resources/_collectd.mdx
+++ b/docs/en/assets/resources/_collectd.mdx
@@ -1,84 +1,84 @@
-### Configuring taosAdapter
+#### Configuring taosAdapter
Method to configure taosAdapter to receive collectd data:
- Enable the configuration item in the taosAdapter configuration file (default location is /etc/taos/taosadapter.toml)
-```
-...
-[opentsdb_telnet]
-enable = true
-maxTCPConnections = 250
-tcpKeepAlive = false
-dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
-ports = [6046, 6047, 6048, 6049]
-user = "root"
-password = "taosdata"
-...
-```
+ ```toml
+ ...
+ [opentsdb_telnet]
+ enable = true
+ maxTCPConnections = 250
+ tcpKeepAlive = false
+ dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
+ ports = [6046, 6047, 6048, 6049]
+ user = "root"
+ password = "taosdata"
+ ...
+ ```
-The default database name written by taosAdapter is `collectd`, but you can also modify the dbs item in the taosAdapter configuration file to specify a different name. Fill in user and password with the actual TDengine configuration values. After modifying the configuration file, taosAdapter needs to be restarted.
+ The default database name written by taosAdapter is `collectd`, but you can also modify the dbs item in the taosAdapter configuration file to specify a different name. Fill in user and password with the actual TDengine configuration values. After modifying the configuration file, taosAdapter needs to be restarted.
- You can also use taosAdapter command line parameters or set environment variables to start, to enable taosAdapter to receive collectd data, for more details please refer to the taosAdapter reference manual.
-### Configuring collectd
+#### Configuring collectd
collectd uses a plugin mechanism that can write the collected monitoring data to different data storage software in various forms. TDengine supports direct collection plugins and write_tsdb plugins.
-#### Configuring to receive direct collection plugin data
+1. **Configuring to receive direct collection plugin data**
-Modify the related configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).
+ Modify the related configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).
-```text
-LoadPlugin network
-
- Server "" ""
-
-```
+ ```xml
+ LoadPlugin network
+ <Plugin network>
+   Server "<taosAdapter's host>" "<port for collectd direct>"
+ </Plugin>
+ ```
-Where \ should be filled with the domain name or IP address of the server running taosAdapter. \ should be filled with the port used by taosAdapter to receive collectd data (default is 6045).
+ Where \<taosAdapter's host\> should be filled with the domain name or IP address of the server running taosAdapter. \<port for collectd direct\> should be filled with the port used by taosAdapter to receive collectd data (default is 6045).
-Example as follows:
+ Example as follows:
-```text
-LoadPlugin network
-
- Server "127.0.0.1" "6045"
-
-```
+ ```xml
+ LoadPlugin network
+ <Plugin network>
+   Server "127.0.0.1" "6045"
+ </Plugin>
+ ```
-#### Configuring write_tsdb plugin data
+2. **Configuring write_tsdb plugin data**
-Modify the related configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).
+ Modify the related configuration items in the collectd configuration file (default location /etc/collectd/collectd.conf).
-```text
-LoadPlugin write_tsdb
-
-
- Host ""
- Port ""
- ...
-
-
-```
+ ```xml
+ LoadPlugin write_tsdb
+ <Plugin write_tsdb>
+   <Node>
+     Host "<taosAdapter's host>"
+     Port "<port for collectd write_tsdb plugin>"
+     ...
+   </Node>
+ </Plugin>
+ ```
-Where \ should be filled with the domain name or IP address of the server running taosAdapter. \ should be filled with the port used by taosAdapter to receive collectd write_tsdb plugin data (default is 6047).
+ Where \<taosAdapter's host\> should be filled with the domain name or IP address of the server running taosAdapter. \<port for collectd write_tsdb plugin\> should be filled with the port used by taosAdapter to receive collectd write_tsdb plugin data (default is 6047).
-```text
-LoadPlugin write_tsdb
-
-
- Host "127.0.0.1"
- Port "6047"
- HostTags "status=production"
- StoreRates false
- AlwaysAppendDS false
-
-
-```
+ ```xml
+ LoadPlugin write_tsdb
+ <Plugin write_tsdb>
+   <Node>
+     Host "127.0.0.1"
+     Port "6047"
+     HostTags "status=production"
+     StoreRates false
+     AlwaysAppendDS false
+   </Node>
+ </Plugin>
+ ```
-Then restart collectd:
+ Then restart collectd:
-```
-systemctl restart collectd
-```
+ ```shell
+ systemctl restart collectd
+ ```
diff --git a/docs/en/assets/resources/_icinga2.mdx b/docs/en/assets/resources/_icinga2.mdx
index 5829c2ee64..4f8ba7c3c4 100644
--- a/docs/en/assets/resources/_icinga2.mdx
+++ b/docs/en/assets/resources/_icinga2.mdx
@@ -1,43 +1,43 @@
-### Configuring taosAdapter
+#### Configuring taosAdapter
Method to configure taosAdapter to receive icinga2 data:
- Enable the configuration item in the taosAdapter configuration file (default location /etc/taos/taosadapter.toml)
-```
-...
-[opentsdb_telnet]
-enable = true
-maxTCPConnections = 250
-tcpKeepAlive = false
-dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
-ports = [6046, 6047, 6048, 6049]
-user = "root"
-password = "taosdata"
-...
-```
+ ```toml
+ ...
+ [opentsdb_telnet]
+ enable = true
+ maxTCPConnections = 250
+ tcpKeepAlive = false
+ dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
+ ports = [6046, 6047, 6048, 6049]
+ user = "root"
+ password = "taosdata"
+ ...
+ ```
-The default database name written by taosAdapter is `icinga2`, but you can also modify the dbs item in the taosAdapter configuration file to specify a different name. Fill in user and password with the actual TDengine configuration values. taosAdapter needs to be restarted after modifications.
+ The default database name written by taosAdapter is `icinga2`, but you can also modify the dbs item in the taosAdapter configuration file to specify a different name. Fill in user and password with the actual TDengine configuration values. taosAdapter needs to be restarted after modifications.
- You can also use taosAdapter command line parameters or set environment variables to enable taosAdapter to receive icinga2 data, for more details please refer to the taosAdapter reference manual
-### Configuring icinga2
+#### Configuring icinga2
- Enable icinga2's opentsdb-writer (reference link https://icinga.com/docs/icinga-2/latest/doc/14-features/#opentsdb-writer)
- Modify the configuration file `/etc/icinga2/features-enabled/opentsdb.conf` filling in \<taosAdapter's host\> with the domain name or IP address of the server running taosAdapter, \<port for icinga2\> with the corresponding port supported by taosAdapter for receiving icinga2 data (default is 6048)
-```
-object OpenTsdbWriter "opentsdb" {
- host = ""
- port =
-}
-```
+ ```c
+ object OpenTsdbWriter "opentsdb" {
+ host = "<taosAdapter's host>"
+ port = <port for icinga2>
+ }
+ ```
-Example file:
+ Example file:
-```
-object OpenTsdbWriter "opentsdb" {
- host = "127.0.0.1"
- port = 6048
-}
-```
+ ```c
+ object OpenTsdbWriter "opentsdb" {
+ host = "127.0.0.1"
+ port = 6048
+ }
+ ```
diff --git a/docs/en/assets/resources/_prometheus.mdx b/docs/en/assets/resources/_prometheus.mdx
index 241c7ac5fe..79b36e440e 100644
--- a/docs/en/assets/resources/_prometheus.mdx
+++ b/docs/en/assets/resources/_prometheus.mdx
@@ -1,18 +1,18 @@
Configuring Prometheus is done by editing the Prometheus configuration file `prometheus.yml` (default location `/etc/prometheus/prometheus.yml`).
-### Configure Third-Party Database Address
+#### Configure Third-Party Database Address
Set the `remote_read url` and `remote_write url` to point to the domain name or IP address of the server running the taosAdapter service, the REST service port (taosAdapter defaults to 6041), and the name of the database you want to write to in TDengine, ensuring the URLs are formatted as follows:
- remote_read url: `http://<taosAdapter's host>:<REST service port>/prometheus/v1/remote_read/`
- remote_write url: `http://<taosAdapter's host>:<REST service port>/prometheus/v1/remote_write/`
-### Configure Basic Authentication
+#### Configure Basic Authentication
- username: \<TDengine's username\>
- password: \<TDengine's password\>
-### Example configuration of remote_write and remote_read in the prometheus.yml file
+#### Example configuration of remote_write and remote_read in the prometheus.yml file
```yaml
remote_write:
diff --git a/docs/en/assets/resources/_statsd.mdx b/docs/en/assets/resources/_statsd.mdx
index 290821bdff..1b9eb88d32 100644
--- a/docs/en/assets/resources/_statsd.mdx
+++ b/docs/en/assets/resources/_statsd.mdx
@@ -1,46 +1,46 @@
-### Configure taosAdapter
+#### Configure taosAdapter
Method to configure taosAdapter to receive StatsD data:
- Enable the configuration item in the taosAdapter configuration file (default location /etc/taos/taosadapter.toml)
-```
-...
-[statsd]
-enable = true
-port = 6044
-db = "statsd"
-user = "root"
-password = "taosdata"
-worker = 10
-gatherInterval = "5s"
-protocol = "udp"
-maxTCPConnections = 250
-tcpKeepAlive = false
-allowPendingMessages = 50000
-deleteCounters = true
-deleteGauges = true
-deleteSets = true
-deleteTimings = true
-...
-```
+ ```toml
+ ...
+ [statsd]
+ enable = true
+ port = 6044
+ db = "statsd"
+ user = "root"
+ password = "taosdata"
+ worker = 10
+ gatherInterval = "5s"
+ protocol = "udp"
+ maxTCPConnections = 250
+ tcpKeepAlive = false
+ allowPendingMessages = 50000
+ deleteCounters = true
+ deleteGauges = true
+ deleteSets = true
+ deleteTimings = true
+ ...
+ ```
-The default database name written by taosAdapter is `statsd`, but you can also modify the db item in the taosAdapter configuration file to specify a different name. Fill in the user and password with the actual TDengine configuration values. After modifying the configuration file, taosAdapter needs to be restarted.
+ The default database name written by taosAdapter is `statsd`, but you can also modify the db item in the taosAdapter configuration file to specify a different name. Fill in the user and password with the actual TDengine configuration values. After modifying the configuration file, taosAdapter needs to be restarted.
- You can also use taosAdapter command line arguments or set environment variables to enable the taosAdapter to receive StatsD data. For more details, please refer to the taosAdapter reference manual.
-### Configure StatsD
+#### Configure StatsD
To use StatsD, download its [source code](https://github.com/statsd/statsd). Modify its configuration file according to the example file `exampleConfig.js` found in the root directory of the local source code download. Replace \<taosAdapter's host\> with the domain name or IP address of the server running taosAdapter, and \<port for StatsD\> with the port that taosAdapter uses to receive StatsD data (default is 6044).
-```
+```text
Add to the backends section "./backends/repeater"
Add to the repeater section { host:'<taosAdapter's host>', port: <port for StatsD> }
```
Example configuration file:
-```
+```js
{
port: 8125
, backends: ["./backends/repeater"]
@@ -50,7 +50,7 @@ port: 8125
After adding the following content, start StatsD (assuming the configuration file is modified to config.js).
-```
+```shell
npm install
node stats.js config.js &
```
diff --git a/docs/en/assets/resources/_tcollector.mdx b/docs/en/assets/resources/_tcollector.mdx
index 25bc1ed028..c4a123cd85 100644
--- a/docs/en/assets/resources/_tcollector.mdx
+++ b/docs/en/assets/resources/_tcollector.mdx
@@ -1,27 +1,27 @@
-### Configuring taosAdapter
+#### Configuring taosAdapter
To configure taosAdapter to receive data from TCollector:
- Enable the configuration in the taosAdapter configuration file (default location /etc/taos/taosadapter.toml)
-```
-...
-[opentsdb_telnet]
-enable = true
-maxTCPConnections = 250
-tcpKeepAlive = false
-dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
-ports = [6046, 6047, 6048, 6049]
-user = "root"
-password = "taosdata"
-...
-```
+ ```toml
+ ...
+ [opentsdb_telnet]
+ enable = true
+ maxTCPConnections = 250
+ tcpKeepAlive = false
+ dbs = ["opentsdb_telnet", "collectd", "icinga2", "tcollector"]
+ ports = [6046, 6047, 6048, 6049]
+ user = "root"
+ password = "taosdata"
+ ...
+ ```
-The default database name that taosAdapter writes to is `tcollector`, but you can specify a different name by modifying the dbs option in the taosAdapter configuration file. Fill in the user and password with the actual values configured in TDengine. After modifying the configuration file, taosAdapter needs to be restarted.
+ The default database name that taosAdapter writes to is `tcollector`, but you can specify a different name by modifying the dbs option in the taosAdapter configuration file. Fill in the user and password with the actual values configured in TDengine. After modifying the configuration file, taosAdapter needs to be restarted.
- You can also use taosAdapter command line arguments or set environment variables to enable the taosAdapter to receive tcollector data. For more details, please refer to the taosAdapter reference manual.
-### Configuring TCollector
+#### Configuring TCollector
To use TCollector, download its [source code](https://github.com/OpenTSDB/tcollector). Its configuration options are in its source code. Note: There are significant differences between different versions of TCollector; this only refers to the latest code in the current master branch (git commit: 37ae920).
@@ -29,7 +29,7 @@ Modify the contents of `collectors/etc/config.py` and `tcollector.py`. Change th
Example of git diff output for source code modifications:
-```
+```diff
index e7e7a1c..ec3e23c 100644
--- a/collectors/etc/config.py
+++ b/collectors/etc/config.py
diff --git a/docs/examples/JDBC/JDBCDemo/pom.xml b/docs/examples/JDBC/JDBCDemo/pom.xml
index a80f7a9cdf..7b8a64e2c7 100644
--- a/docs/examples/JDBC/JDBCDemo/pom.xml
+++ b/docs/examples/JDBC/JDBCDemo/pom.xml
@@ -19,7 +19,7 @@
com.taosdata.jdbctaos-jdbcdriver
- 3.5.2
+ 3.5.3org.locationtech.jts
diff --git a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml
index 20da9bfae8..12e1721112 100644
--- a/docs/examples/JDBC/SpringJdbcTemplate/pom.xml
+++ b/docs/examples/JDBC/SpringJdbcTemplate/pom.xml
@@ -47,7 +47,7 @@
com.taosdata.jdbctaos-jdbcdriver
- 3.5.2
+ 3.5.3
diff --git a/docs/examples/JDBC/connectionPools/pom.xml b/docs/examples/JDBC/connectionPools/pom.xml
index 86e6fb04a4..f30d7c7f94 100644
--- a/docs/examples/JDBC/connectionPools/pom.xml
+++ b/docs/examples/JDBC/connectionPools/pom.xml
@@ -18,7 +18,7 @@
com.taosdata.jdbctaos-jdbcdriver
- 3.5.2
+ 3.5.3
diff --git a/docs/examples/JDBC/consumer-demo/pom.xml b/docs/examples/JDBC/consumer-demo/pom.xml
index e439c22224..fa1b4b93a3 100644
--- a/docs/examples/JDBC/consumer-demo/pom.xml
+++ b/docs/examples/JDBC/consumer-demo/pom.xml
@@ -17,7 +17,7 @@
com.taosdata.jdbctaos-jdbcdriver
- 3.5.2
+ 3.5.3com.google.guava
diff --git a/docs/examples/JDBC/mybatisplus-demo/pom.xml b/docs/examples/JDBC/mybatisplus-demo/pom.xml
index 8b4777bfb0..30b3f7792b 100644
--- a/docs/examples/JDBC/mybatisplus-demo/pom.xml
+++ b/docs/examples/JDBC/mybatisplus-demo/pom.xml
@@ -5,7 +5,7 @@
org.springframework.bootspring-boot-starter-parent
- 2.4.0
+ 2.7.18com.taosdata.example
@@ -18,6 +18,18 @@
1.8
+
+
+
+ com.baomidou
+ mybatis-plus-bom
+ 3.5.10.1
+ pom
+ import
+
+
+
+
org.springframework.boot
@@ -28,14 +40,21 @@
lomboktrue
+
com.baomidoumybatis-plus-boot-starter
- 3.1.2
+
+
+
+
+ com.baomidou
+ mybatis-plus-jsqlparser-4.9com.h2databaseh2
+ 2.3.232runtime
@@ -47,7 +66,7 @@
com.taosdata.jdbctaos-jdbcdriver
- 3.5.2
+ 3.5.3
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java
index a6ac7f7fc2..f7c57e031d 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/config/MybatisPlusConfig.java
@@ -1,34 +1,26 @@
package com.taosdata.example.mybatisplusdemo.config;
-import com.baomidou.mybatisplus.extension.plugins.PaginationInterceptor;
+
+import com.baomidou.mybatisplus.annotation.DbType;
+import com.baomidou.mybatisplus.extension.plugins.MybatisPlusInterceptor;
+import com.baomidou.mybatisplus.extension.plugins.inner.PaginationInnerInterceptor;
+import org.mybatis.spring.annotation.MapperScan;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
+import org.springframework.transaction.annotation.EnableTransactionManagement;
+@EnableTransactionManagement
@Configuration
+@MapperScan("com.taosdata.example.mybatisplusdemo.mapper")
public class MybatisPlusConfig {
-
- /** mybatis 3.4.1 pagination config start ***/
-// @Bean
-// public MybatisPlusInterceptor mybatisPlusInterceptor() {
-// MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
-// interceptor.addInnerInterceptor(new PaginationInnerInterceptor());
-// return interceptor;
-// }
-
-// @Bean
-// public ConfigurationCustomizer configurationCustomizer() {
-// return configuration -> configuration.setUseDeprecatedExecutor(false);
-// }
-
+ /**
+ * 添加分页插件
+ */
@Bean
- public PaginationInterceptor paginationInterceptor() {
-// return new PaginationInterceptor();
- PaginationInterceptor paginationInterceptor = new PaginationInterceptor();
- //TODO: mybatis-plus do not support TDengine, use postgresql Dialect
- paginationInterceptor.setDialectType("postgresql");
-
- return paginationInterceptor;
+ public MybatisPlusInterceptor mybatisPlusInterceptor() {
+ MybatisPlusInterceptor interceptor = new MybatisPlusInterceptor();
+ interceptor.addInnerInterceptor(new PaginationInnerInterceptor(DbType.MYSQL));
+ return interceptor;
}
-
}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java
index 441c340886..f3eab76d8c 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapper.java
@@ -5,6 +5,7 @@ import com.taosdata.example.mybatisplusdemo.domain.Meters;
import org.apache.ibatis.annotations.Insert;
import org.apache.ibatis.annotations.Param;
import org.apache.ibatis.annotations.Update;
+import org.apache.ibatis.executor.BatchResult;
import java.util.List;
@@ -15,17 +16,6 @@ public interface MetersMapper extends BaseMapper {
@Insert("insert into meters (tbname, ts, groupid, location, current, voltage, phase) values(#{tbname}, #{ts}, #{groupid}, #{location}, #{current}, #{voltage}, #{phase})")
int insertOne(Meters one);
-
- @Insert({
- ""
- })
- int insertBatch(@Param("list") List metersList);
-
@Update("drop stable if exists meters")
void dropTable();
}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java
index 3e122524d5..a43a40a89e 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapper.java
@@ -11,9 +11,6 @@ public interface TemperatureMapper extends BaseMapper {
@Update("CREATE TABLE if not exists temperature(ts timestamp, temperature float) tags(location nchar(64), tbIndex int)")
int createSuperTable();
- @Update("create table #{tbName} using temperature tags( #{location}, #{tbindex})")
- int createTable(@Param("tbName") String tbName, @Param("location") String location, @Param("tbindex") int tbindex);
-
@Update("drop table if exists temperature")
void dropSuperTable();
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java
index 1f0338db34..04d26202a0 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapper.java
@@ -10,7 +10,7 @@ public interface WeatherMapper extends BaseMapper {
@Update("CREATE TABLE if not exists weather(ts timestamp, temperature float, humidity int, location nchar(100))")
int createTable();
- @Insert("insert into weather (ts, temperature, humidity, location) values(#{ts}, #{temperature}, #{humidity}, #{location})")
+ @Insert("insert into weather (ts, temperature, humidity, location) values(#{ts}, #{temperature}, #{humidity}, #{location, jdbcType=NCHAR})")
int insertOne(Weather one);
@Update("drop table if exists weather")
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/service/DatabaseConnectionService.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/service/DatabaseConnectionService.java
new file mode 100644
index 0000000000..1c1a62cee5
--- /dev/null
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/service/DatabaseConnectionService.java
@@ -0,0 +1,19 @@
+package com.taosdata.example.mybatisplusdemo.service;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import javax.sql.DataSource;
+import java.sql.Connection;
+import java.sql.SQLException;
+
+@Service
+public class DatabaseConnectionService {
+
+ @Autowired
+ private DataSource dataSource;
+
+ public Connection getConnection() throws SQLException {
+ return dataSource.getConnection();
+ }
+}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/service/TemperatureService.java b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/service/TemperatureService.java
new file mode 100644
index 0000000000..6b81eac479
--- /dev/null
+++ b/docs/examples/JDBC/mybatisplus-demo/src/main/java/com/taosdata/example/mybatisplusdemo/service/TemperatureService.java
@@ -0,0 +1,23 @@
+package com.taosdata.example.mybatisplusdemo.service;
+
+import org.springframework.beans.factory.annotation.Autowired;
+import org.springframework.stereotype.Service;
+
+import java.sql.Connection;
+import java.sql.SQLException;
+import java.sql.Statement;
+
+@Service
+public class TemperatureService {
+ @Autowired
+ private DatabaseConnectionService databaseConnectionService;
+
+ public void createTable(String tableName, String location, int tbIndex) throws SQLException {
+
+
+ try (Connection connection = databaseConnectionService.getConnection();
+ Statement statement = connection.createStatement()) {
+ statement.executeUpdate("create table " + tableName + " using temperature tags( '" + location +"', " + tbIndex + ")");
+ }
+ }
+}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java
index 2d8458e9d9..8405777ab0 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java
+++ b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/MetersMapperTest.java
@@ -5,6 +5,7 @@ import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.taosdata.example.mybatisplusdemo.domain.Meters;
import com.taosdata.example.mybatisplusdemo.domain.Weather;
+import org.apache.ibatis.executor.BatchResult;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -18,6 +19,8 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Random;
+import static java.sql.Statement.SUCCESS_NO_INFO;
+
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class MetersMapperTest {
@@ -63,8 +66,19 @@ public class MetersMapperTest {
metersList.add(one);
}
- int affectRows = mapper.insertBatch(metersList);
- Assert.assertEquals(100, affectRows);
+ List affectRowsList = mapper.insert(metersList, 10000);
+
+ long totalAffectedRows = 0;
+ for (BatchResult batchResult : affectRowsList) {
+ int[] updateCounts = batchResult.getUpdateCounts();
+ for (int status : updateCounts) {
+ if (status == SUCCESS_NO_INFO) {
+ totalAffectedRows++;
+ }
+ }
+ }
+
+ Assert.assertEquals(100, totalAffectedRows);
}
@Test
@@ -93,7 +107,7 @@ public class MetersMapperTest {
@Test
public void testSelectCount() {
- int count = mapper.selectCount(null);
+ long count = mapper.selectCount(null);
// Assert.assertEquals(5, count);
System.out.println(count);
}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java
index 4d9dbf8d2f..016466ffec 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java
+++ b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/TemperatureMapperTest.java
@@ -4,6 +4,7 @@ import com.baomidou.mybatisplus.core.conditions.query.QueryWrapper;
import com.baomidou.mybatisplus.core.metadata.IPage;
import com.baomidou.mybatisplus.extension.plugins.pagination.Page;
import com.taosdata.example.mybatisplusdemo.domain.Temperature;
+import com.taosdata.example.mybatisplusdemo.service.TemperatureService;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
@@ -13,6 +14,8 @@ import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.test.context.junit4.SpringJUnit4ClassRunner;
+import java.sql.ResultSet;
+import java.sql.SQLException;
import java.sql.Timestamp;
import java.util.HashMap;
import java.util.List;
@@ -22,18 +25,20 @@ import java.util.Random;
@RunWith(SpringJUnit4ClassRunner.class)
@SpringBootTest
public class TemperatureMapperTest {
+ @Autowired
+ private TemperatureService temperatureService;
private static Random random = new Random(System.currentTimeMillis());
private static String[] locations = {"北京", "上海", "深圳", "广州", "杭州"};
@Before
- public void before() {
+ public void before() throws SQLException {
mapper.dropSuperTable();
// create table temperature
mapper.createSuperTable();
// create table t_X using temperature
for (int i = 0; i < 10; i++) {
- mapper.createTable("t" + i, locations[random.nextInt(locations.length)], i);
+ temperatureService.createTable("t" + i, locations[i % locations.length], i);
}
// insert into table
int affectRows = 0;
@@ -107,7 +112,7 @@ public class TemperatureMapperTest {
* **/
@Test
public void testSelectCount() {
- int count = mapper.selectCount(null);
+ long count = mapper.selectCount(null);
Assert.assertEquals(10, count);
}
diff --git a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java
index dba8abd1ed..19f2e70f3d 100644
--- a/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java
+++ b/docs/examples/JDBC/mybatisplus-demo/src/test/java/com/taosdata/example/mybatisplusdemo/mapper/WeatherMapperTest.java
@@ -52,7 +52,7 @@ public class WeatherMapperTest {
one.setTemperature(random.nextFloat() * 50);
one.setHumidity(random.nextInt(100));
one.setLocation("望京");
- int affectRows = mapper.insert(one);
+ int affectRows = mapper.insertOne(one);
Assert.assertEquals(1, affectRows);
}
@@ -82,7 +82,7 @@ public class WeatherMapperTest {
@Test
public void testSelectCount() {
- int count = mapper.selectCount(null);
+ long count = mapper.selectCount(null);
// Assert.assertEquals(5, count);
System.out.println(count);
}
diff --git a/docs/examples/JDBC/springbootdemo/pom.xml b/docs/examples/JDBC/springbootdemo/pom.xml
index 825f6fb9c2..098f3d1f39 100644
--- a/docs/examples/JDBC/springbootdemo/pom.xml
+++ b/docs/examples/JDBC/springbootdemo/pom.xml
@@ -5,7 +5,7 @@
org.springframework.bootspring-boot-starter-parent
- 2.6.15
+ 2.7.18com.taosdata.example
@@ -34,7 +34,7 @@
org.mybatis.spring.bootmybatis-spring-boot-starter
- 2.1.1
+ 2.3.2
@@ -70,7 +70,7 @@
com.taosdata.jdbctaos-jdbcdriver
- 3.5.2
+ 3.5.3
diff --git a/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml b/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml
index 4899ec4654..c142031e97 100644
--- a/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml
+++ b/docs/examples/JDBC/springbootdemo/src/main/java/com/taosdata/example/springbootdemo/dao/WeatherMapper.xml
@@ -50,13 +50,6 @@
), groupId int)
-
- create table if not exists test.t#{groupId} using test.weather tags
- (
- #{location},
- #{groupId}
- )
-
- insert into test.t#{groupId} (ts, temperature, humidity, note, bytes)
- values (#{ts}, ${temperature}, ${humidity}, #{note}, #{bytes})
+ insert into test.t${groupId} (ts, temperature, humidity, note, bytes)
+ values (#{ts}, #{temperature}, #{humidity}, #{note}, #{bytes})